From bcc9261a03ad4ffba68c46e4b2fc3d23f4e8921b Mon Sep 17 00:00:00 2001 From: Ajay Mandlekar Date: Thu, 4 Jul 2024 11:53:30 -0700 Subject: [PATCH 01/11] initial commit of data generation code --- README.md | 18 +- {mimicgen_envs => mimicgen}/__init__.py | 37 +- mimicgen/configs/__init__.py | 7 + mimicgen/configs/config.py | 153 ++++ mimicgen/configs/robosuite.py | 679 ++++++++++++++++ mimicgen/configs/task_spec.py | 139 ++++ .../utils => mimicgen/datagen}/__init__.py | 2 +- mimicgen/datagen/data_generator.py | 409 ++++++++++ mimicgen/datagen/datagen_info.py | 78 ++ mimicgen/datagen/selection_strategy.py | 306 +++++++ mimicgen/datagen/waypoint.py | 415 ++++++++++ .../env_interfaces}/__init__.py | 2 +- mimicgen/env_interfaces/base.py | 205 +++++ mimicgen/env_interfaces/robosuite.py | 749 ++++++++++++++++++ .../robosuite => mimicgen/envs}/__init__.py | 2 +- .../envs/robosuite}/__init__.py | 2 +- .../envs/robosuite/coffee.py | 10 +- .../envs/robosuite/hammer_cleanup.py | 8 +- .../envs/robosuite/kitchen.py | 8 +- .../envs/robosuite/mug_cleanup.py | 12 +- .../envs/robosuite/nut_assembly.py | 4 +- .../envs/robosuite/pick_place.py | 2 +- .../envs/robosuite/single_arm_env_mg.py | 10 +- .../envs/robosuite/stack.py | 4 +- .../envs/robosuite/threading.py | 6 +- .../envs/robosuite/three_piece_assembly.py | 6 +- mimicgen/exps/templates/robosuite/coffee.json | 70 ++ .../robosuite/coffee_preparation.json | 112 +++ .../templates/robosuite/hammer_cleanup.json | 84 ++ .../exps/templates/robosuite/kitchen.json | 140 ++++ .../exps/templates/robosuite/mug_cleanup.json | 84 ++ .../templates/robosuite/nut_assembly.json | 100 +++ .../exps/templates/robosuite/pick_place.json | 153 ++++ mimicgen/exps/templates/robosuite/square.json | 72 ++ mimicgen/exps/templates/robosuite/stack.json | 74 ++ .../exps/templates/robosuite/stack_three.json | 106 +++ .../exps/templates/robosuite/threading.json | 70 ++ .../robosuite/three_piece_assembly.json | 98 +++ .../robosuite/assets/objects/coffee_base.xml | 
0 .../robosuite/assets/objects/coffee_body.xml | 0 .../robosuite/assets/objects/coffee_lid.xml | 0 .../robosuite/assets/objects/coffee_pod.xml | 0 .../robosuite/assets/objects/drawer.xml | 0 .../robosuite/assets/objects/drawer_long.xml | 0 .../assets/objects/meshes/coffee_base.stl | Bin .../assets/objects/meshes/coffee_body.stl | Bin .../assets/objects/meshes/coffee_lid.stl | Bin .../assets/objects/meshes/coffee_pod.stl | Bin .../assets/objects/serving_region.xml | 0 .../model_normalized_collision_0.obj | 0 .../model_normalized_collision_1.obj | 0 .../model_normalized_collision_10.obj | 0 .../model_normalized_collision_11.obj | 0 .../model_normalized_collision_12.obj | 0 .../model_normalized_collision_13.obj | 0 .../model_normalized_collision_14.obj | 0 .../model_normalized_collision_15.obj | 0 .../model_normalized_collision_16.obj | 0 .../model_normalized_collision_17.obj | 0 .../model_normalized_collision_18.obj | 0 .../model_normalized_collision_19.obj | 0 .../model_normalized_collision_2.obj | 0 .../model_normalized_collision_20.obj | 0 .../model_normalized_collision_21.obj | 0 .../model_normalized_collision_22.obj | 0 .../model_normalized_collision_23.obj | 0 .../model_normalized_collision_24.obj | 0 .../model_normalized_collision_25.obj | 0 .../model_normalized_collision_26.obj | 0 .../model_normalized_collision_27.obj | 0 .../model_normalized_collision_28.obj | 0 .../model_normalized_collision_29.obj | 0 .../model_normalized_collision_3.obj | 0 .../model_normalized_collision_30.obj | 0 .../model_normalized_collision_31.obj | 0 .../model_normalized_collision_4.obj | 0 .../model_normalized_collision_5.obj | 0 .../model_normalized_collision_6.obj | 0 .../model_normalized_collision_7.obj | 0 .../model_normalized_collision_8.obj | 0 .../model_normalized_collision_9.obj | 0 .../shapenet_core/mugs/128ecbc1/meta.json | 0 .../shapenet_core/mugs/128ecbc1/model.xml | 0 .../mugs/128ecbc1/visual/material_0.mtl | 0 .../128ecbc1/visual/model_normalized_0.obj | 0 
.../128ecbc1/visual/model_normalized_1.obj | 0 .../128ecbc1/visual/model_normalized_2.obj | 0 .../model_normalized_collision_0.obj | 0 .../model_normalized_collision_1.obj | 0 .../model_normalized_collision_10.obj | 0 .../model_normalized_collision_11.obj | 0 .../model_normalized_collision_12.obj | 0 .../model_normalized_collision_13.obj | 0 .../model_normalized_collision_14.obj | 0 .../model_normalized_collision_15.obj | 0 .../model_normalized_collision_16.obj | 0 .../model_normalized_collision_17.obj | 0 .../model_normalized_collision_18.obj | 0 .../model_normalized_collision_19.obj | 0 .../model_normalized_collision_2.obj | 0 .../model_normalized_collision_20.obj | 0 .../model_normalized_collision_21.obj | 0 .../model_normalized_collision_22.obj | 0 .../model_normalized_collision_23.obj | 0 .../model_normalized_collision_24.obj | 0 .../model_normalized_collision_25.obj | 0 .../model_normalized_collision_26.obj | 0 .../model_normalized_collision_27.obj | 0 .../model_normalized_collision_28.obj | 0 .../model_normalized_collision_29.obj | 0 .../model_normalized_collision_3.obj | 0 .../model_normalized_collision_30.obj | 0 .../model_normalized_collision_31.obj | 0 .../model_normalized_collision_4.obj | 0 .../model_normalized_collision_5.obj | 0 .../model_normalized_collision_6.obj | 0 .../model_normalized_collision_7.obj | 0 .../model_normalized_collision_8.obj | 0 .../model_normalized_collision_9.obj | 0 .../shapenet_core/mugs/3143a4ac/meta.json | 0 .../shapenet_core/mugs/3143a4ac/model.xml | 0 .../mugs/3143a4ac/visual/material_0.mtl | 0 .../3143a4ac/visual/model_normalized_0.obj | 0 .../model_normalized_collision_0.obj | 0 .../model_normalized_collision_1.obj | 0 .../model_normalized_collision_10.obj | 0 .../model_normalized_collision_11.obj | 0 .../model_normalized_collision_12.obj | 0 .../model_normalized_collision_13.obj | 0 .../model_normalized_collision_14.obj | 0 .../model_normalized_collision_15.obj | 0 .../model_normalized_collision_16.obj | 0 
.../model_normalized_collision_17.obj | 0 .../model_normalized_collision_18.obj | 0 .../model_normalized_collision_19.obj | 0 .../model_normalized_collision_2.obj | 0 .../model_normalized_collision_20.obj | 0 .../model_normalized_collision_21.obj | 0 .../model_normalized_collision_22.obj | 0 .../model_normalized_collision_23.obj | 0 .../model_normalized_collision_24.obj | 0 .../model_normalized_collision_25.obj | 0 .../model_normalized_collision_26.obj | 0 .../model_normalized_collision_27.obj | 0 .../model_normalized_collision_28.obj | 0 .../model_normalized_collision_29.obj | 0 .../model_normalized_collision_3.obj | 0 .../model_normalized_collision_30.obj | 0 .../model_normalized_collision_31.obj | 0 .../model_normalized_collision_4.obj | 0 .../model_normalized_collision_5.obj | 0 .../model_normalized_collision_6.obj | 0 .../model_normalized_collision_7.obj | 0 .../model_normalized_collision_8.obj | 0 .../model_normalized_collision_9.obj | 0 .../shapenet_core/mugs/345d3e72/meta.json | 0 .../shapenet_core/mugs/345d3e72/model.xml | 0 .../mugs/345d3e72/visual/material_0.jpeg | Bin .../mugs/345d3e72/visual/material_0.mtl | 0 .../345d3e72/visual/model_normalized_0.obj | 0 .../345d3e72/visual/model_normalized_1.obj | 0 .../345d3e72/visual/model_normalized_2.obj | 0 .../345d3e72/visual/model_normalized_3.obj | 0 .../345d3e72/visual/model_normalized_4.obj | 0 .../mugs/345d3e72/visual/texture0.png | Bin .../mugs/345d3e72/visual/texture1.png | Bin .../mugs/345d3e72/visual/texture2.png | Bin .../mugs/345d3e72/visual/texture3.png | Bin .../model_normalized_collision_0.obj | 0 .../model_normalized_collision_1.obj | 0 .../model_normalized_collision_10.obj | 0 .../model_normalized_collision_11.obj | 0 .../model_normalized_collision_12.obj | 0 .../model_normalized_collision_13.obj | 0 .../model_normalized_collision_14.obj | 0 .../model_normalized_collision_15.obj | 0 .../model_normalized_collision_16.obj | 0 .../model_normalized_collision_17.obj | 0 
.../model_normalized_collision_18.obj | 0 .../model_normalized_collision_19.obj | 0 .../model_normalized_collision_2.obj | 0 .../model_normalized_collision_20.obj | 0 .../model_normalized_collision_21.obj | 0 .../model_normalized_collision_22.obj | 0 .../model_normalized_collision_23.obj | 0 .../model_normalized_collision_24.obj | 0 .../model_normalized_collision_25.obj | 0 .../model_normalized_collision_26.obj | 0 .../model_normalized_collision_27.obj | 0 .../model_normalized_collision_28.obj | 0 .../model_normalized_collision_29.obj | 0 .../model_normalized_collision_3.obj | 0 .../model_normalized_collision_30.obj | 0 .../model_normalized_collision_31.obj | 0 .../model_normalized_collision_4.obj | 0 .../model_normalized_collision_5.obj | 0 .../model_normalized_collision_6.obj | 0 .../model_normalized_collision_7.obj | 0 .../model_normalized_collision_8.obj | 0 .../model_normalized_collision_9.obj | 0 .../shapenet_core/mugs/34ae0b61/meta.json | 0 .../shapenet_core/mugs/34ae0b61/model.xml | 0 .../mugs/34ae0b61/visual/material_0.mtl | 0 .../34ae0b61/visual/model_normalized_0.obj | 0 .../34ae0b61/visual/model_normalized_1.obj | 0 .../34ae0b61/visual/model_normalized_2.obj | 0 .../model_normalized_collision_0.obj | 0 .../model_normalized_collision_1.obj | 0 .../model_normalized_collision_10.obj | 0 .../model_normalized_collision_11.obj | 0 .../model_normalized_collision_12.obj | 0 .../model_normalized_collision_13.obj | 0 .../model_normalized_collision_14.obj | 0 .../model_normalized_collision_15.obj | 0 .../model_normalized_collision_16.obj | 0 .../model_normalized_collision_17.obj | 0 .../model_normalized_collision_18.obj | 0 .../model_normalized_collision_19.obj | 0 .../model_normalized_collision_2.obj | 0 .../model_normalized_collision_20.obj | 0 .../model_normalized_collision_21.obj | 0 .../model_normalized_collision_22.obj | 0 .../model_normalized_collision_23.obj | 0 .../model_normalized_collision_24.obj | 0 .../model_normalized_collision_25.obj | 0 
.../model_normalized_collision_26.obj | 0 .../model_normalized_collision_27.obj | 0 .../model_normalized_collision_28.obj | 0 .../model_normalized_collision_29.obj | 0 .../model_normalized_collision_3.obj | 0 .../model_normalized_collision_30.obj | 0 .../model_normalized_collision_31.obj | 0 .../model_normalized_collision_4.obj | 0 .../model_normalized_collision_5.obj | 0 .../model_normalized_collision_6.obj | 0 .../model_normalized_collision_7.obj | 0 .../model_normalized_collision_8.obj | 0 .../model_normalized_collision_9.obj | 0 .../shapenet_core/mugs/48e260a6/meta.json | 0 .../shapenet_core/mugs/48e260a6/model.xml | 0 .../mugs/48e260a6/visual/material_0.mtl | 0 .../48e260a6/visual/model_normalized_0.obj | 0 .../48e260a6/visual/model_normalized_1.obj | 0 .../model_normalized_collision_0.obj | 0 .../model_normalized_collision_1.obj | 0 .../model_normalized_collision_10.obj | 0 .../model_normalized_collision_11.obj | 0 .../model_normalized_collision_12.obj | 0 .../model_normalized_collision_13.obj | 0 .../model_normalized_collision_14.obj | 0 .../model_normalized_collision_15.obj | 0 .../model_normalized_collision_16.obj | 0 .../model_normalized_collision_17.obj | 0 .../model_normalized_collision_18.obj | 0 .../model_normalized_collision_19.obj | 0 .../model_normalized_collision_2.obj | 0 .../model_normalized_collision_20.obj | 0 .../model_normalized_collision_21.obj | 0 .../model_normalized_collision_22.obj | 0 .../model_normalized_collision_23.obj | 0 .../model_normalized_collision_24.obj | 0 .../model_normalized_collision_25.obj | 0 .../model_normalized_collision_26.obj | 0 .../model_normalized_collision_27.obj | 0 .../model_normalized_collision_28.obj | 0 .../model_normalized_collision_29.obj | 0 .../model_normalized_collision_3.obj | 0 .../model_normalized_collision_30.obj | 0 .../model_normalized_collision_31.obj | 0 .../model_normalized_collision_4.obj | 0 .../model_normalized_collision_5.obj | 0 .../model_normalized_collision_6.obj | 0 
.../model_normalized_collision_7.obj | 0 .../model_normalized_collision_8.obj | 0 .../model_normalized_collision_9.obj | 0 .../shapenet_core/mugs/5fe74bab/meta.json | 0 .../shapenet_core/mugs/5fe74bab/model.xml | 0 .../mugs/5fe74bab/visual/material_0.mtl | 0 .../5fe74bab/visual/model_normalized_0.obj | 0 .../5fe74bab/visual/model_normalized_1.obj | 0 .../5fe74bab/visual/model_normalized_2.obj | 0 .../model_normalized_collision_0.obj | 0 .../model_normalized_collision_1.obj | 0 .../model_normalized_collision_10.obj | 0 .../model_normalized_collision_11.obj | 0 .../model_normalized_collision_12.obj | 0 .../model_normalized_collision_13.obj | 0 .../model_normalized_collision_14.obj | 0 .../model_normalized_collision_15.obj | 0 .../model_normalized_collision_16.obj | 0 .../model_normalized_collision_17.obj | 0 .../model_normalized_collision_18.obj | 0 .../model_normalized_collision_19.obj | 0 .../model_normalized_collision_2.obj | 0 .../model_normalized_collision_20.obj | 0 .../model_normalized_collision_21.obj | 0 .../model_normalized_collision_22.obj | 0 .../model_normalized_collision_23.obj | 0 .../model_normalized_collision_24.obj | 0 .../model_normalized_collision_25.obj | 0 .../model_normalized_collision_26.obj | 0 .../model_normalized_collision_27.obj | 0 .../model_normalized_collision_28.obj | 0 .../model_normalized_collision_29.obj | 0 .../model_normalized_collision_3.obj | 0 .../model_normalized_collision_30.obj | 0 .../model_normalized_collision_31.obj | 0 .../model_normalized_collision_4.obj | 0 .../model_normalized_collision_5.obj | 0 .../model_normalized_collision_6.obj | 0 .../model_normalized_collision_7.obj | 0 .../model_normalized_collision_8.obj | 0 .../model_normalized_collision_9.obj | 0 .../shapenet_core/mugs/8012f52d/meta.json | 0 .../shapenet_core/mugs/8012f52d/model.xml | 0 .../mugs/8012f52d/visual/material_0.jpeg | Bin .../mugs/8012f52d/visual/material_0.mtl | 0 .../8012f52d/visual/model_normalized_0.obj | 0 
.../8012f52d/visual/model_normalized_1.obj | 0 .../8012f52d/visual/model_normalized_2.obj | 0 .../8012f52d/visual/model_normalized_3.obj | 0 .../8012f52d/visual/model_normalized_4.obj | 0 .../mugs/8012f52d/visual/texture0.png | Bin .../model_normalized_collision_0.obj | 0 .../model_normalized_collision_1.obj | 0 .../model_normalized_collision_10.obj | 0 .../model_normalized_collision_11.obj | 0 .../model_normalized_collision_12.obj | 0 .../model_normalized_collision_13.obj | 0 .../model_normalized_collision_14.obj | 0 .../model_normalized_collision_15.obj | 0 .../model_normalized_collision_16.obj | 0 .../model_normalized_collision_17.obj | 0 .../model_normalized_collision_18.obj | 0 .../model_normalized_collision_19.obj | 0 .../model_normalized_collision_2.obj | 0 .../model_normalized_collision_20.obj | 0 .../model_normalized_collision_21.obj | 0 .../model_normalized_collision_22.obj | 0 .../model_normalized_collision_23.obj | 0 .../model_normalized_collision_24.obj | 0 .../model_normalized_collision_25.obj | 0 .../model_normalized_collision_26.obj | 0 .../model_normalized_collision_27.obj | 0 .../model_normalized_collision_28.obj | 0 .../model_normalized_collision_29.obj | 0 .../model_normalized_collision_3.obj | 0 .../model_normalized_collision_30.obj | 0 .../model_normalized_collision_31.obj | 0 .../model_normalized_collision_4.obj | 0 .../model_normalized_collision_5.obj | 0 .../model_normalized_collision_6.obj | 0 .../model_normalized_collision_7.obj | 0 .../model_normalized_collision_8.obj | 0 .../model_normalized_collision_9.obj | 0 .../shapenet_core/mugs/b4ae56d6/meta.json | 0 .../shapenet_core/mugs/b4ae56d6/model.xml | 0 .../mugs/b4ae56d6/visual/material_0.mtl | 0 .../b4ae56d6/visual/model_normalized_0.obj | 0 .../b4ae56d6/visual/model_normalized_1.obj | 0 .../model_normalized_collision_0.obj | 0 .../model_normalized_collision_1.obj | 0 .../model_normalized_collision_10.obj | 0 .../model_normalized_collision_11.obj | 0 .../model_normalized_collision_12.obj 
| 0 .../model_normalized_collision_13.obj | 0 .../model_normalized_collision_14.obj | 0 .../model_normalized_collision_15.obj | 0 .../model_normalized_collision_16.obj | 0 .../model_normalized_collision_17.obj | 0 .../model_normalized_collision_18.obj | 0 .../model_normalized_collision_19.obj | 0 .../model_normalized_collision_2.obj | 0 .../model_normalized_collision_20.obj | 0 .../model_normalized_collision_21.obj | 0 .../model_normalized_collision_22.obj | 0 .../model_normalized_collision_23.obj | 0 .../model_normalized_collision_24.obj | 0 .../model_normalized_collision_25.obj | 0 .../model_normalized_collision_26.obj | 0 .../model_normalized_collision_27.obj | 0 .../model_normalized_collision_28.obj | 0 .../model_normalized_collision_29.obj | 0 .../model_normalized_collision_3.obj | 0 .../model_normalized_collision_30.obj | 0 .../model_normalized_collision_31.obj | 0 .../model_normalized_collision_4.obj | 0 .../model_normalized_collision_5.obj | 0 .../model_normalized_collision_6.obj | 0 .../model_normalized_collision_7.obj | 0 .../model_normalized_collision_8.obj | 0 .../model_normalized_collision_9.obj | 0 .../shapenet_core/mugs/c2eacc52/meta.json | 0 .../shapenet_core/mugs/c2eacc52/model.xml | 0 .../mugs/c2eacc52/visual/material_0.jpeg | Bin .../mugs/c2eacc52/visual/material_0.mtl | 0 .../c2eacc52/visual/model_normalized_0.obj | 0 .../c2eacc52/visual/model_normalized_1.obj | 0 .../c2eacc52/visual/model_normalized_2.obj | 0 .../mugs/c2eacc52/visual/texture0.png | Bin .../mugs/c2eacc52/visual/texture1.png | Bin .../model_normalized_collision_0.obj | 0 .../model_normalized_collision_1.obj | 0 .../model_normalized_collision_10.obj | 0 .../model_normalized_collision_11.obj | 0 .../model_normalized_collision_12.obj | 0 .../model_normalized_collision_13.obj | 0 .../model_normalized_collision_14.obj | 0 .../model_normalized_collision_15.obj | 0 .../model_normalized_collision_16.obj | 0 .../model_normalized_collision_17.obj | 0 .../model_normalized_collision_18.obj | 
0 .../model_normalized_collision_19.obj | 0 .../model_normalized_collision_2.obj | 0 .../model_normalized_collision_20.obj | 0 .../model_normalized_collision_21.obj | 0 .../model_normalized_collision_22.obj | 0 .../model_normalized_collision_23.obj | 0 .../model_normalized_collision_24.obj | 0 .../model_normalized_collision_25.obj | 0 .../model_normalized_collision_26.obj | 0 .../model_normalized_collision_27.obj | 0 .../model_normalized_collision_28.obj | 0 .../model_normalized_collision_29.obj | 0 .../model_normalized_collision_3.obj | 0 .../model_normalized_collision_30.obj | 0 .../model_normalized_collision_31.obj | 0 .../model_normalized_collision_4.obj | 0 .../model_normalized_collision_5.obj | 0 .../model_normalized_collision_6.obj | 0 .../model_normalized_collision_7.obj | 0 .../model_normalized_collision_8.obj | 0 .../model_normalized_collision_9.obj | 0 .../shapenet_core/mugs/d75af64a/meta.json | 0 .../shapenet_core/mugs/d75af64a/model.xml | 0 .../mugs/d75af64a/visual/material_0.mtl | 0 .../d75af64a/visual/model_normalized_0.obj | 0 .../d75af64a/visual/model_normalized_1.obj | 0 .../d75af64a/visual/model_normalized_2.obj | 0 .../d75af64a/visual/model_normalized_3.obj | 0 .../model_normalized_collision_0.obj | 0 .../model_normalized_collision_1.obj | 0 .../model_normalized_collision_10.obj | 0 .../model_normalized_collision_11.obj | 0 .../model_normalized_collision_12.obj | 0 .../model_normalized_collision_13.obj | 0 .../model_normalized_collision_14.obj | 0 .../model_normalized_collision_15.obj | 0 .../model_normalized_collision_16.obj | 0 .../model_normalized_collision_17.obj | 0 .../model_normalized_collision_18.obj | 0 .../model_normalized_collision_19.obj | 0 .../model_normalized_collision_2.obj | 0 .../model_normalized_collision_20.obj | 0 .../model_normalized_collision_21.obj | 0 .../model_normalized_collision_22.obj | 0 .../model_normalized_collision_23.obj | 0 .../model_normalized_collision_24.obj | 0 .../model_normalized_collision_25.obj | 0 
.../model_normalized_collision_26.obj | 0 .../model_normalized_collision_27.obj | 0 .../model_normalized_collision_28.obj | 0 .../model_normalized_collision_29.obj | 0 .../model_normalized_collision_3.obj | 0 .../model_normalized_collision_30.obj | 0 .../model_normalized_collision_31.obj | 0 .../model_normalized_collision_4.obj | 0 .../model_normalized_collision_5.obj | 0 .../model_normalized_collision_6.obj | 0 .../model_normalized_collision_7.obj | 0 .../model_normalized_collision_8.obj | 0 .../model_normalized_collision_9.obj | 0 .../shapenet_core/mugs/e94e46bc/meta.json | 0 .../shapenet_core/mugs/e94e46bc/model.xml | 0 .../mugs/e94e46bc/visual/material_0.mtl | 0 .../e94e46bc/visual/model_normalized_0.obj | 0 .../e94e46bc/visual/model_normalized_1.obj | 0 .../model_normalized_collision_0.obj | 0 .../model_normalized_collision_1.obj | 0 .../model_normalized_collision_10.obj | 0 .../model_normalized_collision_11.obj | 0 .../model_normalized_collision_12.obj | 0 .../model_normalized_collision_13.obj | 0 .../model_normalized_collision_14.obj | 0 .../model_normalized_collision_15.obj | 0 .../model_normalized_collision_16.obj | 0 .../model_normalized_collision_17.obj | 0 .../model_normalized_collision_18.obj | 0 .../model_normalized_collision_19.obj | 0 .../model_normalized_collision_2.obj | 0 .../model_normalized_collision_20.obj | 0 .../model_normalized_collision_21.obj | 0 .../model_normalized_collision_22.obj | 0 .../model_normalized_collision_23.obj | 0 .../model_normalized_collision_24.obj | 0 .../model_normalized_collision_25.obj | 0 .../model_normalized_collision_26.obj | 0 .../model_normalized_collision_27.obj | 0 .../model_normalized_collision_28.obj | 0 .../model_normalized_collision_29.obj | 0 .../model_normalized_collision_3.obj | 0 .../model_normalized_collision_30.obj | 0 .../model_normalized_collision_31.obj | 0 .../model_normalized_collision_4.obj | 0 .../model_normalized_collision_5.obj | 0 .../model_normalized_collision_6.obj | 0 
.../model_normalized_collision_7.obj | 0 .../model_normalized_collision_8.obj | 0 .../model_normalized_collision_9.obj | 0 .../shapenet_core/mugs/fad118b3/meta.json | 0 .../shapenet_core/mugs/fad118b3/model.xml | 0 .../mugs/fad118b3/visual/material_0.mtl | 0 .../fad118b3/visual/model_normalized_0.obj | 0 .../robosuite/assets/textures/ceramic.png | Bin .../models/robosuite/objects/__init__.py | 2 +- .../robosuite/objects/composite/__init__.py | 2 +- .../objects/composite/box_pattern_object.py | 2 +- .../objects/composite/hollow_cylinder.py | 2 +- .../robosuite/objects/composite/needle.py | 2 +- .../objects/composite/ring_tripod.py | 2 +- .../objects/composite_body/__init__.py | 2 +- .../objects/composite_body/coffee_machine.py | 6 +- .../robosuite/objects/composite_body/cup.py | 4 +- .../models/robosuite/objects/xml_objects.py | 6 +- mimicgen/scripts/__init__.py | 3 + mimicgen/scripts/annotate_subtasks.py | 409 ++++++++++ .../scripts/demo_random_action.py | 8 +- .../scripts/download_datasets.py | 10 +- mimicgen/scripts/generate_config_templates.py | 37 + mimicgen/scripts/generate_core_configs.py | 378 +++++++++ .../scripts/generate_core_training_configs.py | 337 ++++++++ mimicgen/scripts/generate_dataset.py | 638 +++++++++++++++ .../generate_robot_transfer_configs.py | 225 ++++++ ...te_training_configs_for_public_datasets.py | 12 +- .../scripts/get_reset_videos.py | 4 +- mimicgen/scripts/get_source_info.py | 60 ++ mimicgen/scripts/merge_hdf5.py | 110 +++ mimicgen/scripts/prepare_all_src_datasets.sh | 76 ++ mimicgen/scripts/prepare_src_dataset.py | 262 ++++++ mimicgen/scripts/visualize_subtasks.py | 357 +++++++++ mimicgen/utils/__init__.py | 3 + mimicgen/utils/config_utils.py | 500 ++++++++++++ mimicgen/utils/file_utils.py | 475 +++++++++++ mimicgen/utils/misc_utils.py | 254 ++++++ mimicgen/utils/pose_utils.py | 297 +++++++ mimicgen/utils/robomimic_utils.py | 156 ++++ mimicgen_envs/utils/file_utils.py | 45 -- requirements-docs.txt | 8 + setup.py | 4 +- 549 files 
changed, 8994 insertions(+), 140 deletions(-) rename {mimicgen_envs => mimicgen}/__init__.py (90%) create mode 100644 mimicgen/configs/__init__.py create mode 100644 mimicgen/configs/config.py create mode 100644 mimicgen/configs/robosuite.py create mode 100644 mimicgen/configs/task_spec.py rename {mimicgen_envs/utils => mimicgen/datagen}/__init__.py (57%) create mode 100644 mimicgen/datagen/data_generator.py create mode 100644 mimicgen/datagen/datagen_info.py create mode 100644 mimicgen/datagen/selection_strategy.py create mode 100644 mimicgen/datagen/waypoint.py rename {mimicgen_envs/envs => mimicgen/env_interfaces}/__init__.py (57%) create mode 100644 mimicgen/env_interfaces/base.py create mode 100644 mimicgen/env_interfaces/robosuite.py rename {mimicgen_envs/envs/robosuite => mimicgen/envs}/__init__.py (57%) rename {mimicgen_envs/scripts => mimicgen/envs/robosuite}/__init__.py (57%) rename {mimicgen_envs => mimicgen}/envs/robosuite/coffee.py (99%) rename {mimicgen_envs => mimicgen}/envs/robosuite/hammer_cleanup.py (98%) rename {mimicgen_envs => mimicgen}/envs/robosuite/kitchen.py (98%) rename {mimicgen_envs => mimicgen}/envs/robosuite/mug_cleanup.py (98%) rename {mimicgen_envs => mimicgen}/envs/robosuite/nut_assembly.py (99%) rename {mimicgen_envs => mimicgen}/envs/robosuite/pick_place.py (89%) rename {mimicgen_envs => mimicgen}/envs/robosuite/single_arm_env_mg.py (93%) rename {mimicgen_envs => mimicgen}/envs/robosuite/stack.py (99%) rename {mimicgen_envs => mimicgen}/envs/robosuite/threading.py (99%) rename {mimicgen_envs => mimicgen}/envs/robosuite/three_piece_assembly.py (99%) create mode 100644 mimicgen/exps/templates/robosuite/coffee.json create mode 100644 mimicgen/exps/templates/robosuite/coffee_preparation.json create mode 100644 mimicgen/exps/templates/robosuite/hammer_cleanup.json create mode 100644 mimicgen/exps/templates/robosuite/kitchen.json create mode 100644 mimicgen/exps/templates/robosuite/mug_cleanup.json create mode 100644 
mimicgen/exps/templates/robosuite/nut_assembly.json create mode 100644 mimicgen/exps/templates/robosuite/pick_place.json create mode 100644 mimicgen/exps/templates/robosuite/square.json create mode 100644 mimicgen/exps/templates/robosuite/stack.json create mode 100644 mimicgen/exps/templates/robosuite/stack_three.json create mode 100644 mimicgen/exps/templates/robosuite/threading.json create mode 100644 mimicgen/exps/templates/robosuite/three_piece_assembly.json rename {mimicgen_envs => mimicgen}/models/robosuite/assets/objects/coffee_base.xml (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/objects/coffee_body.xml (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/objects/coffee_lid.xml (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/objects/coffee_pod.xml (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/objects/drawer.xml (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/objects/drawer_long.xml (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/objects/meshes/coffee_base.stl (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/objects/meshes/coffee_body.stl (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/objects/meshes/coffee_lid.stl (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/objects/meshes/coffee_pod.stl (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/objects/serving_region.xml (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_0.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_1.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_10.obj (100%) rename {mimicgen_envs => 
mimicgen}/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_11.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_12.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_13.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_14.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_15.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_16.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_17.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_18.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_19.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_2.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_20.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_21.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_22.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_23.obj (100%) rename {mimicgen_envs => 
mimicgen}/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_24.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_25.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_26.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_27.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_28.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_29.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_3.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_30.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_31.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_4.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_5.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_6.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_7.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_8.obj (100%) rename {mimicgen_envs => 
mimicgen}/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_9.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/128ecbc1/meta.json (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/128ecbc1/model.xml (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/128ecbc1/visual/material_0.mtl (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/128ecbc1/visual/model_normalized_0.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/128ecbc1/visual/model_normalized_1.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/128ecbc1/visual/model_normalized_2.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_0.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_1.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_10.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_11.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_12.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_13.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_14.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_15.obj (100%) rename {mimicgen_envs => 
mimicgen}/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_16.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_17.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_18.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_19.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_2.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_20.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_21.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_22.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_23.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_24.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_25.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_26.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_27.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_28.obj (100%) rename {mimicgen_envs => 
mimicgen}/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_29.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_3.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_30.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_31.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_4.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_5.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_6.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_7.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_8.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_9.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/3143a4ac/meta.json (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/3143a4ac/model.xml (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/3143a4ac/visual/material_0.mtl (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/3143a4ac/visual/model_normalized_0.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_0.obj (100%) rename {mimicgen_envs => 
mimicgen}/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_1.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_10.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_11.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_12.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_13.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_14.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_15.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_16.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_17.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_18.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_19.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_2.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_20.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_21.obj (100%) rename {mimicgen_envs => 
mimicgen}/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_22.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_23.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_24.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_25.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_26.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_27.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_28.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_29.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_3.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_30.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_31.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_4.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_5.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_6.obj (100%) rename {mimicgen_envs => 
mimicgen}/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_7.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_8.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_9.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/345d3e72/meta.json (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/345d3e72/model.xml (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/345d3e72/visual/material_0.jpeg (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/345d3e72/visual/material_0.mtl (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/345d3e72/visual/model_normalized_0.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/345d3e72/visual/model_normalized_1.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/345d3e72/visual/model_normalized_2.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/345d3e72/visual/model_normalized_3.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/345d3e72/visual/model_normalized_4.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/345d3e72/visual/texture0.png (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/345d3e72/visual/texture1.png (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/345d3e72/visual/texture2.png (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/345d3e72/visual/texture3.png (100%) rename {mimicgen_envs => 
mimicgen}/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_0.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_1.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_10.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_11.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_12.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_13.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_14.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_15.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_16.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_17.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_18.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_19.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_2.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_20.obj (100%) rename {mimicgen_envs => 
mimicgen}/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_21.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_22.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_23.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_24.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_25.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_26.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_27.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_28.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_29.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_3.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_30.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_31.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_4.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_5.obj (100%) rename {mimicgen_envs => 
mimicgen}/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_6.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_7.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_8.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_9.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/34ae0b61/meta.json (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/34ae0b61/model.xml (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/34ae0b61/visual/material_0.mtl (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/34ae0b61/visual/model_normalized_0.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/34ae0b61/visual/model_normalized_1.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/34ae0b61/visual/model_normalized_2.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_0.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_1.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_10.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_11.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_12.obj (100%) rename {mimicgen_envs => 
mimicgen}/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_13.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_14.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_15.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_16.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_17.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_18.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_19.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_2.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_20.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_21.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_22.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_23.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_24.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_25.obj (100%) rename {mimicgen_envs => 
mimicgen}/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_26.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_27.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_28.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_29.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_3.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_30.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_31.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_4.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_5.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_6.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_7.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_8.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_9.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/48e260a6/meta.json (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/48e260a6/model.xml (100%) rename {mimicgen_envs => 
mimicgen}/models/robosuite/assets/shapenet_core/mugs/48e260a6/visual/material_0.mtl (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/48e260a6/visual/model_normalized_0.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/48e260a6/visual/model_normalized_1.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_0.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_1.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_10.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_11.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_12.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_13.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_14.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_15.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_16.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_17.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_18.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_19.obj (100%) rename 
{mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_2.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_20.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_21.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_22.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_23.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_24.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_25.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_26.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_27.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_28.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_29.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_3.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_30.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_31.obj (100%) rename {mimicgen_envs => 
mimicgen}/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_4.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_5.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_6.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_7.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_8.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_9.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/5fe74bab/meta.json (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/5fe74bab/model.xml (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/5fe74bab/visual/material_0.mtl (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/5fe74bab/visual/model_normalized_0.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/5fe74bab/visual/model_normalized_1.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/5fe74bab/visual/model_normalized_2.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_0.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_1.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_10.obj (100%) rename {mimicgen_envs => 
mimicgen}/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_11.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_12.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_13.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_14.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_15.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_16.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_17.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_18.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_19.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_2.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_20.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_21.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_22.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_23.obj (100%) rename {mimicgen_envs => 
mimicgen}/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_24.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_25.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_26.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_27.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_28.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_29.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_3.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_30.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_31.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_4.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_5.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_6.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_7.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_8.obj (100%) rename {mimicgen_envs => 
mimicgen}/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_9.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/8012f52d/meta.json (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/8012f52d/model.xml (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/8012f52d/visual/material_0.jpeg (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/8012f52d/visual/material_0.mtl (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/8012f52d/visual/model_normalized_0.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/8012f52d/visual/model_normalized_1.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/8012f52d/visual/model_normalized_2.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/8012f52d/visual/model_normalized_3.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/8012f52d/visual/model_normalized_4.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/8012f52d/visual/texture0.png (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_0.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_1.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_10.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_11.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_12.obj (100%) rename {mimicgen_envs => 
mimicgen}/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_13.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_14.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_15.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_16.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_17.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_18.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_19.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_2.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_20.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_21.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_22.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_23.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_24.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_25.obj (100%) rename {mimicgen_envs => 
mimicgen}/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_26.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_27.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_28.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_29.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_3.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_30.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_31.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_4.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_5.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_6.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_7.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_8.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_9.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/meta.json (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/model.xml (100%) rename {mimicgen_envs => 
mimicgen}/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/visual/material_0.mtl (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/visual/model_normalized_0.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/visual/model_normalized_1.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_0.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_1.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_10.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_11.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_12.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_13.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_14.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_15.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_16.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_17.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_18.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_19.obj (100%) rename 
{mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_2.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_20.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_21.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_22.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_23.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_24.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_25.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_26.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_27.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_28.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_29.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_3.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_30.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_31.obj (100%) rename {mimicgen_envs => 
mimicgen}/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_4.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_5.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_6.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_7.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_8.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_9.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/c2eacc52/meta.json (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/c2eacc52/model.xml (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/c2eacc52/visual/material_0.jpeg (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/c2eacc52/visual/material_0.mtl (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/c2eacc52/visual/model_normalized_0.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/c2eacc52/visual/model_normalized_1.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/c2eacc52/visual/model_normalized_2.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/c2eacc52/visual/texture0.png (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/c2eacc52/visual/texture1.png (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_0.obj (100%) rename {mimicgen_envs => 
mimicgen}/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_1.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_10.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_11.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_12.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_13.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_14.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_15.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_16.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_17.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_18.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_19.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_2.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_20.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_21.obj (100%) rename {mimicgen_envs => 
mimicgen}/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_22.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_23.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_24.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_25.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_26.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_27.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_28.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_29.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_3.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_30.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_31.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_4.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_5.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_6.obj (100%) rename {mimicgen_envs => 
mimicgen}/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_7.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_8.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_9.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/d75af64a/meta.json (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/d75af64a/model.xml (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/d75af64a/visual/material_0.mtl (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/d75af64a/visual/model_normalized_0.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/d75af64a/visual/model_normalized_1.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/d75af64a/visual/model_normalized_2.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/d75af64a/visual/model_normalized_3.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_0.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_1.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_10.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_11.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_12.obj (100%) rename {mimicgen_envs => 
mimicgen}/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_13.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_14.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_15.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_16.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_17.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_18.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_19.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_2.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_20.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_21.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_22.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_23.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_24.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_25.obj (100%) rename {mimicgen_envs => 
mimicgen}/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_26.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_27.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_28.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_29.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_3.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_30.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_31.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_4.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_5.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_6.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_7.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_8.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_9.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/e94e46bc/meta.json (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/e94e46bc/model.xml (100%) rename {mimicgen_envs => 
mimicgen}/models/robosuite/assets/shapenet_core/mugs/e94e46bc/visual/material_0.mtl (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/e94e46bc/visual/model_normalized_0.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/e94e46bc/visual/model_normalized_1.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_0.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_1.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_10.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_11.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_12.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_13.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_14.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_15.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_16.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_17.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_18.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_19.obj (100%) rename 
{mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_2.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_20.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_21.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_22.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_23.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_24.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_25.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_26.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_27.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_28.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_29.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_3.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_30.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_31.obj (100%) rename {mimicgen_envs => 
mimicgen}/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_4.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_5.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_6.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_7.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_8.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_9.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/fad118b3/meta.json (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/fad118b3/model.xml (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/fad118b3/visual/material_0.mtl (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/shapenet_core/mugs/fad118b3/visual/model_normalized_0.obj (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/assets/textures/ceramic.png (100%) rename {mimicgen_envs => mimicgen}/models/robosuite/objects/__init__.py (72%) rename {mimicgen_envs => mimicgen}/models/robosuite/objects/composite/__init__.py (80%) rename {mimicgen_envs => mimicgen}/models/robosuite/objects/composite/box_pattern_object.py (98%) rename {mimicgen_envs => mimicgen}/models/robosuite/objects/composite/hollow_cylinder.py (98%) rename {mimicgen_envs => mimicgen}/models/robosuite/objects/composite/needle.py (98%) rename {mimicgen_envs => mimicgen}/models/robosuite/objects/composite/ring_tripod.py (99%) rename {mimicgen_envs => mimicgen}/models/robosuite/objects/composite_body/__init__.py (71%) rename {mimicgen_envs => 
mimicgen}/models/robosuite/objects/composite_body/coffee_machine.py (95%) rename {mimicgen_envs => mimicgen}/models/robosuite/objects/composite_body/cup.py (97%) rename {mimicgen_envs => mimicgen}/models/robosuite/objects/xml_objects.py (97%) create mode 100644 mimicgen/scripts/__init__.py create mode 100644 mimicgen/scripts/annotate_subtasks.py rename {mimicgen_envs => mimicgen}/scripts/demo_random_action.py (92%) rename {mimicgen_envs => mimicgen}/scripts/download_datasets.py (92%) create mode 100644 mimicgen/scripts/generate_config_templates.py create mode 100644 mimicgen/scripts/generate_core_configs.py create mode 100644 mimicgen/scripts/generate_core_training_configs.py create mode 100644 mimicgen/scripts/generate_dataset.py create mode 100644 mimicgen/scripts/generate_robot_transfer_configs.py rename mimicgen_envs/scripts/generate_training_configs.py => mimicgen/scripts/generate_training_configs_for_public_datasets.py (96%) rename {mimicgen_envs => mimicgen}/scripts/get_reset_videos.py (98%) create mode 100644 mimicgen/scripts/get_source_info.py create mode 100644 mimicgen/scripts/merge_hdf5.py create mode 100644 mimicgen/scripts/prepare_all_src_datasets.sh create mode 100644 mimicgen/scripts/prepare_src_dataset.py create mode 100644 mimicgen/scripts/visualize_subtasks.py create mode 100644 mimicgen/utils/__init__.py create mode 100644 mimicgen/utils/config_utils.py create mode 100644 mimicgen/utils/file_utils.py create mode 100644 mimicgen/utils/misc_utils.py create mode 100644 mimicgen/utils/pose_utils.py create mode 100644 mimicgen/utils/robomimic_utils.py delete mode 100644 mimicgen_envs/utils/file_utils.py create mode 100644 requirements-docs.txt diff --git a/README.md b/README.md index 4f92f95..a9ac588 100644 --- a/README.md +++ b/README.md @@ -99,7 +99,7 @@ pip install mujoco==2.3.2 The following script can be used to try random actions in a task. 
```sh -cd mimicgen_envs/scripts +cd mimicgen/scripts python demo_random_action.py ``` @@ -136,7 +136,7 @@ We provide more information on the amount of demonstrations for each dataset typ #### Method 1: Using `download_datasets.py` (Recommended) -`download_datasets.py` (located at `mimicgen_envs/scripts`) is a python script that provides a programmatic way of downloading the datasets. This is the preferred method, because this script also sets up a directory structure for the datasets that works out of the box with the code for reproducing policy learning results. +`download_datasets.py` (located at `mimicgen/scripts`) is a python script that provides a programmatic way of downloading the datasets. This is the preferred method, because this script also sets up a directory structure for the datasets that works out of the box with the code for reproducing policy learning results. A few examples of using this script are provided below: @@ -168,22 +168,22 @@ You can download the datasets through Hugging Face. ## Reproducing Policy Learning Results -After downloading the appropriate datasets you’re interested in using by running the `download_datasets.py` script, the `generate_training_configs.py` script (located at `mimicgen_envs/scripts`) can be used to generate all training config json files necessary to reproduce the experiments in the paper. A few examples are below. +After downloading the appropriate datasets you’re interested in using by running the `download_datasets.py` script, the `generate_training_configs_for_public_datasets.py` script (located at `mimicgen/scripts`) can be used to generate all training config json files necessary to reproduce the experiments in the paper. A few examples are below. ```sh -# Assume datasets already exist in mimicgen_envs/../datasets folder. Configs will be generated under mimicgen_envs/exps/paper, and training results will be at mimicgen_envs/../training_results after launching training runs. 
-python generate_training_configs.py +# Assume datasets already exist in mimicgen/../datasets folder. Configs will be generated under mimicgen/exps/paper, and training results will be at mimicgen/../training_results after launching training runs. +python generate_training_configs_for_public_datasets.py # Alternatively, specify where datasets exist, and specify where configs should be generated. -python generate_training_configs.py --config_dir /tmp/configs --dataset_dir /tmp/datasets --output_dir /tmp/experiment_results +python generate_training_configs_for_public_datasets.py --config_dir /tmp/configs --dataset_dir /tmp/datasets --output_dir /tmp/experiment_results ``` -Then, to reproduce a specific set of training runs for different experiment groups (see [Dataset Types](#dataset-types)), we can simply navigate to the generated config directory, and copy training commands from the generated shell script there. As an example, we can reproduce the image training results on the Coffee D0 dataset, by looking for the correct set of commands in `mimicgen_envs/exps/paper/core.sh` and running them. The relevant section of the shell script is reproduced below. +Then, to reproduce a specific set of training runs for different experiment groups (see [Dataset Types](#dataset-types)), we can simply navigate to the generated config directory, and copy training commands from the generated shell script there. As an example, we can reproduce the image training results on the Coffee D0 dataset, by looking for the correct set of commands in `mimicgen/exps/paper/core.sh` and running them. The relevant section of the shell script is reproduced below. 
```sh # task: coffee_d0 # obs modality: image -python /path/to/robomimic/scripts/train.py --config /path/to/mimicgen_envs/exps/paper/core/coffee_d0/image/bc_rnn.json +python /path/to/robomimic/scripts/train.py --config /path/to/mimicgen/exps/paper/core/coffee_d0/image/bc_rnn.json ``` **Note 1**: Another option is to directly run `robomimic/scripts/train.py` with any generated config jsons of interest -- the commands in the shell files do exactly this. @@ -197,7 +197,7 @@ python /path/to/robomimic/scripts/train.py --config /path/to/mimicgen_envs/exps/ We provide a convenience script to write videos for each task's reset distribution at `scripts/get_reset_videos.py`. Set the `OUTPUT_FOLDER` global variable to the folder where you want to write the videos, and set `DATASET_INFOS` appropriately if you would like to limit the environments visualized. Then run the script. -The environments are also readily compatible with robosuite visualization scripts such as the [demo_random_action.py](https://github.com/ARISE-Initiative/robosuite/blob/b9d8d3de5e3dfd1724f4a0e6555246c460407daa/robosuite/demos/demo_random_action.py) script and the [make_reset_video.py](https://github.com/ARISE-Initiative/robosuite/blob/b9d8d3de5e3dfd1724f4a0e6555246c460407daa/robosuite/scripts/make_reset_video.py) script, but you will need to modify these files to add a `import mimicgen_envs` line to make sure that `robosuite` can find these environments. +The environments are also readily compatible with robosuite visualization scripts such as the [demo_random_action.py](https://github.com/ARISE-Initiative/robosuite/blob/b9d8d3de5e3dfd1724f4a0e6555246c460407daa/robosuite/demos/demo_random_action.py) script and the [make_reset_video.py](https://github.com/ARISE-Initiative/robosuite/blob/b9d8d3de5e3dfd1724f4a0e6555246c460407daa/robosuite/scripts/make_reset_video.py) script, but you will need to modify these files to add a `import mimicgen` line to make sure that `robosuite` can find these environments. 
**Note**: You can find task reset visualizations on the [website](https://mimicgen.github.io), but they may look a little different as they were generated with robosuite v1.2. diff --git a/mimicgen_envs/__init__.py b/mimicgen/__init__.py similarity index 90% rename from mimicgen_envs/__init__.py rename to mimicgen/__init__.py index 593ef92..cda3f28 100644 --- a/mimicgen_envs/__init__.py +++ b/mimicgen/__init__.py @@ -1,24 +1,35 @@ -# Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # # Licensed under the NVIDIA Source Code License [see LICENSE for details]. -__version__ = "0.1.1" +__version__ = "1.0.0" +# try to import all environment interfaces here +try: + from mimicgen.env_interfaces.robosuite import * +except ImportError as e: + print("WARNING: robosuite environment interfaces not imported...") + print("Got error: {}".format(e)) # import tasks to make sure they are added to robosuite task registry -from mimicgen_envs.envs.robosuite.threading import * -from mimicgen_envs.envs.robosuite.coffee import * -from mimicgen_envs.envs.robosuite.three_piece_assembly import * -from mimicgen_envs.envs.robosuite.mug_cleanup import * -from mimicgen_envs.envs.robosuite.stack import * -from mimicgen_envs.envs.robosuite.nut_assembly import * -from mimicgen_envs.envs.robosuite.pick_place import * +try: + from mimicgen.envs.robosuite.threading import * + from mimicgen.envs.robosuite.coffee import * + from mimicgen.envs.robosuite.three_piece_assembly import * + from mimicgen.envs.robosuite.mug_cleanup import * + from mimicgen.envs.robosuite.stack import * + from mimicgen.envs.robosuite.nut_assembly import * + from mimicgen.envs.robosuite.pick_place import * +except ImportError as e: + print("WARNING: robosuite environments not imported...") + print("Got error: {}".format(e)) try: - from mimicgen_envs.envs.robosuite.hammer_cleanup import * - from mimicgen_envs.envs.robosuite.kitchen 
import * -except ImportError: - print("WARNING: robosuite task zoo environments not imported since robosuite task zoo is not installed!") + from mimicgen.envs.robosuite.hammer_cleanup import * + from mimicgen.envs.robosuite.kitchen import * +except ImportError as e: + print("WARNING: robosuite task zoo environments not imported, possibly because robosuite_task_zoo is not installed...") + print("Got error: {}".format(e)) # stores released dataset links and rollout horizons in global dictionary. # Structure is given below for each type of dataset: diff --git a/mimicgen/configs/__init__.py b/mimicgen/configs/__init__.py new file mode 100644 index 0000000..51a51b5 --- /dev/null +++ b/mimicgen/configs/__init__.py @@ -0,0 +1,7 @@ +# Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# Licensed under the NVIDIA Source Code License [see LICENSE for details]. + +from mimicgen.configs.config import MG_Config, get_all_registered_configs, config_factory +from mimicgen.configs.task_spec import MG_TaskSpec +from mimicgen.configs.robosuite import * \ No newline at end of file diff --git a/mimicgen/configs/config.py b/mimicgen/configs/config.py new file mode 100644 index 0000000..3c9a34c --- /dev/null +++ b/mimicgen/configs/config.py @@ -0,0 +1,153 @@ +# Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# Licensed under the NVIDIA Source Code License [see LICENSE for details]. + +""" +Base MG_Config object for mimicgen data generation. +""" +import six +from copy import deepcopy + +import robomimic +from robomimic.config.config import Config + + +# global dictionary for remembering name - class mappings +REGISTERED_CONFIGS = {} + + +def get_all_registered_configs(): + """ + Give access to dictionary of all registered configs for external use. + """ + return deepcopy(REGISTERED_CONFIGS) + + +def config_factory(name, config_type, dic=None): + """ + Creates an instance of a config from the algo name. 
Optionally pass + a dictionary to instantiate the config from the dictionary. + """ + if (config_type not in REGISTERED_CONFIGS) or (name not in REGISTERED_CONFIGS[config_type]): + raise Exception("Config for name {} and type {} not found. Make sure it is a registered config among: {}".format( + name, config_type, ', '.join(REGISTERED_CONFIGS))) + return REGISTERED_CONFIGS[config_type][name](dict_to_load=dic) + + +class ConfigMeta(type): + """ + Define a metaclass for constructing a config class. + It registers configs into the global registry. + """ + def __new__(meta, name, bases, class_dict): + cls = super(ConfigMeta, meta).__new__(meta, name, bases, class_dict) + if cls.__name__ != "MG_Config": + if cls.TYPE not in REGISTERED_CONFIGS: + REGISTERED_CONFIGS[cls.TYPE] = dict() + REGISTERED_CONFIGS[cls.TYPE][cls.NAME] = cls + return cls + + +@six.add_metaclass(ConfigMeta) +class MG_Config(Config): + def __init__(self, dict_to_load=None): + if dict_to_load is not None: + super(MG_Config, self).__init__(dict_to_load) + return + + super(MG_Config, self).__init__() + + # store name class property in the config (must be implemented by subclasses) + self.name = type(self).NAME + self.type = type(self).TYPE + + self.experiment_config() + self.obs_config() + self.task_config() + + # After init, new keys cannot be added to the config, except under nested + # attributes that have called @do_not_lock_keys + self.lock_keys() + + @property + @classmethod + def NAME(cls): + # must be specified by subclasses + raise NotImplementedError + + @property + @classmethod + def TYPE(cls): + # must be specified by subclasses + raise NotImplementedError + + def experiment_config(self): + """ + This function populates the `config.experiment` attribute of the config, + which has general settings related to the dataset generation (e.g. + which environment, robot, and gripper to use for generation, how many + demonstrations to try collecting, etc). 
+ """ + + # set the name of the experiment - which will be used to name the dataset folder that is generated + self.experiment.name = "demo" + + # settings related to source dataset + self.experiment.source.dataset_path = None # path to source hdf5 dataset + self.experiment.source.filter_key = None # filter key, to select a subset of trajectories in the source hdf5 dataset + self.experiment.source.n = None # if provided, use only the first @n trajectories in source hdf5 dataset + self.experiment.source.start = None # if provided, exclude the first @start trajectories in source hdf5 dataset + + # settings related to data generation + self.experiment.generation.path = None # path where new dataset folder will be created + self.experiment.generation.guarantee = False # whether to keep running data collection until we have @num_trials successful trajectories + self.experiment.generation.keep_failed = True # whether to keep failed trajectories as well + self.experiment.generation.num_trials = 10 # number of attempts to collect new data + + # if True, select a different source demonstration for each subtask during data generation, else + # keep the same one for the entire episode + self.experiment.generation.select_src_per_subtask = False + + # if True, each subtask segment will consist of the first robot pose and the target poses instead of just the target poses. + # Can sometimes help improve data generation quality as the interpolation segment will interpolate to where the robot + # started in the source segment instead of the first target pose. Note that the first subtask segment of each episode + # will always include the first robot pose, regardless of this argument. + self.experiment.generation.transform_first_robot_pose = False + + # if True, each interpolation segment will start from the last target pose in the previous subtask segment, instead + # of the current robot pose. Can sometimes improve data generation quality. 
+ self.experiment.generation.interpolate_from_last_target_pose = True + + # settings related to task used for data generation + self.experiment.task.name = None # if provided, override the env name in env meta to collect data on a different environment from the one in source data + self.experiment.task.robot = None # if provided, override the robot name in env meta to collect data on a different robot from the one in source data + self.experiment.task.gripper = None # if provided, override the gripper in env meta to collect data on a different robot gripper from the one in source data + self.experiment.task.interface = None # if provided, override the environment interface class to use for this task to use a different one from the one in source data + self.experiment.task.interface_type = None # if provided, specify environment interface type (usually one per simulator) to use a different one from the one in source data + + # general settings + self.experiment.max_num_failures = 50 # maximum number of failure demos to save + self.experiment.render_video = True # whether to render some generated demos to video + self.experiment.num_demo_to_render = 50 # maximum number of demos to render to video + self.experiment.num_fail_demo_to_render = 50 # maximum number of failure demos to render to video + self.experiment.log_every_n_attempts = 50 # logs important info every N generation attempts + + # random seed for generation + self.experiment.seed = 1 + + def obs_config(self): + """ + This function populates the `config.obs` attribute of the config, which has + settings for which observations to collect during data generation. 
+ """ + self.obs.collect_obs = True # whether to collect observations + self.obs.camera_names = [] # which cameras to render observations from + self.obs.camera_height = 84 # camera height + self.obs.camera_width = 84 # camera width + + def task_config(self): + """ + This function populates the `config.task` attribute of the config, + which has settings for each object-centric subtask in a task. + """ + raise NotImplementedError diff --git a/mimicgen/configs/robosuite.py b/mimicgen/configs/robosuite.py new file mode 100644 index 0000000..b63a664 --- /dev/null +++ b/mimicgen/configs/robosuite.py @@ -0,0 +1,679 @@ +# Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# Licensed under the NVIDIA Source Code License [see LICENSE for details]. + +""" +Task configs for robosuite. + +See @Coffee_Config below for an explanation of each parameter. +""" +import mimicgen +from mimicgen.configs.config import MG_Config + + +class Coffee_Config(MG_Config): + """ + Corresponds to robosuite Coffee task and variants. + """ + NAME = "coffee" + TYPE = "robosuite" + + def task_config(self): + """ + This function populates the `config.task` attribute of the config, + which has settings for each object-centric subtask in a task. Each + dictionary should have kwargs for the @add_subtask method in the + @MG_TaskSpec object. + """ + self.task.task_spec.subtask_1 = dict( + # Each subtask involves manipulation with respect to a single object frame. + # This string should specify the object for this subtask. The name should be + # consistent with the "datagen_info" from the environment interface and dataset. + object_ref="coffee_pod", + # The "datagen_info" from the environment and dataset includes binary indicators + # for each subtask of the task at each timestep. This key should correspond + # to the key in "datagen_info" that should be used to infer when this subtask + # is finished (e.g. on a 0 to 1 edge of the binary indicator). 
Should provide + # None for the final subtask. + subtask_term_signal="grasp", + # if not None, specifies time offsets to be used during data generation when splitting + # a trajectory into subtask segments. On each data generation attempt, an offset is sampled + # and added to the boundary defined by @subtask_term_signal. + subtask_term_offset_range=(5, 10), + # specifies how the source subtask segment should be selected during data generation + # from the set of source human demos + selection_strategy="random", + # optional keyword arguments for the selection strategy function used + selection_strategy_kwargs=None, + # amount of action noise to apply during this subtask + action_noise=0.05, + # number of interpolation steps to bridge previous subtask segment to this one + num_interpolation_steps=5, + # number of additional steps (with constant target pose of beginning of this subtask segment) to + # add to give the robot time to reach the pose needed to carry out this subtask segment + num_fixed_steps=0, + # if True, apply action noise during interpolation phase leading up to this subtask, as + # well as during the execution of this subtask + apply_noise_during_interpolation=False, + ) + self.task.task_spec.subtask_2 = dict( + object_ref="coffee_machine", + # end of final subtask does not need to be detected + subtask_term_signal=None, + subtask_term_offset_range=None, + selection_strategy="random", + selection_strategy_kwargs=None, + action_noise=0.05, + num_interpolation_steps=5, + num_fixed_steps=0, + apply_noise_during_interpolation=False, + ) + # allow downstream code to completely replace the task spec from an external config + self.task.task_spec.do_not_lock_keys() + + +class Threading_Config(MG_Config): + """ + Corresponds to robosuite Threading task and variants. 
+ """ + NAME = "threading" + TYPE = "robosuite" + + def task_config(self): + """ + This function populates the `config.task` attribute of the config, + which has settings for each object-centric subtask in a task. Each + dictionary should have kwargs for the @add_subtask method in the + @MG_TaskSpec object. + """ + self.task.task_spec.subtask_1 = dict( + object_ref="needle", + subtask_term_signal="grasp", + subtask_term_offset_range=(5, 10), + selection_strategy="random", + selection_strategy_kwargs=None, + action_noise=0.05, + num_interpolation_steps=5, + num_fixed_steps=0, + apply_noise_during_interpolation=False, + ) + self.task.task_spec.subtask_2 = dict( + object_ref="tripod", + subtask_term_signal=None, + subtask_term_offset_range=None, + selection_strategy="random", + selection_strategy_kwargs=None, + action_noise=0.05, + num_interpolation_steps=5, + num_fixed_steps=0, + apply_noise_during_interpolation=False, + ) + self.task.task_spec.do_not_lock_keys() + + +class ThreePieceAssembly_Config(MG_Config): + """ + Corresponds to robosuite ThreePieceAssembly task and variants. + """ + NAME = "three_piece_assembly" + TYPE = "robosuite" + + def task_config(self): + """ + This function populates the `config.task` attribute of the config, + which has settings for each object-centric subtask in a task. Each + dictionary should have kwargs for the @add_subtask method in the + @MG_TaskSpec object. 
+ """ + self.task.task_spec.subtask_1 = dict( + object_ref="piece_1", + subtask_term_signal="grasp_1", + subtask_term_offset_range=(5, 10), + selection_strategy="random", + selection_strategy_kwargs=None, + action_noise=0.05, + num_interpolation_steps=5, + num_fixed_steps=0, + apply_noise_during_interpolation=False, + ) + self.task.task_spec.subtask_2 = dict( + object_ref="base", + subtask_term_signal="insert_1", + subtask_term_offset_range=(5, 10), + selection_strategy="random", + selection_strategy_kwargs=None, + action_noise=0.05, + num_interpolation_steps=5, + num_fixed_steps=0, + apply_noise_during_interpolation=False, + ) + self.task.task_spec.subtask_3 = dict( + object_ref="piece_2", + subtask_term_signal="grasp_2", + subtask_term_offset_range=(5, 10), + selection_strategy="random", + selection_strategy_kwargs=None, + action_noise=0.05, + num_interpolation_steps=5, + num_fixed_steps=0, + apply_noise_during_interpolation=False, + ) + self.task.task_spec.subtask_4 = dict( + object_ref="piece_1", + subtask_term_signal=None, + subtask_term_offset_range=None, + selection_strategy="random", + selection_strategy_kwargs=None, + action_noise=0.05, + num_interpolation_steps=5, + num_fixed_steps=0, + apply_noise_during_interpolation=False, + ) + self.task.task_spec.do_not_lock_keys() + + +class Square_Config(MG_Config): + """ + Corresponds to robosuite Square task and variants. + """ + NAME = "square" + TYPE = "robosuite" + + def task_config(self): + """ + This function populates the `config.task` attribute of the config, + which has settings for each object-centric subtask in a task. Each + dictionary should have kwargs for the @add_subtask method in the + @MG_TaskSpec object. 
+ """ + self.task.task_spec.subtask_1 = dict( + object_ref="square_nut", + subtask_term_signal="grasp", + subtask_term_offset_range=(10, 20), + selection_strategy="nearest_neighbor_object", + selection_strategy_kwargs=dict(nn_k=3), + action_noise=0.05, + num_interpolation_steps=5, + num_fixed_steps=0, + apply_noise_during_interpolation=False, + ) + self.task.task_spec.subtask_2 = dict( + object_ref="square_peg", + subtask_term_signal=None, + subtask_term_offset_range=None, + selection_strategy="random", + selection_strategy_kwargs=None, + action_noise=0.05, + num_interpolation_steps=5, + num_fixed_steps=0, + apply_noise_during_interpolation=False, + ) + self.task.task_spec.do_not_lock_keys() + + +class Stack_Config(MG_Config): + """ + Corresponds to robosuite Stack task and variants. + """ + NAME = "stack" + TYPE = "robosuite" + + def task_config(self): + """ + This function populates the `config.task` attribute of the config, + which has settings for each object-centric subtask in a task. Each + dictionary should have kwargs for the @add_subtask method in the + @MG_TaskSpec object. + """ + self.task.task_spec.subtask_1 = dict( + object_ref="cubeA", + subtask_term_signal="grasp", + subtask_term_offset_range=(10, 20), + selection_strategy="nearest_neighbor_object", + selection_strategy_kwargs=dict(nn_k=3), + action_noise=0.05, + num_interpolation_steps=5, + num_fixed_steps=0, + apply_noise_during_interpolation=False, + ) + self.task.task_spec.subtask_2 = dict( + object_ref="cubeB", + subtask_term_signal=None, + subtask_term_offset_range=None, + selection_strategy="nearest_neighbor_object", + selection_strategy_kwargs=dict(nn_k=3), + action_noise=0.05, + num_interpolation_steps=5, + num_fixed_steps=0, + apply_noise_during_interpolation=False, + ) + self.task.task_spec.do_not_lock_keys() + + +class StackThree_Config(MG_Config): + """ + Corresponds to robosuite StackThree task and variants. 
+ """ + NAME = "stack_three" + TYPE = "robosuite" + + def task_config(self): + """ + This function populates the `config.task` attribute of the config, + which has settings for each object-centric subtask in a task. Each + dictionary should have kwargs for the @add_subtask method in the + @MG_TaskSpec object. + """ + self.task.task_spec.subtask_1 = dict( + object_ref="cubeA", + subtask_term_signal="grasp_1", + subtask_term_offset_range=(10, 20), + selection_strategy="nearest_neighbor_object", + selection_strategy_kwargs=dict(nn_k=3), + action_noise=0.05, + num_interpolation_steps=5, + num_fixed_steps=0, + apply_noise_during_interpolation=False, + ) + self.task.task_spec.subtask_2 = dict( + object_ref="cubeB", + subtask_term_signal="stack_1", + subtask_term_offset_range=(10, 20), + selection_strategy="nearest_neighbor_object", + selection_strategy_kwargs=dict(nn_k=3), + action_noise=0.05, + num_interpolation_steps=5, + num_fixed_steps=0, + apply_noise_during_interpolation=False, + ) + self.task.task_spec.subtask_3 = dict( + object_ref="cubeC", + subtask_term_signal="grasp_2", + subtask_term_offset_range=(10, 20), + selection_strategy="nearest_neighbor_object", + selection_strategy_kwargs=dict(nn_k=3), + action_noise=0.05, + num_interpolation_steps=5, + num_fixed_steps=0, + apply_noise_during_interpolation=False, + ) + self.task.task_spec.subtask_4 = dict( + object_ref="cubeA", + subtask_term_signal=None, + subtask_term_offset_range=None, + selection_strategy="nearest_neighbor_object", + selection_strategy_kwargs=dict(nn_k=3), + action_noise=0.05, + num_interpolation_steps=5, + num_fixed_steps=0, + apply_noise_during_interpolation=False, + ) + self.task.task_spec.do_not_lock_keys() + + +class HammerCleanup_Config(MG_Config): + """ + Corresponds to robosuite HammerCleanup task and variants. 
+ """ + NAME = "hammer_cleanup" + TYPE = "robosuite" + + def task_config(self): + """ + This function populates the `config.task` attribute of the config, + which has settings for each object-centric subtask in a task. Each + dictionary should have kwargs for the @add_subtask method in the + @MG_TaskSpec object. + """ + self.task.task_spec.subtask_1 = dict( + object_ref="drawer", + subtask_term_signal="open", + subtask_term_offset_range=(10, 20), + selection_strategy="random", + selection_strategy_kwargs=None, + action_noise=0.05, + num_interpolation_steps=5, + num_fixed_steps=0, + apply_noise_during_interpolation=False, + ) + self.task.task_spec.subtask_2 = dict( + object_ref="hammer", + subtask_term_signal="grasp", + subtask_term_offset_range=(10, 20), + selection_strategy="random", + selection_strategy_kwargs=None, + action_noise=0.05, + num_interpolation_steps=5, + num_fixed_steps=0, + apply_noise_during_interpolation=False, + ) + self.task.task_spec.subtask_3 = dict( + object_ref="drawer", + subtask_term_signal=None, + subtask_term_offset_range=None, + selection_strategy="random", + selection_strategy_kwargs=None, + action_noise=0.05, + num_interpolation_steps=5, + num_fixed_steps=0, + apply_noise_during_interpolation=False, + ) + self.task.task_spec.do_not_lock_keys() + + +class MugCleanup_Config(MG_Config): + """ + Corresponds to robosuite MugCleanup task and variants. + """ + NAME = "mug_cleanup" + TYPE = "robosuite" + + def task_config(self): + """ + This function populates the `config.task` attribute of the config, + which has settings for each object-centric subtask in a task. Each + dictionary should have kwargs for the @add_subtask method in the + @MG_TaskSpec object. 
+ """ + self.task.task_spec.subtask_1 = dict( + object_ref="drawer", + subtask_term_signal="open", + subtask_term_offset_range=(10, 20), + selection_strategy="random", + selection_strategy_kwargs=None, + action_noise=0.05, + num_interpolation_steps=5, + num_fixed_steps=0, + apply_noise_during_interpolation=False, + ) + self.task.task_spec.subtask_2 = dict( + object_ref="object", + subtask_term_signal="grasp", + subtask_term_offset_range=(10, 20), + selection_strategy="random", + selection_strategy_kwargs=None, + action_noise=0.05, + num_interpolation_steps=5, + num_fixed_steps=0, + apply_noise_during_interpolation=False, + ) + self.task.task_spec.subtask_3 = dict( + object_ref="drawer", + subtask_term_signal=None, + subtask_term_offset_range=None, + selection_strategy="random", + selection_strategy_kwargs=None, + action_noise=0.05, + num_interpolation_steps=5, + num_fixed_steps=0, + apply_noise_during_interpolation=False, + ) + self.task.task_spec.do_not_lock_keys() + + +class NutAssembly_Config(MG_Config): + """ + Corresponds to robosuite NutAssembly task and variants. + """ + NAME = "nut_assembly" + TYPE = "robosuite" + + def task_config(self): + """ + This function populates the `config.task` attribute of the config, + which has settings for each object-centric subtask in a task. Each + dictionary should have kwargs for the @add_subtask method in the + @MG_TaskSpec object. 
+ """ + self.task.task_spec.subtask_1 = dict( + object_ref="square_nut", + subtask_term_signal="grasp_square_nut", + subtask_term_offset_range=(10, 20), + selection_strategy="nearest_neighbor_object", + selection_strategy_kwargs=dict(nn_k=3), + action_noise=0.05, + num_interpolation_steps=5, + num_fixed_steps=0, + apply_noise_during_interpolation=False, + ) + self.task.task_spec.subtask_2 = dict( + object_ref="square_peg", + subtask_term_signal="insert_square_nut", + subtask_term_offset_range=(10, 20), + selection_strategy="random", + selection_strategy_kwargs=None, + action_noise=0.05, + num_interpolation_steps=5, + num_fixed_steps=0, + apply_noise_during_interpolation=False, + ) + self.task.task_spec.subtask_3 = dict( + object_ref="round_nut", + subtask_term_signal="grasp_round_nut", + subtask_term_offset_range=(10, 20), + selection_strategy="random", + selection_strategy_kwargs=None, + action_noise=0.05, + num_interpolation_steps=5, + num_fixed_steps=0, + apply_noise_during_interpolation=False, + ) + self.task.task_spec.subtask_4 = dict( + object_ref="round_peg", + subtask_term_signal=None, + subtask_term_offset_range=None, + selection_strategy="random", + selection_strategy_kwargs=None, + action_noise=0.05, + num_interpolation_steps=5, + num_fixed_steps=0, + apply_noise_during_interpolation=False, + ) + self.task.task_spec.do_not_lock_keys() + + +class PickPlace_Config(MG_Config): + """ + Corresponds to robosuite PickPlace task and variants. + """ + NAME = "pick_place" + TYPE = "robosuite" + + def task_config(self): + """ + This function populates the `config.task` attribute of the config, + which has settings for each object-centric subtask in a task. Each + dictionary should have kwargs for the @add_subtask method in the + @MG_TaskSpec object. 
+ """ + for i, obj in enumerate(["milk", "cereal", "bread", "can"]): + self.task.task_spec["subtask_{}".format(2 * i + 1)] = dict( + object_ref=obj, + subtask_term_signal="grasp_{}".format(obj), + subtask_term_offset_range=(10, 20), + selection_strategy="nearest_neighbor_object", + selection_strategy_kwargs=dict(nn_k=3), + action_noise=0.05, + num_interpolation_steps=5, + num_fixed_steps=0, + apply_noise_during_interpolation=False, + ) + # last subtask does not need subtask termination signal but all others do + self.task.task_spec["subtask_{}".format(2 * i + 2)] = dict( + object_ref=None, + subtask_term_signal=None if (obj == "can") else "place_{}".format(obj), + subtask_term_offset_range=None, + selection_strategy="random", + selection_strategy_kwargs=None, + action_noise=0.05, + num_interpolation_steps=5, + num_fixed_steps=0, + apply_noise_during_interpolation=False, + ) + self.task.task_spec.do_not_lock_keys() + + +class Kitchen_Config(MG_Config): + """ + Corresponds to robosuite Kitchen task and variants. + """ + NAME = "kitchen" + TYPE = "robosuite" + + def task_config(self): + """ + This function populates the `config.task` attribute of the config, + which has settings for each object-centric subtask in a task. Each + dictionary should have kwargs for the @add_subtask method in the + @MG_TaskSpec object. 
+ """ + self.task.task_spec.subtask_1 = dict( + object_ref="button", + subtask_term_signal="stove_on", + subtask_term_offset_range=(10, 20), + selection_strategy="random", + selection_strategy_kwargs=None, + action_noise=0.05, + num_interpolation_steps=5, + num_fixed_steps=0, + apply_noise_during_interpolation=False, + ) + self.task.task_spec.subtask_2 = dict( + object_ref="pot", + subtask_term_signal="grasp_pot", + subtask_term_offset_range=(10, 20), + selection_strategy="random", + selection_strategy_kwargs=None, + action_noise=0.05, + num_interpolation_steps=5, + num_fixed_steps=0, + apply_noise_during_interpolation=False, + ) + self.task.task_spec.subtask_3 = dict( + object_ref="stove", + subtask_term_signal="place_pot_on_stove", + subtask_term_offset_range=(10, 20), + selection_strategy="random", + selection_strategy_kwargs=None, + action_noise=0.05, + num_interpolation_steps=5, + num_fixed_steps=0, + apply_noise_during_interpolation=False, + ) + self.task.task_spec.subtask_4 = dict( + object_ref="bread", + subtask_term_signal="grasp_bread", + subtask_term_offset_range=(10, 20), + selection_strategy="random", + selection_strategy_kwargs=None, + action_noise=0.05, + num_interpolation_steps=5, + num_fixed_steps=0, + apply_noise_during_interpolation=False, + ) + self.task.task_spec.subtask_5 = dict( + object_ref="pot", + subtask_term_signal="place_bread_in_pot", + subtask_term_offset_range=(10, 20), + selection_strategy="random", + selection_strategy_kwargs=None, + action_noise=0.05, + num_interpolation_steps=5, + num_fixed_steps=0, + apply_noise_during_interpolation=False, + ) + self.task.task_spec.subtask_6 = dict( + object_ref="serving_region", + subtask_term_signal="serve", + subtask_term_offset_range=(10, 20), + selection_strategy="random", + selection_strategy_kwargs=None, + action_noise=0.05, + num_interpolation_steps=5, + num_fixed_steps=0, + apply_noise_during_interpolation=False, + ) + self.task.task_spec.subtask_7 = dict( + object_ref="button", + 
subtask_term_signal=None, + subtask_term_offset_range=None, + selection_strategy="random", + selection_strategy_kwargs=None, + action_noise=0.05, + num_interpolation_steps=5, + num_fixed_steps=0, + apply_noise_during_interpolation=False, + ) + self.task.task_spec.do_not_lock_keys() + + +class CoffeePreparation_Config(MG_Config): + """ + Corresponds to robosuite CoffeePreparation task and variants. + """ + NAME = "coffee_preparation" + TYPE = "robosuite" + + def task_config(self): + """ + This function populates the `config.task` attribute of the config, + which has settings for each object-centric subtask in a task. Each + dictionary should have kwargs for the @add_subtask method in the + @MG_TaskSpec object. + """ + self.task.task_spec.subtask_1 = dict( + object_ref="mug", + subtask_term_signal="mug_grasp", + subtask_term_offset_range=(5, 10), + selection_strategy="random", + selection_strategy_kwargs=None, + action_noise=0.05, + num_interpolation_steps=5, + num_fixed_steps=0, + apply_noise_during_interpolation=False, + ) + self.task.task_spec.subtask_2 = dict( + object_ref="coffee_machine", + subtask_term_signal="mug_place", + subtask_term_offset_range=(5, 10), + selection_strategy="random", + selection_strategy_kwargs=None, + action_noise=0.05, + num_interpolation_steps=5, + num_fixed_steps=0, + apply_noise_during_interpolation=False, + ) + self.task.task_spec.subtask_3 = dict( + object_ref="drawer", + subtask_term_signal="drawer_open", + subtask_term_offset_range=(5, 10), + selection_strategy="random", + selection_strategy_kwargs=None, + action_noise=0.05, + num_interpolation_steps=5, + num_fixed_steps=0, + apply_noise_during_interpolation=False, + ) + self.task.task_spec.subtask_4 = dict( + object_ref="coffee_pod", + subtask_term_signal="pod_grasp", + subtask_term_offset_range=(5, 10), + selection_strategy="random", + selection_strategy_kwargs=None, + action_noise=0.05, + num_interpolation_steps=5, + num_fixed_steps=0, + apply_noise_during_interpolation=False, 
+ ) + self.task.task_spec.subtask_5 = dict( + object_ref="coffee_machine", + subtask_term_signal=None, + subtask_term_offset_range=None, + selection_strategy="random", + selection_strategy_kwargs=None, + action_noise=0.05, + num_interpolation_steps=5, + num_fixed_steps=0, + apply_noise_during_interpolation=False, + ) + self.task.task_spec.do_not_lock_keys() diff --git a/mimicgen/configs/task_spec.py b/mimicgen/configs/task_spec.py new file mode 100644 index 0000000..95e71ec --- /dev/null +++ b/mimicgen/configs/task_spec.py @@ -0,0 +1,139 @@ +# Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# Licensed under the NVIDIA Source Code License [see LICENSE for details]. + +""" +Defines task specification objects, which are used to store task-specific settings +for data generation. +""" +import json + +import mimicgen +from mimicgen.datagen.selection_strategy import assert_selection_strategy_exists + +class MG_TaskSpec: + """ + Stores task-specific settings for data generation. Each task is a sequence of + object-centric subtasks, and each subtask stores relevant settings used during + the data generation process. + """ + def __init__(self): + self.spec = [] + + def add_subtask( + self, + object_ref, + subtask_term_signal, + subtask_term_offset_range=None, + selection_strategy="random", + selection_strategy_kwargs=None, + action_noise=0., + num_interpolation_steps=5, + num_fixed_steps=0, + apply_noise_during_interpolation=False, + ): + """ + Add subtask to this task spec. + + Args: + object_ref (str): each subtask involves manipulation with + respect to a single object frame. This string should + specify the object for this subtask. The name + should be consistent with the "datagen_info" from the + environment interface and dataset. + + subtask_term_signal (str or None): the "datagen_info" from the environment + and dataset includes binary indicators for each subtask + of the task at each timestep. 
This key should correspond + to the key in "datagen_info" that should be used to + infer when this subtask is finished (e.g. on a 0 to 1 + edge of the binary indicator). Should provide None for the final + subtask. + + subtask_term_offset_range (2-tuple): if provided, specifies time offsets to + be used during data generation when splitting a trajectory into + subtask segments. On each data generation attempt, an offset is sampled + and added to the boundary defined by @subtask_term_signal. + + selection_strategy (str): specifies how the source subtask segment should be + selected during data generation from the set of source human demos + + selection_strategy_kwargs (dict or None): optional keyword arguments for the selection + strategy function used + + action_noise (float): amount of action noise to apply during this subtask + + num_interpolation_steps (int): number of interpolation steps to bridge previous subtask segment + to this one + + num_fixed_steps (int): number of additional steps (with constant target pose of beginning of + this subtask segment) to add to give the robot time to reach the pose needed to carry + out this subtask segment + + apply_noise_during_interpolation (bool): if True, apply action noise during interpolation phase + leading up to this subtask, as well as during the execution of this subtask + """ + if subtask_term_offset_range is None: + # corresponds to no offset + subtask_term_offset_range = (0, 0) + assert isinstance(subtask_term_offset_range, tuple) + assert len(subtask_term_offset_range) == 2 + assert subtask_term_offset_range[0] <= subtask_term_offset_range[1] + assert_selection_strategy_exists(selection_strategy) + self.spec.append(dict( + object_ref=object_ref, + subtask_term_signal=subtask_term_signal, + subtask_term_offset_range=subtask_term_offset_range, + selection_strategy=selection_strategy, + selection_strategy_kwargs=selection_strategy_kwargs, + action_noise=action_noise, + 
num_interpolation_steps=num_interpolation_steps, + num_fixed_steps=num_fixed_steps, + apply_noise_during_interpolation=apply_noise_during_interpolation, + )) + + @classmethod + def from_json(cls, json_string=None, json_dict=None): + """ + Instantiate a TaskSpec object from a json string. This should + be consistent with the output of @serialize. + + Args: + json_string (str): top-level of json has a key per subtask in-order (e.g. + "subtask_1", "subtask_2", "subtask_3") and under each subtask, there should + be an entry for each argument of @add_subtask + + json_dict (dict): optionally directly pass json dict + """ + if json_dict is None: + json_dict = json.loads(json_string) + task_spec = cls() + for subtask_name in json_dict: + if json_dict[subtask_name]["subtask_term_offset_range"] is not None: + json_dict[subtask_name]["subtask_term_offset_range"] = tuple(json_dict[subtask_name]["subtask_term_offset_range"]) + task_spec.add_subtask(**json_dict[subtask_name]) + return task_spec + + def serialize(self): + """ + Return a json string corresponding to this task spec object. Compatible with + @from_json classmethod. + """ + json_dict = dict() + for i, elem in enumerate(self.spec): + json_dict["subtask_{}".format(i + 1)] = elem + return json.dumps(json_dict, indent=4) + + def __len__(self): + return len(self.spec) + + def __getitem__(self, ind): + """Support list-like indexing""" + return self.spec[ind] + + def __iter__(self): + """Support list-like iteration.""" + return iter(self.spec) + + def __repr__(self): + return json.dumps(self.spec, indent=4) diff --git a/mimicgen_envs/utils/__init__.py b/mimicgen/datagen/__init__.py similarity index 57% rename from mimicgen_envs/utils/__init__.py rename to mimicgen/datagen/__init__.py index 981400f..8edfcfd 100644 --- a/mimicgen_envs/utils/__init__.py +++ b/mimicgen/datagen/__init__.py @@ -1,3 +1,3 @@ -# Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+# Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # # Licensed under the NVIDIA Source Code License [see LICENSE for details]. \ No newline at end of file diff --git a/mimicgen/datagen/data_generator.py b/mimicgen/datagen/data_generator.py new file mode 100644 index 0000000..bc07081 --- /dev/null +++ b/mimicgen/datagen/data_generator.py @@ -0,0 +1,409 @@ +# Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# Licensed under the NVIDIA Source Code License [see LICENSE for details]. + +""" +Base class for data generator. +""" +import h5py +import sys +import numpy as np + +import mimicgen +import mimicgen.utils.pose_utils as PoseUtils +import mimicgen.utils.file_utils as MG_FileUtils + +from mimicgen.configs.task_spec import MG_TaskSpec +from mimicgen.datagen.datagen_info import DatagenInfo +from mimicgen.datagen.selection_strategy import make_selection_strategy +from mimicgen.datagen.waypoint import WaypointSequence, WaypointTrajectory + + +class DataGenerator(object): + """ + The main data generator object that loads a source dataset, parses it, and + generates new trajectories. + """ + def __init__( + self, + task_spec, + dataset_path, + demo_keys=None, + ): + """ + Args: + task_spec (MG_TaskSpec instance): task specification that will be + used to generate data + dataset_path (str): path to hdf5 dataset to use for generation + demo_keys (list of str): list of demonstration keys to use + in file. If not provided, all demonstration keys will be + used. 
+        """
+        assert isinstance(task_spec, MG_TaskSpec)
+        self.task_spec = task_spec
+        self.dataset_path = dataset_path
+
+        # sanity check on task spec offset ranges - final subtask should not have any offset randomization
+        assert self.task_spec[-1]["subtask_term_offset_range"][0] == 0
+        assert self.task_spec[-1]["subtask_term_offset_range"][1] == 0
+
+        # demonstration keys to use from hdf5 as source dataset
+        if demo_keys is None:
+            # get all demonstration keys from file
+            # NOTE: must read the attribute assigned above (self.dataset_path);
+            # "self.dataset" does not exist and raised AttributeError here.
+            demo_keys = MG_FileUtils.get_all_demos_from_dataset(dataset_path=self.dataset_path)
+        self.demo_keys = demo_keys
+
+        # parse source dataset
+        self._load_dataset(dataset_path=dataset_path, demo_keys=demo_keys)
+
+    def _load_dataset(self, dataset_path, demo_keys):
+        """
+        Load important information from a dataset into internal memory.
+        """
+        print("\nDataGenerator: loading dataset at path {}...".format(dataset_path))
+        self.src_dataset_infos, self.src_subtask_indices, self.subtask_names, _ = MG_FileUtils.parse_source_dataset(
+            dataset_path=dataset_path,
+            demo_keys=demo_keys,
+            task_spec=self.task_spec,
+        )
+        print("\nDataGenerator: done loading\n")
+
+    def __repr__(self):
+        """
+        Pretty print this object.
+        """
+        msg = str(self.__class__.__name__)
+        msg += " (\n\tdataset_path={}\n\tdemo_keys={}\n)".format(
+            self.dataset_path,
+            self.demo_keys,
+        )
+        return msg
+
+    def randomize_subtask_boundaries(self):
+        """
+        Apply random offsets to sample subtask boundaries according to the task spec.
+        Recall that each demonstration is segmented into a set of subtask segments, and the
+        end index of each subtask can have a random offset.
+ """ + + # initial subtask start and end indices - shape (N, S, 2) + src_subtask_indices = np.array(self.src_subtask_indices) + + # for each subtask (except last one), sample all end offsets at once for each demonstration + # add them to subtask end indices, and then set them as the start indices of next subtask too + for i in range(src_subtask_indices.shape[1] - 1): + end_offsets = np.random.randint( + low=self.task_spec[i]["subtask_term_offset_range"][0], + high=self.task_spec[i]["subtask_term_offset_range"][1] + 1, + size=src_subtask_indices.shape[0] + ) + src_subtask_indices[:, i, 1] = src_subtask_indices[:, i, 1] + end_offsets + # don't forget to set these as start indices for next subtask too + src_subtask_indices[:, i + 1, 0] = src_subtask_indices[:, i, 1] + + # ensure non-empty subtasks + assert np.all((src_subtask_indices[:, :, 1] - src_subtask_indices[:, :, 0]) > 0), "got empty subtasks!" + + # ensure subtask indices increase (both starts and ends) + assert np.all((src_subtask_indices[:, 1:, :] - src_subtask_indices[:, :-1, :]) > 0), "subtask indices do not strictly increase" + + # ensure subtasks are in order + subtask_inds_flat = src_subtask_indices.reshape(src_subtask_indices.shape[0], -1) + assert np.all((subtask_inds_flat[:, 1:] - subtask_inds_flat[:, :-1]) >= 0), "subtask indices not in order" + + return src_subtask_indices + + def select_source_demo( + self, + eef_pose, + object_pose, + subtask_ind, + src_subtask_inds, + subtask_object_name, + selection_strategy_name, + selection_strategy_kwargs=None, + ): + """ + Helper method to run source subtask segment selection. 
+ + Args: + eef_pose (np.array): current end effector pose + object_pose (np.array): current object pose for this subtask + subtask_ind (int): index of subtask + src_subtask_inds (np.array): start and end indices for subtask segment in source demonstrations of shape (N, 2) + subtask_object_name (str): name of reference object for this subtask + selection_strategy_name (str): name of selection strategy + selection_strategy_kwargs (dict): extra kwargs for running selection strategy + + Returns: + selected_src_demo_ind (int): selected source demo index + """ + if subtask_object_name is None: + # no reference object - only random selection is supported + assert selection_strategy_name == "random" + + # We need to collect the datagen info objects over the timesteps for the subtask segment in each source + # demo, so that it can be used by the selection strategy. + src_subtask_datagen_infos = [] + for i in range(len(self.demo_keys)): + # datagen info over all timesteps of the src trajectory + src_ep_datagen_info = self.src_dataset_infos[i] + + # time indices for subtask + subtask_start_ind = src_subtask_inds[i][0] + subtask_end_ind = src_subtask_inds[i][1] + + # get subtask segment using indices + src_subtask_datagen_infos.append(DatagenInfo( + eef_pose=src_ep_datagen_info.eef_pose[subtask_start_ind : subtask_end_ind], + # only include object pose for relevant object in subtask + object_poses={ subtask_object_name : src_ep_datagen_info.object_poses[subtask_object_name][subtask_start_ind : subtask_end_ind] } if (subtask_object_name is not None) else None, + # subtask termination signal is unused + subtask_term_signals=None, + target_pose=src_ep_datagen_info.target_pose[subtask_start_ind : subtask_end_ind], + gripper_action=src_ep_datagen_info.gripper_action[subtask_start_ind : subtask_end_ind], + )) + + # make selection strategy object + selection_strategy_obj = make_selection_strategy(selection_strategy_name) + + # run selection + if selection_strategy_kwargs is None: + 
selection_strategy_kwargs = dict() + selected_src_demo_ind = selection_strategy_obj.select_source_demo( + eef_pose=eef_pose, + object_pose=object_pose, + src_subtask_datagen_infos=src_subtask_datagen_infos, + **selection_strategy_kwargs, + ) + + return selected_src_demo_ind + + def generate( + self, + env, + env_interface, + select_src_per_subtask=False, + transform_first_robot_pose=False, + interpolate_from_last_target_pose=True, + render=False, + video_writer=None, + video_skip=5, + camera_names=None, + pause_subtask=False, + ): + """ + Attempt to generate a new demonstration. + + Args: + env (robomimic EnvBase instance): environment to use for data collection + + env_interface (MG_EnvInterface instance): environment interface for some data generation operations + + select_src_per_subtask (bool): if True, select a different source demonstration for each subtask + during data generation, else keep the same one for the entire episode + + transform_first_robot_pose (bool): if True, each subtask segment will consist of the first + robot pose and the target poses instead of just the target poses. Can sometimes help + improve data generation quality as the interpolation segment will interpolate to where + the robot started in the source segment instead of the first target pose. Note that the + first subtask segment of each episode will always include the first robot pose, regardless + of this argument. + + interpolate_from_last_target_pose (bool): if True, each interpolation segment will start from + the last target pose in the previous subtask segment, instead of the current robot pose. Can + sometimes improve data generation quality. + + render (bool): if True, render on-screen + + video_writer (imageio writer): video writer + + video_skip (int): determines rate at which environment frames are written to video + + camera_names (list): determines which camera(s) are used for rendering. 
Pass more than + one to output a video with multiple camera views concatenated horizontally. + + pause_subtask (bool): if True, pause after every subtask during generation, for + debugging. + + Returns: + results (dict): dictionary with the following items: + initial_state (dict): initial simulator state for the executed trajectory + states (list): simulator state at each timestep + observations (list): observation dictionary at each timestep + datagen_infos (list): datagen_info at each timestep + actions (np.array): action executed at each timestep + success (bool): whether the trajectory successfully solved the task or not + src_demo_inds (list): list of selected source demonstration indices for each subtask + src_demo_labels (np.array): same as @src_demo_inds, but repeated to have a label for each timestep of the trajectory + """ + + # sample new task instance + env.reset() + new_initial_state = env.get_state() + + # sample new subtask boundaries + all_subtask_inds = self.randomize_subtask_boundaries() # shape [N, S, 2], last dim is start and end action lengths + + # some state variables used during generation + selected_src_demo_ind = None + prev_executed_traj = None + + # save generated data in these variables + generated_states = [] + generated_obs = [] + generated_datagen_infos = [] + generated_actions = [] + generated_success = False + generated_src_demo_inds = [] # store selected src demo ind for each subtask in each trajectory + generated_src_demo_labels = [] # like @generated_src_demo_inds, but padded to align with size of @generated_actions + + for subtask_ind in range(len(self.task_spec)): + + # some things only happen on first subtask + is_first_subtask = (subtask_ind == 0) + + # get datagen info in current environment to get required info for selection (e.g. 
eef pose, object pose) + cur_datagen_info = env_interface.get_datagen_info() + + # name of object for this subtask + subtask_object_name = self.task_spec[subtask_ind]["object_ref"] + + # corresponding current object pose + cur_object_pose = cur_datagen_info.object_poses[subtask_object_name] if (subtask_object_name is not None) else None + + # We need source demonstration selection for the first subtask (always), and possibly for + # other subtasks if @select_src_per_subtask is set. + need_source_demo_selection = (is_first_subtask or select_src_per_subtask) + + # Run source demo selection or use selected demo from previous iteration + if need_source_demo_selection: + selected_src_demo_ind = self.select_source_demo( + eef_pose=cur_datagen_info.eef_pose, + object_pose=cur_object_pose, + subtask_ind=subtask_ind, + src_subtask_inds=all_subtask_inds[:, subtask_ind], + subtask_object_name=subtask_object_name, + selection_strategy_name=self.task_spec[subtask_ind]["selection_strategy"], + selection_strategy_kwargs=self.task_spec[subtask_ind]["selection_strategy_kwargs"], + ) + assert (selected_src_demo_ind is not None) + + # selected subtask segment time indices + selected_src_subtask_inds = all_subtask_inds[selected_src_demo_ind, subtask_ind] + + # get subtask segment, consisting of the sequence of robot eef poses, target poses, gripper actions + src_ep_datagen_info = self.src_dataset_infos[selected_src_demo_ind] + src_subtask_eef_poses = src_ep_datagen_info.eef_pose[selected_src_subtask_inds[0] : selected_src_subtask_inds[1]] + src_subtask_target_poses = src_ep_datagen_info.target_pose[selected_src_subtask_inds[0] : selected_src_subtask_inds[1]] + src_subtask_gripper_actions = src_ep_datagen_info.gripper_action[selected_src_subtask_inds[0] : selected_src_subtask_inds[1]] + + # get reference object pose from source demo + src_subtask_object_pose = src_ep_datagen_info.object_poses[subtask_object_name][selected_src_subtask_inds[0]] if (subtask_object_name is not None) else 
None + + if is_first_subtask or transform_first_robot_pose: + # Source segment consists of first robot eef pose and the target poses. This ensures that + # we will interpolate to the first robot eef pose in this source segment, instead of the + # first robot target pose. + src_eef_poses = np.concatenate([src_subtask_eef_poses[0:1], src_subtask_target_poses], axis=0) + else: + # Source segment consists of just the target poses. + src_eef_poses = np.array(src_subtask_target_poses) + + # account for extra timestep added to @src_eef_poses + src_subtask_gripper_actions = np.concatenate([src_subtask_gripper_actions[0:1], src_subtask_gripper_actions], axis=0) + + # Transform source demonstration segment using relevant object pose. + if subtask_object_name is not None: + transformed_eef_poses = PoseUtils.transform_source_data_segment_using_object_pose( + obj_pose=cur_object_pose, + src_eef_poses=src_eef_poses, + src_obj_pose=src_subtask_object_pose, + ) + else: + # skip transformation if no reference object is provided + transformed_eef_poses = src_eef_poses + + # We will construct a WaypointTrajectory instance to keep track of robot control targets + # that will be executed and then execute it. + traj_to_execute = WaypointTrajectory() + + if interpolate_from_last_target_pose and (not is_first_subtask): + # Interpolation segment will start from last target pose (which may not have been achieved). + assert prev_executed_traj is not None + last_waypoint = prev_executed_traj.last_waypoint + init_sequence = WaypointSequence(sequence=[last_waypoint]) + else: + # Interpolation segment will start from current robot eef pose. + init_sequence = WaypointSequence.from_poses( + poses=cur_datagen_info.eef_pose[None], + gripper_actions=src_subtask_gripper_actions[0:1], + action_noise=self.task_spec[subtask_ind]["action_noise"], + ) + traj_to_execute.add_waypoint_sequence(init_sequence) + + # Construct trajectory for the transformed segment. 
+ transformed_seq = WaypointSequence.from_poses( + poses=transformed_eef_poses, + gripper_actions=src_subtask_gripper_actions, + action_noise=self.task_spec[subtask_ind]["action_noise"], + ) + transformed_traj = WaypointTrajectory() + transformed_traj.add_waypoint_sequence(transformed_seq) + + # Merge this trajectory into our trajectory using linear interpolation. + # Interpolation will happen from the initial pose (@init_sequence) to the first element of @transformed_seq. + traj_to_execute.merge( + transformed_traj, + num_steps_interp=self.task_spec[subtask_ind]["num_interpolation_steps"], + num_steps_fixed=self.task_spec[subtask_ind]["num_fixed_steps"], + action_noise=(float(self.task_spec[subtask_ind]["apply_noise_during_interpolation"]) * self.task_spec[subtask_ind]["action_noise"]), + ) + + # We initialized @traj_to_execute with a pose to allow @merge to handle linear interpolation + # for us. However, we can safely discard that first waypoint now, and just start by executing + # the rest of the trajectory (interpolation segment and transformed subtask segment). + traj_to_execute.pop_first() + + # Execute the trajectory and collect data. 
+ exec_results = traj_to_execute.execute( + env=env, + env_interface=env_interface, + render=render, + video_writer=video_writer, + video_skip=video_skip, + camera_names=camera_names, + ) + + # check that trajectory is non-empty + if len(exec_results["states"]) > 0: + generated_states += exec_results["states"] + generated_obs += exec_results["observations"] + generated_datagen_infos += exec_results["datagen_infos"] + generated_actions.append(exec_results["actions"]) + generated_success = generated_success or exec_results["success"] + generated_src_demo_inds.append(selected_src_demo_ind) + generated_src_demo_labels.append(selected_src_demo_ind * np.ones((exec_results["actions"].shape[0], 1), dtype=int)) + + # remember last trajectory + prev_executed_traj = traj_to_execute + + if pause_subtask: + input("Pausing after subtask {} execution. Press any key to continue...".format(subtask_ind)) + + # merge numpy arrays + if len(generated_actions) > 0: + generated_actions = np.concatenate(generated_actions, axis=0) + generated_src_demo_labels = np.concatenate(generated_src_demo_labels, axis=0) + + results = dict( + initial_state=new_initial_state, + states=generated_states, + observations=generated_obs, + datagen_infos=generated_datagen_infos, + actions=generated_actions, + success=generated_success, + src_demo_inds=generated_src_demo_inds, + src_demo_labels=generated_src_demo_labels, + ) + return results diff --git a/mimicgen/datagen/datagen_info.py b/mimicgen/datagen/datagen_info.py new file mode 100644 index 0000000..a46b0b2 --- /dev/null +++ b/mimicgen/datagen/datagen_info.py @@ -0,0 +1,78 @@ +# Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# Licensed under the NVIDIA Source Code License [see LICENSE for details]. + +""" +Defines structure of information that is needed from an environment for data generation. 
+""" +import numpy as np +from copy import deepcopy + + +class DatagenInfo(object): + """ + Structure of information needed from an environment for data generation. To allow for + flexibility, not all information must be present. + """ + def __init__( + self, + eef_pose=None, + object_poses=None, + subtask_term_signals=None, + target_pose=None, + gripper_action=None, + ): + """ + Args: + eef_pose (np.array or None): robot end effector poses of shape [..., 4, 4] + object_poses (dict or None): dictionary mapping object name to object poses + of shape [..., 4, 4] + subtask_term_signals (dict or None): dictionary mapping subtask name to a binary + indicator (0 or 1) on whether subtask has been completed. Each value in the + dictionary could be an int, float, or np.array of shape [..., 1]. + target_pose (np.array or None): target end effector poses of shape [..., 4, 4] + gripper_action (np.array or None): gripper actions of shape [..., D] where D + is the dimension of the gripper actuation action for the robot arm + """ + self.eef_pose = None + if eef_pose is not None: + self.eef_pose = np.array(eef_pose) + + self.object_poses = None + if object_poses is not None: + self.object_poses = { k : np.array(object_poses[k]) for k in object_poses } + + self.subtask_term_signals = None + if subtask_term_signals is not None: + self.subtask_term_signals = dict() + for k in subtask_term_signals: + if isinstance(subtask_term_signals[k], float) or isinstance(subtask_term_signals[k], int): + self.subtask_term_signals[k] = subtask_term_signals[k] + else: + # only create numpy array if value is not a single value + self.subtask_term_signals[k] = np.array(subtask_term_signals[k]) + + self.target_pose = None + if target_pose is not None: + self.target_pose = np.array(target_pose) + + self.gripper_action = None + if gripper_action is not None: + self.gripper_action = np.array(gripper_action) + + def to_dict(self): + """ + Convert this instance to a dictionary containing the same 
information. + """ + ret = dict() + if self.eef_pose is not None: + ret["eef_pose"] = np.array(self.eef_pose) + if self.object_poses is not None: + ret["object_poses"] = deepcopy(self.object_poses) + if self.subtask_term_signals is not None: + ret["subtask_term_signals"] = deepcopy(self.subtask_term_signals) + if self.target_pose is not None: + ret["target_pose"] = np.array(self.target_pose) + if self.gripper_action is not None: + ret["gripper_action"] = np.array(self.gripper_action) + return ret diff --git a/mimicgen/datagen/selection_strategy.py b/mimicgen/datagen/selection_strategy.py new file mode 100644 index 0000000..68d81e9 --- /dev/null +++ b/mimicgen/datagen/selection_strategy.py @@ -0,0 +1,306 @@ +# Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# Licensed under the NVIDIA Source Code License [see LICENSE for details]. + +""" +Selection strategies used by MimicGen to select subtask segments from +source human demonstrations. +""" +import abc # for abstract base class definitions +import six # preserve metaclass compatibility between python 2 and 3 + +import numpy as np + +import mimicgen.utils.pose_utils as PoseUtils + + +# Global dictionary for remembering name to class mappings. +REGISTERED_SELECTION_STRATEGIES = {} + + +def make_selection_strategy(name, *args, **kwargs): + """ + Creates an instance of a selection strategy class, specified by @name, + which is used to look it up in the registry. + """ + assert_selection_strategy_exists(name) + return REGISTERED_SELECTION_STRATEGIES[name](*args, **kwargs) + + +def register_selection_strategy(cls): + """ + Register selection strategy class into global registry. + """ + ignore_classes = ["MG_SelectionStrategy"] + if cls.__name__ not in ignore_classes: + REGISTERED_SELECTION_STRATEGIES[cls.NAME] = cls + + +def assert_selection_strategy_exists(name): + """ + Allow easy way to check if selection strategy exists. 
+    """
+    if name not in REGISTERED_SELECTION_STRATEGIES:
+        # pass @name as the first format argument: the message has two "{}" placeholders,
+        # and supplying only the joined registry keys raised IndexError instead of this error
+        raise Exception("assert_selection_strategy_exists: name {} not found. Make sure it is a registered selection strategy among {}".format(name, ", ".join(REGISTERED_SELECTION_STRATEGIES)))
+
+
+class MG_SelectionStrategyMeta(type):
+    """
+    This metaclass adds selection strategy classes into the global registry.
+    """
+    def __new__(meta, name, bases, class_dict):
+        cls = super(MG_SelectionStrategyMeta, meta).__new__(meta, name, bases, class_dict)
+        register_selection_strategy(cls)
+        return cls
+
+
+@six.add_metaclass(MG_SelectionStrategyMeta)
+class MG_SelectionStrategy(object):
+    """
+    Defines methods and functions for selection strategies to implement.
+    """
+    def __init__(self):
+        pass
+
+    @property
+    @classmethod
+    def NAME(self):
+        """
+        This name (str) will be used to register the selection strategy class in the global
+        registry.
+        """
+        raise NotImplementedError
+
+    @abc.abstractmethod
+    def select_source_demo(
+        self,
+        eef_pose,
+        object_pose,
+        src_subtask_datagen_infos,
+    ):
+        """
+        Selects source demonstration index using the current robot pose, relevant object pose
+        for the current subtask, and relevant information from the source demonstrations for the
+        current subtask.
+
+        Args:
+            eef_pose (np.array): current 4x4 eef pose
+            object_pose (np.array): current 4x4 object pose, for the object in this subtask
+            src_subtask_datagen_infos (list): DatagenInfo instance for the relevant subtask segment
+                in the source demonstrations
+
+        Returns:
+            source_demo_ind (int): index of source demonstration - indicates which source subtask segment to use
+        """
+        raise NotImplementedError
+
+
+class RandomStrategy(MG_SelectionStrategy):
+    """
+    Pick source demonstration randomly.
+ """ + + # name for registering this class into registry + NAME = "random" + + def select_source_demo( + self, + eef_pose, + object_pose, + src_subtask_datagen_infos, + ): + """ + Selects source demonstration index using the current robot pose, relevant object pose + for the current subtask, and relevant information from the source demonstrations for the + current subtask. + + Args: + eef_pose (np.array): current 4x4 eef pose + object_pose (np.array): current 4x4 object pose, for the object in this subtask + src_subtask_datagen_infos (list): DatagenInfo instance for the relevant subtask segment + in the source demonstrations + + Returns: + source_demo_ind (int): index of source demonstration - indicates which source subtask segment to use + """ + + # random selection + n_src_demo = len(src_subtask_datagen_infos) + return np.random.randint(0, n_src_demo) + + +class NearestNeighborObjectStrategy(MG_SelectionStrategy): + """ + Pick source demonstration to be the one with the closest object pose to the object + in the current scene. + """ + + # name for registering this class into registry + NAME = "nearest_neighbor_object" + + def select_source_demo( + self, + eef_pose, + object_pose, + src_subtask_datagen_infos, + pos_weight=1., + rot_weight=1., + nn_k=3, + ): + """ + Selects source demonstration index using the current robot pose, relevant object pose + for the current subtask, and relevant information from the source demonstrations for the + current subtask. 
+ + Args: + eef_pose (np.array): current 4x4 eef pose + object_pose (np.array): current 4x4 object pose, for the object in this subtask + src_subtask_datagen_infos (list): DatagenInfo instance for the relevant subtask segment + in the source demonstrations + pos_weight (float): weight on position for minimizing pose distance + rot_weight (float): weight on rotation for minimizing pose distance + nn_k (int): pick source demo index uniformly at randomly from the top @nn_k nearest neighbors + + Returns: + source_demo_ind (int): index of source demonstration - indicates which source subtask segment to use + """ + + # collect object poses from start of subtask source segments into array of shape [N, 4, 4] + src_object_poses = [] + for di in src_subtask_datagen_infos: + src_obj_pose = list(di.object_poses.values()) + assert len(src_obj_pose) == 1 + # use object pose at start of subtask segment + src_object_poses.append(src_obj_pose[0][0]) + src_object_poses = np.array(src_object_poses) + + # split into positions and rotations + all_src_obj_pos, all_src_obj_rot = PoseUtils.unmake_pose(src_object_poses) + obj_pos, obj_rot = PoseUtils.unmake_pose(object_pose) + + # prepare for broadcasting + obj_pos = obj_pos.reshape(-1, 3) + obj_rot_T = obj_rot.T.reshape(-1, 3, 3) + + # pos dist is just L2 between positions + pos_dists = np.sqrt(((all_src_obj_pos - obj_pos) ** 2).sum(axis=-1)) + + # get angle (in axis-angle representation of delta rotation matrix) using the following formula + # (see http://www.boris-belousov.net/2016/12/01/quat-dist/) + + # batched matrix mult, [N, 3, 3] x [1, 3, 3] -> [N, 3, 3] + delta_R = np.matmul(all_src_obj_rot, obj_rot_T) + arc_cos_in = (np.trace(delta_R, axis1=-2, axis2=-1) - 1.) / 2. + arc_cos_in = np.clip(arc_cos_in, -1., 1.) 
# clip for numerical stability + rot_dists = np.arccos(arc_cos_in) + + # weight distances with coefficients + dists_to_minimize = pos_weight * pos_dists + rot_weight * rot_dists + + # clip top-k parameter to max possible value + nn_k = min(nn_k, len(dists_to_minimize)) + + # return one of the top-K nearest neighbors uniformly at random + rand_k = np.random.randint(0, nn_k) + top_k_neighbors_in_order = np.argsort(dists_to_minimize)[:nn_k] + return top_k_neighbors_in_order[rand_k] + + +class NearestNeighborRobotDistanceStrategy(MG_SelectionStrategy): + """ + Pick source demonstration to be the one that minimizes the distance the robot + end effector will need to travel from the current pose to the first pose + in the transformed segment. + """ + + # name for registering this class into registry + NAME = "nearest_neighbor_robot_distance" + + def select_source_demo( + self, + eef_pose, + object_pose, + src_subtask_datagen_infos, + pos_weight=1., + rot_weight=1., + nn_k=3, + ): + """ + Selects source demonstration index using the current robot pose, relevant object pose + for the current subtask, and relevant information from the source demonstrations for the + current subtask. 
+ + Args: + eef_pose (np.array): current 4x4 eef pose + object_pose (np.array): current 4x4 object pose, for the object in this subtask + src_subtask_datagen_infos (list): DatagenInfo instance for the relevant subtask segment + in the source demonstrations + pos_weight (float): weight on position for minimizing pose distance + rot_weight (float): weight on rotation for minimizing pose distance + nn_k (int): pick source demo index uniformly at randomly from the top @nn_k nearest neighbors + + Returns: + source_demo_ind (int): index of source demonstration - indicates which source subtask segment to use + """ + + # collect eef and object poses from start of subtask source segments into arrays of shape [N, 4, 4] + src_eef_poses = [] + src_object_poses = [] + for di in src_subtask_datagen_infos: + # use eef pose at start of subtask segment + src_eef_poses.append(di.eef_pose[0]) + # use object pose at start of subtask segment + src_obj_pose = list(di.object_poses.values()) + assert len(src_obj_pose) == 1 + src_object_poses.append(src_obj_pose[0][0]) + src_eef_poses = np.array(src_eef_poses) + src_object_poses = np.array(src_object_poses) + + # Get source eef poses with respect to object frames. + # note: frame A is world, frame B is object + src_object_poses_inv = PoseUtils.pose_inv(src_object_poses) + src_eef_poses_in_obj = PoseUtils.pose_in_A_to_pose_in_B( + pose_in_A=src_eef_poses, + pose_A_in_B=src_object_poses_inv, + ) + + # Use this to find the first pose for the transformed subtask segment for each source demo. 
+ # Note this is the same logic used in PoseUtils.transform_source_data_segment_using_object_pose + transformed_eef_poses = PoseUtils.pose_in_A_to_pose_in_B( + pose_in_A=src_eef_poses_in_obj, + pose_A_in_B=object_pose, + ) + + # split into positions and rotations + all_transformed_eef_pos, all_transformed_eef_rot = PoseUtils.unmake_pose(transformed_eef_poses) + eef_pos, eef_rot = PoseUtils.unmake_pose(eef_pose) + + # now measure distance from each of these transformed eef poses to our current eef pose + # and choose the source demo that minimizes this distance + + # prepare for broadcasting + eef_pos = eef_pos.reshape(-1, 3) + eef_rot_T = eef_rot.T.reshape(-1, 3, 3) + + # pos dist is just L2 between positions + pos_dists = np.sqrt(((all_transformed_eef_pos - eef_pos) ** 2).sum(axis=-1)) + + # get angle (in axis-angle representation of delta rotation matrix) using the following formula + # (see http://www.boris-belousov.net/2016/12/01/quat-dist/) + + # batched matrix mult, [N, 3, 3] x [1, 3, 3] -> [N, 3, 3] + delta_R = np.matmul(all_transformed_eef_rot, eef_rot_T) + arc_cos_in = (np.trace(delta_R, axis1=-2, axis2=-1) - 1.) / 2. + arc_cos_in = np.clip(arc_cos_in, -1., 1.) # clip for numerical stability + rot_dists = np.arccos(arc_cos_in) + + # weight distances with coefficients + dists_to_minimize = pos_weight * pos_dists + rot_weight * rot_dists + + # clip top-k parameter to max possible value + nn_k = min(nn_k, len(dists_to_minimize)) + + # return one of the top-K nearest neighbors uniformly at random + rand_k = np.random.randint(0, nn_k) + top_k_neighbors_in_order = np.argsort(dists_to_minimize)[:nn_k] + return top_k_neighbors_in_order[rand_k] diff --git a/mimicgen/datagen/waypoint.py b/mimicgen/datagen/waypoint.py new file mode 100644 index 0000000..fb1c534 --- /dev/null +++ b/mimicgen/datagen/waypoint.py @@ -0,0 +1,415 @@ +# Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+# +# Licensed under the NVIDIA Source Code License [see LICENSE for details]. + +""" +A collection of classes used to represent waypoints and trajectories. +""" +import json +import numpy as np +from copy import deepcopy + +import mimicgen +import mimicgen.utils.pose_utils as PoseUtils + + +class Waypoint(object): + """ + Represents a single desired 6-DoF waypoint, along with corresponding gripper actuation for this point. + """ + def __init__(self, pose, gripper_action, noise=None): + """ + Args: + pose (np.array): 4x4 pose target for robot controller + gripper_action (np.array): gripper action for robot controller + noise (float or None): action noise amplitude to apply during execution at this timestep + (for arm actions, not gripper actions) + """ + self.pose = np.array(pose) + self.gripper_action = np.array(gripper_action) + self.noise = noise + assert len(self.gripper_action.shape) == 1 + + +class WaypointSequence(object): + """ + Represents a sequence of Waypoint objects. + """ + def __init__(self, sequence=None): + """ + Args: + sequence (list or None): if provided, should be an list of Waypoint objects + """ + if sequence is None: + self.sequence = [] + else: + for waypoint in sequence: + assert isinstance(waypoint, Waypoint) + self.sequence = deepcopy(sequence) + + @classmethod + def from_poses(cls, poses, gripper_actions, action_noise): + """ + Instantiate a WaypointSequence object given a sequence of poses, + gripper actions, and action noise. + + Args: + poses (np.array): sequence of pose matrices of shape (T, 4, 4) + gripper_actions (np.array): sequence of gripper actions + that should be applied at each timestep of shape (T, D). + action_noise (float or np.array): sequence of action noise + magnitudes that should be applied at each timestep. If a + single float is provided, the noise magnitude will be + constant over the trajectory. 
+ """ + assert isinstance(action_noise, float) or isinstance(action_noise, np.ndarray) + + # handle scalar to numpy array conversion + num_timesteps = poses.shape[0] + if isinstance(action_noise, float): + action_noise = action_noise * np.ones((num_timesteps, 1)) + action_noise = action_noise.reshape(-1, 1) + + # make WaypointSequence instance + sequence = [ + Waypoint( + pose=poses[t], + gripper_action=gripper_actions[t], + noise=action_noise[t, 0], + ) + for t in range(num_timesteps) + ] + return cls(sequence=sequence) + + def __len__(self): + # length of sequence + return len(self.sequence) + + def __getitem__(self, ind): + """ + Returns waypoint at index. + + Returns: + waypoint (Waypoint instance) + """ + return self.sequence[ind] + + def __add__(self, other): + """ + Defines addition (concatenation) of sequences + """ + return WaypointSequence(sequence=(self.sequence + other.sequence)) + + @property + def last_waypoint(self): + """ + Return last waypoint in sequence. + + Returns: + waypoint (Waypoint instance) + """ + return deepcopy(self.sequence[-1]) + + def split(self, ind): + """ + Splits this sequence into 2 pieces, the part up to time index @ind, and the + rest. Returns 2 WaypointSequence objects. + """ + seq_1 = self.sequence[:ind] + seq_2 = self.sequence[ind:] + return WaypointSequence(sequence=seq_1), WaypointSequence(sequence=seq_2) + + +class WaypointTrajectory(object): + """ + A sequence of WaypointSequence objects that corresponds to a full 6-DoF trajectory. + """ + def __init__(self): + self.waypoint_sequences = [] + + def __len__(self): + # sum up length of all waypoint sequences + return sum(len(s) for s in self.waypoint_sequences) + + def __getitem__(self, ind): + """ + Returns waypoint at time index. 
+ + Returns: + waypoint (Waypoint instance) + """ + assert len(self.waypoint_sequences) > 0 + assert (ind >= 0) and (ind < len(self)) + + # find correct waypoint sequence we should index + end_ind = 0 + for seq_ind in range(len(self.waypoint_sequences)): + start_ind = end_ind + end_ind += len(self.waypoint_sequences[seq_ind]) + if (ind >= start_ind) and (ind < end_ind): + break + + # index within waypoint sequence + return self.waypoint_sequences[seq_ind][ind - start_ind] + + @property + def last_waypoint(self): + """ + Return last waypoint in sequence. + + Returns: + waypoint (Waypoint instance) + """ + return self.waypoint_sequences[-1].last_waypoint + + def add_waypoint_sequence(self, sequence): + """ + Directly append sequence to list (no interpolation). + + Args: + sequence (WaypointSequence instance): sequence to add + """ + assert isinstance(sequence, WaypointSequence) + self.waypoint_sequences.append(sequence) + + def add_waypoint_sequence_for_target_pose( + self, + pose, + gripper_action, + num_steps, + skip_interpolation=False, + action_noise=0., + ): + """ + Adds a new waypoint sequence corresponding to a desired target pose. A new WaypointSequence + will be constructed consisting of @num_steps intermediate Waypoint objects. These can either + be constructed with linear interpolation from the last waypoint (default) or be a + constant set of target poses (set @skip_interpolation to True). + + Args: + pose (np.array): 4x4 target pose + + gripper_action (np.array): value for gripper action + + num_steps (int): number of action steps when trying to reach this waypoint. Will + add intermediate linearly interpolated points between the last pose on this trajectory + and the target pose, so that the total number of steps is @num_steps. + + skip_interpolation (bool): if True, keep the target pose fixed and repeat it @num_steps + times instead of using linearly interpolated targets. 
+
+            action_noise (float): scale of random gaussian noise to add during action execution (e.g.
+                when @execute is called)
+        """
+        if (len(self.waypoint_sequences) == 0):
+            assert skip_interpolation, "cannot interpolate since this is the first waypoint sequence"
+
+        if skip_interpolation:
+            # repeat the target @num_steps times
+            assert num_steps is not None
+            poses = np.array([pose for _ in range(num_steps)])
+            gripper_actions = np.array([gripper_action for _ in range(num_steps)])
+        else:
+            # linearly interpolate between the last pose and the new waypoint
+            last_waypoint = self.last_waypoint
+            poses, num_steps_2 = PoseUtils.interpolate_poses(
+                pose_1=last_waypoint.pose,
+                pose_2=pose,
+                num_steps=num_steps,
+            )
+            assert num_steps == num_steps_2
+            gripper_actions = np.array([gripper_action for _ in range(num_steps + 2)])
+            # make sure to skip the first element of the new path, which already exists on the current trajectory path
+            poses = poses[1:]
+            gripper_actions = gripper_actions[1:]
+
+        # add waypoint sequence for this set of poses
+        sequence = WaypointSequence.from_poses(
+            poses=poses,
+            gripper_actions=gripper_actions,
+            action_noise=action_noise,
+        )
+        self.add_waypoint_sequence(sequence)
+
+    def pop_first(self):
+        """
+        Removes first waypoint in first waypoint sequence and returns it. If the first waypoint
+        sequence is now empty, it is also removed.
+
+        Returns:
+            waypoint (Waypoint instance)
+        """
+        first, rest = self.waypoint_sequences[0].split(1)
+        if len(rest) == 0:
+            # remove empty waypoint sequence
+            self.waypoint_sequences = self.waypoint_sequences[1:]
+        else:
+            # update first waypoint sequence
+            self.waypoint_sequences[0] = rest
+        return first
+
+    def merge(
+        self,
+        other,
+        num_steps_interp=None,
+        num_steps_fixed=None,
+        action_noise=0.,
+    ):
+        """
+        Merge this trajectory with another (@other).
+ + Args: + other (WaypointTrajectory object): the other trajectory to merge into this one + + num_steps_interp (int or None): if not None, add a waypoint sequence that interpolates + between the end of the current trajectory and the start of @other + + num_steps_fixed (int or None): if not None, add a waypoint sequence that has constant + target poses corresponding to the first target pose in @other + + action_noise (float): noise to use during the interpolation segment + """ + need_interp = (num_steps_interp is not None) and (num_steps_interp > 0) + need_fixed = (num_steps_fixed is not None) and (num_steps_fixed > 0) + use_interpolation_segment = (need_interp or need_fixed) + + if use_interpolation_segment: + # pop first element of other trajectory + other_first = other.pop_first() + + # Get first target pose of other trajectory. + # The interpolated segment will include this first element as its last point. + target_for_interpolation = other_first[0] + + if need_interp: + # interpolation segment + self.add_waypoint_sequence_for_target_pose( + pose=target_for_interpolation.pose, + gripper_action=target_for_interpolation.gripper_action, + num_steps=num_steps_interp, + action_noise=action_noise, + skip_interpolation=False, + ) + + if need_fixed: + # segment of constant target poses equal to @other's first target pose + + # account for the fact that we pop'd the first element of @other in anticipation of an interpolation segment + num_steps_fixed_to_use = num_steps_fixed if need_interp else (num_steps_fixed + 1) + self.add_waypoint_sequence_for_target_pose( + pose=target_for_interpolation.pose, + gripper_action=target_for_interpolation.gripper_action, + num_steps=num_steps_fixed_to_use, + action_noise=action_noise, + skip_interpolation=True, + ) + + # make sure to preserve noise from first element of other trajectory + self.waypoint_sequences[-1][-1].noise = target_for_interpolation.noise + + # concatenate the trajectories + self.waypoint_sequences += 
other.waypoint_sequences + + def execute( + self, + env, + env_interface, + render=False, + video_writer=None, + video_skip=5, + camera_names=None, + ): + """ + Main function to execute the trajectory. Will use env_interface.target_pose_to_action to + convert each target pose at each waypoint to an action command, and pass that along to + env.step. + + Args: + env (robomimic EnvBase instance): environment to use for executing trajectory + env_interface (MG_EnvInterface instance): environment interface for executing trajectory + render (bool): if True, render on-screen + video_writer (imageio writer): video writer + video_skip (int): determines rate at which environment frames are written to video + camera_names (list): determines which camera(s) are used for rendering. Pass more than + one to output a video with multiple camera views concatenated horizontally. + + Returns: + results (dict): dictionary with the following items for the executed trajectory: + states (list): simulator state at each timestep + observations (list): observation dictionary at each timestep + datagen_infos (list): datagen_info at each timestep + actions (list): action executed at each timestep + success (bool): whether the trajectory successfully solved the task or not + """ + write_video = (video_writer is not None) + video_count = 0 + + states = [] + actions = [] + observations = [] + datagen_infos = [] + success = { k: False for k in env.is_success() } # success metrics + + # iterate over waypoint sequences + for seq in self.waypoint_sequences: + + # iterate over waypoints in each sequence + for j in range(len(seq)): + + # on-screen render + if render: + env.render(mode="human", camera_name=camera_names[0]) + + # video render + if write_video: + if video_count % video_skip == 0: + video_img = [] + for cam_name in camera_names: + video_img.append(env.render(mode="rgb_array", height=512, width=512, camera_name=cam_name)) + video_img = np.concatenate(video_img, axis=1) # concatenate 
horizontally + video_writer.append_data(video_img) + video_count += 1 + + # current waypoint + waypoint = seq[j] + + # current state and obs + state = env.get_state()["states"] + obs = env.get_observation() + + # convert target pose to arm action + action_pose = env_interface.target_pose_to_action(target_pose=waypoint.pose) + + # maybe add noise to action + if waypoint.noise is not None: + action_pose += waypoint.noise * np.random.randn(*action_pose.shape) + action_pose = np.clip(action_pose, -1., 1.) + + # add in gripper action + play_action = np.concatenate([action_pose, waypoint.gripper_action], axis=0) + + # store datagen info too + datagen_info = env_interface.get_datagen_info(action=play_action) + + # step environment + env.step(play_action) + + # collect data + states.append(state) + play_action_record = play_action + actions.append(play_action_record) + observations.append(obs) + datagen_infos.append(datagen_info) + + cur_success_metrics = env.is_success() + for k in success: + success[k] = success[k] or cur_success_metrics[k] + + results = dict( + states=states, + observations=observations, + datagen_infos=datagen_infos, + actions=np.array(actions), + success=bool(success["task"]), + ) + return results diff --git a/mimicgen_envs/envs/__init__.py b/mimicgen/env_interfaces/__init__.py similarity index 57% rename from mimicgen_envs/envs/__init__.py rename to mimicgen/env_interfaces/__init__.py index 981400f..8edfcfd 100644 --- a/mimicgen_envs/envs/__init__.py +++ b/mimicgen/env_interfaces/__init__.py @@ -1,3 +1,3 @@ -# Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # # Licensed under the NVIDIA Source Code License [see LICENSE for details]. 
\ No newline at end of file diff --git a/mimicgen/env_interfaces/base.py b/mimicgen/env_interfaces/base.py new file mode 100644 index 0000000..643b4aa --- /dev/null +++ b/mimicgen/env_interfaces/base.py @@ -0,0 +1,205 @@ +# Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# Licensed under the NVIDIA Source Code License [see LICENSE for details]. + +""" +Base class for environment interfaces used by MimicGen. Defines a set of +functions that should be implemented for every set of environments, and +a global registry. +""" +import abc # for abstract base class definitions +import six # preserve metaclass compatibility between python 2 and 3 + +import numpy as np + +from mimicgen.datagen.datagen_info import DatagenInfo + + +# Global dictionary for remembering name - class mappings. +# +# Organization: +# interface_type (str) +# class_name (str) +# class object +REGISTERED_ENV_INTERFACES = {} + + +def make_interface(name, interface_type, *args, **kwargs): + """ + Creates an instance of a env interface. Make sure to pass any other needed arguments. + """ + if interface_type not in REGISTERED_ENV_INTERFACES: + raise Exception("make_interface: interface type {} not found. Make sure it is a registered interface type among: {}".format(interface_type, ", ".join(REGISTERED_ENV_INTERFACES))) + if name not in REGISTERED_ENV_INTERFACES[interface_type]: + raise Exception("make_interface: interface name {} not found. Make sure it is a registered interface name among: {}".format(name, ', '.join(REGISTERED_ENV_INTERFACES[interface_type]))) + return REGISTERED_ENV_INTERFACES[interface_type][name](*args, **kwargs) + + +def register_env_interface(cls): + """ + Register environment interface class into global registry. 
+ """ + ignore_classes = ["MG_EnvInterface"] + if cls.__name__ not in ignore_classes: + if cls.INTERFACE_TYPE not in REGISTERED_ENV_INTERFACES: + REGISTERED_ENV_INTERFACES[cls.INTERFACE_TYPE] = dict() + REGISTERED_ENV_INTERFACES[cls.INTERFACE_TYPE][cls.__name__] = cls + + +class MG_EnvInterfaceMeta(type): + """ + This metaclass adds env interface classes into the global registry. + """ + def __new__(meta, name, bases, class_dict): + cls = super(MG_EnvInterfaceMeta, meta).__new__(meta, name, bases, class_dict) + register_env_interface(cls) + return cls + + +@six.add_metaclass(MG_EnvInterfaceMeta) +class MG_EnvInterface(object): + """ + Environment interface API that MimicGen environment interfaces should conform to. + """ + def __init__(self, env): + """ + Args: + env: environment object + """ + self.env = env + self.interface_type = type(self).INTERFACE_TYPE + + def __repr__(self): + """ + Pretty-print env description. + """ + return self.__class__.__name__ + + """ + These should be filled out by simulator subclasses (e.g. robosuite). + """ + @property + @classmethod + def INTERFACE_TYPE(self): + """ + Returns string corresponding to interface type. This is used to group + all subclasses together in the interface registry (for example, all robosuite + interfaces) and helps avoid name conflicts. + """ + raise NotImplementedError + + @abc.abstractmethod + def get_robot_eef_pose(self): + """ + Get current robot end effector pose. Should be the same frame as used by the robot end-effector controller. + + Returns: + pose (np.array): 4x4 eef pose matrix + """ + raise NotImplementedError + + @abc.abstractmethod + def target_pose_to_action(self, target_pose, relative=True): + """ + Takes a target pose for the end effector controller and returns an action + (usually a normalized delta pose action) to try and achieve that target pose. 
+ + Args: + target_pose (np.array): 4x4 target eef pose + relative (bool): if True, use relative pose actions, else absolute pose actions + + Returns: + action (np.array): action compatible with env.step (minus gripper actuation) + """ + raise NotImplementedError + + @abc.abstractmethod + def action_to_target_pose(self, action, relative=True): + """ + Converts action (compatible with env.step) to a target pose for the end effector controller. + Inverse of @target_pose_to_action. Usually used to infer a sequence of target controller poses + from a demonstration trajectory using the recorded actions. + + Args: + action (np.array): environment action + relative (bool): if True, use relative pose actions, else absolute pose actions + + Returns: + target_pose (np.array): 4x4 target eef pose that @action corresponds to + """ + raise NotImplementedError + + @abc.abstractmethod + def action_to_gripper_action(self, action): + """ + Extracts the gripper actuation part of an action (compatible with env.step). + + Args: + action (np.array): environment action + + Returns: + gripper_action (np.array): subset of environment action for gripper actuation + """ + raise NotImplementedError + + """ + These should be filled out by each simulation domain (e.g. nut assembly, coffee). + """ + @abc.abstractmethod + def get_object_poses(self): + """ + Gets the pose of each object relevant to MimicGen data generation in the current scene. + + Returns: + object_poses (dict): dictionary that maps object name (str) to object pose matrix (4x4 np.array) + """ + raise NotImplementedError + + @abc.abstractmethod + def get_subtask_term_signals(self): + """ + Gets a dictionary of binary flags for each subtask in a task. The flag is 1 + when the subtask has been completed and 0 otherwise. MimicGen only uses this + when parsing source demonstrations at the start of data generation, and it only + uses the first 0 -> 1 transition in this signal to detect the end of a subtask. 
+ + Returns: + subtask_term_signals (dict): dictionary that maps subtask name to termination flag (0 or 1) + """ + raise NotImplementedError + + def get_datagen_info(self, action=None): + """ + Get information needed for data generation, at the current + timestep of simulation. If @action is provided, it will be used to + compute the target eef pose for the controller, otherwise that + will be excluded. + + Returns: + datagen_info (DatagenInfo instance) + """ + + # current eef pose + eef_pose = self.get_robot_eef_pose() + + # object poses + object_poses = self.get_object_poses() + + # subtask termination signals + subtask_term_signals = self.get_subtask_term_signals() + + # these must be extracted from provided action + target_pose = None + gripper_action = None + if action is not None: + target_pose = self.action_to_target_pose(action=action, relative=True) + gripper_action = self.action_to_gripper_action(action=action) + + datagen_info = DatagenInfo( + eef_pose=eef_pose, + object_poses=object_poses, + subtask_term_signals=subtask_term_signals, + target_pose=target_pose, + gripper_action=gripper_action, + ) + return datagen_info diff --git a/mimicgen/env_interfaces/robosuite.py b/mimicgen/env_interfaces/robosuite.py new file mode 100644 index 0000000..a0d77fd --- /dev/null +++ b/mimicgen/env_interfaces/robosuite.py @@ -0,0 +1,749 @@ +# Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# Licensed under the NVIDIA Source Code License [see LICENSE for details]. + +""" +MimicGen environment interface classes for basic robosuite environments. +""" +import numpy as np + +import robosuite +import robosuite.utils.transform_utils as T + +import mimicgen.utils.pose_utils as PoseUtils +from mimicgen.env_interfaces.base import MG_EnvInterface + + +class RobosuiteInterface(MG_EnvInterface): + """ + MimicGen environment interface base class for basic robosuite environments. 
+ """ + + # Note: base simulator interface class must fill out interface type as a class property + INTERFACE_TYPE = "robosuite" + + def get_robot_eef_pose(self): + """ + Get current robot end effector pose. Should be the same frame as used by the robot end-effector controller. + + Returns: + pose (np.array): 4x4 eef pose matrix + """ + + # OSC control frame is a MuJoCo site - just retrieve its current pose + return self.get_object_pose( + obj_name=self.env.robots[0].controller.eef_name, + obj_type="site", + ) + + def target_pose_to_action(self, target_pose, relative=True): + """ + Takes a target pose for the end effector controller and returns an action + (usually a normalized delta pose action) to try and achieve that target pose. + + Args: + target_pose (np.array): 4x4 target eef pose + relative (bool): if True, use relative pose actions, else absolute pose actions + + Returns: + action (np.array): action compatible with env.step (minus gripper actuation) + """ + + # version check for robosuite - must be v1.2+, so that we're using the correct controller convention + assert (robosuite.__version__.split(".")[0] == "1") + assert (robosuite.__version__.split(".")[1] >= "2") + + # target position and rotation + target_pos, target_rot = PoseUtils.unmake_pose(target_pose) + + # current position and rotation + curr_pose = self.get_robot_eef_pose() + curr_pos, curr_rot = PoseUtils.unmake_pose(curr_pose) + + # get maximum position and rotation action bounds + max_dpos = self.env.robots[0].controller.output_max[0] + max_drot = self.env.robots[0].controller.output_max[3] + + if relative: + # normalized delta position action + delta_position = target_pos - curr_pos + delta_position = np.clip(delta_position / max_dpos, -1., 1.) + + # normalized delta rotation action + delta_rot_mat = target_rot.dot(curr_rot.T) + delta_quat = T.mat2quat(delta_rot_mat) + delta_rotation = T.quat2axisangle(delta_quat) + delta_rotation = np.clip(delta_rotation / max_drot, -1., 1.) 
+ return np.concatenate([delta_position, delta_rotation]) + + # absolute position and rotation action + target_quat = T.mat2quat(target_rot) + abs_rotation = T.quat2axisangle(target_quat) + return np.concatenate([target_pos, abs_rotation]) + + def action_to_target_pose(self, action, relative=True): + """ + Converts action (compatible with env.step) to a target pose for the end effector controller. + Inverse of @target_pose_to_action. Usually used to infer a sequence of target controller poses + from a demonstration trajectory using the recorded actions. + + Args: + action (np.array): environment action + relative (bool): if True, use relative pose actions, else absolute pose actions + + Returns: + target_pose (np.array): 4x4 target eef pose that @action corresponds to + """ + + # version check for robosuite - must be v1.2+, so that we're using the correct controller convention + assert (robosuite.__version__.split(".")[0] == "1") + assert (robosuite.__version__.split(".")[1] >= "2") + + if (not relative): + # convert absolute action to absolute pose + target_pos = action[:3] + target_quat = T.axisangle2quat(action[3:6]) + target_rot = T.quat2mat(target_quat) + else: + # get maximum position and rotation action bounds + max_dpos = self.env.robots[0].controller.output_max[0] + max_drot = self.env.robots[0].controller.output_max[3] + + # unscale actions + delta_position = action[:3] * max_dpos + delta_rotation = action[3:6] * max_drot + + # current position and rotation + curr_pose = self.get_robot_eef_pose() + curr_pos, curr_rot = PoseUtils.unmake_pose(curr_pose) + + # get pose target + target_pos = curr_pos + delta_position + delta_quat = T.axisangle2quat(delta_rotation) + delta_rot_mat = T.quat2mat(delta_quat) + target_rot = delta_rot_mat.dot(curr_rot) + + target_pose = PoseUtils.make_pose(target_pos, target_rot) + return target_pose + + def action_to_gripper_action(self, action): + """ + Extracts the gripper actuation part of an action (compatible with env.step). 
+ + Args: + action (np.array): environment action + + Returns: + gripper_action (np.array): subset of environment action for gripper actuation + """ + + # last dimension is gripper action + return action[-1:] + + # robosuite-specific helper method for getting object poses + def get_object_pose(self, obj_name, obj_type): + """ + Returns 4x4 object pose given the name of the object and the type. + + Args: + obj_name (str): name of object + obj_type (str): type of object - either "body", "geom", or "site" + + Returns: + obj_pose (np.array): 4x4 object pose + """ + assert obj_type in ["body", "geom", "site"] + + if obj_type == "body": + obj_id = self.env.sim.model.body_name2id(obj_name) + obj_pos = np.array(self.env.sim.data.body_xpos[obj_id]) + obj_rot = np.array(self.env.sim.data.body_xmat[obj_id].reshape(3, 3)) + elif obj_type == "geom": + obj_id = self.env.sim.model.geom_name2id(obj_name) + obj_pos = np.array(self.env.sim.data.geom_xpos[obj_id]) + obj_rot = np.array(self.env.sim.data.geom_xmat[obj_id].reshape(3, 3)) + elif obj_type == "site": + obj_id = self.env.sim.model.site_name2id(obj_name) + obj_pos = np.array(self.env.sim.data.site_xpos[obj_id]) + obj_rot = np.array(self.env.sim.data.site_xmat[obj_id].reshape(3, 3)) + + return PoseUtils.make_pose(obj_pos, obj_rot) + + +class MG_Coffee(RobosuiteInterface): + """ + Corresponds to robosuite Coffee task and variants. + """ + def get_object_poses(self): + """ + Gets the pose of each object relevant to MimicGen data generation in the current scene. 
+ + Returns: + object_poses (dict): dictionary that maps object name (str) to object pose matrix (4x4 np.array) + """ + + # two relevant objects - coffee pod and coffee machine + return dict( + coffee_pod=self.get_object_pose(obj_name=self.env.coffee_pod.root_body, obj_type="body"), + coffee_machine=self.get_object_pose(obj_name=self.env.coffee_machine.root_body, obj_type="body"), + ) + + def get_subtask_term_signals(self): + """ + Gets a dictionary of binary flags for each subtask in a task. The flag is 1 + when the subtask has been completed and 0 otherwise. MimicGen only uses this + when parsing source demonstrations at the start of data generation, and it only + uses the first 0 -> 1 transition in this signal to detect the end of a subtask. + + Returns: + subtask_term_signals (dict): dictionary that maps subtask name to termination flag (0 or 1) + """ + signals = dict() + + metrics = self.env._get_partial_task_metrics() + + # first subtask is grasping coffee pod (motion relative to pod) + signals["grasp"] = int(metrics["grasp"]) + + # final subtask is inserting pod into machine and closing the lid (motion relative to machine) - but final subtask signal is not needed + return signals + + +class MG_Threading(RobosuiteInterface): + """ + Corresponds to robosuite Threading task and variants. + """ + def get_object_poses(self): + """ + Gets the pose of each object relevant to MimicGen data generation in the current scene. + + Returns: + object_poses (dict): dictionary that maps object name (str) to object pose matrix (4x4 np.array) + """ + + # two relevant objects - needle and tripod + return dict( + needle=self.get_object_pose(obj_name=self.env.needle.root_body, obj_type="body"), + tripod=self.get_object_pose(obj_name=self.env.tripod.root_body, obj_type="body"), + ) + + def get_subtask_term_signals(self): + """ + Gets a dictionary of binary flags for each subtask in a task. The flag is 1 + when the subtask has been completed and 0 otherwise. 
MimicGen only uses this + when parsing source demonstrations at the start of data generation, and it only + uses the first 0 -> 1 transition in this signal to detect the end of a subtask. + + Returns: + subtask_term_signals (dict): dictionary that maps subtask name to termination flag (0 or 1) + """ + signals = dict() + + # first subtask is grasping needle (motion relative to needle) + signals["grasp"] = int(self.env._check_grasp( + gripper=self.env.robots[0].gripper, + object_geoms=[g for g in self.env.needle.contact_geoms]) + ) + + # final subtask is inserting needle into tripod (motion relative to tripod) - but final subtask signal is not needed + return signals + + +class MG_ThreePieceAssembly(RobosuiteInterface): + """ + Corresponds to robosuite ThreePieceAssembly task and variants. + """ + def get_object_poses(self): + """ + Gets the pose of each object relevant to MimicGen data generation in the current scene. + + Returns: + object_poses (dict): dictionary that maps object name (str) to object pose matrix (4x4 np.array) + """ + + # three relevant objects - base piece, piece_1, piece_2 + return dict( + base=self.get_object_pose(obj_name=self.env.base.root_body, obj_type="body"), + piece_1=self.get_object_pose(obj_name=self.env.piece_1.root_body, obj_type="body"), + piece_2=self.get_object_pose(obj_name=self.env.piece_2.root_body, obj_type="body"), + ) + + def get_subtask_term_signals(self): + """ + Gets a dictionary of binary flags for each subtask in a task. The flag is 1 + when the subtask has been completed and 0 otherwise. MimicGen only uses this + when parsing source demonstrations at the start of data generation, and it only + uses the first 0 -> 1 transition in this signal to detect the end of a subtask. 
+ + Returns: + subtask_term_signals (dict): dictionary that maps subtask name to termination flag (0 or 1) + """ + signals = dict() + + metrics = self.env._get_partial_task_metrics() + + # first subtask is grasping piece_1 (motion relative to piece_1) + signals["grasp_1"] = int(self.env._check_grasp( + gripper=self.env.robots[0].gripper, + object_geoms=[g for g in self.env.piece_1.contact_geoms]) + ) + + # second subtask is inserting piece_1 into the base (motion relative to base) + signals["insert_1"] = int(metrics["first_piece_assembled"]) + + # third subtask is grasping piece_2 (motion relative to piece_2) + signals["grasp_2"] = int(self.env._check_grasp( + gripper=self.env.robots[0].gripper, + object_geoms=[g for g in self.env.piece_2.contact_geoms]) + ) + + # final subtask is inserting piece_2 into piece_1 (motion relative to piece_1) - but final subtask signal is not needed + return signals + + +class MG_Square(RobosuiteInterface): + """ + Corresponds to robosuite Square task and variants. + """ + def get_object_poses(self): + """ + Gets the pose of each object relevant to MimicGen data generation in the current scene. + + Returns: + object_poses (dict): dictionary that maps object name (str) to object pose matrix (4x4 np.array) + """ + + # two relevant objects - square nut and peg + return dict( + square_nut=self.get_object_pose(obj_name=self.env.nuts[self.env.nut_to_id["square"]].root_body, obj_type="body"), + square_peg=self.get_object_pose(obj_name="peg1", obj_type="body"), + ) + + def get_subtask_term_signals(self): + """ + Gets a dictionary of binary flags for each subtask in a task. The flag is 1 + when the subtask has been completed and 0 otherwise. MimicGen only uses this + when parsing source demonstrations at the start of data generation, and it only + uses the first 0 -> 1 transition in this signal to detect the end of a subtask. 
+ + Returns: + subtask_term_signals (dict): dictionary that maps subtask name to termination flag (0 or 1) + """ + signals = dict() + + # first subtask is grasping square nut (motion relative to square nut) + signals["grasp"] = int(self.env._check_grasp( + gripper=self.env.robots[0].gripper, + object_geoms=[g for g in self.env.nuts[self.env.nut_to_id["square"]].contact_geoms]) + ) + + # final subtask is inserting square nut onto square peg (motion relative to square peg) - but final subtask signal is not needed + return signals + + +class MG_Stack(RobosuiteInterface): + """ + Corresponds to robosuite Stack task and variants. + """ + def get_object_poses(self): + """ + Gets the pose of each object relevant to MimicGen data generation in the current scene. + + Returns: + object_poses (dict): dictionary that maps object name (str) to object pose matrix (4x4 np.array) + """ + + # two relevant objects - cubeA and cubeB + return dict( + cubeA=self.get_object_pose(obj_name=self.env.cubeA.root_body, obj_type="body"), + cubeB=self.get_object_pose(obj_name=self.env.cubeB.root_body, obj_type="body"), + ) + + def get_subtask_term_signals(self): + """ + Gets a dictionary of binary flags for each subtask in a task. The flag is 1 + when the subtask has been completed and 0 otherwise. MimicGen only uses this + when parsing source demonstrations at the start of data generation, and it only + uses the first 0 -> 1 transition in this signal to detect the end of a subtask. 
+ + Returns: + subtask_term_signals (dict): dictionary that maps subtask name to termination flag (0 or 1) + """ + signals = dict() + + # first subtask is grasping cubeA (motion relative to cubeA) + signals["grasp"] = int(self.env._check_grasp(gripper=self.env.robots[0].gripper, object_geoms=self.env.cubeA)) + + # final subtask is placing cubeA on cubeB (motion relative to cubeB) - but final subtask signal is not needed + return signals + + +class MG_StackThree(RobosuiteInterface): + """ + Corresponds to robosuite StackThree task and variants. + """ + def get_object_poses(self): + """ + Gets the pose of each object relevant to MimicGen data generation in the current scene. + + Returns: + object_poses (dict): dictionary that maps object name (str) to object pose matrix (4x4 np.array) + """ + + # three relevant objects - three cubes + return dict( + cubeA=self.get_object_pose(obj_name=self.env.cubeA.root_body, obj_type="body"), + cubeB=self.get_object_pose(obj_name=self.env.cubeB.root_body, obj_type="body"), + cubeC=self.get_object_pose(obj_name=self.env.cubeC.root_body, obj_type="body"), + ) + + def get_subtask_term_signals(self): + """ + Gets a dictionary of binary flags for each subtask in a task. The flag is 1 + when the subtask has been completed and 0 otherwise. MimicGen only uses this + when parsing source demonstrations at the start of data generation, and it only + uses the first 0 -> 1 transition in this signal to detect the end of a subtask. 
+ + Returns: + subtask_term_signals (dict): dictionary that maps subtask name to termination flag (0 or 1) + """ + signals = dict() + + # first subtask is grasping cubeA (motion relative to cubeA) + signals["grasp_1"] = int(self.env._check_grasp(gripper=self.env.robots[0].gripper, object_geoms=self.env.cubeA)) + + # second subtask is placing cubeA on cubeB (motion relative to cubeB) + signals["stack_1"] = int(self.env._check_cubeA_stacked()) + + # third subtask is grasping cubeC (motion relative to cubeC) + signals["grasp_2"] = int(self.env._check_grasp(gripper=self.env.robots[0].gripper, object_geoms=self.env.cubeC)) + + # final subtask is placing cubeC on cubeA (motion relative to cubeA) - but final subtask signal is not needed + return signals + + +class MG_HammerCleanup(RobosuiteInterface): + """ + Corresponds to robosuite HammerCleanup task and variants. + """ + def get_object_poses(self): + """ + Gets the pose of each object relevant to MimicGen data generation in the current scene. + + Returns: + object_poses (dict): dictionary that maps object name (str) to object pose matrix (4x4 np.array) + """ + + # two relevant objects - hammer and drawer + return dict( + hammer=self.get_object_pose(obj_name=self.env.sorting_object.root_body, obj_type="body"), + drawer=self.get_object_pose(obj_name=self.env.cabinet_object.root_body, obj_type="body"), + ) + + def get_subtask_term_signals(self): + """ + Gets a dictionary of binary flags for each subtask in a task. The flag is 1 + when the subtask has been completed and 0 otherwise. MimicGen only uses this + when parsing source demonstrations at the start of data generation, and it only + uses the first 0 -> 1 transition in this signal to detect the end of a subtask. 
+ + Returns: + subtask_term_signals (dict): dictionary that maps subtask name to termination flag (0 or 1) + """ + signals = dict() + + # first subtask is opening the drawer (motion relative to drawer) + # check that drawer is open enough and end effector is far enough from drawer after opening it + drawer_pos, _ = PoseUtils.unmake_pose(self.get_object_pose(obj_name="CabinetObject_drawer_link", obj_type="body")) + eef_pos, _ = PoseUtils.unmake_pose(self.get_robot_eef_pose()) + eef_drawer_dist = np.linalg.norm(eef_pos - drawer_pos) + signals["open"] = int( + (self.env.sim.data.qpos[self.env.cabinet_qpos_addrs] < -0.10) and (eef_drawer_dist > 0.24) + ) + + # second subtask is grasping the hammer (motion relative to hammer) + signals["grasp"] = int(self.env._check_grasp( + gripper=self.env.robots[0].gripper, + object_geoms=[g for g in self.env.sorting_object.contact_geoms] + )) + + # final subtask is placing the hammer into the drawer and closing the drawer (motion relative to drawer) - but final subtask signal not needed + return signals + + +class MG_MugCleanup(RobosuiteInterface): + """ + Corresponds to robosuite MugCleanup task and variants. + """ + def get_object_poses(self): + """ + Gets the pose of each object relevant to MimicGen data generation in the current scene. + + Returns: + object_poses (dict): dictionary that maps object name (str) to object pose matrix (4x4 np.array) + """ + + # two relevant objects - mug and drawer + return dict( + object=self.get_object_pose(obj_name=self.env.cleanup_object.root_body, obj_type="body"), + drawer=self.get_object_pose(obj_name=self.env.drawer.root_body, obj_type="body"), + ) + + def get_subtask_term_signals(self): + """ + Gets a dictionary of binary flags for each subtask in a task. The flag is 1 + when the subtask has been completed and 0 otherwise. 
MimicGen only uses this + when parsing source demonstrations at the start of data generation, and it only + uses the first 0 -> 1 transition in this signal to detect the end of a subtask. + + Returns: + subtask_term_signals (dict): dictionary that maps subtask name to termination flag (0 or 1) + """ + signals = dict() + + # first subtask is opening the drawer (motion relative to drawer) + # check that drawer is open enough and end effector is far enough from drawer after opening it + drawer_pos, _ = PoseUtils.unmake_pose(self.get_object_pose(obj_name="DrawerObject_drawer_link", obj_type="body")) + eef_pos, _ = PoseUtils.unmake_pose(self.get_robot_eef_pose()) + eef_drawer_dist = np.linalg.norm(eef_pos - drawer_pos) + signals["open"] = int( + (self.env.sim.data.qpos[self.env.drawer_qpos_addr] < -0.10) and (eef_drawer_dist > 0.24) + ) + + # second subtask is grasping the mug (motion relative to mug) + signals["grasp"] = int(self.env._check_grasp_tolerant( + gripper=self.env.robots[0].gripper, + object_geoms=[g for g in self.env.cleanup_object.contact_geoms] + )) + + # final subtask is placing the mug into the drawer and closing the drawer (motion relative to drawer) - but final subtask signal not needed + return signals + + +class MG_NutAssembly(RobosuiteInterface): + """ + Corresponds to robosuite NutAssembly task and variants. + """ + def get_object_poses(self): + """ + Gets the pose of each object relevant to MimicGen data generation in the current scene. 
+ + Returns: + object_poses (dict): dictionary that maps object name (str) to object pose matrix (4x4 np.array) + """ + + # four relevant objects - square and round nuts and pegs + return dict( + square_nut=self.get_object_pose(obj_name=self.env.nuts[self.env.nut_to_id["square"]].root_body, obj_type="body"), + round_nut=self.get_object_pose(obj_name=self.env.nuts[self.env.nut_to_id["round"]].root_body, obj_type="body"), + square_peg=self.get_object_pose(obj_name="peg1", obj_type="body"), + round_peg=self.get_object_pose(obj_name="peg2", obj_type="body"), + ) + + def get_subtask_term_signals(self): + """ + Gets a dictionary of binary flags for each subtask in a task. The flag is 1 + when the subtask has been completed and 0 otherwise. MimicGen only uses this + when parsing source demonstrations at the start of data generation, and it only + uses the first 0 -> 1 transition in this signal to detect the end of a subtask. + + Returns: + subtask_term_signals (dict): dictionary that maps subtask name to termination flag (0 or 1) + """ + signals = dict() + + # checks which objects are on their correct pegs and records them in @self.objects_on_pegs + self.env._check_success() + + # first subtask is grasping square nut (motion relative to square nut) + signals["grasp_square_nut"] = int(self.env._check_grasp( + gripper=self.env.robots[0].gripper, + object_geoms=[g for g in self.env.nuts[self.env.nut_to_id["square"]].contact_geoms]) + ) + + # second subtask is inserting square nut onto square peg (motion relative to square peg) + signals["insert_square_nut"] = int(self.env.objects_on_pegs[self.env.nut_to_id["square"]]) + + # third subtask is grasping round nut (motion relative to round nut) + signals["grasp_round_nut"] = int(self.env._check_grasp( + gripper=self.env.robots[0].gripper, + object_geoms=[g for g in self.env.nuts[self.env.nut_to_id["round"]].contact_geoms]) + ) + + # final subtask is inserting round nut onto round peg (motion relative to round peg) - but final 
subtask signal is not needed + return signals + + +class MG_PickPlace(RobosuiteInterface): + """ + Corresponds to robosuite PickPlace task and variants. + """ + def get_object_poses(self): + """ + Gets the pose of each object relevant to MimicGen data generation in the current scene. + + Returns: + object_poses (dict): dictionary that maps object name (str) to object pose matrix (4x4 np.array) + """ + + # four relevant objects - milk, bread, cereal, can + object_poses = dict() + for obj_name in self.env.object_to_id: + obj = self.env.objects[self.env.object_to_id[obj_name]] + object_poses[obj_name] = self.get_object_pose(obj_name=obj.root_body, obj_type="body") + return object_poses + + def get_subtask_term_signals(self): + """ + Gets a dictionary of binary flags for each subtask in a task. The flag is 1 + when the subtask has been completed and 0 otherwise. MimicGen only uses this + when parsing source demonstrations at the start of data generation, and it only + uses the first 0 -> 1 transition in this signal to detect the end of a subtask. 
+ + Returns: + subtask_term_signals (dict): dictionary that maps subtask name to termination flag (0 or 1) + """ + signals = dict() + + # checks which objects are in their correct bins and records them in @self.objects_in_bins + self.env._check_success() + + object_names_in_order = ["milk", "cereal", "bread", "can"] + assert set(self.env.object_to_id.keys()) == set(object_names_in_order) + n_obj = len(object_names_in_order) + + # each subtask is a grasp and then a place + for i, obj_name in enumerate(object_names_in_order): + obj_id = self.env.object_to_id[obj_name] + + # first subtask for each object is grasping (motion relative to the object) + signals["grasp_{}".format(obj_name)] = int(self.env._check_grasp( + gripper=self.env.robots[0].gripper, + object_geoms=[g for g in self.env.objects[obj_id].contact_geoms]) + ) + + # skip final subtask - unneeded + if i < (n_obj - 1): + # second subtask for each object is placement into bin (motion relative to bin) + signals["place_{}".format(obj_name)] = int(self.env.objects_in_bins[obj_id]) + + return signals + + +class MG_Kitchen(RobosuiteInterface): + """ + Corresponds to robosuite Kitchen task and variants. + """ + def get_object_poses(self): + """ + Gets the pose of each object relevant to MimicGen data generation in the current scene. 
+ + Returns: + object_poses (dict): dictionary that maps object name (str) to object pose matrix (4x4 np.array) + """ + + # five relevant objects - bread, pot, stove, button, and serving region + return dict( + bread=self.get_object_pose(obj_name=self.env.bread_ingredient.root_body, obj_type="body"), + pot=self.get_object_pose(obj_name=self.env.pot_object.root_body, obj_type="body"), + stove=self.get_object_pose(obj_name=self.env.stove_object_1.root_body, obj_type="body"), + button=self.get_object_pose(obj_name=self.env.button_object_1.root_body, obj_type="body"), + serving_region=self.get_object_pose(obj_name=self.env.serving_region.root_body, obj_type="body"), + ) + + def get_subtask_term_signals(self): + """ + Gets a dictionary of binary flags for each subtask in a task. The flag is 1 + when the subtask has been completed and 0 otherwise. MimicGen only uses this + when parsing source demonstrations at the start of data generation, and it only + uses the first 0 -> 1 transition in this signal to detect the end of a subtask. 
+ + Returns: + subtask_term_signals (dict): dictionary that maps subtask name to termination flag (0 or 1) + """ + signals = dict() + + # first subtask is to flip the switch to turn stove on (motion relative to button) + signals["stove_on"] = int(self.env.buttons_on[1]) + + # second subtask is to grasp pot (motion relative to pot) + grasped_pot = self.env._check_grasp( + gripper=self.env.robots[0].gripper, + object_geoms=[g for g in self.env.pot_object.contact_geoms] + ) + signals["grasp_pot"] = int(grasped_pot) + + # third subtask is to place pot on stove (motion relative to stove) + + # check for pot-stove contact and that hand is not grasping pot + pot_bottom_in_contact_with_stove = self.env.check_contact("PotObject_body_0", "Stove1_collision_burner") + signals["place_pot_on_stove"] = int(pot_bottom_in_contact_with_stove and not grasped_pot) + + # fourth subtask is to grasp bread (motion relative to bread) + signals["grasp_bread"] = int(self.env._check_grasp( + gripper=self.env.robots[0].gripper, + object_geoms=[g for g in self.env.bread_ingredient.contact_geoms] + )) + + # fifth subtask is to place bread in pot and grasp pot (motion relative to pot) + signals["place_bread_in_pot"] = int(self.env.check_contact(self.env.bread_ingredient, self.env.pot_object) and grasped_pot) + + # sixth subtask is to place pot in front of serving region and then push it into the serving region (motion relative to serving region) + pot_pos = self.env.sim.data.body_xpos[self.env.pot_object_id] + serving_region_pos = self.env.sim.data.body_xpos[self.env.serving_region_id] + dist_serving_pot = serving_region_pos - pot_pos + pot_in_serving_region = np.abs(dist_serving_pot[0]) < 0.05 and np.abs(dist_serving_pot[1]) < 0.10 and np.abs(dist_serving_pot[2]) < 0.05 + signals["serve"] = int(pot_in_serving_region) + + # final subtask is to turn off the stove (motion relative to button) - but final subtask signal not needed + return signals + + +class MG_CoffeePreparation(RobosuiteInterface): 
+ """ + Corresponds to robosuite CoffeePreparation task and variants. + """ + def get_object_poses(self): + """ + Gets the pose of each object relevant to MimicGen data generation in the current scene. + + Returns: + object_poses (dict): dictionary that maps object name (str) to object pose matrix (4x4 np.array) + """ + + # four relevant objects - coffee pod, coffee machine, drawer, and mug + return dict( + coffee_pod=self.get_object_pose(obj_name=self.env.coffee_pod.root_body, obj_type="body"), + coffee_machine=self.get_object_pose(obj_name=self.env.coffee_machine.root_body, obj_type="body"), + drawer=self.get_object_pose(obj_name=self.env.cabinet_object.root_body, obj_type="body"), + mug=self.get_object_pose(obj_name=self.env.mug.root_body, obj_type="body"), + ) + + def get_subtask_term_signals(self): + """ + Gets a dictionary of binary flags for each subtask in a task. The flag is 1 + when the subtask has been completed and 0 otherwise. MimicGen only uses this + when parsing source demonstrations at the start of data generation, and it only + uses the first 0 -> 1 transition in this signal to detect the end of a subtask. 
+ + Returns: + subtask_term_signals (dict): dictionary that maps subtask name to termination flag (0 or 1) + """ + signals = dict() + + metrics = self.env._get_partial_task_metrics() + + # first subtask is grasping mug (motion relative to mug) + signals["mug_grasp"] = int(metrics["mug_grasp"]) + + # second subtask is placing the mug on the coffee machine base and then opening the lid (motion relative to coffee machine) + signals["mug_place"] = int(self.env._check_mug_placement() and (self.env.sim.data.qpos[self.env.hinge_qpos_addr] > 2.08)) + + # third subtask is opening the drawer (motion relative to drawer) + signals["drawer_open"] = int(self.env.sim.data.qpos[self.env.cabinet_qpos_addr] < -0.19) + + # fourth subtask is grasping the coffee pod (motion relative to coffee pod) + signals["pod_grasp"] = int(metrics["grasp"]) + + # final subtask is inserting pod into machine and closing the lid (motion relative to machine) - but final subtask signal is not needed + return signals diff --git a/mimicgen_envs/envs/robosuite/__init__.py b/mimicgen/envs/__init__.py similarity index 57% rename from mimicgen_envs/envs/robosuite/__init__.py rename to mimicgen/envs/__init__.py index 981400f..8edfcfd 100644 --- a/mimicgen_envs/envs/robosuite/__init__.py +++ b/mimicgen/envs/__init__.py @@ -1,3 +1,3 @@ -# Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # # Licensed under the NVIDIA Source Code License [see LICENSE for details]. \ No newline at end of file diff --git a/mimicgen_envs/scripts/__init__.py b/mimicgen/envs/robosuite/__init__.py similarity index 57% rename from mimicgen_envs/scripts/__init__.py rename to mimicgen/envs/robosuite/__init__.py index 981400f..8edfcfd 100644 --- a/mimicgen_envs/scripts/__init__.py +++ b/mimicgen/envs/robosuite/__init__.py @@ -1,3 +1,3 @@ -# Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+# Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # # Licensed under the NVIDIA Source Code License [see LICENSE for details]. \ No newline at end of file diff --git a/mimicgen_envs/envs/robosuite/coffee.py b/mimicgen/envs/robosuite/coffee.py similarity index 99% rename from mimicgen_envs/envs/robosuite/coffee.py rename to mimicgen/envs/robosuite/coffee.py index 3782f30..9ff85f2 100644 --- a/mimicgen_envs/envs/robosuite/coffee.py +++ b/mimicgen/envs/robosuite/coffee.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # # Licensed under the NVIDIA Source Code License [see LICENSE for details]. @@ -17,9 +17,9 @@ from robosuite.utils.placement_samplers import SequentialCompositeSampler, UniformRandomSampler from robosuite.utils.observables import Observable, sensor -import mimicgen_envs -from mimicgen_envs.models.robosuite.objects import BlenderObject, CoffeeMachinePodObject, CoffeeMachineObject, LongDrawerObject, CupObject -from mimicgen_envs.envs.robosuite.single_arm_env_mg import SingleArmEnv_MG +import mimicgen +from mimicgen.models.robosuite.objects import BlenderObject, CoffeeMachinePodObject, CoffeeMachineObject, LongDrawerObject, CupObject +from mimicgen.envs.robosuite.single_arm_env_mg import SingleArmEnv_MG class Coffee(SingleArmEnv_MG): @@ -858,7 +858,7 @@ def _get_mug_model(self): """ shapenet_id = "3143a4ac" # beige round mug, works well and matches color scheme of other assets shapenet_scale = 1.0 - base_mjcf_path = os.path.join(mimicgen_envs.__path__[0], "models/robosuite/assets/shapenet_core/mugs") + base_mjcf_path = os.path.join(mimicgen.__path__[0], "models/robosuite/assets/shapenet_core/mugs") mjcf_path = os.path.join(base_mjcf_path, "{}/model.xml".format(shapenet_id)) self.mug = BlenderObject( diff --git a/mimicgen_envs/envs/robosuite/hammer_cleanup.py b/mimicgen/envs/robosuite/hammer_cleanup.py similarity 
index 98% rename from mimicgen_envs/envs/robosuite/hammer_cleanup.py rename to mimicgen/envs/robosuite/hammer_cleanup.py index f25867a..6fb99be 100644 --- a/mimicgen_envs/envs/robosuite/hammer_cleanup.py +++ b/mimicgen/envs/robosuite/hammer_cleanup.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # # Licensed under the NVIDIA Source Code License [see LICENSE for details]. @@ -26,9 +26,9 @@ import robosuite_task_zoo from robosuite_task_zoo.environments.manipulation.hammer_place import HammerPlaceEnv -import mimicgen_envs -from mimicgen_envs.envs.robosuite.single_arm_env_mg import SingleArmEnv_MG -from mimicgen_envs.models.robosuite.objects import DrawerObject +import mimicgen +from mimicgen.envs.robosuite.single_arm_env_mg import SingleArmEnv_MG +from mimicgen.models.robosuite.objects import DrawerObject class HammerCleanup_D0(HammerPlaceEnv, SingleArmEnv_MG): diff --git a/mimicgen_envs/envs/robosuite/kitchen.py b/mimicgen/envs/robosuite/kitchen.py similarity index 98% rename from mimicgen_envs/envs/robosuite/kitchen.py rename to mimicgen/envs/robosuite/kitchen.py index 3d2b748..e08a0fa 100644 --- a/mimicgen_envs/envs/robosuite/kitchen.py +++ b/mimicgen/envs/robosuite/kitchen.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # # Licensed under the NVIDIA Source Code License [see LICENSE for details]. 
@@ -26,8 +26,8 @@ from robosuite_task_zoo.environments.manipulation.kitchen import KitchenEnv from robosuite_task_zoo.models.kitchen import PotObject, StoveObject, ButtonObject, ServingRegionObject -import mimicgen_envs -from mimicgen_envs.envs.robosuite.single_arm_env_mg import SingleArmEnv_MG +import mimicgen +from mimicgen.envs.robosuite.single_arm_env_mg import SingleArmEnv_MG class StoveObjectNew(StoveObject): @@ -67,7 +67,7 @@ class ServingRegionObjectNew(MujocoXMLObject): def __init__(self, name, joints=None): # our custom serving region xml - turn site into visual-only geom so that it shows up on env reset (instead # of after first env step) - path_to_serving_region_xml = os.path.join(mimicgen_envs.__path__[0], "models/robosuite/assets/objects/serving_region.xml") + path_to_serving_region_xml = os.path.join(mimicgen.__path__[0], "models/robosuite/assets/objects/serving_region.xml") super().__init__(path_to_serving_region_xml, name=name, joints=None, obj_type="all", duplicate_collision_geoms=True) diff --git a/mimicgen_envs/envs/robosuite/mug_cleanup.py b/mimicgen/envs/robosuite/mug_cleanup.py similarity index 98% rename from mimicgen_envs/envs/robosuite/mug_cleanup.py rename to mimicgen/envs/robosuite/mug_cleanup.py index 3e84a88..e2d4e27 100644 --- a/mimicgen_envs/envs/robosuite/mug_cleanup.py +++ b/mimicgen/envs/robosuite/mug_cleanup.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # # Licensed under the NVIDIA Source Code License [see LICENSE for details]. 
@@ -22,9 +22,9 @@ from robosuite.utils.placement_samplers import SequentialCompositeSampler, UniformRandomSampler from robosuite.utils.observables import Observable, sensor -import mimicgen_envs -from mimicgen_envs.models.robosuite.objects import BlenderObject, DrawerObject, LongDrawerObject -from mimicgen_envs.envs.robosuite.single_arm_env_mg import SingleArmEnv_MG +import mimicgen +from mimicgen.models.robosuite.objects import BlenderObject, DrawerObject, LongDrawerObject +from mimicgen.envs.robosuite.single_arm_env_mg import SingleArmEnv_MG class MugCleanup(SingleArmEnv_MG): @@ -358,7 +358,7 @@ def _get_object_model(self): """ Allow subclasses to override which object to pack into drawer - should load into @self.cleanup_object. """ - base_mjcf_path = os.path.join(mimicgen_envs.__path__[0], "models/robosuite/assets/shapenet_core/mugs") + base_mjcf_path = os.path.join(mimicgen.__path__[0], "models/robosuite/assets/shapenet_core/mugs") mjcf_path = os.path.join(base_mjcf_path, "{}/model.xml".format(self._shapenet_id)) self.cleanup_object = BlenderObject( @@ -680,7 +680,7 @@ def __init__( ("e94e46bc", 0.8), # dark blue cylindrical mug ("fad118b3", 0.66666667), # tall green cylindrical mug ] - self._base_mjcf_path = os.path.join(mimicgen_envs.__path__[0], "models/robosuite/assets/shapenet_core/mugs") + self._base_mjcf_path = os.path.join(mimicgen.__path__[0], "models/robosuite/assets/shapenet_core/mugs") super(MugCleanup_O2, self).__init__(shapenet_id=None, shapenet_scale=None, **kwargs) def _get_object_model(self): diff --git a/mimicgen_envs/envs/robosuite/nut_assembly.py b/mimicgen/envs/robosuite/nut_assembly.py similarity index 99% rename from mimicgen_envs/envs/robosuite/nut_assembly.py rename to mimicgen/envs/robosuite/nut_assembly.py index d93425e..3f3c359 100644 --- a/mimicgen_envs/envs/robosuite/nut_assembly.py +++ b/mimicgen/envs/robosuite/nut_assembly.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+# Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # # Licensed under the NVIDIA Source Code License [see LICENSE for details]. @@ -16,7 +16,7 @@ from robosuite.utils.mjcf_utils import array_to_string, string_to_array, find_elements from robosuite.utils import RandomizationError -from mimicgen_envs.envs.robosuite.single_arm_env_mg import SingleArmEnv_MG +from mimicgen.envs.robosuite.single_arm_env_mg import SingleArmEnv_MG class NutAssembly_D0(NutAssembly, SingleArmEnv_MG): diff --git a/mimicgen_envs/envs/robosuite/pick_place.py b/mimicgen/envs/robosuite/pick_place.py similarity index 89% rename from mimicgen_envs/envs/robosuite/pick_place.py rename to mimicgen/envs/robosuite/pick_place.py index 233cf9b..d9308df 100644 --- a/mimicgen_envs/envs/robosuite/pick_place.py +++ b/mimicgen/envs/robosuite/pick_place.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # # Licensed under the NVIDIA Source Code License [see LICENSE for details]. diff --git a/mimicgen_envs/envs/robosuite/single_arm_env_mg.py b/mimicgen/envs/robosuite/single_arm_env_mg.py similarity index 93% rename from mimicgen_envs/envs/robosuite/single_arm_env_mg.py rename to mimicgen/envs/robosuite/single_arm_env_mg.py index 0d65052..38643f2 100644 --- a/mimicgen_envs/envs/robosuite/single_arm_env_mg.py +++ b/mimicgen/envs/robosuite/single_arm_env_mg.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # # Licensed under the NVIDIA Source Code License [see LICENSE for details]. 
@@ -14,7 +14,7 @@ except ImportError: pass -import mimicgen_envs +import mimicgen class SingleArmEnv_MG(SingleArmEnv): @@ -57,11 +57,11 @@ def edit_model_xml(self, xml_str): new_path = "/".join(new_path_split) elem.set("file", new_path) - # replace all paths to mimicgen_envs assets - check_lst = [loc for loc, val in enumerate(old_path_split) if val == "mimicgen_envs"] + # replace all paths to mimicgen assets + check_lst = [loc for loc, val in enumerate(old_path_split) if val == "mimicgen"] if len(check_lst) > 0: ind = max(check_lst) # last occurrence index - new_path_split = os.path.split(mimicgen_envs.__file__)[0].split("/") + old_path_split[ind + 1 :] + new_path_split = os.path.split(mimicgen.__file__)[0].split("/") + old_path_split[ind + 1 :] new_path = "/".join(new_path_split) elem.set("file", new_path) diff --git a/mimicgen_envs/envs/robosuite/stack.py b/mimicgen/envs/robosuite/stack.py similarity index 99% rename from mimicgen_envs/envs/robosuite/stack.py rename to mimicgen/envs/robosuite/stack.py index ae9c585..6009e4f 100644 --- a/mimicgen_envs/envs/robosuite/stack.py +++ b/mimicgen/envs/robosuite/stack.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # # Licensed under the NVIDIA Source Code License [see LICENSE for details]. 
@@ -16,7 +16,7 @@ from robosuite.utils.observables import Observable, sensor from robosuite.environments.manipulation.stack import Stack -from mimicgen_envs.envs.robosuite.single_arm_env_mg import SingleArmEnv_MG +from mimicgen.envs.robosuite.single_arm_env_mg import SingleArmEnv_MG class Stack_D0(Stack, SingleArmEnv_MG): diff --git a/mimicgen_envs/envs/robosuite/threading.py b/mimicgen/envs/robosuite/threading.py similarity index 99% rename from mimicgen_envs/envs/robosuite/threading.py rename to mimicgen/envs/robosuite/threading.py index c6c76c7..622b541 100644 --- a/mimicgen_envs/envs/robosuite/threading.py +++ b/mimicgen/envs/robosuite/threading.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # # Licensed under the NVIDIA Source Code License [see LICENSE for details]. @@ -13,8 +13,8 @@ from robosuite.utils.placement_samplers import SequentialCompositeSampler, UniformRandomSampler from robosuite.utils.observables import Observable, sensor -from mimicgen_envs.models.robosuite.objects import NeedleObject, RingTripodObject -from mimicgen_envs.envs.robosuite.single_arm_env_mg import SingleArmEnv_MG +from mimicgen.models.robosuite.objects import NeedleObject, RingTripodObject +from mimicgen.envs.robosuite.single_arm_env_mg import SingleArmEnv_MG class Threading(SingleArmEnv_MG): diff --git a/mimicgen_envs/envs/robosuite/three_piece_assembly.py b/mimicgen/envs/robosuite/three_piece_assembly.py similarity index 99% rename from mimicgen_envs/envs/robosuite/three_piece_assembly.py rename to mimicgen/envs/robosuite/three_piece_assembly.py index 71111ed..173524b 100644 --- a/mimicgen_envs/envs/robosuite/three_piece_assembly.py +++ b/mimicgen/envs/robosuite/three_piece_assembly.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# # Licensed under the NVIDIA Source Code License [see LICENSE for details]. @@ -16,8 +16,8 @@ from robosuite.utils.observables import Observable, sensor from robosuite.utils.mjcf_utils import CustomMaterial, find_elements, string_to_array -from mimicgen_envs.models.robosuite.objects import BoxPatternObject -from mimicgen_envs.envs.robosuite.single_arm_env_mg import SingleArmEnv_MG +from mimicgen.models.robosuite.objects import BoxPatternObject +from mimicgen.envs.robosuite.single_arm_env_mg import SingleArmEnv_MG class ThreePieceAssembly(SingleArmEnv_MG): diff --git a/mimicgen/exps/templates/robosuite/coffee.json b/mimicgen/exps/templates/robosuite/coffee.json new file mode 100644 index 0000000..0086255 --- /dev/null +++ b/mimicgen/exps/templates/robosuite/coffee.json @@ -0,0 +1,70 @@ +{ + "name": "coffee", + "type": "robosuite", + "experiment": { + "name": "demo", + "source": { + "dataset_path": null, + "filter_key": null, + "n": null, + "start": null + }, + "generation": { + "path": null, + "guarantee": false, + "keep_failed": true, + "num_trials": 10, + "select_src_per_subtask": false, + "transform_first_robot_pose": false, + "interpolate_from_last_target_pose": true + }, + "task": { + "name": null, + "robot": null, + "gripper": null, + "interface": null, + "interface_type": null + }, + "max_num_failures": 50, + "render_video": true, + "num_demo_to_render": 50, + "num_fail_demo_to_render": 50, + "log_every_n_attempts": 50, + "seed": 1 + }, + "obs": { + "collect_obs": true, + "camera_names": [], + "camera_height": 84, + "camera_width": 84 + }, + "task": { + "task_spec": { + "subtask_1": { + "object_ref": "coffee_pod", + "subtask_term_signal": "grasp", + "subtask_term_offset_range": [ + 5, + 10 + ], + "selection_strategy": "random", + "selection_strategy_kwargs": null, + "action_noise": 0.05, + "num_interpolation_steps": 5, + "num_fixed_steps": 0, + "apply_noise_during_interpolation": false + }, + "subtask_2": { + "object_ref": "coffee_machine", + 
"subtask_term_signal": null, + "subtask_term_offset_range": null, + "selection_strategy": "random", + "selection_strategy_kwargs": null, + "action_noise": 0.05, + "num_interpolation_steps": 5, + "num_fixed_steps": 0, + "apply_noise_during_interpolation": false + } + } + } +} \ No newline at end of file diff --git a/mimicgen/exps/templates/robosuite/coffee_preparation.json b/mimicgen/exps/templates/robosuite/coffee_preparation.json new file mode 100644 index 0000000..da4beb9 --- /dev/null +++ b/mimicgen/exps/templates/robosuite/coffee_preparation.json @@ -0,0 +1,112 @@ +{ + "name": "coffee_preparation", + "type": "robosuite", + "experiment": { + "name": "demo", + "source": { + "dataset_path": null, + "filter_key": null, + "n": null, + "start": null + }, + "generation": { + "path": null, + "guarantee": false, + "keep_failed": true, + "num_trials": 10, + "select_src_per_subtask": false, + "transform_first_robot_pose": false, + "interpolate_from_last_target_pose": true + }, + "task": { + "name": null, + "robot": null, + "gripper": null, + "interface": null, + "interface_type": null + }, + "max_num_failures": 50, + "render_video": true, + "num_demo_to_render": 50, + "num_fail_demo_to_render": 50, + "log_every_n_attempts": 50, + "seed": 1 + }, + "obs": { + "collect_obs": true, + "camera_names": [], + "camera_height": 84, + "camera_width": 84 + }, + "task": { + "task_spec": { + "subtask_1": { + "object_ref": "mug", + "subtask_term_signal": "mug_grasp", + "subtask_term_offset_range": [ + 5, + 10 + ], + "selection_strategy": "random", + "selection_strategy_kwargs": null, + "action_noise": 0.05, + "num_interpolation_steps": 5, + "num_fixed_steps": 0, + "apply_noise_during_interpolation": false + }, + "subtask_2": { + "object_ref": "coffee_machine", + "subtask_term_signal": "mug_place", + "subtask_term_offset_range": [ + 5, + 10 + ], + "selection_strategy": "random", + "selection_strategy_kwargs": null, + "action_noise": 0.05, + "num_interpolation_steps": 5, + 
"num_fixed_steps": 0, + "apply_noise_during_interpolation": false + }, + "subtask_3": { + "object_ref": "drawer", + "subtask_term_signal": "drawer_open", + "subtask_term_offset_range": [ + 5, + 10 + ], + "selection_strategy": "random", + "selection_strategy_kwargs": null, + "action_noise": 0.05, + "num_interpolation_steps": 5, + "num_fixed_steps": 0, + "apply_noise_during_interpolation": false + }, + "subtask_4": { + "object_ref": "coffee_pod", + "subtask_term_signal": "pod_grasp", + "subtask_term_offset_range": [ + 5, + 10 + ], + "selection_strategy": "random", + "selection_strategy_kwargs": null, + "action_noise": 0.05, + "num_interpolation_steps": 5, + "num_fixed_steps": 0, + "apply_noise_during_interpolation": false + }, + "subtask_5": { + "object_ref": "coffee_machine", + "subtask_term_signal": null, + "subtask_term_offset_range": null, + "selection_strategy": "random", + "selection_strategy_kwargs": null, + "action_noise": 0.05, + "num_interpolation_steps": 5, + "num_fixed_steps": 0, + "apply_noise_during_interpolation": false + } + } + } +} \ No newline at end of file diff --git a/mimicgen/exps/templates/robosuite/hammer_cleanup.json b/mimicgen/exps/templates/robosuite/hammer_cleanup.json new file mode 100644 index 0000000..f8a8feb --- /dev/null +++ b/mimicgen/exps/templates/robosuite/hammer_cleanup.json @@ -0,0 +1,84 @@ +{ + "name": "hammer_cleanup", + "type": "robosuite", + "experiment": { + "name": "demo", + "source": { + "dataset_path": null, + "filter_key": null, + "n": null, + "start": null + }, + "generation": { + "path": null, + "guarantee": false, + "keep_failed": true, + "num_trials": 10, + "select_src_per_subtask": false, + "transform_first_robot_pose": false, + "interpolate_from_last_target_pose": true + }, + "task": { + "name": null, + "robot": null, + "gripper": null, + "interface": null, + "interface_type": null + }, + "max_num_failures": 50, + "render_video": true, + "num_demo_to_render": 50, + "num_fail_demo_to_render": 50, + 
"log_every_n_attempts": 50, + "seed": 1 + }, + "obs": { + "collect_obs": true, + "camera_names": [], + "camera_height": 84, + "camera_width": 84 + }, + "task": { + "task_spec": { + "subtask_1": { + "object_ref": "drawer", + "subtask_term_signal": "open", + "subtask_term_offset_range": [ + 10, + 20 + ], + "selection_strategy": "random", + "selection_strategy_kwargs": null, + "action_noise": 0.05, + "num_interpolation_steps": 5, + "num_fixed_steps": 0, + "apply_noise_during_interpolation": false + }, + "subtask_2": { + "object_ref": "hammer", + "subtask_term_signal": "grasp", + "subtask_term_offset_range": [ + 10, + 20 + ], + "selection_strategy": "random", + "selection_strategy_kwargs": null, + "action_noise": 0.05, + "num_interpolation_steps": 5, + "num_fixed_steps": 0, + "apply_noise_during_interpolation": false + }, + "subtask_3": { + "object_ref": "drawer", + "subtask_term_signal": null, + "subtask_term_offset_range": null, + "selection_strategy": "random", + "selection_strategy_kwargs": null, + "action_noise": 0.05, + "num_interpolation_steps": 5, + "num_fixed_steps": 0, + "apply_noise_during_interpolation": false + } + } + } +} \ No newline at end of file diff --git a/mimicgen/exps/templates/robosuite/kitchen.json b/mimicgen/exps/templates/robosuite/kitchen.json new file mode 100644 index 0000000..f7fdb68 --- /dev/null +++ b/mimicgen/exps/templates/robosuite/kitchen.json @@ -0,0 +1,140 @@ +{ + "name": "kitchen", + "type": "robosuite", + "experiment": { + "name": "demo", + "source": { + "dataset_path": null, + "filter_key": null, + "n": null, + "start": null + }, + "generation": { + "path": null, + "guarantee": false, + "keep_failed": true, + "num_trials": 10, + "select_src_per_subtask": false, + "transform_first_robot_pose": false, + "interpolate_from_last_target_pose": true + }, + "task": { + "name": null, + "robot": null, + "gripper": null, + "interface": null, + "interface_type": null + }, + "max_num_failures": 50, + "render_video": true, + 
"num_demo_to_render": 50, + "num_fail_demo_to_render": 50, + "log_every_n_attempts": 50, + "seed": 1 + }, + "obs": { + "collect_obs": true, + "camera_names": [], + "camera_height": 84, + "camera_width": 84 + }, + "task": { + "task_spec": { + "subtask_1": { + "object_ref": "button", + "subtask_term_signal": "stove_on", + "subtask_term_offset_range": [ + 10, + 20 + ], + "selection_strategy": "random", + "selection_strategy_kwargs": null, + "action_noise": 0.05, + "num_interpolation_steps": 5, + "num_fixed_steps": 0, + "apply_noise_during_interpolation": false + }, + "subtask_2": { + "object_ref": "pot", + "subtask_term_signal": "grasp_pot", + "subtask_term_offset_range": [ + 10, + 20 + ], + "selection_strategy": "random", + "selection_strategy_kwargs": null, + "action_noise": 0.05, + "num_interpolation_steps": 5, + "num_fixed_steps": 0, + "apply_noise_during_interpolation": false + }, + "subtask_3": { + "object_ref": "stove", + "subtask_term_signal": "place_pot_on_stove", + "subtask_term_offset_range": [ + 10, + 20 + ], + "selection_strategy": "random", + "selection_strategy_kwargs": null, + "action_noise": 0.05, + "num_interpolation_steps": 5, + "num_fixed_steps": 0, + "apply_noise_during_interpolation": false + }, + "subtask_4": { + "object_ref": "bread", + "subtask_term_signal": "grasp_bread", + "subtask_term_offset_range": [ + 10, + 20 + ], + "selection_strategy": "random", + "selection_strategy_kwargs": null, + "action_noise": 0.05, + "num_interpolation_steps": 5, + "num_fixed_steps": 0, + "apply_noise_during_interpolation": false + }, + "subtask_5": { + "object_ref": "pot", + "subtask_term_signal": "place_bread_in_pot", + "subtask_term_offset_range": [ + 10, + 20 + ], + "selection_strategy": "random", + "selection_strategy_kwargs": null, + "action_noise": 0.05, + "num_interpolation_steps": 5, + "num_fixed_steps": 0, + "apply_noise_during_interpolation": false + }, + "subtask_6": { + "object_ref": "serving_region", + "subtask_term_signal": "serve", + 
"subtask_term_offset_range": [ + 10, + 20 + ], + "selection_strategy": "random", + "selection_strategy_kwargs": null, + "action_noise": 0.05, + "num_interpolation_steps": 5, + "num_fixed_steps": 0, + "apply_noise_during_interpolation": false + }, + "subtask_7": { + "object_ref": "button", + "subtask_term_signal": null, + "subtask_term_offset_range": null, + "selection_strategy": "random", + "selection_strategy_kwargs": null, + "action_noise": 0.05, + "num_interpolation_steps": 5, + "num_fixed_steps": 0, + "apply_noise_during_interpolation": false + } + } + } +} \ No newline at end of file diff --git a/mimicgen/exps/templates/robosuite/mug_cleanup.json b/mimicgen/exps/templates/robosuite/mug_cleanup.json new file mode 100644 index 0000000..e60976a --- /dev/null +++ b/mimicgen/exps/templates/robosuite/mug_cleanup.json @@ -0,0 +1,84 @@ +{ + "name": "mug_cleanup", + "type": "robosuite", + "experiment": { + "name": "demo", + "source": { + "dataset_path": null, + "filter_key": null, + "n": null, + "start": null + }, + "generation": { + "path": null, + "guarantee": false, + "keep_failed": true, + "num_trials": 10, + "select_src_per_subtask": false, + "transform_first_robot_pose": false, + "interpolate_from_last_target_pose": true + }, + "task": { + "name": null, + "robot": null, + "gripper": null, + "interface": null, + "interface_type": null + }, + "max_num_failures": 50, + "render_video": true, + "num_demo_to_render": 50, + "num_fail_demo_to_render": 50, + "log_every_n_attempts": 50, + "seed": 1 + }, + "obs": { + "collect_obs": true, + "camera_names": [], + "camera_height": 84, + "camera_width": 84 + }, + "task": { + "task_spec": { + "subtask_1": { + "object_ref": "drawer", + "subtask_term_signal": "open", + "subtask_term_offset_range": [ + 10, + 20 + ], + "selection_strategy": "random", + "selection_strategy_kwargs": null, + "action_noise": 0.05, + "num_interpolation_steps": 5, + "num_fixed_steps": 0, + "apply_noise_during_interpolation": false + }, + "subtask_2": { + 
"object_ref": "object", + "subtask_term_signal": "grasp", + "subtask_term_offset_range": [ + 10, + 20 + ], + "selection_strategy": "random", + "selection_strategy_kwargs": null, + "action_noise": 0.05, + "num_interpolation_steps": 5, + "num_fixed_steps": 0, + "apply_noise_during_interpolation": false + }, + "subtask_3": { + "object_ref": "drawer", + "subtask_term_signal": null, + "subtask_term_offset_range": null, + "selection_strategy": "random", + "selection_strategy_kwargs": null, + "action_noise": 0.05, + "num_interpolation_steps": 5, + "num_fixed_steps": 0, + "apply_noise_during_interpolation": false + } + } + } +} \ No newline at end of file diff --git a/mimicgen/exps/templates/robosuite/nut_assembly.json b/mimicgen/exps/templates/robosuite/nut_assembly.json new file mode 100644 index 0000000..922d8e5 --- /dev/null +++ b/mimicgen/exps/templates/robosuite/nut_assembly.json @@ -0,0 +1,100 @@ +{ + "name": "nut_assembly", + "type": "robosuite", + "experiment": { + "name": "demo", + "source": { + "dataset_path": null, + "filter_key": null, + "n": null, + "start": null + }, + "generation": { + "path": null, + "guarantee": false, + "keep_failed": true, + "num_trials": 10, + "select_src_per_subtask": false, + "transform_first_robot_pose": false, + "interpolate_from_last_target_pose": true + }, + "task": { + "name": null, + "robot": null, + "gripper": null, + "interface": null, + "interface_type": null + }, + "max_num_failures": 50, + "render_video": true, + "num_demo_to_render": 50, + "num_fail_demo_to_render": 50, + "log_every_n_attempts": 50, + "seed": 1 + }, + "obs": { + "collect_obs": true, + "camera_names": [], + "camera_height": 84, + "camera_width": 84 + }, + "task": { + "task_spec": { + "subtask_1": { + "object_ref": "square_nut", + "subtask_term_signal": "grasp_square_nut", + "subtask_term_offset_range": [ + 10, + 20 + ], + "selection_strategy": "nearest_neighbor_object", + "selection_strategy_kwargs": { + "nn_k": 3 + }, + "action_noise": 0.05, + 
"num_interpolation_steps": 5, + "num_fixed_steps": 0, + "apply_noise_during_interpolation": false + }, + "subtask_2": { + "object_ref": "square_peg", + "subtask_term_signal": "insert_square_nut", + "subtask_term_offset_range": [ + 10, + 20 + ], + "selection_strategy": "random", + "selection_strategy_kwargs": null, + "action_noise": 0.05, + "num_interpolation_steps": 5, + "num_fixed_steps": 0, + "apply_noise_during_interpolation": false + }, + "subtask_3": { + "object_ref": "round_nut", + "subtask_term_signal": "grasp_round_nut", + "subtask_term_offset_range": [ + 10, + 20 + ], + "selection_strategy": "random", + "selection_strategy_kwargs": null, + "action_noise": 0.05, + "num_interpolation_steps": 5, + "num_fixed_steps": 0, + "apply_noise_during_interpolation": false + }, + "subtask_4": { + "object_ref": "round_peg", + "subtask_term_signal": null, + "subtask_term_offset_range": null, + "selection_strategy": "random", + "selection_strategy_kwargs": null, + "action_noise": 0.05, + "num_interpolation_steps": 5, + "num_fixed_steps": 0, + "apply_noise_during_interpolation": false + } + } + } +} \ No newline at end of file diff --git a/mimicgen/exps/templates/robosuite/pick_place.json b/mimicgen/exps/templates/robosuite/pick_place.json new file mode 100644 index 0000000..0067a8f --- /dev/null +++ b/mimicgen/exps/templates/robosuite/pick_place.json @@ -0,0 +1,153 @@ +{ + "name": "pick_place", + "type": "robosuite", + "experiment": { + "name": "demo", + "source": { + "dataset_path": null, + "filter_key": null, + "n": null, + "start": null + }, + "generation": { + "path": null, + "guarantee": false, + "keep_failed": true, + "num_trials": 10, + "select_src_per_subtask": false, + "transform_first_robot_pose": false, + "interpolate_from_last_target_pose": true + }, + "task": { + "name": null, + "robot": null, + "gripper": null, + "interface": null, + "interface_type": null + }, + "max_num_failures": 50, + "render_video": true, + "num_demo_to_render": 50, + 
"num_fail_demo_to_render": 50, + "log_every_n_attempts": 50, + "seed": 1 + }, + "obs": { + "collect_obs": true, + "camera_names": [], + "camera_height": 84, + "camera_width": 84 + }, + "task": { + "task_spec": { + "subtask_1": { + "object_ref": "milk", + "subtask_term_signal": "grasp_milk", + "subtask_term_offset_range": [ + 10, + 20 + ], + "selection_strategy": "nearest_neighbor_object", + "selection_strategy_kwargs": { + "nn_k": 3 + }, + "action_noise": 0.05, + "num_interpolation_steps": 5, + "num_fixed_steps": 0, + "apply_noise_during_interpolation": false + }, + "subtask_2": { + "object_ref": null, + "subtask_term_signal": "place_milk", + "subtask_term_offset_range": null, + "selection_strategy": "random", + "selection_strategy_kwargs": null, + "action_noise": 0.05, + "num_interpolation_steps": 5, + "num_fixed_steps": 0, + "apply_noise_during_interpolation": false + }, + "subtask_3": { + "object_ref": "cereal", + "subtask_term_signal": "grasp_cereal", + "subtask_term_offset_range": [ + 10, + 20 + ], + "selection_strategy": "nearest_neighbor_object", + "selection_strategy_kwargs": { + "nn_k": 3 + }, + "action_noise": 0.05, + "num_interpolation_steps": 5, + "num_fixed_steps": 0, + "apply_noise_during_interpolation": false + }, + "subtask_4": { + "object_ref": null, + "subtask_term_signal": "place_cereal", + "subtask_term_offset_range": null, + "selection_strategy": "random", + "selection_strategy_kwargs": null, + "action_noise": 0.05, + "num_interpolation_steps": 5, + "num_fixed_steps": 0, + "apply_noise_during_interpolation": false + }, + "subtask_5": { + "object_ref": "bread", + "subtask_term_signal": "grasp_bread", + "subtask_term_offset_range": [ + 10, + 20 + ], + "selection_strategy": "nearest_neighbor_object", + "selection_strategy_kwargs": { + "nn_k": 3 + }, + "action_noise": 0.05, + "num_interpolation_steps": 5, + "num_fixed_steps": 0, + "apply_noise_during_interpolation": false + }, + "subtask_6": { + "object_ref": null, + "subtask_term_signal": 
"place_bread", + "subtask_term_offset_range": null, + "selection_strategy": "random", + "selection_strategy_kwargs": null, + "action_noise": 0.05, + "num_interpolation_steps": 5, + "num_fixed_steps": 0, + "apply_noise_during_interpolation": false + }, + "subtask_7": { + "object_ref": "can", + "subtask_term_signal": "grasp_can", + "subtask_term_offset_range": [ + 10, + 20 + ], + "selection_strategy": "nearest_neighbor_object", + "selection_strategy_kwargs": { + "nn_k": 3 + }, + "action_noise": 0.05, + "num_interpolation_steps": 5, + "num_fixed_steps": 0, + "apply_noise_during_interpolation": false + }, + "subtask_8": { + "object_ref": null, + "subtask_term_signal": null, + "subtask_term_offset_range": null, + "selection_strategy": "random", + "selection_strategy_kwargs": null, + "action_noise": 0.05, + "num_interpolation_steps": 5, + "num_fixed_steps": 0, + "apply_noise_during_interpolation": false + } + } + } +} \ No newline at end of file diff --git a/mimicgen/exps/templates/robosuite/square.json b/mimicgen/exps/templates/robosuite/square.json new file mode 100644 index 0000000..23dce5d --- /dev/null +++ b/mimicgen/exps/templates/robosuite/square.json @@ -0,0 +1,72 @@ +{ + "name": "square", + "type": "robosuite", + "experiment": { + "name": "demo", + "source": { + "dataset_path": null, + "filter_key": null, + "n": null, + "start": null + }, + "generation": { + "path": null, + "guarantee": false, + "keep_failed": true, + "num_trials": 10, + "select_src_per_subtask": false, + "transform_first_robot_pose": false, + "interpolate_from_last_target_pose": true + }, + "task": { + "name": null, + "robot": null, + "gripper": null, + "interface": null, + "interface_type": null + }, + "max_num_failures": 50, + "render_video": true, + "num_demo_to_render": 50, + "num_fail_demo_to_render": 50, + "log_every_n_attempts": 50, + "seed": 1 + }, + "obs": { + "collect_obs": true, + "camera_names": [], + "camera_height": 84, + "camera_width": 84 + }, + "task": { + "task_spec": { + 
"subtask_1": { + "object_ref": "square_nut", + "subtask_term_signal": "grasp", + "subtask_term_offset_range": [ + 10, + 20 + ], + "selection_strategy": "nearest_neighbor_object", + "selection_strategy_kwargs": { + "nn_k": 3 + }, + "action_noise": 0.05, + "num_interpolation_steps": 5, + "num_fixed_steps": 0, + "apply_noise_during_interpolation": false + }, + "subtask_2": { + "object_ref": "square_peg", + "subtask_term_signal": null, + "subtask_term_offset_range": null, + "selection_strategy": "random", + "selection_strategy_kwargs": null, + "action_noise": 0.05, + "num_interpolation_steps": 5, + "num_fixed_steps": 0, + "apply_noise_during_interpolation": false + } + } + } +} \ No newline at end of file diff --git a/mimicgen/exps/templates/robosuite/stack.json b/mimicgen/exps/templates/robosuite/stack.json new file mode 100644 index 0000000..2d42f08 --- /dev/null +++ b/mimicgen/exps/templates/robosuite/stack.json @@ -0,0 +1,74 @@ +{ + "name": "stack", + "type": "robosuite", + "experiment": { + "name": "demo", + "source": { + "dataset_path": null, + "filter_key": null, + "n": null, + "start": null + }, + "generation": { + "path": null, + "guarantee": false, + "keep_failed": true, + "num_trials": 10, + "select_src_per_subtask": false, + "transform_first_robot_pose": false, + "interpolate_from_last_target_pose": true + }, + "task": { + "name": null, + "robot": null, + "gripper": null, + "interface": null, + "interface_type": null + }, + "max_num_failures": 50, + "render_video": true, + "num_demo_to_render": 50, + "num_fail_demo_to_render": 50, + "log_every_n_attempts": 50, + "seed": 1 + }, + "obs": { + "collect_obs": true, + "camera_names": [], + "camera_height": 84, + "camera_width": 84 + }, + "task": { + "task_spec": { + "subtask_1": { + "object_ref": "cubeA", + "subtask_term_signal": "grasp", + "subtask_term_offset_range": [ + 10, + 20 + ], + "selection_strategy": "nearest_neighbor_object", + "selection_strategy_kwargs": { + "nn_k": 3 + }, + "action_noise": 0.05, + 
"num_interpolation_steps": 5, + "num_fixed_steps": 0, + "apply_noise_during_interpolation": false + }, + "subtask_2": { + "object_ref": "cubeB", + "subtask_term_signal": null, + "subtask_term_offset_range": null, + "selection_strategy": "nearest_neighbor_object", + "selection_strategy_kwargs": { + "nn_k": 3 + }, + "action_noise": 0.05, + "num_interpolation_steps": 5, + "num_fixed_steps": 0, + "apply_noise_during_interpolation": false + } + } + } +} \ No newline at end of file diff --git a/mimicgen/exps/templates/robosuite/stack_three.json b/mimicgen/exps/templates/robosuite/stack_three.json new file mode 100644 index 0000000..8d3b07c --- /dev/null +++ b/mimicgen/exps/templates/robosuite/stack_three.json @@ -0,0 +1,106 @@ +{ + "name": "stack_three", + "type": "robosuite", + "experiment": { + "name": "demo", + "source": { + "dataset_path": null, + "filter_key": null, + "n": null, + "start": null + }, + "generation": { + "path": null, + "guarantee": false, + "keep_failed": true, + "num_trials": 10, + "select_src_per_subtask": false, + "transform_first_robot_pose": false, + "interpolate_from_last_target_pose": true + }, + "task": { + "name": null, + "robot": null, + "gripper": null, + "interface": null, + "interface_type": null + }, + "max_num_failures": 50, + "render_video": true, + "num_demo_to_render": 50, + "num_fail_demo_to_render": 50, + "log_every_n_attempts": 50, + "seed": 1 + }, + "obs": { + "collect_obs": true, + "camera_names": [], + "camera_height": 84, + "camera_width": 84 + }, + "task": { + "task_spec": { + "subtask_1": { + "object_ref": "cubeA", + "subtask_term_signal": "grasp_1", + "subtask_term_offset_range": [ + 10, + 20 + ], + "selection_strategy": "nearest_neighbor_object", + "selection_strategy_kwargs": { + "nn_k": 3 + }, + "action_noise": 0.05, + "num_interpolation_steps": 5, + "num_fixed_steps": 0, + "apply_noise_during_interpolation": false + }, + "subtask_2": { + "object_ref": "cubeB", + "subtask_term_signal": "stack_1", + 
"subtask_term_offset_range": [ + 10, + 20 + ], + "selection_strategy": "nearest_neighbor_object", + "selection_strategy_kwargs": { + "nn_k": 3 + }, + "action_noise": 0.05, + "num_interpolation_steps": 5, + "num_fixed_steps": 0, + "apply_noise_during_interpolation": false + }, + "subtask_3": { + "object_ref": "cubeC", + "subtask_term_signal": "grasp_2", + "subtask_term_offset_range": [ + 10, + 20 + ], + "selection_strategy": "nearest_neighbor_object", + "selection_strategy_kwargs": { + "nn_k": 3 + }, + "action_noise": 0.05, + "num_interpolation_steps": 5, + "num_fixed_steps": 0, + "apply_noise_during_interpolation": false + }, + "subtask_4": { + "object_ref": "cubeA", + "subtask_term_signal": null, + "subtask_term_offset_range": null, + "selection_strategy": "nearest_neighbor_object", + "selection_strategy_kwargs": { + "nn_k": 3 + }, + "action_noise": 0.05, + "num_interpolation_steps": 5, + "num_fixed_steps": 0, + "apply_noise_during_interpolation": false + } + } + } +} \ No newline at end of file diff --git a/mimicgen/exps/templates/robosuite/threading.json b/mimicgen/exps/templates/robosuite/threading.json new file mode 100644 index 0000000..1511901 --- /dev/null +++ b/mimicgen/exps/templates/robosuite/threading.json @@ -0,0 +1,70 @@ +{ + "name": "threading", + "type": "robosuite", + "experiment": { + "name": "demo", + "source": { + "dataset_path": null, + "filter_key": null, + "n": null, + "start": null + }, + "generation": { + "path": null, + "guarantee": false, + "keep_failed": true, + "num_trials": 10, + "select_src_per_subtask": false, + "transform_first_robot_pose": false, + "interpolate_from_last_target_pose": true + }, + "task": { + "name": null, + "robot": null, + "gripper": null, + "interface": null, + "interface_type": null + }, + "max_num_failures": 50, + "render_video": true, + "num_demo_to_render": 50, + "num_fail_demo_to_render": 50, + "log_every_n_attempts": 50, + "seed": 1 + }, + "obs": { + "collect_obs": true, + "camera_names": [], + 
"camera_height": 84, + "camera_width": 84 + }, + "task": { + "task_spec": { + "subtask_1": { + "object_ref": "needle", + "subtask_term_signal": "grasp", + "subtask_term_offset_range": [ + 5, + 10 + ], + "selection_strategy": "random", + "selection_strategy_kwargs": null, + "action_noise": 0.05, + "num_interpolation_steps": 5, + "num_fixed_steps": 0, + "apply_noise_during_interpolation": false + }, + "subtask_2": { + "object_ref": "tripod", + "subtask_term_signal": null, + "subtask_term_offset_range": null, + "selection_strategy": "random", + "selection_strategy_kwargs": null, + "action_noise": 0.05, + "num_interpolation_steps": 5, + "num_fixed_steps": 0, + "apply_noise_during_interpolation": false + } + } + } +} \ No newline at end of file diff --git a/mimicgen/exps/templates/robosuite/three_piece_assembly.json b/mimicgen/exps/templates/robosuite/three_piece_assembly.json new file mode 100644 index 0000000..2683598 --- /dev/null +++ b/mimicgen/exps/templates/robosuite/three_piece_assembly.json @@ -0,0 +1,98 @@ +{ + "name": "three_piece_assembly", + "type": "robosuite", + "experiment": { + "name": "demo", + "source": { + "dataset_path": null, + "filter_key": null, + "n": null, + "start": null + }, + "generation": { + "path": null, + "guarantee": false, + "keep_failed": true, + "num_trials": 10, + "select_src_per_subtask": false, + "transform_first_robot_pose": false, + "interpolate_from_last_target_pose": true + }, + "task": { + "name": null, + "robot": null, + "gripper": null, + "interface": null, + "interface_type": null + }, + "max_num_failures": 50, + "render_video": true, + "num_demo_to_render": 50, + "num_fail_demo_to_render": 50, + "log_every_n_attempts": 50, + "seed": 1 + }, + "obs": { + "collect_obs": true, + "camera_names": [], + "camera_height": 84, + "camera_width": 84 + }, + "task": { + "task_spec": { + "subtask_1": { + "object_ref": "piece_1", + "subtask_term_signal": "grasp_1", + "subtask_term_offset_range": [ + 5, + 10 + ], + "selection_strategy": 
"random", + "selection_strategy_kwargs": null, + "action_noise": 0.05, + "num_interpolation_steps": 5, + "num_fixed_steps": 0, + "apply_noise_during_interpolation": false + }, + "subtask_2": { + "object_ref": "base", + "subtask_term_signal": "insert_1", + "subtask_term_offset_range": [ + 5, + 10 + ], + "selection_strategy": "random", + "selection_strategy_kwargs": null, + "action_noise": 0.05, + "num_interpolation_steps": 5, + "num_fixed_steps": 0, + "apply_noise_during_interpolation": false + }, + "subtask_3": { + "object_ref": "piece_2", + "subtask_term_signal": "grasp_2", + "subtask_term_offset_range": [ + 5, + 10 + ], + "selection_strategy": "random", + "selection_strategy_kwargs": null, + "action_noise": 0.05, + "num_interpolation_steps": 5, + "num_fixed_steps": 0, + "apply_noise_during_interpolation": false + }, + "subtask_4": { + "object_ref": "piece_1", + "subtask_term_signal": null, + "subtask_term_offset_range": null, + "selection_strategy": "random", + "selection_strategy_kwargs": null, + "action_noise": 0.05, + "num_interpolation_steps": 5, + "num_fixed_steps": 0, + "apply_noise_during_interpolation": false + } + } + } +} \ No newline at end of file diff --git a/mimicgen_envs/models/robosuite/assets/objects/coffee_base.xml b/mimicgen/models/robosuite/assets/objects/coffee_base.xml similarity index 100% rename from mimicgen_envs/models/robosuite/assets/objects/coffee_base.xml rename to mimicgen/models/robosuite/assets/objects/coffee_base.xml diff --git a/mimicgen_envs/models/robosuite/assets/objects/coffee_body.xml b/mimicgen/models/robosuite/assets/objects/coffee_body.xml similarity index 100% rename from mimicgen_envs/models/robosuite/assets/objects/coffee_body.xml rename to mimicgen/models/robosuite/assets/objects/coffee_body.xml diff --git a/mimicgen_envs/models/robosuite/assets/objects/coffee_lid.xml b/mimicgen/models/robosuite/assets/objects/coffee_lid.xml similarity index 100% rename from 
mimicgen_envs/models/robosuite/assets/objects/coffee_lid.xml rename to mimicgen/models/robosuite/assets/objects/coffee_lid.xml diff --git a/mimicgen_envs/models/robosuite/assets/objects/coffee_pod.xml b/mimicgen/models/robosuite/assets/objects/coffee_pod.xml similarity index 100% rename from mimicgen_envs/models/robosuite/assets/objects/coffee_pod.xml rename to mimicgen/models/robosuite/assets/objects/coffee_pod.xml diff --git a/mimicgen_envs/models/robosuite/assets/objects/drawer.xml b/mimicgen/models/robosuite/assets/objects/drawer.xml similarity index 100% rename from mimicgen_envs/models/robosuite/assets/objects/drawer.xml rename to mimicgen/models/robosuite/assets/objects/drawer.xml diff --git a/mimicgen_envs/models/robosuite/assets/objects/drawer_long.xml b/mimicgen/models/robosuite/assets/objects/drawer_long.xml similarity index 100% rename from mimicgen_envs/models/robosuite/assets/objects/drawer_long.xml rename to mimicgen/models/robosuite/assets/objects/drawer_long.xml diff --git a/mimicgen_envs/models/robosuite/assets/objects/meshes/coffee_base.stl b/mimicgen/models/robosuite/assets/objects/meshes/coffee_base.stl similarity index 100% rename from mimicgen_envs/models/robosuite/assets/objects/meshes/coffee_base.stl rename to mimicgen/models/robosuite/assets/objects/meshes/coffee_base.stl diff --git a/mimicgen_envs/models/robosuite/assets/objects/meshes/coffee_body.stl b/mimicgen/models/robosuite/assets/objects/meshes/coffee_body.stl similarity index 100% rename from mimicgen_envs/models/robosuite/assets/objects/meshes/coffee_body.stl rename to mimicgen/models/robosuite/assets/objects/meshes/coffee_body.stl diff --git a/mimicgen_envs/models/robosuite/assets/objects/meshes/coffee_lid.stl b/mimicgen/models/robosuite/assets/objects/meshes/coffee_lid.stl similarity index 100% rename from mimicgen_envs/models/robosuite/assets/objects/meshes/coffee_lid.stl rename to mimicgen/models/robosuite/assets/objects/meshes/coffee_lid.stl diff --git 
a/mimicgen_envs/models/robosuite/assets/objects/meshes/coffee_pod.stl b/mimicgen/models/robosuite/assets/objects/meshes/coffee_pod.stl similarity index 100% rename from mimicgen_envs/models/robosuite/assets/objects/meshes/coffee_pod.stl rename to mimicgen/models/robosuite/assets/objects/meshes/coffee_pod.stl diff --git a/mimicgen_envs/models/robosuite/assets/objects/serving_region.xml b/mimicgen/models/robosuite/assets/objects/serving_region.xml similarity index 100% rename from mimicgen_envs/models/robosuite/assets/objects/serving_region.xml rename to mimicgen/models/robosuite/assets/objects/serving_region.xml diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_0.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_0.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_0.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_0.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_1.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_1.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_1.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_1.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_10.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_10.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_10.obj rename to 
mimicgen/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_10.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_11.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_11.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_11.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_11.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_12.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_12.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_12.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_12.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_13.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_13.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_13.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_13.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_14.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_14.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_14.obj rename to 
mimicgen/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_14.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_15.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_15.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_15.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_15.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_16.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_16.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_16.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_16.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_17.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_17.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_17.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_17.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_18.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_18.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_18.obj rename to 
mimicgen/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_18.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_19.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_19.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_19.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_19.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_2.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_2.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_2.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_2.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_20.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_20.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_20.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_20.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_21.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_21.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_21.obj rename to 
mimicgen/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_21.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_22.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_22.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_22.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_22.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_23.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_23.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_23.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_23.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_24.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_24.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_24.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_24.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_25.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_25.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_25.obj rename to 
mimicgen/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_25.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_26.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_26.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_26.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_26.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_27.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_27.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_27.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_27.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_28.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_28.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_28.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_28.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_29.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_29.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_29.obj rename to 
mimicgen/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_29.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_3.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_3.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_3.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_3.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_30.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_30.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_30.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_30.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_31.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_31.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_31.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_31.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_4.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_4.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_4.obj rename to 
mimicgen/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_4.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_5.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_5.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_5.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_5.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_6.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_6.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_6.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_6.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_7.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_7.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_7.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_7.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_8.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_8.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_8.obj rename to 
mimicgen/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_8.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_9.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_9.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_9.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/128ecbc1/collision/model_normalized_collision_9.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/128ecbc1/meta.json b/mimicgen/models/robosuite/assets/shapenet_core/mugs/128ecbc1/meta.json similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/128ecbc1/meta.json rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/128ecbc1/meta.json diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/128ecbc1/model.xml b/mimicgen/models/robosuite/assets/shapenet_core/mugs/128ecbc1/model.xml similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/128ecbc1/model.xml rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/128ecbc1/model.xml diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/128ecbc1/visual/material_0.mtl b/mimicgen/models/robosuite/assets/shapenet_core/mugs/128ecbc1/visual/material_0.mtl similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/128ecbc1/visual/material_0.mtl rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/128ecbc1/visual/material_0.mtl diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/128ecbc1/visual/model_normalized_0.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/128ecbc1/visual/model_normalized_0.obj similarity index 100% rename from 
mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/128ecbc1/visual/model_normalized_0.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/128ecbc1/visual/model_normalized_0.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/128ecbc1/visual/model_normalized_1.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/128ecbc1/visual/model_normalized_1.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/128ecbc1/visual/model_normalized_1.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/128ecbc1/visual/model_normalized_1.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/128ecbc1/visual/model_normalized_2.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/128ecbc1/visual/model_normalized_2.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/128ecbc1/visual/model_normalized_2.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/128ecbc1/visual/model_normalized_2.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_0.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_0.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_0.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_0.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_1.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_1.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_1.obj rename to 
mimicgen/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_1.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_10.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_10.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_10.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_10.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_11.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_11.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_11.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_11.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_12.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_12.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_12.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_12.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_13.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_13.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_13.obj rename to 
mimicgen/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_13.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_14.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_14.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_14.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_14.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_15.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_15.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_15.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_15.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_16.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_16.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_16.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_16.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_17.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_17.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_17.obj rename to 
mimicgen/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_17.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_18.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_18.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_18.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_18.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_19.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_19.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_19.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_19.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_2.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_2.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_2.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_2.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_20.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_20.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_20.obj rename to 
mimicgen/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_20.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_21.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_21.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_21.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_21.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_22.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_22.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_22.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_22.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_23.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_23.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_23.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_23.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_24.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_24.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_24.obj rename to 
mimicgen/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_24.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_25.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_25.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_25.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_25.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_26.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_26.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_26.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_26.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_27.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_27.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_27.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_27.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_28.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_28.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_28.obj rename to 
mimicgen/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_28.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_29.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_29.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_29.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_29.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_3.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_3.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_3.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_3.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_30.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_30.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_30.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_30.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_31.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_31.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_31.obj rename to 
mimicgen/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_31.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_4.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_4.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_4.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_4.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_5.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_5.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_5.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_5.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_6.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_6.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_6.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_6.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_7.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_7.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_7.obj rename to 
mimicgen/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_7.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_8.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_8.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_8.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_8.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_9.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_9.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_9.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/3143a4ac/collision/model_normalized_collision_9.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/3143a4ac/meta.json b/mimicgen/models/robosuite/assets/shapenet_core/mugs/3143a4ac/meta.json similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/3143a4ac/meta.json rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/3143a4ac/meta.json diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/3143a4ac/model.xml b/mimicgen/models/robosuite/assets/shapenet_core/mugs/3143a4ac/model.xml similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/3143a4ac/model.xml rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/3143a4ac/model.xml diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/3143a4ac/visual/material_0.mtl b/mimicgen/models/robosuite/assets/shapenet_core/mugs/3143a4ac/visual/material_0.mtl similarity index 100% rename from 
mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/3143a4ac/visual/material_0.mtl rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/3143a4ac/visual/material_0.mtl diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/3143a4ac/visual/model_normalized_0.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/3143a4ac/visual/model_normalized_0.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/3143a4ac/visual/model_normalized_0.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/3143a4ac/visual/model_normalized_0.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_0.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_0.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_0.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_0.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_1.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_1.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_1.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_1.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_10.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_10.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_10.obj rename to 
mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_10.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_11.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_11.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_11.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_11.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_12.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_12.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_12.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_12.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_13.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_13.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_13.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_13.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_14.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_14.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_14.obj rename to 
mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_14.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_15.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_15.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_15.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_15.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_16.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_16.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_16.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_16.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_17.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_17.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_17.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_17.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_18.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_18.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_18.obj rename to 
mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_18.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_19.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_19.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_19.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_19.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_2.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_2.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_2.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_2.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_20.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_20.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_20.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_20.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_21.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_21.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_21.obj rename to 
mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_21.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_22.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_22.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_22.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_22.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_23.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_23.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_23.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_23.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_24.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_24.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_24.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_24.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_25.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_25.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_25.obj rename to 
mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_25.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_26.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_26.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_26.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_26.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_27.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_27.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_27.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_27.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_28.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_28.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_28.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_28.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_29.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_29.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_29.obj rename to 
mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_29.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_3.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_3.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_3.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_3.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_30.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_30.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_30.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_30.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_31.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_31.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_31.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_31.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_4.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_4.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_4.obj rename to 
mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_4.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_5.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_5.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_5.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_5.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_6.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_6.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_6.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_6.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_7.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_7.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_7.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_7.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_8.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_8.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_8.obj rename to 
mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_8.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_9.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_9.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_9.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/collision/model_normalized_collision_9.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/meta.json b/mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/meta.json similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/meta.json rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/meta.json diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/model.xml b/mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/model.xml similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/model.xml rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/model.xml diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/visual/material_0.jpeg b/mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/visual/material_0.jpeg similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/visual/material_0.jpeg rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/visual/material_0.jpeg diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/visual/material_0.mtl b/mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/visual/material_0.mtl similarity index 100% rename from 
mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/visual/material_0.mtl rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/visual/material_0.mtl diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/visual/model_normalized_0.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/visual/model_normalized_0.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/visual/model_normalized_0.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/visual/model_normalized_0.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/visual/model_normalized_1.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/visual/model_normalized_1.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/visual/model_normalized_1.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/visual/model_normalized_1.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/visual/model_normalized_2.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/visual/model_normalized_2.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/visual/model_normalized_2.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/visual/model_normalized_2.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/visual/model_normalized_3.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/visual/model_normalized_3.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/visual/model_normalized_3.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/visual/model_normalized_3.obj diff --git 
a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/visual/model_normalized_4.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/visual/model_normalized_4.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/visual/model_normalized_4.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/visual/model_normalized_4.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/visual/texture0.png b/mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/visual/texture0.png similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/visual/texture0.png rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/visual/texture0.png diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/visual/texture1.png b/mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/visual/texture1.png similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/visual/texture1.png rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/visual/texture1.png diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/visual/texture2.png b/mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/visual/texture2.png similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/visual/texture2.png rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/visual/texture2.png diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/visual/texture3.png b/mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/visual/texture3.png similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/345d3e72/visual/texture3.png rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/345d3e72/visual/texture3.png diff 
--git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_0.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_0.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_0.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_0.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_1.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_1.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_1.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_1.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_10.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_10.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_10.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_10.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_11.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_11.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_11.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_11.obj diff --git 
a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_12.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_12.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_12.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_12.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_13.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_13.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_13.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_13.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_14.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_14.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_14.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_14.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_15.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_15.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_15.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_15.obj diff --git 
a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_16.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_16.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_16.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_16.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_17.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_17.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_17.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_17.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_18.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_18.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_18.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_18.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_19.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_19.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_19.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_19.obj diff --git 
a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_2.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_2.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_2.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_2.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_20.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_20.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_20.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_20.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_21.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_21.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_21.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_21.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_22.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_22.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_22.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_22.obj diff --git 
a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_23.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_23.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_23.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_23.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_24.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_24.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_24.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_24.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_25.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_25.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_25.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_25.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_26.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_26.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_26.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_26.obj diff --git 
a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_27.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_27.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_27.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_27.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_28.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_28.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_28.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_28.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_29.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_29.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_29.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_29.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_3.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_3.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_3.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_3.obj diff --git 
a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_30.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_30.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_30.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_30.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_31.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_31.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_31.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_31.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_4.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_4.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_4.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_4.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_5.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_5.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_5.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_5.obj diff --git 
a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_6.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_6.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_6.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_6.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_7.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_7.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_7.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_7.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_8.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_8.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_8.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_8.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_9.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_9.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_9.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/34ae0b61/collision/model_normalized_collision_9.obj diff --git 
a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/34ae0b61/meta.json b/mimicgen/models/robosuite/assets/shapenet_core/mugs/34ae0b61/meta.json similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/34ae0b61/meta.json rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/34ae0b61/meta.json diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/34ae0b61/model.xml b/mimicgen/models/robosuite/assets/shapenet_core/mugs/34ae0b61/model.xml similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/34ae0b61/model.xml rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/34ae0b61/model.xml diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/34ae0b61/visual/material_0.mtl b/mimicgen/models/robosuite/assets/shapenet_core/mugs/34ae0b61/visual/material_0.mtl similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/34ae0b61/visual/material_0.mtl rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/34ae0b61/visual/material_0.mtl diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/34ae0b61/visual/model_normalized_0.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/34ae0b61/visual/model_normalized_0.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/34ae0b61/visual/model_normalized_0.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/34ae0b61/visual/model_normalized_0.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/34ae0b61/visual/model_normalized_1.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/34ae0b61/visual/model_normalized_1.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/34ae0b61/visual/model_normalized_1.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/34ae0b61/visual/model_normalized_1.obj diff --git 
a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/34ae0b61/visual/model_normalized_2.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/34ae0b61/visual/model_normalized_2.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/34ae0b61/visual/model_normalized_2.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/34ae0b61/visual/model_normalized_2.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_0.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_0.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_0.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_0.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_1.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_1.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_1.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_1.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_10.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_10.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_10.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_10.obj diff --git 
a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_11.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_11.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_11.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_11.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_12.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_12.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_12.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_12.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_13.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_13.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_13.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_13.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_14.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_14.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_14.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_14.obj diff --git 
a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_15.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_15.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_15.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_15.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_16.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_16.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_16.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_16.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_17.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_17.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_17.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_17.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_18.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_18.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_18.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_18.obj diff --git 
a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_19.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_19.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_19.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_19.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_2.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_2.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_2.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_2.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_20.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_20.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_20.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_20.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_21.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_21.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_21.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_21.obj diff --git 
a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_22.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_22.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_22.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_22.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_23.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_23.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_23.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_23.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_24.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_24.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_24.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_24.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_25.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_25.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_25.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_25.obj diff --git 
a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_26.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_26.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_26.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_26.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_27.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_27.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_27.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_27.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_28.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_28.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_28.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_28.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_29.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_29.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_29.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_29.obj diff --git 
a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_3.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_3.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_3.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_3.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_30.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_30.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_30.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_30.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_31.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_31.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_31.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_31.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_4.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_4.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_4.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_4.obj diff --git 
a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_5.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_5.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_5.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_5.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_6.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_6.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_6.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_6.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_7.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_7.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_7.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_7.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_8.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_8.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_8.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_8.obj diff --git 
a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_9.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_9.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_9.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/48e260a6/collision/model_normalized_collision_9.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/48e260a6/meta.json b/mimicgen/models/robosuite/assets/shapenet_core/mugs/48e260a6/meta.json similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/48e260a6/meta.json rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/48e260a6/meta.json diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/48e260a6/model.xml b/mimicgen/models/robosuite/assets/shapenet_core/mugs/48e260a6/model.xml similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/48e260a6/model.xml rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/48e260a6/model.xml diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/48e260a6/visual/material_0.mtl b/mimicgen/models/robosuite/assets/shapenet_core/mugs/48e260a6/visual/material_0.mtl similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/48e260a6/visual/material_0.mtl rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/48e260a6/visual/material_0.mtl diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/48e260a6/visual/model_normalized_0.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/48e260a6/visual/model_normalized_0.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/48e260a6/visual/model_normalized_0.obj rename to 
mimicgen/models/robosuite/assets/shapenet_core/mugs/48e260a6/visual/model_normalized_0.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/48e260a6/visual/model_normalized_1.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/48e260a6/visual/model_normalized_1.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/48e260a6/visual/model_normalized_1.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/48e260a6/visual/model_normalized_1.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_0.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_0.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_0.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_0.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_1.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_1.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_1.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_1.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_10.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_10.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_10.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_10.obj diff --git 
a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_11.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_11.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_11.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_11.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_12.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_12.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_12.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_12.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_13.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_13.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_13.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_13.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_14.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_14.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_14.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_14.obj diff --git 
a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_15.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_15.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_15.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_15.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_16.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_16.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_16.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_16.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_17.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_17.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_17.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_17.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_18.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_18.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_18.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_18.obj diff --git 
a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_19.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_19.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_19.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_19.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_2.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_2.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_2.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_2.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_20.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_20.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_20.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_20.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_21.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_21.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_21.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_21.obj diff --git 
a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_22.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_22.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_22.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_22.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_23.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_23.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_23.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_23.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_24.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_24.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_24.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_24.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_25.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_25.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_25.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_25.obj diff --git 
a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_26.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_26.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_26.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_26.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_27.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_27.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_27.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_27.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_28.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_28.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_28.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_28.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_29.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_29.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_29.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_29.obj diff --git 
a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_3.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_3.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_3.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_3.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_30.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_30.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_30.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_30.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_31.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_31.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_31.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_31.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_4.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_4.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_4.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_4.obj diff --git 
a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_5.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_5.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_5.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_5.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_6.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_6.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_6.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_6.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_7.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_7.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_7.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_7.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_8.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_8.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_8.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_8.obj diff --git 
a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_9.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_9.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_9.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/5fe74bab/collision/model_normalized_collision_9.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/5fe74bab/meta.json b/mimicgen/models/robosuite/assets/shapenet_core/mugs/5fe74bab/meta.json similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/5fe74bab/meta.json rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/5fe74bab/meta.json diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/5fe74bab/model.xml b/mimicgen/models/robosuite/assets/shapenet_core/mugs/5fe74bab/model.xml similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/5fe74bab/model.xml rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/5fe74bab/model.xml diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/5fe74bab/visual/material_0.mtl b/mimicgen/models/robosuite/assets/shapenet_core/mugs/5fe74bab/visual/material_0.mtl similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/5fe74bab/visual/material_0.mtl rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/5fe74bab/visual/material_0.mtl diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/5fe74bab/visual/model_normalized_0.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/5fe74bab/visual/model_normalized_0.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/5fe74bab/visual/model_normalized_0.obj rename to 
mimicgen/models/robosuite/assets/shapenet_core/mugs/5fe74bab/visual/model_normalized_0.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/5fe74bab/visual/model_normalized_1.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/5fe74bab/visual/model_normalized_1.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/5fe74bab/visual/model_normalized_1.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/5fe74bab/visual/model_normalized_1.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/5fe74bab/visual/model_normalized_2.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/5fe74bab/visual/model_normalized_2.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/5fe74bab/visual/model_normalized_2.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/5fe74bab/visual/model_normalized_2.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_0.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_0.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_0.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_0.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_1.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_1.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_1.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_1.obj diff --git 
a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_10.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_10.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_10.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_10.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_11.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_11.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_11.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_11.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_12.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_12.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_12.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_12.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_13.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_13.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_13.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_13.obj diff --git 
a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_14.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_14.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_14.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_14.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_15.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_15.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_15.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_15.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_16.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_16.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_16.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_16.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_17.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_17.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_17.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_17.obj diff --git 
a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_18.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_18.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_18.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_18.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_19.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_19.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_19.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_19.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_2.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_2.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_2.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_2.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_20.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_20.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_20.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_20.obj diff --git 
a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_21.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_21.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_21.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_21.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_22.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_22.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_22.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_22.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_23.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_23.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_23.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_23.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_24.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_24.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_24.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_24.obj diff --git 
a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_25.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_25.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_25.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_25.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_26.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_26.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_26.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_26.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_27.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_27.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_27.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_27.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_28.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_28.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_28.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_28.obj diff --git 
a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_29.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_29.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_29.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_29.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_3.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_3.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_3.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_3.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_30.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_30.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_30.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_30.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_31.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_31.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_31.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_31.obj diff --git 
a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_4.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_4.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_4.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_4.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_5.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_5.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_5.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_5.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_6.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_6.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_6.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_6.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_7.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_7.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_7.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_7.obj diff --git 
a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_8.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_8.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_8.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_8.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_9.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_9.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_9.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/8012f52d/collision/model_normalized_collision_9.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/8012f52d/meta.json b/mimicgen/models/robosuite/assets/shapenet_core/mugs/8012f52d/meta.json similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/8012f52d/meta.json rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/8012f52d/meta.json diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/8012f52d/model.xml b/mimicgen/models/robosuite/assets/shapenet_core/mugs/8012f52d/model.xml similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/8012f52d/model.xml rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/8012f52d/model.xml diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/8012f52d/visual/material_0.jpeg b/mimicgen/models/robosuite/assets/shapenet_core/mugs/8012f52d/visual/material_0.jpeg similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/8012f52d/visual/material_0.jpeg rename to 
mimicgen/models/robosuite/assets/shapenet_core/mugs/8012f52d/visual/material_0.jpeg diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/8012f52d/visual/material_0.mtl b/mimicgen/models/robosuite/assets/shapenet_core/mugs/8012f52d/visual/material_0.mtl similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/8012f52d/visual/material_0.mtl rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/8012f52d/visual/material_0.mtl diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/8012f52d/visual/model_normalized_0.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/8012f52d/visual/model_normalized_0.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/8012f52d/visual/model_normalized_0.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/8012f52d/visual/model_normalized_0.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/8012f52d/visual/model_normalized_1.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/8012f52d/visual/model_normalized_1.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/8012f52d/visual/model_normalized_1.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/8012f52d/visual/model_normalized_1.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/8012f52d/visual/model_normalized_2.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/8012f52d/visual/model_normalized_2.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/8012f52d/visual/model_normalized_2.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/8012f52d/visual/model_normalized_2.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/8012f52d/visual/model_normalized_3.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/8012f52d/visual/model_normalized_3.obj similarity 
index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/8012f52d/visual/model_normalized_3.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/8012f52d/visual/model_normalized_3.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/8012f52d/visual/model_normalized_4.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/8012f52d/visual/model_normalized_4.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/8012f52d/visual/model_normalized_4.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/8012f52d/visual/model_normalized_4.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/8012f52d/visual/texture0.png b/mimicgen/models/robosuite/assets/shapenet_core/mugs/8012f52d/visual/texture0.png similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/8012f52d/visual/texture0.png rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/8012f52d/visual/texture0.png diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_0.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_0.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_0.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_0.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_1.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_1.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_1.obj rename to 
mimicgen/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_1.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_10.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_10.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_10.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_10.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_11.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_11.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_11.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_11.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_12.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_12.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_12.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_12.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_13.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_13.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_13.obj rename to 
mimicgen/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_13.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_14.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_14.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_14.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_14.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_15.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_15.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_15.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_15.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_16.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_16.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_16.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_16.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_17.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_17.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_17.obj rename to 
mimicgen/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_17.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_18.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_18.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_18.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_18.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_19.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_19.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_19.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_19.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_2.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_2.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_2.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_2.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_20.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_20.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_20.obj rename to 
mimicgen/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_20.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_21.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_21.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_21.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_21.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_22.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_22.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_22.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_22.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_23.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_23.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_23.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_23.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_24.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_24.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_24.obj rename to 
mimicgen/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_24.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_25.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_25.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_25.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_25.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_26.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_26.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_26.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_26.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_27.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_27.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_27.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_27.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_28.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_28.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_28.obj rename to 
mimicgen/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_28.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_29.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_29.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_29.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_29.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_3.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_3.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_3.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_3.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_30.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_30.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_30.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_30.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_31.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_31.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_31.obj rename to 
mimicgen/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_31.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_4.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_4.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_4.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_4.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_5.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_5.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_5.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_5.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_6.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_6.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_6.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_6.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_7.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_7.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_7.obj rename to 
mimicgen/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_7.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_8.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_8.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_8.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_8.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_9.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_9.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_9.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/collision/model_normalized_collision_9.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/meta.json b/mimicgen/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/meta.json similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/meta.json rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/meta.json diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/model.xml b/mimicgen/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/model.xml similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/model.xml rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/model.xml diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/visual/material_0.mtl b/mimicgen/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/visual/material_0.mtl similarity index 100% rename from 
mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/visual/material_0.mtl rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/visual/material_0.mtl diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/visual/model_normalized_0.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/visual/model_normalized_0.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/visual/model_normalized_0.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/visual/model_normalized_0.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/visual/model_normalized_1.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/visual/model_normalized_1.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/visual/model_normalized_1.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/b4ae56d6/visual/model_normalized_1.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_0.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_0.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_0.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_0.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_1.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_1.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_1.obj rename to 
mimicgen/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_1.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_10.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_10.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_10.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_10.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_11.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_11.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_11.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_11.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_12.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_12.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_12.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_12.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_13.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_13.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_13.obj rename to 
mimicgen/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_13.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_14.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_14.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_14.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_14.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_15.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_15.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_15.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_15.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_16.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_16.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_16.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_16.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_17.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_17.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_17.obj rename to 
mimicgen/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_17.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_18.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_18.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_18.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_18.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_19.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_19.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_19.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_19.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_2.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_2.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_2.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_2.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_20.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_20.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_20.obj rename to 
mimicgen/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_20.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_21.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_21.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_21.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_21.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_22.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_22.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_22.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_22.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_23.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_23.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_23.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_23.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_24.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_24.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_24.obj rename to 
mimicgen/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_24.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_25.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_25.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_25.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_25.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_26.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_26.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_26.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_26.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_27.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_27.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_27.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_27.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_28.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_28.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_28.obj rename to 
mimicgen/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_28.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_29.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_29.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_29.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_29.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_3.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_3.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_3.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_3.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_30.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_30.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_30.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_30.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_31.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_31.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_31.obj rename to 
mimicgen/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_31.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_4.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_4.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_4.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_4.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_5.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_5.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_5.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_5.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_6.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_6.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_6.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_6.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_7.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_7.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_7.obj rename to 
mimicgen/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_7.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_8.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_8.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_8.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_8.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_9.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_9.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_9.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/c2eacc52/collision/model_normalized_collision_9.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/c2eacc52/meta.json b/mimicgen/models/robosuite/assets/shapenet_core/mugs/c2eacc52/meta.json similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/c2eacc52/meta.json rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/c2eacc52/meta.json diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/c2eacc52/model.xml b/mimicgen/models/robosuite/assets/shapenet_core/mugs/c2eacc52/model.xml similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/c2eacc52/model.xml rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/c2eacc52/model.xml diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/c2eacc52/visual/material_0.jpeg b/mimicgen/models/robosuite/assets/shapenet_core/mugs/c2eacc52/visual/material_0.jpeg similarity index 100% rename 
from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/c2eacc52/visual/material_0.jpeg rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/c2eacc52/visual/material_0.jpeg diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/c2eacc52/visual/material_0.mtl b/mimicgen/models/robosuite/assets/shapenet_core/mugs/c2eacc52/visual/material_0.mtl similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/c2eacc52/visual/material_0.mtl rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/c2eacc52/visual/material_0.mtl diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/c2eacc52/visual/model_normalized_0.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/c2eacc52/visual/model_normalized_0.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/c2eacc52/visual/model_normalized_0.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/c2eacc52/visual/model_normalized_0.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/c2eacc52/visual/model_normalized_1.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/c2eacc52/visual/model_normalized_1.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/c2eacc52/visual/model_normalized_1.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/c2eacc52/visual/model_normalized_1.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/c2eacc52/visual/model_normalized_2.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/c2eacc52/visual/model_normalized_2.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/c2eacc52/visual/model_normalized_2.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/c2eacc52/visual/model_normalized_2.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/c2eacc52/visual/texture0.png 
b/mimicgen/models/robosuite/assets/shapenet_core/mugs/c2eacc52/visual/texture0.png similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/c2eacc52/visual/texture0.png rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/c2eacc52/visual/texture0.png diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/c2eacc52/visual/texture1.png b/mimicgen/models/robosuite/assets/shapenet_core/mugs/c2eacc52/visual/texture1.png similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/c2eacc52/visual/texture1.png rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/c2eacc52/visual/texture1.png diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_0.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_0.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_0.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_0.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_1.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_1.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_1.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_1.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_10.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_10.obj similarity index 100% rename from 
mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_10.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_10.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_11.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_11.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_11.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_11.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_12.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_12.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_12.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_12.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_13.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_13.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_13.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_13.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_14.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_14.obj similarity index 100% rename from 
mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_14.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_14.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_15.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_15.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_15.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_15.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_16.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_16.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_16.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_16.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_17.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_17.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_17.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_17.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_18.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_18.obj similarity index 100% rename from 
mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_18.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_18.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_19.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_19.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_19.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_19.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_2.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_2.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_2.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_2.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_20.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_20.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_20.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_20.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_21.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_21.obj similarity index 100% rename from 
mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_21.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_21.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_22.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_22.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_22.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_22.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_23.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_23.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_23.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_23.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_24.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_24.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_24.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_24.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_25.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_25.obj similarity index 100% rename from 
mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_25.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_25.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_26.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_26.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_26.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_26.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_27.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_27.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_27.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_27.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_28.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_28.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_28.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_28.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_29.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_29.obj similarity index 100% rename from 
mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_29.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_29.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_3.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_3.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_3.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_3.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_30.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_30.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_30.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_30.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_31.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_31.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_31.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_31.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_4.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_4.obj similarity index 100% rename from 
mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_4.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_4.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_5.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_5.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_5.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_5.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_6.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_6.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_6.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_6.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_7.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_7.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_7.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_7.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_8.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_8.obj similarity index 100% rename from 
mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_8.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_8.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_9.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_9.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_9.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/d75af64a/collision/model_normalized_collision_9.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/d75af64a/meta.json b/mimicgen/models/robosuite/assets/shapenet_core/mugs/d75af64a/meta.json similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/d75af64a/meta.json rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/d75af64a/meta.json diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/d75af64a/model.xml b/mimicgen/models/robosuite/assets/shapenet_core/mugs/d75af64a/model.xml similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/d75af64a/model.xml rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/d75af64a/model.xml diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/d75af64a/visual/material_0.mtl b/mimicgen/models/robosuite/assets/shapenet_core/mugs/d75af64a/visual/material_0.mtl similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/d75af64a/visual/material_0.mtl rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/d75af64a/visual/material_0.mtl diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/d75af64a/visual/model_normalized_0.obj 
b/mimicgen/models/robosuite/assets/shapenet_core/mugs/d75af64a/visual/model_normalized_0.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/d75af64a/visual/model_normalized_0.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/d75af64a/visual/model_normalized_0.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/d75af64a/visual/model_normalized_1.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/d75af64a/visual/model_normalized_1.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/d75af64a/visual/model_normalized_1.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/d75af64a/visual/model_normalized_1.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/d75af64a/visual/model_normalized_2.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/d75af64a/visual/model_normalized_2.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/d75af64a/visual/model_normalized_2.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/d75af64a/visual/model_normalized_2.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/d75af64a/visual/model_normalized_3.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/d75af64a/visual/model_normalized_3.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/d75af64a/visual/model_normalized_3.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/d75af64a/visual/model_normalized_3.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_0.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_0.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_0.obj 
rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_0.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_1.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_1.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_1.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_1.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_10.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_10.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_10.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_10.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_11.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_11.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_11.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_11.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_12.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_12.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_12.obj rename to 
mimicgen/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_12.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_13.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_13.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_13.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_13.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_14.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_14.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_14.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_14.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_15.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_15.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_15.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_15.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_16.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_16.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_16.obj rename to 
mimicgen/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_16.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_17.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_17.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_17.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_17.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_18.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_18.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_18.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_18.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_19.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_19.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_19.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_19.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_2.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_2.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_2.obj rename to 
mimicgen/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_2.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_20.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_20.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_20.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_20.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_21.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_21.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_21.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_21.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_22.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_22.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_22.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_22.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_23.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_23.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_23.obj rename to 
mimicgen/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_23.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_24.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_24.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_24.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_24.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_25.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_25.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_25.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_25.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_26.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_26.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_26.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_26.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_27.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_27.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_27.obj rename to 
mimicgen/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_27.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_28.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_28.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_28.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_28.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_29.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_29.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_29.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_29.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_3.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_3.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_3.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_3.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_30.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_30.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_30.obj rename to 
mimicgen/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_30.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_31.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_31.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_31.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_31.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_4.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_4.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_4.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_4.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_5.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_5.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_5.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_5.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_6.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_6.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_6.obj rename to 
mimicgen/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_6.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_7.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_7.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_7.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_7.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_8.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_8.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_8.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_8.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_9.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_9.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_9.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/e94e46bc/collision/model_normalized_collision_9.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/e94e46bc/meta.json b/mimicgen/models/robosuite/assets/shapenet_core/mugs/e94e46bc/meta.json similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/e94e46bc/meta.json rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/e94e46bc/meta.json diff --git 
a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/e94e46bc/model.xml b/mimicgen/models/robosuite/assets/shapenet_core/mugs/e94e46bc/model.xml similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/e94e46bc/model.xml rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/e94e46bc/model.xml diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/e94e46bc/visual/material_0.mtl b/mimicgen/models/robosuite/assets/shapenet_core/mugs/e94e46bc/visual/material_0.mtl similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/e94e46bc/visual/material_0.mtl rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/e94e46bc/visual/material_0.mtl diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/e94e46bc/visual/model_normalized_0.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/e94e46bc/visual/model_normalized_0.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/e94e46bc/visual/model_normalized_0.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/e94e46bc/visual/model_normalized_0.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/e94e46bc/visual/model_normalized_1.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/e94e46bc/visual/model_normalized_1.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/e94e46bc/visual/model_normalized_1.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/e94e46bc/visual/model_normalized_1.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_0.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_0.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_0.obj rename to 
mimicgen/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_0.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_1.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_1.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_1.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_1.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_10.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_10.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_10.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_10.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_11.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_11.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_11.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_11.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_12.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_12.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_12.obj rename to 
mimicgen/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_12.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_13.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_13.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_13.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_13.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_14.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_14.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_14.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_14.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_15.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_15.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_15.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_15.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_16.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_16.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_16.obj rename to 
mimicgen/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_16.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_17.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_17.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_17.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_17.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_18.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_18.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_18.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_18.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_19.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_19.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_19.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_19.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_2.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_2.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_2.obj rename to 
mimicgen/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_2.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_20.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_20.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_20.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_20.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_21.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_21.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_21.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_21.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_22.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_22.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_22.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_22.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_23.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_23.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_23.obj rename to 
mimicgen/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_23.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_24.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_24.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_24.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_24.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_25.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_25.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_25.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_25.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_26.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_26.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_26.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_26.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_27.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_27.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_27.obj rename to 
mimicgen/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_27.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_28.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_28.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_28.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_28.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_29.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_29.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_29.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_29.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_3.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_3.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_3.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_3.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_30.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_30.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_30.obj rename to 
mimicgen/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_30.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_31.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_31.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_31.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_31.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_4.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_4.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_4.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_4.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_5.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_5.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_5.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_5.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_6.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_6.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_6.obj rename to 
mimicgen/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_6.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_7.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_7.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_7.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_7.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_8.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_8.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_8.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_8.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_9.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_9.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_9.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/fad118b3/collision/model_normalized_collision_9.obj diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/fad118b3/meta.json b/mimicgen/models/robosuite/assets/shapenet_core/mugs/fad118b3/meta.json similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/fad118b3/meta.json rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/fad118b3/meta.json diff --git 
a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/fad118b3/model.xml b/mimicgen/models/robosuite/assets/shapenet_core/mugs/fad118b3/model.xml similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/fad118b3/model.xml rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/fad118b3/model.xml diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/fad118b3/visual/material_0.mtl b/mimicgen/models/robosuite/assets/shapenet_core/mugs/fad118b3/visual/material_0.mtl similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/fad118b3/visual/material_0.mtl rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/fad118b3/visual/material_0.mtl diff --git a/mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/fad118b3/visual/model_normalized_0.obj b/mimicgen/models/robosuite/assets/shapenet_core/mugs/fad118b3/visual/model_normalized_0.obj similarity index 100% rename from mimicgen_envs/models/robosuite/assets/shapenet_core/mugs/fad118b3/visual/model_normalized_0.obj rename to mimicgen/models/robosuite/assets/shapenet_core/mugs/fad118b3/visual/model_normalized_0.obj diff --git a/mimicgen_envs/models/robosuite/assets/textures/ceramic.png b/mimicgen/models/robosuite/assets/textures/ceramic.png similarity index 100% rename from mimicgen_envs/models/robosuite/assets/textures/ceramic.png rename to mimicgen/models/robosuite/assets/textures/ceramic.png diff --git a/mimicgen_envs/models/robosuite/objects/__init__.py b/mimicgen/models/robosuite/objects/__init__.py similarity index 72% rename from mimicgen_envs/models/robosuite/objects/__init__.py rename to mimicgen/models/robosuite/objects/__init__.py index 7df5f0e..2b1a797 100644 --- a/mimicgen_envs/models/robosuite/objects/__init__.py +++ b/mimicgen/models/robosuite/objects/__init__.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. 
All rights reserved. # # Licensed under the NVIDIA Source Code License [see LICENSE for details]. diff --git a/mimicgen_envs/models/robosuite/objects/composite/__init__.py b/mimicgen/models/robosuite/objects/composite/__init__.py similarity index 80% rename from mimicgen_envs/models/robosuite/objects/composite/__init__.py rename to mimicgen/models/robosuite/objects/composite/__init__.py index 6bbdf49..4542ffe 100644 --- a/mimicgen_envs/models/robosuite/objects/composite/__init__.py +++ b/mimicgen/models/robosuite/objects/composite/__init__.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # # Licensed under the NVIDIA Source Code License [see LICENSE for details]. diff --git a/mimicgen_envs/models/robosuite/objects/composite/box_pattern_object.py b/mimicgen/models/robosuite/objects/composite/box_pattern_object.py similarity index 98% rename from mimicgen_envs/models/robosuite/objects/composite/box_pattern_object.py rename to mimicgen/models/robosuite/objects/composite/box_pattern_object.py index 15a37ee..114ac98 100644 --- a/mimicgen_envs/models/robosuite/objects/composite/box_pattern_object.py +++ b/mimicgen/models/robosuite/objects/composite/box_pattern_object.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # # Licensed under the NVIDIA Source Code License [see LICENSE for details]. 
diff --git a/mimicgen_envs/models/robosuite/objects/composite/hollow_cylinder.py b/mimicgen/models/robosuite/objects/composite/hollow_cylinder.py similarity index 98% rename from mimicgen_envs/models/robosuite/objects/composite/hollow_cylinder.py rename to mimicgen/models/robosuite/objects/composite/hollow_cylinder.py index e819fc4..88de19c 100644 --- a/mimicgen_envs/models/robosuite/objects/composite/hollow_cylinder.py +++ b/mimicgen/models/robosuite/objects/composite/hollow_cylinder.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # # Licensed under the NVIDIA Source Code License [see LICENSE for details]. diff --git a/mimicgen_envs/models/robosuite/objects/composite/needle.py b/mimicgen/models/robosuite/objects/composite/needle.py similarity index 98% rename from mimicgen_envs/models/robosuite/objects/composite/needle.py rename to mimicgen/models/robosuite/objects/composite/needle.py index 7777f9b..226a523 100644 --- a/mimicgen_envs/models/robosuite/objects/composite/needle.py +++ b/mimicgen/models/robosuite/objects/composite/needle.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # # Licensed under the NVIDIA Source Code License [see LICENSE for details]. diff --git a/mimicgen_envs/models/robosuite/objects/composite/ring_tripod.py b/mimicgen/models/robosuite/objects/composite/ring_tripod.py similarity index 99% rename from mimicgen_envs/models/robosuite/objects/composite/ring_tripod.py rename to mimicgen/models/robosuite/objects/composite/ring_tripod.py index 0644cf6..c49f613 100644 --- a/mimicgen_envs/models/robosuite/objects/composite/ring_tripod.py +++ b/mimicgen/models/robosuite/objects/composite/ring_tripod.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+# Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # # Licensed under the NVIDIA Source Code License [see LICENSE for details]. diff --git a/mimicgen_envs/models/robosuite/objects/composite_body/__init__.py b/mimicgen/models/robosuite/objects/composite_body/__init__.py similarity index 71% rename from mimicgen_envs/models/robosuite/objects/composite_body/__init__.py rename to mimicgen/models/robosuite/objects/composite_body/__init__.py index 7a250c3..493c2c6 100644 --- a/mimicgen_envs/models/robosuite/objects/composite_body/__init__.py +++ b/mimicgen/models/robosuite/objects/composite_body/__init__.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # # Licensed under the NVIDIA Source Code License [see LICENSE for details]. diff --git a/mimicgen_envs/models/robosuite/objects/composite_body/coffee_machine.py b/mimicgen/models/robosuite/objects/composite_body/coffee_machine.py similarity index 95% rename from mimicgen_envs/models/robosuite/objects/composite_body/coffee_machine.py rename to mimicgen/models/robosuite/objects/composite_body/coffee_machine.py index fa76ae2..48c7777 100644 --- a/mimicgen_envs/models/robosuite/objects/composite_body/coffee_machine.py +++ b/mimicgen/models/robosuite/objects/composite_body/coffee_machine.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # # Licensed under the NVIDIA Source Code License [see LICENSE for details]. 
@@ -9,8 +9,8 @@ from robosuite.models.objects import CompositeBodyObject, BoxObject -from mimicgen_envs.models.robosuite.objects.composite_body.cup import CupObject -from mimicgen_envs.models.robosuite.objects.xml_objects import CoffeeMachineBodyObject, CoffeeMachineLidObject, CoffeeMachineBaseObject +from mimicgen.models.robosuite.objects.composite_body.cup import CupObject +from mimicgen.models.robosuite.objects.xml_objects import CoffeeMachineBodyObject, CoffeeMachineLidObject, CoffeeMachineBaseObject class CoffeeMachineObject(CompositeBodyObject): diff --git a/mimicgen_envs/models/robosuite/objects/composite_body/cup.py b/mimicgen/models/robosuite/objects/composite_body/cup.py similarity index 97% rename from mimicgen_envs/models/robosuite/objects/composite_body/cup.py rename to mimicgen/models/robosuite/objects/composite_body/cup.py index a5b9229..0b0d4c7 100644 --- a/mimicgen_envs/models/robosuite/objects/composite_body/cup.py +++ b/mimicgen/models/robosuite/objects/composite_body/cup.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # # Licensed under the NVIDIA Source Code License [see LICENSE for details]. 
@@ -9,7 +9,7 @@ from robosuite.utils.mjcf_utils import RED, BLUE, CustomMaterial from robosuite.models.objects import CompositeBodyObject, BoxObject, CylinderObject -from mimicgen_envs.models.robosuite.objects import HollowCylinderObject +from mimicgen.models.robosuite.objects import HollowCylinderObject class CupObject(CompositeBodyObject): diff --git a/mimicgen_envs/models/robosuite/objects/xml_objects.py b/mimicgen/models/robosuite/objects/xml_objects.py similarity index 97% rename from mimicgen_envs/models/robosuite/objects/xml_objects.py rename to mimicgen/models/robosuite/objects/xml_objects.py index 702f45f..56b1ca0 100644 --- a/mimicgen_envs/models/robosuite/objects/xml_objects.py +++ b/mimicgen/models/robosuite/objects/xml_objects.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # # Licensed under the NVIDIA Source Code License [see LICENSE for details]. @@ -13,9 +13,9 @@ from robosuite.models.objects import MujocoXMLObject from robosuite.utils.mjcf_utils import string_to_array, array_to_string -import mimicgen_envs +import mimicgen -XML_ASSETS_BASE_PATH = os.path.join(mimicgen_envs.__path__[0], "models/robosuite/assets") +XML_ASSETS_BASE_PATH = os.path.join(mimicgen.__path__[0], "models/robosuite/assets") class BlenderObject(MujocoXMLObject): diff --git a/mimicgen/scripts/__init__.py b/mimicgen/scripts/__init__.py new file mode 100644 index 0000000..8edfcfd --- /dev/null +++ b/mimicgen/scripts/__init__.py @@ -0,0 +1,3 @@ +# Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# Licensed under the NVIDIA Source Code License [see LICENSE for details]. 
\ No newline at end of file diff --git a/mimicgen/scripts/annotate_subtasks.py b/mimicgen/scripts/annotate_subtasks.py new file mode 100644 index 0000000..290cc30 --- /dev/null +++ b/mimicgen/scripts/annotate_subtasks.py @@ -0,0 +1,409 @@ +# Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# Licensed under the NVIDIA Source Code License [see LICENSE for details]. + +""" +A script to playback demonstrations (using visual observations and the pygame renderer) +in order to allow a user to annotate portions of the demonstrations. This is useful +to annotate the end of each object-centric subtask in each source demonstration used +by MimicGen, as an alternative to implementing subtask termination signals directly +in the simulation environment. + +Examples: + + # specify the sequence of signals that should be annotated and the dataset images to render on-screen + python annotate_subtasks.py --dataset /path/to/demo.hdf5 --signals grasp_1 insert_1 grasp_2 \ + --render_image_names agentview_image robot0_eye_in_hand_image + + # limit annotation to first 2 demos + python annotate_subtasks.py --dataset /path/to/demo.hdf5 --signals grasp_1 insert_1 grasp_2 \ + --render_image_names agentview_image robot0_eye_in_hand_image --n 2 + + # limit annotation to demo 2 and 3 + python annotate_subtasks.py --dataset /path/to/demo.hdf5 --signals grasp_1 insert_1 grasp_2 \ + --render_image_names agentview_image robot0_eye_in_hand_image --n 2 --start 1 + + # scale up dataset images when rendering to screen by factor of 10 + python annotate_subtasks.py --dataset /path/to/demo.hdf5 --signals grasp_1 insert_1 grasp_2 \ + --render_image_names agentview_image robot0_eye_in_hand_image --image_scale 10 + +""" + +import os +import sys +import h5py +import argparse +import imageio +import numpy as np + +# for rendering images on-screen +import cv2 +import pygame + +import robomimic +from robomimic.utils.file_utils import get_env_metadata_from_dataset + +import mimicgen 
+import mimicgen.utils.file_utils as MG_FileUtils +import mimicgen.utils.misc_utils as MiscUtils + + +# scaling size for images when rendering to screen +# IMAGE_SCALE = 10 +IMAGE_SCALE = 5 +# IMAGE_SCALE = 1 + +# Grid of playback rates for the user to cycle through (e.g. 1hz, 5 hz, ...) +RATE_GRID = MiscUtils.Grid( + values=[1, 5, 10, 20, 40], + initial_ind=0, +) + + +def print_keyboard_commands(): + """ + Helper function to print keyboard annotation commands. + """ + def print_command(char, info): + char += " " * (11 - len(char)) + print("{}\t{}".format(char, info)) + + print("") + print_command("Keys", "Command") + print_command("up-down", "increase / decrease playback speed") + print_command("left-right", "seek left / right by N frames") + print_command("spacebar", "press and release to annotate the end of a subtask") + print_command("f", "next demo and save annotations") + print_command("r", "repeat demo and clear annotations") + print("") + + +def make_pygame_screen( + traj_grp, + image_names, + image_scale, +): + """ + Makes pygame screen. 
+ + Args: + traj_grp (h5py.Group): group for a demonstration trajectory + image_names (list): list of image names that will be used for rendering + image_scale (int): scaling factor for the image to diplay in window + + Returns: + screen: pygame screen object + """ + # grab first image from all image modalities to infer size of window + im = [traj_grp["obs/{}".format(k)][0] for k in image_names] + frame = np.concatenate(im, axis=1) + width, height = frame.shape[:2] + width *= image_scale + height *= image_scale + screen = pygame.display.set_mode((height, width)) + return screen + + +def handle_pygame_events( + frame_ind, + subtask_signals, + subtask_ind, + rate_obj, + need_repeat, + annotation_done, + playback_rate_grid, +): + """ + Reads events from pygame window in order to provide the + following keyboard annotation functionality: + + up-down | increase / decrease playback speed + left-right | seek left / right by N frames + spacebar | press and release to annotate the end of a subtask + f | next demo and save annotations + r | repeat demo and clear annotations + + Args: + frame_ind (int): index of current frame in demonstration + subtask_signals (list): list of subtask termination signals that we will annotate + subtask_ind (int): index of current subtask (state variable) + rate_obj (Rate): rate object to maintain playback rate + need_repeat (bool): whether the demo should be repeated (state variable) + annotation_done (bool): whether user is done annotating this demo (state variable) + playback_rate_grid (Grid): grid object to easily toggle between different playback rates + + Returns: + subtask_end_ind (int or None): end index for current subtask, annotated by human, or None if no annotation + subtask_ind (int): possibly updated subtask index + need_repeat (bool): possibly updated value + annotation_done (bool): possibly updated value + seek (int): how much to seek forward or backward in demonstration (value read from user command) + """ + + subtask_end_ind = 
None + seek = 0 + for event in pygame.event.get(): + + if event.type == pygame.QUIT: + sys.exit() + + if event.type == pygame.KEYUP: + # print("released key {}".format(event.key)) + if event.key == pygame.K_SPACE: + # annotate end of current subtask and move on to next subtask + subtask_end_ind = frame_ind + 1 + print("") + print("*" * 50) + print("cmd: annotated end of subtask {} (signal {}) at index {}".format(subtask_ind, subtask_signals[subtask_ind], frame_ind + 1)) + print("*" * 50) + print("") + subtask_ind += 1 + elif event.key == pygame.K_UP: + # speed up traversal + rate_obj.update_hz(playback_rate_grid.next()) + print("cmd: playback rate increased to {} hz".format(rate_obj.hz)) + elif event.key == pygame.K_DOWN: + # slow down traversal + rate_obj.update_hz(playback_rate_grid.prev()) + print("cmd: playback rate decreased to {} hz".format(rate_obj.hz)) + elif event.key == pygame.K_LEFT: + # seek left + seek = -10 + print("cmd: seek {} frames".format(seek)) + elif event.key == pygame.K_RIGHT: + # seek right + seek = 10 + print("cmd: seek {} frames".format(seek)) + elif event.key == pygame.K_r: + # repeat demo + need_repeat = True + print("cmd: repeat demo") + elif event.key == pygame.K_f: + # next demo + annotation_done = True + print("cmd: next demo") + + return subtask_end_ind, subtask_ind, need_repeat, annotation_done, seek + + +def annotate_subtasks_in_trajectory( + ep, + traj_grp, + subtask_signals, + screen, + video_skip, + image_names, + playback_rate_grid, +): + """ + This function reads all "rgb" observations in the dataset trajectory and + writes them into a video. 
+ + Args: + ep (str): name of hdf5 group for this demo + traj_grp (hdf5 file group): hdf5 group which corresponds to the dataset trajectory to annotate + subtask_signals (list): list of subtask termination signals that will be annotated + screen: pygame screen + video_skip (int): determines rate at which environment frames are written to video + image_names (list): determines which image observations are used for rendering. Pass more than + one to output a video with multiple image observations concatenated horizontally. + playback_rate_grid (Grid): grid object to easily toggle between different playback rates + """ + assert image_names is not None, "error: must specify at least one image observation to use in @image_names" + + traj_len = traj_grp["actions"].shape[0] + + rate_obj = MiscUtils.Rate(hz=playback_rate_grid.get()) + rate_measure = MiscUtils.RateMeasure(name="rate_measure") + + # repeat this demonstration until we have permission to move on + annotation_done = False + while not annotation_done: + print("Starting annotation for demo: {}".format(ep)) + print_keyboard_commands() + + need_repeat = False + subtask_end_inds = [] + + # keep looping through the video, reading user input from keyboard, until + # user indicates that demo is done being annotated + frame_ind = 0 + subtask_ind = 0 + should_add_border_to_frame = False + while (not need_repeat) and (not annotation_done): + + # maybe render frame to screen + if frame_ind % video_skip == 0: + # concatenate image obs together + im = [traj_grp["obs/{}".format(k)][frame_ind] for k in image_names] + frame = np.concatenate(im, axis=1) + # upscale frame to appropriate resolution + frame = cv2.resize(frame, + dsize=(frame.shape[1] * IMAGE_SCALE, frame.shape[0] * IMAGE_SCALE), + interpolation=cv2.INTER_CUBIC) + # maybe add red border + if should_add_border_to_frame: + frame = MiscUtils.add_red_border_to_frame(frame) + # write frame to window + frame = frame.transpose((1, 0, 2)) + 
pygame.pixelcopy.array_to_surface(screen, frame) + pygame.display.update() + + subtask_end_ind, subtask_ind, need_repeat, annotation_done, seek = handle_pygame_events( + frame_ind=frame_ind, + subtask_signals=subtask_signals, + subtask_ind=subtask_ind, + rate_obj=rate_obj, + need_repeat=need_repeat, + annotation_done=annotation_done, + playback_rate_grid=playback_rate_grid, + ) + + if subtask_end_ind is not None: + # store new annotation and toggle rendering of red border + subtask_end_inds.append(subtask_end_ind) + should_add_border_to_frame = (not should_add_border_to_frame) + + # try to enforce rate + rate_obj.sleep() + rate_measure.measure() + + # increment frame index appropriately (either by 1 or by seek amount), then + # clamp within bounds + mask = int(seek != 0) + frame_ind += (1 - mask) * 1 + mask * seek + frame_ind = max(min(frame_ind, traj_len - 1), 0) + + # if we don't need to repeat the demo, we're done + annotation_done = annotation_done or (not need_repeat) + + # check that we got the right number of annotations + if len(subtask_end_inds) != len(subtask_signals): + print("") + print("*" * 50) + print("Number of termination annotations {} does not match expected number {}...".format(len(subtask_end_inds), len(subtask_signals))) + print("Repeating annotation.") + print("*" * 50) + print("") + annotation_done = False + + # write subtask_termination_signals to hdf5 + assert len(subtask_end_inds) == len(subtask_signals) + + if "subtask_term_signals" in traj_grp["datagen_info"]: + del traj_grp["datagen_info"]["subtask_term_signals"] + for subtask_ind in range(len(subtask_signals)): + # subtask termination signal is 0 until subtask is complete, and 1 afterwards + subtask_signal_array = np.ones(traj_len, dtype=int) + subtask_signal_array[:subtask_end_inds[subtask_ind]] = 0 + traj_grp.create_dataset("datagen_info/subtask_term_signals/{}".format(subtask_signals[subtask_ind]), data=subtask_signal_array) + + # report rate measurements + print("\nFrame Rate (Hz) 
Statistics for demo {} annotation".format(ep)) + print(rate_measure) + + +def annotate_subtasks(args): + # Auto-fill camera rendering info if not specified + if args.render_image_names is None: + env_meta = get_env_metadata_from_dataset(dataset_path=args.dataset) + args.render_image_names = RobomimicUtils.get_default_env_cameras(env_meta=env_meta) + + # get demonstrations to annotate + dataset_path = args.dataset + demo_keys = MG_FileUtils.get_all_demos_from_dataset( + dataset_path=dataset_path, + filter_key=args.filter_key, + start=args.start, + n=args.n, + ) + + # Verify that the dataset has been processed and has datagen_info. + MG_FileUtils.get_env_interface_info_from_dataset( + dataset_path=dataset_path, + demo_keys=demo_keys, + ) + + # Open the file in read-write mode to add in annotations as subtask_term_signals in datagen_info. + f = h5py.File(dataset_path, "a") + + # make pygame screen first + screen = make_pygame_screen( + traj_grp=f["data/{}".format(demo_keys[0])], + image_names=args.render_image_names, + image_scale=args.image_scale, + ) + + for ind in range(len(demo_keys)): + ep = demo_keys[ind] + print("Annotating episode: {}".format(ep)) + + annotate_subtasks_in_trajectory( + ep=ep, + traj_grp=f["data/{}".format(ep)], + subtask_signals=args.signals, + screen=screen, + video_skip=args.video_skip, + image_names=args.render_image_names, + playback_rate_grid=RATE_GRID, + ) + + f.close() + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument( + "--dataset", + type=str, + required=True, + help="path to hdf5 dataset which will be modified in-place", + ) + parser.add_argument( + "--signals", + type=str, + nargs='+', + required=True, + help="specify sequence of subtask termination signals for all except last subtask -- these will be written using the annotations", + ) + parser.add_argument( + "--filter_key", + type=str, + default=None, + help="(optional) filter key, to select a subset of trajectories in the file", + ) + 
parser.add_argument( + "--n", + type=int, + default=None, + help="(optional) stop after n trajectories are annotated", + ) + parser.add_argument( + "--start", + type=int, + default=None, + help="(optional) start after this many trajectories in the dataset", + ) + parser.add_argument( + "--video_skip", + type=int, + default=1, + help="(optional) render frames on-screen every n steps", + ) + parser.add_argument( + "--render_image_names", + type=str, + nargs='+', + default=None, + help="(optional) camera name(s) / image observation(s) to use for rendering on-screen or to video. Default is" + "None, which corresponds to a predefined camera for each env type", + ) + parser.add_argument( + "--image_scale", + type=int, + default=5, + help="(optional) scaling size for images when rendering to screen", + ) + + args = parser.parse_args() + annotate_subtasks(args) diff --git a/mimicgen_envs/scripts/demo_random_action.py b/mimicgen/scripts/demo_random_action.py similarity index 92% rename from mimicgen_envs/scripts/demo_random_action.py rename to mimicgen/scripts/demo_random_action.py index 4c0c1d9..666f244 100644 --- a/mimicgen_envs/scripts/demo_random_action.py +++ b/mimicgen/scripts/demo_random_action.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # # Licensed under the NVIDIA Source Code License [see LICENSE for details]. 
@@ -28,14 +28,14 @@ def choose_mimicgen_environment(): robosuite_envs = set(suite.ALL_ENVIRONMENTS) # all environments including mimicgen environments - import mimicgen_envs + import mimicgen all_envs = set(suite.ALL_ENVIRONMENTS) # get only mimicgen envs - only_mimicgen_envs = sorted(all_envs - robosuite_envs) + only_mimicgen = sorted(all_envs - robosuite_envs) # keep only envs that correspond to the different reset distributions from the paper - envs = [x for x in only_mimicgen_envs if x[-1].isnumeric()] + envs = [x for x in only_mimicgen if x[-1].isnumeric()] # Select environment to run print("Here is a list of environments in the suite:\n") diff --git a/mimicgen_envs/scripts/download_datasets.py b/mimicgen/scripts/download_datasets.py similarity index 92% rename from mimicgen_envs/scripts/download_datasets.py rename to mimicgen/scripts/download_datasets.py index 26ce525..ec78c75 100644 --- a/mimicgen_envs/scripts/download_datasets.py +++ b/mimicgen/scripts/download_datasets.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # # Licensed under the NVIDIA Source Code License [see LICENSE for details]. 
@@ -8,9 +8,9 @@ import os import argparse -import mimicgen_envs -import mimicgen_envs.utils.file_utils as FileUtils -from mimicgen_envs import DATASET_REGISTRY +import mimicgen +import mimicgen.utils.file_utils as FileUtils +from mimicgen import DATASET_REGISTRY if __name__ == "__main__": @@ -55,7 +55,7 @@ # set default base directory for downloads default_base_dir = args.download_dir if default_base_dir is None: - default_base_dir = os.path.join(mimicgen_envs.__path__[0], "../datasets") + default_base_dir = os.path.join(mimicgen.__path__[0], "../datasets") # load args download_dataset_type = args.dataset_type diff --git a/mimicgen/scripts/generate_config_templates.py b/mimicgen/scripts/generate_config_templates.py new file mode 100644 index 0000000..3653904 --- /dev/null +++ b/mimicgen/scripts/generate_config_templates.py @@ -0,0 +1,37 @@ +# Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# Licensed under the NVIDIA Source Code License [see LICENSE for details]. + +""" +Helpful script to generate example config files, one per config class. These should be re-generated +when new config options are added, or when default settings in the config classes are modified. 
+""" +import os +import json + +import mimicgen +from mimicgen.configs.config import get_all_registered_configs + + +def main(): + # store template config jsons in this directory + target_dir = os.path.join(mimicgen.__path__[0], "exps/templates/") + + # iterate through registered config classes + all_configs = get_all_registered_configs() + for config_type in all_configs: + # store config json by config type + target_type_dir = os.path.join(target_dir, config_type) + os.makedirs(target_type_dir, exist_ok=True) + for name in all_configs[config_type]: + # make config class to dump it to json + c = all_configs[config_type][name]() + assert name == c.name + assert config_type == c.type + # dump to json + json_path = os.path.join(target_type_dir, "{}.json".format(name)) + c.dump(filename=json_path) + + +if __name__ == '__main__': + main() \ No newline at end of file diff --git a/mimicgen/scripts/generate_core_configs.py b/mimicgen/scripts/generate_core_configs.py new file mode 100644 index 0000000..c04bf97 --- /dev/null +++ b/mimicgen/scripts/generate_core_configs.py @@ -0,0 +1,378 @@ +# Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# Licensed under the NVIDIA Source Code License [see LICENSE for details]. + +""" +We utilize robomimic's config generator class to easily generate data generation configs for our +core set of tasks in the paper. It can be modified easily to generate other configs. + +The global variables at the top of the file should be configured manually. + +See https://robomimic.github.io/docs/tutorials/hyperparam_scan.html for more info. 
+""" +import os +import json +import shutil + +import robomimic +from robomimic.utils.hyperparam_utils import ConfigGenerator + +import mimicgen +import mimicgen.utils.config_utils as ConfigUtils +from mimicgen.utils.file_utils import config_generator_to_script_lines + + +# set path to folder containing src datasets +import mimicgen_envs +SRC_DATA_DIR = os.path.join(mimicgen_envs.__path__[0], "../datasets/source") + +# set base folder for where to copy each base config and generate new config files for data generation +CONFIG_DIR = "/tmp/core_configs" + +# set base folder for newly generated datasets +OUTPUT_FOLDER = "/tmp/core_datasets" + +# number of trajectories to generate (or attempt to generate) +NUM_TRAJ = 1000 + +# whether to guarantee that many successful trajectories (e.g. keep running until that many successes, or stop at that many attempts) +GUARANTEE = True + +# whether to run a quick debug run instead of full generation +DEBUG = False + +# camera settings for collecting observations +CAMERA_NAMES = ["agentview", "robot0_eye_in_hand"] +CAMERA_SIZE = (84, 84) + +# path to base config(s) +BASE_BASE_CONFIG_PATH = os.path.join(mimicgen.__path__[0], "exps/templates/robosuite") +BASE_CONFIGS = [ + os.path.join(BASE_BASE_CONFIG_PATH, "stack.json"), + os.path.join(BASE_BASE_CONFIG_PATH, "stack_three.json"), + os.path.join(BASE_BASE_CONFIG_PATH, "square.json"), + os.path.join(BASE_BASE_CONFIG_PATH, "threading.json"), + os.path.join(BASE_BASE_CONFIG_PATH, "three_piece_assembly.json"), + os.path.join(BASE_BASE_CONFIG_PATH, "coffee.json"), + os.path.join(BASE_BASE_CONFIG_PATH, "coffee_preparation.json"), + os.path.join(BASE_BASE_CONFIG_PATH, "nut_assembly.json"), + os.path.join(BASE_BASE_CONFIG_PATH, "pick_place.json"), + os.path.join(BASE_BASE_CONFIG_PATH, "hammer_cleanup.json"), + os.path.join(BASE_BASE_CONFIG_PATH, "mug_cleanup.json"), + os.path.join(BASE_BASE_CONFIG_PATH, "kitchen.json"), +] + + +def make_generators(base_configs): + """ + An easy way to make 
multiple config generators by using different + settings for each. + """ + all_settings = [ + # stack + dict( + dataset_path=os.path.join(SRC_DATA_DIR, "stack.hdf5"), + dataset_name="stack", + generation_path="{}/stack".format(OUTPUT_FOLDER), + # task_interface="MG_Stack", + tasks=["Stack_D0", "Stack_D1"], + task_names=["D0", "D1"], + select_src_per_subtask=True, + selection_strategy="nearest_neighbor_object", + selection_strategy_kwargs=dict(nn_k=3), + subtask_term_offset_range=[[10, 20], None], + ), + # stack_three + dict( + dataset_path=os.path.join(SRC_DATA_DIR, "stack_three.hdf5"), + dataset_name="stack_three", + generation_path="{}/stack_three".format(OUTPUT_FOLDER), + # task_interface="MG_StackThree", + tasks=["StackThree_D0", "StackThree_D1"], + task_names=["D0", "D1"], + select_src_per_subtask=True, + selection_strategy="nearest_neighbor_object", + selection_strategy_kwargs=dict(nn_k=3), + subtask_term_offset_range=[[10, 20], [10, 20], [10, 20], None], + ), + # square + dict( + dataset_path=os.path.join(SRC_DATA_DIR, "square.hdf5"), + dataset_name="square", + generation_path="{}/square".format(OUTPUT_FOLDER), + # task_interface="MG_Square", + tasks=["Square_D0", "Square_D1", "Square_D2"], + task_names=["D0", "D1", "D2"], + select_src_per_subtask=False, + selection_strategy="nearest_neighbor_object", + selection_strategy_kwargs=dict(nn_k=3), + subtask_term_offset_range=[[10, 20], None], + ), + # threading + dict( + dataset_path=os.path.join(SRC_DATA_DIR, "threading.hdf5"), + dataset_name="threading", + generation_path="{}/threading".format(OUTPUT_FOLDER), + # task_interface="MG_Threading", + tasks=["Threading_D0", "Threading_D1", "Threading_D2"], + task_names=["D0", "D1", "D2"], + select_src_per_subtask=False, + selection_strategy="random", + selection_strategy_kwargs=None, + subtask_term_offset_range=[[5, 10], None], + ), + # three_piece_assembly + dict( + dataset_path=os.path.join(SRC_DATA_DIR, "three_piece_assembly.hdf5"), + 
dataset_name="three_piece_assembly", + generation_path="{}/three_piece_assembly".format(OUTPUT_FOLDER), + # task_interface="MG_ThreePieceAssembly", + tasks=["ThreePieceAssembly_D0", "ThreePieceAssembly_D1", "ThreePieceAssembly_D2"], + task_names=["D0", "D1", "D2"], + select_src_per_subtask=False, + selection_strategy="random", + selection_strategy_kwargs=None, + subtask_term_offset_range=[[5, 10], [5, 10], [5, 10], None], + ), + # coffee + dict( + dataset_path=os.path.join(SRC_DATA_DIR, "coffee.hdf5"), + dataset_name="coffee", + generation_path="{}/coffee".format(OUTPUT_FOLDER), + # task_interface="MG_Coffee", + tasks=["Coffee_D0", "Coffee_D1", "Coffee_D2"], + task_names=["D0", "D1", "D2"], + select_src_per_subtask=False, + selection_strategy="random", + selection_strategy_kwargs=None, + subtask_term_offset_range=[[5, 10], None], + ), + # coffee_preparation + dict( + dataset_path=os.path.join(SRC_DATA_DIR, "coffee_preparation.hdf5"), + dataset_name="coffee_preparation", + generation_path="{}/coffee_preparation".format(OUTPUT_FOLDER), + # task_interface="MG_CoffeePreparation", + tasks=["CoffeePreparation_D0", "CoffeePreparation_D1"], + task_names=["D0", "D1"], + select_src_per_subtask=False, + selection_strategy="random", + selection_strategy_kwargs=None, + subtask_term_offset_range=[[5, 10], [5, 10], [5, 10], [5, 10], None], + ), + # nut_assembly + dict( + dataset_path=os.path.join(SRC_DATA_DIR, "nut_assembly.hdf5"), + dataset_name="nut_assembly", + generation_path="{}/nut_assembly".format(OUTPUT_FOLDER), + # task_interface="MG_NutAssembly", + tasks=["NutAssembly_D0"], + task_names=["D0"], + select_src_per_subtask=False, + selection_strategy="nearest_neighbor_object", + selection_strategy_kwargs=dict(nn_k=3), + subtask_term_offset_range=[[10, 20], [10, 20], [10, 20], None], + ), + # pick_place + dict( + dataset_path=os.path.join(SRC_DATA_DIR, "pick_place.hdf5"), + dataset_name="pick_place", + generation_path="{}/pick_place".format(OUTPUT_FOLDER), + # 
task_interface="MG_PickPlace", + tasks=["PickPlace_D0"], + task_names=["D0"], + select_src_per_subtask=True, + # NOTE: selection strategy is set by default in the config template, and we will not change it + # selection_strategy="nearest_neighbor_object", + # selection_strategy_kwargs=dict(nn_k=3), + subtask_term_offset_range=[[10, 20], None, [10, 20], None, [10, 20], None, [10, 20], None], + ), + # hammer_cleanup + dict( + dataset_path=os.path.join(SRC_DATA_DIR, "hammer_cleanup.hdf5"), + dataset_name="hammer_cleanup", + generation_path="{}/hammer_cleanup".format(OUTPUT_FOLDER), + # task_interface="MG_HammerCleanup", + tasks=["HammerCleanup_D0", "HammerCleanup_D1"], + task_names=["D0", "D1"], + select_src_per_subtask=False, + selection_strategy="random", + selection_strategy_kwargs=None, + subtask_term_offset_range=[[10, 20], [10, 20], None], + ), + # mug_cleanup + dict( + dataset_path=os.path.join(SRC_DATA_DIR, "mug_cleanup.hdf5"), + dataset_name="mug_cleanup", + generation_path="{}/mug_cleanup".format(OUTPUT_FOLDER), + # task_interface="MG_MugCleanup", + tasks=["MugCleanup_D0", "MugCleanup_D1", "MugCleanup_O1", "MugCleanup_O2"], + task_names=["D0", "D1", "O1", "O2"], + select_src_per_subtask=False, + selection_strategy="random", + selection_strategy_kwargs=None, + subtask_term_offset_range=[[10, 20], [10, 20], None], + ), + # kitchen + dict( + dataset_path=os.path.join(SRC_DATA_DIR, "kitchen.hdf5"), + dataset_name="kitchen", + generation_path="{}/kitchen".format(OUTPUT_FOLDER), + # task_interface="MG_Kitchen", + tasks=["Kitchen_D0", "Kitchen_D1"], + task_names=["D0", "D1"], + select_src_per_subtask=False, + selection_strategy="random", + selection_strategy_kwargs=None, + subtask_term_offset_range=[[10, 20], [10, 20], [10, 20], [10, 20], [10, 20], [10, 20], None], + ), + ] + + assert len(base_configs) == len(all_settings) + ret = [] + for conf, setting in zip(base_configs, all_settings): + ret.append(make_generator(os.path.expanduser(conf), setting)) + return ret 
+ + +def make_generator(config_file, settings): + """ + Implement this function to setup your own hyperparameter scan. + Each config generator is created using a base config file (@config_file) + and a @settings dictionary that can be used to modify which parameters + are set. + """ + generator = ConfigGenerator( + base_config_file=config_file, + script_file="", # will be overriden in next step + ) + + # set basic settings + ConfigUtils.set_basic_settings( + generator=generator, + group=0, + source_dataset_path=settings["dataset_path"], + source_dataset_name=settings["dataset_name"], + generation_path=settings["generation_path"], + guarantee=GUARANTEE, + num_traj=NUM_TRAJ, + num_src_demos=10, + max_num_failures=25, + num_demo_to_render=10, + num_fail_demo_to_render=25, + verbose=False, + ) + + # set settings for subtasks + ConfigUtils.set_subtask_settings( + generator=generator, + group=0, + base_config_file=config_file, + select_src_per_subtask=settings["select_src_per_subtask"], + subtask_term_offset_range=settings["subtask_term_offset_range"], + selection_strategy=settings.get("selection_strategy", None), + selection_strategy_kwargs=settings.get("selection_strategy_kwargs", None), + # default settings: action noise 0.05, with 5 interpolation steps + action_noise=0.05, + num_interpolation_steps=5, + num_fixed_steps=0, + verbose=False, + ) + + # optionally set env interface to use, and type + # generator.add_param( + # key="experiment.task.interface", + # name="", + # group=0, + # values=[settings["task_interface"]], + # ) + # generator.add_param( + # key="experiment.task.interface_type", + # name="", + # group=0, + # values=["robosuite"], + # ) + + # set task to generate data on + generator.add_param( + key="experiment.task.name", + name="task", + group=1, + values=settings["tasks"], + value_names=settings["task_names"], + ) + + # optionally set robot and gripper that will be used for data generation (robosuite-only) + if settings.get("robots", None) is not None: 
+ generator.add_param( + key="experiment.task.robot", + name="r", + group=2, + values=settings["robots"], + ) + if settings.get("grippers", None) is not None: + generator.add_param( + key="experiment.task.gripper", + name="g", + group=2, + values=settings["grippers"], + ) + + # set observation collection settings + ConfigUtils.set_obs_settings( + generator=generator, + group=-1, + collect_obs=True, + camera_names=CAMERA_NAMES, + camera_height=CAMERA_SIZE[0], + camera_width=CAMERA_SIZE[1], + ) + + if DEBUG: + # set debug settings + ConfigUtils.set_debug_settings( + generator=generator, + group=-1, + ) + + # seed + generator.add_param( + key="experiment.seed", + name="", + group=1000000, + values=[1], + ) + + return generator + + +def main(): + + # make config generators + generators = make_generators(base_configs=BASE_CONFIGS) + + # maybe remove existing config directory + config_dir = CONFIG_DIR + if os.path.exists(config_dir): + ans = input("Non-empty dir at {} will be removed.\nContinue (y / n)? \n".format(config_dir)) + if ans != "y": + exit() + shutil.rmtree(config_dir) + + all_json_files, run_lines = config_generator_to_script_lines(generators, config_dir=config_dir) + + real_run_lines = [] + for line in run_lines: + line = line.strip().replace("train.py", "generate_dataset.py") + line += " --auto-remove-exp" + real_run_lines.append(line) + run_lines = real_run_lines + + print("configs") + print(json.dumps(all_json_files, indent=4)) + print("runs") + print(json.dumps(run_lines, indent=4)) + + +if __name__ == "__main__": + main() diff --git a/mimicgen/scripts/generate_core_training_configs.py b/mimicgen/scripts/generate_core_training_configs.py new file mode 100644 index 0000000..ef3754b --- /dev/null +++ b/mimicgen/scripts/generate_core_training_configs.py @@ -0,0 +1,337 @@ +# Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# Licensed under the NVIDIA Source Code License [see LICENSE for details]. 
+ +""" +We utilize robomimic's config generator class to easily generate policy training configs for the +core set of experiments in the paper. It can be modified easily to generate other +training configs. + +See https://robomimic.github.io/docs/tutorials/hyperparam_scan.html for more info. +""" +import os +import json +import shutil +import argparse + +import robomimic +from robomimic.utils.hyperparam_utils import ConfigGenerator + +import mimicgen +import mimicgen.utils.config_utils as ConfigUtils +from mimicgen.utils.file_utils import config_generator_to_script_lines + + +# set path to folder with mimicgen generated datasets +DATASET_DIR = "/tmp/minimal_datasets" + +# set base folder for where to generate new config files for training runs +CONFIG_DIR = "/tmp/minimal_training_configs" + +# set base folder for training outputs (model checkpoints, videos, logs) +OUTPUT_DIR = "/tmp/minimal_training_results" + +# path to base config +BASE_CONFIG = os.path.join(robomimic.__path__[0], "exps/templates/bc.json") + + +def make_generators(base_config, dataset_dir, output_dir): + """ + An easy way to make multiple config generators by using different + settings for each. 
+ """ + all_settings = [ + # stack + dict( + dataset_paths=[ + os.path.join(dataset_dir, "stack", "demo_src_stack_targ_D0/demo.hdf5"), + os.path.join(dataset_dir, "stack", "demo_src_stack_targ_D1/demo.hdf5"), + ], + dataset_names=[ + "stack_D0", + "stack_D1", + ], + horizon=400, + ), + # stack_three + dict( + dataset_paths=[ + os.path.join(dataset_dir, "stack_three", "demo_src_stack_three_targ_D0/demo.hdf5"), + os.path.join(dataset_dir, "stack_three", "demo_src_stack_three_targ_D1/demo.hdf5"), + ], + dataset_names=[ + "stack_three_D0", + "stack_three_D1", + ], + horizon=400, + ), + # square + dict( + dataset_paths=[ + os.path.join(dataset_dir, "square", "demo_src_square_targ_D0/demo.hdf5"), + os.path.join(dataset_dir, "square", "demo_src_square_targ_D1/demo.hdf5"), + os.path.join(dataset_dir, "square", "demo_src_square_targ_D2/demo.hdf5"), + ], + dataset_names=[ + "square_D0", + "square_D1", + "square_D2", + ], + horizon=400, + ), + # threading + dict( + dataset_paths=[ + os.path.join(dataset_dir, "threading", "demo_src_threading_targ_D0/demo.hdf5"), + os.path.join(dataset_dir, "threading", "demo_src_threading_targ_D1/demo.hdf5"), + os.path.join(dataset_dir, "threading", "demo_src_threading_targ_D2/demo.hdf5"), + ], + dataset_names=[ + "threading_D0", + "threading_D1", + "threading_D2", + ], + horizon=400, + ), + # three_piece_assembly + dict( + dataset_paths=[ + os.path.join(dataset_dir, "three_piece_assembly", "demo_src_three_piece_assembly_targ_D0/demo.hdf5"), + os.path.join(dataset_dir, "three_piece_assembly", "demo_src_three_piece_assembly_targ_D1/demo.hdf5"), + os.path.join(dataset_dir, "three_piece_assembly", "demo_src_three_piece_assembly_targ_D2/demo.hdf5"), + ], + dataset_names=[ + "three_piece_assembly_D0", + "three_piece_assembly_D1", + "three_piece_assembly_D2", + ], + horizon=500, + ), + # coffee + dict( + dataset_paths=[ + os.path.join(dataset_dir, "coffee", "demo_src_coffee_targ_D0/demo.hdf5"), + os.path.join(dataset_dir, "coffee", 
"demo_src_coffee_targ_D1/demo.hdf5"), + os.path.join(dataset_dir, "coffee", "demo_src_coffee_targ_D2/demo.hdf5"), + ], + dataset_names=[ + "coffee_D0", + "coffee_D1", + "coffee_D2", + ], + horizon=400, + ), + # coffee_preparation + dict( + dataset_paths=[ + os.path.join(dataset_dir, "coffee_preparation", "demo_src_coffee_preparation_targ_D0/demo.hdf5"), + os.path.join(dataset_dir, "coffee_preparation", "demo_src_coffee_preparation_targ_D1/demo.hdf5"), + ], + dataset_names=[ + "coffee_preparation_D0", + "coffee_preparation_D1", + ], + horizon=800, + ), + # nut_assembly + dict( + dataset_paths=[ + os.path.join(dataset_dir, "nut_assembly", "demo_src_nut_assembly_targ_D0/demo.hdf5"), + ], + dataset_names=[ + "nut_assembly_D0", + ], + horizon=500, + ), + # pick_place + dict( + dataset_paths=[ + os.path.join(dataset_dir, "pick_place", "demo_src_pick_place_targ_D0/demo.hdf5"), + ], + dataset_names=[ + "pick_place_D0", + ], + horizon=500, + ), + # mug_cleanup + dict( + dataset_paths=[ + os.path.join(dataset_dir, "mug_cleanup", "demo_src_mug_cleanup_targ_D0/demo.hdf5"), + os.path.join(dataset_dir, "mug_cleanup", "demo_src_mug_cleanup_targ_D1/demo.hdf5"), + os.path.join(dataset_dir, "mug_cleanup", "demo_src_mug_cleanup_targ_O1/demo.hdf5"), + os.path.join(dataset_dir, "mug_cleanup", "demo_src_mug_cleanup_targ_O2/demo.hdf5"), + ], + dataset_names=[ + "mug_cleanup_D0", + "mug_cleanup_D1", + "mug_cleanup_O1", + "mug_cleanup_O2", + ], + horizon=500, + ), + # hammer_cleanup + dict( + dataset_paths=[ + os.path.join(dataset_dir, "hammer_cleanup", "demo_src_hammer_cleanup_targ_D0/demo.hdf5"), + os.path.join(dataset_dir, "hammer_cleanup", "demo_src_hammer_cleanup_targ_D1/demo.hdf5"), + ], + dataset_names=[ + "hammer_cleanup_D0", + "hammer_cleanup_D1", + ], + horizon=500, + ), + # kitchen + dict( + dataset_paths=[ + os.path.join(dataset_dir, "kitchen", "demo_src_kitchen_targ_D0/demo.hdf5"), + os.path.join(dataset_dir, "kitchen", "demo_src_kitchen_targ_D1/demo.hdf5"), + ], + 
dataset_names=[ + "kitchen_D0", + "kitchen_D1", + ], + horizon=800, + ), + ] + + ret = [] + for setting in all_settings: + for mod in ["low_dim", "image"]: + ret.append(make_gen(os.path.expanduser(base_config), setting, output_dir, mod)) + return ret + + +def make_gen(base_config, settings, output_dir, mod): + """ + Specify training configs to generate here. + """ + generator = ConfigGenerator( + base_config_file=base_config, + script_file="", # will be overriden in next step + base_exp_name="bc_rnn_{}".format(mod), + ) + + # set algo settings for bc-rnn + modality = mod + low_dim_keys = settings.get("low_dim_keys", None) + image_keys = settings.get("image_keys", None) + crop_size = settings.get("crop_size", None) + if modality == "low_dim": + if low_dim_keys is None: + low_dim_keys = ["robot0_eef_pos", "robot0_eef_quat", "robot0_gripper_qpos", "object"] + if modality == "image": + if low_dim_keys is None: + low_dim_keys = ["robot0_eef_pos", "robot0_eef_quat", "robot0_gripper_qpos"] + if image_keys is None: + image_keys = ["agentview_image", "robot0_eye_in_hand_image"] + if crop_size is None: + crop_size = [76, 76] + assert len(crop_size) == 2 + + ConfigUtils.set_learning_settings_for_bc_rnn( + generator=generator, + group=-1, + modality=modality, + seq_length=10, + low_dim_keys=low_dim_keys, + image_keys=image_keys, + crop_size=crop_size, + ) + + # set dataset + generator.add_param( + key="train.data", + name="ds", + group=0, + values=settings["dataset_paths"], + value_names=settings["dataset_names"], + ) + + # rollout settings + generator.add_param( + key="experiment.rollout.horizon", + name="", + group=1, + values=[settings["horizon"]], + ) + + # output path + generator.add_param( + key="train.output_dir", + name="", + group=-1, + values=[ + output_dir, + ], + ) + + # num data workers 4 by default (for both low-dim and image) and cache mode "low_dim" + generator.add_param( + key="train.num_data_workers", + name="", + group=-1, + values=[4], + ) + 
generator.add_param( + key="train.hdf5_cache_mode", + name="", + group=-1, + values=["low_dim"], + ) + + # seed + generator.add_param( + key="train.seed", + name="seed", + group=100000, + values=[101], + ) + + return generator + + +def main(args): + + # make config generators + generators = make_generators(base_config=BASE_CONFIG, dataset_dir=args.dataset_dir, output_dir=args.output_dir) + + if os.path.exists(args.config_dir): + ans = input("Non-empty dir at {} will be removed.\nContinue (y / n)? \n".format(args.config_dir)) + if ans != "y": + exit() + shutil.rmtree(args.config_dir) + + all_json_files, run_lines = config_generator_to_script_lines(generators, config_dir=args.config_dir) + + run_lines = [line.strip() for line in run_lines] + + print("configs") + print(json.dumps(all_json_files, indent=4)) + print("runs") + print(json.dumps(run_lines, indent=4)) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument( + "--config_dir", + type=str, + default=os.path.expanduser(CONFIG_DIR), + help="set base folder for where to generate new config files for data generation", + ) + parser.add_argument( + "--dataset_dir", + type=str, + default=os.path.expanduser(DATASET_DIR), + help="set path to folder with datasets", + ) + parser.add_argument( + "--output_dir", + type=str, + default=os.path.expanduser(OUTPUT_DIR), + help="set base folder for where to generate new config files for data generation", + ) + + args = parser.parse_args() + main(args) diff --git a/mimicgen/scripts/generate_dataset.py b/mimicgen/scripts/generate_dataset.py new file mode 100644 index 0000000..937c73b --- /dev/null +++ b/mimicgen/scripts/generate_dataset.py @@ -0,0 +1,638 @@ +# Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# Licensed under the NVIDIA Source Code License [see LICENSE for details]. + +""" +Main data generation script. 
+""" + +import os +import shutil +import json +import time +import argparse +import traceback +import random +import imageio +import numpy as np +from copy import deepcopy + +import robomimic +from robomimic.utils.file_utils import get_env_metadata_from_dataset + +import mimicgen +import mimicgen.utils.file_utils as MG_FileUtils +import mimicgen.utils.robomimic_utils as RobomimicUtils + +from mimicgen.configs import config_factory, MG_TaskSpec +from mimicgen.datagen.data_generator import DataGenerator +from mimicgen.env_interfaces.base import make_interface + + +def get_important_stats( + new_dataset_folder_path, + num_success, + num_failures, + num_attempts, + num_problematic, + start_time=None, + ep_length_stats=None, +): + """ + Return a summary of important stats to write to json. + + Args: + new_dataset_folder_path (str): path to folder that will contain generated dataset + num_success (int): number of successful trajectories generated + num_failures (int): number of failed trajectories + num_attempts (int): number of total attempts + num_problematic (int): number of problematic trajectories that failed due + to a specific exception that was caught + start_time (float or None): starting time for this run from time.time() + ep_length_stats (dict or None): if provided, should have entries that summarize + the episode length statistics over the successfully generated trajectories + + Returns: + important_stats (dict): dictionary with useful summary of statistics + """ + important_stats = dict( + generation_path=new_dataset_folder_path, + success_rate=((100. * num_success) / num_attempts), + failure_rate=((100. 
* num_failures) / num_attempts), + num_success=num_success, + num_failures=num_failures, + num_attempts=num_attempts, + num_problematic=num_problematic, + ) + if (ep_length_stats is not None): + important_stats.update(ep_length_stats) + if start_time is not None: + # add in time taken + important_stats["time spent (hrs)"] = "{:.2f}".format((time.time() - start_time) / 3600.) + return important_stats + + +def generate_dataset( + mg_config, + auto_remove_exp=False, + render=False, + video_path=None, + video_skip=5, + render_image_names=None, + pause_subtask=False, +): + """ + Main function to collect a new dataset with MimicGen. + + Args: + mg_config (MG_Config instance): MimicGen config object + + auto_remove_exp (bool): if True, will remove generation folder if it exists, else + user will be prompted to decide whether to keep existing folder or not + + render (bool): if True, render each data generation attempt on-screen + + video_path (str or None): if provided, render the data generation attempts to the + provided video path + + video_skip (int): skip every nth frame when writing video + + render_image_names (list of str or None): if provided, specify camera names to + use during on-screen / off-screen rendering to override defaults + + pause_subtask (bool): if True, pause after every subtask during generation, for + debugging. 
+ """ + + # time this run + start_time = time.time() + + # check some args + write_video = (video_path is not None) + assert not (render and write_video) # either on-screen or video but not both + if pause_subtask: + assert render, "should enable on-screen rendering for pausing to be useful" + + if write_video: + # debug video - use same cameras as observations + if len(mg_config.obs.camera_names) > 0: + assert render_image_names is None + render_image_names = list(mg_config.obs.camera_names) + + # path to source dataset + source_dataset_path = os.path.expandvars(os.path.expanduser(mg_config.experiment.source.dataset_path)) + + # get environment metadata from dataset + env_meta = get_env_metadata_from_dataset(dataset_path=source_dataset_path) + + # set seed for generation + random.seed(mg_config.experiment.seed) + np.random.seed(mg_config.experiment.seed) + + # create new folder for this data generation run + base_folder = os.path.expandvars(os.path.expanduser(mg_config.experiment.generation.path)) + new_dataset_folder_name = mg_config.experiment.name + new_dataset_folder_path = os.path.join( + base_folder, + new_dataset_folder_name, + ) + print("\nData will be generated at: {}".format(new_dataset_folder_path)) + + # ensure dataset folder does not exist, and make new folder + exist_ok = False + if os.path.exists(new_dataset_folder_path): + if not auto_remove_exp: + ans = input("\nWARNING: dataset folder ({}) already exists! \noverwrite? (y/n)\n".format(new_dataset_folder_path)) + else: + ans = "y" + if ans == "y": + print("Removed old results folder at {}".format(new_dataset_folder_path)) + shutil.rmtree(new_dataset_folder_path) + else: + print("Keeping old dataset folder. 
Note that individual files may still be overwritten.") + exist_ok = True + os.makedirs(new_dataset_folder_path, exist_ok=exist_ok) + + # log terminal output to text file + RobomimicUtils.make_print_logger(txt_file=os.path.join(new_dataset_folder_path, 'log.txt')) + + # save config to disk + MG_FileUtils.write_json( + json_dic=mg_config, + json_path=os.path.join(new_dataset_folder_path, "mg_config.json"), + ) + + print("\n============= Config =============") + print(mg_config) + print("") + + # some paths that we will create inside our new dataset folder + + # new dataset that will be generated + new_dataset_path = os.path.join(new_dataset_folder_path, "demo.hdf5") + + # tmp folder that will contain per-episode hdf5s that were successful (they will be merged later) + tmp_dataset_folder_path = os.path.join(new_dataset_folder_path, "tmp") + os.makedirs(tmp_dataset_folder_path, exist_ok=exist_ok) + + # folder containing logs + json_log_path = os.path.join(new_dataset_folder_path, "logs") + os.makedirs(json_log_path, exist_ok=exist_ok) + + if mg_config.experiment.generation.keep_failed: + # new dataset for failed trajectories, and tmp folder for per-episode hdf5s that failed + new_failed_dataset_path = os.path.join(new_dataset_folder_path, "demo_failed.hdf5") + tmp_dataset_failed_folder_path = os.path.join(new_dataset_folder_path, "tmp_failed") + os.makedirs(tmp_dataset_failed_folder_path, exist_ok=exist_ok) + + # get list of source demonstration keys from source hdf5 + all_demos = MG_FileUtils.get_all_demos_from_dataset( + dataset_path=source_dataset_path, + filter_key=mg_config.experiment.source.filter_key, + start=mg_config.experiment.source.start, + n=mg_config.experiment.source.n, + ) + + # prepare args for creating simulation environment + + # auto-fill camera rendering info if not specified + if (write_video or render) and (render_image_names is None): + render_image_names = RobomimicUtils.get_default_env_cameras(env_meta=env_meta) + if render: + # on-screen 
rendering can only support one camera + assert len(render_image_names) == 1 + + # env args: cameras to use come from debug camera video to write, or from observation collection + camera_names = (mg_config.obs.camera_names if not write_video else render_image_names) + + # env args: don't use image obs when writing debug video + use_image_obs = ((mg_config.obs.collect_obs and (len(mg_config.obs.camera_names) > 0)) if not write_video else False) + use_depth_obs = False + + # simulation environment + env = RobomimicUtils.create_env( + env_meta=env_meta, + env_class=None, + env_name=mg_config.experiment.task.name, + robot=mg_config.experiment.task.robot, + gripper=mg_config.experiment.task.gripper, + camera_names=camera_names, + camera_height=mg_config.obs.camera_height, + camera_width=mg_config.obs.camera_width, + render=render, + render_offscreen=write_video, + use_image_obs=use_image_obs, + use_depth_obs=use_depth_obs, + ) + print("\n==== Using environment with the following metadata ====") + print(json.dumps(env.serialize(), indent=4)) + print("") + + # get information necessary to create env interface + env_interface_name, env_interface_type = MG_FileUtils.get_env_interface_info_from_dataset( + dataset_path=source_dataset_path, + demo_keys=all_demos, + ) + # possibly override from config + if mg_config.experiment.task.interface is not None: + env_interface_name = mg_config.experiment.task.interface + if mg_config.experiment.task.interface_type is not None: + env_interface_type = mg_config.experiment.task.interface_type + + # create environment interface to use during data generation + env_interface = make_interface( + name=env_interface_name, + interface_type=env_interface_type, + # NOTE: env_interface takes underlying simulation environment, not robomimic wrapper + env=env.base_env, + ) + print("Created environment interface: {}".format(env_interface)) + + # make sure we except the same exceptions that we would normally except during policy rollouts + 
exceptions_to_except = env.rollout_exceptions + + # get task spec object from config + task_spec_json_string = mg_config.task.task_spec.dump() + task_spec = MG_TaskSpec.from_json(json_string=task_spec_json_string) + + # make data generator object + data_generator = DataGenerator( + task_spec=task_spec, + dataset_path=source_dataset_path, + demo_keys=all_demos, + ) + + print("\n==== Created Data Generator ====") + print(data_generator) + print("") + + # we might write a video to show the data generation attempts + video_writer = None + if write_video: + video_writer = imageio.get_writer(video_path, fps=20) + + # data generation statistics + num_success = 0 + num_failures = 0 + num_attempts = 0 + num_problematic = 0 + ep_lengths = [] # episode lengths for successfully generated data + selected_src_demo_inds_all = [] # selected source demo index in @all_demos for each trial + selected_src_demo_inds_succ = [] # selected source demo index in @all_demos for each successful trial + + # we will keep generating data until @num_trials successes (if @guarantee_success) else @num_trials attempts + num_trials = mg_config.experiment.generation.num_trials + guarantee_success = mg_config.experiment.generation.guarantee + + while True: + + # generate trajectory + try: + generated_traj = data_generator.generate( + env=env, + env_interface=env_interface, + select_src_per_subtask=mg_config.experiment.generation.select_src_per_subtask, + transform_first_robot_pose=mg_config.experiment.generation.transform_first_robot_pose, + interpolate_from_last_target_pose=mg_config.experiment.generation.interpolate_from_last_target_pose, + render=render, + video_writer=video_writer, + video_skip=video_skip, + camera_names=render_image_names, + pause_subtask=pause_subtask, + ) + except exceptions_to_except as e: + # problematic trajectory - do not have this count towards our total number of attempts, and re-try + print("") + print("*" * 50) + print("WARNING: got rollout exception {}".format(e)) + 
print("*" * 50)
+            print("")
+            num_problematic += 1
+            continue
+
+        # remember selection of source demos for each subtask
+        selected_src_demo_inds_all.append(generated_traj["src_demo_inds"])
+
+        # check if generated trajectory was successful
+        success = bool(generated_traj["success"])
+
+        if success:
+            num_success += 1
+
+            # store successful demonstration
+            ep_lengths.append(generated_traj["actions"].shape[0])
+            MG_FileUtils.write_demo_to_hdf5(
+                folder=tmp_dataset_folder_path,
+                env=env,
+                initial_state=generated_traj["initial_state"],
+                states=generated_traj["states"],
+                observations=(generated_traj["observations"] if mg_config.obs.collect_obs else None),
+                datagen_info=generated_traj["datagen_infos"],
+                actions=generated_traj["actions"],
+                src_demo_inds=generated_traj["src_demo_inds"],
+                src_demo_labels=generated_traj["src_demo_labels"],
+            )
+            selected_src_demo_inds_succ.append(generated_traj["src_demo_inds"])
+        else:
+            num_failures += 1
+
+            # keep this failure only if keep_failed is set ("or" parenthesized: "A and B or C" is "(A and B) or C")
+            if mg_config.experiment.generation.keep_failed and \
+                ((mg_config.experiment.max_num_failures is None) or (num_failures <= mg_config.experiment.max_num_failures)):
+
+                # save failed trajectory in separate folder
+                MG_FileUtils.write_demo_to_hdf5(
+                    folder=tmp_dataset_failed_folder_path,
+                    env=env,
+                    initial_state=generated_traj["initial_state"],
+                    states=generated_traj["states"],
+                    observations=(generated_traj["observations"] if mg_config.obs.collect_obs else None),
+                    datagen_info=generated_traj["datagen_infos"],
+                    actions=generated_traj["actions"],
+                    src_demo_inds=generated_traj["src_demo_inds"],
+                    src_demo_labels=generated_traj["src_demo_labels"],
+                )
+
+        num_attempts += 1
+        print("")
+        print("*" * 50)
+        print("trial {} success: {}".format(num_attempts, success))
+        print("have {} successes out of {} trials so far".format(num_success, num_attempts))
+        print("have {} failures out of {} trials so far".format(num_failures, num_attempts))
+        print("*" * 50)
+
+        # regularly log progress to
disk every so often + if (num_attempts % mg_config.experiment.log_every_n_attempts) == 0: + + # get summary stats + summary_stats = get_important_stats( + new_dataset_folder_path=new_dataset_folder_path, + num_success=num_success, + num_failures=num_failures, + num_attempts=num_attempts, + num_problematic=num_problematic, + start_time=start_time, + ep_length_stats=None, + ) + + # write stats to disk + max_digits = len(str(num_trials * 1000)) + 1 # assume we will never have lower than 0.1% data generation SR + json_file_path = os.path.join(json_log_path, "attempt_{}_succ_{}_rate_{}.json".format( + str(num_attempts).zfill(max_digits), # pad with leading zeros for ordered list of jsons in directory + num_success, + np.round((100. * num_success) / num_attempts, 2), + )) + MG_FileUtils.write_json(json_dic=summary_stats, json_path=json_file_path) + + # termination condition is on enough successes if @guarantee_success or enough attempts otherwise + check_val = num_success if guarantee_success else num_attempts + if check_val >= num_trials: + break + + if write_video: + video_writer.close() + + # merge all new created files + print("\nFinished data generation. Merging per-episode hdf5s together...\n") + MG_FileUtils.merge_all_hdf5( + folder=tmp_dataset_folder_path, + new_hdf5_path=new_dataset_path, + delete_folder=True, + ) + if mg_config.experiment.generation.keep_failed: + MG_FileUtils.merge_all_hdf5( + folder=tmp_dataset_failed_folder_path, + new_hdf5_path=new_failed_dataset_path, + delete_folder=True, + ) + + # get episode length statistics + ep_length_stats = None + if len(ep_lengths) > 0: + ep_lengths = np.array(ep_lengths) + ep_length_mean = float(np.mean(ep_lengths)) + ep_length_std = float(np.std(ep_lengths)) + ep_length_max = int(np.max(ep_lengths)) + ep_length_3std = int(np.ceil(ep_length_mean + 3. 
* ep_length_std)) + ep_length_stats = dict( + ep_length_mean=ep_length_mean, + ep_length_std=ep_length_std, + ep_length_max=ep_length_max, + ep_length_3std=ep_length_3std, + ) + + stats = get_important_stats( + new_dataset_folder_path=new_dataset_folder_path, + num_success=num_success, + num_failures=num_failures, + num_attempts=num_attempts, + num_problematic=num_problematic, + start_time=start_time, + ep_length_stats=ep_length_stats, + ) + print("\nStats Summary") + print(json.dumps(stats, indent=4)) + + # maybe render videos + if mg_config.experiment.render_video: + if (num_success > 0): + playback_video_path = os.path.join(new_dataset_folder_path, "playback_{}.mp4".format(new_dataset_folder_name)) + num_render = mg_config.experiment.num_demo_to_render + print("Rendering successful trajectories...") + RobomimicUtils.make_dataset_video( + dataset_path=new_dataset_path, + video_path=playback_video_path, + num_render=num_render, + ) + else: + print("\n" + "*" * 80) + print("\nWARNING: skipping dataset video creation since no successes") + print("\n" + "*" * 80 + "\n") + if mg_config.experiment.generation.keep_failed: + if (num_failures > 0): + playback_video_path = os.path.join(new_dataset_folder_path, "playback_{}_failed.mp4".format(new_dataset_folder_name)) + num_render = mg_config.experiment.num_fail_demo_to_render + print("Rendering failure trajectories...") + RobomimicUtils.make_dataset_video( + dataset_path=new_failed_dataset_path, + video_path=playback_video_path, + num_render=num_render, + ) + else: + print("\n" + "*" * 80) + print("\nWARNING: skipping dataset video creation since no failures") + print("\n" + "*" * 80 + "\n") + + # return some summary info + final_important_stats = get_important_stats( + new_dataset_folder_path=new_dataset_folder_path, + num_success=num_success, + num_failures=num_failures, + num_attempts=num_attempts, + num_problematic=num_problematic, + start_time=start_time, + ep_length_stats=ep_length_stats, + ) + + # write stats to 
disk + json_file_path = os.path.join(new_dataset_folder_path, "important_stats.json") + MG_FileUtils.write_json(json_dic=final_important_stats, json_path=json_file_path) + + # NOTE: we are not currently saving the choice of source human demonstrations for each trial, + # but you can do that if you wish -- the information is stored in @selected_src_demo_inds_all + # and @selected_src_demo_inds_succ + + return final_important_stats + + +def main(args): + + # load config object + with open(args.config, "r") as f: + ext_cfg = json.load(f) + # config generator from robomimic generates this part of config unused by MimicGen + if "meta" in ext_cfg: + del ext_cfg["meta"] + mg_config = config_factory(ext_cfg["name"], config_type=ext_cfg["type"]) + + # update config with external json - this will throw errors if + # the external config has keys not present in the base config + with mg_config.values_unlocked(): + mg_config.update(ext_cfg) + + # We assume that the external config specifies all subtasks, so + # delete any subtasks not in the external config. 
+ source_subtasks = set(mg_config.task.task_spec.keys()) + new_subtasks = set(ext_cfg["task"]["task_spec"].keys()) + for subtask in (source_subtasks - new_subtasks): + print("deleting subtask {} in original config".format(subtask)) + del mg_config.task.task_spec[subtask] + + # maybe override some settings + if args.task_name is not None: + mg_config.experiment.task.name = args.task_name + + if args.source is not None: + mg_config.experiment.source.dataset_path = args.source + + if args.folder is not None: + mg_config.experiment.generation.path = args.folder + + if args.num_demos is not None: + mg_config.experiment.generation.num_trials = args.num_demos + + if args.seed is not None: + mg_config.experiment.seed = args.seed + + # maybe modify config for debugging purposes + if args.debug: + # shrink length of generation to test whether this run is likely to crash + mg_config.experiment.source.n = 3 + mg_config.experiment.generation.guarantee = False + mg_config.experiment.generation.num_trials = 2 + + # send output to a temporary directory + mg_config.experiment.generation.path = "/tmp/tmp_mimicgen" + + # catch error during generation and print it + res_str = "finished run successfully!" 
+ important_stats = None + try: + important_stats = generate_dataset( + mg_config=mg_config, + auto_remove_exp=args.auto_remove_exp, + render=args.render, + video_path=args.video_path, + video_skip=args.video_skip, + render_image_names=args.render_image_names, + pause_subtask=args.pause_subtask, + ) + except Exception as e: + res_str = "run failed with error:\n{}\n\n{}".format(e, traceback.format_exc()) + print(res_str) + if important_stats is not None: + important_stats = json.dumps(important_stats, indent=4) + print("\nFinal Data Generation Stats") + print(important_stats) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument( + "--config", + type=str, + required=True, + help="path to MimicGen config json", + ) + parser.add_argument( + "--debug", + action='store_true', + help="set this flag to run a quick generation run for debugging purposes", + ) + parser.add_argument( + "--auto-remove-exp", + action='store_true', + help="force delete the experiment folder if it exists" + ) + parser.add_argument( + "--render", + action='store_true', + help="render each data generation attempt on-screen", + ) + parser.add_argument( + "--video_path", + type=str, + default=None, + help="if provided, render the data generation attempts to the provided video path", + ) + parser.add_argument( + "--video_skip", + type=int, + default=5, + help="skip every nth frame when writing video", + ) + parser.add_argument( + "--render_image_names", + type=str, + nargs='+', + default=None, + help="(optional) camera name(s) / image observation(s) to use for rendering on-screen or to video. 
Default is" + "None, which corresponds to a predefined camera for each env type", + ) + parser.add_argument( + "--pause_subtask", + action='store_true', + help="pause after every subtask during generation for debugging - only useful with render flag", + ) + parser.add_argument( + "--source", + type=str, + help="path to source dataset, to override the one in the config", + ) + parser.add_argument( + "--task_name", + type=str, + help="environment name to use for data generation, to override the one in the config", + default=None, + ) + parser.add_argument( + "--folder", + type=str, + help="folder that will be created with new data, to override the one in the config", + default=None, + ) + parser.add_argument( + "--num_demos", + type=int, + help="number of demos to generate, or attempt to generate, to override the one in the config", + default=None, + ) + parser.add_argument( + "--seed", + type=int, + help="seed, to override the one in the config", + default=None, + ) + + args = parser.parse_args() + main(args) diff --git a/mimicgen/scripts/generate_robot_transfer_configs.py b/mimicgen/scripts/generate_robot_transfer_configs.py new file mode 100644 index 0000000..b8f0e8e --- /dev/null +++ b/mimicgen/scripts/generate_robot_transfer_configs.py @@ -0,0 +1,225 @@ +# Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# Licensed under the NVIDIA Source Code License [see LICENSE for details]. + +""" +We utilize robomimic's config generator class to easily generate data generation configs for the +robot transfer set of experiments in the paper, where we use source data collected on the Panda arm +to generate demonstrations for other robot arms. It can be modified easily to generate other configs. + +The global variables at the top of the file should be configured manually. + +See https://robomimic.github.io/docs/tutorials/hyperparam_scan.html for more info. 
+
"""
+import os
+import json
+import shutil
+
+import robomimic
+from robomimic.utils.hyperparam_utils import ConfigGenerator
+
+import mimicgen
+import mimicgen.utils.config_utils as ConfigUtils
+from mimicgen.utils.file_utils import config_generator_to_script_lines
+
+
+# set path to folder containing src datasets
+# NOTE(review): package was renamed mimicgen_envs -> mimicgen in this patch, so use the renamed (already imported) package
+SRC_DATA_DIR = os.path.join(mimicgen.__path__[0], "../datasets/source")
+
+# set base folder for where to copy each base config and generate new config files for data generation
+CONFIG_DIR = "/tmp/robot_configs"
+
+# set base folder for newly generated datasets
+OUTPUT_FOLDER = "/tmp/robot_datasets"
+
+# number of trajectories to generate (or attempt to generate)
+NUM_TRAJ = 1000
+
+# whether to guarantee that many successful trajectories (e.g. keep running until that many successes, or stop at that many attempts)
+GUARANTEE = True
+
+# whether to run a quick debug run instead of full generation
+DEBUG = False
+
+# camera settings for collecting observations
+CAMERA_NAMES = ["agentview", "robot0_eye_in_hand"]
+CAMERA_SIZE = (84, 84)
+
+# path to base config(s)
+BASE_BASE_CONFIG_PATH = os.path.join(mimicgen.__path__[0], "exps/templates/robosuite")
+BASE_CONFIGS = [
+    os.path.join(BASE_BASE_CONFIG_PATH, "square.json"),
+    os.path.join(BASE_BASE_CONFIG_PATH, "threading.json"),
+]
+
+
+def make_generators(base_configs):
+    """
+    An easy way to make multiple config generators by using different
+    settings for each.
+ """ + all_settings = [ + # square + dict( + dataset_path=os.path.join(SRC_DATA_DIR, "square.hdf5"), + dataset_name="square", + generation_path="{}/square".format(OUTPUT_FOLDER), + tasks=["Square_D0", "Square_D1"], + task_names=["D0", "D1"], + robots=["Sawyer", "IIWA", "UR5e"], + grippers=["RethinkGripper", "Robotiq85Gripper", "Robotiq85Gripper"], + select_src_per_subtask=False, + selection_strategy="nearest_neighbor_object", + selection_strategy_kwargs=dict(nn_k=3), + subtask_term_offset_range=[[10, 20], None], + ), + # threading + dict( + dataset_path=os.path.join(SRC_DATA_DIR, "threading.hdf5"), + dataset_name="threading", + generation_path="{}/threading".format(OUTPUT_FOLDER), + tasks=["Threading_D0", "Threading_D1"], + task_names=["D0", "D1"], + robots=["Sawyer", "IIWA", "UR5e"], + grippers=["RethinkGripper", "Robotiq85Gripper", "Robotiq85Gripper"], + select_src_per_subtask=False, + selection_strategy="random", + selection_strategy_kwargs=None, + subtask_term_offset_range=[[5, 10], None], + ), + ] + + assert len(base_configs) == len(all_settings) + ret = [] + for conf, setting in zip(base_configs, all_settings): + ret.append(make_generator(os.path.expanduser(conf), setting)) + return ret + + +def make_generator(config_file, settings): + """ + Implement this function to setup your own hyperparameter scan. + Each config generator is created using a base config file (@config_file) + and a @settings dictionary that can be used to modify which parameters + are set. 
+ """ + generator = ConfigGenerator( + base_config_file=config_file, + script_file="", # will be overriden in next step + ) + + # set basic settings + ConfigUtils.set_basic_settings( + generator=generator, + group=0, + source_dataset_path=settings["dataset_path"], + source_dataset_name=settings["dataset_name"], + generation_path=settings["generation_path"], + guarantee=GUARANTEE, + num_traj=NUM_TRAJ, + num_src_demos=10, + max_num_failures=25, + num_demo_to_render=10, + num_fail_demo_to_render=25, + verbose=False, + ) + + # set settings for subtasks + ConfigUtils.set_subtask_settings( + generator=generator, + group=0, + base_config_file=config_file, + select_src_per_subtask=settings["select_src_per_subtask"], + subtask_term_offset_range=settings["subtask_term_offset_range"], + selection_strategy=settings.get("selection_strategy", None), + selection_strategy_kwargs=settings.get("selection_strategy_kwargs", None), + # default settings: action noise 0.05, with 5 interpolation steps + action_noise=0.05, + num_interpolation_steps=5, + num_fixed_steps=0, + verbose=False, + ) + + # set task to generate data on + generator.add_param( + key="experiment.task.name", + name="task", + group=1, + values=settings["tasks"], + value_names=settings["task_names"], + ) + + if settings.get("robots", None) is not None: + generator.add_param( + key="experiment.task.robot", + name="robot", + group=2, + values=settings["robots"], + ) + if settings.get("grippers", None) is not None: + generator.add_param( + key="experiment.task.gripper", + name="gripper", + group=2, + values=settings["grippers"], + ) + + # set observation collection settings + ConfigUtils.set_obs_settings( + generator=generator, + group=-1, + collect_obs=True, + camera_names=CAMERA_NAMES, + camera_height=CAMERA_SIZE[0], + camera_width=CAMERA_SIZE[1], + ) + + if DEBUG: + # set debug settings + ConfigUtils.set_debug_settings( + generator=generator, + group=-1, + ) + + # seed + generator.add_param( + key="experiment.seed", + 
name="", + group=1000000, + values=[1], + ) + + return generator + + +def main(): + + # make config generators + generators = make_generators(base_configs=BASE_CONFIGS) + + # maybe remove existing config directory + config_dir = CONFIG_DIR + if os.path.exists(config_dir): + ans = input("Non-empty dir at {} will be removed.\nContinue (y / n)? \n".format(config_dir)) + if ans != "y": + exit() + shutil.rmtree(config_dir) + + all_json_files, run_lines = config_generator_to_script_lines(generators, config_dir=config_dir) + + real_run_lines = [] + for line in run_lines: + line = line.strip().replace("train.py", "generate_dataset.py") + line += " --auto-remove-exp" + real_run_lines.append(line) + run_lines = real_run_lines + + print("configs") + print(json.dumps(all_json_files, indent=4)) + print("runs") + print(json.dumps(run_lines, indent=4)) + + +if __name__ == "__main__": + main() diff --git a/mimicgen_envs/scripts/generate_training_configs.py b/mimicgen/scripts/generate_training_configs_for_public_datasets.py similarity index 96% rename from mimicgen_envs/scripts/generate_training_configs.py rename to mimicgen/scripts/generate_training_configs_for_public_datasets.py index 00d1bc8..7f4bd16 100644 --- a/mimicgen_envs/scripts/generate_training_configs.py +++ b/mimicgen/scripts/generate_training_configs_for_public_datasets.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # # Licensed under the NVIDIA Source Code License [see LICENSE for details]. 
@@ -12,8 +12,8 @@ from robomimic.config import config_factory, Config from robomimic.scripts.generate_paper_configs import modify_config_for_default_low_dim_exp, modify_config_for_default_image_exp -import mimicgen_envs -from mimicgen_envs import DATASET_REGISTRY +import mimicgen +from mimicgen import DATASET_REGISTRY def set_obs_config(config, obs_modality): @@ -230,7 +230,7 @@ def generate_all_configs( parser.add_argument( "--config_dir", type=str, - default=os.path.join(mimicgen_envs.__path__[0], "exps/paper"), + default=os.path.join(mimicgen.__path__[0], "exps/paper"), help="Directory where generated configs will be placed. Defaults to 'paper' subfolder in exps folder of repository", ) @@ -238,7 +238,7 @@ def generate_all_configs( parser.add_argument( "--dataset_dir", type=str, - default=os.path.join(mimicgen_envs.__path__[0], "../datasets"), + default=os.path.join(mimicgen.__path__[0], "../datasets"), help="Base dataset directory for released datasets. Defaults to datasets folder in repository.", ) @@ -246,7 +246,7 @@ def generate_all_configs( parser.add_argument( "--output_dir", type=str, - default=os.path.join(mimicgen_envs.__path__[0], "../training_results"), + default=os.path.join(mimicgen.__path__[0], "../training_results"), help="Base output directory for all training runs that will be written to generated configs. Defaults to training_results folder in repository", ) diff --git a/mimicgen_envs/scripts/get_reset_videos.py b/mimicgen/scripts/get_reset_videos.py similarity index 98% rename from mimicgen_envs/scripts/get_reset_videos.py rename to mimicgen/scripts/get_reset_videos.py index c56c2b9..571cbfc 100644 --- a/mimicgen_envs/scripts/get_reset_videos.py +++ b/mimicgen/scripts/get_reset_videos.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # # Licensed under the NVIDIA Source Code License [see LICENSE for details]. 
@@ -15,7 +15,7 @@ import robosuite from robosuite.controllers import load_controller_config -import mimicgen_envs +import mimicgen # base output folder diff --git a/mimicgen/scripts/get_source_info.py b/mimicgen/scripts/get_source_info.py new file mode 100644 index 0000000..34e6df0 --- /dev/null +++ b/mimicgen/scripts/get_source_info.py @@ -0,0 +1,60 @@ +# Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# Licensed under the NVIDIA Source Code License [see LICENSE for details]. + +""" +Helper script to report source dataset information. It verifies that the dataset has a +"datagen_info" field for the first episode and prints its structure. +""" +import h5py +import argparse + +import mimicgen +import mimicgen.utils.file_utils as MG_FileUtils + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument( + "--dataset", + type=str, + help="path to hdf5 dataset", + required=True, + ) + args = parser.parse_args() + + dataset_path = args.dataset + + # get first demonstration + first_demo_key = MG_FileUtils.get_all_demos_from_dataset( + dataset_path=dataset_path, + filter_key=None, + start=None, + n=1, + )[0] + f = h5py.File(dataset_path, "r") + ep_grp = f["data/{}".format(first_demo_key)] + + # verify datagen info exists + assert "datagen_info" in ep_grp, "Could not find MimicGen metadata in dataset {}. 
Ensure you have run prepare_src_dataset.py on this hdf5".format(dataset_path) + + # environment interface information + env_interface_name = ep_grp["datagen_info"].attrs["env_interface_name"] + env_interface_type = ep_grp["datagen_info"].attrs["env_interface_type"] + + print("\nEnvironment Interface: {}".format(env_interface_name)) + print("Environment Interface Type: {}".format(env_interface_type)) + + # structure of datagen info + ep_datagen_info = ep_grp["datagen_info"] + + print("\nStructure of datagen_info in episode {}:".format(first_demo_key)) + for k in ep_datagen_info: + if k in ["object_poses", "subtask_term_signals"]: + print(" {}:".format(k)) + for k2 in ep_datagen_info[k]: + print(" {}: shape {}".format(k2, ep_datagen_info[k][k2].shape)) + else: + print(" {}: shape {}".format(k, ep_datagen_info[k].shape)) + print("") + + f.close() diff --git a/mimicgen/scripts/merge_hdf5.py b/mimicgen/scripts/merge_hdf5.py new file mode 100644 index 0000000..600f782 --- /dev/null +++ b/mimicgen/scripts/merge_hdf5.py @@ -0,0 +1,110 @@ +# Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# Licensed under the NVIDIA Source Code License [see LICENSE for details]. + +""" +Script to merge all hdf5s if scripts/generate_dataset.py is incomplete, +and doesn't make it to the line that merges all the hdf5s. +""" + +import os +import shutil +import json +import h5py +import argparse +import imageio + +import numpy as np + +import mimicgen +import mimicgen.utils.file_utils as MG_FileUtils +from mimicgen.configs import config_factory +from mimicgen.scripts.generate_dataset import make_dataset_video, postprocess_motion_planning_dataset + + +def merge_hdf5s(args): + """ + Main function to collect a new dataset using trajectory transforms from + an existing dataset. 
+ """ + have_config = (args.config is not None) + have_folder = (args.folder is not None) + assert have_config or have_folder + assert not (have_config and have_folder) + + folder_path = args.folder + if have_config: + # get folder path from config + + # load config object + with open(args.config, "r") as f: + ext_cfg = json.load(f) + # config generator from robomimic generates this part of config unused by MimicGen + if "meta" in ext_cfg: + del ext_cfg["meta"] + mg_config = config_factory(ext_cfg["name"], config_type=ext_cfg["type"]) + # update config with external json - this will throw errors if + # the external config has keys not present in the base config + with mg_config.values_unlocked(): + mg_config.update(ext_cfg) + + base_folder = os.path.expandvars(os.path.expanduser(mg_config.experiment.generation.path)) # path where new folder will be generated + new_dataset_folder_name = mg_config.experiment.name # name of folder to generate + folder_path = os.path.join( + base_folder, + new_dataset_folder_name, + ) + + path_to_hdf5s = os.path.join(folder_path, "tmp") + path_to_new_hdf5 = os.path.join(folder_path, "demo.hdf5") + path_to_failed_hdf5s = os.path.join(folder_path, "tmp_failed") + path_to_new_failed_hdf5 = os.path.join(folder_path, "demo_failed.hdf5") + + assert os.path.exists(path_to_hdf5s) + merge_failures = os.path.exists(path_to_failed_hdf5s) + + # merge all new created files + num_success = MG_FileUtils.merge_all_hdf5( + folder=path_to_hdf5s, + new_hdf5_path=path_to_new_hdf5, + delete_folder=args.delete, + dry_run=args.count, + ) + print("NUM SUCCESS: {}".format(num_success)) + if merge_failures: + num_failures = MG_FileUtils.merge_all_hdf5( + folder=path_to_failed_hdf5s, + new_hdf5_path=path_to_new_failed_hdf5, + delete_folder=args.delete, + dry_run=args.count, + ) + print("NUM FAILURE: {}".format(num_failures)) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument( + "--config", + type=str, + default=None, + 
help="path to json for dataset generation, used to find dataset folder", + ) + parser.add_argument( + "--folder", + type=str, + default=None, + help="path to dataset folder that is generated by scripts/generate_dataset.py", + ) + parser.add_argument( + "--count", + action='store_true', + help="if provided just count the number of demos instead of merging all of them", + ) + parser.add_argument( + "--delete", + action='store_true', + help="if provided, delete the tmp directories instead of saving them", + ) + + args = parser.parse_args() + merge_hdf5s(args) diff --git a/mimicgen/scripts/prepare_all_src_datasets.sh b/mimicgen/scripts/prepare_all_src_datasets.sh new file mode 100644 index 0000000..aa2ff7f --- /dev/null +++ b/mimicgen/scripts/prepare_all_src_datasets.sh @@ -0,0 +1,76 @@ +#!/bin/bash + +# Store commands used to prepare source datasets for each environment. + + +# coffee +python prepare_src_dataset.py \ + --dataset ../../../mimicgen_environments/datasets/source/coffee.hdf5 \ + --env_interface MG_Coffee \ + --env_interface_type robosuite + +# coffee_preparation +python prepare_src_dataset.py \ + --dataset ../../../mimicgen_environments/datasets/source/coffee_preparation.hdf5 \ + --env_interface MG_CoffeePreparation \ + --env_interface_type robosuite + +# hammer_cleanup +python prepare_src_dataset.py \ + --dataset ../../../mimicgen_environments/datasets/source/hammer_cleanup.hdf5 \ + --env_interface MG_HammerCleanup \ + --env_interface_type robosuite + +# kitchen +python prepare_src_dataset.py \ + --dataset ../../../mimicgen_environments/datasets/source/kitchen.hdf5 \ + --env_interface MG_Kitchen \ + --env_interface_type robosuite + +# mug_cleanup +python prepare_src_dataset.py \ + --dataset ../../../mimicgen_environments/datasets/source/mug_cleanup.hdf5 \ + --env_interface MG_MugCleanup \ + --env_interface_type robosuite + +# nut_assembly +python prepare_src_dataset.py \ + --dataset ../../../mimicgen_environments/datasets/source/nut_assembly.hdf5 \ + 
--env_interface MG_NutAssembly \ + --env_interface_type robosuite + +# pick_place +python prepare_src_dataset.py \ + --dataset ../../../mimicgen_environments/datasets/source/pick_place.hdf5 \ + --env_interface MG_PickPlace \ + --env_interface_type robosuite + +# square +python prepare_src_dataset.py \ + --dataset ../../../mimicgen_environments/datasets/source/square.hdf5 \ + --env_interface MG_Square \ + --env_interface_type robosuite + +# stack +python prepare_src_dataset.py \ + --dataset ../../../mimicgen_environments/datasets/source/stack.hdf5 \ + --env_interface MG_Stack \ + --env_interface_type robosuite + +# stack_three +python prepare_src_dataset.py \ + --dataset ../../../mimicgen_environments/datasets/source/stack_three.hdf5 \ + --env_interface MG_StackThree \ + --env_interface_type robosuite + +# threading +python prepare_src_dataset.py \ + --dataset ../../../mimicgen_environments/datasets/source/threading.hdf5 \ + --env_interface MG_Threading \ + --env_interface_type robosuite + +# three_piece_assembly +python prepare_src_dataset.py \ + --dataset ../../../mimicgen_environments/datasets/source/three_piece_assembly.hdf5 \ + --env_interface MG_ThreePieceAssembly \ + --env_interface_type robosuite diff --git a/mimicgen/scripts/prepare_src_dataset.py b/mimicgen/scripts/prepare_src_dataset.py new file mode 100644 index 0000000..7e8b339 --- /dev/null +++ b/mimicgen/scripts/prepare_src_dataset.py @@ -0,0 +1,262 @@ +# Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# Licensed under the NVIDIA Source Code License [see LICENSE for details]. + +""" +Script to extract information needed for data generation from low-dimensional simulation states +in a source dataset and add it to the source dataset. Basically a stripped down version of +dataset_states_to_obs.py script in the robomimic codebase, with a handful of modifications. 
+ +Example usage: + + # prepare a source dataset collected on robosuite Stack task + python prepare_src_dataset.py --dataset /path/to/stack.hdf5 --env_interface MG_Stack --env_interface_type robosuite + + # prepare a source dataset collected on robosuite Square task, but only use first 10 demos, and write output to new hdf5 + python prepare_src_dataset.py --dataset /path/to/square.hdf5 --env_interface MG_Square --env_interface_type robosuite --n 10 --output /tmp/square_new.hdf5 +""" +import os +import shutil +import json +import h5py +import argparse +import numpy as np +from copy import deepcopy +from tqdm import tqdm + +import robomimic +import robomimic.utils.tensor_utils as TensorUtils +import robomimic.utils.file_utils as FileUtils +import robomimic.utils.env_utils as EnvUtils +from robomimic.envs.env_base import EnvBase + +import mimicgen +import mimicgen.utils.file_utils as MG_FileUtils +from mimicgen.env_interfaces.base import make_interface + + +def extract_datagen_info_from_trajectory( + env, + env_interface, + initial_state, + states, + actions, +): + """ + Helper function to extract observations, rewards, and dones along a trajectory using + the simulator environment. 
+ + Args: + env (instance of robomimic EnvBase): environment + + env_interface (MG_EnvInterface instance): environment interface for some data generation operations + + initial_state (dict): initial simulation state to load + + states (np.array): array of simulation states to load to extract information + + actions (np.array): array of actions + + Returns: + datagen_infos (dict): the datagen info objects across all timesteps represented as a dictionary of + numpy arrays, for easy writes to an hdf5 + """ + assert isinstance(env, EnvBase) + assert len(states) == actions.shape[0] + + # load the initial state + env.reset() + env.reset_to(initial_state) + + all_datagen_infos = [] + traj_len = len(states) + for t in range(traj_len): + # reset to state + env.reset_to({"states" : states[t]}) + + # extract datagen info as a dictionary + datagen_info = env_interface.get_datagen_info(action=actions[t]).to_dict() + all_datagen_infos.append(datagen_info) + + # convert list of dict to dict of list for datagen info dictionaries (for convenient writes to hdf5 dataset) + all_datagen_infos = TensorUtils.list_of_flat_dict_to_dict_of_list(all_datagen_infos) + + for k in all_datagen_infos: + if k in ["object_poses", "subtask_term_signals"]: + # convert list of dict to dict of list again + all_datagen_infos[k] = TensorUtils.list_of_flat_dict_to_dict_of_list(all_datagen_infos[k]) + # list to numpy array + for k2 in all_datagen_infos[k]: + all_datagen_infos[k][k2] = np.array(all_datagen_infos[k][k2]) + else: + # list to numpy array + all_datagen_infos[k] = np.array(all_datagen_infos[k]) + + return all_datagen_infos + + +def prepare_src_dataset( + dataset_path, + env_interface_name, + env_interface_type, + filter_key=None, + n=None, + output_path=None, +): + """ + Adds DatagenInfo object instance for each timestep in each source demonstration trajectory + and stores it under the "datagen_info" key for each episode. 
Also store the @env_interface_name + and @env_interface_type used in the attribute of each key. This information is used during + MimicGen data generation. + + Args: + dataset_path (str): path to input hdf5 dataset, which will be modified in-place unless + @output_path is provided + + env_interface_name (str): name of environment interface class to use for this source dataset + + env_interface_type (str): type of environment interface to use for this source dataset + + filter_key (str or None): name of filter key + + n (int or None): if provided, stop after n trajectories are processed + + output_path (str or None): if provided, write a new hdf5 here instead of modifying the + original dataset in-place + """ + + # maybe write to new file instead of modifying existing file in-place + if output_path is not None: + shutil.copy(dataset_path, output_path) + dataset_path = output_path + + # create environment that was to collect source demonstrations + env_meta = FileUtils.get_env_metadata_from_dataset(dataset_path=dataset_path) + env = EnvUtils.create_env_for_data_processing( + env_meta=env_meta, + camera_names=[], + camera_height=84, + camera_width=84, + reward_shaping=False, + ) + print("") + print("==== Using environment with the following metadata ====") + print(json.dumps(env.serialize(), indent=4)) + print("") + + # create environment interface for us to grab relevant information from simulation at each timestep + env_interface = make_interface( + name=env_interface_name, + interface_type=env_interface_type, + # NOTE: env_interface takes underlying simulation environment, not robomimic wrapper + env=env.base_env, + ) + print("Created environment interface: {}".format(env_interface)) + print("") + + # some operations are env-type-specific + is_robosuite_env = EnvUtils.is_robosuite_env(env_meta) + + # get list of source demonstration keys from source hdf5 + demos = MG_FileUtils.get_all_demos_from_dataset( + dataset_path=dataset_path, + filter_key=filter_key, + 
start=None, + n=n, + ) + + print("File that will be modified with datagen info: {}".format(dataset_path)) + print("") + + # open file to modify it + f = h5py.File(dataset_path, "a") + + total_samples = 0 + for ind in tqdm(range(len(demos))): + ep = demos[ind] + ep_grp = f["data/{}".format(ep)] + + # prepare states to reload from + states = ep_grp["states"][()] + initial_state = dict(states=states[0]) + if is_robosuite_env: + initial_state["model"] = ep_grp.attrs["model_file"] + + # extract datagen info + actions = ep_grp["actions"][()] + datagen_info = extract_datagen_info_from_trajectory( + env=env, + env_interface=env_interface, + initial_state=initial_state, + states=states, + actions=actions, + ) + + # delete old dategen info if it already exists + if "datagen_info" in ep_grp: + del ep_grp["datagen_info"] + + for k in datagen_info: + if k in ["object_poses", "subtask_term_signals"]: + # handle dict + for k2 in datagen_info[k]: + ep_grp.create_dataset("datagen_info/{}/{}".format(k, k2), data=np.array(datagen_info[k][k2])) + else: + ep_grp.create_dataset("datagen_info/{}".format(k), data=np.array(datagen_info[k])) + + # remember the env interface used too + ep_grp["datagen_info"].attrs["env_interface_name"] = env_interface_name + ep_grp["datagen_info"].attrs["env_interface_type"] = env_interface_type + + print("Modified {} trajectories to include datagen info.".format(len(demos))) + f.close() + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument( + "--dataset", + type=str, + required=True, + help="path to input hdf5 dataset, which will be modified in-place", + ) + parser.add_argument( + "--env_interface", + type=str, + required=True, + help="name of environment interface class to use for this source dataset", + ) + parser.add_argument( + "--env_interface_type", + type=str, + required=True, + help="type of environment interface to use for this source dataset", + ) + parser.add_argument( + "--n", + type=int, + default=None, + 
help="(optional) stop after n trajectories are processed", + ) + parser.add_argument( + "--filter_key", + type=str, + default=None, + help="(optional) name of filter key, to select a subset of demo keys in the source hdf5", + ) + parser.add_argument( + "--output", + type=str, + default=None, + help="(optional) path to output hdf5 dataset, instead of modifying existing dataset in-place", + ) + + args = parser.parse_args() + prepare_src_dataset( + dataset_path=args.dataset, + env_interface_name=args.env_interface, + env_interface_type=args.env_interface_type, + filter_key=args.filter_key, + n=args.n, + output_path=args.output, + ) diff --git a/mimicgen/scripts/visualize_subtasks.py b/mimicgen/scripts/visualize_subtasks.py new file mode 100644 index 0000000..5fbc0e5 --- /dev/null +++ b/mimicgen/scripts/visualize_subtasks.py @@ -0,0 +1,357 @@ +# Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# Licensed under the NVIDIA Source Code License [see LICENSE for details]. + +""" +A script to visualize each subtask in a source demonstration. This is a useful way to +debug the subtask termination signals in a set of source demonstrations, as well as +the choice of maximum subtask termination offsets. 
+ +Examples: + + # render on-screen + python visualize_subtasks.py --dataset /path/to/demo.hdf5 --config /path/to/config.json --render + + # render to video + python visualize_subtasks.py --dataset /path/to/demo.hdf5 --config /path/to/config.json --video_path /path/to/video.mp4 + + # specify subtask information manually instead of using a config + python visualize_subtasks.py --dataset /path/to/demo.hdf5 --signals grasp_1 insert_1 grasp_2 --offsets 10 10 10 --render + +""" + +import os +import sys +import json +import h5py +import argparse +import imageio +import numpy as np + +import robomimic +import robomimic.utils.obs_utils as ObsUtils +import robomimic.utils.env_utils as EnvUtils +from robomimic.envs.env_base import EnvBase +from robomimic.utils.file_utils import get_env_metadata_from_dataset + +import mimicgen +import mimicgen.utils.file_utils as MG_FileUtils +import mimicgen.utils.robomimic_utils as RobomimicUtils +from mimicgen.utils.misc_utils import add_red_border_to_frame +from mimicgen.configs import MG_TaskSpec + + +def visualize_subtasks_with_env( + env, + initial_state, + states, + subtask_end_indices, + render=False, + video_writer=None, + video_skip=5, + camera_names=None, +): + """ + Helper function to visualize each subtask in a trajectory using the simulator environment. + If using on-screen rendering, the script will pause for input at the end of each subtask. If + writing to a video, each subtask will toggle between having a red border around each + frame and no border in the video. 
+ + Args: + env (instance of EnvBase): environment + initial_state (dict): initial simulation state to load + states (list): list of simulation states to load + subtask_end_indices (list): list containing the end index for each subtask + render (bool): if True, render on-screen + video_writer (imageio writer): video writer + video_skip (int): determines rate at which environment frames are written to video + camera_names (list): determines which camera(s) are used for rendering. Pass more than + one to output a video with multiple camera views concatenated horizontally. + """ + assert isinstance(env, EnvBase) + + write_video = (video_writer is not None) + video_count = 0 + assert not (render and write_video) + assert render or write_video + + # load the initial state + env.reset() + env.reset_to(initial_state) + traj_len = len(states) + + cur_subtask_ind = 0 + should_add_border_to_frame = False + for i in range(traj_len): + # reset to state + env.reset_to({"states" : states[i]}) + + # whether we are on last index of current subtask + is_last_subtask_ind = (i == subtask_end_indices[cur_subtask_ind] - 1) + + # on-screen render + if render: + env.render(mode="human", camera_name=camera_names[0]) + + if is_last_subtask_ind: + # pause on last index of current subtask + input("Pausing after subtask {} execution. 
Press any key to continue...".format(cur_subtask_ind)) + cur_subtask_ind += 1 + + # video render + if write_video: + if video_count % video_skip == 0: + video_img = [] + for cam_name in camera_names: + video_img.append(env.render(mode="rgb_array", height=512, width=512, camera_name=cam_name)) + video_img = np.concatenate(video_img, axis=1) # concatenate horizontally + if should_add_border_to_frame: + video_img = add_red_border_to_frame(video_img) + video_writer.append_data(video_img) + video_count += 1 + + if is_last_subtask_ind: + # toggle whether to add red border for next subtask + should_add_border_to_frame = (not should_add_border_to_frame) + cur_subtask_ind += 1 + + +def visualize_subtasks_with_obs( + traj_grp, + subtask_end_indices, + video_writer, + video_skip=5, + image_names=None, +): + """ + Helper function to visualize each subtask in a trajectory by writing image observations + to a video. Each subtask will toggle between having a red border around each + frame and no border in the video. + + Args: + traj_grp (hdf5 file group): hdf5 group which corresponds to the dataset trajectory to playback + subtask_end_indices (list): list containing the end index for each subtask + video_writer (imageio writer): video writer + video_skip (int): determines rate at which environment frames are written to video + image_names (list): determines which image observations are used for rendering. Pass more than + one to output a video with multiple image observations concatenated horizontally. 
+    """
+    assert image_names is not None, "error: must specify at least one image observation to use in @image_names"
+    assert video_writer is not None
+    video_count = 0
+
+    traj_len = traj_grp["actions"].shape[0]
+    should_add_border_to_frame = False
+    cur_subtask_ind = 0
+    for i in range(traj_len):
+        # whether we are on last index of current subtask
+        is_last_subtask_ind = (i == subtask_end_indices[cur_subtask_ind] - 1)
+
+        if video_count % video_skip == 0:
+            # concatenate image obs together
+            im = [traj_grp["obs/{}".format(k)][i] for k in image_names]
+            frame = np.concatenate(im, axis=1)
+            if should_add_border_to_frame:
+                frame = add_red_border_to_frame(frame)
+            video_writer.append_data(frame)
+        video_count += 1
+
+        if is_last_subtask_ind:
+            # toggle whether to add red border for next subtask
+            should_add_border_to_frame = (not should_add_border_to_frame)
+            cur_subtask_ind += 1
+
+
+def visualize_subtasks(args):
+    # some arg checking
+    write_video = (args.video_path is not None)
+
+    # either on-screen or video but not both
+    assert not (args.render and write_video)
+
+    # either config or signals and offsets should be provided, but not both
+    assert (args.config is not None) or ((args.signals is not None) and (args.offsets is not None))
+    assert (args.config is None) or ((args.signals is None) and (args.offsets is None))
+
+    # Auto-fill camera rendering info if not specified
+    if args.render_image_names is None:
+        env_meta = get_env_metadata_from_dataset(dataset_path=args.dataset)
+        args.render_image_names = RobomimicUtils.get_default_env_cameras(env_meta=env_meta)
+
+    if args.render:
+        # on-screen rendering can only support one camera
+        assert len(args.render_image_names) == 1
+
+    if args.use_obs:
+        assert not args.render
+    else:
+        # create environment only if not playing back with observations
+
+        # need to make sure ObsUtils knows which observations are images, but it doesn't matter
+        # for playback since observations are unused. 
Pass a dummy spec here. + dummy_spec = dict( + obs=dict( + low_dim=["robot0_eef_pos"], + rgb=[], + # image=[], + ), + ) + ObsUtils.initialize_obs_utils_with_obs_specs(obs_modality_specs=dummy_spec) + + env_meta = get_env_metadata_from_dataset(dataset_path=args.dataset) + env = EnvUtils.create_env_from_metadata(env_meta=env_meta, render=args.render, render_offscreen=write_video) + + # some operations for playback are env-type-specific + is_robosuite_env = EnvUtils.is_robosuite_env(env_meta) + + # get demonstrations to visualize subtasks for + dataset_path = args.dataset + demo_keys = MG_FileUtils.get_all_demos_from_dataset( + dataset_path=dataset_path, + filter_key=args.filter_key, + start=None, + n=args.n, + ) + + # we will parse the source dataset to get subtask boundaries using either the task spec in the + # provided config or the provided arguments + task_spec = None + subtask_term_signals = None + subtask_term_offset_ranges = None + if args.config is not None: + with open(args.config, 'r') as f_config: + mg_config = json.load(f_config) + task_spec = MG_TaskSpec.from_json(json_dict=mg_config["task"]["task_spec"]) + else: + subtask_term_signals = args.signals + [None] + subtask_term_offset_ranges = [(0, offset) for offset in args.offsets] + [None] + + # parse dataset to get subtask boundaries + _, subtask_indices, _, subtask_term_offset_ranges_ret = MG_FileUtils.parse_source_dataset( + dataset_path=dataset_path, + demo_keys=demo_keys, + task_spec=task_spec, + subtask_term_signals=subtask_term_signals, + subtask_term_offset_ranges=subtask_term_offset_ranges, + ) + + # apply maximum offset to each subtask boundary + offsets_to_apply = [x[1] for x in subtask_term_offset_ranges_ret] + offsets_to_apply[-1] = 0 + # subtask_indices is shape (N, S, 2) where N is num demos, S is num subtasks and each entry is 2-tuple of start and end + subtask_end_indices = subtask_indices[:, :, 1] + subtask_end_indices = subtask_end_indices + np.array(offsets_to_apply)[None] # offsets 
shape (1, S)
+
+    f = h5py.File(args.dataset, "r")
+
+    # maybe dump video
+    video_writer = None
+    if write_video:
+        video_writer = imageio.get_writer(args.video_path, fps=20)
+
+    for ind in range(len(demo_keys)):
+        ep = demo_keys[ind]
+        print("Playing back episode: {}".format(ep))
+
+        if args.use_obs:
+            traj_grp = f["data/{}".format(ep)]
+            visualize_subtasks_with_obs(
+                traj_grp=traj_grp,
+                subtask_end_indices=subtask_end_indices[ind],
+                video_writer=video_writer,
+                video_skip=args.video_skip,
+                image_names=args.render_image_names,
+            )
+            continue
+
+        states = f["data/{}/states".format(ep)][()]
+        initial_state = dict(states=states[0])
+        if is_robosuite_env:
+            initial_state["model"] = f["data/{}".format(ep)].attrs["model_file"]
+        visualize_subtasks_with_env(
+            env=env,
+            initial_state=initial_state,
+            states=states,
+            subtask_end_indices=subtask_end_indices[ind],
+            render=args.render,
+            video_writer=video_writer,
+            video_skip=args.video_skip,
+            camera_names=args.render_image_names,
+        )
+
+    f.close()
+    if write_video:
+        video_writer.close()
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "--dataset",
+        type=str,
+        help="path to hdf5 dataset",
+        required=True,
+    )
+    parser.add_argument(
+        "--config",
+        type=str,
+        default=None,
+        help="use config to infer sequence of subtask signals and offsets",
+    )
+    parser.add_argument(
+        "--signals",
+        type=str,
+        nargs='+',
+        default=None,
+        help="specify sequence of subtask termination signals for all except last subtask",
+    )
+    parser.add_argument(
+        "--offsets",
+        type=int,
+        nargs='+',
+        default=None,
+        help="specify sequence of maximum subtask termination offsets for all except last subtask",
+    )
+    parser.add_argument(
+        "--filter_key",
+        type=str,
+        default=None,
+        help="(optional) filter key, to select a subset of trajectories in the file",
+    )
+    parser.add_argument(
+        "--n",
+        type=int,
+        default=None,
+        help="(optional) stop after n trajectories",
+    )
+    parser.add_argument(
+        
"--use-obs", + action='store_true', + help="visualize trajectories with dataset image observations instead of simulator", + ) + parser.add_argument( + "--render", + action='store_true', + help="on-screen rendering", + ) + parser.add_argument( + "--video_path", + type=str, + default=None, + help="(optional) render trajectories to this video file path", + ) + parser.add_argument( + "--video_skip", + type=int, + default=5, + help="render frames to video every n steps", + ) + parser.add_argument( + "--render_image_names", + type=str, + nargs='+', + default=None, + help="(optional) camera name(s) / image observation(s) to use for rendering on-screen or to video", + ) + args = parser.parse_args() + visualize_subtasks(args) diff --git a/mimicgen/utils/__init__.py b/mimicgen/utils/__init__.py new file mode 100644 index 0000000..8edfcfd --- /dev/null +++ b/mimicgen/utils/__init__.py @@ -0,0 +1,3 @@ +# Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# Licensed under the NVIDIA Source Code License [see LICENSE for details]. \ No newline at end of file diff --git a/mimicgen/utils/config_utils.py b/mimicgen/utils/config_utils.py new file mode 100644 index 0000000..a6ff70a --- /dev/null +++ b/mimicgen/utils/config_utils.py @@ -0,0 +1,500 @@ +# Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# Licensed under the NVIDIA Source Code License [see LICENSE for details]. + +""" +A collection of utilities for working with config generators. These generators +are re-used from robomimic (https://robomimic.github.io/docs/tutorials/hyperparam_scan.html) +""" +import json +from collections.abc import Iterable + + +def set_debug_settings( + generator, + group, +): + """ + Sets config generator parameters for a quick debug run. 
def set_debug_settings(
    generator,
    group,
):
    """
    Sets config generator parameters for a quick debug run.

    Args:
        generator (robomimic ConfigGenerator instance): config generator object
        group (int): parameter group for these settings
    """
    # only attempt 2 generations, without guaranteeing success
    for key, value in (
        ("experiment.generation.guarantee", False),
        ("experiment.generation.num_trials", 2),
    ):
        generator.add_param(key=key, name="", group=group, values=[value])
    return generator


def set_basic_settings(
    generator,
    group,
    source_dataset_path,
    source_dataset_name,
    generation_path,
    guarantee,
    num_traj,
    num_src_demos=None,
    max_num_failures=25,
    num_demo_to_render=10,
    num_fail_demo_to_render=25,
    verbose=False,
):
    """
    Sets config generator parameters for some basic data generation settings.

    Args:
        generator (robomimic ConfigGenerator instance): config generator object
        group (int): parameter group for these settings
        source_dataset_path (str): path to source dataset
        source_dataset_name (str): name to give source dataset in experiment name
        generation_path (str): folder for generated data
        guarantee (bool): whether to ensure @num_traj successes
        num_traj (int): number of trajectories for generation
        num_src_demos (int or None): number of source demos to take from @source_dataset_path
        max_num_failures (int): max failures to keep
        num_demo_to_render (int): max demos to render to video
        num_fail_demo_to_render (int): max fail demos to render to video
        verbose (bool): if True, make experiment name verbose using the passed settings
    """

    # source dataset, with an optional label that shows up in the experiment name
    generator.add_param(
        key="experiment.source.dataset_path",
        name="src" if source_dataset_name is not None else "",
        group=group,
        values=[source_dataset_path],
        value_names=[source_dataset_name],
    )
    generator.add_param(
        key="experiment.source.n",
        name="n_src" if verbose else "",
        group=group,
        values=[num_src_demos],
    )

    # generation settings
    generator.add_param(
        key="experiment.generation.path",
        name="",
        group=group,
        values=[generation_path],
    )
    generator.add_param(
        key="experiment.generation.guarantee",
        name="gt" if verbose else "",
        group=group,
        values=[guarantee],
        value_names=["t" if guarantee else "f"],
    )
    generator.add_param(
        key="experiment.generation.num_trials",
        name="nt" if verbose else "",
        group=group,
        values=[num_traj],
    )

    # bookkeeping: failure cap and how many (failed) demos get rendered to video
    for key, value in (
        ("experiment.max_num_failures", max_num_failures),
        ("experiment.num_demo_to_render", num_demo_to_render),
        ("experiment.num_fail_demo_to_render", num_fail_demo_to_render),
    ):
        generator.add_param(key=key, name="", group=group, values=[value])

    return generator
def set_obs_settings(
    generator,
    group,
    collect_obs,
    camera_names,
    camera_height,
    camera_width,
):
    """
    Sets config generator parameters for collecting observations.

    Args:
        generator (robomimic ConfigGenerator instance): config generator object
        group (int): parameter group for these settings
        collect_obs (bool): whether to collect observations during generation
        camera_names (list): cameras to render observations from
        camera_height (int): rendered image height
        camera_width (int): rendered image width
    """
    # all four settings are plain single-value entries with no name component
    for key, value in (
        ("obs.collect_obs", collect_obs),
        ("obs.camera_names", camera_names),
        ("obs.camera_height", camera_height),
        ("obs.camera_width", camera_width),
    ):
        generator.add_param(key=key, name="", group=group, values=[value])
    return generator
def set_subtask_settings(
    generator,
    group,
    base_config_file,
    select_src_per_subtask,
    subtask_term_offset_range=None,
    selection_strategy=None,
    selection_strategy_kwargs=None,
    action_noise=None,
    num_interpolation_steps=None,
    num_fixed_steps=None,
    verbose=False,
):
    """
    Sets config generator parameters for each subtask.

    Args:
        generator (robomimic ConfigGenerator instance): config generator object
        group (int): parameter group for these settings
        base_config_file (str): path to base config file being used for generating configs
        select_src_per_subtask (bool): whether to select src demo for each subtask
        subtask_term_offset_range (list or None): if provided, should be list of 2-tuples, one
            entry per subtask, with the last entry being None
        selection_strategy (str or None): src demo selection strategy
        selection_strategy_kwargs (dict or None): kwargs for selection strategy
        action_noise (float or list or None): action noise for all subtasks
        num_interpolation_steps (int or list or None): interpolation steps for all subtasks
        num_fixed_steps (int or list or None): fixed steps for all subtasks
        verbose (bool): if True, make experiment name verbose using the passed settings
    """

    # read the base config to find out how many subtasks this task has
    with open(base_config_file, 'r') as f:
        config = json.load(f)
    num_subtasks = len(config["task"]["task_spec"])

    # whether to select a different source demonstration for each subtask
    generator.add_param(
        key="experiment.generation.select_src_per_subtask",
        name="select_src_per_subtask" if verbose else "",
        group=group,
        values=[select_src_per_subtask],
        value_names=["t" if select_src_per_subtask else "f"],
    )

    # settings for each subtask

    # offset range
    if subtask_term_offset_range is not None:
        assert len(subtask_term_offset_range) == num_subtasks
        for i in range(num_subtasks):
            if (i == num_subtasks - 1):
                # final subtask ends when the task ends, so no offset applies
                assert subtask_term_offset_range[i] is None
            else:
                assert (subtask_term_offset_range[i] is None) or (len(subtask_term_offset_range[i]) == 2)
            generator.add_param(
                key="task.task_spec.subtask_{}.subtask_term_offset_range".format(i + 1),
                name="offset" if (verbose and (i == 0)) else "",
                group=group,
                values=[subtask_term_offset_range[i]],
            )

    # selection strategy
    if selection_strategy is not None:
        for i in range(num_subtasks):
            generator.add_param(
                key="task.task_spec.subtask_{}.selection_strategy".format(i + 1),
                name="ss" if (verbose and (i == 0)) else "",
                group=group,
                values=[selection_strategy],
            )

    # selection kwargs
    if selection_strategy_kwargs is not None:
        for i in range(num_subtasks):
            generator.add_param(
                key="task.task_spec.subtask_{}.selection_strategy_kwargs".format(i + 1),
                name="",
                group=group,
                values=[selection_strategy_kwargs],
            )

    # action noise (a scalar is broadcast to all subtasks)
    if action_noise is not None:
        if not isinstance(action_noise, Iterable):
            action_noise = [action_noise for _ in range(num_subtasks)]
        assert len(action_noise) == num_subtasks
        for i in range(num_subtasks):
            generator.add_param(
                key="task.task_spec.subtask_{}.action_noise".format(i + 1),
                name="noise" if (verbose and (i == 0)) else "",
                group=group,
                values=[action_noise[i]],
            )

    # interpolation (a scalar is broadcast to all subtasks)
    if num_interpolation_steps is not None:
        if not isinstance(num_interpolation_steps, Iterable):
            num_interpolation_steps = [num_interpolation_steps for _ in range(num_subtasks)]
        assert len(num_interpolation_steps) == num_subtasks
        for i in range(num_subtasks):
            generator.add_param(
                key="task.task_spec.subtask_{}.num_interpolation_steps".format(i + 1),
                name="ni" if (verbose and (i == 0)) else "",
                group=group,
                values=[num_interpolation_steps[i]],
            )
    if num_fixed_steps is not None:
        if not isinstance(num_fixed_steps, Iterable):
            num_fixed_steps = [num_fixed_steps for _ in range(num_subtasks)]
        assert len(num_fixed_steps) == num_subtasks
        for i in range(num_subtasks):
            generator.add_param(
                key="task.task_spec.subtask_{}.num_fixed_steps".format(i + 1),
                # bugfix: this was "ni" (copy-pasted from the interpolation loop above),
                # which made the fixed-steps setting indistinguishable from the
                # interpolation setting in verbose experiment names
                name="nf" if (verbose and (i == 0)) else "",
                group=group,
                values=[num_fixed_steps[i]],
            )

    return generator
def set_learning_settings_for_bc_rnn(
    generator,
    group,
    modality,
    seq_length=10,
    low_dim_keys=None,
    image_keys=None,
    crop_size=None,
):
    """
    Sets config generator parameters for robomimic BC-RNN training runs.

    Args:
        generator (robomimic ConfigGenerator instance): config generator object
        group (int): parameter group for these settings
        modality (str): whether this is a low-dim or image observation run
        seq_length (int): BC-RNN context length
        low_dim_keys (list or None): if provided, set low-dim observation keys, else use defaults
        image_keys (list or None): if provided, set image observation keys, else use defaults
        crop_size (tuple or None): if provided, size of crop to use for pixel shift augmentation
    """
    supported_modalities = ["low_dim", "image"]
    assert modality in supported_modalities, "got modality {} not in supported modalities {}".format(modality, supported_modalities)

    def _add(key, value):
        # every setting here is an unnamed, single-value sweep entry
        generator.add_param(key=key, name="", group=group, values=[value])

    # RNN with a GMM head at the desired context length
    _add("train.seq_length", seq_length)
    _add("algo.rnn.horizon", seq_length)
    _add("algo.rnn.enabled", True)
    _add("algo.gmm.enabled", True)
    _add("algo.actor_layer_dims", [])

    # 4 data workers and low-dim cache mode seems to work well for both low-dim and image observations
    _add("train.num_data_workers", 4)
    _add("train.hdf5_cache_mode", "low_dim")

    # modality-specific defaults
    if modality == "image":
        # epoch settings
        epoch_every_n_steps, validation_epoch_every_n_steps, eval_rate = 500, 50, 20
        # learning settings
        num_epochs, batch_size, policy_lr, rnn_hidden_dim = 600, 16, 1e-4, 1000

        # observation settings
        if low_dim_keys is None:
            low_dim_keys = ["robot0_eef_pos", "robot0_eef_quat", "robot0_gripper_qpos"]
        if image_keys is None:
            image_keys = ["agentview_image", "robot0_eye_in_hand_image"]
        if crop_size is None:
            crop_size = (76, 76)

        # ResNet-18 visual encoder with spatial softmax pooling and crop randomization
        _add("observation.encoder.rgb", {
            "core_class": "VisualCore",
            "core_kwargs": {
                "feature_dimension": 64,
                "flatten": True,
                "backbone_class": "ResNet18Conv",
                "backbone_kwargs": {
                    "pretrained": False,
                    "input_coord_conv": False,
                },
                "pool_class": "SpatialSoftmax",
                "pool_kwargs": {
                    "num_kp": 32,
                    "learnable_temperature": False,
                    "temperature": 1.0,
                    "noise_std": 0.0,
                    "output_variance": False,
                },
            },
            "obs_randomizer_class": "CropRandomizer",
            "obs_randomizer_kwargs": {
                "crop_height": crop_size[0],
                "crop_width": crop_size[1],
                "num_crops": 1,
                "pos_enc": False,
            },
        })
    else:
        # epoch settings
        epoch_every_n_steps, validation_epoch_every_n_steps, eval_rate = 100, 10, 50
        # learning settings
        num_epochs, batch_size, policy_lr, rnn_hidden_dim = 2000, 100, 1e-3, 400

        # observation settings
        if low_dim_keys is None:
            low_dim_keys = ["robot0_eef_pos", "robot0_eef_quat", "robot0_gripper_qpos", "object"]
        if image_keys is None:
            image_keys = []

    # observation modalities
    _add("observation.modalities.obs.low_dim", low_dim_keys)
    _add("observation.modalities.obs.rgb", image_keys)

    # epoch settings
    _add("experiment.epoch_every_n_steps", epoch_every_n_steps)
    _add("experiment.validation_epoch_every_n_steps", validation_epoch_every_n_steps)
    _add("experiment.save.every_n_epochs", eval_rate)
    _add("experiment.rollout.rate", eval_rate)

    # learning settings
    _add("train.num_epochs", num_epochs)
    _add("train.batch_size", batch_size)
    _add("algo.optim_params.policy.learning_rate.initial", policy_lr)
    _add("algo.rnn.hidden_dim", rnn_hidden_dim)

    return generator
def write_json(json_dic, json_path):
    """
    Write dictionary to json file, preserving the original key ordering.
    """
    # sort_keys=False keeps insertion order; indent=4 for readability
    with open(json_path, 'w') as f:
        f.write(json.dumps(json_dic, sort_keys=False, indent=4))
def get_all_demos_from_dataset(
    dataset_path,
    filter_key=None,
    start=None,
    n=None,
):
    """
    Helper function to get demonstration keys from robomimic hdf5 dataset.

    Args:
        dataset_path (str): path to hdf5 dataset
        filter_key (str or None): name of filter key
        start (int or None): demonstration index to start from
        n (int or None): number of consecutive demonstrations to retrieve

    Returns:
        demo_keys (list): list of demonstration keys
    """
    with h5py.File(dataset_path, "r") as f:
        if filter_key is not None:
            # restrict to the demos listed under this filter key
            print("using filter key: {}".format(filter_key))
            demos = [elem.decode("utf-8") for elem in np.array(f["mask/{}".format(filter_key)])]
        else:
            demos = list(f["data"].keys())

    # sort episodes by their numeric suffix ("demo_<N>") rather than lexically
    demo_keys = sorted(demos, key=lambda name: int(name[5:]))

    # optionally slice out a contiguous window of demos
    if start is not None:
        demo_keys = demo_keys[start:]
    if n is not None:
        demo_keys = demo_keys[:n]

    return demo_keys
def get_env_interface_info_from_dataset(
    dataset_path,
    demo_keys,
):
    """
    Gets environment interface information from source dataset.

    Args:
        dataset_path (str): path to hdf5 dataset
        demo_keys (list): list of demonstration keys to extract info from

    Returns:
        env_interface_name (str): name of environment interface class
        env_interface_type (str): type of environment interface
    """
    env_interface_names = []
    env_interface_types = []
    with h5py.File(dataset_path, "r") as f:
        for ep in demo_keys:
            datagen_info_key = "data/{}/datagen_info".format(ep)
            assert datagen_info_key in f, "Could not find MimicGen metadata in dataset {}. Ensure you have run prepare_src_dataset.py on this hdf5".format(dataset_path)
            attrs = f[datagen_info_key].attrs
            env_interface_names.append(attrs["env_interface_name"])
            env_interface_types.append(attrs["env_interface_type"])

    # ensure all source demos are consistent
    env_interface_name = env_interface_names[0]
    env_interface_type = env_interface_types[0]
    assert all(elem == env_interface_name for elem in env_interface_names)
    assert all(elem == env_interface_type for elem in env_interface_types)
    return env_interface_name, env_interface_type
def parse_source_dataset(
    dataset_path,
    demo_keys,
    task_spec=None,
    subtask_term_signals=None,
    subtask_term_offset_ranges=None,
):
    """
    Parses a source dataset to extract info needed for data generation (DatagenInfo instances) and
    subtask indices that split each source dataset trajectory into contiguous subtask segments.

    Args:
        dataset_path (str): path to hdf5 dataset
        demo_keys (list): list of demo keys to use from dataset path
        task_spec (MG_TaskSpec instance or None): task spec object, which will be used to
            infer the sequence of subtask termination signals and offset ranges.
        subtask_term_signals (list or None): sequence of subtask termination signals, which
            should only be provided if not providing @task_spec. Should have an entry per subtask
            and the last subtask entry should be None, since the final subtask ends when the
            task ends.
        subtask_term_offset_ranges (list or None): sequence of subtask termination offset ranges, which
            should only be provided if not providing @task_spec. Should have an entry per subtask
            and the last subtask entry should be None or (0, 0), since the final subtask ends when the
            task ends.

    Returns:
        datagen_infos (list): list of DatagenInfo instances, one per source
            demonstration. Each instance has entries with leading dimension [T, ...],
            the length of the trajectory.

        subtask_indices (np.array): array of shape (N, S, 2) where N is the number of
            demos and S is the number of subtasks for this task. Each entry is
            a pair of integers that represents the index at which a subtask
            segment starts and where it is completed.

        subtask_term_signals (list): sequence of subtask termination signals

        subtask_term_offset_ranges (list): sequence of subtask termination offset ranges
    """

    # should provide either task_spec or the subtask termination lists, but not both
    assert (task_spec is not None) or ((subtask_term_signals is not None) and (subtask_term_offset_ranges is not None))
    assert (task_spec is None) or ((subtask_term_signals is None) and (subtask_term_offset_ranges is None))

    if task_spec is not None:
        # pull the per-subtask signal / offset settings out of the task spec
        subtask_term_signals = [subtask_spec["subtask_term_signal"] for subtask_spec in task_spec]
        subtask_term_offset_ranges = [subtask_spec["subtask_term_offset_range"] for subtask_spec in task_spec]

    assert len(subtask_term_signals) == len(subtask_term_offset_ranges)
    assert subtask_term_signals[-1] is None, "end of final subtask does not need to be detected"
    assert (subtask_term_offset_ranges[-1] is None) or (subtask_term_offset_ranges[-1] == (0, 0)), "end of final subtask does not need to be detected"
    # normalize the last entry so downstream arithmetic can assume a (min, max) pair
    subtask_term_offset_ranges[-1] = (0, 0)

    f = h5py.File(dataset_path, "r")

    datagen_infos = []
    subtask_indices = []
    for ind in tqdm(range(len(demo_keys))):
        ep = demo_keys[ind]
        ep_grp = f["data/{}".format(ep)]

        # extract datagen info (eef poses, object poses, termination signals, etc.)
        ep_datagen_info = ep_grp["datagen_info"]
        ep_datagen_info_obj = DatagenInfo(
            eef_pose=ep_datagen_info["eef_pose"][:],
            object_poses={ k : ep_datagen_info["object_poses"][k][:] for k in ep_datagen_info["object_poses"] },
            subtask_term_signals={ k : ep_datagen_info["subtask_term_signals"][k][:] for k in ep_datagen_info["subtask_term_signals"] },
            target_pose=ep_datagen_info["target_pose"][:],
            gripper_action=ep_datagen_info["gripper_action"][:],
        )
        datagen_infos.append(ep_datagen_info_obj)

        # parse subtask indices using subtask termination signals
        ep_subtask_indices = []
        prev_subtask_term_ind = 0
        for subtask_ind in range(len(subtask_term_signals)):
            subtask_term_signal = subtask_term_signals[subtask_ind]
            if subtask_term_signal is None:
                # final subtask, finishes at end of demo
                subtask_term_ind = ep_grp["actions"].shape[0]
            else:
                # trick to detect index where first 0 -> 1 transition occurs - this will be the end of the subtask
                subtask_indicators = ep_datagen_info_obj.subtask_term_signals[subtask_term_signal]
                diffs = subtask_indicators[1:] - subtask_indicators[:-1]
                end_ind = int(diffs.nonzero()[0][0]) + 1
                subtask_term_ind = end_ind + 1 # increment to support indexing like demo[start:end]
            # each segment is [start, end) so the next segment starts where this one ended
            ep_subtask_indices.append([prev_subtask_term_ind, subtask_term_ind])
            prev_subtask_term_ind = subtask_term_ind

        # run sanity check on subtask_term_offset_range in task spec to make sure we can never
        # get an empty subtask in the worst case when sampling subtask bounds:
        #
        #   end index of subtask i + max offset of subtask i < end index of subtask i + 1 + min offset of subtask i + 1
        #
        assert len(ep_subtask_indices) == len(subtask_term_signals), "mismatch in length of extracted subtask info and number of subtasks"
        for i in range(1, len(ep_subtask_indices)):
            prev_max_offset_range = subtask_term_offset_ranges[i - 1][1]
            assert ep_subtask_indices[i - 1][1] + prev_max_offset_range < ep_subtask_indices[i][1] + subtask_term_offset_ranges[i][0], \
                "subtask sanity check violation in demo key {} with subtask {} end ind {}, subtask {} max offset {}, subtask {} end ind {}, and subtask {} min offset {}".format(
                    demo_keys[ind], i - 1, ep_subtask_indices[i - 1][1], i - 1, prev_max_offset_range, i, ep_subtask_indices[i][1], i, subtask_term_offset_ranges[i][0])

        subtask_indices.append(ep_subtask_indices)
    f.close()

    # convert list of lists to array for easy indexing
    subtask_indices = np.array(subtask_indices)

    return datagen_infos, subtask_indices, subtask_term_signals, subtask_term_offset_ranges
def write_demo_to_hdf5(
    folder,
    env,
    initial_state,
    states,
    observations,
    datagen_info,
    actions,
    src_demo_inds=None,
    src_demo_labels=None,
):
    """
    Helper function to write demonstration to an hdf5 file (robomimic format) in a folder. It will be
    named using a timestamp.

    Args:
        folder (str): folder to write hdf5 to
        env (robomimic EnvBase instance): simulation environment
        initial_state (dict): dictionary corresponding to initial simulator state (see robomimic dataset structure for more information)
        states (list): list of simulator states
        observations (list): list of observation dictionaries
        datagen_info (list): list of DatagenInfo instances
        actions (np.array): actions per timestep
        src_demo_inds (list or None): if provided, list of selected source demonstration indices for each subtask
        src_demo_labels (np.array or None): same as @src_demo_inds, but repeated to have a label for each timestep of the trajectory
    """

    # name hdf5 based on timestamp so concurrent writers in the same folder never collide
    timestamp = time.time()
    time_str = datetime.datetime.fromtimestamp(timestamp).strftime('date_%m_%d_%Y_time_%H_%M_%S')
    dataset_path = os.path.join(folder, "{}.hdf5".format(time_str))
    data_writer = h5py.File(dataset_path, "w")
    data_grp = data_writer.create_group("data")
    data_grp.attrs["timestamp"] = timestamp
    data_grp.attrs["readable_timestamp"] = time_str

    # single episode per file; files are merged later (see merge_all_hdf5)
    ep_data_grp = data_grp.create_group("demo_0")

    # write actions
    ep_data_grp.create_dataset("actions", data=np.array(actions))

    # write simulator states
    if isinstance(states[0], dict):
        # dict-style states: store one dataset per state key
        states = TensorUtils.list_of_flat_dict_to_dict_of_list(states)
        for k in states:
            ep_data_grp.create_dataset("states/{}".format(k), data=np.array(states[k]))
    else:
        # flat array states
        ep_data_grp.create_dataset("states", data=np.array(states))

    # write observations (gzip-compressed since image obs can be large)
    obs = TensorUtils.list_of_flat_dict_to_dict_of_list(observations)
    for k in obs:
        ep_data_grp.create_dataset("obs/{}".format(k), data=np.array(obs[k]), compression="gzip")

    # write datagen info (per-timestep MimicGen metadata)
    datagen_info = TensorUtils.list_of_flat_dict_to_dict_of_list([x.to_dict() for x in datagen_info])
    for k in datagen_info:
        if k in ["object_poses", "subtask_term_signals"]:
            # these entries are themselves dictionaries (e.g. one pose per object),
            # so convert list of dict to dict of list again
            datagen_info[k] = TensorUtils.list_of_flat_dict_to_dict_of_list(datagen_info[k])
            for k2 in datagen_info[k]:
                datagen_info[k][k2] = np.array(datagen_info[k][k2])
                ep_data_grp.create_dataset("datagen_info/{}/{}".format(k, k2), data=np.array(datagen_info[k][k2]))
        else:
            ep_data_grp.create_dataset("datagen_info/{}".format(k), data=np.array(datagen_info[k]))

    # maybe write which source demonstrations generated this episode
    if src_demo_inds is not None:
        ep_data_grp.create_dataset("src_demo_inds", data=np.array(src_demo_inds))
    if src_demo_labels is not None:
        ep_data_grp.create_dataset("src_demo_labels", data=np.array(src_demo_labels))

    # episode metadata
    if ("model" in initial_state) and (initial_state["model"] is not None):
        # only for robosuite envs
        ep_data_grp.attrs["model_file"] = initial_state["model"] # model xml for this episode
    ep_data_grp.attrs["num_samples"] = actions.shape[0] # number of transitions in this episode

    # global metadata
    data_grp.attrs["total"] = actions.shape[0]
    data_grp.attrs["env_args"] = json.dumps(env.serialize(), indent=4) # environment info
    data_writer.close()
def merge_all_hdf5(
    folder,
    new_hdf5_path,
    delete_folder=False,
    dry_run=False,
    return_horizons=False,
):
    """
    Helper function to take all hdf5s in @folder and merge them into a single one.
    Returns the number of hdf5s that were merged.

    Args:
        folder (str): folder containing per-episode hdf5 files (see write_demo_to_hdf5)
        new_hdf5_path (str): path where the merged hdf5 should be written
        delete_folder (bool): if True, delete @folder after a successful merge
        dry_run (bool): if True, do not write anything - just count the mergeable files
        return_horizons (bool): if True, also return the per-demo trajectory lengths
    """
    source_hdf5s = glob(os.path.join(folder, "*.hdf5"))

    # get all timestamps and sort files from lowest to highest
    timestamps = []
    filtered_source_hdf5s = []
    for source_hdf5_path in source_hdf5s:
        try:
            f = h5py.File(source_hdf5_path, "r")
        except Exception as e:
            # skip unreadable files (e.g. partially-written hdf5s from a crashed run)
            print("WARNING: problem with file {}".format(source_hdf5_path))
            print("Exception: {}".format(e))
            continue
        filtered_source_hdf5s.append(source_hdf5_path)
        timestamps.append(f["data"].attrs["timestamp"])
        f.close()

    assert len(timestamps) == len(filtered_source_hdf5s)
    inds = np.argsort(timestamps)
    sorted_hdf5s = [filtered_source_hdf5s[i] for i in inds]

    if dry_run:
        # only report what would be merged; optionally read per-demo lengths
        if return_horizons:
            horizons = []
            for source_hdf5_path in sorted_hdf5s:
                with h5py.File(source_hdf5_path, "r") as f:
                    horizons.append(f["data"].attrs["total"])
            return len(sorted_hdf5s), horizons
        return len(sorted_hdf5s)

    # write demos in order to new file
    f_new = h5py.File(new_hdf5_path, "w")
    f_new_grp = f_new.create_group("data")

    env_meta_str = None
    total = 0
    if return_horizons:
        horizons = []
    for i, source_hdf5_path in enumerate(sorted_hdf5s):
        with h5py.File(source_hdf5_path, "r") as f:
            # copy this episode over under a different name (each source file holds "demo_0")
            demo_str = "demo_{}".format(i)
            f.copy("data/demo_0", f_new_grp, name=demo_str)
            if return_horizons:
                horizons.append(f["data"].attrs["total"])
            total += f["data"].attrs["total"]
            if env_meta_str is None:
                # take env metadata from the first (earliest) source file
                env_meta_str = f["data"].attrs["env_args"]

    f_new["data"].attrs["total"] = total
    f_new["data"].attrs["env_args"] = env_meta_str if env_meta_str is not None else ""
    f_new.close()

    if delete_folder:
        print("removing folder at path {}".format(folder))
        shutil.rmtree(folder)

    if return_horizons:
        return len(sorted_hdf5s), horizons
    return len(sorted_hdf5s)
def download_url_from_gdrive(url, download_dir, check_overwrite=True):
    """
    Downloads a file at a URL from Google Drive.

    Example usage:
        url = https://drive.google.com/file/d/1DABdqnBri6-l9UitjQV53uOq_84Dx7Xt/view?usp=drive_link
        download_dir = "/tmp"
        download_url_from_gdrive(url, download_dir, check_overwrite=True)

    Args:
        url (str): url string
        download_dir (str): path to directory where file should be downloaded
        check_overwrite (bool): if True, will sanity check the download fpath to make sure a file of that name
            doesn't already exist there
    """
    assert url_is_alive(url), "@download_url_from_gdrive got unreachable url: {}".format(url)

    with tempfile.TemporaryDirectory() as td:
        # HACK: Change directory to temp dir, download file there, and then move the file to desired directory.
        # We do this because we do not know the name of the file beforehand.
        cur_dir = os.getcwd()
        os.chdir(td)
        try:
            fpath = gdown.download(url, quiet=False, fuzzy=True)
            fname = os.path.basename(fpath)
            file_to_write = os.path.join(download_dir, fname)
            if check_overwrite and os.path.exists(file_to_write):
                user_response = input(f"Warning: file {file_to_write} already exists. Overwrite? y/n\n")
                assert user_response.lower() in {"yes", "y"}, f"Did not receive confirmation. Aborting download."
            shutil.move(fpath, file_to_write)
        finally:
            # bugfix: always restore the working directory, even if the download or
            # move fails - otherwise the process is left with its cwd inside the
            # temp dir, which is deleted when the context manager exits
            os.chdir(cur_dir)
def config_generator_to_script_lines(generator, config_dir):
    """
    Takes a robomimic ConfigGenerator and uses it to
    generate a set of training configs, and a set of bash command lines
    that correspond to each training run (one per config). Note that
    the generator's script_file will be overridden to be a temporary file that
    will be removed from disk.

    Args:
        generator (ConfigGenerator instance or list): generator(s)
            to use for generating configs and training runs

        config_dir (str): path to directory where configs will be generated

    Returns:
        config_files (list): a list of config files that were generated

        run_lines (list): a list of strings that are training commands, one per config
    """

    # make sure config dir exists
    os.makedirs(config_dir, exist_ok=True)

    # support one or more config generators
    generators = generator if isinstance(generator, list) else [generator]

    all_run_lines = []
    for gen in generators:
        # relocate the base config into @config_dir and point the generator at the copy
        new_base_config_file = os.path.join(config_dir, os.path.basename(gen.base_config_file))
        shutil.copyfile(gen.base_config_file, new_base_config_file)
        gen.base_config_file = new_base_config_file

        # we'll write script file to a temp dir and parse it from there to get the training commands
        with tempfile.TemporaryDirectory() as td:
            gen.script_file = os.path.join(td, "tmp.sh")

            # generate configs
            gen.generate()

            # collect training commands
            with open(gen.script_file, "r") as fh:
                all_run_lines.extend(line for line in fh.readlines() if line.startswith("python"))

        os.remove(gen.base_config_file)

    # get list of generated configs too, rejecting duplicates
    config_files = []
    seen = set()
    for line in all_run_lines:
        cmd = shlex.split(line)
        config_file_name = cmd[cmd.index("--config") + 1]
        assert config_file_name not in seen, "got duplicate config name {}".format(config_file_name)
        seen.add(config_file_name)
        config_files.append(config_file_name)

    return config_files, all_run_lines
def add_red_border_to_frame(frame, ratio=0.02):
    """
    Paint a red border around an image frame, in place.

    Args:
        frame (np.array): image of shape (H, W, 3); modified in place
        ratio (float): border thickness as a fraction of each image dimension
            (at least 1 pixel)

    Returns:
        frame (np.array): the same array that was passed in
    """
    red = [255., 0., 0.]
    thickness_rows = max(1, round(ratio * frame.shape[0]))
    thickness_cols = max(1, round(ratio * frame.shape[1]))

    # top / bottom bands
    frame[:thickness_rows, :, :] = red
    frame[-thickness_rows:, :, :] = red
    # left / right bands
    frame[:, :thickness_cols, :] = red
    frame[:, -thickness_cols:, :] = red
    return frame


class Grid(object):
    """
    Hold a list of values and point at one of them at a time, with
    clamped next / prev navigation.
    """
    def __init__(self, values, initial_ind=0):
        self.values = list(values)
        self.ind = initial_ind
        self.n = len(self.values)

    def get(self):
        """Return the value currently pointed to."""
        return self.values[self.ind]

    def next(self):
        """Move the pointer forward (clamped at the end) and return the new value."""
        if self.ind < self.n - 1:
            self.ind += 1
        return self.get()

    def prev(self):
        """Move the pointer backward (clamped at the start) and return the new value."""
        if self.ind > 0:
            self.ind -= 1
        return self.get()
+ """ + self._enabled = False + + def tic(self): + # using time.time instead of time.clock because time time.clock + # does not normalize for multithreading + self.start_time = time.time() + + def toc(self): + if self._enabled: + + if self.ignore_first and (self.start_time > 0. and not self._had_first): + self._had_first = True + return time.time() - self.start_time + + self.last_diff = time.time() - self.start_time + self.total_time += self.last_diff + self.calls += 1 + self.average_time = self.total_time / self.calls + self.min_diff = min(self.min_diff, self.last_diff) + self.max_diff = max(self.max_diff, self.last_diff) + self._measurements.append(self.last_diff) + last_diff = self.last_diff + return last_diff + + @contextmanager + def timed(self): + self.tic() + yield + self.toc() + + def report_stats(self, verbose=False): + stats = OrderedDict() + stats["global"] = OrderedDict( + mean=self.average_time, + min=self.min_diff, + max=self.max_diff, + num=self.calls, + ) + num = len(self._measurements) + stats["local"] = OrderedDict() + if num > 0: + stats["local"] = OrderedDict( + mean=np.mean(self._measurements), + std=np.std(self._measurements), + min=np.min(self._measurements), + max=np.max(self._measurements), + num=num, + ) + if verbose: + stats["local"]["values"] = list(self._measurements) + return stats + + +class Rate(object): + """ + Convenience class for enforcing rates in loops. Modeled after rospy.Rate. + + See http://docs.ros.org/en/jade/api/rospy/html/rospy.timer-pysrc.html#Rate.sleep + """ + def __init__(self, hz): + """ + Args: + hz (int): frequency to enforce + """ + self.update_hz(hz) + + def update_hz(self, hz): + """ + Update rate to enforce. + """ + self.hz = hz + self.last_time = time.time() + self.sleep_duration = (1. / hz) + + def _remaining(self, curr_time): + """ + Calculate time remaining for rate to sleep. + """ + assert curr_time >= self.last_time, "time moved backwards!" 
class RateMeasure(object):
    """
    Measure approximate time intervals (and frequencies) of code execution
    by calling @measure once per iteration of interest.
    """
    def __init__(self, name=None, history=100, freq_threshold=None):
        # internal timer; first interval is ignored since it includes setup time
        self._timer = Timer(history=history, ignore_first=True)
        self._timer.tic()
        self.name = name
        # optional minimum frequency (hz); violations are printed in @measure
        self.freq_threshold = freq_threshold
        self._enabled = True
        self._first = False
        # running sum of measured frequencies, for mean frequency reporting
        self.sum = 0.
        self.calls = 0

    def enable(self):
        """
        Enable measurements.
        """
        self._timer.enable()
        self._enabled = True

    def disable(self):
        """
        Disable measurements.
        """
        self._timer.disable()
        self._enabled = False

    def measure(self):
        """
        Take a measurement of the time elapsed since the last @measure call
        and also return the time elapsed.

        Returns:
            (interval, violated) tuple, where violated is True if the
            measured frequency fell below @freq_threshold
        """
        interval = self._timer.toc()
        self._timer.tic()
        # NOTE(review): assumes interval > 0; a zero interval would divide by zero
        self.sum += (1. / interval)
        self.calls += 1
        violated = (
            self._enabled
            and (self.freq_threshold is not None)
            and ((1. / interval) < self.freq_threshold)
        )
        if violated:
            print("WARNING: RateMeasure {} violated threshold {} hz with measurement {} hz".format(self.name, self.freq_threshold, (1. / interval)))
        return (interval, violated)

    def report_stats(self, verbose=False):
        """
        Report statistics over measurements, converting timer measurements into frequencies.
        """
        stats = self._timer.report_stats(verbose=verbose)
        stats["name"] = self.name
        if stats["global"]["num"] > 0:
            # invert interval stats into frequency stats (min/max swap roles)
            stats["global"] = OrderedDict(
                mean=(self.sum / float(self.calls)),
                min=(1. / stats["global"]["max"]),
                max=(1. / stats["global"]["min"]),
                num=stats["global"]["num"],
            )
        if len(stats["local"]) > 0:
            frequencies = [1. / x for x in self._timer._measurements]
            stats["local"] = OrderedDict(
                mean=np.mean(frequencies),
                std=np.std(frequencies),
                min=np.min(frequencies),
                max=np.max(frequencies),
                num=stats["local"]["num"],
            )
        return stats

    def __str__(self):
        return json.dumps(self.report_stats(verbose=False), indent=4)
def make_pose(pos, rot):
    """
    Assemble homogeneous pose matrices from translation vectors and rotation matrices.

    Args:
        pos (np.array): batch of position vectors with last dimension of 3
        rot (np.array): batch of rotation matrices with last 2 dimensions of (3, 3)

    Returns:
        pose (np.array): batch of pose matrices with last 2 dimensions of (4, 4)
    """
    assert pos.shape[:-1] == rot.shape[:-2]
    assert pos.shape[-1] == rot.shape[-2] == rot.shape[-1] == 3
    pose = np.zeros(pos.shape[:-1] + (4, 4))
    pose[..., :3, :3] = rot
    pose[..., :3, 3] = pos
    pose[..., 3, 3] = 1.0
    return pose


def unmake_pose(pose):
    """
    Split homogeneous pose matrices back into translation vectors and rotation matrices.

    Args:
        pose (np.array): batch of pose matrices with last 2 dimensions of (4, 4)

    Returns:
        pos (np.array): batch of position vectors with last dimension of 3
        rot (np.array): batch of rotation matrices with last 2 dimensions of (3, 3)
    """
    return pose[..., :3, 3], pose[..., :3, :3]


def pose_inv(pose):
    """
    Compute the inverse of homogeneous pose matrices.

    Uses the closed form [R t; 0 1]^-1 = [R.T -R.T*t; 0 1] instead of a
    general matrix inverse.

    Args:
        pose (np.array): batch of pose matrices with last 2 dimensions of (4, 4)

    Returns:
        inv_pose (np.array): batch of inverse pose matrices with last 2 dimensions of (4, 4)
    """
    assert len(pose.shape) >= 2

    inv_pose = np.zeros_like(pose)

    # transpose of each 3x3 rotation block
    rot_T = np.swapaxes(pose[..., :3, :3], -1, -2)
    inv_pose[..., :3, :3] = rot_T

    # -R.T * t; add a trailing dim for matmul then strip it
    inv_pose[..., :3, 3] = np.matmul(rot_T, -pose[..., :3, 3:4])[..., 0]
    inv_pose[..., 3, 3] = 1.0
    return inv_pose


def pose_in_A_to_pose_in_B(pose_in_A, pose_A_in_B):
    """
    Convert homogeneous matrices for a point C expressed in frame A into
    homogeneous matrices for the same point C expressed in frame B.

    Args:
        pose_in_A (np.array): batch of homogeneous matrices corresponding to the pose of C in frame A
        pose_A_in_B (np.array): batch of homogeneous matrices corresponding to the pose of A in frame B

    Returns:
        pose_in_B (np.array): batch of homogeneous matrices corresponding to the pose of C in frame B
    """
    return pose_A_in_B @ pose_in_A
def quat2axisangle(quat):
    """
    Converts an (x, y, z, w) quaternion to axis-angle format, returning a
    unit axis vector and an angle (in radians).

    NOTE: this differs from robosuite's function because it returns
    both axis and angle, not axis * angle.

    Bug fix: the scalar part is clamped into [-1, 1] in a local variable
    instead of being written back into @quat, so the caller's array is
    never mutated.

    Args:
        quat (np.array): (x, y, z, w) quaternion

    Returns:
        axis (np.array): unit axis vector of length 3 (zeros for a near-zero rotation)
        angle (float): rotation angle in radians
    """
    # conversion from axis-angle to quaternion:
    #   qw = cos(theta / 2); qx, qy, qz = u * sin(theta / 2)
    # so theta = 2 * acos(qw), and the axis is (qx, qy, qz) normalized by
    # sqrt(qx^2 + qy^2 + qz^2) = sqrt(1 - qw^2)

    # clamp the scalar part locally (scalar if-else is much faster than numpy clip)
    w = float(quat[3])
    if w > 1.:
        w = 1.
    elif w < -1.:
        w = -1.

    den = math.sqrt(1. - w * w)
    if math.isclose(den, 0.):
        # (close to) a zero degree rotation, immediately return
        return np.zeros(3), 0.

    return quat[:3] / den, 2. * math.acos(w)


def axisangle2quat(axis, angle):
    """
    Converts axis-angle to an (x, y, z, w) quaternion.

    NOTE: this differs from robosuite's function because it accepts
    both axis and angle as arguments, not axis * angle.

    Args:
        axis (np.array): unit axis vector of length 3
        angle (float): rotation angle in radians

    Returns:
        quat (np.array): (x, y, z, w) quaternion
    """
    # handle zero-rotation case
    if math.isclose(angle, 0.):
        return np.array([0., 0., 0., 1.])

    # make sure that axis is a unit vector
    assert math.isclose(np.linalg.norm(axis), 1., abs_tol=1e-3)

    q = np.zeros(4)
    q[3] = np.cos(angle / 2.)
    q[:3] = axis * np.sin(angle / 2.)
    return q
+ """ + if tau == 0.0: + return q1 + elif tau == 1.0: + return q2 + d = np.dot(q1, q2) + if abs(abs(d) - 1.0) < np.finfo(float).eps * 4.: + return q1 + if d < 0.0: + # invert rotation + d = -d + q2 *= -1.0 + angle = math.acos(np.clip(d, -1, 1)) + if abs(angle) < np.finfo(float).eps * 4.: + return q1 + isin = 1.0 / math.sin(angle) + q1 = q1 * math.sin((1.0 - tau) * angle) * isin + q2 = q2 * math.sin(tau * angle) * isin + q1 = q1 + q2 + return q1 + + +def interpolate_rotations(R1, R2, num_steps, axis_angle=True): + """ + Interpolate between 2 rotation matrices. If @axis_angle, interpolate the axis-angle representation + of the delta rotation, else, use slerp. + + NOTE: I have verified empirically that both methods are essentially equivalent, so pick your favorite. + """ + if axis_angle: + # delta rotation expressed as axis-angle + delta_rot_mat = R2.dot(R1.T) + delta_quat = T.mat2quat(delta_rot_mat) + delta_axis, delta_angle = quat2axisangle(delta_quat) + + # fix the axis, and chunk the angle up into steps + rot_step_size = delta_angle / num_steps + + # convert into delta rotation matrices, and then convert to absolute rotations + if delta_angle < 0.05: + # small angle - don't bother with interpolation + rot_steps = np.array([R2 for _ in range(num_steps)]) + else: + delta_rot_steps = [T.quat2mat(axisangle2quat(delta_axis, i * rot_step_size)) for i in range(num_steps)] + rot_steps = np.array([delta_rot_steps[i].dot(R1) for i in range(num_steps)]) + else: + q1 = T.mat2quat(R1) + q2 = T.mat2quat(R2) + rot_steps = np.array([T.quat2mat(quat_slerp(q1, q2, tau=(float(i) / num_steps))) for i in range(num_steps)]) + + # add in endpoint + rot_steps = np.concatenate([rot_steps, R2[None]], axis=0) + + return rot_steps + + +def interpolate_poses(pose_1, pose_2, num_steps=None, step_size=None, perturb=False): + """ + Linear interpolation between two poses. 
def interpolate_poses(pose_1, pose_2, num_steps=None, step_size=None, perturb=False):
    """
    Linear interpolation between two poses: positions are interpolated
    linearly, rotations via axis-angle interpolation.

    Args:
        pose_1 (np.array): 4x4 start pose
        pose_2 (np.array): 4x4 end pose
        num_steps (int): if provided, specifies the number of desired interpolated points (not
            counting the start and end points). Passing 0 corresponds to no interpolation, and
            passing None means that @step_size must be provided to determine the number of
            interpolated points.
        step_size (float): if provided, will be used to infer the number of steps, by taking the
            norm of the delta position vector, and dividing it by the step size
        perturb (bool): if True, randomly jitter the interior interpolation grid points by up to
            half a grid cell forward or backward

    Returns:
        pose_steps (np.array): interpolated pose path of shape (N + 2, 4, 4), including both
            endpoints, where N is the returned @num_steps
        num_steps (int): the number of interpolated points (N) in the path

    NOTE(review): the @num_steps == 0 branch returns a 3-tuple
    (positions, rotations, num_steps) rather than the (pose_steps, num_steps)
    pair of the main path -- callers that can hit this branch must
    special-case it. Behavior intentionally kept as-is.
    """
    assert step_size is None or num_steps is None

    pos1, rot1 = unmake_pose(pose_1)
    pos2, rot2 = unmake_pose(pose_2)

    if num_steps == 0:
        # no interpolation: just stack the two endpoints
        positions = np.concatenate([pos1[None], pos2[None]], axis=0)
        rotations = np.concatenate([rot1[None], rot2[None]], axis=0)
        return positions, rotations, num_steps

    delta_pos = pos2 - pos1
    if num_steps is None:
        # infer the number of steps from the position delta and the step size
        assert np.linalg.norm(delta_pos) > 0
        num_steps = math.ceil(np.linalg.norm(delta_pos) / step_size)

    num_steps += 1  # include starting pose
    assert num_steps >= 2

    # evenly spaced interpolation grid, optionally jittered in the interior
    grid = np.arange(num_steps).astype(np.float64)
    if perturb:
        # move the interior grid points by up to a half-cell forward or backward
        grid[1:-1] += np.random.uniform(low=-0.5, high=0.5, size=(num_steps - 2,))

    # linear interpolation of positions along the (possibly jittered) grid
    step_vec = delta_pos / num_steps
    pos_steps = np.array([pos1 + g * step_vec for g in grid])
    pos_steps = np.concatenate([pos_steps, pos2[None]], axis=0)  # add in endpoint

    # interpolate the rotations too
    rot_steps = interpolate_rotations(R1=rot1, R2=rot2, num_steps=num_steps, axis_angle=True)

    return make_pose(pos_steps, rot_steps), num_steps - 1


def transform_source_data_segment_using_object_pose(
    obj_pose,
    src_eef_poses,
    src_obj_pose,
):
    """
    Transform a source data segment (object-centric subtask segment from a source
    demonstration) so that the relative poses between the target eef pose frame and
    the object frame are preserved. Each object-centric subtask segment corresponds
    to one object and consists of a sequence of target eef poses.

    Args:
        obj_pose (np.array): 4x4 object pose in current scene
        src_eef_poses (np.array): pose sequence (shape [T, 4, 4]) for the sequence of end
            effector control poses from the source demonstration
        src_obj_pose (np.array): 4x4 object pose from the source demonstration

    Returns:
        transformed_eef_poses (np.array): transformed pose sequence (shape [T, 4, 4])
    """
    # express the source eef poses in the source object frame
    # (frame A = world, frame B = source object frame)
    eef_poses_in_src_obj = pose_in_A_to_pose_in_B(
        pose_in_A=src_eef_poses,
        pose_A_in_B=pose_inv(src_obj_pose[None]),
    )

    # re-apply those relative poses in the new object frame
    # (frame A = new object frame, frame B = world)
    return pose_in_A_to_pose_in_B(
        pose_in_A=eef_poses_in_src_obj,
        pose_A_in_B=obj_pose[None],
    )
+""" +import sys +import json +import traceback +import argparse +from copy import deepcopy + +import robomimic +from robomimic.utils.log_utils import PrintLogger +import robomimic.utils.env_utils as EnvUtils +from robomimic.scripts.playback_dataset import playback_dataset, DEFAULT_CAMERAS + + +def make_print_logger(txt_file): + """ + Makes a logger that mirrors stdout and stderr to a text file. + + Args: + txt_file (str): path to txt file to write + """ + logger = PrintLogger(txt_file) + sys.stdout = logger + sys.stderr = logger + return logger + + +def create_env( + env_meta, + env_name=None, + env_class=None, + robot=None, + gripper=None, + camera_names=None, + camera_height=84, + camera_width=84, + render=None, + render_offscreen=None, + use_image_obs=None, + use_depth_obs=None, +): + """ + Helper function to create the environment from dataset metadata and arguments. + + Args: + env_meta (dict): environment metadata compatible with robomimic, see + https://robomimic.github.io/docs/modules/environments.html + env_name (str or None): if provided, override environment name + in @env_meta + env_class (class or None): if provided, use this class instead of the + one inferred from @env_meta + robot (str or None): if provided, override the robot argument in + @env_meta. Currently only supported by robosuite environments. + gripper (str or None): if provided, override the gripper argument in + @env_meta. Currently only supported by robosuite environments. 
def create_env(
    env_meta,
    env_name=None,
    env_class=None,
    robot=None,
    gripper=None,
    camera_names=None,
    camera_height=84,
    camera_width=84,
    render=None,
    render_offscreen=None,
    use_image_obs=None,
    use_depth_obs=None,
):
    """
    Helper function to create the environment from dataset metadata and arguments.

    Args:
        env_meta (dict): environment metadata compatible with robomimic, see
            https://robomimic.github.io/docs/modules/environments.html
        env_name (str or None): if provided, override environment name
            in @env_meta
        env_class (class or None): if provided, use this class instead of the
            one inferred from @env_meta
        robot (str or None): if provided, override the robot argument in
            @env_meta. Currently only supported by robosuite environments.
        gripper (str or None): if provided, override the gripper argument in
            @env_meta. Currently only supported by robosuite environments.
        camera_names (list of str or None): list of camera names that correspond to image observations
        camera_height (int): camera height for all cameras
        camera_width (int): camera width for all cameras
        render (bool or None): optionally override rendering behavior
        render_offscreen (bool or None): optionally override rendering behavior
        use_image_obs (bool or None): optionally override rendering behavior
        use_depth_obs (bool or None): optionally override rendering behavior
    """
    # work on a copy so the caller's metadata is untouched
    env_meta = deepcopy(env_meta)

    # apply any requested overrides to the metadata
    if env_name is not None:
        env_meta["env_name"] = env_name

    if robot is not None:
        # robot override is only supported for robosuite environments for now
        assert EnvUtils.is_robosuite_env(env_meta)
        assert robot in {"IIWA", "Sawyer", "UR5e", "Panda", "Jaco", "Kinova3"}
        env_meta["env_kwargs"]["robots"] = [robot]

    if gripper is not None:
        # gripper override is only supported for robosuite environments for now
        assert EnvUtils.is_robosuite_env(env_meta)
        assert gripper in {"PandaGripper", "RethinkGripper", "Robotiq85Gripper", "Robotiq140Gripper"}
        env_meta["env_kwargs"]["gripper_types"] = [gripper]

    # create environment
    return EnvUtils.create_env_for_data_processing(
        env_meta=env_meta,
        env_class=env_class,
        camera_names=(camera_names if camera_names is not None else []),
        camera_height=camera_height,
        camera_width=camera_width,
        reward_shaping=False,
        render=render,
        render_offscreen=render_offscreen,
        use_image_obs=use_image_obs,
        use_depth_obs=use_depth_obs,
    )
+ """ + print("\nmake_dataset_video(\n\tdataset_path={},\n\tvideo_path={},{}\n)".format( + dataset_path, + video_path, + "\n\tnum_render={},".format(num_render) if num_render is not None else "", + )) + playback_args = argparse.Namespace() + playback_args.dataset = dataset_path + playback_args.filter_key = None + playback_args.n = num_render + playback_args.use_obs = use_obs + playback_args.use_actions = False + playback_args.render = False + playback_args.video_path = video_path + playback_args.video_skip = video_skip + playback_args.render_image_names = render_image_names + if (render_image_names is None): + # default robosuite + playback_args.render_image_names = ["agentview"] + playback_args.render_depth_names = None + playback_args.first = False + + try: + playback_dataset(playback_args) + except Exception as e: + res_str = "playback failed with error:\n{}\n\n{}".format(e, traceback.format_exc()) + print(res_str) + + +def get_default_env_cameras(env_meta): + """ + Get the default set of cameras for a particular robomimic environment type. + + Args: + env_meta (dict): environment metadata compatible with robomimic, see + https://robomimic.github.io/docs/modules/environments.html + + Returns: + camera_names (list of str): list of camera names that correspond to image observations + """ + return DEFAULT_CAMERAS[EnvUtils.get_env_type(env_meta=env_meta)] diff --git a/mimicgen_envs/utils/file_utils.py b/mimicgen_envs/utils/file_utils.py deleted file mode 100644 index 4a65a80..0000000 --- a/mimicgen_envs/utils/file_utils.py +++ /dev/null @@ -1,45 +0,0 @@ -# Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# Licensed under the NVIDIA Source Code License [see LICENSE for details]. - -""" -A collection of utility functions for working with files. 
-""" -import os -import shutil -import tempfile -import gdown - -from robomimic.utils.file_utils import url_is_alive - - -def download_url_from_gdrive(url, download_dir, check_overwrite=True): - """ - Downloads a file at a URL from Google Drive. - - Example usage: - url = https://drive.google.com/file/d/1DABdqnBri6-l9UitjQV53uOq_84Dx7Xt/view?usp=drive_link - download_dir = "/tmp" - download_url_from_gdrive(url, download_dir, check_overwrite=True) - - Args: - url (str): url string - download_dir (str): path to directory where file should be downloaded - check_overwrite (bool): if True, will sanity check the download fpath to make sure a file of that name - doesn't already exist there - """ - assert url_is_alive(url), "@download_url_from_gdrive got unreachable url: {}".format(url) - - with tempfile.TemporaryDirectory() as td: - # HACK: Change directory to temp dir, download file there, and then move the file to desired directory. - # We do this because we do not know the name of the file beforehand. - cur_dir = os.getcwd() - os.chdir(td) - fpath = gdown.download(url, quiet=False, fuzzy=True) - fname = os.path.basename(fpath) - file_to_write = os.path.join(download_dir, fname) - if check_overwrite and os.path.exists(file_to_write): - user_response = input(f"Warning: file {file_to_write} already exists. Overwrite? y/n\n") - assert user_response.lower() in {"yes", "y"}, f"Did not receive confirmation. Aborting download." 
- shutil.move(fpath, file_to_write) - os.chdir(cur_dir) diff --git a/requirements-docs.txt b/requirements-docs.txt new file mode 100644 index 0000000..78b8784 --- /dev/null +++ b/requirements-docs.txt @@ -0,0 +1,8 @@ +# requirements for building sphinx docs +pygments==2.4.1 +sphinx +sphinx_rtd_theme +sphinx_markdown_tables +sphinx_book_theme +recommonmark +nbsphinx \ No newline at end of file diff --git a/setup.py b/setup.py index 4bc0ecb..48405c4 100644 --- a/setup.py +++ b/setup.py @@ -11,9 +11,9 @@ long_description = ''.join(lines) setup( - name="mimicgen_envs", + name="mimicgen", packages=[ - package for package in find_packages() if package.startswith("mimicgen_envs") + package for package in find_packages() if package.startswith("mimicgen") ], install_requires=[ "numpy>=1.13.3", From 33fcab68d1589f9c645c86c2550fe72464401ce4 Mon Sep 17 00:00:00 2001 From: Ajay Mandlekar Date: Thu, 4 Jul 2024 11:56:20 -0700 Subject: [PATCH 02/11] fix small naming issue --- mimicgen/scripts/generate_core_configs.py | 3 +-- mimicgen/scripts/generate_robot_transfer_configs.py | 3 +-- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/mimicgen/scripts/generate_core_configs.py b/mimicgen/scripts/generate_core_configs.py index c04bf97..f61acba 100644 --- a/mimicgen/scripts/generate_core_configs.py +++ b/mimicgen/scripts/generate_core_configs.py @@ -23,8 +23,7 @@ # set path to folder containing src datasets -import mimicgen_envs -SRC_DATA_DIR = os.path.join(mimicgen_envs.__path__[0], "../datasets/source") +SRC_DATA_DIR = os.path.join(mimicgen.__path__[0], "../datasets/source") # set base folder for where to copy each base config and generate new config files for data generation CONFIG_DIR = "/tmp/core_configs" diff --git a/mimicgen/scripts/generate_robot_transfer_configs.py b/mimicgen/scripts/generate_robot_transfer_configs.py index b8f0e8e..b124cf7 100644 --- a/mimicgen/scripts/generate_robot_transfer_configs.py +++ b/mimicgen/scripts/generate_robot_transfer_configs.py @@ 
-24,8 +24,7 @@ # set path to folder containing src datasets -import mimicgen_envs -SRC_DATA_DIR = os.path.join(mimicgen_envs.__path__[0], "../datasets/source") +SRC_DATA_DIR = os.path.join(mimicgen.__path__[0], "../datasets/source") # set base folder for where to copy each base config and generate new config files for data generation CONFIG_DIR = "/tmp/robot_configs" From 109d2ad523ae568655a8cf273b522d7eb9faee49 Mon Sep 17 00:00:00 2001 From: Ajay Mandlekar Date: Thu, 4 Jul 2024 12:04:26 -0700 Subject: [PATCH 03/11] update gitignore --- .gitignore | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/.gitignore b/.gitignore index 5a803b1..0cf0c04 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,9 @@ +# Folders generated by the repo +datasets/ +training_results/ +paper/ + + # Mac OSX .DS_Store From 9d2a459a6dba39454c272e1d39de441ba022aead Mon Sep 17 00:00:00 2001 From: Ajay Mandlekar Date: Fri, 5 Jul 2024 17:13:29 -0700 Subject: [PATCH 04/11] adding docs structure (wip) --- .gitignore | 2 +- README.md | 208 +--------------- docs/Makefile | 23 ++ docs/_static/theme_overrides.css | 13 + docs/api/mimicgen.configs.rst | 38 +++ docs/api/mimicgen.datagen.rst | 46 ++++ docs/api/mimicgen.env_interfaces.rst | 30 +++ docs/api/mimicgen.envs.robosuite.rst | 94 +++++++ docs/api/mimicgen.envs.rst | 17 ++ docs/api/mimicgen.rst | 22 ++ docs/api/mimicgen.scripts.rst | 134 ++++++++++ docs/api/mimicgen.utils.rst | 54 ++++ docs/conf.py | 173 +++++++++++++ docs/datasets/overview.md | 91 +++++++ {assets => docs/images}/mimicgen.gif | Bin {assets => docs/images}/mosaic.gif | Bin docs/index.rst | 61 +++++ docs/introduction/installation.md | 88 +++++++ docs/introduction/overview.md | 46 ++++ docs/mimicgen_logo.png | Bin 0 -> 239240 bytes docs/miscellaneous/troubleshooting.md | 9 + docs/modules/datagen.md | 5 + docs/modules/env_interfaces.md | 4 + docs/modules/overview.md | 4 + docs/tutorials/datagen_custom.md | 3 + docs/tutorials/debugging_datagen.md | 12 + 
docs/tutorials/getting_started.md | 42 ++++ docs/tutorials/launching_several.md | 3 + docs/tutorials/reproducing_experiments.md | 3 + docs/tutorials/subtask_termination_signals.md | 12 + docs/tutorials/task_visualizations.md | 8 + mimicgen/envs/robosuite/coffee.py | 7 +- mimicgen/envs/robosuite/mug_cleanup.py | 3 - mimicgen/envs/robosuite/threading.py | 3 - .../envs/robosuite/three_piece_assembly.py | 3 - .../robosuite/objects/composite/needle.py | 2 - .../objects/composite/ring_tripod.py | 2 - .../models/robosuite/objects/xml_objects.py | 1 - mimicgen/scripts/generate_core_configs.py | 9 +- .../scripts/generate_core_training_configs.py | 68 ++--- .../generate_robot_transfer_configs.py | 9 +- ...enerate_robot_transfer_training_configs.py | 232 ++++++++++++++++++ mimicgen/scripts/merge_hdf5.py | 1 - mimicgen/scripts/prepare_all_src_datasets.sh | 24 +- setup.py | 6 +- 45 files changed, 1345 insertions(+), 270 deletions(-) create mode 100644 docs/Makefile create mode 100644 docs/_static/theme_overrides.css create mode 100644 docs/api/mimicgen.configs.rst create mode 100644 docs/api/mimicgen.datagen.rst create mode 100644 docs/api/mimicgen.env_interfaces.rst create mode 100644 docs/api/mimicgen.envs.robosuite.rst create mode 100644 docs/api/mimicgen.envs.rst create mode 100644 docs/api/mimicgen.rst create mode 100644 docs/api/mimicgen.scripts.rst create mode 100644 docs/api/mimicgen.utils.rst create mode 100644 docs/conf.py create mode 100644 docs/datasets/overview.md rename {assets => docs/images}/mimicgen.gif (100%) rename {assets => docs/images}/mosaic.gif (100%) create mode 100644 docs/index.rst create mode 100644 docs/introduction/installation.md create mode 100644 docs/introduction/overview.md create mode 100644 docs/mimicgen_logo.png create mode 100644 docs/miscellaneous/troubleshooting.md create mode 100644 docs/modules/datagen.md create mode 100644 docs/modules/env_interfaces.md create mode 100644 docs/modules/overview.md create mode 100644 
docs/tutorials/datagen_custom.md create mode 100644 docs/tutorials/debugging_datagen.md create mode 100644 docs/tutorials/getting_started.md create mode 100644 docs/tutorials/launching_several.md create mode 100644 docs/tutorials/reproducing_experiments.md create mode 100644 docs/tutorials/subtask_termination_signals.md create mode 100644 docs/tutorials/task_visualizations.md create mode 100644 mimicgen/scripts/generate_robot_transfer_training_configs.py diff --git a/.gitignore b/.gitignore index 0cf0c04..50d5978 100644 --- a/.gitignore +++ b/.gitignore @@ -1,5 +1,5 @@ # Folders generated by the repo -datasets/ +/datasets/ training_results/ paper/ diff --git a/README.md b/README.md index a9ac588..6cc4331 100644 --- a/README.md +++ b/README.md @@ -1,224 +1,36 @@ -# MimicGen Environments and Datasets +# MimicGen

- - +

-This repository contains the official release of simulation environments and datasets for the [CoRL 2023](https://www.corl2023.org/) paper "MimicGen: A Data Generation System for Scalable Robot Learning using Human Demonstrations". +This repository contains the official release of data generation code, simulation environments, and datasets for the [CoRL 2023](https://www.corl2023.org/) paper "MimicGen: A Data Generation System for Scalable Robot Learning using Human Demonstrations". -The datasets contain over 48,000 task demonstrations across 12 tasks. +The released datasets contain over 48,000 task demonstrations across 12 tasks and the MimicGen data generation tool can create as many as you'd like. Website: https://mimicgen.github.io Paper: https://arxiv.org/abs/2310.17596 +Documentation: https://mimicgen.github.io/docs/introduction/overview.html + For business inquiries, please submit this form: [NVIDIA Research Licensing](https://www.nvidia.com/en-us/research/inquiries/) ------- ## Latest Updates +- [07/05/2024] **v1.0.0**: Full code release, including data generation code - [04/04/2024] **v0.1.1**: Dataset license changed to [CC-BY 4.0](https://creativecommons.org/licenses/by/4.0/), which is less restrictive (see [License](#license)) - [09/28/2023] **v0.1.0**: Initial code and paper release ------- +## Troubleshooting -## Table of Contents - -- [Installation](#installation) -- [Downloading and Using Datasets](#downloading-and-using-datasets) -- [Reproducing Policy Learning Results](#reproducing-policy-learning-results) -- [Task Visualizations](#task-visualizations) -- [Data Generation Code](#data-generation-code) -- [Troubleshooting and Known Issues](#troubleshooting-and-known-issues) -- [License](#license) -- [Citation](#citation) - - -## Installation - -We recommend installing the repo into a new conda environment (it is called `mimicgen` in the example below): - -```sh -conda create -n mimicgen python=3.8 -conda activate mimicgen -``` - -You can install 
most of the dependencies by cloning the repository and then installing from source: - -```sh -cd -git clone https://github.com/NVlabs/mimicgen_environments.git -cd mimicgen_environments -pip install -e . -``` - -However, there are some additional dependencies that we list below. These are best installed from source: - -- [robosuite](https://robosuite.ai/) - - **Installation** - ```sh - cd - git clone https://github.com/ARISE-Initiative/robosuite.git - git checkout b9d8d3de5e3dfd1724f4a0e6555246c460407daa - cd robosuite - pip install -e . - ``` - - **Note**: the git checkout command corresponds to the commit we used for testing our policy learning results. In general the `master` branch (`v1.4+`) should be fine. - - For more detailed instructions, see [here](https://robosuite.ai/docs/installation.html) -- [robomimic](https://robomimic.github.io/) - - **Installation** - ```sh - cd - git clone https://github.com/ARISE-Initiative/robomimic.git - git checkout ab6c3dcb8506f7f06b43b41365e5b3288c858520 - cd robomimic - pip install -e . - ``` - - **Note**: the git checkout command corresponds to the commit we used for testing our policy learning results. In general the `master` branch (`v0.3+`) should be fine. - - For more detailed instructions, see [here](https://robomimic.github.io/docs/introduction/installation.html) -- [robosuite_task_zoo](https://github.com/ARISE-Initiative/robosuite-task-zoo) - - **Note**: This is optional and only needed for the Kitchen and Hammer Cleanup environments / datasets. - - **Installation** - ```sh - cd - git clone https://github.com/ARISE-Initiative/robosuite-task-zoo - git checkout 74eab7f88214c21ca1ae8617c2b2f8d19718a9ed - cd robosuite_task_zoo - pip install -e . - ``` - -Lastly, **please downgrade MuJoCo to 2.3.2**: -```sh -pip install mujoco==2.3.2 -``` - -**Note**: This MuJoCo version (`2.3.2`) is important -- in our testing, we found that other versions of MuJoCo could be problematic, especially for the Sawyer arm datasets (e.g. 
`2.3.5` causes problems with rendering and `2.3.7` changes the dynamics of the robot arm significantly from the collected datasets). - -### Test Your Installation - -The following script can be used to try random actions in a task. -```sh -cd mimicgen/scripts -python demo_random_action.py -``` - -## Downloading and Using Datasets - -### Dataset Types - -As described in the paper, each task has a default reset distribution (D_0). Source human demonstrations (usually 10 demos) were collected on this distribution and MimicGen was subsequently used to generate large datasets (usually 1000 demos) across different task reset distributions (e.g. D_0, D_1, D_2), objects, and robots. - -The datasets are split into different types: - -- **source**: source human datasets used to generate all data -- this generally consists of 10 human demonstrations collected on the D_0 variant for each task. -- **core**: datasets generated with MimicGen for different task reset distributions. These correspond to the core set of results in Figure 4 of the paper. -- **object**: datasets generated with MimicGen for different objects. These correspond to the results in Appendix G of the paper. -- **robot**: datasets generated with MimicGen for different robots. These correspond to the results in Appendix F of the paper. -- **large_interpolation**: datasets generated with MimicGen using much larger interpolation segments. These correspond to the results in Appendix H in the paper. - -**Note 1**: All datasets are readily compatible with [robomimic](https://robomimic.github.io/) --- the structure is explained [here](https://robomimic.github.io/docs/datasets/overview.html#dataset-structure). 
This means that you can use robomimic to [visualize the data](https://robomimic.github.io/docs/tutorials/dataset_contents.html) or train models with different policy learning methods that we did not explore in our paper, such as [BC-Transformer](https://robomimic.github.io/docs/tutorials/training_transformers.html). - -**Note 2**: We found that the large_interpolation datasets pose a significant challenge for imitation learning, and have substantial room for improvement. - -### Dataset Statistics - -The datasets contain over 48,000 task demonstrations across 12 tasks. - -We provide more information on the amount of demonstrations for each dataset type: -- **source**: 120 human demonstrations across 12 tasks (10 per task) used to automatically generate the other datasets -- **core**: 26,000 task demonstrations across 12 tasks (26 task variants) -- **object**: 2000 task demonstrations on the Mug Cleanup task with different mugs -- **robot**: 16,000 task demonstrations across 4 different robot arms on 2 tasks (4 task variants) -- **large_interpolation**: 6000 task demonstrations across 6 tasks that pose significant challenges for modern imitation learning methods - -### Dataset Download - -#### Method 1: Using `download_datasets.py` (Recommended) - -`download_datasets.py` (located at `mimicgen/scripts`) is a python script that provides a programmatic way of downloading the datasets. This is the preferred method, because this script also sets up a directory structure for the datasets that works out of the box with the code for reproducing policy learning results. 
- -A few examples of using this script are provided below: - -```sh -# default behavior - just download core square_d0 dataset -python download_datasets.py - -# download core datasets for square D0, D1, D2 and coffee D0, D1, D2 -python download_datasets.py --dataset_type core --tasks square_d0 square_d1 square_d2 coffee_d0 coffee_d1 coffee_d2 - -# download all core datasets, but do a dry run first to see what will be downloaded and where -python download_datasets.py --dataset_type core --tasks all --dry_run - -# download all source human datasets -python download_datasets.py --dataset_type source --tasks all -``` - -#### Method 2: Using Direct Download Links - -You can download the datasets manually through Google Drive. The folders each correspond to the dataset types described in [Dataset Types](#dataset-types). - -**Google Drive folder with all datasets:** [link](https://drive.google.com/drive/folders/14e9kkHGfApuQ709LBEbXrXVI1Lp5Ax7p?usp=drive_link) - -#### Method 3: Using Hugging Face - -You can download the datasets through Hugging Face. - -**Hugging Face dataset repository:** [link](https://huggingface.co/datasets/amandlek/mimicgen_datasets) - -## Reproducing Policy Learning Results - -After downloading the appropriate datasets you’re interested in using by running the `download_datasets.py` script, the `generate_training_configs_for_public_datasets.py` script (located at `mimicgen/scripts`) can be used to generate all training config json files necessary to reproduce the experiments in the paper. A few examples are below. - -```sh -# Assume datasets already exist in mimicgen/../datasets folder. Configs will be generated under mimicgen/exps/paper, and training results will be at mimicgen/../training_results after launching training runs. -python generate_training_configs_for_public_datasets.py - -# Alternatively, specify where datasets exist, and specify where configs should be generated. 
-python generate_training_configs_for_public_datasets.py --config_dir /tmp/configs --dataset_dir /tmp/datasets --output_dir /tmp/experiment_results -``` - -Then, to reproduce a specific set of training runs for different experiment groups (see [Dataset Types](#dataset-types)), we can simply navigate to the generated config directory, and copy training commands from the generated shell script there. As an example, we can reproduce the image training results on the Coffee D0 dataset, by looking for the correct set of commands in `mimicgen/exps/paper/core.sh` and running them. The relevant section of the shell script is reproduced below. - -```sh -# task: coffee_d0 -# obs modality: image -python /path/to/robomimic/scripts/train.py --config /path/to/mimicgen/exps/paper/core/coffee_d0/image/bc_rnn.json -``` - -**Note 1**: Another option is to directly run `robomimic/scripts/train.py` with any generated config jsons of interest -- the commands in the shell files do exactly this. - -**Note 2**: See the [robomimic documentation](https://robomimic.github.io/docs/introduction/getting_started.html) for more information on how training works. - -**Note 3**: In the MimicGen paper, we generated our datasets on versions of environments built on robosuite `v1.2`. Since then, we changed the environments and datasets (through postprocessing) to be based on robosuite `v1.4`. However, `v1.4` has some visual and dynamics differences from `v1.2`, so the learning results may not exactly match up with the ones we reported in the paper. In our testing on these released datasets, we were able to reproduce nearly all of our results, but within 10% of the performance reported in the paper. - - -## Task Visualizations - -We provide a convenience script to write videos for each task's reset distribution at `scripts/get_reset_videos.py`. 
Set the `OUTPUT_FOLDER` global variable to the folder where you want to write the videos, and set `DATASET_INFOS` appropriately if you would like to limit the environments visualized. Then run the script. - -The environments are also readily compatible with robosuite visualization scripts such as the [demo_random_action.py](https://github.com/ARISE-Initiative/robosuite/blob/b9d8d3de5e3dfd1724f4a0e6555246c460407daa/robosuite/demos/demo_random_action.py) script and the [make_reset_video.py](https://github.com/ARISE-Initiative/robosuite/blob/b9d8d3de5e3dfd1724f4a0e6555246c460407daa/robosuite/scripts/make_reset_video.py) script, but you will need to modify these files to add a `import mimicgen` line to make sure that `robosuite` can find these environments. - - -**Note**: You can find task reset visualizations on the [website](https://mimicgen.github.io), but they may look a little different as they were generated with robosuite v1.2. - -## Data Generation Code - -If you are interested in the data generation code, please send an email to amandlekar@nvidia.com. Thanks! - -## Troubleshooting and Known Issues - -- If your robomimic training seems to be proceeding slowly (especially for image-based agents), it might be a problem with robomimic and more modern versions of PyTorch. We recommend PyTorch 1.12.1 (on Ubuntu, we used `conda install pytorch==1.12.1 torchvision==0.13.1 torchaudio==0.12.1 cudatoolkit=11.3 -c pytorch`). It is also a good idea to verify that the GPU is being utilized during training. -- In our testing on M1 macbook we ran into the following error when using `imageio-ffmpeg` installed through pip: `RuntimeError: No ffmpeg exe could be found. Install ffmpeg on your system, or set the IMAGEIO_FFMPEG_EXE environment variable.` Using `conda install imageio-ffmpeg` fixed this issue on our end. -- If you run into trouble with installing [egl_probe](https://github.com/StanfordVL/egl_probe) during robomimic installation (e.g. 
`ERROR: Failed building wheel for egl_probe`) you may need to make sure `cmake` is installed. A simple `pip install cmake` should work. -- If you run into other strange installation issues, one potential fix is to launch a new terminal, activate your conda environment, and try the install commands that are failing once again. One clue that the current terminal state is corrupt and this fix will help is if you see installations going into a different conda environment than the one you have active. -- If you run into rendering issues with the Sawyer robot arm, or have trouble reproducing our results, your MuJoCo version might be the issue. As noted in the [Installation](#installation) section, please use MuJoCo 2.3.2 (`pip install mujoco==2.3.2`). - -If you run into an error not documented above, please search through the [GitHub issues](https://github.com/NVlabs/mimicgen_environments/issues), and create a new one if you cannot find a fix. +Please see the [troubleshooting](https://mimicgen.github.io/docs/miscellaneous/troubleshooting.html) section for common fixes, or submit an issue on our github page. ## License -The code is released under the [NVIDIA Source Code License](https://github.com/NVlabs/mimicgen_environments/blob/main/LICENSE) and the datasets are released under [CC-BY 4.0](https://creativecommons.org/licenses/by/4.0/). +The code is released under the [NVIDIA Source Code License](https://github.com/NVlabs/mimicgen/blob/main/LICENSE) and the datasets are released under [CC-BY 4.0](https://creativecommons.org/licenses/by/4.0/). ## Citation diff --git a/docs/Makefile b/docs/Makefile new file mode 100644 index 0000000..8508a32 --- /dev/null +++ b/docs/Makefile @@ -0,0 +1,23 @@ +# Minimal makefile for Sphinx documentation +# + +# You can set these variables from the command line, and also +# from the environment for the first two. +SPHINXOPTS ?= +SPHINXBUILD ?= sphinx-build +SOURCEDIR = . 
+BUILDDIR = _build + +# Put it first so that "make" without argument is like "make help". +help: + @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + +apidoc: + @sphinx-apidoc -T --force ../mimicgen -o api + +.PHONY: help Makefile + +# Catch-all target: route all unknown targets to Sphinx using the new +# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). +%: Makefile + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/docs/_static/theme_overrides.css b/docs/_static/theme_overrides.css new file mode 100644 index 0000000..63ee6cc --- /dev/null +++ b/docs/_static/theme_overrides.css @@ -0,0 +1,13 @@ +/* override table width restrictions */ +@media screen and (min-width: 767px) { + + .wy-table-responsive table td { + /* !important prevents the common CSS stylesheets from overriding + this as on RTD they are loaded after this stylesheet */ + white-space: normal !important; + } + + .wy-table-responsive { + overflow: visible !important; + } +} diff --git a/docs/api/mimicgen.configs.rst b/docs/api/mimicgen.configs.rst new file mode 100644 index 0000000..a580a9c --- /dev/null +++ b/docs/api/mimicgen.configs.rst @@ -0,0 +1,38 @@ +mimicgen.configs package +======================== + +Submodules +---------- + +mimicgen.configs.config module +------------------------------ + +.. automodule:: mimicgen.configs.config + :members: + :undoc-members: + :show-inheritance: + +mimicgen.configs.robosuite module +--------------------------------- + +.. automodule:: mimicgen.configs.robosuite + :members: + :undoc-members: + :show-inheritance: + +mimicgen.configs.task\_spec module +---------------------------------- + +.. automodule:: mimicgen.configs.task_spec + :members: + :undoc-members: + :show-inheritance: + + +Module contents +--------------- + +.. 
automodule:: mimicgen.configs + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/api/mimicgen.datagen.rst b/docs/api/mimicgen.datagen.rst new file mode 100644 index 0000000..6f63ef9 --- /dev/null +++ b/docs/api/mimicgen.datagen.rst @@ -0,0 +1,46 @@ +mimicgen.datagen package +======================== + +Submodules +---------- + +mimicgen.datagen.data\_generator module +--------------------------------------- + +.. automodule:: mimicgen.datagen.data_generator + :members: + :undoc-members: + :show-inheritance: + +mimicgen.datagen.datagen\_info module +------------------------------------- + +.. automodule:: mimicgen.datagen.datagen_info + :members: + :undoc-members: + :show-inheritance: + +mimicgen.datagen.selection\_strategy module +------------------------------------------- + +.. automodule:: mimicgen.datagen.selection_strategy + :members: + :undoc-members: + :show-inheritance: + +mimicgen.datagen.waypoint module +-------------------------------- + +.. automodule:: mimicgen.datagen.waypoint + :members: + :undoc-members: + :show-inheritance: + + +Module contents +--------------- + +.. automodule:: mimicgen.datagen + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/api/mimicgen.env_interfaces.rst b/docs/api/mimicgen.env_interfaces.rst new file mode 100644 index 0000000..1d9b179 --- /dev/null +++ b/docs/api/mimicgen.env_interfaces.rst @@ -0,0 +1,30 @@ +mimicgen.env\_interfaces package +================================ + +Submodules +---------- + +mimicgen.env\_interfaces.base module +------------------------------------ + +.. automodule:: mimicgen.env_interfaces.base + :members: + :undoc-members: + :show-inheritance: + +mimicgen.env\_interfaces.robosuite module +----------------------------------------- + +.. automodule:: mimicgen.env_interfaces.robosuite + :members: + :undoc-members: + :show-inheritance: + + +Module contents +--------------- + +.. 
automodule:: mimicgen.env_interfaces + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/api/mimicgen.envs.robosuite.rst b/docs/api/mimicgen.envs.robosuite.rst new file mode 100644 index 0000000..ecbc884 --- /dev/null +++ b/docs/api/mimicgen.envs.robosuite.rst @@ -0,0 +1,94 @@ +mimicgen.envs.robosuite package +=============================== + +Submodules +---------- + +mimicgen.envs.robosuite.coffee module +------------------------------------- + +.. automodule:: mimicgen.envs.robosuite.coffee + :members: + :undoc-members: + :show-inheritance: + +mimicgen.envs.robosuite.hammer\_cleanup module +---------------------------------------------- + +.. automodule:: mimicgen.envs.robosuite.hammer_cleanup + :members: + :undoc-members: + :show-inheritance: + +mimicgen.envs.robosuite.kitchen module +-------------------------------------- + +.. automodule:: mimicgen.envs.robosuite.kitchen + :members: + :undoc-members: + :show-inheritance: + +mimicgen.envs.robosuite.mug\_cleanup module +------------------------------------------- + +.. automodule:: mimicgen.envs.robosuite.mug_cleanup + :members: + :undoc-members: + :show-inheritance: + +mimicgen.envs.robosuite.nut\_assembly module +-------------------------------------------- + +.. automodule:: mimicgen.envs.robosuite.nut_assembly + :members: + :undoc-members: + :show-inheritance: + +mimicgen.envs.robosuite.pick\_place module +------------------------------------------ + +.. automodule:: mimicgen.envs.robosuite.pick_place + :members: + :undoc-members: + :show-inheritance: + +mimicgen.envs.robosuite.single\_arm\_env\_mg module +--------------------------------------------------- + +.. automodule:: mimicgen.envs.robosuite.single_arm_env_mg + :members: + :undoc-members: + :show-inheritance: + +mimicgen.envs.robosuite.stack module +------------------------------------ + +.. 
automodule:: mimicgen.envs.robosuite.stack + :members: + :undoc-members: + :show-inheritance: + +mimicgen.envs.robosuite.threading module +---------------------------------------- + +.. automodule:: mimicgen.envs.robosuite.threading + :members: + :undoc-members: + :show-inheritance: + +mimicgen.envs.robosuite.three\_piece\_assembly module +----------------------------------------------------- + +.. automodule:: mimicgen.envs.robosuite.three_piece_assembly + :members: + :undoc-members: + :show-inheritance: + + +Module contents +--------------- + +.. automodule:: mimicgen.envs.robosuite + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/api/mimicgen.envs.rst b/docs/api/mimicgen.envs.rst new file mode 100644 index 0000000..d7b7226 --- /dev/null +++ b/docs/api/mimicgen.envs.rst @@ -0,0 +1,17 @@ +mimicgen.envs package +===================== + +Subpackages +----------- + +.. toctree:: + + mimicgen.envs.robosuite + +Module contents +--------------- + +.. automodule:: mimicgen.envs + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/api/mimicgen.rst b/docs/api/mimicgen.rst new file mode 100644 index 0000000..38537c3 --- /dev/null +++ b/docs/api/mimicgen.rst @@ -0,0 +1,22 @@ +mimicgen package +================ + +Subpackages +----------- + +.. toctree:: + + mimicgen.configs + mimicgen.datagen + mimicgen.env_interfaces + mimicgen.envs + mimicgen.scripts + mimicgen.utils + +Module contents +--------------- + +.. automodule:: mimicgen + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/api/mimicgen.scripts.rst b/docs/api/mimicgen.scripts.rst new file mode 100644 index 0000000..a1f4ac6 --- /dev/null +++ b/docs/api/mimicgen.scripts.rst @@ -0,0 +1,134 @@ +mimicgen.scripts package +======================== + +Submodules +---------- + +mimicgen.scripts.annotate\_subtasks module +------------------------------------------ + +.. 
automodule:: mimicgen.scripts.annotate_subtasks + :members: + :undoc-members: + :show-inheritance: + +mimicgen.scripts.demo\_random\_action module +-------------------------------------------- + +.. automodule:: mimicgen.scripts.demo_random_action + :members: + :undoc-members: + :show-inheritance: + +mimicgen.scripts.download\_datasets module +------------------------------------------ + +.. automodule:: mimicgen.scripts.download_datasets + :members: + :undoc-members: + :show-inheritance: + +mimicgen.scripts.generate\_config\_templates module +--------------------------------------------------- + +.. automodule:: mimicgen.scripts.generate_config_templates + :members: + :undoc-members: + :show-inheritance: + +mimicgen.scripts.generate\_core\_configs module +----------------------------------------------- + +.. automodule:: mimicgen.scripts.generate_core_configs + :members: + :undoc-members: + :show-inheritance: + +mimicgen.scripts.generate\_core\_training\_configs module +--------------------------------------------------------- + +.. automodule:: mimicgen.scripts.generate_core_training_configs + :members: + :undoc-members: + :show-inheritance: + +mimicgen.scripts.generate\_dataset module +----------------------------------------- + +.. automodule:: mimicgen.scripts.generate_dataset + :members: + :undoc-members: + :show-inheritance: + +mimicgen.scripts.generate\_robot\_transfer\_configs module +---------------------------------------------------------- + +.. automodule:: mimicgen.scripts.generate_robot_transfer_configs + :members: + :undoc-members: + :show-inheritance: + +mimicgen.scripts.generate\_robot\_transfer\_training\_configs module +-------------------------------------------------------------------- + +.. 
automodule:: mimicgen.scripts.generate_robot_transfer_training_configs + :members: + :undoc-members: + :show-inheritance: + +mimicgen.scripts.generate\_training\_configs\_for\_public\_datasets module +-------------------------------------------------------------------------- + +.. automodule:: mimicgen.scripts.generate_training_configs_for_public_datasets + :members: + :undoc-members: + :show-inheritance: + +mimicgen.scripts.get\_reset\_videos module +------------------------------------------ + +.. automodule:: mimicgen.scripts.get_reset_videos + :members: + :undoc-members: + :show-inheritance: + +mimicgen.scripts.get\_source\_info module +----------------------------------------- + +.. automodule:: mimicgen.scripts.get_source_info + :members: + :undoc-members: + :show-inheritance: + +mimicgen.scripts.merge\_hdf5 module +----------------------------------- + +.. automodule:: mimicgen.scripts.merge_hdf5 + :members: + :undoc-members: + :show-inheritance: + +mimicgen.scripts.prepare\_src\_dataset module +--------------------------------------------- + +.. automodule:: mimicgen.scripts.prepare_src_dataset + :members: + :undoc-members: + :show-inheritance: + +mimicgen.scripts.visualize\_subtasks module +------------------------------------------- + +.. automodule:: mimicgen.scripts.visualize_subtasks + :members: + :undoc-members: + :show-inheritance: + + +Module contents +--------------- + +.. automodule:: mimicgen.scripts + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/api/mimicgen.utils.rst b/docs/api/mimicgen.utils.rst new file mode 100644 index 0000000..3ffa865 --- /dev/null +++ b/docs/api/mimicgen.utils.rst @@ -0,0 +1,54 @@ +mimicgen.utils package +====================== + +Submodules +---------- + +mimicgen.utils.config\_utils module +----------------------------------- + +.. automodule:: mimicgen.utils.config_utils + :members: + :undoc-members: + :show-inheritance: + +mimicgen.utils.file\_utils module +--------------------------------- + +.. 
automodule:: mimicgen.utils.file_utils + :members: + :undoc-members: + :show-inheritance: + +mimicgen.utils.misc\_utils module +--------------------------------- + +.. automodule:: mimicgen.utils.misc_utils + :members: + :undoc-members: + :show-inheritance: + +mimicgen.utils.pose\_utils module +--------------------------------- + +.. automodule:: mimicgen.utils.pose_utils + :members: + :undoc-members: + :show-inheritance: + +mimicgen.utils.robomimic\_utils module +-------------------------------------- + +.. automodule:: mimicgen.utils.robomimic_utils + :members: + :undoc-members: + :show-inheritance: + + +Module contents +--------------- + +.. automodule:: mimicgen.utils + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/conf.py b/docs/conf.py new file mode 100644 index 0000000..d740392 --- /dev/null +++ b/docs/conf.py @@ -0,0 +1,173 @@ +# Configuration file for the Sphinx documentation builder. +# +# For the full list of built-in configuration values, see the documentation: +# https://www.sphinx-doc.org/en/master/usage/configuration.html + +# -- Path setup -------------------------------------------------------------- + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. + +import os +import sys +sys.path.insert(0, os.path.abspath('.')) + +import sphinx_book_theme +import mimicgen + + +# -- General configuration --------------------------------------------------- +# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration + +# If your documentation needs a minimal Sphinx version, state it here. +# +# needs_sphinx = '1.0' + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. 
+extensions = [ + 'sphinx.ext.napoleon', + 'sphinx_markdown_tables', + 'sphinx.ext.mathjax', + 'sphinx.ext.githubpages', + 'sphinx.ext.autodoc', + 'recommonmark', # use Sphinx-1.4 or newer + # 'nbsphinx', +] + + +# Sphinx-apidoc variables +apidoc_module_dir = "../mimicgen" +apidoc_output_dir = "reference" + + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# source_parsers = { +# '.md': CommonMarkParser, +# } + +# The suffix(es) of source filenames. +# You can specify multiple suffix as a list of string: +source_suffix = ['.rst', '.md', '.ipynb'] + +# The master toctree document. +master_doc = 'index' + +# General information about the project. +project = 'mimicgen' +copyright = '2023-2024, NVIDIA' +author = 'Ajay Mandlekar' + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +# The short X.Y version. +version = (".").join(mimicgen.__version__.split(".")[:-1]) +# The full version, including alpha/beta/rc tags. +release = (".").join(mimicgen.__version__.split(".")[:-1]) + + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +# +# This is also used if you do content translation via gettext catalogs. +# Usually you set "language" from the command line for these cases. +language = None + + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This pattern also affects html_static_path and html_extra_path. +exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'sphinx' + +# If true, `todo` and `todoList` produce output, else they produce nothing. 
+todo_include_todos = False + + +# -- Options for HTML output ------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +# +html_theme = 'sphinx_book_theme' +html_logo = "mimicgen_logo.png" + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +# +# html_theme_options = {} + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['_static'] + +# html_context = { +# 'css_files': [ +# '_static/theme_overrides.css', # override wide tables in RTD theme +# ], +# } + +# -- Options for HTMLHelp output ------------------------------------------ + +# Output file base name for HTML help builder. +htmlhelp_basename = 'mimicgendoc' + + +# -- Options for LaTeX output --------------------------------------------- + +latex_elements = { + # The paper size ('letterpaper' or 'a4paper'). + # + # 'papersize': 'letterpaper', + + # The font size ('10pt', '11pt' or '12pt'). + # + # 'pointsize': '10pt', + + # Additional stuff for the LaTeX preamble. + # + # 'preamble': '', + + # Latex figure (float) alignment + # + # 'figure_align': 'htbp', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). +latex_documents = [ + (master_doc, 'mimicgen.tex', u'mimicgen Documentation', author, 'manual'), +] + + +# -- Options for manual page output --------------------------------------- + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). 
+man_pages = [ + (master_doc, 'mimicgen', u'mimicgen Documentation', + [author], 1) +] + + +# -- Options for Texinfo output ------------------------------------------- + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + (master_doc, 'mimicgen', u'mimicgen Documentation', + author, 'mimicgen', 'mimicgen', + 'Miscellaneous'), +] diff --git a/docs/datasets/overview.md b/docs/datasets/overview.md new file mode 100644 index 0000000..7ad8370 --- /dev/null +++ b/docs/datasets/overview.md @@ -0,0 +1,91 @@ +# Overview + +## Downloading and Using Datasets + +### Dataset Types + +As described in the paper, each task has a default reset distribution (D_0). Source human demonstrations (usually 10 demos) were collected on this distribution and MimicGen was subsequently used to generate large datasets (usually 1000 demos) across different task reset distributions (e.g. D_0, D_1, D_2), objects, and robots. + +The datasets are split into different types: + +- **source**: source human datasets used to generate all data -- this generally consists of 10 human demonstrations collected on the D_0 variant for each task. +- **core**: datasets generated with MimicGen for different task reset distributions. These correspond to the core set of results in Figure 4 of the paper. +- **object**: datasets generated with MimicGen for different objects. These correspond to the results in Appendix G of the paper. +- **robot**: datasets generated with MimicGen for different robots. These correspond to the results in Appendix F of the paper. +- **large_interpolation**: datasets generated with MimicGen using much larger interpolation segments. These correspond to the results in Appendix H in the paper. 
+ +**Note 1**: All datasets are readily compatible with [robomimic](https://robomimic.github.io/) --- the structure is explained [here](https://robomimic.github.io/docs/datasets/overview.html#dataset-structure). This means that you can use robomimic to [visualize the data](https://robomimic.github.io/docs/tutorials/dataset_contents.html) or train models with different policy learning methods that we did not explore in our paper, such as [BC-Transformer](https://robomimic.github.io/docs/tutorials/training_transformers.html). + +**Note 2**: We found that the large_interpolation datasets pose a significant challenge for imitation learning, and have substantial room for improvement. + +### Dataset Statistics + +The datasets contain over 48,000 task demonstrations across 12 tasks. + +We provide more information on the amount of demonstrations for each dataset type: +- **source**: 120 human demonstrations across 12 tasks (10 per task) used to automatically generate the other datasets +- **core**: 26,000 task demonstrations across 12 tasks (26 task variants) +- **object**: 2000 task demonstrations on the Mug Cleanup task with different mugs +- **robot**: 16,000 task demonstrations across 4 different robot arms on 2 tasks (4 task variants) +- **large_interpolation**: 6000 task demonstrations across 6 tasks that pose significant challenges for modern imitation learning methods + +### Dataset Download + +#### Method 1: Using `download_datasets.py` (Recommended) + +`download_datasets.py` (located at `mimicgen/scripts`) is a python script that provides a programmatic way of downloading the datasets. This is the preferred method, because this script also sets up a directory structure for the datasets that works out of the box with the code for reproducing policy learning results. 
+ +A few examples of using this script are provided below: + +```sh +# default behavior - just download core square_d0 dataset +python download_datasets.py + +# download core datasets for square D0, D1, D2 and coffee D0, D1, D2 +python download_datasets.py --dataset_type core --tasks square_d0 square_d1 square_d2 coffee_d0 coffee_d1 coffee_d2 + +# download all core datasets, but do a dry run first to see what will be downloaded and where +python download_datasets.py --dataset_type core --tasks all --dry_run + +# download all source human datasets +python download_datasets.py --dataset_type source --tasks all +``` + +#### Method 2: Using Direct Download Links + +You can download the datasets manually through Google Drive. The folders each correspond to the dataset types described in [Dataset Types](#dataset-types). + +**Google Drive folder with all datasets:** [link](https://drive.google.com/drive/folders/14e9kkHGfApuQ709LBEbXrXVI1Lp5Ax7p?usp=drive_link) + +#### Method 3: Using Hugging Face + +You can download the datasets through Hugging Face. + +**Hugging Face dataset repository:** [link](https://huggingface.co/datasets/amandlek/mimicgen_datasets) + + +## Reproducing Policy Learning Results + +After downloading the appropriate datasets you’re interested in using by running the `download_datasets.py` script, the `generate_training_configs_for_public_datasets.py` script (located at `mimicgen/scripts`) can be used to generate all training config json files necessary to reproduce the experiments in the paper. A few examples are below. + +```sh +# Assume datasets already exist in mimicgen/../datasets folder. Configs will be generated under mimicgen/exps/paper, and training results will be at mimicgen/../training_results after launching training runs. +python generate_training_configs_for_public_datasets.py + +# Alternatively, specify where datasets exist, and specify where configs should be generated. 
+python generate_training_configs_for_public_datasets.py --config_dir /tmp/configs --dataset_dir /tmp/datasets --output_dir /tmp/experiment_results +``` + +Then, to reproduce a specific set of training runs for different experiment groups (see [Dataset Types](#dataset-types)), we can simply navigate to the generated config directory, and copy training commands from the generated shell script there. As an example, we can reproduce the image training results on the Coffee D0 dataset, by looking for the correct set of commands in `mimicgen/exps/paper/core.sh` and running them. The relevant section of the shell script is reproduced below. + +```sh +# task: coffee_d0 +# obs modality: image +python /path/to/robomimic/scripts/train.py --config /path/to/mimicgen/exps/paper/core/coffee_d0/image/bc_rnn.json +``` + +**Note 1**: Another option is to directly run `robomimic/scripts/train.py` with any generated config jsons of interest -- the commands in the shell files do exactly this. + +**Note 2**: See the [robomimic documentation](https://robomimic.github.io/docs/introduction/getting_started.html) for more information on how training works. + +**Note 3**: In the MimicGen paper, we generated our datasets on versions of environments built on robosuite `v1.2`. Since then, we changed the environments and datasets (through postprocessing) to be based on robosuite `v1.4`. However, `v1.4` has some visual and dynamics differences from `v1.2`, so the learning results may not exactly match up with the ones we reported in the paper. In our testing on these released datasets, we were able to reproduce nearly all of our results to within 10% of the performance reported in the paper. 
diff --git a/assets/mimicgen.gif b/docs/images/mimicgen.gif similarity index 100% rename from assets/mimicgen.gif rename to docs/images/mimicgen.gif diff --git a/assets/mosaic.gif b/docs/images/mosaic.gif similarity index 100% rename from assets/mosaic.gif rename to docs/images/mosaic.gif diff --git a/docs/index.rst b/docs/index.rst new file mode 100644 index 0000000..af87537 --- /dev/null +++ b/docs/index.rst @@ -0,0 +1,61 @@ +.. mimicgen documentation master file, created by + sphinx-quickstart on Thu Jul 4 12:05:47 2024. + You can adapt this file completely to your liking, but it should at least + contain the root `toctree` directive. + +Welcome to mimicgen's documentation! +==================================== + +.. toctree:: + :maxdepth: 1 + :caption: Introduction + + introduction/overview + introduction/installation + +.. toctree:: + :maxdepth: 1 + :caption: Datasets + + datasets/overview + +.. toctree:: + :maxdepth: 1 + :caption: Tutorials + + tutorials/getting_started + tutorials/reproducing_experiments + tutorials/launching_several + tutorials/datagen_custom + tutorials/task_visualizations + tutorials/subtask_termination_signals + tutorials/debugging_datagen + +.. toctree:: + :maxdepth: 1 + :caption: Modules + + modules/overview + modules/datagen + modules/env_interfaces + +.. toctree:: + :maxdepth: 1 + :caption: Miscellaneous + + miscellaneous/troubleshooting + +.. 
 toctree:: + :maxdepth: 4 + :caption: Source API + + api/mimicgen + + + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`modindex` +* :ref:`search` diff --git a/docs/introduction/installation.md b/docs/introduction/installation.md new file mode 100644 index 0000000..d27db5a --- /dev/null +++ b/docs/introduction/installation.md @@ -0,0 +1,88 @@ +# Installation + +## Requirements + +- Mac OS X or Linux machine +- Python >= 3.6 (recommended 3.8.0) +- [conda](https://www.anaconda.com/products/individual) + - [virtualenv](https://virtualenv.pypa.io/en/latest/) is also an acceptable alternative, but we assume you have conda installed in our examples below + +## Install MimicGen + +We recommend installing the repo into a new conda environment (it is called `mimicgen` in the example below): + +```sh +conda create -n mimicgen python=3.8 +conda activate mimicgen +``` + +You can install most of the dependencies by cloning the repository and then installing from source: + +```sh +cd +git clone https://github.com/NVlabs/mimicgen.git +cd mimicgen +pip install -e . +``` + +However, there are some additional dependencies that we list below. These are best installed from source: + +- [robosuite](https://robosuite.ai/) + - **Note**: This is optional and only needed if running the examples provided with this repository. The MimicGen source code does not rely on robosuite and can be used with other simulation frameworks. + - **Installation** + ```sh + cd + git clone https://github.com/ARISE-Initiative/robosuite.git + cd robosuite + git checkout b9d8d3de5e3dfd1724f4a0e6555246c460407daa + pip install -e . + ``` + - **Note**: the git checkout command corresponds to the commit we used for testing our policy learning results. In general the `master` branch (`v1.4+`) should be fine. 
+ - For more detailed instructions, see [here](https://robosuite.ai/docs/installation.html) +- [robomimic](https://robomimic.github.io/) + - **Installation** + ```sh + cd + git clone https://github.com/ARISE-Initiative/robomimic.git + cd robomimic + git checkout d0b37cf214bd24fb590d182edb6384333f67b661 + pip install -e . + ``` + - **Note**: the git checkout command corresponds to the commit we used for testing our policy learning results. In general the `master` branch (`v0.3+`) should be fine. + - For more detailed instructions, see [here](https://robomimic.github.io/docs/introduction/installation.html) +- [robosuite_task_zoo](https://github.com/ARISE-Initiative/robosuite-task-zoo) + - **Note**: This is optional and only needed for the Kitchen and Hammer Cleanup environments / datasets. + - **Installation** + ```sh + cd + git clone https://github.com/ARISE-Initiative/robosuite-task-zoo + cd robosuite-task-zoo + git checkout 74eab7f88214c21ca1ae8617c2b2f8d19718a9ed + pip install -e . + ``` + +Lastly, if using robosuite, **please downgrade MuJoCo to 2.3.2**: +```sh +pip install mujoco==2.3.2 +``` + +
+

MuJoCo Version

+ +This MuJoCo version (`2.3.2`) can be important -- in our testing, we found that other versions of MuJoCo could be problematic, especially for the Sawyer arm datasets (e.g. `2.3.5` causes problems with rendering and `2.3.7` changes the dynamics of the robot arm significantly from the collected datasets). +
+ +## Test Your Installation + +The following script can be used to try random actions in one of our custom robosuite tasks. +```sh +cd mimicgen/scripts +python demo_random_action.py +``` + +
+

Testing Data Generation

+ +To test data generation please move on to the [Getting Started](https://mimicgen.github.io/docs/tutorials/getting_started.html) tutorial. + +
diff --git a/docs/introduction/overview.md b/docs/introduction/overview.md new file mode 100644 index 0000000..12bd15f --- /dev/null +++ b/docs/introduction/overview.md @@ -0,0 +1,46 @@ +# Overview + +

+ +

+ +This repository contains the official release of data generation code, simulation environments, and datasets for the [CoRL 2023](https://www.corl2023.org/) paper "MimicGen: A Data Generation System for Scalable Robot Learning using Human Demonstrations". + +The released datasets contain over 48,000 task demonstrations across 12 tasks and the MimicGen data generation tool can create as many as you would like. + +Website: [https://mimicgen.github.io](https://mimicgen.github.io) + +Paper: [https://arxiv.org/abs/2310.17596](https://arxiv.org/abs/2310.17596) + +Documentation: [https://mimicgen.github.io/docs/introduction/overview.html](https://mimicgen.github.io/docs/introduction/overview.html) + +For business inquiries, please submit this form: [NVIDIA Research Licensing](https://www.nvidia.com/en-us/research/inquiries/) + +## Useful Links + +TODO: link to tutorial: quick data generation run example and outputs, pipeline overview (collect demo, postprocess demo, optionally annotate demo subtask terminations, run data generation, then run policy training) +TODO: link to tutorial: configuring multiple data generation runs +TODO: link to tutorial: reproducing paper results +TODO: link to tutorial: data generation for custom envs +TODO: link to modules page to understand codebase overview, especially Datagen Info + +## Troubleshooting + +Please see the [troubleshooting](https://mimicgen.github.io/docs/miscellaneous/troubleshooting.html) section for common fixes, or submit an issue on our github page. + +## License + +The code is released under the [NVIDIA Source Code License](https://github.com/NVlabs/mimicgen/blob/main/LICENSE) and the datasets are released under [CC-BY 4.0](https://creativecommons.org/licenses/by/4.0/). 
+ +## Citation + +Please cite [the MimicGen paper](https://arxiv.org/abs/2310.17596) if you use this code in your work: + +```bibtex +@inproceedings{mandlekar2023mimicgen, + title={MimicGen: A Data Generation System for Scalable Robot Learning using Human Demonstrations}, + author={Mandlekar, Ajay and Nasiriany, Soroush and Wen, Bowen and Akinola, Iretiayo and Narang, Yashraj and Fan, Linxi and Zhu, Yuke and Fox, Dieter}, + booktitle={7th Annual Conference on Robot Learning}, + year={2023} +} +``` \ No newline at end of file diff --git a/docs/mimicgen_logo.png b/docs/mimicgen_logo.png new file mode 100644 index 0000000000000000000000000000000000000000..5d035c4a31a433bc0ea2f0616456ba3a3dbae83d GIT binary patch literal 239240 zcmd?RcT|&W(>_WIASIxJARXOGlcLhQpcLuS1gWy=y_e7;B`P8+0@6W{5+H!|ng{|a z(g{IY0!RxbKnNXj!v4|UWWuD!v}6GC3uKgM zhmiir$T`RWe?2B6yHC#fe;z*|7yN&Zp&%oJxRR0H|LkZoOVai1@``jld;UN7lzHU; z_Z{;n{`Y7)syxd7eM}KeI-2Zzl;<7N-8mmE3x6`QOV`e>l(Qlxn4WPerj2kZpU0(iFfE;c0n##w6S?J9~%#go_gD2jLZ-Xkt1+rR5oe>c=5P&aiS zUz^U-WUKR|#$*ew*vCrCPpbwh+s^MHlgp!v-Y2#k$j)g?@u@7m&NTSJsrPQwg~(fqwJ)b+f7zh4 zd{qNfIdaVVI5F<*`x?Co1@HHuYK}{z#Z%`la~TZ#+$CgD=Vc44-ON9>Rqti&Ta~(~ zIr;#Anxe7Bvgc8F*lbIlUUoj%dP^~vsQKAGN@~?oPLDuWa@Wa75Sktf9t1&KEa64# zIul`7(#3DlZ<#H%UrU8P(~wT%{v$yere`e*67#V6b#gz-j|8ZM45L^EZ|nZ-u}IXkOO;aZnCiGMD9E1-&CUY z?+gpkh`O`{>>wZsqWFz;faKJ#WdF-p-LSH+BbAmX+&##?SaH;bdCyKgzoU@I>FuU z!^AeYH!F8Mh~7#ga5LRax01LIAe8#voPz!4m)lhrekUbhKG2TV#;F8BrJdjar9m$t#*1R3KFKj4|`H>&$5 zr*Ko+ho<3a(_hruMv9p8Zf~wQ3@a`Ung2 z9({}rU4QkG+FoO!Fx5t6*ym9`;?4}v6PY`ZO)FEIqngM3?l-a3wMXnH%i9cNoc5y? 
zarQEd1vyW^lqvapMygfv=Al4y{nDGJ6d#^ybCCr+>vyCzq?B?_yfSJT`o*%q(3{C5 zjE)Lg7^XF$qFlD+TFXpSQEpnCcRjfptDSOtLU|OsQcFue@s5zW9+-He^}-&8<~MLM z8j-_1ZpP;`x~7LrUekU3BGSVAM1&5~HYbsXb-GDsUzpA=?B5nB+yDr?oyNpUwkE0W z9V)z&dcDK0j)z(#y#rkO1e~c329~EN(U|r0yt545_I>END#Crd(ASQIic^z28WCuh zeViXEzQM!|p z@}C~Rj(hg_ttez@6FZqU)9!n4looj9`tQ&Ya2wy|XH^WmB_IH*+#LMJXo7Jw#$UKn zB)WciGrco9NBW7Q=|^4gN7>NO51Ts2DB#IL*s=n1W04KTw6UEn zziRgFV4L+a3h=@TllQgpTa9e$U~IFuqGA;Vc!S#ztO_BAOuyEp@eOh65)J$=cvwW3 zm1S8r$$Ob-Y~h)M^6wv(tgS1pSQ_q6sLCkpQbCP`Z>4PV!WotZ(8j*-SCMm>1Iz}U zOuzLFr!R&C@EIO_j5<6`x4t7^1i<;>eb|njaY(F8Y z(cR1)HaBd?>^n_x%yAUz@QX#Yik0SaJlj80T7Os{?h)shR2b~_ZyX_y@R;D;7C<#zv@E`ATG z+Ybai$6aIlE4~gk$+0%?S<+Fb-a&YYz6b+Wr0}cK{0IkfFU`xSxh{;?S=Kmii+Hvk zrg}nLhHQ#_hJH$5T=K?q4%a!q28{t2su6 zw}hCM#W|2~St+lathjrrgg;hu$&FxCOnc%{eI91h#=i4>zyw;w|R=(ZI?AFqk*qd_-|G2{p?~#ia^c<;i9J%lI;l-wefVM z@K!QrHe9LB-!)}=;`_iL*p$1|(^9bE1q$y!Ndd%niU=yDuMvD*W0Kz>>8-NCV<)pDLpS*&|dCk#Lf4TP%JoLV1HvO(bXVtLc#SKrT zEXUt-T-K03`6OD|Ixs5nW~pvZ$Bou5Zx%iYqi?P~m-5}UO_-(8w@H6fPO^x_KF68k z&|Do*nFyiV5PCZH$gN~lDxe|^a060TF&yJNZpn=^{_N^dWO?W0F4))3J8RN7S{zun z#+Q2oZ_n|jJ*U^M)i}zJ`Ov>@@ZCz!Z1hO;aLi4XWgokP`wIu(`0UpB4LjvPRm*bg z1@{7f=G3BWc}#X>Dc&lUS+|}PHUz8N@Iy{mCDqdZibry2E%!Z zmaf~GDYeKRgeFpEX|o56KEk+J&whVq=nYc8(y;pL{tag{aP2jqdz+Cc;q&&6X;Bm|$mno7Ms1DVrLtRQBh|BldF4cwHb+l~j zkm5Rd-ucDs*-cL$_9_l+aG@%us$hmJZ8ASN_}0#b>pMHZAQyGOK=jRri=?kV}B@vBJLZf0+K3a4706^#dRQu=c*VyC@AZ9xZQYuv_m+Pp0~RmEbASGt?0GA>#XR^ zVd%LJ9d5#VM>+oY%d?=-x$4dZHc%UKcRI?kPCGq&RZ+HrTOZ80RojjzfV&3=JFF9< z{wtL)daII4_cWAviRdI(e8Kk3$bMluI$`A!`vUb_>B~y_go~~hu&V@`m*Xc#bFAlo zr~H2mk7T+nc)q0CoR^4v;kP{>F6VQ+`sJwg}< zh-RnV7p|$nTz1ts0Z(>)uMY&G`0nB;HjGV^=i-&b*R#BA$JcD zlX+b|cri`r&{T(&UbnRvWinFqgJ#ao5=WK?0&tbeaUEZB$uH;?u0!P`#TPv)yb1HZtJNkD*Qx)@pUQwF)Atw{#>aux8A(nrzy3+F= zyqmmcs%@~;V+YtMWU@PqJCv|NEG{h#7O6%13OYl`mEhLNx457)S59WMAJAx~{}%bN zX+}6uf;adL`Bl?K?Gfy>Q~e9_9`)CJ79ES)u2yY@e?vHhD^G@N+%#V{%+mml`IhS6 z;q$>MTEeJ)-N?Y|vrqY?MCJBC|9d{-9ob~ic5ml^Jj?PciyM>5!{T3S=_yEQ)cNNO 
zdD-lf+@0oyBSg!}K0ABFFu#Z9?_`^CZg39`1>x{pg3|a+jPcLIZhKY6%jH$+x52s~ ztvm+TZo@o<{e<4hmiGNigG_BV=RDFmr)NH`{Z?RYc2Mzf((b(av{3f4z!o_yEp{!Q zJaE5}hgVh5htsE}t#|%ObW-RT5(^!heHO;98&<}NOPOw`v|;*nt1+wQv4nW=NqC{E zsuWVMkT*IsPb>J)B%vTBvJC8%_%igJmSBc`Jcb8=D*(&g3!!)4AwbSGlz~+D$7PS`mSJzp+q37Iq*o!ui_^rIAl9-+kZN zyj=2omm|IR{MYe~#`f$jVsHrVE59R=@l{mNIL0lv)@Uf%&HhNT4q{BGN#v$ ze(!a8)a3u;R7bjea#5$@@7;q+l&l{<)G8`2PHqXtT~AIuYP@BHillt~y8<;^XSs=uQ%A2eghDii?V<6EzBmUj=e^&{4#b_WpZ(n}cm} zjrPA{U0@Lz$$t*4nnXxPV-R%VJ42cT(xqN4%9!p@kM-WoSv$L&rNpo{n~`SS)hUo2 zI|Fr7ePXRwi3CRVD4?#nfZ(2xkg(d|HNEX+(-Zq^jp!~&Dbu^~6TSOr!pp2qf#!rfm9LvYLuRlZF=73&Q~5+aS;Qan=&?fdcL zN9q%h9$^=?ObFA9BW7#>2Ji8=*QW}k>}8HBkH7*GX2u@4x>)N47RduImwWO6Lka`c z0pv%P)20$t1@mL3m9AQ|tbLUvx2Uvf^vN%X08IpAA|Rdi=9BUqZjEKD9B-E%bp5KO z^hlNh;hTI+&n%UH(QsL8NTNNNZcbGAsZ8Z2`aA%CNuyvWOJeGx9V?lKNG+RsjHN9q zoPolvr8BzJZ4by!3APO4lRzE#A=O2Jb@ofdJx4idzgf|bY$aO^z1XSd%S}LD>Vu~DW z3fX!O;d1)L>f5H7gl=i4w2fqE=%z}i_-<6hd#fNx*{!moqOYu>?IIm2djyLUwA;%q ziBw;B`XtyBdD>E16iS1CTxE!Vwj0IAP~tfdIIOe$;CZ}0mj6ky$?-&ceExF~W$S}o zcG(JdS13b&wn9S@LB{kK^>1*R7zuYis8EQVp_J@6#nM2o0o$cZ*ptYp=8+ZIouwRd zl0R++BGIGM@S;FxmikO+JmuJSw-17DMy`|%au9fOpU`=Y?*cmh=1hMRhOp=+9rQ_Yq!a3CYv$(D)P%L2uI%jW6caa@h(6Y<=cttb_)7OHiHdogkpzS-`QP2o z8p1TTe8BYymwPzb?*z8X?lsy{JUbG#U;+?Xyw(66=UZB$0TcUr{IKd>@|yF zOBT5c={sj7sLA%i*auNi_ehLooBkbDn#%%`3YF_iCTx~66@I$CysM|DHmkH$zdBYC zq#4a06Wo(-e%65n#vwlFq}_`9=`>1-5kh*GggnJ~i!ki##r?D~f4K3q9$Bl*Kpn$Q zS04X!dR4XE$+j$ENol41cyGDzn|5E+qrE+GQzG=63=6GQEo9H(7x5@aos=xS1o0ea zR^UJ1724c$27GO6UamJA&a3FLU;poH_F#i%uJ#`f$V>)9Z zS8=QKm6au9!|GsGf&fxG>xZ+7v~^Ug-*hgBZpW}g)H8GBK4*DJp(P52T0P^9n!QvM zafn}IRN@+>HAwJXDF2I3o!_ssJ*w-*VzHy2KYxDz#CL;n&|a1)`w05P2S^z|>%-1P ztEgqnO1GnQz|(7J_x>nrSeS`-3Mo&+A82@<~x$VqC`=GfP zuI5A79q|B-g}|mPW4Vplw^Dy@?w-y>y{Nk8srF^FzsHi{Bj^GQzj<}F#4+LDDtTty4PHf?zjsSAWBJ`map&|wpVRx z?72Ij@P}-T5X%?w$KYS2I{1rx%=^bTc7DB?JDU; z>jUiv-p+><#WVzluGL;7&*{}4m8B-9;$bnYs`!vM*-GIpP}`+6n>}<~j@{qWVSS~* zFx!CT2_)XRUhOjd*+o+R{bB4k?a(RXr01b6(_%BES}Q+m-|+_3uPuIvGQ}i4mGXl7 
zw1JFA&WOZwgHWCzG$Z(Ez(vL;5k-RO6Y)H|%v;{tjXtw#4ifBK$XAU$0p^-@{pxf5 z!VKmpur|TW2_Tqa4G->8Ky-kB#^IbTw>8l;c-ikFpJv4}Nw>wrIlG&ff0-bAfj2 z5#}@$!5q{(N-Bez!|Dm3M+<{i)4(V43;bcN^H~ExeP5T5CL<~!OjS>PMxP0yzQ?eC zzrqaf)5s$o(xEh0`f>p?b83!GkCKz8U2bNeZtnSfuW83rQFqf*?j+pao`0k@?H5%+ z@Y0pdan-zA-1mOSU>YNzV^ATnT8!sgud(b~!QLe+s*tPXVlSK44Vpcni|jxf$iK@tYipLiP6)o~j~d{d%=<`^$mEU5C0JVbz14WOLc>89E4)ddIl zkjB9qQ>A4arN0(AWE2DUNZl9JTv5b!rh}N>6{zTZWyE`s;hr#dAhit-0v9_AWJ66#Q^yBMIA^{$EEy z4hP|L&7Q46;k?UjhO&atJxxo-8*PaWKS)smj7Rv$mZyNFnIb@H{0*{Hu(1!O%rGcZ zH#crgOqjBX3Lr=be$3H44U35$q0{Rpv58V9<$b%k>0tD_OEz(LY6D%X(8Blf->8G^ z4GJ&!=T}1;BZ((_^BNV4W{$1g%%Uy<;kjVLZ=!vwMNBYpN znyac52%v7Rs=nf#etAkRo|824{O^Cy+2Y!%s&=r zCOH)h30m9Q8f5e>QWDihyc!a0D$N_e8eEO}NyoHkUTK4b)3bJdR9fAt?6~yzWdCooAztAY6#-9NVm*vQo^Gc2jq1|Hbo$nuuG9d-eE#B95?2$`^VQ$@RI#5 zWu;CN0~JxwIR>7^(2*U)148SG;IvfU5jBH-;a;>FSRU*|LJQ0$ZAozpsU;~-CR;XV zn7Nt=H-P4B)xIocDbbyJ3Oy*I1WyE*5T3_h?00r@I?hZ{<-@W6{S^QD;v`7%IXNU` zP(+IYPB{+!ysbSSNJR~q8$;{c5)Jcqzao{T9tirBoEMGndC9$Mps&Bn%CMtyanOcf zl|wD?_mROMbGxgEYJy`zZEA5S%o}`79e{WqQ256?P<+{&=uD?F(=9GboT{Crh?<%C zW8Db;g~KoGlup36uEp<^Cifeg2q_kk=P8EJyJB>%-Ohze$=HKG!p!Dph`d|hB6BR) z=N*jxyvExFks00AqNG^@i&5~B_Xv}~H#)wpLgb5d{;i6ZEYleKC@wn`gXb=NcA9Z5!TXNA3=G@ zjO%)lCLEUN{ka5jE@}^<2ygK&Z>_1657p|DDIGRK#oT2X-a#e`o0E)H{CuBU zL=^7$)ZCO`Zg0`$9}I}e_evUA={&HZAui4aj#4&)ow<=_ zTYtE*`i!VT_wMq9xxQiWt7x)b*G(iD*9LPY*sOAPR`VOlOd4Lzk7WI9@3V4wQzWOI z!)V^j;#!6|y4?POqegAkJ^boKJ1lw*+7HKSlYgp{L5KUBF8#A44EcK2hi)fLxkt%yZ~xJI z6sjfvEhg}-ps~k|N=4k}?yPyGl+$L7{+$cxe=bem{}?vW&}=8TXXolB7y;Xw+7~gFu}+ z&EieA2h%e@#qrI5Ok7(17$F?lWIn0H>b5lECS@fgP_i&inddIrV;@^rT6_?Q6tRpl zAaHa0KU!-2fNPowsNj$HjyPN?<{^y?$Ni&)jJZZf8sZAEVB2w{Rd-}#p}t{8IOk(} z9#N!B7Z#!wk*S-@d{RAF9e?Z3OKDQLoW+ktPn?ot2A6>yCAqQo4VNA`a{KMcl&HV6 zc@v#uzV@lQR031j9G*3FJA=bz8Dpg7&O`qY_2ELUW8gF8?t#gB+*EFX=7fEv^Bv7k zVq>J$sm(1%X}Pq>r8eUd*u@YWxnkEA) z>`7X)(#6fG4v&)E8Z^7+LJds0*}n146lX|#qr#Yot5IyrWwOf+g?^8YWT`^b34x*m z(45hBQ3CtJ7N6OUKHk$!NU!C&v%x6RU8ZSYOQnhHTnwL`SrwsW^Yb&pYWG&2=fBFv3?K_mD 
zN;`FD?l-BE4=Hb2J%*TI$BgvIT&>mRM99Y9ZN!aP2}n9AU`4Acc_>brX6#vsf(gPZ zs}ChBT?_TNkvepc8RYTvL;AJK=7F=iT6T~E1+(tu$QWH7f4~IRliCa|d95o2Z_1tf z+WQ?e*%uG|(Nf&i%wb3p6XUM7{0mQ|(Z&pa(D@;d8~$F=x~4Qg`Vrkps368Q#bVkS zc^mV7F$v0L%sFCIC)&kBQMV?6seL&V9Ey}vhejm`5zlKLeU`|pSCI&fkTN~;vVo1c z!bjK5gahL)aLc~1^$DzqP(188-G3J>}9T$lHI9cT&xy5p6Hdd+J$2`9q2CLZX zSHLy)-1#T8vfVg?R^?Hd_K#KN7?da@lR{%C!3V|4zU%BQ*hk@8L=qTzhnVAN&(yo<4RHMEcBqx==#QSgh8|mIl8ZLu`X^@7n zUtaQZ9Z}*&S{Fc6dvEglOjOybKm8G z*%nt;nyu^BwXh0D2!0Mua8VYFWg|v(<8z?$Vv2J;W9#xLN>FG^LQK4Tunrz56s9#N z#L8sfTJ-U~=LV~fHhxbMXF~-aZAXovP?f9Wk0ydNmvPAC$54}_2;VCW@cq&87aJmP$?G#y{}x|ZZF*}@OK0GS4P)l1d~;LnB&WV7p~U- z(SUTcOoOt>OApD_-shAdFteMlVITwfsA<`cS$pmVE}8IU`HDHV?{%JefgD$b8_S_n z!_5Y!#DX!9>}*)Wq-a281Y3pkKdPG$@L3j&-$SxaRmOUWG6u6uwG<_#8b1%eA=n#i zm3wPCB}@5FY_Sd@w5mLtQTx{Vg_gG>Y7Wp#8Mpf+-nlP#7Jy31;$ACnBvKzrl+MPq zdCf~Ao1mOC%~ln~?c#Q=Zo;|GR9>$tfQC#d!=QUAO)?rc8OE~$*{jM7?PNv;Kpe^s znbZgF&$dI-OMcmJ7OddNudaLOVKv?Y1IxHEN9mcHd*R)BUbm>ht?svHg_EoSr;frtvw z`uusfEAsh+Q$lt8*<;6-Q${8hXJedMs7Fh5(o1jwyo4foLj_Fy4cH@ zf7HM{7f55;gSjHaRpQlh=a?pOn(6*3mlxQ&8!E?8#3``@_2WyzFhX+K$&3?336>0jb$1oA!Pxi6UdPK@8z&~D! zybYvhPFf%}9k@KmWmKW|1Mtw%)L`}~HWl=eyN=W=d6xZfgYQ57P5;}4sy?z;r6IQG z*XR2;f4L1ca3wKnfapL;$UAGM^HzjrHuYdjUiGMn#UC^j(a7e;Svg|JXj5B%7izh5 zqSy+TpU6$;L89S3=5=UMq%G76c|5>EdkpGhGWqY0P)N=h=wJcQd5J-W4I6u5D*MYO z@X*7tyRVl}qfJMm5%n!7T5Is4X~#VNbv(AB$C)Y##vyxKvQs&l?vj!z!?Akm`Eh7nF2YBm2#=g{j^N zzF)bc=ujD5p*GXh5e1|K&M|+ghzkVVGAAi_73NBmSjwaB8eu6Je$b>AZ%=Q1+_iOI z1(p@Ukl?|4RPOQI+#W8B`%I3xKpx@HKpMV@Yk$3FdoPY-18RE&xr=Fh#W^)hDnWPiP^0y`+$r8AtxyqV3+koobyk4&$48lP-DdL* z7yo5k8BBZcl^a?JX*_{@w}>(w)r&n%T-p(;tq8E}w6A?f zJ;miJxJ0NH-@ThwdQlEQS7!@l&S-ueY!K8JR#Nv-B1^Z!5R3?~5@KG?q3Hc`_p;W= zC(xpQC2YY_&tY7cVL8mBRfxGH&#|wKQoo5o-M^Ux^#Y^CpzWZ6=z~VK?mpg1oe6B~ zO~Hq;`Pi2>L%gGvq)oMjTFT~!n`2!Dr)^;gio}o1A=V`Q){&fX-e~zz%T=j4M(dsJ zt5Q>*;glh1;?C%{tnbq8*D4Z%9bhMqhZOS>5qo!=?Ee1E&k80x5CzAT#v!-5>!Z|? 
z6?~ZY?ExTt+Y2Z4sc_is)}q6=YEhb;8Q+HFo;>l}xGYgRfwsb>Y46SJ=t1*eDpelk zj4J85%(U^01*oV`qnj7~38}J#He3X*+xxiNdy167t&XzM2i$>X)kDPCGok8l3Oi}j z=rfW^?DKKjiW7*cblLUZG4jeA@b_NEFL55y9zw(DJpSA`wx&1r_CNKo>_ZQ1mdYSJ zO7cBG`K3VINSJLK64{|u-C<;K2fuW!3al zyXBNLEqod-=^+}2U&7_O^nR#*N-FbpFeHsrByy*KSFU!Ey&AH`F8jSm-rV|BTK?hQ z@fBP*W470hiKhR$W)729P0tN$|b*vbWti;FXZqg5)Rr&gOO()`8Tc()~*drSv} z_{Im#3ZB*f?{5J#C(eSEZ1pB=`ZmfYP!)r@r|WkLR7#h5j71Gx+u#9f^BBMs<4gQKi+1$(BP4X7?x2z|MKs$&*zQoz zV(qHx&DQUeG!uxrov*KNA$1cto_<#*z$@Ye?UjffZ0YEtOqgkf&i}p!zODMmI z?JFKLcnWXCg|UCBn`qY*$m1rltE;R2SO8AS`WS9ypMHNB@5azX{DwiZ+}iMKrbdP2mV(*3*6lpGZ^U0qWV`+PT_)Ix5N&w^>n!fwq-sWO(fkTcK?? zVKd{u2sp`G2dw#w-c=~yyg3^MNJsj70+O}oZ;PfhjEV)Qd8&D)=z6MIey9(tyDX;~ z{OUS7Sx0Ima#|;E^8EVn=y5s4D@`etb#eaHQ=H56x6x6P!R4Mb77-=VoBt8hhaY8TpOPl|azPb7& ztPI=_7no|9)?y54Asz+rhD{DK=0ORoQoxH-$n9nhc?C-p>ebCd#=d{(8-L+M6&fIB z-Tc-D9vNY_t}GBJzCn%)tfP$3#8;MbnJ)K{HzrO7WWFwj&W!u0ItLK9c<)=4jY|5( zgLD)HidjWDo$dswm@E8TA`QTNiQ4bvZRURS4=2V5QPs23?i_r((TSPvwXaiVYd0L! zz4K}<^*x^eL7QNrMVPIp6{uZ3kHlN56eLv}-+!(vNRf|=SYZJIoNPECbC1^rpFQta z$$>UoO=+8E_wp8SM{}Cqk?l(}LUh=7`$Y9VV zr)RkI)wCfVNCErit@oj!zP=kbMtOSN^xu15WVtBJVOFloy}7{Cwhs!YjwzL8+{>oO zVw3Ar8b76_3?8jEZj%v9Hc$;E(jT#>oVVmk7jy~dS@-f}^Ycg(#=%H2x$U$RCJFk6 z_`mtLVldc~hT`PgX%o*(RQ>wDM||O z)fb;DrjPkn3<YI`{e<+c5fvbwm@LX3|aE~Vx0%< zRSz$YDtQKbN6WBmx58ErYS|rCdNo&^Pg)~u(C#n-YGXGNbc>pr~-WK}RU}Jrr$4!-Ue3*}JQTV3Gl>jb_e^%Ci zk$~|Y3OZO*Pm4v}UY1#?@wy7}{<;c}ScnZC*xnT++1Jxjj*STKoowI1+LT#dLRN3E z4&Fh?A$v>=%BHJ}Pc}-*r8D)SEJD{wx!C{FPj;C68;-w=z?jz*1|%oH+@GXwk9~iV z+Tv<^z?5cVp@6eRauNrC!bFoeR&q zS^1GcObUt{6_wDsk=y#GhRbsXZv*?f@?S`yO)6vWJHAl&vz$p0iiB~a^C6qu(4Gz{ z!f*p@b$P{YJ0zv)aA9))xTUj~16>_;XmV<`9?Axf@4U{jE(c_qGWMw{6J4EXEF=Wi zh8yE;-Hw(D>~|CDHh!igpy6enJeMV7i8oJwo=rrpeu(}PreXF&Pi`*45JuH86B1ow zkK=^ww3GK0W+JEgfo-j=T%;NO>Z+<&En{r$^lfqe5wDd|M^eO%V7S6z0(?Kg%HAw+ z^3#7{BVvPIF_6z^^;+cqiAXkaF$&l)qVeUU<+6*`1n2X^KEYeJDmJYjMGV(T%ECIM zrS8qRVL7z#AQVaad|>ST{$?FZ$SCceg{|Q+^NE>{`ahF~cz`JK#ti-D68urjo{R}E 
zN=p7Mo5lLNe~J3Z0Sl3(`)cT51`M#awhyfJx|h~io<4^W)Ir*GvVg&%4uOgyc&ApD_WwjE#sWb4v7Tjmb}98@3ntseld3E! zHT7wLG}fX-&ihI!+fLv=@K3 zUlziQak`Z}&OUU%|K%iJzqQ0%DJRi3n5!SOXg)b}E_^=HT7Wg#=pi!dgcvgyN=$o_ z%v)T5QPtDKUwj5SH?>mxRiD(w=T#NZR}5`RUE(Qi9CT-qQc7$;ba|T$V&HO{Rvqvd zWRH!6m*!AH^8F9id&9LZU{+>UsC*7?yjoy*^uC^G$PBB)7IS(Ai`%;LeBq`a0Ij z=+Rc9QrtN(_qmdW_M#d0kudSlhd7I*@PfIZ^?Lo1e!VB~eVgE+ppzO)Sk$!th!^yA zzvR#Kj%dpznbJu=rPw;nCo82JE_IY;Ubk(La^@_|Ru38_hTOxxxKBjJb^hO=ByCq6 zxW>FckA8{%wk=b`1q}Ue(lx?Ua{#C~^(GGZh+mGcIn*uX!F4MUy0V^A2DH32A9C;E zA^ogIp3QJXLe%TUS??&K3O#XvzS&d%Y+qNa1bM`WZwWP!$seT2$m8`C-yn4I2$?8A zMbYr3rMg6TVl-{2`8ahU*qqT&hd1ijcZr1ly=c;euc+o3Blu5J`9E9=1z_1NbuAgX z>b5FZAf4yE5(Y1rXamUq$)bteRPrE|Tvv>R?AQ!U1r0r&8B?xWs+1v_Z)>hABYuwna#LHMFb)&%TYUxa^v7l2h8EW; zewZL_Hc$)XLq?=G?xPKjMO)S<+rkecWQN<-ksId*mB@k!&gk_v9&QiQNwd*{5AeHD z=0{V;-mxOQk^8e5Hf8uu-qTk+VsW4U!`KRE#%@$dYpyXjLdWXV{ZYPp`@<=v^^)N; z;zE1Ph|-DCg1cE|nLE4dmpNFwTJ7XtsFLS=UD&^GZ+o?ViSvow{%6XrQ z;RPBSSP>{re7uKX9lzsLZDgEQQ?$S;MJC!}<}f<3(3^&IScRST)zX+WQvHi^5Ji8s z|0OKpDYF9=fztael@CTn0x6O36=}(C%vx)u(bguM$LC^&vlM(lRK0 z804x^*I7iGIrqWNd2*6Arpf_%%LA(+s_Vr+#y8ffy&DqURbjGPY4GFe5l~H;{4R3| z;U-JT#5%k;%rn^RBXr%Jt`;V;k7dr+e6>l!{)hIDX*$QX zubL;nFe0<5R-`&3>zPlqiV`}XvVHHQ$&9EIc#~%4w5d%;?mQ`8g~+8IH$o7sQjezw z<0A6sR535Gp%n?8^KPLA)_5=57u|qVpM`_A{zx)9SXbkL9;|KuNEkM3 zhhVtHaL#_u*y=jm2!h@QD!RPPZRZ0;u*E2Z{NtVdEqymX6)ucd!hE&^yfrRPJ9Tnm zu$v^nx?ZU|=f9Y}U@3+<2mn^xo}ZYQsKxzyQI$qPpSM28MY~smY^pYtv>qh)N+alU#tYTP}=i2gIcrdhXm#wBR3ospRsB;XC8-O)IdcFTF{8?$?);`OveK>;TY}#j5 z>s0za%Sk2oNN!95b!D0f*+5wF^j)Q|`0;i~2$|#Fd=XbcwM4Km3+DFrwpkUJSiTYM zxeeZ*r85hJd{^MkL;dO(k7l5g2Z4(=wBtu(g)|0Ms;WkQZyxchBWuF|Tlnuh%ATtp z&cQLnuBNBMrEPt}2+P{n@BKhUhdR^!8{f!6(4YOc#=Ytl1kz=-^ps%fsuCF5&HEW? 
z`AatVQbv4{5d1NcRkmc;SbBL05`o{FV+aLAx#KJ8;}%3NG}PN(%34#*oKW`rtbyhs`BP&Ze}%y>nk%~= zEuP$w)e8&B1VIK`-t37bPybL$6d^fSYeD?Gx#&lmc@o%(1v-Hb5Om(`143&;yDpqi z$bEiB&wMK9pqstBpNU3}77{{tIB|xLCrReIP9HF~fo3;ADYOD|V^;^#9oyw3Mt*uL zI2HeI`#Xw=KOhH$UafC@`m&q`OA4G3b6%y)dn=c>^|i%is*~s{&>=-E(iia5M#YYc zbX*vA_$_$59D49H%}izSlxxY4Wtw>Vl;W%E$r%pkpBFW>!`1@mB}=@(fpf1)sGIr87*z@0G-XUE#S29z4)09K1OSE zb@kY!0O6ly5n1xEP1JC%dG#o8fy6Xrl=>>&`HSl7+e+N(UPK0=faGaPJ^zLA+uoVj zOrD|x%R{9oVRFnh1sHmtoTE?A`nF|}`23gh16bEdSYRlV7N4cKy&(Co$v2&NHh6-S z*ZDRSw$|;$M}WH#&0=p5>OlVCoiZ}tLo6+Q5!{=g|`A>Zsu#l6S^LjRnOiv-@= zg5kJuH8`;2``ar-KNe#(-7%(zEbA-u?LE}-L`yt{-ap^-&&{%A@mTG$*e)V+imu-(x<>k})DsB7Ofhha*c9+%~DtrcW9IE4Y`tpk9@8uGC;WJ zXx6PDbWfxb7~_uYQVtAo2(6sY7e2nIG@j8k(or`w({17SA%Su`Pki&yU%z2CN>d0; zMPflOO4{sKHx@7hR^Qx_e?8sM(2(Wx(mn9cnPuQAd{{$FqLAJ1mUXX>fy4&dQzoZP z#0(-c?NIIfbeQJdz-E$6T;&bdyjhnkUeZ}ua#~;e4k>5P>8SZJ-L^dC6A*lsM(k6$ zuRp|_%w0PE?Wy3Ou=MYo_^qTUI3r}FN7#{NpLzXf{4Za3Zre zW}=e0p0M&u<8s>$jMU$i&$My7+(_7?>l{U*;ZZc?}IeU>dt`gPJ~*7$#YW6!F} zb-jK4rTyFbMB3%^s)$?L1=*_E36Qg{yB>zSEgDx+RiXBvX`VJ4Dd!MLF!hyty)nm_ zcRh>NNmaQo1kx&_N5OTxe_#{PX3)BvWP|0FUOIx$tmt1gKU$S(-Cos|**21^z;^q+ zuv{QpJjl?{*Hwvp{+UD~F~Zy}!;r4xWp_z_zonp-5r@6X3vV+j3i=4SBnqVviKhgH zz+COQb)UvQ_X)hU42=cbHk;&6&^xULV^22am}Zesgy|xoeEVW|AU6S4u%G&c?zB!( zQBkGUZ85B)2|D5Zyn8Xivyb~9&UHZW*!I6E=S22sc0~wOX?mLn2)Okc)!cHV(YQ0; zdk?cUQ+3DB^xot^Pwuac+(+A`0)kzY_7`8RY7Mu*(tpT@83vNrCw~msLk_c9^R{Z zcE4e#P5T0-b{y|TrUlJYCbR1p4YNa0JgPFC3!V)-){8kpT^qfZZ(Acvn~`SI7ryh3 zCf~xwCrKHu*YB;D8EZ9_+UsaN1l#~X8b9YoU~Q0XF4w;-wsrrcl3o?C&h@^D>fcS8 zCW3%W<>CdK28WV2;)nP)Ap>WK@7Il{f5}~t^6k1NJSDE2Zg*vWv0~<(P0qMcxkZL> z*EC@HtFaO(o$hNO^4Sm9$G+SD^yHmR5O3~5`u0K*HV?mI3oQpbqZit7Atu|Ub|m3v zcd&Lf9(U7E8R{39ytzkE`g%Xf{adte?o&^VWpeAwV4hrduQSWFT1Y`=_S(- z9Pp3nET>m3Rb?j5mc2i{h%tU~SwR#7x!|kNaezdWY+%07p3Pk*TyL-tfb>Ity6yzD zvmuiZ%xdyE`$Zb-y!hbjrO#t$B|`G~Mnme;nYpyj|w=BfJGw0?AE)%A`#<{Od)j2#_Z~nY; zJ8gVF4b^vS+hog!Xt~U16#h|!brK6Z(@@#RVr&>a zQf1E+HEVNz=0QU?tXz;^iG*gPK|N%4CGcUpUl*e#YP-G?t$3HqCvsWFwzVp`!4u=+ 
znt_n_-uu=|#9@}nJ#af~XD!JK)6o-UJXN&{BC@_8)%VRd%s1vsui2g6X8Y;Ipbjwv zT5Rv!V$3v75IssF@$R%0ZM1j)(x@HtutTlBLV=tjd&hp))hnPFQZ}no6)cx_CE^CM zzl3vM9%Q;PUR;6`c~IH+3-F2Zif$aK+iahD15`{nk=ZE4Z3EFsPA+JSARfa1W_6`TH)(diGa65WVcW&WlAqmb+` zCo?8cx-Ug@VWxiGXKR|=P4ZAwrhf5~%nrI%z7U#{)e*>PUV}5zsx}wbk{b+GiYgGq zs~s0-p}?%oVPhuzp2nKPi&+)L);rFna$1~rMqa`@BUoMZzR~{-x=!{Dj?a+-d4=?w z#tbXcqMx|q3g67&{Pbj_2Sb&C$0tru`Ho(~_p|WEY><%C(~(L3sO|gppUb0ie6z}k zNqy1fr+GZkhl4h`NLpQ=ZOG72^4x6XVwp>MgKn=y2~{CK3V?2h8qT#s;3@6S1R}PH z8AE-V5Q@n!YL9#pJ4J4!U20VM38(`N5PJqEu6?J-#h-&ApE3n=!@2J}zI@5xg8N z6)>7yttqeTy{G*+^<&?4N5HM3Yd>0FlZ13JndcyW_7XnmyN!}iDP{yVt9??gAVKXi zXI|%7?hpQCi>0PruIx0W@GU!ao~uemW=a>8uh_N7>V3iefdZ=~gy8Z31ujD{{(eDk z+psC2F8!Ndob6jL6(2}I!O)NXT(4JxI@KPYT-_*jA}Ic~R{623DNp!CFQ^{hTjj#N zn)%Y+0Eu}x1-3_=9KOoH>F8nP00-x}yyDO!-tJ)@q!8`qak-B)|GB*8?us?8E5UNQ zE^xu3?f1@Xs|wadH#$2r<3N+9d4oiV#}4=378r+$vSa%8xFZ_`t7~A%a~`IW)LWe( zJb4&0o0Q!3FdZTc`hEgP(Y zz2L%&d7tzFHU7r-4&ydogF@hI{5g&vo;cclC6G^^B}pPz$>@_ojbYI9m@gfQcnR~i z1N0Ya`)ArDJC1D~pR1C;jeWRJ_4){{=PB7P-Tw|XZQ{dvcB%8NEMV2=wlV+cO+Z&y z!?y^c*UP{J0$)uFT}N#&qyr?BNdx;J$b(W+U43%;w0r|=ahO` zK{eXW;B!!pzGFNGm6Hw-#}W(bYwmwwHfvHhw^V!?b%J<+=r@W>>li=aN05E@=n>U! 
zPIX~BbNsPsb1zK9Rz=WMQUXz7OXwvWKe@KHwgVo)F-`PznUGv=SY6(5Ek@|KgbX4k zK9)-vfy*xugQ3?d{)ZRbd(9kD(1o{4#1~2jRW(?Rv!mKNIxI*CArX%s_P={xJg%pB8QP#Fd5pk6Cx&1UwoAlxGw{;&m3Im>+@391~a&>cbJjY;9l9hl=?Ul?`yJWzIQ4-*vc_vB*z6`@790oy9^lJdhBu!`&5PI4a&k*l@gS%e$_Lh(PgcM5|Bv53t zQg0(UfQ!iV{-lbd3bafB%7>C&M({3st$*94Tx;R_L;kJ=0M8IWWK>nWzu)wt2CQ6< zc=80m%H-THFE4rIQd{~)T(b67NSf>t&M3Qesnkr|GB8`PWA~$^MxLdHSL%kqzlP+G zr(|{4I}a_%Ca-+>bX$*((-4B(8rdE8ivp zGm6kA#gmoxN9`izh9kxKr74S2!i5N@wn{r#lv35b&y79EEtnadG^=(QeZv`j=jGCh z!RJG8uNntOht%5D+>r}?d9-cp*|MZ^w9?TbRo^pU)*Vj8jAgvQ(RddU`!(KpqI)sx z4rRIfU;DSph2wR#)OBnS>U)FbD!6F0Jn;9wqEnuzHL73NVl)7d*YGHpCnAH(&ooQa**ai@;4LIFN|5g>=c&AroX z_r8{w!FKG51XK)2}zGnxUkblEPQF)GKZs=6B@7H4aJM z_aaK{9U-Il@x8sh3&-uZN5hW(`})5|_sdnvE_$y3=~UT)rBdb>7Z;yOzffMyQRNcmr}ju){wn62WX;b)i($8F zVDT|~6b{Q!*l^Z4c>5}Je-7AOgj^N2KiwdQP=b>LNj0)aPoK(Wf2h)ZfGhpz2<@V{ z14{c*tmZy37<4za&i(?4M10~EBc}CYiV+75>_FVapKUXISQ#{csVsf zQk$HcdAu5_?6L{#!048F))37|c#D=w{={Y6S`%d&jWw90ZFB5^8tH)b8&{WZv@NHI zAh2CV)z3t2#W8cWMH!n_fv9AsP$%^PiR6eAJ75bLc-8d@Ha+kHHXBLic!u@g2nf3< zj)j5X{kMMjQK%@^ug&U@MiPQ$pi?+>RkusWAPA2# zT39iUlf0~?Ct=I`_%%+C_q&9zMMACEao^~=m6Xkt$V}UvEoN2-gofKZpT$p@dCeV7 zV2GZq?qJ%SJz9;G0u8*8=n15#gKCiW>Yq~PpUIFe` z4p+FYzl3OY1C>FX(gKh@Dg@28sI5*N(_c*x-`>UNFb{BS0xDyD}2#hkdO zt1o@A%5HFfVl1)Z$&y>^cc_0SqO>|`wBfnGZC+D~ynyi#b%pj1vFb#K&gcYnU-ZWa zF(GTd-1s?TVwZdFXkxO|xe@Hi+j!rZ=5yOj{(4jHks|T2APfNSx`75VM2+sSNm;&; z$_Bk0RN!gDJ#N?#7!t9}i7GCq*OE9ntJMZt%)yfvwfU9qn3)eh^EUZZ-GtLC;@J<( z4hZKuOn;2Z;@sh2t~J1#)eRBA;pUw3g-03f-=sRi8GGO5AQXJ?Q9ZPZq;P^W^~jn<~BOhDVz)6}GUo~&(_E1mQ;H}{jr z2AxiSp&y^_V{6QJm^qLy%t0|wxQcqlZt?oVJ=3q}O&QqWb`wQFttyWf*ouZH{^v|t z{!r#v;dQ++eU4+e(XRa3pL^{4Z(&k~W$wVZ zDoeMa4<{5JI1d-*d~{RaivvP8s{3)!SuZmQ8QtMS-y;Ow>_!786kmBastcNDyHQ+f zjK}aKry%!$1WGFn)#M#qT2)>@P)!D~tP=-ZA36r;2i2&ttd-*w5LPF16gtnGVF&kT z-mFrvzf~4f+pZ_tX+Jhg)6o?5N8&2`tV}Uf@CV?KhKnAgm??C)lC2W^ZR=UqAQhI> zsUI6(2qxS7i)Zt30QB|sQQNb5pgFp%;?JLLouVsoRt5CsVTXDvHIu%C1x@^LKnQc9yqg_+3kzW1Y3j7`wq?2PHd!x1;Bh 
ztj@G50w=eVrg}k}^bLM{#7<{#T9%+sI_`*Po_{AfmrDPGQVQXLYX}jxl;$^O`3q>m zf_rb=r#n_6pZ3&iO2k=5(N6Bv5T&)+$d)w`kx;8Kytbw3F#r!*_cEZoO;%R0w!$FR1Nh4Ow^ieAJ($ANlM z7%8&Op&AXr!KP6Z-(pHet7FuwhBQVwfP?kYT*|fnic94!7Difk9Ap*iU!fHDV>5-z zP_?t*ecC4pY6-Q9y95d3Uhw)ae0~~MUeHhSzGj76EoQz=sYjQ$=NXX7oRr6F4Kn=3 z^|Agk1#OKcn`W@D3ByDAWq6@LULxyj#)B!{Ps%pVfA9iuH||(Va31P)=6>MZs$jC@ zGauyH+azk2egAUul&*gtas~!m9z4DIyVx=5U`9HWjBfn#84w(0hTZNhv;ZS7L)Li)HU#IFed8H1L7kS&R^!OdJl+Zq~UBtB(L)1ejWxo%ZZx6% zy5Mo%%Ju$KK07-(kA)n@GixR1F0%XvioT~}VX#k_0rXI<9c3EFnEgQR^$^9o+*!vr zAw@okYKIz6bb_e(AUnWM>uz4yE@W7#;lI3x0xqMwG3ePiPxewT4eIItgPmY1h#ni@ zV(nypJvJa{=<;NJDqFhJM2nf=rGEV8nf{Z18zB^ILj@dw{#DQhmg@YfF4Cu>UsTf~ zCGjl+*}4d(?8x_838IVKVL`yLQ$^Jhek$o9qBarNP0=*!sV*b4rdk1|K7BQvI3u%;;i?ECFTO4^h{-SvsHYO0er1&VTuI0)=?aEcaeX=&N_ z_4U;^OL!|LO6wG#FH1I6rZx43<&wL4a)X=fz$*zwHFLt{uG~zo;`~U{_07Z|XV;!2 zdOuPKL4$FJMi_T6edqLS-%#_|fVetu`Dd4Xiu>OT9=uK$$R=CKaGy>K{O(rwBND`D zh}cdY0YEfG32zR_TbGXm$PpMwAtx}XR*E!z*>kRvaH=WZz&#f_2UO&cpbg}f1nPPs zYP{?8z8RX5>C;iTcdV%zCT=Lcwwe-gzW!p{9mD!iNS$WXW3=^!sqdKe;bsIDqvLt~ zc24wfOZr>;CPg2$Z*nP-c2uAqC7xD^BzHU&!y{;F^01XQXNkxT+6WK`H0<9J zVmuF(d6mTHN1E&EfY4LVV%RjzV~6YzthD3tN5_GjT9vwtCfAo47scUz6&X0@7ephW z97dKw6h7risQ)Ha9Ysa$3x^c?c!zQLM3M5>`Mm6~>(E}kUt&`G6#9LmsU!SFd8^iX zr4fnbSL7G=q;$B5AnALmeV}xtR|FS_twEi7YLq^X42M?Vz6s83v(Qkzk;qX)N`Khp ziRDrKFCxhH;q9y6Y?>XsE!TN$5L2w*;V9=sRlag|KXT#q*2wI)-FL!3phfD@DL?_= zd?|}6q0b_CGt7~(RP2Qd@UhNWp}XAeVF2%KGcKI3Pnn7W>t6i4R$RDTS1e0uiYszv3u;!T2h#0vg~sf-_}kQE8nZM#HHo4W za(vtGV&wso=D4*|vv|?^T>gk3o!07;EMoLJzHn}$$2B^JZn{L;sX3bBBFcYgjQ&*n zV0hUf2r~|+na6&XQGr)y!Zyl+b0ffwa9a8D+ zKhVzhNMwn`09O%tR+$1f=_TuiQx_=96DdZ&R`-K%obl;mWe6vcEhBNanzUjZ6psTM z9bSq&J$)-K#!HK0M~Py0B!}H73VdF7mr9l zzu>7Dz9?eKtRUy-6_@cBVuR#NHe;Hf(x7M~jlilMaH1^x@OS;iL(V^#OZAp)5rGcd!tExBnM zn@8wxQum%gMZ8lN!AFg<#5$TQ;uTF1ftk^osFX8FBtu?2fn7dTC-L>%PrjC+mp7|L zk@vKBFsv(>z<2zK|Krf3g=`PA6f9`aN4|XkdMwZ6lHj3e$qP8_vC23;wWdf*`4E*S zKl7Gtb8_Mw^tapR)QozSxqmupe5~K_wB7Z(ZU1zr_1o{Lz$!nU*Z!w3_?XpE9VM6C z9CD7Lg(f~DmXM#>38DwdRfxaSPJC 
zEqm)9bT*}vk&@jbNOXdqXBow@uqC~1tHlyjySUioSHsLhI6Xl5+QTZr6$OAQw=5<& zkn?ssJBK3r@$za47O z`@}Elm!`eZX`Mlg$H&cOqpv)VREo2v!m*D}rkYpDfMTv^+R3uT`vVkwcvSPf4fdRb z21n3Yx*qRI#E5or{!W$!6tX=y+VWOo_TpKZ=RW}=@Cz!{4i(t33{KhbxPM}~Yrn8i zHu_6|ogxvI{mzEzLDahN6zU)cnGp;Tj76Jr7Hs#zr|;XH4H@*a@Elp|f; zr2Pw|IN*U6`J1-}4b~h|vx^vNaF*Ec0|hGaGf6t8HzTI!(jAv%sNQ~yw?P)5DggX4 z9;l`0wD_fZ&H=_?@=ow#&jy9gsi0?XSHM4Bs|hn|WIQFiH>|)%Uot7}zV&K5qF2+0 z$ND{?ep{2y*_co1P;;FP2K{?=L=|i@RWW$Er$c7bvITo>$y+i8>{pluN?5F=Kvd-6 z{@_vuKqO-b9fmpo(~;O$^I>sT)^Q(2Qhurw8nwqeo&<@+aeYjrW*o7!@G$fCzP^NX zJls!vw6sA#Ir`fgXlTq_8NhgcBro{N)<^Tp9O0hz`I5@qAV zLq9jGS`J3`HEoA+1hZg_;~k>kS|;#*5@=}QGk6~>gRva?x8-}J;#4&3JVBz;^<2EX zyp~oaRS8iCoApC0qDv4T24bCw<&8qsdO}((Ms#9Ql=Cti+54?;qVGgm_^pE2F9=wA z?hh|t)Hhw0dX!1?Ix{^>>}d;+YaTQYqTfa2p-pu5&T`k`UoW=1Ks}?Nfi>5%?RJ*OoFJe~n1;DAx;A;mw_g3Jm z&P}%LH$SBIl2h2O{T04^`Lg-a6S(2(izZV2Pm&`;2wa1W0KMu1I&cnHczAkxB8n~1 z1&L81VRR_%ugzMUXQ-UHp)tx5BZ7S((}PZ5TPhD221Mbv)Ra;iE7%0ZFy_|gldLb9 zsYo{vg_>R`y*KI^Jrl1~6Yx>f*;{p2%p>^iLlJQUZhrV49W}J)s3=FgaW@d!%YLWk zo78Uf-%??)$PYIH=&~-Gt^i#*eYj{snd`` zq1dJ1$gH0Avq#Wg1TG@@n%T6|!VdKfCCrv!B&beq&Y@7=EC2~RbYBe_1qdk5y#=C znd6A6%ToD2=ZRiip?~C35}&bp$E<*7)L$&?ytqZJmy&^w_Oop`Bpp^n)SG?xJ$n&! 
zy_x(41dEMB8b^b_K+IZfvil#6PF9-eA@amANz;5j2R@h2di{}Hz~tWgG;kGbmXtHe zr^TneGJpLu5YJyj5oln0vFdR@Orbk{Yg0J2DL6;}c`3WtXY z$FYPT+uDQ;E=n_Qv^ku=XxT#D zTgV43qA%p8o-^(fT1>}Lo-q5zrq6j~ZS(HxXs!I)HGdS~P9i)aU9nZzqcxKKVOAI~ z!Q04)4d?lZ7J|phDjkGies|Vuwyi(numHi!=Df62tz2fUNA9yQlF>J3CUXOb(5#_T zo8~D?TQ5qF#f-m~uCV3Df6W3o+Zv8ZN=kBYy6z^1vyJQ^Z|8=oJ`E+MWw7LY)thY& zRG853o8B6s7GY`ZWuMAS&p+%(>pc7_2W60L3DierbTzzCpze&R>G&$r$#wiEDf(9; z>=D@5gAiaD!1&$WUC|DB_`6TYq=hm>nE?%p@Qps2Ffbm245Ofq?LNds?Vy+l2Aev02qmvAR(Ki1kW zp@v%%n+jF9Mwl&U2z`EmFv#T(XW9MSkPK3-Y$29o@`x^x#9)5bE1l{G1Yh{{ue3;u zy;)rHlr2Ih@FG`cB7C0?$#rZX2{S;48|Pb0*4d71_nvO8q6fgFuz!dyHVVR@p^iU9 ziAV~7F~$~#Et^DP@l>Q@&>H{PO|gpK=WcIXd)CaSs@qB5AFEu}mXq<7WFQ{Z{zxn< z(M$TZ+CBA##B90VwQDHZ)uB2q?)3%%qpJT$+q=&!V}#aXt7rZQBF%pQ5g#}X9|4vN zX<;$a-ljv^*t}r++N48Sx)a#(HR9ks5~FumGhuIFS!gLG+(9vGWo7tnteF_wPdtP2 z5V_r6zjj9-wT?+xF;*<6*RXZ)%ePQl*vYCR;8UI8UhdCTMNK7AKux)8)pvs)j4ObP z7bk+g3jZkozfs`v0{oNm1bTZM3Y!^OM@s}2cxHosb^6JaI9ldBQ70Fgd^P7~VDWYR z%3 zw+71J&e#9h~n?42|jsDloLG%+)z1~)zP^2s(^Bd z4NF%X?nSU3fI=#ufPWHXf8`bb^;a1Y42w8bUHuqc?)PoewP@_DC3ks_3JRl=5&J&{4a`Bu?t%jdIEH|#h8dC`!Gk92mOF3nUf@FO z05Doo_#x2eJK5*{z?j>+so8;xzWpTj>Hvx}MTPS#G)T+vy|QY6Y;%yxYY%;rrr&^s zf*q?Mi*ugP;hzz4#3m5hcTMl__$Pn)=bC*4;bcVlW!R|Nn^KNniwiDRr6cX?N@jW{`^9?wc3DCR@c!<88@Duq%=)l{OCg#)#?H4op zsXc+Pmi$zEo^(qu?>0a@ua}q>+JFwn`zGb)C03<#2h|+do&-6U_vTtC4&r+tHhKd&_58bam z$8_<&{UbT>2N23gtXcxok6G>HmWy$D}TiccLc{_q80{c=GMtMv5j>1M0t&IqKZgmp(=eU9KR9P zQJuWwn)x{J5@B~jrl{kJmgpwqkFy9zkd}UZxs~qtxwxok3%fTHv0i=GsEhRtp|`cY z{A(HFpG419C-b$Ds&s=yw=^V!&KCO+jYiNv_3>1XhT(bzZ$e}a$^F}4A8Do}YJ^xg zVsGj~&ftH9TePw-r8wBvFePDd5i`1T56@Mp(hITp@0*|^w|Wm*8F{#W*tu<^c6@fV zzj?b$*aEV_?N(EWl)fyyFO~qJnRa3$g8kK2q4Pf**G^l|47%jeN%H~9I(eP2C@D$G z4C9lKJnrTUE-;+fD*kM3Ad9;|Cd&==E#rh)QFeU0V)n{!uk$bETm66%IrJe0){4yd zE`aUC^acq6rT^M!JIBdZJq(g;qY-zh4gHyAYTgZvxJJ;a>#0>3GG-avP=8jN$UL}3 zMt5SK9Y5*-pg*Al;651$I{_4}>~;j-V_s62u$jqAto~n?&jdgtmeXeFHd!yppn*-L zDh4nE=v6LF6ut2c#6|puJBjEi5qFd;v9B2i*x%ml^Qe#cB+c4IG5$m&55DM{>w>oE 
zyo+Y?-mBgq+)+2Gb=&%xICE@%ln^dSWQwHRYNXQNJE!;Z@1i;+NHwZ%{DAEay5M8B z{M+@}wy$HyJx|TC;bVowsw67Ia3{ZFP^azO%X_^u!fO7ZKO&v~QA7c&M+p+Gx|T|% z+un7vGAR{;7(m3hn&TS7CY|EsWflmF$D@K!s&NsXeK(54|B!RWrOJ2dtLbR)URdhM z2TJy1bbzZmG9E`YlO4e%55?`~^*byaq8*7u7$#_h)E2(2Ym)ES7rxPDLGk;LYWar; zIfWY8gHFy?hRT+I)|$}XU*8j0^@b2!l{|ynWq>~(b<;g|0i;F2XpKbfiH)K~wLIa= zV4K8!qXaQcy3n5UbFj57chHUMKD;G|-4U#+CIcB4o++y3U0_GZJx>wLOoFYO5Q6_B z!TpbRJ+hp01W++X)Y4{0#u3l460Ut^E?2#UqA!zkYJn<~KYV$kD+hlh;H2n!r*jebS8P5Y?2I(0p3EfTQ6i*1L^hOd=Phhj@ z1^P?DRHPHOYKNp|Vc6?RQ+MM}$z$I`kfs)2RRR5q=l`nBhR1&_)IhD2xlX6`v*Pws zxV&s}^1L(CqH(PAXfunrSMng24cjZ+Q{^;|L0ap?-cN;wGv5tQvUS?)ZnnjxI zc8EVo!MD#tphjbMCOH%Sp|XCLp?uga@*E7OmILb9SV+cpiWcdAenY7rjeTV<)YkA1 z--!f(a|vt=IzWa@jcyhe7C{VNFIy9`HVe-&29vy<4Jq|Z>Of#JUMiFOu5o`x|7uAv zv7(mKN_~kD(_o&0FO$Mxb(^Q-?%TFD+ed@KM-R}*kXf?}dt%&!U!!$a24h>|opg3q z4^cnoe{b5q?EIhlpv(p*m)c&jal8G-#)H~`5%RU%+jhOdXg^}M*3W@4;S)0!OVjT6 zUo=)}F8tO2I-DKco8GDSZD#xyWkPrzKqwaM+ z2M`EsXlrZxaK$E#xUuP$8j%gl-i=!!6rv@`@$p7WK?-jpMi8I6Dtu+0CJ1@Wjr;bj z*45VZbcStib(wFu7Jhp1{fqx1P&lsIj!fYRA%4ek!-MgQLAf z?@NJb>S#%oCqlTgdN{=jhTt9}`wZwAUcGo}M3)PQgGUATw?EX-mi5B$?H`|nss zixWSj9)A}D+Q=K*MtDEH(Q)gPD@unwr39o7t^O-C@SD!0;Ec(}<}$-b-gCBG;#`*y zLiIv4ZTSJjtzudv^Y-uz-UOWhNkS>#28J>QumH8=dc0gJHXTRPJAo7VxI@F`*U%0& zqlew;u$#!)Q|Z!@`LfwYo=2ivOsyJ2q=^P5fYqNo)zDit?pofNtx`4add6{2 zKiw^w`hRK@j3m=L-Dkf;tv9zEPc~$XIMGYps@m8c=@nW^>m|(IkPCh7R`68%T@mi| ztCKMr2+$ik1Lk|)S4UM{jA;X{VHE{r1 z3b7Lv%Q~U(t7olfX|=&+B1YdPBZ2W2aj^o8u2hc80rrk-!PB2I*{@q3WE!W3zRt8g z2p?o-qY571(H=P2BY#@n_Rbag3?osExUtnL9W7@m??B-gC2pPacz2xiI5`stw$~V2 ze}dZ|JPHQ8G*YYSK+NIZ?UWK9s<&@k$%kYA!|Es|GQoVruim4s2HCpojg1~ z*R-?<%J`TR=C<}nFvgvJ#p^TN zAgxEwrg$*yM?7(4DQD!jY~|s&9f6@;PkC}`(gq8kud>6^O$n;jc9 zg?ZU@Su)aN&fJ-5P_h|a4t_%hyNgb<2{rt)AFQ8)*<)qF%o;V#jHY~9KwPU|i1KJs z$dcG4DBzkM#%sCrHyK{j$BqRp1X`{r@+8b0CJ~W#sWb>#SL>&U|NNTHI6NXfF_WrU zTS}xzMkep)ZJi3c&yCbyP&t($fP+;G-pt@3fKl_^Q&J?Jczx>s#V>8t7E7Q(k+iwo zp%6Y|&U?%gzZ>g!mL0P8B7oNpi@baC(e^sY0&ecL$}Y=O^5EJ-+M?UuF;bhb01{-1 
zFHhM2BS-U}(vvT?I{bZX*&*bW1#j!S+k5W2qpCcv$d$doO z51IzwK1yNb`cUarewhhEcMExiKXJCRIJWd}t>W6th=k2%mE1)ecVf?`zbLHA?rqkcZO=5hG9&hih z?Ado!+EF6(0Cb68YeBx*e3Gby*NL~Ucr}>z6{6o3+E5@RtIzOaDlj5QHwzsK?5v`-f zg-U**;Be1)?bpL4t~S3gRu*KtZ>C6l4s@1dtm zAe7HoS*5BY*cx@fo)G!3N{jz(;!WyN?~YWu72`q+YYADYQ2Y{Gu90rq2~R=ak&E2Y z8s`MPx<89gyp9R#WX`lqRG*-(48s{enU%>MwmztO(jAtNCcGOYhh>0Gz%GL^XKSZM zgF;)DpZ)C=n8K9tKI`cgryBy&jpnXH6l^AYa3ZCuKm6r2))L+s;$!v0U#qk3QN1X~ z;a=q-D4%kMce1GY3?M4TmG`Ik3Z%PE`%m^Kc2h+++aUG?TE{u6BPGL*ba7c}5E6_PJ&-JQ4|u<$ z3!PdZw^0OjRuD|sF(v%qtn?N-oPhg9;rvB9sk`5(@kW~3*FAm%1>h;c%0a0{8j(=n z6^^~rAJuo~bs^k&GfJ;qCn@X=4Iw?t=j++su~pplOhjRy!A0RAehMs;s6M0rho!U2 z!wU60h}d2Sb>=b9io{`CWL={!%fVB3-46;!eeKl4FA34ow=Mb7~8#3MRn)*$IUk4U7per{)7EOhw++kx$Y+RmsW`YZNm?*;H1l;MV5 zxp>XxA$}7TkBt$+vsFbsE-9(y*Tci!>dVZ4XP+m4Sc3W;WF7&> zqhBwVjo)Qv-z5o=J^EE)NOkAGY%;vq0s4|&g4{W9}^ZGP3=EpT7LvjVZ@vW z3-AC?@p&=yWx|R7*rLip?plvHy!>h`nMBEN-|B7*?h-wyv!HKLS-mz-kpS#~KK_Gs zhU)!^Nz$+$N<)XMKlZGQQs7QNUIok#*$qM$PPSu7%Tl95F-Hf6=hFv#Sbu&)MftRs z9<4KPf)EL_m;0LfPmUj*)H#L*5X@A@!!Zd?o}gzd7>+#g2p)%vdDhK9u7n08#e6S> zFZ7Kx6an)CwTjh~eYz|h-7*{`7te5Lq1Q_ICEfU_0A@6|U83P}^`R2NJ6>H-x!Svh zc-U}9Mg8BAV*vvDkW-uD-o?W7L>5 zwnYXQ90qq7Ah-sCyA#}ky95go+y)J9K@$k>?(PsYKyU~i+}-U>zI*RGIrZK>)D%oH z{6X#C-o00^UaLFiQRHD&d}sXmqRM^WljM(#95x>*c~@B#2CDwaf3A}Zhye12pXGw0 zhyV9e)*srn;*vUgaVVagR2c%kL-*O9BQy|E-jS#sJWD=!={Q=gCp{nffSZX2guCds zl#OIbj_;;(NN7&3h!Q9AohQitI--h$rGq!Kf2m^LMS7MA=9W+Qq~KWdDyt9A$fS?} zxJ5xOn^Uk7(2P`xwZFCaIS+VH3D2wpNd?)-iSrMyaM5S}{s)cIfoS=Zj@H)2L#Xdj zVY4dO-m34Z>)XK1=rw;;vsuo$VItur!)0I>a@4Al$A+`MEtux?A?A46!JU9Hqq^$G zNMq2|PJ8oJyYW4>g?`W_>=kw0`N}tR8@-Ir>+??a_;Nx@i!IxckLi{2B!F~FF zxcof8WiJs#;-P*LFlX8|m+FV$jocc+V4Bs}`yW^jEx?Kwo4Lb>kOFA7D$tgiw`K9T zm=e!w&?Dl?s;^wStn1m(ElmV& z^);Kk2TVLOHhM&!Oe4@#H24e1YX1u^12)OV>j+=)xQ{T!8ZvMdE~K69Z6D?sGB326veM}(?b&zL#uU_m99GOlz34$Q}KJC zX-d9lc|}V**U*9avt_DAuRwd%^Yx-hf;FwuA1xiBA)Gy{J-%Mwm?;R}un!mgR34&h z9hjuHWcs(W;9to2p9?&SB497+=Td4+M>P}R%A1cR(nt*Z0`9~4LY)*600%TY_+q$c 
zK|Y$L;0^rQfCal^CgIkXpS-D<1NiJ>w#qZE%KXWTtE*6-kt6F~1k@y7uVLw(JvR?* zpVi&v#v@WO?ecREA` zj)s^Lay|Fc#*%o0{p;Ww&^xI?Ug+1Sv+sscN zJTi~|a0Z`}{QIUs@bUl;^asQOXyF4;G~IrNQQ|qzBXG#$U~uhM&<+_$H~jl0`iqwmUnuS{i9u;)>me$JJu1cVv()W zw}JsWx_DYbbw31TS~5WMiQ=nTi>%^py)!nBM&df|lOcqTdrv>$i9E5P$2xq%M@171J>=V1X7hbA=lzY4k6Dx64-l%jUraM<WQ-I+kQ}7Vf*Pb+@y$@i*z__5_!|$ zn>9a7jy1-qJTxV2>waD&&CG$~ez$a<&X);G@o^-kYpH;~Z$%XAPKF8NSK4sg;A!b)*9iOsPqo{NyXeC%?IE6Nlfj8)44+l9sB z*y@M0xIv{-*jgl`j-h17m&1;QGsy0yp%$8EfqyRLHA0|l9~1VPltK>F>w2I& zRdbOFhX1qX|NqyYHC@j9^u=6&8OLo6!DqhGWX>^nM7g1_?$LY#&v4HSj_W&6bSUqBQQK-xAIU6 z=-mQG&ai(b6vw}VWrC6=rd>*!1Hw!On0oR{#rwCc<5pf_Fgcx1Xm$#v)T+K!i_>ZI zZ@P7+DIUzpqApJh-^IxwSw?h8jrc(~Dt-gE&1RYgP?FJwJ@Y?O@W)*=%mcQ@@AIxnqQ1GW^mO=SSD&Q`XXpiudTWK=k zDd{!nmDpOP@w+Mlj(l`2Mw9;?kVExW74^LuY?xN!#1-ap9(WFyBTJ7`KFb$S<>ebg z?r_+A&4`TsLdx7C7pVrhyq5a7Ixcb^sl-nL-*)|r01rrz2zc)lU$|=c0KZty2v{D8 z%H~hy(%bSRq*FYl1b5m>VPRzh@uuTr=OlU7@y&SkN`50tbb;jrOK zUON$Buf0mR`Lgnv($U*{aW$thX!q)0%gb&G_@oA%pcx~saRGvQQ{X9z=Btjz-$gTU z*G*UPV*!qNYBH{74CvI&bPKwpI19Fz!;hM1piW=b+Gu?`qJsjyEpS>pyK1nWU5k8F zm#fywYAW*0OJK`|Na*PJ3Xs^f4@(}pOaXK>U2{w*FiDfP`MlNX4B6P|)W}rz^}gzj;KB`- z@@(MI?uaQ~`4}t;QTtaIFn;`lDRVC5JJgxkLXT)xrgBS3FShEf7EkK zU@Eqh*=rX)ck#j}ox$RdY6wi+<&MJNseLvV+E0g&rd zDsmjgY?Wo9KqNTIEw@{k%+KFNh;Ygp9XUR~jYEt_ffMW#u~%1(HQf0k65{fpHGe-O zpi1cB%V-x>v9TUuFG z(BEi%&ie`TYveYU+y6-|m&kHUecUN~K%;wHJUD1JU$RW=lcLWBN<#>sibvwIg%%JH zHOE54IG(zkSG45nyoY(HOVM*h4Mnf;354wGxcv{3z%~h&t~3rcV_CQPw}G$uY0$Wx zdE-U6y@i-syiof5<@n^HRg%L3ym)`*feQz^T`Pn9>jq3z16 z2do$S8-Mu>yZ(NxuJ><_(twYcOl9RQz#XHJF!h{3KRPBaRvh`c>Gjq6bKP4TVhGN>&m@{V zer52L$YX8Q%-deK2EU;59t;Vd+O`sxKF@v>1(G_xIoLM>qL2NII)yU%C4Yz$1;pS zm7>*LDvmW>T-fbV?=Y5=Ag4(Cweq8l-p2?tuNe;PIRDySc-0o;4CR*SqpOEf?RPSR zf(4~#)*T92$LdFws;dWYe9LVCraIq`*_fFRD=I2dS*+zFO<|lNj;VR7 zl7);Z1fke8AJ&1gdk>dg8n*|@&(*8c4l8turv^E2EBPJagpTOJ0B)$KvROY8^ZpWW z>x@mS4xZ<a&0J^sj4^H91lBD(gg+kOG(3{ znq+O;hW31Pz}+YfJ4T5vnI0fpOF>CTeX>DHH(kt0IJoQ?;C@i*arPqEE2$>sr!XDr}1S*tVPF0M{4palBD_%V#4SiRX7ohm4s4Mwv@Re?9_Uk)7(=NR* 
z*pr@1;R9KT#nnsL>%zESj?0{ww0_BVO9Q}w2#{=NQjXx~Eyif311nOV&GUo1^_B#u z4q}9N*`1n=gKVbr=Fj98?wwcV4)!q@z7H$^I+kl({}e9bVGmRpO9iv^Qv28x4b`lL zraE}@vEf`#s@ZGgP3BOEwWS)u8At_A&TNoNSu`6<@6$w5N+2aIN7s%xF)}pRR&Qe& z#TxSXPpb3K^b}SsA8P3ZQr-tDaLCrEUJvH5Q~a8HHcWjSTSX;-)Dkq@Od-uFeckUM z7oJI2G87CX771mz+F>@lGI2Y7^Xk+EaM`TkWegPzf}S~Y_)y>=(f9I@FKUGcjsI)& zm@a@43+z%W^agd3om~YUaiKFceK+c=SSNrW(9iu`WpHr7g=N&N8m2PnnduLG^rlou zpo{~$73bt#IZj#ewib#fE5BYAB`limj&5p;nmGN9M|RG<(e~x>(o`BEgR6PmCmY$m z%{T$;AjOm@$+%!C;6!GHPwnNPnW=dAiZKWbZ@S02iev+}z;8AMoEt?kbL8Cc2BsL? z*w2WI&g3B-SNY&9W?^1G4-XIb16RJXE&PQB*5dxz_mS3PZ};%oit~~bKYSGV$SJNp zZl;Pg_$st*^lU-lXqoYnKl{?O7lp&vi^2>`IVV8KDLD2G{{%?NzvyY7e=DItERG1& zy<(WkiiKFCr1%~2C=MTIDi2FoBIp_aIldK5L${TmXcqD^-ur65u-yB_cK(|-O6!Hi zd}{^pLRqK6$jV`f^Uq|U8pFzzb0T?YdJmepqH=e08-|X!d%PGIVe@<3peP+%I4K_2 zp_r>j77X+UKgzA~1nu!C1$Mqp&4HK4y5)C6P{_jJVyS@q}`dl>_pa_ktc(>Eyo z)Y<{jWDWPLSTu4!nGk(kgnXluyVwY~@Ha4t0nzk&j^sBr!)j||4^DfQgWf}cf z9L7k=PaUxyST98Ka9$b3oO)3AA(^veP2bQa$Hf;J8om6{?cwr6pNiO6;hFA@luO9o zKnPZviKcJG^ucB9(I2~WVP%4ATvXv-^0yy<^aoH4&n46b`0BSvIBaJZzK5z%WRlmH za{C{d;}VSn0s`nISh?_%MLASuh}F`ANgx1736d(Ub0nbKv`G(>7`xF`IAN&^SNKXX zfiEwBpo;rV`@p$oUyys#H-TubrKO=>0`*bH+9_1`-f5H3jA=(?rDQd4~U{eEdV z^lSF=(j`}J0otJMHeHUAP*3juP_@NsEPbz{yxbMqxVdU%;>ezt)YRe196)ob*Ux%= z#96O!0fMbCew(TW7*XfN$bZK3=wjl);Mw;6o1gmoy$35`Q>MgM{tS@zI{VC&Eq%cH z;Z?_`!&>_j;xZyz&3a-T`?&<>ZBFKhl zeuZ_Xc=6>c4e31~r}NEk5jA2ukgmlv^VqcT{`tb9S?e;?bNDSfI@q1co+asjA1y6U(oU;krhejeyFM>;ymCyhVyTdY60&9{?ej5uxFZ-47_tx6S8^t(DG* zMT|+KG|L?_r29eOsl919h@r44np|BX7McJ3QlS-Zn^iHib@f@K_t%QoyuB4w=AOR! zR3}-%2@8!n30uOmJ3|57_Uq5!{kPb6#r1qcR#u4FnQFC4lk;D)KGc@U^%e~^pf{Lt zTGSkx+Mpe&omBaFLz$F^KYg4qF1&xZ^D3nPgA9Z-?^ad}$7+5*KRtXFy2irrY64U0 zlT&udWGFub>|GQBzof8-!y5I|028eZ#k*Bm?#bz&{5vFwLFEt0jWPw0k9|G=d*scC z=t?^Y!6#q+$Q#18{=P-p7aekr;Im0=%tSFQfw9-LN%%r$JPHO9yCGy7k9_%)iwXjidfVU#x1)SeNn} zY=l=~Ex7GRq~lbdsx6BC%q!~qP0rvhwjmHFh8s`f3ymc@GwsjZ9#(;iZmwJcdB2f z`i0gIS=@tBc%? 
z?; zx)m|R_-=;I!FtMGIc;o6V-eP%XE-t8B4_THGSakrDo_uEQx39TU5B=OaJcLPT$1IT zFdi5Bu?^#25lmC4K*E*_Aa9AP|1AUUhtj|(aL}1jWKya7e_Ws|v~X$U0l}|J=U+j^ znrs!IkJfj7@UL z7TmD66Kc{6U7LTbkFN#B0d2n`szjj-y5Pc|ds4}pTq^B4a}9lbWB#NaQY;560S4Gpr0mDuOF91 z#gxyUx^k61#h&$9{gADiBoz?jVB8{t3K#IdIwVdv0R`Cb_!b@q=pMMvmk3BqFXbs-@s7aAqHs9nvBC8t5FQYEM*$X1M!9rpUk>1gN??Q`z7=o5> zJe#Bup|^X3v^xpak)J2M+jCXkSHgg-FgA(R3;A8rBPz(D)5L4b8Sc~sP*zn(!eCWv zUwqhw8fGguUip&Wtks0Sb0NXTfhuUT!rpf9|4b0OqxjUWb zY+5(t@w$vCyz>!pWQpawF~f)4f$L0O{Hs%>^!hBwGI+*QcMK^VS+L-f7f@_OGpxHY z>~fbee96h?w!)Ej1?G9Ow^8wh3n>oZlYM24X2}Z3s7}L+y^#J@kRxRIL$Q=`$P>z< z%nCrY$&KMXZjmW7t-z{e;76AD6fjcB(N71rY42zwgM5^gd?y8$u31K9VW3Rqd^ojrZ0&J5c@pMqe^~{tHoW0PYmfL5)dm zi;P&Yv53|~*Z9jM_QCDC_bRjsiv&4Eioo49g}X143uB~8pUg1oUFq|KZ;g;cD;iAt zWtkY_fBth%kGUJ#IF79v65<=RM}Fn~MEoC^1!aY`du54@V9VKqBA;&`52?+(GWFXHQTMH9fX~0U zl7DffzgjSaV+}{6tG1pUuhkcyv6$4)d|1bXc!b&?*ZSC+n3&w;#tQGB@{zM(Lb3e0 z2EYb8OPB%#$7m2f31E^s9f2llhI`|-LYe3B%pe#W+-xwMJkWze@4Ij>F4<{iRaz?8 z(&pQz>;F&=#KD8db}IPrk%ngP8HRI3ek`Df>L`bOb#v*6e~8 zfpWFcsA+j%v|Bc(YP>wqnA+!+IALBXuB1eCG#+aFT*Ust08KRED}p{X1`LXpOL&nR z38oCMH$A@ZPrP&tZ0mgv^PP`^)%?0Jt}xyySY}oQ$)V3ucZnI&Ea%8x4}+JtJ9rWV z7b+evcfqctd~f+=>lIMBmNQFJ284sXzS-cpP2WF}Aze@HH+_Iu_)3;fFqGUic^P-! z`_v;5Y(B_@9i*0iYrXP$@t<*fV8XmM^VNSxu^aGoe^B@Rxp@P2Bf=|XAGc*rhP1PT zz#i9~uq`uS19N4ho1p}qoY&oT7HQ0Yg8%qt?TpXAMJ$muE%rlA!e==Ck=kWuWp`pK zTXVSpr>(xYGNloO6G%ZR?ehaJ0I@R?1os(~I<8V&_726)De}2`@zQAd+qZ89Csyyh zBWgYVE@}m+g4scVqOek7^%L*8gtxaee-jt4ITtKOz5inFDKsg?)@1XfP8QZ)_p+PV zVxT!Aj|5FQ3&;TeX;E3NkqlA7u;+xT+OIU-e#taE$?OzR_~vr~W?afn6k}q`Xi{|@ zs+V_J$IPL+0;V#qn)al8OLr%>BSj1Ah4eLQc7%4h`FOwj^okJwd%L3PtKKOm3y4r= zHgBmtcX>;O<#)gc?wP`Brd$DDz$y4j=n!M7vDm?s;^OY{y_wktVsS8@=1=GamLAx{ zzCwr&{S(i_kPbPlB8!rtcr1kc`jJly@l5w;&5=Xdr+lxd*65z@p_jWUv2PLAVC#I! 
zhJVd}pn%9_*1U)&^-eZqZ2OpT3C@0X1S=eKFV(+GHSF0a4OqOyT)RmUIu@0v67))U z4Y<9GwEL!n0rsHPLo+wRw8r6}EY_@Du^V(Q;|bz*jM%lod5@?nD33)SkmZ)rtDjTq zQG2gGQe4+|ZcBlH{J41LGsCCqezl{CNi2N2DF7g#~$~@Z&HYAaj&kbpvvK#MzdI9X>3XT^M9jt0DNl&TD6de7| zI0NlrMSKab|EN!HYp-`i8k>LQipu}N*N4w)!}*A=#4boJ`oV2j!84q z)!R(gn5T`W>L}%*=eys}zgSne9&B_M&#mIRyW;ole?RpfxmG?>-0ePooe6yABule! z5~FWnczPENUj9ZuIa=J-JQT{8Hh3N$o@bX2a0{bjbt1vMUU=`GLPv*1!LO{&j14Hk zbdB88Ook|w8#B4z#eC33yWO;F5B-{9vcAX5&3D0uC3}#k#}1kBj+LtTg`}T z&_@l5p5D1~{L)~5CtSQ{rzEAU_kT20$k5(VO9ii@{OnkS+irHprwqesXB9@`b3bn% z#koQWpbX5>u{*8YTCUYyW(8;){g0BUc~3uzcUKngz z=ur|cZ8{Grs&qM+7KDv4G1Xt8 z9Z7IUt1~72&q$xXjvFoEgU{N0?G8sF`g+C35&PO_U+dC8WClO|=J+!Ax}wlkX*!5u zhasm)bzthwhKd>~YA!M+=7^W)qUFtUtc?K@{+!d2m!Yy61(p5yUa74mf5jX>S>(rj z52p>MOOFkq*KDj#LL+sg39CFxH#cUHO`XY_nL73fE{hf|JPACF+n=k%jO5=8NMXTK z+tOs`=bnElb_;*F8&}5;!$eBnXPUW*yEEvBL@|VxpOB_AYwVJ&@Am((?EX)cR|z>l z6+2JXzh0tw*{0sZK@+q<7;peGtxSSyfQXWhB0IEsT3CR(rkW-iMhZAf$b7|t-qEhX z{6y@clYQK-MM%Crx175!?!wDjU$VTZsl!Asx3G~CTkN7i$sIMc!_Ib0 zagE;E+G<^WSh;y~Db~P#dXUj0TQ-OEVfC!L4v-T$kEa0cnyX5=pVBRG&DFllkKgmt z#3fQuYFB3G!|@A$;=X>D8XQ3Uq(|UObi?k;=7N^8PLpM&x9DiNK4g&8oKEVU`9Cyw zf3)b6VK#IcE#Sw_h(g;x4ihDzfTIVK&oG`Nzy?;3!Xe`d?cMy~G)PM~0j+7vD3&~< zB%NxWddH)uQ&~;9Es{(_BdT=N?JVfzH^xBHr*l;^TO3+>`D-8=d$eLrhDOf>S#hlN zl{^)m7V|?+$+J)WxqNFF2#wmy3{}myUdi}Ltja!S;`%{f}ey1?j73dE!&ru zM!U-qEn&~x<&2ISBgjSPOxArNgPaFbUgw3S>>JkL&HV3ic4NVtq}mN1Ld{dWUUIh7+J=+FqEW&rnwHM`P;oRneZ`{6gMAz+FGr2-bpJPM9u>_33*W zHOa0bSwB_gBG<2D^E_s0tQR0g&;ishF?P25v*cXcg0xcfYoUE^!E1-AvJ@M`m!$M! 
ze42Bj*qyA`)Ys2NNWh(1a)lgaIHYK7gk1X1zB)t*VIVN{z~* zRCQ*B+(2ZG#6G=G-fRPi-bODRWR=|g=$aONrvx5vwytv6It=j)K+%l77G zzPs!Hcgq8kp&g06tE8A9j&`& zhC=xnchKC3-p+bGgM_-kC90HomruHL25x zpYKx(kF)e_&Kv4I>0*r@4%bsIVds3z<4Cx-i6181V``$gdg>YS9izbXTRM`kG2J*b zbmmSP9_X{aosDmbefL}`Q0x03Adstep#onk=eF4>=ZxNu5uT#%9(Nr#RLGr`0^~=_ z_zUEEg-beNv+W}ChfifoYf0<<@m{l4p^Sco7U+l@Ei%}iu!^eJ$g+dz=dZ4xSv2B# zEXc_w3ZrxZU@;WxkFX$in)&DHr|(_IpAC&+F(}RElJk@3ZM?=hyzfgYDjqyye_WnW z+(~O{xw^WJD`>iEwkx}c;Kj$^ZJ&(~@@Cu)TrgXWV0JuRIlMJ`_LMkgZMs}W{XYJ3 zKQsnXRA}05zk%6s-7zGgL%%!W=1ozLL&g+woM5Kuisu^)5gj23(@M2@Z(8L96ZW99 zvaIi@8e|>uY7)QNv6&K|Rl={dcb$~5iyOspL_T{kNC!s^Tq#B)uFSltX;}f9**)6C zLQ5Eqpd%jso!uDn^H`Elz&|)eCQc7kE!%ITefqZaV^~Lp%d= zJO;GF9*<^$tx6+ubVDQ6;L5^KpIG=55LLv0Q3k%>xFW$&WjNir=#%!c024K56X`1pMqI?vM%lyqPz%#BFPxzl;QrMdsx~JbN$#7ILoU9buR@ z2R)8o(j|7zpuM#V7wuAVB4|z2zEGZ=g9Q5wb{zD*cIddXL`vzVVIhS3Whh3LVsWM8oI$9`5`O|JjQd;(rYU8EtZI2`)f zUOuyEre~e3Yf*-Bs6o-BwV@6Sa5vC}cB1W7>s%B;4<&0-*|pc;BBAHs1g{sBa>ZX0 zN_I)G$~k?5kz+UYEJ`F*q2tqP6EG=uH*`F_d~OujO=mP(Us8(*b_w%^WtR0XY!+sO zb^pAPLHh!Bf|rsNWM{{caQ|0UJp_cjfiRIvNc@|+M0T>X{$&xsBGU3SexHUjeN@#vBd#gF`GT19iv2$ zpA?e~l$XF#n8>suG)~y+s#uFmEkysqdMrCb* zS;KK4xE~SN(RV=_6%rJ4KWm6YgnUbTE}|S8HS<;B@%~Jw8^}xleOGXx$x8LJI52Ut z6FI=~%St3}S2=@Us5D`zcg4_|Ch!xU9seMs$=fPU-z15ZAj%!B?P_x%OGWMAn^v09 zA_o3a+l!X(F}6<89&tXVC+;CGB;tk!1ES*QHhl!-qzY1=BCFyl!34Ez+W>pna8A`Y zknMM=o)i8!TseO12ROcXyZIP;1S;_UJ->8>F>kSI;I@EfM~q2kQ-LtcWX|=FL2XQa zScsWyc%3#tiKfTwP*?kXTc*sJY&!Ed!T`-XwM5M`mTze63Di$5D5X9~5?Ggo=beRu zuFB>UT0lX#*Pe&*9nXm8RI8g0(0)Y;K|w+He(3Y%F!jEFp94~MfF#WPJ6Q3x;Xm!s z#R`B7650kq@Tul#Sb2iSR{s2wNOL0Ugj`sa(KL@4Lbn4qy{K@xQPoyWh_VR?zyS3M z_0AyIuGY=<@L9GtCPWkOQ-+51m?#WD@9Qgw>vDKy0n3N>h5%gbETTLfl6 zPUgCo%wrMTc{FppLc#Q<$2C=-{9V%SF@j_FZA|Kk&XVj<|6UJ45bVK6ArNsV^=0$) zD&tKvId}j3yAszKlCqNzBL#DffS)Uo5o^mnEy26)C5TalkZ;%4s}lQr&h_@ESm+;^{GOvhI^E zjwT6i+v_W$xzOi#n|98~*3=&|s)_tMXnWffU(e%5y+;m@{q`Q(mlD@K&SG0n5^mbm zI6kTkLG<^-rXpxHIqM`@s_P51t|H6qYx2>?hSB$={pNhm(3|C@pQj@mX{lUAFsh=U 
z)TY$?D!frVVqogo`PQcT?0V6*QX0h=fx?Js^1f7ajS8&;Pi?Gw`C?rN7-pDT*I>(i zRjjf|6PCUUA0Byf?roilmlusf+=|6SF`0>(Gs6(9O>B+e2SNR8R$YDlj&0y+_}BG@ z+Ej|crS2UiB+yx!p&WiyqD7=BBzuJ7WgqUTHM4c~v38P zX4EJ8x4wxakjlKbbak7#Sin^~cgV_<yT2bB zA>0FLfUgV?cctu=UYqB|wl9C%93f3#91KS^`P|;p?BdZi0|t?KrjaTx|94_Z*=uQ! z_h|k0PLQ(BVKc->FuWEe{BLlRdKy?L+MwBQZe-cM{95zQ_j8p_{tZoMt(hA+Z2QNVw~cYJ*zx+BEBS(- zTR7TU4NGz_AH~{B?(c}fk*T^-lx31wxNq?PC&>Sa^&V<~vhgc+wMm(fJsU;clJR^e zndWiYCFcLR8L^-8KIMI`1!#ujvJa>AGLWCTt)Ta$heGK zFoVxm3)zYN7hvl8)h3LSSsd;?;YZ12IeFc~2mLd!=$Qg`M`6pWaKo%y6oN&$8#&is zX@>r2lpBRlQ)98qDC_gISg@GEI|OhtP%Q?g(g6&&RZkU2_h!f>tm4J$NC8 zI5YA%I<>WB`FwEu5N^w9^Xk*`>I`}%DzrZ2Na&@V(%4I4n@|0|L0rG(^p%du$*&WM zF|D&cEdl7Dr4Tw1m$}%b81IJ*$o=uKJKb}d-c|gU5zq9Et}i`b1avY5<#g0c(C?cJ zARSL$V%t(rZFH;e9dVcqo)tGi49%7*@)-ykB&&<9`2B4UDc$AQ$n|F*ty}B815$?6p47Rx0gqh$Ck<>%LeAeG}t$ zN5_uX$>x2rViN)wz95OO3{FV`;N!3Gxnp3BoYR=bFQ$Jj)1G-4^V$cvB#bGQoaIIs z4C6uCt>{=v33ye^44*R{2R6G3zZS+%d<|(#RE+Oa!g;&Q8Rl#rC#I`(XYl-d)7fbn zR5O#(@(xEgmM^C01K&!L`m#L(%{XNZkZ9Ka^HPc!{%`ZU6LM#QiQx1wQxF4)<`-RQ zfU-Q|rV6SzA#A%$`;-x-jf9k&=4w~zr*Mh4QZ-hAWx6)bx_C{)_l1Lao5bKUNfaO1 zX2lGI{{j^Mg)7KIM4QL+U3NbfzvM&lMb5Y@5p&u2R%GzgKhPtK2ixO-CFP}C*$H`R z$FQquc|^O~J4yPSPgnji`kL_7qLY8ZtdU^|8W2_m zA9muK_i9A|T9IJo@1iww?Z~-$vXOk|&F+4?Nusv!rI^|d9X}>p0F#qeH^hE=TV5N< zh3r;Z(dE~<7j=8~V8GkmhAl4_*bOw)AERM3yxy0)6Q0}l3Px~*_~g-|oDaX-NFH06 zSU=33GZRc`Y{`4^5*d^s1nS+PnZ)F$&mCAhZluMKeay22*u~s)33vohX@*jzVDKsc`8fgV8m0~)Q2=;c+B3_a~(m%XVBDNEU z5RQ*Kd#c!z%P5JXkXm?SA%rl)Pu$aOeY)X;biWgge z+3U^YZZe?s6D2ZytQC2veWVk)lS`ZVY$CpEz0_gw@gqRvG%wBr-xI+T!xND&np6sd z>S<6+yPF=jvRAPLt0L>_1W>cXMU5>C0F-v7vu%)_^5RyK|R z1RQ+CLKT40a8{!gqta%0K&V zrFQm}vTT!81H$g$fPO?7E-rQM=x7s_2l9ojqaCD?0bv?Ka%w?>!NW+OjRnF5;$Th& zj@B`mMyXHe_Och{N+HB4y>X$x^W_b#dk_^8Xp|>R+$o}FY_E?=-Jbl#b7-H&khYq{U8)^K%`N==Q_-8KgP)|H-Wp1wX*;R2+~WKdN;-CJ+rBCk6RF|SedrWkP3*5kj;H+*U`2>_RU}-&Kn3R6fRlPv8w3(-g3DxuPyAIBWU$&Jj z8^Tn$ai$P12vZNIB?cah&H7*H8(?!uNh2ruIDSMa8-l~xgx+q%c88*6kssC zUzEOB-AQDyV>z2M#>3GVC8b5VetQv`0+{h)pJbN=xC%R@iB~s?B($6MvzDnSlj>#x 
z49A!psOMUjWG!0t*MccW{RuT8h}q+`LZ9x*K_Q0wX4;vh9y{0n4c@YXyTrefi5vKV zG*2Uo)L&p(HeVNM%kbq;%V)(_aJin>O*il=otpi zHujPWJMM<9Ld5&|@h)U&NUo{isMk&;GTnHLrrSeYn>W|5CIlV$K0{9Iqq6Z;A$Qvq@()y?}d8hji`urzR`x94pRRPx|R4VPu>;(qC7FcE5RGQ2A zwR$EqZxLZt60TSpEHEBCzPjdd5t?Q##aa6t?h!B{zZ<8_pzWDp$XibB@^-V0%@fgPR zlxG!aIc+Phc+3RdGWrNi0UTzXy!KT_W+GaAM`l1S6&#MFB%K;!ZIdh}+W57eN2B zL>=sKvpc^?#i3bjOym~vpS=I4Ii2HEj)tzlZHM8F?n#yqXir z^ouq4=aQF)%ZC!?ET123PrQ~dF_#*>iA=n&4Mauzx+tDcL#MQn+c2t+z#lzbS(zdO zbc034e1@vF)p1ir&Z?gsmuy;6Kx^w1?^5ZPY2k($v+%t};O=NQ;WznHmHakHR#Uf{ zRsg7f$H+e{1}^ZBM|`b-Oq;pHHE!Yg`8j>;n3HUoudRNCu7gNwp|*L%a4h%Hvw_4( zjZk=)62T@>KV9WjeSMqCw|!2&s$KI+yyWPV)vDDO!;Paj<-?1_paJdFFh7`#wda&- zcP#@^OG;Q_$8Y!>fsABt!V--b7LISpoJtWf+UOvQCn;nkUpfRKO&YH6$uF2ZZyLe zQE#fTrJEDM!(~b1%^GZ#(yw|eJ6jhkgcK@N^)H6UM+}N{>-qfuYz2am|Kc(Vv2L$B zU84gE1d?x>r_kDr@|=F=EG5v>Nn48|)pdx>@4pLT+}neJ`HkpFDsLPt&25}()>n9b zjcqRXV~fc%W7$b{i}*{*MctKOVq+z1>+T1u)7{HV$Wfs#nsqnUJU1Y9#P;|4+xy*4 z3qQ#fzGHT)bBob0apW|~F_TE6#lb2{p>08=LD3MwZmJ-h2q_?qtUDWMO={C@Q(`;y z0}JSOl-T(2@38h~_on^xxT#k`!Cm=tMv(TqnMyxbD!svi4e5|>iHoTp^@Zn62BRB? z?@6U(av5h~b0bF7`$0`vSKY0rEI6<{P+0A2wUzSe*S;CS%bc^Xp@JI<6MGP79$X(a z+IiQuQIM;|MU+2878Y<*g~dD2E{6L=3FJ>-E! 
zT~}1PzRcolouQ8slIC5AIr0)$+kO=S^^@<+JFA>e9pqkwoXO&Cjpqkfh;^%o2MR*S zMMcedl3ql;m>5^dJx=L7B)Nqj8ezQxk?JJ=mD3o{VO0sw6Bs9WJnOajxPU4U#jfbanIxu6X>>4ynZ`FMt*RGY4X zdBNf9w4w}=L4!wC((e=&Gh>Q8>Yk^C4unLf;f{?#4R`_nhp?{3TVP_~XAm55mk!bqdf8>R4N6sgnye?s*i$-b&mngO+~9aPff4??VM_xXIjI@ewPm~!y2E6h?eGZ@?% zM+nf>_>J5RS=Pc{YIFhHt;HO;TlCCP?AwSvx=@alyRI0LI3%zB7~ z4kkERQf&hoz~;|;pF4kJ@*OFdbYcwR?rQCFgFf~f9{W)-MF6b|V(6f64Fq+BFR{~J z!g+(bk@g`!l*k_D_PS_i*1x^sNPd};B)9bt^s)_zvkd2Ymt)X(CKjBdkzDt0L^Ft) z$O7O5H=gJhPQGrU%IeE3KvNWNzqpDky)eHpJeW{0yk9^4xPF0w$zj^kb4+R2_U zObMfmn#4t$hOEmm=<>x)@o_7rz>5hB?jOd_iaiEcks$`WD@ysG^*^+2VsZ6P8$_;b zAn;{a&J;Y=oR|>8a9!;zr43l(}1-j@NyUna)KLA=a#s9%dpmRjd?oc%7+H32DS*E8h< z8*c2%1A;$|Q5+aW4HC5mk^{CUVRJaOfr;>c@NfV9ssB}FcSe(4?i(~8Pe$_2TDI&I z%Vdk|i+uWkz!r!`8t&Gc3?T-ayNurXjp&<;Em+<~ATo>zy)@0F5Z5$zUTOgYtlDPG zV`+79T)Oq^9_yZwG<0)wtM2IdAaW!pW{x4Hy~K2+oB&Q#mG`}*v6WiL2l4szq}gt{ z+Hy>o5%>zaH;}j!6x?MngG);w=3m4|6)3nw-NN#Hgyh@S`s|N!Zs_j+d=rf8ht&E{ zh_*-sW!Q0cQe}E@-|@EdVfXGM@IiOuwzFRIXVhNRNy!+>EISVXA*r`Y=@r6-;n5Ewr>q0^B5r;glQp(pc-wKNEHyUm(XA2YL6^ao-KJf)cok z1la_OWNCa)v9q&7YJ&@$%&Xh+x&4YtN($Z{*cvMJL>&*B5yHmTskV#mfZQA{WkIs% zR|XBjGwPqPpyLDogA@3lm;0Z9){qJyM1j}#R#D3IrFlzzJK7%k$Idyokru3HGR^}F zb-APynTzzhZQP9Zq@znHV1cA3MZft^~|4--w!JO|6=`v_3o3VKve zeFZ))I|oG*&H36+6A~7z*%DU-#`lGdJk&b@aaIM^oT0NvA5@W$e7K5O5DzYz3NVo> zXb`aW_^T5wojCY-BwAYZ^h`vfERit3g>i~4`c~jpilmqfIjTh6;nB24${Nn(zb^3N zNXV0FhrJSzEhL*e{t@R}~HFtWP-p2hyH9t-Xi5C)CR z?HkruiRyhjH;->&GndHr>vsrV5lmHGlj@FyPCje>v7mC1K(#{FI7A?#u~Wp2rT_zK zm;ki*e;Gh7q&|PW^4Z3QyNJVYSYd`j01f^+DenUU&0}Mp(*n5&(H#$EHLL<;$p$V6m z#3SWf8YqMY&fP-`1UPyJwHTs-izCp(`8i-@42h7;gMN(C$}(0s$dN~2+2OD5>Z_|` zS!FIEVg%ha+P;ping3`Ci#`#0)yeH(LSN)(wEO7_h3gN+1;!yiKP;wLl29Q6_yQ6P zZN^|(+v3HlSzoh_7P21cOraijhmyE|dXR3yi3!njd;_pT?!SKvy68f|TXYKeLOLvx zQ#NA}+tPd7;87MhQ^JYZ65v~f1yocApCFweaUgEZriYRlDpKpHxo-@W+M+4|-CZ&e z422=}-}Ex62a9j`_iy=c(&;~g|CA5H_^`ojjmzCy3(6B7dTxYzP2m?0k^JPO8={ML zw+zfnxam!PfwnjGPiHGaa#re`Ni|+sN!OTLifch5%Uxp)r$!jbwG2V?H{!SHN!^$* 
z>oZn}(THLsnPgD^LZ*jiM+?f;8DRdpWVB$k+ThO%*&y&bWblOS0?65Y`e4%KPi zzj||APOZ0ojt%Y|Be;lC- z%@0xmu51v8(8}BhrO{=cl+% zeOwf~o;3|22!+8TFm-U$EmGG=c96w?X*7Xk`ilBA3IBn!_RI&TsT@UyyLmll(3}*C zcC@zp3}FDXJt2bcrKJ`w)Z7CuPFa{P{*FoHaHwSy5g0pve0cPY)n_no{RSk^%ULAV}l+@K!-Usa?AAJwI3OcW8-QrQ{ zYV+5oqm?hyhc5~M%+&mi`|yf_On#s#Up}bw9V91~8M&OVmS6J?A;k&|Y!{8wgF97( zHq$p=zny?={OfwHG(#s*SnUQua4s+q84GI|vN25Dqby2x)pVTQU(dy>MMB^DRo;8N zUU(J|-d0;g|$hTp8>I)1!ya-H)GtoGAxqIc9^U@86^xA<2G^Wpia zDKF=tE^T;R?&BMGGGS#acv(9&vh}yFmk@)ltH?+q-9l$?96Rzm^j$lz8Wi3NvOc7fF6UhT0KJ1GqF3D&BpQwqY-e|qHy(1w{P1m zFSBKe++l~n$N#GHH+{E**7;iMzB^VwObD0tr$&U6^O_&a{0W$(z|bGKJB8d9#12XK z)V)Gz5G}2AF)Ds0T<7Cgn_or{(Hf& zLl>bE9sB_jVHBMIM|v8Q$S#Psf3^h}e0?FydV-{f)u-f#M~RTS0To$X>VM-z4dK2= zb^1|3QCc^Biwx&r)kJo-X?!g z-%|HsyGk=iUC+;cum2s*sBHnA+GBejby}|Y9pgyZ1e7Nzc0vb#c6(I@Us9Y9aa^XXfR*E5 z;$HGx!(W4wisY!HGLrpGG58x;^3#*t93yVi;@7gG7=h$GnTkr!bGut#qPOA47tb_P za?0+qLg7&yFw6sxspFdNPvYUS6jH|6|~4_ydBhbt$mrr z>OwljmzsTV)wm*Q&qNPN0Pjdk3S+si@IwB~r!MO9aeQw>)x@c%?6dh`io3%7z!-ow zJoC%jwMDBoXk2);tRJ}#p1^4K> zB=R-$5CVO0G6zVGy!-i)b>XQTsJ^5L$4cOwPVoavS%&nGAz(!eGt}FXgcM-D-~*t5 zy-)a4W96hxrH%J>jD^C*U%Fky)gfS;Qoc4wdf(;|Bm2Ca7^FyhBCf;;C8>bm?J8u4 z@8Ftm!^0m_2`>`FK_u27DQP=cG@v4gn@ofr_94KX^ob)d`_B{pH441 z<_FdQD@7xQ*Ocrn0u5ndUCy>)W_tdl4u922SQ4p=Z04SwyAOt#Xe^Wsu@9dh$ydM= z6slN)1PmV-Q_BGH)_~x<{Y$3HdUK3$5FXaG7-6!vZt|@SYllN3caqXJa9h|Uv~C+b zy4-wS+>v>&QveNCqjvac?hAU%@P@0vTVV$FG!W$=aQC3r8~Qr` zqC?U%;!L6WLIDV=U#VXD5Q&5XdanQTTi23I&IEQcLtU=D+iVE#x&apYA+w=poCXBy z%oOcgQ_?_9%+M zaL$eV7{=}gyMfaU%R1==xA|N z@HnEyBap$toDYS?!HH&l#D5ugX-^>J`C-0yf3`vcDLBQ$@EsL`^qo*tUqW9vOI!lb zQ4{N02~!@Ph`%YT*DT?Q*c=rKMIE1$2&+uV$M{{8;h{-((fc2g)!#1gf61aqe)^(d zf8=hd&Oc6V`-bB9&OHQTW;ANpJqqIIv&Lq&%t%ve2Kil*q7xUwMo6IDHF!;P25Ap{ z0U!C?=jdSE*hkSLVy_V^y1)Ar(lcNnwXYCXSo5JbB*ePTpSY4x^_j-|vFBD?6dqK` z>@!-0!7SKJc^O>T_%JI?s_CIooRL||S`ecjOa%K7@OPYGZ5KCNPaK93?9oBqe(rhz-ETf}6RGYwbsfdDs0NO?rNVa!&)`)PbM0Tx8?+l>1Do zF?z0B>AJ3&V%WA6Jd8K*EinH1-MH@XwBU0}`zE>OYtm&gzY^mBQ}H0BzRQc}NEDH1bFc186?vcXE~H`W`t{xyI^s 
zG+KL2PPo)Cy0$MrujRrPLQeR~JFR+{f zc`pK52b2sZDDsYi#-bS5=@BZ`qb?8j|F3{%gblav!j@5eo_z4b&b{gr&+Dw?r0t@T~p*Y|(BJw{JZ;P{vU) z+{xh45!^OOWg1yA@Tg_2RPgD9MUE*w{nlqY0_4BW*AECt!Pmtq98XQJnb=T&B)H|k zuHJNacKRZT)%~$a;2!;ZRd~6C2DlpRmM6V5F%Fg70N=l+#F;tqMI3~}hf+Xrzo}d$ zmFP_={jIa*`x){BvmksG!I+<3u)i`|2!cQu=20#R7`E;bWL(;-riFsElPH8q-qEy( z-DaC{^A8yPA5q`YNASG~=Xa-@8TNwP^6(8`Gt zYkKIVApHM-9EM+}=;rIw%}^$<3vH~r<;={C(;$}`9D~D{R?z5lanrC+7Co}?5Wb_< znaMh?C$#wOjn=FPYDx?h8Xpl>WJ}q$Q5<+1-xs)B*?~6*hof*vT*Y%gG+W#%WFh+?PO z4R91yZ}`abH2rBkqr8cC26eR?b%WUp=&H#@0>^CdYdB}-Q_{=Rh4({C#ApFEld(_8 zEqe9sp-S&{AxE)$*pH~Xef|6o1ul7V;ew}M2w!=X1y;iArKc72Y1B#5dGeZ2*CHx2 z5~Z^|!B0G9y#jwUX{>2Pcxt_*lX`_Lw!lz&MLpl7T^B2uMM1%_{tebNK!cM(*&s$L z9FPmXt1Z0Gci1~x9||YI1msZbNRhhmP9%^WvZAnM;2JkM{G{*?Y3}KX6hj$^a|xqu zES?L@jnPGra*NITxTJ&c0R%%Pc32A^TpLq`6P)|EjTlP#1vv>tbv5x~a-Sl>qCcn` zb{-J_)C`#Ph6!<32vIEFItHd#!6J+O<6$2{FhGM)a26j1r-8ml7^em33k%VM(l=B# zsq*>SX}CK2y5x_HMC8An;cqXyAEEdr)B83PEtPx*PH->lyg7S6$x5VyFoO+Ie_sKg z9a3Qh11k05CT*THk1NFV#u1k#>HvJ5@RhwXlt!0pn=l1^-1Q<|?7~_L6|_5PwyNng zFk*Ir0{WRt6H%4kAqT3l>xy+n6A2Rt(C7}5{i=nDX|IR{MZr_(p(WqpekUT=axQS+ z3&VM+2PIpCifK{E4Za$&e~9ep@+mHT6F)OLJuS-x5qsS*86-d0b)sqN>gx7qUaI-1VHLdoQDzdd}l{kXS{ z5HkNe5?aK9iUc$n9KFkVr0qw5a8YwA_t7+1-iMk*~SF zqIY?sC;K$5Z>t>Oqkf|fG1RN=mis!HXU^VFreqX+rbQi$o&qhJ?Mkp4Q*LyvidQfPrsOO>OFf@VRLWDTtgl%7OXt zpc$my#B=~r)Pv%e1ECE`Oceea%z*S?V1Jil|D6joggd2&(5x((wX7Fjb+WWSV*_Z7 z1F}Vm-25MLs(;vAcPkBqszY}htQADHbCuuc=_4fH@jNP2W!!F8PZGKRri{c(7aL)W74^ysn&3@WLAyO2Gsxwew_ab z!hKrZM!)G#cYfKqojwZIa1FYgk*-L-)PFz#`W$1TOUcdXC@;PV*T1Sy;D19&iTA-BHU z$f`=jn|1zB`+l|YK>h(I1QUto-}rKYzrw&vQy_^=&T|ZvkNt5nJa6u@{N}8^On_PU zcLVAGW4cIsHD;ey`jkvWT*S6~(NTXacrfcFTmg;nk>7=W)OWbG;>AZJA1B&OT{;0R z!?}6@&Y&+EoseTx?rRcv#Q+3kZ4jCj_FQvwv$ZD`KGPD$O7RFt zwkk~otw$CUDmV0AHWB*dfBo`l=>moHdV^vYWB09Mm0bU!t=9VQLHxI~ph1zW_L@y( zm2XitDVPW`vIY;H^gHUg!RYND*<^+K$9~T@M>%G!z96|U)e|9x<+ESR;XyWAeS8^A zfRAV+h2M8TjI8gBRJ&yf?Bvqv>>4M~?S_%z`fU(_F`}H4Ap3RVN-3*Jz=Pe+Lz!ZP zQVRh&ET2AVjr1MrDHgT`VLHKUMt+wZF<7XQB-ZC9eFHOBo>Zgj1mbRCb8<3vAFQ9` 
zw~lua%{tSNi>d}0ARvh(v~>TMqDkxJTEd2u>qFGJVu5bp3HxTEuRs>mO# zpizC)dM1R5G9>*<>1EHJ-55m0J?zo+{qfz{W)n)Prjx~=EZ)lWXTa+Gr_Y4=Z&ciW z0xRfD_ihFqL)#(In6jNGj|FK^cAqsWAo59Ktv2xoId*wK-jWa;T)_W&n*kPhO)x~g z%kzVyqoZ)=e&RW3za{?R+P(cr_2h> zIFFLW|Kuy~2|~$9k<7+cHAJPd`x6o#x*`ngCO>Ec`#U4#!=8Mf7s7|V^ER?={dZ(q zSzHMINU*y_i0NE?%dbm6;&`m0|HzQH2_Kh&!+Ai?YNTP8CTx%>II^ite~O;cl@nW* zB*+VwDcedk<}q>C=<1zbVsgx&f~jfq-Rp2`5C>%J;IVBy&o^QL8EZhkhQEb3;a^vk zmrF3I^o!@&+PLj|st!T$^+H7xCCu}A9($9&jR_QmpSn<#$Sb}XoMT4oPVWHDSY$#) zj!-c0;px!~f%BqSc;ob=*nFA&Ue*QekcI$?Wj9loX416s3L{6S3O#G>G8Ec+BZEa+@3G|D5uO5#HYcA8 zR9eiXY1s&{L*qeT>7ou2i2jhUJ&`6mven*Ix5< zE|-naYoz;ChB9(T@#L>3Ifd`=?DV^gHaR#?h>-Md)u)|ru+FrDJH3T+=4=`dfbaDr zpGSCOew?LwbcF}a@O??*k!SR$i+zxyB!zHiB^h{36LR_q@nmAkJ&;e3ZZD4p$$M%m zJD5^oP~ofN0E+}~Z|dNn{QuwyTf%+OAY4A3>$T%($C}xrovu4&KIf+oNIYdO7ng1U zbEhnu-szn}N`CWZQJiam7QflKXtxD2K@UjvtAXeg+$+(M&7iAj;v4f-l}IlsJ_}z! zXsIF9iSN2{4xB&2UV_*D&?iLIf6W3&z$)T$_2KCJg2OjV-UgHZEboSa)lLG@tt_@! zJ4hE=24%@{V>26|Z_!!*8Ikl!jx3w>f=p175$J$RZ{?>gUTnMz(wor#!w~qNWGCoX zRV|$7$IHe=>B7lQta(<*%ye()}*3wP@m1ui*5FNLRs8 zNkhYQV);wb=lRl}dD|q^{rr=09ReDSZhhiRBNhP2g=u{dSN7pV0#)HEV%QRH$(Zdx zxC)<_ycbRi*}zpu@+rBp=EQ05bIFS+9J+6cgG+W{ny<~xacQmjGT3JI zuafDz*k*a-`Y@Aj(`2jbl@Vh6&GzD=CR8dD5e`o1{{H?ikkQK=WV!Tj#TGtv1l8(O zhlj;6oLLdZUVHxFutn?#gBt4U4&Di(TdF4l`Ge{2=+Pw}FF;W!Og7dSbx$a29ki?AvE*yr&MOB|1enl9$>l@l@=(m}#Q7Cg080yN`j$s?;St3)S zW@MEvJ$Qz`!RQRaM}?+lZ4ra_e9gL$F+c}nheG@jBa#MY7Y-?wqNLVmta?1Oii&ix zzs#Y;WEy~G|5E=8vG5m6`(6Nu>=CfSs@H&t}^C;iW*0S>O+!$^jn zeFurqS^+{gT4U2;ut+0f;>J9L$o3DZ{PZbRv@YdPUF8R>bpl1dM8C+^heRRIjh{E?a6RPbKXvhrn(?>s1o`=Qy!$NNRTN1zLgW6KEyjQZ+oHFRaP{?)$mBjLev=CIaHqLzaCv_lOMLdy} z=$pepeFPz)a83SFWB(iRs|2+Uk~){Kude~v+|clsvc+T_kv7+E4F$3@q$|Q=f7sm; z>|}ZTYlNLkL@775nJ)FmY?62HpDEP`7N4oAJi6> ze|uEl{Aa}eOX^klE8z=ntT?YZy%&8nuOhjoiszxpwMM{qlvc3vZa&j_oin5OXA2Lo zL^LiJaM?4&y^*?FW)|{JTYzRsKw6YH6-Xw47XYtjg^95xQ$CzIX!gp8;x<5Lga@v@OA4%tz8IxD_=9&16`NwZz$E2aHx9|JkxY#(>He zwcV%h^pM}uC@q=@PJD6E&Agh>lVd~NKgXm@`Bxg!*e!k7P!0K7aY}@ze_C}LXyM7+ 
zp**kg3V3u#Yf*T=<$UelnakGf{u@~OC(3|>!vQjre63EF+{d85ghCh^?sZ$x?#;Cn zqY+-45nJ-Tt1-uG9(yM%E5ZDYFcxZ6Rf!rEyIT2Kvr99Mck9|=M3;`44O1MqDEf!H zeIDkc+GO{4iCkKg~$>oEAaI|9vkCUt@WK~_?NpC5^^tB)t6ee_T}8k z!tZ>Hrz$UbKbi@Zmrv4Up2!()X+iC181)_x=g7z?DAC5BF3GQv+y^x#rQ0Jjz*DX) z>q}T$#t}#=0yP>R1f82u=G|S&pd_AG!|a1cQ%VNri)7Vjw*!o;Kflxr(^mwDqx6y( z%u-S!7~~1MJ;t~_&;GG|b>`zpm$?*Nx#(NuC13!FB%W7!#lj%}F=X&e&&|9J4jp z+wQHlCaot!+!m^(fT>a`;t?j|b!>0KR;5f>KWZ z$%%epRQt8ip!#?=c?2h{z!9=yEG_?jtK-L67`$M=U~%PWmQ@HKc6-DhSj7#2y(V*z zkzRr%B4661^@E`Cnk!zdR!J2tO=SCi#{UsvIw9 zMj{4XTpERwHn}^Wk9eMc^24x&gSwCZw6>#p(04PqaHz%O&=9C7ql=hL2;R)X?bjOl z3Mn=`n%RIdNTD8Z>+BL&O3^Ixj7MrzF6A!)0|oW3YSjZB+-X|u7j~EauOG3rM-#Nq z){jx{5$JO5Cp^bcC2Oi5lE21{qfN$L|bxAnyvvU-W!C`(p0X5Z==sW&LwGeG*%lSJftBmgsk8JdsHC z_}5%XGyrZabKv-ud4aMUyxy)A;7uPU+AMXACrbZXu?mhlG=GC(z}YT zaLKERT=^TcUPoyxF0h8=XTE4f0OA(PcjRXDaU%ha=!Xub%_S@`F{oaZ7%J#*87Un1 z5xV;bwp$lrf}?qlkZ|D-AJ1~_ca^py6ldiB1k(z*emaO1{j*1ge232@3v(m~YA??u zu%FSM^UGXKaZlMy=6;vAR%nj%Sgw=Cic4jv(vm4}X(fmTMq@_s@VF;*K&&|HCk z3R}iAr{}hgq^83qoRv?TuA<05UM60+*!}g#q$1BtYo&l-)^Bk7H(&&z3xGjU&d>d>7wm8s59?h3DSZC$t|i>AN6 z=k+&SFIG6~7WHvE9MACmb~c2nVAn1WDm&V@&D5MzB7RIULv3nBB@ z5ouShSe8SNNqIU8t_M86^1YTrXh2fh+c5D6`hfwhYi431CZzrN9S+IiQCbW^ZivPN z3@w?~m6tI8dq>bKM02RFxPUlioR(O)8_mW4z-kXP@cL9s*-sBHeL8`Zgp^Z)Rs@vm zRvDtdu0kpM=ZL}|u_-G=2?o`c8H+e=tPB>JeyK0c2R72*$s{Hp@?hOFLXfH#k3vrw zp%kdye@^C1DvHG(p6@@4C5~oD;k|=ZyGQ7gMT;OnYe+&tzs3wD_E81Z}!b8!F zXX;{xgq(#n-qhxs=@U9pFY; zY@@G(S%sGU%?8g!viD{1MacPqhS`J7UnYArzZ*uk%&Rx9o!93a{QUEyB4;b+>B^q= z;+e5ZQHXpLxl7;Q?ku*3koEFwnnmxHLWiZs+^Yk8`T<=n$LdGvC$nC=Edt|OlFXYa zJ@Mkl!9wz*lIVoVRgZ^s2MF{69hqB66haUe8~>pP~5>8=fgPFlmRYup|>vh z&5@qIt3s4$G+Sq05dJ~@e!jHXBwNAibosGUJ=N`VHMh)|C&NSitTVDri6cGU6(by9 z7$Vkl*GsQw)h&&5&kmVMS(C*Oq|qQpph{!sbGY(HuKHhvj+MQC;w*58X4ihXMTF`Q zI$yCgc)}s1nu2m6F$oJ*!#Y#(DSZ`iLKT+MUIcY}?lI0%gwMICEGaD^?nVMiUxR8I ztG^s^(rWBEG_sVx<(?*uWLA{A5Dk9X;{wo$lzGXdgkkxaK7Xn3mB3V z9P7p_?yF)YF%G;mM{g<7e;3NpU9Xp2!I}q9FAS*PFW6Pj?x0co=vW%p=|%V8ig~Nx 
z|8kai!w*|dotzJJCI!449xXuqrI()|z$uzL>5s-B^#xK|?$wcs? ztmn@4Qr1z$s+YFR`w{u9bc}t}xOY+cs+YV{9EyaP>Tp((?t;Yl&cO$qyc2 zXX6lr|I*PA2oby;`cQNXiUyw-(H*9#hU)grPv0+VJfhH#P;qfH?b7Qr-M!6huzEWR zyx%MTk&VakGGB=5v#lCoO8NTx;%raW~eQm`A65k zO<%_dotB1tlx|a~)=Bq+xjUdmH>=?k-}4%ev@=^ae?{yhW)LA=$YE`aSXE*`_6t7+ z-`X(g+Ff-N+VIo39QjByxb^m_xK{RZR8>-yzEw*ZGNt%Sv7EBAH&HeF z9QJ#t7*d)onH1HdvP9+gvZ~KDFX7Gu-3IZ>m@zT=vF@EU2I(V2Vh`-?v*gcAH@ljh zeV-318#KCVx}jpL`<-yTyTX<*2mF}q5NxTKmM<3{uP3fx_3jw13{vo*^!hjo_xi}W zI^CRYsn#O9u`NfpuOTlOV${^rz%R99NrBYD4 zsYsg?GQm`WD)sU3geTq0JFJyh=bS(on1u)w5=P`mxq4{;evH>gS<54s@-4AMe_Y-_ zYSm{K9>vo)5}Dz_mH)MrpC;=ApLT{=oTKJNJQi}`)RJ$nmrP@o(-q&cZy99X36^{(dw6?brH-c zsis?j5vMwj)K56nmGBX_y*ikw<#jF;_`BHq6ZbT=8<_#+XL~mu{ei@o8AF|f)vqGD zXSJbPX#iAmM!Ce0H<^<*?L}^=+X`Z}bJcYSY0*7JQ{i6@KzP$K-9nLdb^FJ&eun`X zuNG|YQtP%4RxKtF4XePy0j$i&8dN1NY??G^~44^iY7oe#<1 zy7$&eAZ+d+T6%@N3LmAvJZ?LbL8K^q@{` zn1=_K-5Up?_K*%*rROEh%`2QpT{KLDYK!aUcx(S_h|(!M)U}Jx8pXs!bNdzzz4V;k z#ugTN$7iU7-n%K5vw+uy7z>&^QP|?Ukm8#VASB9EaIp_1pZqA~tG!9A|sC z0Gi@#N8*dY#plI?--2bHt1xthT1p?&#CbdQV;R=yR8{2=B8NMTUp*Vr_NlDJ0fd`#)Ms0Vqa(I9L) zPU4rOHQpky7{8jmx{fr9D%{R^c_})^F$FQ}x-4A@rz`-M$Mx;B%tlQ$ljW%#KKRd1 z+z8^pKT6J@V@8|agJ?p%sjA*3`DILbgv2?;Hr!(I%wvBD)_)TWcnL0h7gkKBP@4-X zp$nK(MJwFH!m%n_hbTQ(k3+D>7;Tuic#qz&%earg)HEJUw3A1Ai_04qRHM5yFW1Qq zXO|+B@+wIZRa*C>%X6PusGJR0F5*$Y+qKLbd8_A6Dl6%3={mEccigpsZrI{;b6kBT zs0hTSoKobK{3 zH;!^#$P1d_U>;%>S{y1zJuv7>oq0Z&5p7 z;xdIT%k9Ck=4go3ls^y;`%Mdf-cM>AIg08yz)XkPA~**2m81uH{5(Xl=f04hcGqw> zBZZJBLj}~2C4?@2?~Bj5B)9k)0-#RePijO!_Reu77g}rRVCtEko;7QHM zXT|(8EkrX91+d#jL+)AJF7m88YhHaT>G5xCftv{ta>fWRHo5R9E3CS zu83;w)!UlmjmZPHbz@K5MavJ}j5$0G>r%3fZKNsmhYVlTY9Oz3DXqPX;Zpi_QfV73 zNGxiVqm?r3&qxJnr*&m#MbxnpjWQ+0$l>24qvQ5UP3<~Y8M82remFI!?}am;#;uN3 z+45L7r{;ol*0P`N>-(%ZkwBDl)MX6%123teY#6xQ&27L|(!7#-DKw)afU3=cQKO297FFn)jhLNNXyypNxndp~EV5ViXk! 
z3YBq+!toDsQBxAD=KDY50A zKX@@#dvItyHu3RPI(YW>NbZK(H6fb4ENTjdTM2WyBia(;pN(rJGEi_n(HJjsmrH&X zSqpn`$}S8K)~8N6kKAhJZ8kI5YG*&x+$1_^x~6CHgo)Un&0K=Kbe;U&411QGzAss; zVz9;6$CW)O!bFyX3i482HUoaTuGN1{oOnCt=KQEE(tDj6VZ6|B9vaf(WairKKdR5l zh2WLlW0#AXQFT%F>dh!fyj;P{tW)dSrr{{TDhmWGZ`Ws39pnsHB|{KK36pDdISzi0 zCnt@om(Kt+PInsv6f-HIJ1|qtVpT#Z)5Kwrf+BXKq1(u4CdHYX!gM8VoEkZ#o0;+lI%G6g9-QPvf6tdWGO zbvM0RLaD9h9GEm5%FI0|j6;3+R;o8|Qe35!SsA9v|Ka_lej9bdAMWY|!5vlHmPlRB zSD)!Vz$1Ub9f^AziO%oAo!Cg6G=H_{wYOpW<%CP{ka9iL%B>}1o_lx&Sne3)P%>1@ zoLb)SjPmRL8RnPSr_Vrhh}G_U(wqm9jDfg>(Bbs$Ni$w~?R=p)?pc?WvS^x=T*-o` zqdzt30`f0J*s9LA>0{p5!)jcTizwCCoP|9NMsq;Y)Ceh-FeuiN?Ups`IMv%Rc&0hd z5@Y&PAE;XawZig=`g_K^D0u83$-Cx!9OR8y!O3?V2ns0G;8{Vn^xOu~H|%;AOW8zq zdZjEeFXAIa)d?6G-#i$3Yd+WQF>-Os_AmO%M z#&eZr`s{phc7FRA-!EuP^n#v-WyX4qTp#MwIeqDcT-&*BA_}el2?Jpm`fzD{wpo7l`;qChC+qi|#rf8+8K0>itC;PGBh6UO zh1n`@G!b@HF3$rx7OmYxNZ!N+##?mnjK}YZQr-h^&z8e4@PpHzjj(GB@c$Zk0&=3k zK~!eD)RVp0!N&#q5k&5T+LU;5!$mDO3uButdH-F zIvQiwGujd!if%Ekmb!c|JU~5WE1Ey^xTXm4W=m4Z$NI=NoM|_Oy65t4xbGT~({h}p zsr3uV;jEr_rhSh0!NSLX?j}@Og`ixZxZk(N?DNEvSy&O*InhEpEFgi;5lu9?4Eaz0 z)q12%$jvYShce#9h!d1NTZ;t*v0rqlb z+))Xtj03*~qm_KjPX$Hv$U?eH9K-Rx+^KNJ(l^|4&KjtJB&( zqL0uNOWc4?;GKlMupiZ1&IHH8<4_K*{^;p&VB2_^c-{VyIUo{4O41hvs>E)9LO~qW z>V!exRKk1recmc{Z(I?hq~$vNqUDh`N}8TO$<`2B-ew!yDrSYXa{^bWvh z@^e}TXi$H`7n2KA0R1U96gw39k>p0W6mHs=8JGO?Z-DrMilPP-=h$oUrs9@z>1rXo z>HJ*EKC#`{yGZ?~!k`e!7u6U`2sMn~q}GGqPT>($I$yM z?+<4?N7l`i*bFE=mHwJnQ{-~R-lJ}6Q|hJQK9P%btfR_3dlPP6eqH2UdKov@OCv^( zILAV2Ud;Eo)HMJKcuvHQgQfwo6|_pZGcW0J>QGnP9lcv944HhlFpxux-J zL$PgMgZN#^vErSzYD@{J+Yo&kXmEF~kD^o0rgOGZVWw;#$rJTdK@Wgc>fkgibC&Tk zVu)92k5$VzkrBclrm4kwknuNzdmtN)TmvtS@FG*i-^f0y3Z>y>w$mmtQK zZi=#M0fRl2PBzhhO8kn!G6GW`2WnMz=g-FRiEqSBDvZ6ke!yMfSo>a-8s&cJxXaCn z{Gk&&83_p4)%I_l`{KPU_hK&U?Z#%PKCbhBi24eyHrKA{0x4F!EfjYs?(S0D-Cc^i zyA>$zPH=a3cXtV1Tmlq#zMONOcYQw~E6Kg~y=V4ZbIo4vy@}FwQ_5MXZOT|U8L|xQ zPyzwF+rjyB{P>3 zI&Z9O@9y72vhwx9}jH19XO2D5eB7t&T0#h0I zl=9q*wNhE0U+DtNh|p8U?R*-~3+fbg_e3~q+vL-N{2boMRK$bDKoaq-?OyK|gY3LAhZ8_49oMyxseq 
zCU@UnV4Xuebqoqn8@-n&lmp@k9ljJ(uzF+H4E{WDSf|sKqjivliI9C}4w90Gc)s-;+G<+w@aig0nhs+$Ct7bk`(r0SVo~Vkg0!Kbb z1hajlXbM}>;J_29a1eSUx@AAPlt5{4lcY;^NF}<$mYR=XOx#*hb^ri{gb5cVMHgwIGg=+>h zVW)8!tHfPn`V%+FXEYqJafQ!Cth$>;U~#_*fsuigOHB^Y4k;aPVJ{oin^=NZi zMV8?;=F^Q8o+I|J{zaq+!XWbv=;XM&vDA96T}Ls?} zTrdJpga|MaX@$>~?Ihc{zZ7!eHgP6k!)whFc+SBaBvAFtLwFnmeW>YEWt(jWckCb> zW(SGQX&y@53c2j}{y5BFkCZ7*I3B+Z4c;S3r4x+QRFDa3hY$ReR8jvy(jFapMmS$Y zz-9JcR@Pi8XG__yb|y^}pF>m~j1;`iAoANY&gj$TZ^9 z$6!tPZ@fo{!N#pSU2~;v72P|x;a*N%Ps_TzEW1yrgmeELu@r& z3g!l0Tm$TcmoyHEVLY+GocP&Ozc%uVuDf@pMLe+kzOW`^9Z;^R3ut3 zptKybd&ZCXmm^iR#F;$qMM!$7M3~3(N*-g?UouI<<|6^m0E@}lC?!%=!@?=Bt7_sY zzpUCC*VFqdL#b^($^Y_CTn>#pj8%^{k7W`aJKKO)axAay3=czkK**ESrn-S#jE5Zg z-NHl@Rg=6!IpWRU=cx+guqu;QpK{cDYzJlQyM*^4CfrOgj_YaCilS)1_`n+^65War=J1nQU z&0_KMNvWmMdLIrnwbzMTX=dsAM+8&FM`|w;li)gadN)r(OE*vABd(C3Y|YP5ePa6z z4c%qhDky|R)LtBM5Suqy!#s8Qi5EZhjS=qKciKDQuFb|1!^YfqfMO@!rfKzks|v7e zYSE1-GdOgt>11E;#OZGfW*huM_#*#l#%!}=U4|Mi%WsL<$~#dLw#D2s`9(j)M@8If zcb4NM_Fu#A!b0DEHE~A<+{l*M%())Dmu1ayP~(rVl57m?ih|X`T1a&Dr*Kopw-h2zV5cCbZ;Ys=sp#WN<+7>jRYaULho zNY8_MG9FGbUQTExGtVP?BDx})lCW(*^kT{p1p|5*4w7SVqNFBw82(H?c3G|HyO*au ze?e2g@eD$N1sJIt3?_yuHaGSYYR)XA%2u`D-cVgMD%(96$@5=aktO5d+I|7Hh!aKQ z^|wDFNMsEga1!ih5fndz`Q)z>4J))rClT?FQ<*mK4Sz7+yHxc*mGm?39Rz6Y)PC(J zMbVhlljZdemvys=Z%w}~c|OYqU2NLR$o2R0EGh_pL%J4x&6EeD(IozEO8}Oi5BSp= zBgK_A#-d8cC5E?;*w9LB0+z#EWv{h}%yx)Ki9V)5@bVcwXZ3MjIDmoNmuvlT^Zkmj z(j>___Vm*nkIUmikj!e38RrrHc-bVM zDBx?7k0Av{VIs=b~t=qgybTw*ojHFf~>d-PCl{XbJx-VOm z6i{T^AzZG%(em#!gQXXTsk<=>7pD{FhG@pZ$9<+8({(YknV~(+soyE%QqWGFpoFPP z-W0gh+RJJaK}!mol3JWO@FmCyE=fi35*0n^3|$vi=`PjJ;ZJyi|Gii}=WxPe;Z1I22%n1-%(b3g?cJ}t+N?Wu3zcJi5OraKP-*(Nq-ZfLSWkLkgmw)yeizi4a z`AE=7q($fH_)L4@aY{b%^H37Q;Fp;l$ziC!U%a)JH=nn*J`yi`+pL>j$hN(_oZ1JA zrq$USpmTcnlaCMUz@*GMW_hN+Ek^i|Ccd{6AgUZnBnILW3@NL=iA z=5411^Xclvo-8&;{Cyq>9DH&apdq`OV2hEIP?p6oswk_kv*UM!BuB zKL_*P4~8;V*Kxrh;ZHdmj-TV#aX)hW!umy~j0~NJh1oEn<2?RF6yBEA!ridN(4~39 zAe=T{b)u{yb2v5h9uo6&(czX)l220Z7`F3c;NH~NA=GWl=l<=Y_0aq9`R$M=LFVr| 
zwd4D&MPCfl#DzAy&-r(} znKq;~nxBrXo+ePe?r)CmG{v7r+Ph3WFO2@6EFv@2cGNE{n{_^>^8Xq|wG|xW8hDB| z&V)_VdpcED$_ixS@MCI}OE}_vZC}`V^fwyu6*6$se|ZfwS|4^Iwtw63^M227?z&cN z@46wl{hDvE_d#t$DSfDXZ=tEmQ9`1s2Y5Q+b!>3n;?|mmqS26w73ft}MVo+)24`W~ zs+QDGa1{x^B0IBVowGWnj@`k*S+il}R7szQ@k<4FF`jJOoH#(Ro5Vm>1$GN*A(%JZ z-!)!{He3W}**0a7qa04G!gxF1CIvZM;Bc56It7|VU?`-m&MHd9bKNL;eMAnDWozrbLT>TiwH-ykj zz> ztCt?K0s-K-dwy@Gp(tp92)YY!BRX4#Uk^yG5_oA}TwSOVkjVl;^9_C^sl_Fk)&lJB z04u7{YJCJ(z=vl~#Iw!!?yQ6;dq@&zj}6wGHzFSg2+HQREnR{hlg_$3lizC8-57~#oa*6V!vC#hjSDeCM~~w zCz(tw(r^|{Zro(lqPQxy%ah~_LSw_b%00KRl$;8;8giQQaf{T~pamuvlk6%*1`Cn% zbZ;HCA%@r?ff>(c}BNQVimNz*o~kc&qm#Djj#Gc`!C>q`brV7!&f zJw4Y(9O3e_w39oTDy03^vP#2i7~CH+4WexIJ@%~P823yRan$@-%zb;nX(Rtx=G1%L z;GJkVx{hD1vUoSD);U6bw|4-1<6fF>rtU{RIg2v&zQdDtUxSLy>)u>2&qju}q6n~E zjILHww`~6T%TII?3BP93AHgm1mW2&IKx)6VRmbf{srhlZq88n9rzyB+O3HFsO+s^B z&n!AzH9qXZc)q~vRQpmL4H^0y5~GHKnc!U53_|DUKS!?kAnsNHQM(*h-pd!$wV)po zL^I3q1{8;KHKwZ{!Z;;M$@G$$qTFb`ek3shC&rr zoCqqomhB9fCJH5Guc$bkDMV$u!@Lu7G5a4>twB<#Z{JjgGK&p&Vo zLwkzBmEg(Xm<4BVw&36%!!GjlS=C!#MgKEPzYx5N+F-za!647#s*R_*4+(Mu?7ZQ~VlnQb zbAVOWpQnii&s1~vF*a@&_h{40bFFTvpU${t=J#D>%7+=wv)XQ&RrOw55zTZc^w)$n z=!8tMUyeO7V;q1$A%MxwO`i*ssjl7>*Ijq-o$g2~N9y=Bx67rAMo=^8*4fLGn=XGN zeu2l7_KpIY+fo=B5=vmO9kF%H>#fl$z_@azqMn|Wr{ts;GR|3~RuQ&DU54LTmi5YX zYUBh^nyu>_7_|rqPdHoygP5_Q)zy_6f6}R2KJI39*74n232VYil8a`FBNH5Lua|xZ z==B@*C(I?xfi-pc*)BkacH{=AFlRlwkvlD1YC(AfKy4Y8Ja>l*coF*a8pHR5xKGhe~O}|nq&(|!`>D*!kfdmsXZ6nR^YZ%SarfdcBXqS(P?EW^%x4Kka z%OG)(WIU8z-bFsaMLjK8d5s?EGJ;9O$j7H!xoG*>4 zFcE%T%aD9ehj>zQa>mK@D+c`aTq8xQtb3{SOMW7j*g>7EA1B-;=f~ZCz9=87*D%8R zzsh&7$jTq}BO>v1EB@I1i#0q^Jo7abWf{aENI?;k=_dlr}z-{Vbl)tn$0+RNBHM(u5 zHZf0W&Qi@iGWMr6q7Ac9(tnxeCUfgQCp0`|ZB6_|lgFw9qS>#^rmbUHT=S1{X;@Uo zVn+q;WO_e)j2Y-dZ-&c!U_s!NIO<=T(lN72ng*56CA&#>{Z+RBM5MkUyuRYxTE_XJ zUu|jYMDzRi>#Pvn&+H+RKOK~kYr2gNG%Zwnw?=saf$_TS16v2iY&%OC2=l%_r?{)1MR<=KUi{5cY-3MOH2OP<2lt8adWv_^<~4~3 zU`x~Ca5ahcFuC=TF19-1_O{2Mh7}P5Tb6~n3O&zByb&VZ--_+|gj 
z>~JmPxug2Lp;rc7CVgOkp=|PN){qWdGMSA_Ms5)w+T8*2`GV@&F|&jn+7mSQqBe(t z7I%{7h8k0ex*6Py|Lti>RPnP2bU>%S|5w{muN_afE1c!sKTkr5xA(e#6zNf^32>QK zZSh!ftmnxdusPXAEZfb^Juv8kj<7U2GUP^6ZmhWHbvdy%3*M6P)*;@^(+M9I-D z;g0NUm2J?x86xg4_>M1^y10W0#2eem@};(0g1M#ibcCA-0w%|Dd%aJgff4)jPrbVh z0xl00G3imGS8lPV*0$Z%1>TsaYjIs|J-QvKw=0PciQ~E*6%WUBf<0}c++%HH?DL-G z*5df*n2wq}?id^&Ot_oGWFVpqvg3*7FN2cZ{4k2>bVGK2y~^h5_aR`9TeHyiRZ(WT z5oJdM832I>1244H$hMjcJ3mt0Wc#{qv*;P2CY*lD^;>qQ9XP1Wbu*f}zB z7V|t~O`@`|WNgv6{y+h5XX+?nIZp*$sR)@;kY!ZSRAuWz+mqbgh0R@kHkS!15fR}` z+y<|gxZPgj)T7T?QpD?v?UUtR-#Fm~&=JV=J;gWsy19KF0qXUbumH4QXp{(Z$BM)~DD;vGsFGH>ICQX6$BOd+(Nx zys)HUsZ+(1?P>gbDv=2{b zmezcdj{ZT=uKYVG8cNffpV;#yBF)0Zh+R>L->vxj$*gd#ClDlgxMh>67*t+fZoD8- zK)YDhpC*i4ea_$j|7k$n?zg~?q~k!`7@Ep}=OkB`TqPBrQjqOgbH--0_;)ex1Z14V z;IR3Mlr(RR4cFOzH>_9eC*vED8}u?599K!(~f7~dqDBU{1%CYf-=Ms z4UJ+iHpz%L+2yad(iOMML(xG%__s8g(Q16>#HiLVSl&x95*ln8tSaiQjn~cbF}tJ$ zS3*gMRg7}-HVUG4oVuenPv{`pfxw0cVa}7baQ5c_fyV>^3tBq5H2lkNo`~;dcNrQf zCKEmz=n*!Gf1XC|FK z|VKklf@_w5?a93VpQor$eBtW4(je;-4a*!IZ1((b|c`N^t+{$0|Q;znod7Kvd5 z>Fff%C4RoB-YZ1UcGlSyBl>m3ReAuR^~=(7_K%CSy_k(@ zCYKS3*fVhGPq?pSS1zw#3|F;ztJhSG<|?)-QP?ij)NxTMf1ymy(K6V}NI=lT8Vb7m zZ?VPp-`d>#+kmCN0dfu)MXMI^#BsIWlhxkYW!K*>P9>0Wv7xnm-P-*;v9`x2gow4n z{9OuK?vnq=9ZUqQPchYGnkLA{Ts^=+VAeHOW%p+z7H5I87sbGk>pLQ-$3k2j~eSQ-4F- zpn|06qnzH)0&}?Qj(LOApkW~+@PDIOjDHs6TGO)P8aIMQe66!wa)49#rSq1s>o+Jc zvLteJ&V226MLEdwmK%Y5*vB(GE1I((wK9@-^%JsrShM3tUrxg0f}WE3U|xtJVahAo zs2Z4!;dC)e$O7W;u&M_$om#Nh$OFKC%6lGO;#>1%s<4eT-b7BaYotrcutrSjY|)24 zen^A^7!KIDFT5uF2Us8or>rA>Tlw;Z4*XqBB>N*&5kgf=ObiEiR`i7`flNV6V>ACw zB&~N-_e=RS^aML$Lc;X+8n_KgQgxw7N;}OT>F14OoJ_zyHXvFbSDn!$K9NPbv9n~1 zx9i-h;$_Q3=8e&Tbpf2Iw3LQMF>9S5>`& z`SQ7^&*4u&*>W+DsTX`^&;0t`nx$$2->Jd(@I$_gUF-ClU00Zz(0(PzI2iwo$>BZQ zxwC(zY#w!Ht8{GDe}B^GS5EC)szuTmQwGFbAs)W@Wshk-r{Z@lb>26(PkcS4eAc_F zL)lv%tRHC9ndb5QY0?qFiu8EFIz42uGJW}o^b~usK^(CyZsO4p^H%Rk=fgYmXEqkB zb23%@!A0IKa6;Y(lgAY~zyJ@7bMv>K4*(zEL?HCm9-w@J6HPr%79o`dF2aVsFEq%- 
zd~hR!aV?WZBFd7-X92JiEUewGdKR`R(SX{*g!$p1f@Wa(C#i|J`UE?kt=C+IkDw2z zY{&q~Q>URkfBXO9{;%YoaiG?i8p)m|Mm+IfNj#w%J6T335$H_r(Pq?G=6^@;%n|%; z{W(pav;P5#1x|VdBrW%294N{ni3@wmkZ+fh+)uS2=rnRpy6rSqTd9RfuBm~kOC6&) zz9rib^6TGLTJsrxg&%~O4mIVf$aAomEmxN*+-ORcr5zK3hXghN96RLnXrOc33G<`|8Ay@-6Up8|>zb>n=e3<_pP4=gNt>*~V*-~Ra=Q)qM0a?Y6 zZn>{-6qKz4O8O&cP`oyvMzlg%b4t&}>5%It{E7NlCnY7ttEU5j1J>UDShZi+pB*9( zJE6lB)~=bv4(Pf#hVWdHT#Hp3U=J*@EbDLWUNk*l7~X`Yd>bFJjsIcH-t^G=?eQqc z9Le&k5h(Lj>RtYg-q=Ck-W~UO@#;RybY~aCEU{0ySl!(4W6h`V8B{*2F9C7oB=}Yf zx~!F|qn*O!2c8_W@BZ>@0%B_YdZxGvgyPGJKmw2DDjv^Lo)apk^8B5X5`nnBH0*8! zP+5$`0X;zdPRL1KC}jLdjIlBi8&e;(D5Ru ziZ36hN3Ut3#%659i5W$C2%{o1OXYU|W$D~BTg8m)73Xj?oUsbFI>8LKz1)bKvOMN& zTDNZ2_@El}bU&kgq^m-*4M_|7AI!pce1|rZjawmY)$L0XQe-*O5$pDsn&6HC-F+nf zg!UxbpsotD@>{eg9_ff>L2bs&J#s4lRRU?DtM0`j#!5cP^aA=cWu`P+iy;3DN|^As z%dp}?T}fqInAYYO$-rbG8`Z2Wd2Xu}^nbsxN18}szkNxli5SFKW)r?=VuOhgpxjv~ zD!7JmS{N%R2q25rl56-uE4@GG=nTUalmkw&Nw<6++Wr$81x7O+zp4lZGwM_DV;T9(5gATa4x@agQfOUpo*t|Ygj3_U%S#93I z%t;_*-yxa$1#j{GW6hiypQP{xug|_#$ONKE-Fn1kLhdMx7Q7>88tifdjb)Te-Cy?6 ztChVtlx;RchE@-Yn)2H_!NNH%H5H!bzh&$1#qV&MwOSTUdv_VM)AyyXx|Xqubi>K(!uwMzfkC^nWu%|9&)HfP%?y5u zD(yd>dVhJnsd3_%glJ_Rg8ICJHolMzT~9=SuJ4Y2JL!b{k7>F833wu4;=c94Q-u{z z(L535ulmz~W5g3Qx0G9^BPMXos3Xi$^4SP9-ahq;G~ZAjkU8M)%X`C%g@rUoUZ_SpaPe zf#v>a|EtbRVR!@Ly$@}{1Tg!qi8hB*xlJx+a!eYY-!@9wV}OVrasVW~=ZPaRbL}5I zJLX67v!_S83%!hHZL8mtp-g$Wr?a2yr?is=!f}*KOfP6HSC#3!Kzc&3fM5v7ngHYO z%1)YI%TQ@g7P$46$_TwR>%eZ)S%kCY-Q+A z!m#s?t6f+D{`~om5}N|Xq#l%0ps;JCwtxOCOrt2HJ6o{i9&QLNCg!hLEgLv(;E~F3 ziM`A>38VEm_FS0J6}+3#C)A|>R*^cgj0~g{CAaX`U8`66R)t?xct^ED@21%u#Bsq+ zmb&JyV#{c~|1>h_Y~{&anFmxwypjhKyZ=#w37`{!tm%+wjFZFB!xijF7%V8Cz(@N%&R+9fw!(Ut4TxQ{GXw+g*4TZhxeZ_jEKN_ zHYQQ~fY2=SS8kwge#E0GlUML6Lclb*$Z_ENqN zDb^osHJ#{H)swAc)|o!EgQADG-(9}P)B*KjXw*anjW-MeqoPra@H4riGmhlKpj231 zyd!b8-?=R9w~nnLy4Zh|FaYa^AW0fH0}L{~i71&*$`r9VoaivtF2-?3J(dTM86*wJ z0<-RZYsRj}tT9PPbemyreqUSVdaH0cZJ{0CB46@DXgrfdSzxjy(!Bb`u5m*Ml?#zj zP1NYeGlR^$qvN?Z?9|Ag^^%1kI`uzIxl&M&q)t>2obu`D3wOBI{R*tqWu4rduLHIX 
z0#Pt;fPd2nqvhkw`Qk4v!HeAG8DQR%enaS52{;WHS|Q?3>kotW-mq~~YB`_PH(Z9S z&M8~ty9f!Ng0YrX7hr1qs~5!S7r~7_Ei`8pS*&$fV!@tD*Ww|dYUY<-R)5h%b4&>k z5g;?3MN+n@v_9S+82#U)D2Y(sPr8T$qe6=`rpt*4dvvM|E-UV7qUq%xWan7m6@v)O zfb756Ekh_01mvMptsdpeo+t$(E8j%@4^=U^eToWOrp?lqJkbXI)Q=*vf^j)TG+V?q zTSD~AUyIT1alvYq8{L_p`3EJ1(vS?v#OO(daGz&_0E?m+pPTOD*XON-DVrzy*&NTHc6Hv7HsX6mkPYu6|`=4kIV^n z)3S};jZcQ_7xJqw&!Vq)zLTBaMGbS6cdv(2hA@Pj@k8$fuf3YvZ&&xw2KeXf?oJWD z?GU0YTQ;IJBE3Du!8CB9nLj9JmuEO;AQK4}j~=lX3=}Y_OxNgx8m^jpsF0P- z=9Sa4VT^*n8gg9b5u2w^JQR6!mI$4fz95}nJP&OK2=W;?sMD{&^0I!hTg~Y_RE2u# z$Fz9{?wmp2R8yA!D$e{zPt%0!L|Y*ljGob}>cYN?{G_|J&u0vHZvyBh-Q?0+UrU>W zcm*wxJpFiVTpDy^_04Qc_4@Ql@0U6l6rq=SeG2Sl{@~Y7;mHFnX@{c{@*{VoMB^WZ zZ3|1{(&K0YWN>wKL%yVHNuPPmRACZgExKV zZvQ-|e&4giIeW=q4<9F68*oaVN0~ z@MfC%K1*pCo@__ygv)0&2|%rEsW)!U)^%U7p4-Y3`Cp&>JvljH3-fyq`D9y5n>AXj zVaEt!vsb zvBcwt&+v6_OcvYX&c>vCBclVpnGu9wd^P9zUa5QyZQ0Yi-5A_cbv!l394?BlK>k+~`DY5w{t6Z31j{w~Czo`tCo5R+mX|vmXJ)PRE((aa3JV}xc$6X6 zO`2Xmd_wp9L@w*xMC0ms!h_(8!Px+0PZqGTTVy^y#84A~|5JF`2vBS2Yp4i*@zK_YXDQ_%mh{+R_y z$TCTYAcc|S{WfH3rJ$`Pz3Qh_WL7$xh`RfoD}c=`F} z8|d;?SL@F?JBc84KB#bq^{pj~;$+deVpEEe+r*#}^B5^ui`nLnauwMF-Uc^&xBkAQ zdm3wuQ+3q9{)^K;^6Cich`>)x^SNS4n(AIvf*`GYT@2YiwU6VuSFYWwN+=q=k zYY>=fVlJHD^|jC-d1ow}%5E*ejGcqxCJ40l27JQgdYT)NwSJMW8JqF~+)a4MQf#T! 
z%_Vot5XwOe)0E+M`DsHk3&(f7Ooy4`Q&GOqM=8P(8QLsXEvv%6NresePtGSV_9TJM zouSntD{^ZeZ&Xz%i{*CcS1PRQzs>js<3Mi8yrMN`wn0vP{x@p4s{zRNbwq8(DHMZ_ zVP|c2kv1P&q3&Zlqx^@;3lM^8<4N^|8g&T65HJxq}x^s_FNT3ohk7Qx`30dj$#x{Q8p%$#|R061t9wh zaq%DuHL*y}MXpbH{(}Mw)$(zm^s)H6g@?_vnS}}Mr*ZZ)Xm!}%o(?~A=PV|iE&K0N zS09IzuJ3N&AGYW>pH8K;I@|1Qd2Rpu|M((;UMf^H>*tt$De^_NYO|cmcCJ&i2fBIX z&)n?{=tc8ehIbs`aN@3?O3~}+Q<_k#s~ZN7rS(sS5K9Qrut6&}p%a{4=IR((TPkt9 z1nWXFZ|Z%jiD0yY<+1wX13R#Dx(mn*zIg`AJ*A%g%QcD_$y}4boU7L{(sa_Td{=w!^H%4}QVYIG4*~US=VhGf9N~1` zBpTnT6p&`RYv{URiwYQmBA;#J{vrOzu@I1YMhEW2^2@K{2xIL`<1NEKiTLK zsO-rzMY&RK{e|j+q2OUqKOf13gSTis30=sZTo%swPbYAi{y&eK7VDwnB{z|TeIfrs zX*yLyDmgKwf(B8D3B%K%RD)JvfBL*ZIuMI+`DD2-Mx`l?2ln$Hi>;)b9hXdTa_FvK zU5nbwUCbNZ!uP(zu-bcskpgSK{iips4p@>U3?J(|=yFm}IwtCo{u7v402P!s z#8Tgu^=4fkf830hygEwy$8Ygb!0-E`)tSQo%qVHn1u??jOv+efm}$=c^7KE+;8F+_ z__F-7Ff}{7YF7l`)GHG#LJL{7if755QMzciAbM zvfL}?RFt|QEuMqr3QDJeksWJ}%B*+JFFam=l#vp>1TLS$*R!cWUs-b5n=zTt+vmiz z{4>I$NVYOr48*4S6FVOPI^SpY>z>40B3mu~WkR&mfF3>3?J)>oEZ+ zGl%GIUH#aMn^!5%|9YZ%x+t?%6TK0^fwTOA0L?21%pG{VRs3`+23j<0Pm2EQk##5^ z*uyH=Ec;`|6i9rv^d)M4~ADR zEHJj26c2iq(y$eig0MPAMI-ytoznNGNX!B7M8q}yVHAOGA}|(rLtbZGsegRRx&0?A zj4yi~6<`-MH(YCVPD(O_$e}mI$UmOAN5dka{a?-TV?lv!5x6kkq?os@D)faZy`CvF z2o~Itp_N40Bm->L+e=K9;tl@M>H*N?&?qXg5UZmxk6HWQk1Dta zcqd(T*@pjkUoZg;%Cw>J>vRdA36zxvzIorP`Z^mHZD|X;+BSjxf3~abYi~WNeVzmI z&yk|NpGkAZdY0%_vwS2N#WCRxJ|lw0Vxk0lfC{|_tNMFh~o#V4n`K>0~)Z@Gjl-Ga;XtDO7U_y>fkh0Kg)x($ zL5T?oQR8`As?NRtgI5mvjCJ4)bXLemJ=UVECc`Rv2DF&7J?n+>8&3)U#akfqj^<;h zAJCTHonzAHki+TaQg9cGqx&7r{4m-v(|E6$y0oagRk4CO4CvoSU^%Cqu{c`V-UB~d1?-ELKutt&woOUEn)u9E z1WPL!;99nk>bKt_QoJ{5-Kg2EvhUr0X>RAg3~Z|`p<^Pyg>C(uEG0)4|xu+?3P#O;$}VtF#8#b$;lv zv4yhAzeNS%@Hj7fn3dvT@FOofj2>4joWLHhvf(Ak9HagiT(qGeHM6zcPlS&CX!ZtC z`#p7rug2I9Ol5xC1eW5a*p0>zD*^x-4i{>TuKqns%!Whn2SW~s@ow_fy7+1TMMZ)r z)&_Gt3+4x1Xp`#h(eu%?f)Bgf3|4L711LM5dZ617KA`Dze_OM*^b~bN4|tT_XA#PP zc{Z+3`)x#zl{kCh$o3-7-? 
zb{&fq6_alZuoSsBp(SJ@%~3UUXANVF&5{H6yyl(kT{ zW%y3&9wd!7FWs?oU%}~xOku_5x{nlah8wiSsZ&uj^r{%c85`J|#e$ydA!vT_QDyjo zs7=`j^J_8ik?M(nn%Evw*XyL>|4Ik|4Gi(`IU#+@0zjM5q?=G;x*#*$ zWDe2*DLh#n%=plWN8^>!(a|BZNIZS_RfO|plruRAd~W{*2P&pGx%76ESREbD3v zc9b#Zs{%4^UB72tH z%{DVH)z))#p!&+fNkM#tDeyN%sO*B=e*0rdUd^@}=}8|(nqX7p_$O=0@pM8n34(6x zc(GJCl^S2#W#=l75w*H#X|EsTwRtKAn@oC!MBoErp+*r`jDByszS2KmByh!A1bV5f z`tsQ&^B@czll@U4m7!o7l0{m!DrKVhacS^d$TI2^%gQ(RR8eI9voFLml*U{gQNAjHwX*S_7X2%~65p`YAQT>7TySlbnVNXf@#F(8-11RzYs5*vLn&Y3g~Em# zGdsJQ$HQeVr~7+Xx%g#9v>>AMC2Ij(hVv-uhUsRQ0MBoKD(12z?&+Mlo<7SuW`9x# z1H&BwO;<=@?*GIXJ+y$W*XCmdD~DT|M!i;1?R&8#sz8gs^fOmkN|tZ-6|aEsbEtsscXNw^C7I&yr&!-l^|#ZzN7_uR%(-y3eMT;R-C9lcwPD#( zS2;+djcknTO(vg;gS3+ym#ANrTDhD99$F9-yHTk82wb;=Ag5aCuUPLy_w&p8A`3C6 zvnxZl_S-h3Ee>xYrcnJ6$YTiXlBH)N-l0s1L$`I}R&R}Cp+jd6t~My?N9ysu)Mn4MTA&3g>T%^% z<~tQLqkltNEcM2H<$8kh{IuAM7c3|U|35Ke1ipf8|MW6;hW=PusojK@XZlcajY%LO z@v|c%T8(!F$M;Sv8|?Anj~r+*afyYK&)gO11mmig z-|O|6qObhNNoM{NpU^l5g+~N+s{PJqO1JWgT;Kx{J9L07gj&hG?K3Bge@-qoji@uf z8&!YEnJmW8cRWwcwJdv2=$7J0Nk86c?v{F)xqx(aFQfpwm#?G0kF-^^qvn4UI;H6> z53x|pCL|sqj4WX=WKy$x)m573=Umi=c6?3IUW(~dTVy6i@9vZ`>!FVtk_Vju3rYFa z(2ktyL<^jD!qbGJd7|)oVUlZjQ|G9A( zGXGdT?qu5_POA+YLhdM&$88HC*U-|;1~)KJq$bEvHa2jZKi#z1X}y*epLKA@vD*LX zCI&I22G6yt?KaPbYR02`O|tRj4Tcqjc=V<|doD=n z$xe^#nOXl-KP@e-lr=-PI0s*ZGL`!QZQQUUckdK_RTbdLR*`*-cx|GZrXO{jk#R;) z(ZG|B;F`KWT|W}bncb$4Hg<0Dj0)wWN2mjduW|m3@;tP=5f4k`=kULA%#=<^*E?E)O`s=6c z*7lYeI+2_TLOK{xRtf8*@xTGR4p|DSnn|z%1N}3T}mHG&p%LOzpF zv@pRL6exUw>nq4vP)(5WY42jWI-zd6E0{Esa`%TKW%<|%6&|fQO{r6wzUHKAq-0M- zQ$8!>P+62zOxe^um*^|4$JMoMmfF*JA=`C~MSv5#U=ghC@*$*s&1aB2&`TG?`XV9N zXP-nrRn+zUbsUTQTSfY|U%5%>TW8IxN#5`5IA&0XU!=$aSSl}d0EK@<%}#_us6RZK zX*71T3^j~JdO%|%vh;t0J%RcEs3l`Vl%>!)4ihI{0bkBP857Y4&5^AIMZwG zQTQ&Ap^a+^%eJ!3MDO(lYU!>;kdZ~ipnk*l`JCH+$qO^6kt(R&SK%)B3c07rT(hNu3XvSt;6K`7HyrFrdL&(n0uHcd`2k7M0>S@3**B3F?~J zL3JJ|TkB3>5;qIXeiVi>E5m&+jNa?t+6ye2H997(oi|4MAGnz18{ou$Gf>{UmDTgx zK+!H{X#Xd6*YAcyh&5GBYk=f&mi{?BdP_;bF^Qg<*FSidaO4+8Fr@Wyt#bJ;*{{tm 
zulqsPq6F5C1N&fhtsE+0(iy1VJyBOWYw!(DS(E*n-u$b4Fj(P=^EVqt9?k-dNVq|A zHLbHVksWr8zhz)h|Luu$b)0^_*^36CcFOO(Ws2hw!b~MRWL?vdanl$LExCg@tPHz95+3LgfyXWxBx5qu{jmkm$eW7o(a5QBo&jD~9>|&W&wz2uUMRUocf8u6( zbaS4{izHKc@>K$Qg}2G51cHIIUt}ojEU@p6yE=JqcUd+}KAq^ovN@*H6Ow~%MkqY2 z8wpm%1`KTH@DV?LWq-q+5w&SqGL9nIsd}butFRy{wXX{=M1;r%pT|WYi!cO`3H-C} zoLu~)N7A}RvZ}3E&GbuNujma2SKSPN;Hz(>BBrf6#KP_^pS>P@1_!_yworGSaG%hK z(#oXlu#cQ@%FAXF+Sw6vf=_4|^7vSofSwc`kFlLwxS&inB)Isu==ul28&-$36!YXA ze4|~Ud6WhGSe@a~S_Y*Y;F&^Gu6qRlBT1Ko&FB72zVq5R?r*-f^`xt=V-Z0MAG zy-)%g;VB|zFw|41DW7r1A%A&)Sz1JVL&9!lR)_^U1U|XDyIX;E+gu8HF5DkL zJsgC>q(VV3T2JVNY6?M|*4EUB!@A3@c7KH?@3$`<5D}!W=GmcE3SRO0?va z$=U@A7CN)$Mhs~**8@37^6O7ZVaW!>4333t1(1qW%OoH0s; z$oE{ML(-d7g|7u44}zw>Y|Kfn(T-&HvvIO}Zl`Wl$oiSl12c$d{%dQ0-jW6%k>XdtKDE-ruzRgaYtQ)r>t49Ql#xoNE^R z2`>3G%ne!y))8RGQ5x#d{+e?iYq0It;XvSDRPCC`!7s_G7KN*lV7^=*#o-+V1H@Jw zWUxyr>f3P|GLfnRFsgVwg(XbgNp-f7~Qmt-cUw`jE+~}XQZRvoZ+&9)g!kpc|Tyv`I zyFN|%2Yor=>{eZ@!d0u5Xs+;!&|e#7mv8kA--xdLtpsq^%J*l4$1p{y>TybU^jA6> ze0=-C{8r&ZzxE+j5oFezg(+a)#*u~kPna#q-^L5K<#w+C2quUr;wrWQyIEMq8*2A( zYx!Z0n1CpUP4#eAV6OX@wF=4SQ)Aw*M+z<*@mMk?kPk8-btQAe6YM8;Dy#$p+6OU& zQ7u@DA;4Xl8wYI7>(GZYZVlH7Ew^2{>6xA287m-)Q&KyLMA+dbCju80W{k|sjFAsE z7wfOK@v-cmkT4sV7vcw8D4m~*`2Q;9G$kO0nV%TKPnhj$(Mkf4=Tr`xZoyXA3W&jR z6Os^i(@E`2Y3KmkDoo|mIIme-P-ZyM>ZmwhNXFb}_OP27FU}G#Kyci?I0OneB>>7! znY^|zyy4w4`u;Jte2@gk1e}6(;uV(2U#^U)|lK9T$>dOajHV&C{y#y4~AVZv!m-hm^ z8lq>`OgSTZ0&n4`<)fW>5%`+L>Pa`_j7s`Rx4+AEop>C&dm_TB!-X2h9T2(+M#L#vz=}Tdg__f=tTztW_972BjrIZmFD4Rh+W zPo?9%%g*vjzF#MUsgDmy;&Dx=N+12@{D!?_38VNzQd$?TM0q5UXVJ+LV| zO`S>N8zW!Ki4)yS$C@lsL&uUsn}v13PLA4rX+Mw>PvK59JB71eN}T-Vp;%~MwMwU* z9kKs69g={wjaJE};2ragWCO%FLS#$mMzAXXR(f@a*mYf}q6ix<;Q{8#3`4+WmYW`) z_wXra{!J7xZyWZpDD#pLR*AIB+? 
z8yHM^u+NE-hTSv+mIFvdY2IKY(AUAzJC^!pKiU2lR-U7QJ4p^Nl+lbbq?D+XLer?d z!|Q>XZs80f@{-?hIqw7l;>Ww)Jfk13 zz8)>1in<^~ex=B}wq0|!djsuwh%s{NI8Lbf-vAkDxm(e-2B)399R6d~GE$fSi*x0V-2VXS^g zvr5VLl>!wuS)`=j_9c+t7r4%bIT(vpAnfCM#$)ZD&3?NU6W>QgPAQUx>%^_sQ9_}E z)=fD3dsuHtMi4(pL`FQt|K zifMK7ZWAz~O@}y+6Mx=6l0d`eT;k_RKQ(R_d>Irm~uvc@Zsnu|)%Ga>7)nq`JsKHlZCuY=zUVQiuTyrdWcL zD%;Bd2i}GTHu_q~$Kdhmryr#-%VQjT9BihgpQR-o`K++t=7f`u8PQ_Znt#kpZ$1dJ zF1?vuS?*mPAJdFDbIdvGusQd=H_|CwpdAv)-pwCuEpBO>v{VuJ+IgG%sNV%YV#pku zB+uYVO0S@rig9hOJr<<2So&Pv^}95-e`Z4S$Ofuv0%O8z;pFztt|WEu-p?^(U!O9R zw|<8~uG9BH#ptt*}7t7P&Nl~p*jfrx| zDmG>2D1Lw`f?!~Sr4KU?=lxrE{X1gE0zW-_S3Bu}ARO;u&T93W%490~#8s+B)fuXP zxz`J7R!MrE4AyGO@}h8QZPH42nm$V z*3p*0FhE7Tv4>;(#B^8eFA`El0%ZMWhqZdbO+3Uu0#*&)*tmhs^ctXlpav`>m6J67 zsblybPy_hhaA1*pgb|1^^)(!bKtj`inFkGzk?-=>J`7#4TSJyFOedFZL9r@**qH=V z3sWq44p-pj!UmcVc4{n7eE_3Cg(C4tnwz#&qI^c3pd4CK**{+VbEN2@v1UL>=gKl2 zmOoT%sO zZOy_lDXuX#nwuVxXhx90^*4;YGqp0NcA_};`q(F#>?J1~Wm3Pw!%wAOe1Z}llI4oO z100MaFnSq0;Rg;BU59|AOS{VFA!QaJOgntobx$$~MLroQDM_`o?TTW}R+IfavZ2?6 zFC!CY6hqIK=Nuv*IwxGdlTH2?A`HPB+(d9zyZB{>=0+{CD&=Zy!x^0Be7$CpP4j7X zfe_sFG&0*0cB;5N?hh=wvhYu?^B|BQ-Y;5>HWqg)Lrn_VsyHON{GTvI-4Um>@E5pR zwzg=_>yk9E#Et<#1nB(b{|Nx|Sao_09qeV6q1-D>@JCQ2n2F=;`zx|);3@Q;@WPs8 zp8jm<(3fJ#O8_$lK{tjv6{VGUX|tIPj-D7E;#6=Utps)e z97rex7(_^Yub8jM;UM6Qa4i%ILW9yX9Iz+%lfnUvWB9yb{@?t8{F6V$@&x?xPX7%EkP1MkD&ef$cJV8W0za#gkc5OGhC)Q&@x|EN zyY1)AEvq{X?$=(~`)m(P`@_tWhw<67&LzjoG(pIqZk zHFPm6vGO>z^qlz*u%D`n_|p?YV{41TU;nTWrcKN4M{0LBRfqV4WQBQX1HK{SA1e;z zaJ20@H1a?X5;9=;qh46CfAl|Hx9aa>$UYhX2QVBcwe>iyJR~R>qFt!-=K1lX=OY5 zXy5C{@|Pfk&wPDX_|FPmS^e!?yhf8gDA}LT<sTojWP`4!3ZvTIh8NDGux12V|YNv+e9 zqT13eyS+Q1J4zC(4ozRQIr*AG6QMbtDfV9y!Gkrqq`l0tuXUT9BW@=^ogfnZx5C{F zK4AdZ4p6s~oVR`B$5JCXe1ApWwvP}j&UpE7U7k*lRS=v>^&AZ0|3b^QAo!$%llLrH z>H+0Aunb5Xmm-2A7o6`ecK3eugr!&1@^(M^A~hIfUvoFe?J1=)@{XR7Zgd#B;jJ?fO4sV+jTzT3S#p^&>H-|*HUCg*Z1^QzXOQ|Fm}I`OFm&&f%J-_Z~V#a5h6N^KDB+Au5U|LKHKdzLolP zr6g1JCZ%UKoLBn9xXUU7sj#9E&GubeTM8!h45vy$j6dCv7~jmIxEYit_tRP9o(Ok$ 
za?FP9Lxun4gmxwA6ziWWH8z$qgRb@QD^H*NvEW*8*LZJhs31G2WP20_`F)wC*qOh? zF=ng?^8?vrYOzzw2bqO6+zY3UpJ|heQngFPNv&t64_JhLI%X-l7!*_G=8SiLpXhiJ zat6Ypa4;Rlan+qppJrDTx;`BNUP!Klt`OS3{U?+Ak3|C>LBM9oOPEw_-NrU=6-Z2#fi)Z6n?J&I{FkG_5JK@OYxXI>XEn z^qPJ+1kW2prqjQFit6TE2QM?jPT0I?KkGaeeoR$Sfe{~!p;DgrQi2wCF6DgMCMez3Ikb1Ey}kN{Uj;_G3r|Ki z>SY-k(RRN)_7b_DJZy)-fhHs=GYvYHn;A>c;DO!QcH@OuupcY@U2?WT2A<5fkO4(P>?8tp;T zBMyDes91}#4P*Ga_F3<+Jh=f8w80R(lKP4`vbsSGp>R-Zp^b@D&_XqE-a6kx;4Fr` zui2~m+k2T%5=r8o&zv`VndNTV?`|8qpnak`bSYg?1BnkRLHGia6|6mp-PgwlN|U+y zC8cwGr2j{ogoRWk4LJm&6Q`(u;$~X>&9!1Pa=j6L{V#AREEyy^ek2-kLsYQLmHEUa zP;BkRI6?s}L?9_thC&wi3BK0pGu6**4<>4lYcF5_77bTCa0%I?#=fu}t2%NBO+xpK z?35JHI_VJy_E|xZ!WZn}0k%LeJS+5=jP}o)-rFyH!5=b@kVleu_Nzy{J@OPxhSGb! z=|N#u#1=|h_x(6u#I*HWz!{5{&qssJH02Qdf!kz# zBgWw}Y|g}EmziVq!GX%30qf2eFh9%?W}Tw~#x?taKwuN~&Nssk-936!%N^=I=c#A! zcwi2%TF@IHyz10Ti?m3#X%J?*6+`+i;EP*T2LC>t!~3@%8`MfU96yXV2&q&j62-af zSOxxhkj!5JOfELlAi4Tf>V2X7B>MOZH)+C!kz(XaHC}nMV3I|dQ%Xi0(20)B^==7j66I0CC z9!5v@^Ygm|1~eJMG~QTZSJ!0$4#eNVyy6#B-l)$=T$AM-o`vtz^#0xhe`5tA7HHbE z&P|=h0*WyTgaeBh?PyXF+?(R5QS8kbs#$7DaiB4Q6>i?+0Zq-9ldmVu{lV&r)|-S$ z7G&0HqVaFdk>NVqZz@kM45Y|>c|!OJl?tGIK1XF}9%iFK|A0)9w=mUS2D>D5=nw?+ z)>M>SzYBGO-nPU)K&IW*ucv1Ro(^T@R2A#4M3YC=gBEa;VvgA4-yrfG9K5MfQ@i1? zmVyjoq>v!~G2&#Mco;-eSfi+<#FDQ<4oe4>7~fShST$)))aNz!cGLd(o^5fDB0uy9 zk5dkf*Fw=S`pF4z7wIV>Zx31VI1|k;SlHV2X!Ls6dD))Vt}GwU%-)UWbgq5B>lB0F zsE5c#E9J3k%4SQncC$=Vq3CC`dqpv1=f&8~e{(iHRvV$J3heX(K0b(7Jx3^f$_65N zf~RHW8Of?H2$`cg_u`nOME9W|4_!T03Cimsc6%WXc>V@sko)ND!<%GTZpcHc-sQ5w zR^_B4jtbNNcmYr@Xi;qG_kRzn*A^OEYi+OnW^E>_V^P)J*H|_~M0CE6{4+#mA|D6N z&eHWo4$U8vRx3c;{Y!cL$ zVD+F)iH?Wmhmx`!2DlXddhnN3tdxZInUJY3wVCN{rkr#ewikOf2F?-y@hKNG+g{j^KO#uBH@j?l_z@gi=+ z`)6_(d#PESE=7dM%gJ4D^>FagS^A;2#upBu(d{SyTDBI7 z{$Y7{|7ps!m+_HSb_c&SHGp3-YUdx@>|f&}?^B*_%kZft!e5|eck(6EykI=DR5od$ zlnVFJ03UWpQc=hoo9nyg1FkNnI8=ZFB^Cz)?SiTG!vhjL{VckGouP8fbQ^kqyK?Z6 zz!y3{r39<-d~?Jkz`f8jv*_H)jf>2nc-b(4oz{I&jnN{(I zN1k(Jz^6QEq}g@yLSNFYHS+w`G6`FGW$+Kd{0{cy+xqbV+39lmV>XQ}_Qlb4b15d? 
z{f}K73znR}H#&TH9-@Ib4FX6q2#mysXwiS(zWMXD#u#PJUPu94A$X_KW|O);%x*28 zhrAStw9~K>Ew?{Xt9)>*K(T5MJ*LQT5L%hm$%@iGYpyweSC+MG=0SYT{)XGF&jVa% z#Berl#rLoRvkl1#P1X&hB{j_xhE`3_3yPL92uZ;AeZH2)nEtE zlwdS)4}v>)bJY;j6#X*354gro^o+-)N<#>Qp?m-jq_?Q$62Elv8@IwzFeC$lEBPQI4Hbek4(AHS3|-tOR_2f8WV$f_U9}`qOXW=-NB+)(d7j zrwD?*VG)UT2IjtVLEeu89(f2!Ctnk#wD7&s9luD~xx0f&ygX|-&JKeZw(iFq{9R5v z#}e*tRvbjGOKS8xGSbxFw!&)dm;4lleX$b+{b74>Dpj^^r{)qnN3&W z*JG*oM_Hq1rxZyp^^b?~M1Q7#z_u=dnEk)2{4ZGNe1gyil0i7attaHOzJvag7Cpej zKKtUlj-q%Hxm9eslogf!UGqCmxL`95korZHutWQyVj*`KebmBV%uG~{!~KCw%OUYFRul# z$djX%3V)=n4kgb=b{lmaysCml`)GpM4*x~Amqs76#j?~J3?(I91Njmt7{Il z18zj11xho#KRke1lAa!;&H5a#sVPWGRlu6JUleEm8dw*+gIJHU#CnA!9AP9RDPb-ci#ptjJQ#SyTh7~b2p za^Q0iJBc|dZ5b~74~_r7PcRgEl@NUeX$?@rwAq`V=H^XgHwT|VV=xGELrEDA>2@~| z4}f($&wjS&xcb{Tfb1bf)$43k3Dd9egaauGJ(L^YWE7Ph=bIGg0uo}E_SC7q(pO_xu-@|_uHNcv~8VqNzM)X)A*k6lq)*F zkm=bOX!e~wwOqn<9I&_#wj!USAUzbNTX*42(`Ch8Qbc245_jl>TKAMb zMhm+~FA-Xls_;}h1j&;fsg)rKtf7Hi)8r5hL_|U-+QG;eys@1T#6_$+n~DgyWV=jM z=N1RCQ^%HsOaE)$@V`rJ8weDlD>S8{I;TB0e@7xVwtFsX5E=b1G~)qA0d%mFAdjwK z{&U3!)w|cf+E7mvxK4^rflh{m*DK6JhPIHJ0u1()rzs614i3%gni}tp1pHLWMi~^& z#=4$8h z@$oaD#1-^#tB!`B40=2C_FBRmRFfuNuekeKI?lpSHtgp^RDwjyI#)j>sGwMH2FAKn?ve8HJ@uZyloox;%vrJm6xrU)#8CnpfT)EyN++8y=v16Qj;= zRmN7wg9Q4gzio4?v?qRwtEK>CAPRKEYuztu zXXT?~qzeDH^)G=WL(W01>) znVQ?^O{K=_5JUC&gGR`ghFH}gb*;0^<0w?9}Qf$_j z(5hCC5_62eSTQ$DM(h0Pk-Fp%+V{raA?NfyJUqNO;GX3b3TjYU*0R5;R|U3s#n}0m zj`3+jO_=({m(PA&#&}YkyiBce@0 zN)7r;8TwT!rHl%GrIqaLY@2NsqZ-U%AwGOq{5AWK`_kh2x;@wAc%7}Fus%`O+X1E5 zKwu>y{8Q$+X7KRyu-+di->lb`7{{#Nj&4(!8(hLlS{&O8Ac1tk%(BXJv3PU%gIpAd zRux@FMT8|X{&-m@;kC`!s3P1}J1{Qz+88?bsRt?$WjVV;R8oQ%Mtn?MyG%uYo34)E zgi-kOdR`yRwUPb4+@GL$9@gl*AkNJ-<6OfQrZ95V55jOs#_;KG2&BEC5r#rx@^u13 zDWAi9Q9lKvs~=4!0}kG@9=)Gc^#6ZKxqo%JuP~gVgNYQ)5N^dgqHadwf2ges5}}me zo9s_xklMCtj_Z&?fG-O*7H=6+8rKovNC6+`lp6UcDnsH?E^W90q5(#+>H~XVY@y6H z7{?WN@^ZY{J2o>g(7Ww!PiwaRp2yi7p;$j%F2Pt)#f&+7TzQ)r`3H!hpRXA(kPW4I zdt3!Lq$(3Aqrc@_Av-olw5?z37U?P-S%>0nE4*G8JG{5f-+YLJs(6N>kYKp~l+; 
zUjF8oO&B?0NoC`I9%Ii|TQAc#phW5OCA&S0Vxtl`L8a(B{Q1MRRnA-(>pjo&Ecjuu1ngX z?zndW+3cttY1T= zRDa7|F%i{;?cmy}&?LTww!W~{=!d-+cC_-a1IEY3qS@EF+4u*|X?T5KU;JtVvK0SJ zHCjyMPmB>NNfLr0RIQk0YT@;1_a)8^dj3?moham6zV;HN)3f}mngA3y#TIe4mbAq zXk`%BnR=woJb*?eEY zjj@G)#NrUwps$5F{QDa(JEc^ClB4RFcfHt(jv}RHh)QwsXN1QXUo!F(1$#RsZ1R;WTZ}4;-NQ5TCkp^FQ6sfvkmb(;=A87B=h8|c~BrWE;INONaZvM98z#4M? z3e3+PxzG_Yu~Y%-_IF0DogJy}dE@r!9vLSWd@z?r?s~M{=%+A^{d{qTd_p@&9A6!h zJU^0Pho6eV0EXKbgX;YYZ~J>AXGHp_TOPGFz$|ZG7(7|g`glQ1TsmqI?S0n7T9i&M zueyKMI}#i*ey>>&XuJ?1DJWTp8}1LUSp5;{%HE5NZxRi>Gy9lylf$Yjzr8%JM5FXv z$l(x$P33Nkcqn>p_D05{MI0aUk-DqD!{@f&iFuJ;BxspUBy^_Rs_!I8DJnXejHcs7 zrQtPN9AG;Qs?F!a%_D-Pfp^ZSB2B$hR1Xg{$K3IC#gWewE@bt+fTq)`a3Z2^I~DHZ zZYTXnB>$^a`QL&x2U?CGE8?AOQe@9!{Lm78WRApHfYQBB5CPoBFvldUcS0fluzu)V z5bK-q^Ltq6)pnkRbfkzcKKT{@TJz4f`23yx@wy-h_Mgah+YMu{#A2c>5-dCt z=~!~Ir}>snu3M;=9nJ4;NFSB|{sis>LpE@SQ_@#n=bDRr(5T5?e&tYwhKIwIM>uS8 z9S#8Fjegvm?Dh!W{IKM%iCv| zj_8|YiPYo&OlkjO8V_$^SH`c=ZTSK_4Qg0wwhbu_i5N%n=be&zUc7$8>qhj#a{%}U zr8;6Cvi>&5q0@uvC~|^+F-{aFmYG5d%XW%A31I)G(TaepmcxLV^|%5<81>wg(`@M4M*nJ&hO5x?Ha^g>*y|Qb**i>HeBS~1Q{Yk zw&<@vT6Md097fSK=fyDy;ks^Yy10P>K2aX-fw#ASk;WfAshT1^J~raOC!EIT=|M+f zrFT@5Rt_3Oo*TxKFC)2k#kD7wN-$pbj=sLD^gwBdMUqyS)Hf<#p>v}fK87jTl-jyk z@`jpu1a>Nr+YRBZ_O3uW<(RwdjtkdFWRiLhf9Vr`mYp-j=+-*uv+iquo08Bq8oHszZR;R*4V8ZhzhvfbD`C!8)EjVH z|3uGmVVqE3IlK~ixpX}fc@;T)!7ORCYm3DW*Fgn{Qg*>8F)ujMi!gZ9b?CQUlgz~n zf$g(DnMt80@kHvD#`_$EZ=5%B4YAM|tksR~AD!w5aI1p>peQ#2oo|zUbbAt+P`jyN zHlp&eKGB707=CDS1~Senti>i|oMyNBm^I&n24Zk$ca|=UWjJ$7RnKR)bv#wPuOIc# zP6&!BSwcin%uNnRY}lR^HInr!ouaSX$9Kx03Y%;?Mri)^zI z_6Bp3-hp2(CdMn0qi5CBxj@eEQ_XmKNp0Pw~0c zIPPNqU;}ESf&=$xJxoyZ;eWxq0|J1pE_(3tODd>htxoLWP3-rWH8F_u!P};Y-AQ5F zB#Eg-zxXKAx-kV(bK15r>oW((q56OCgK=Pw#U2+CeoR1JcV%Z!coBL}p|S<;f6&4? 
zDUzj0*~3;-5KTvAf&zH~*aOEVN$GyDt=(g74N%K44a;oitbg1u5=IM_#56)6 z%3+m$YP|7B@5MXlTrv3-m($~?mj(xhbrbn5Vyd{d0(7E3>7}7PK`6094blz`0 zwbH@h^IX3r|8mU&%lsFo^r4h4m=D{6JSO+{NH!%|_4U8%uXdlWwk4ARLaDjhrDpWE zskvIi{A9L4`o2kog+=K#8?otgtE0C|`Fa&Gdas$300dT;1R2z2>L1f4YvgrWyxNkO*A2+ND^m;0aXL#w}T6Oxyf*3TYnOPuMA-W)d4`)zV!zQ+F3do%7A#-i)>T7dZQ^G6Hn4yJM&P#?5O) zCCSqpZ^&uUfe>|}s}Ya${AVwQ5Mp|{&AAka%omm_n&ZVbo6#_xQaFcctfcBe8ir@( zC0FaLlLO(G({V(v?XG0QaCMs`Rae_YSaW4B+uCvgi(gBjM=d6b97nb-s;=WA()%~H z4X+$U?z7Sdn}J6q;bSQRb?@z0P;t7HcOKxfx1Q1lPK)3JN6F2?)=smciRrN7cE8Q6 zv^Wr?$y(rc!jfOzDp8TO@;lT*C?KhZsc7SfX-iRWX|VqHnT!Vly%66X*P%Cp!~75t zrK3^sAF+|BrfcLy$xh~bbP>rc;|s}uKYZ{!g0I3Pox}$2uxl}o;K72BsJRqp=Ti#rODV4F?jthhxAnr58>Y-n5 ze7p_6*&Rw+K&BmP|51HO6Gx)mLMv27AzDH#IW&|e|3-0X^!t>^5^&6`$ zC=cr}1eHMlX1)E!r=Kiq(-GTM{)bd{$*3ZP!xppWfxhZME}IQn!FG{F$`IBf zjlR^xO~ef%G6)t+A;wjtUkIfXR57<%;>qcmj#Z}-B+72`tFEn2rM#lx0xm7>Q{;0(?qMrBiP$6 zH}XyP>oR*0x=ix1-x)cTQA!bw)7o#MEW>Hmti%=t;rhmHlk%f_TD2mFT4CK?KIfut zr$5?<3ExL=rT<+QQ96Qr@h$yXwtV3cRR@PP^DQp+9S7aO98HOwCjFT`tGCl|eTW~6 z@Qy^^J<;Gd|FmP1`zp314dgIlg`;M?GP+WY3mD`!lg%& zH!aqu_HVYwjKdl!y1a8cE>BVu=4)SbWee#DCs_-v8Y0B5(-H+8G`tl#6*y6&aik2} zbI^>R(5DsAfr*u0#J&jqHW2t!`U4GrvKSOQmF|X6$UzhkdUaXkJ{`NciuBGbcUUkp z{RI5Pd7;hsf&a>A;~PjYHeE&rZdg^|HV{!yZt)6vC(6oVYYkZ2-C~%EHBa~L|KITE z6|#{2c`22Ft`qRdpK9K&dHY662y65mJqwy~-F7rgWjRWLd zzfV3Lksy~jw36?&r5%@+C!OstSmnAM8(}DX-Ix_~Q&J~z5mH;2Z4?~4cD!U`4sVjc zQNFpU)Xf=@S%v%XL|b3=K3GNtuAL5c#c*Ve=nnmkOVbbpKN)|c2sK*tdc39<7+Kv} z*j*_}7`S|xzLH$>kz6K2UM%6mq9QzhI|@fl!tG|C>1Y`+hy=RpnfGiHg*@IZlW)o6 zs=dS7W$Rjg*{L(g%^*0g@Z9lUT{m`lK@OvS) zG4)K(^YOGLl7K~@v8K8jPcc_m@4L(XIE=sDdwBA`fK!}qi^0jvq=g09iJ#!pbdKz3 zi~EVzoz`!UWuc${) z#XI5(4hN}X{+{e}l!WcCuEEO~zd%bJ&7^lz5-^Z)Cn)osKl@m#yEPBJa3Tb64h8lt z()lQo_PR73gA?qZYrN@?6G)vRqMUG66}l$fBfiY5-!20Y+x=KFTPU>JpI9}!O5(vu zz&Tl%H`6$r$3t`*;v`pGKY422-*G!Ns*BC?kEcIcxBO5NB&2aPqy~=5XH%3Cry1O5d#o&aI_jB88*Rv=!SyVc7-VYX2;(5W|kO8j@er1TzoAu@R^&`Y!_6)9?ry22F!;ejO^I=;bZ~`FxTu(DMjS)4-Y!n2n*SRNH&Jm%Uh&;( 
zk(;g-a43?(+GeA<*i8HVd#x`^xxWnsng|FOL=-(^B@}TvWrlu|l-Vg&kvN+kGiqMn zQIvn$v}qJ3qDv67`?yxVPd(!+a3<=LCH;X>g_OBrJ6B}D5_EZa89stSB0D+mPH>3T zfk=Wqsb-jmF@uEg2`Rn+@ZX~^i@d15{D&(c+%Cs1A3hZ9w`N<)YY{8rjArgGb-?Me z`ncpVs6`of;=eBDZeWqmajyrKp8v(vZmU%Ek_1etzG-Q*{J6-;n=R|2r#5kMx@xuU z@2^)A#-Hj!#_0x8aEIt~XM|?4;1x6PJshwFP+09#=bbR^`!l?!)0(eFZ1d(gk&M9k zhO=metU1BQdxh}y=8kKT-SR%N$pdH5uKI`G%y@#`fHSZ?Cfs+$)dZ4acPw|7PiE$q z(c*CLp0jtlwl{nEB0T+D4&weB@I-sUb17i0(GJ(wi33c(+1lDdd8tFZTZ>Af*#vZb zV3^rb9RbnVAE!n(-Qvg)_2HCGF-Np&GJ)ksBu=M?)wC_i=FK?ORhDGx4-f z)s3n^JN#gD061fZp7Q3@e5ytkux z50Ai_w~f)7)iwYt*w00>?|w<+7h~GyB|tH8{(YMOz+YE8`!0a=qvr3^Qu5t464Lge z$JNlYwKlh7Dwyt&TL0@xM9lM`1HF%TtETK<5Yc3w`nUVsofE{K6J;XKQe!5G-v|+u z5NFbIZt}@+xkRy+>6B_dHnKiR!f10Z#P6c)YhYEvs;JjtY4GkvSlQUtQl0U)K%~$i zX`Ap0VioW)R*q)~`^_q?1U`h`w=A>CGRip6K-0AxPp8) z#QZneTO%7@--@Ei?s8tf}c3e1ZGW6!jhg5 zMCN#`3|4JOo;t-6NA8orJe)H$+J?Y!tLTldWfBaV%)@Gm`5R+rY!ecnW??IS8pXsq zW3E(VU+|%6R`(x8n-bDWcnO&2NRU|&AMR2EZShzN)TV>1*wSJ8`BbwN$zS#;X#4W7 zP5o|s@A_G8vxJ}AP8@H3NuRZAloYVMknrm)ZEnAlI)0b@9q$Ee0I+D5?DpZ5>po5+ z%d_bd;Up(39MRaNs1+08g3mhJThwpwB|dCB@vqcQf&40aq>y?=(N)AP6`Z_7QsR6) zzL>3ataVHXt*-Irp7~VGlw-=%ET)r_lPdz?Q81F;z+<4H0nG7@F3x}Tz@Y4u62lXt z!NEBEY!O(&kg*~C^!G6R_jrD#*)%NPWLW7sKG$2kQ5xuiU*NNmAH*=sTy_FSk0REx z37v$6yre~QsmmH7)}G4mR7og@fmPHny2AucU@CU*qhVtlcm6>EJ|uL}xty7k%6{;8NO6&i6MbdZdkF`?%jyolxyUSP^%e!G5Sg_(fo&Wi@Ay z_W3pNQi&A>Uw%O*nZnp@dy7!Y8LF#?2=ugz@==66$N1N-cAC2cSUEdS|GL6cA7SOs z3(}c?v*++$+_1&LF2H3)T8YO#yjR8?0l}8Kdg~*FS?eM*JQ8D#537~TUBa0 zFeYp4@5fHQKIko%Z>%gy|JYfJkoS<*?m3KCuk`hm>)`?G!dDgXwh2=~g?poc$w z7bd)IiO6Sgx0&y{ZLQ?D!+ghQ{T@Q;9QOm)Zn8kduettXo4*r`-`TLY8T zo(H|xQFT+A#u>SWITx@8+I9A`*cY8#xzO+*uHO`L0IW_ZhF8eo*tlQxp>99Lhhfp? 
zGac$XJk~2K-dr-#m<66vB8i0|+ot2+vl|?xE zu3@P54Ny5`I=X1Rjs1}&5wY==^3I9k(Qh>zhZhT{?%nY$`mCyv4qC7fQ{X!-rEhZh zXr77Q+xqQ<56h8nG2w-)m1XA3+SwuhA5(7`)m9sBizX1<-KDrgp~c-@i(4oz!QHJ$ zDegs!Q`{Yb6)RSpKyhi&;?B*t&pzYau`)*fW&O#!)_kTtO*>?tZrB#a$omY-&CqFW z$CJvWt4xNB6emDSgo6q_^6NvEpqKQHy3hloWJ%TC;&CyD`ptc}d=VMy#FS)uXOHT` zmVx`4C620nU2x&gjxT`-Cl42=zb`lT4dKUc!8ESj{S7AD zD=q)d_!x0Ta;FKkx;G0|phjB)6;Db_b{~i7&BL{~xwyQ-;=7 zL`-2T!cI-YxCaElQij6lR4i#8JQ*j}*h7s#uR>v$td_T}bcqw#A7SOLl12*8Zw!*c zr%rTSPj#$3EQMyl+`ffz`B%BMD%<;srR{=>4zma2%jFH#kVhk4>@Kyf`vrLe-%&6A-X@zU+n*3YawVM zC`)b@T(9HZy$eK6DCCK(A1SG^7AfoN_g15rfWQPGJTTt6hUW&g)b;7az1ECV=e~lI ztdrDZfsC$O0ha(XaO8E}L3IHANdky6a-sIzSm?M|4MH}LJavM>f17WR(EL0la%MN! zrl6n@yR6fW<9JkeAer4Wts7W`KQl{-sk_+z@zY|7uF)Ii!cb^>e~CfCc6UUU4{0-{ zOw)X+VqFp__#a6~LzIhuO7bCnnp`GwFhSp~`5w_m;TLpNPr7)ow(D0*@5{@Fk^6N5 zrS*U>n50IK^e9`Sbb^)%#Npf5-!msx8QuoYf!}Qwva6jFTmB zdOAIOIbC&m`r)#y@msavekg}~IWdUuI&sDxp-3GuLr{vaREJVzDk%|iXq9eYi~R~9 z#zcIfpe}zNJq8Apx$}|Jp2!38X1wp}#@y)bCOz(`|8Ry9wkf#~+@E8l8?ah2C1hqjUqFkfxOE7%NKHW!fB%a)0+4OKlw{CbOf; zWQkPq+z`QfS=5EuSUk@C`|H(z!y^v)7vl)mhRM8o%E*!g*qS2P+Di%GY>j_UJ0_5j zkn9V&3YQBONq3T(VfPr8h`qYY` zt_=+HpZZ{Ni4Qgb8j zo|Ns(PBl*Bfeu@zQy4E&s%vT(;X-{G7-0!g@3m<5_gqce_Vlc3F5^Jq|G|M}G_=Ff zCN+#Fh{d5 zt4wyIKqnMQ$|tzP>X89;D>U^-7H#wstcv7sMj@&f_mT|ctrWrE$OHGFpVwu^1%l5e zRzl}1zl*yt4X^{F{9XLoacH8LEl@H*I)bPNv`$xyw2_$%6HOP*s_Z&_a{t3sY$GgU|}s^r}RO^S@|TyH8Q@v7?r+`{eEE2{@Q+3Q8mL zuU`L}+D@U2wYv~jzmt(>tGn^TGF~qveP`krB?KiQOK3L`CmRF3d*bZ0Wo|$W_v39$ z;dLlcFimdHFWk|aJS(;r$&;5I0dVIr80}nu=~?s0 za6)I&6IhDxia`D<2rM=;lL_I@X?}#+?l`T9;_l-q2P^H7Dh03uGjuP*uO{W?9`TlK z9s6(av`LoDi>8c!Fd5XOIaD28ZyWiwG+wy+S?uAo(!AAqWS+NeYEp80#!#wRR774x zNwLVtG;zuVm`oUgKSg(sBIci2uwDaj#x#Rwoh@@&)_P zfnuMc#t=)G_i@w(wjcHstmTXA&Xb||;9ht(8Wl8*QX@Mo8v)fRW$Dx2-kyk{6_+J# zw??n{)`ep|_>%&@HM4e_h z_g#Ho63t7FD%m6G1BrRmQ;&cCQNeU4mIgHSoUpo=Dq3T(9PH%|nv(yz9CI~*uNk#o zNGyf+e+CA5gw`5_-s%zcfkpGyDm=^>T<=&Xo$8Sb{z?9yK|l*^=-1W#-A8KUu*;iC zZ>=98&)M_d<2xy>3EoLw6%lBDU$J;ZSEmzm#uwYLzwbjOVL~v$XTGw(pHB*IW8u$; 
z`lqpn!L%lD{#wR0#pAl#bM2!%Kv^%lq6@UlfKo0##BBp&yJly^@@{!)R_x*g%1%DS zNin+0w|#b0YHH4E{*Lj<+qz>Qr9WefIO21v-4siNF=7*@?7-u9nJ&(B{p**ro?A0} z2M33=M%%t^(w77J>W`LUfuye;BQaU&Mz+2`g{|s!C?i60p5y0F@{0KFswFEv+82t6 zi#EM<#PUs~H|VIi=#4Dtp)c4p8L+fc#7}WN64zd@e|C^b1{?Nbv5|Z~aknTj-FEqw zrT5F-k_FOgATE-p`YT0cl3D}fPlfy6+NQPB-y7{)z-69|`^Q~-@u={8Lej4Kb@6EG zN}8SA944=hT@5r}*XxJtBLP8|=ihT5$d!@Fy;eu1-LZp&M^Jxi^_I6PBWuUEtfltR zgg2IA*Su%lL>U-tkkWESO;$>kQyzkNpVbFNA2mXlB;1|CjxvoV0aM1+gvN1`7f@~O zYL2#3F0M0LQdj!d1(7lLOIhuTv~UUAZ_A3O_#WrN;1(^qrcfePmbo#IAD@4rziS$e zF*Eh{9ie2K10IGP4=vaV+OSenoZa|lyFTnm+Deh#u2q4>cqDm`;so6rBzcPLf!!`^ z{0{z?o>qcBV4k1JuV+9Ac2Q~|;f}O}H+5&lzvV&Q(HR>uvr2y=Fg%@^aiD`!b2)~{ zpsH8jJjL*%bf)X63;*Kxd^#F0+s->Uz2xV3+)pp7o1z{kc|NI&(s6Dn=m4jD1)}$f zLSAvAY9Ch#wnXm0SA)3kD#1bftt0k>bFqNUy`Uh8gSU|nUq0uAnBf2MV}SaMqQ*9u zhDEPX!VAHt+DL8O!=5V?xoLq{LU1vsiaQLyim#cSk+SIz9nmPAS0hY~Ex9`Q&(>eT zcjUW`mY8uobsuRH0qB&)|8lqmQm!rM`ph(>wihmAK1>Tv z!2V+%rqOSK8=gHvF}*Q4)p2`cC@LTb9Mu@${b4Z%o+@Q$XP+D%9)4eI$k68lL^b>I z_5t{Zk@v8$9=Xw6^zp{&|3@kahT@O*Yajq2JG+ily-I%pt%wVq*pL!yD!E1oIRCUn zW$|%U>ediD33mFiF*3|qN*_BsH*C7!)?!0SL^?^PB!V;SDZqUHeeFP*KGI|$WoYy+*=?nBH$K>Mgu32WBAC508w2DR@;9CqRb98!-) zlm~148$NPK@&XTNb7v5GHh1#x`5b82X+I+;zB5lRnW=507}DAWu+srCcL5VGWY)HT za)})pLxXo`8=S?bC${2U(OjdKkiT7f3+}eC&Q<=P+{`_FFJq$5up&wK@ zd{*i}GAORkPmA6DV7=y!>6nI^wOnR@A;f!54jV|XI*crYR=cCtEsy2Hv=n$PXi@st zf$!|k20D{3klbS%Rv4_E+RdsNJ>ibqq%7_ZS_N4#+2hC9XZs(Ub>x4Dg$j>8qp-#mzuTDOFboVQ@r@tp{gIPf zRPyiybbd<;UCWRUh)r%~2JSKWpsuVcW4evT`dNnZX1_~L+weoNm+-beKzZx;9YkmY zhr{|t2#c6~x+-4{6eE4qG7o+68Iu?zFjfFWM2g6*M_X*N5>g}jg?x4L#k@bi%_@tK zInVIC>`t8u#e)zLIz3nsGttG7=a!L14%}>Lx(KDE347ybp1W#PMI}V(G_vJLprJo| z-{G*nxq``}I=Kc1fx%!P?Iu-*L%X_v9?jIC@DaTnn(Y5xSDm0{RsPh7F5&R(WnAPU zB+(%B5X=w+^OO3f5vuy%j*ZE!dFhioHu&9#fKFPux(4oTz~?C!5uf$ug-4%)L>OFG zHcb|KqXTONai#D&vEAhPGp{xCcmG8mV=LK$SLh@8NgztK_7dpyGp67DQvpR z^1P6Ucc%*F0G$|-XP-LN0>(x~qAeiYM<#|jvo|xI2(7_W(|i1S`R;7LJd~lWYDuM` zk`v#&J9x42*cETn&!=m)F+Av^ytBPz#plhoa<4}%3(0TkPr^YghW)lmhxyH;nAb%U zNYl(G7v23|4SHL$)8NJpmX-lcC8APPb6 
zxBq41UaHKWztdh}C7<47i0*|ooiQySLGc3zWDFEX{5WCC#UJ%57oKwM@~(GuoPSN1 zC&e|6z1U`%4N1x1JI#rVp5wz-OK5bQB!DIEZVvA|ZX6HVu6j;78EnxmhGm)~b~*n* zJh#_qRmhr-nOw>0m}6*1$zDS{97rbZ;p*metG}&pBn(~B zxoR_vmR^Tte*Bx3m~HD~=T`FLb-5|9ejZB1yXOeJPfY2L_2!+eAfic|yFtIVWxZ{d zq8f=KR<7~)FBxteTyq>m{x68}v`n=#m4Uy>e)opKs+Pt)$L`C?TaXY4))x<#J!8TH zfZv7-C`Hlc(eg~V7^^-EsQ`c^7F4C=h2Mn5CExQl?MpK|WO=4N;gG<7wi;qZWOrR# zI7F5elv(ZE`mbf^sOY8s#_S;XUjPW9VMonaewg;1O>Wbf=x z0PUlxj^WgQ#~j0)A%}sAq@FlOzfB2ss-*ZKr-3v~864?<0B!25p#EwWA$IE3Bb`C- zH6toqSogalgjgI=#FZrDA6@*jRb;)|_{1e{AAt$0@0fe-bIm39f0F|=P6aCNjg1-@ z4|&{2z;bXc$xQukd>=N12JN}Qu!`%i(N{9EX)ZHh1`l07`%dy`wY`RK zCdWvGpioD8h&jS)4Z%69fo6qw^2HMWhR?^?&G$?b-!b(6rDAO{&*jhVBLpbvR+@hbFaqD1bUTT~1xG^CUn(~o@{7D!z@zD;E; zN`1#=#Xs+C!}~iR0AI21s+4<0gX(@x;#r6OMGcx7+I*Ebt=W4#v0C68e}RPiot9Q0 z6kcebGqJWGRJ-mcJuK~l;?pzS2ih$|z^&(9Rv-US|F1;p_oa6L3hP=epLEhUD4SA) zSQeo%2-9&IfGmnmdX5W*j+GdYBQ@KdQ++qU{Ve~}QK%bm7F+Fo-ebP6_ZPi$=Mm@P z_gwwQG_pCiY9JxU%UsBVMqNXY3lJgKfnuJo*-TwtMJ^*o4Ol{gz!H>FCx|~E+J-7T z%hz!d{{?mQ1hWe195s2vN!*Sh9P;fI=ds8&GsQI(PmtcuR4(CA(!FO*_4Jg4^xUu1 zXDd+q^*SVd%|(M5ae==@jnz;}f_j^>eNx20g*wa@eDn)%_g^~=(^5{bUcL!3Dk^!Q zsG8fT?y~-!N5$~A{dFH|D0CXofz%NS07wFqWTif2H5Lr@X(0qK1CRJJJh867I-Vmu zy{9|mt%1>5Z%C@uZni^-9#YSE5cio#c4VA4oLWCGH6*({$Mr?j2_YVwnEoi(QR=2J z)X;-7fM7N9c-0eQNooKv7~n;4;_g8`xHX+=QsaONJb2w_!V z@}Yd++8EWBJtt|LOjig8hVuf-}mQn zBxQc(hf*xDh{8OD*DRmP8=izw&Xl_79&L2r((=FUjBY~YKkN4QCq{2v4PAjV8 zw(KiTyuiipAH&o8r3Es z)kjVkB~6g}JTR258M10%=Ge0sSHj7NyYZAQTrZ3zGWr)>#lPryj<&qh>ZWmlrjUcN zB^t&NCqz4Qqb8Ah(Ti{<5+5Hg#Hz=0$jEe_ah{2_)un^-MV#n@d4}nivm)(TS)xrkXHIej)C&Doou_2mj97HbmR zgdeJ`GY>%k${4cnJp&dnBxA%T=9$8-j%yhSiA9UQV~|>8276d!bK4mQXJd$O?lRjq zxYFAp$1Wx< zbo%mlL3wXQat4x;vmFo0{7dfw)%CVptnjz;0jZN{M%hn2=G5fm`9o#;)^I-QI28n% z@Y+T;EJxC$Z6SMrK}mfj{SkZ729AWU*RiToVYEmzX%*7|f6k253u$dLbC#bdGn-5; z8|6O!1D55lmcI;yQO?g4i={L-?msqok?<6%bRz!dTKn-sV&p|ASR9oWmxI9uTTCAE z4ZP@SbsY&C84P?j0Xh*Zla&PFVhV11f9r3kYY?E2YcWL=9lLK@e;hh02KgHy{NZ78 zA1vhyr}v4Kw2k?PKJ_N}j3rW!B(-J;!-H 
zkG+jy{=ZHaI4>xPpwXlV;t4|U!wJE}80rL3)G(o#t}xB@U>^7IPK3o%)1#;(B?`Ib z9klhgA}$NJfIdQyPy@dP-YJ!cC53LN78dyX;Jv(kk}(MSpzxfKf|%8~(YB;0hClxX z1Auq6)w-tuR;#t`;Bsey!%E4Oy->+wO~Z`z8h-*-Wm@IUJqB=>U579xjD}>hMb~(2 zOA#(LCSNrx)j~tiV6FD%rk@~3ly+@m6R@Z?=Pk`?JQ=QU@x+P@Sz4~G*Y!{>DRwyX zl5t}(^>*8jn{5yD|4}(QhqAK3c@-IE7i* ze_d=9`J2M&D@O6-m%gsx*Z9ia z;9}h%1GbiZqe;(v{#@(G>xB}6Wkbtwdd8y%$sDT%A#wtS!kADAbZu?LKU-PDSlS`z9A?1)MoG?whq_cf>-PGu&2PW!WMZo8E1g(->vQ$oyMG8> z49oLRQj)Z2{mDaAP;ppqbw2KHCUH8>j+1moUGpTxU%tszKcDqva+&1w@G6w0CW?ux8dYd(A(v68&vXSy5_U^ zTw|Qj_rE;TG9KFDVGoXZlth@sF}#~s;*(g2rjl)X#0f$SB~OF4SLi|oJNCyT+%NMY z__)iZmaC&mnOljQIAE`TY`J8XPFI4Esq(#vrvv{3zm?!d?-I&C_Y7b%Ju@NH;R~mC zN)tZ+BRP>|hMtrZa>-B2%nP*9lj6SV1{n4^ilBk!u^X^v@_+TFa1XYkwR#z2HF~t$ zvbyyBETon1Uta9T9uV{!x?CK2=nuTLtfwW0Nuc0CZX2<=D3bG8fYw;A{f4L{b_ayKY%Wy65i1Dpn8=2k*mQqM%|FL8l6m&s=G2x(m1rWS?V#%%4_sIEwo zw7^`pP~Ze8fR49tvqDqz8F7oe3TjEoWMXuIOFJoQS$Oj+b}T$~R~;)v16sDB?@c*o~u8S6y9Tq@zqQr%q#9xrHswYvdZJ-hqNaDQ-MW z>ml$rF|)|uc6y<}W3yvWn|)5h+M=(vgDT^mutCGan^r@n_O~g7Iq&KA?e+^V_Sx-T zEPYmz1F4#G3U&H3ozY`FlHAB3@RzxKa7|N4;)#g)>%&s!?1E4R_D%6A29^fX-o69x z4|F{R&1V8hi^LSjOunrA$9GJJlGU@+kNi?hdGz5n!V`+MZ_1+W`n00!-gKxC!fsby z+qE>fUSC2eoH|ENJwm+!O3`Z$5#y_+9)0)GV|0a*?BD)1pGWw)B{)aBl9BaBP zUR|$l_gW?K^8h;Te%~7#Dm#zR$tbHB!;{R26KAQEZuE z)~!7oy#)7#9%IozwI2q*6%;nflVzW2WvG(PYOL$Jr4Li!my-Tl(apUp#t@o4Pzo`Ex*1BqtQ(v2dhS%Y$8!^v; zWXon%!2q`24kf0e6r~jpZ5{4t|4okTDYW&3I=aORyB{Hr@(nbd(!PM9Fu_6K0Q!NR z1%9B9>qiPku=g7C8H?Ei5gw@#XciPu9;w#a`kqG7Ce=%rmEjfQ7Y-d%pVAASV+?}v zq~Yh3Bq?-HX|o`-W~V;L4L16pft6eF0kK)ExzuT-*0Z%P)ge(3H2|lT)0b%UM)e$S z7O6ft6fyf`9x)C5o57#iSxQCx=MR2;O)6v~E!$~`$-4Z6cG+{|aKik>od6v30Hf0` zOtm^uS?f3VrqLYZ=_Q5PVcWit@qOp!{57FOyGew4c5qktx+*sypu3-9b0Xd}cMyZ! 
z9)8)elOVgMEE>~8irgW;B96&Ts64i=$4A!LKnR&G55$|;sAWC*Pauw$xBQL>6^=~w z`>Jbah+V$cPLUo=N>p3^waMk*6xF32μw0EpWk5s3ENeqcm2dy z6d3KZN3bs1@o+8YLV-BU)^vy&WF@Tcfr#YqXPm?Reky;WNKOj9ZsagDmst%CpI$eY zIXI4(J~C%U9f;q!&)e`jH}1POZoOSe>~>A8&{i35ABo5I#lIF-KvqA8C*e_j8&YFM z*Na83FDyY3k>A)S(1-D6a$3K(4$9DWEyWgIzqQ^*4F76UvflpFdWnh2Y;x2^MnXA* z=EZg_Pnz)_pzBPZ6VNT90%58v=C$?p@v@W5HpH~@Vyn>UJ0fh9#3fS2$*6FdY>xZ1 zv!k$2@#^Y%vo-X|$~uFz+60eP8N9>@?%>#d{~kSmj{uPCVM(o-;iA)FYMcHeP8Rfp zI=D%&`kt;J!=>iSyM4h|<@D{aEh{&-N^cR?n(|@oiOwoj+I`~fHy^==F4N%kMB?jn zE3c5GrP`3fh?`L7no&B|B2%FJ7@*rXYd}&2lGz*8IZ55@a}@R@429o+Q#Uh|rFEaK zAvHU=H3q{b*^s#^auov)0n&`HXOE*i_@h}y*v;`6!8d^L4bRV$sk-I<54h`jB0dhD z@36GaB!?n_cY%=;n7e5{+LzBg>RknO^ifgYLJ<|pkK6B=>UQ8VOR=CliEz{Y(`sT# z=Mvdz@xk+T-^ z=US*lC=%j+;3r7G3PQ6P(l6q&b;8YLd9s4{JviCZ-N zA=nsYS{v6b z{>rp37ya07cF9-5jIORyk~jfO&oY_q@N0=`YU87y{xyoi=p1|X0lW}6t;Fs~+R#7x2#t#@1vCQ;` z^U2!3MAHesVi}cr+Q*cZ)6<@VgEu46j+}@w-y7T(=JWwA~A=f6p0aOkK{ zq`_tz%M!JZvcxrd>FBsr4#c9mfs6ZQ zerRiVN$KQFZtkrqdk&n2MKq+-HqF4tyOh-p`d_Rz#l151jOd5RZ2JU$EB+pZ+=-p^%o zCM9R{{$ZIYKCGyj6Uw0ZMs`b|8g%t@$vh-TM&nC`01po?-QRy={0TI&Zg*Va6bk~- zvNtWN{}+{(fi7U89D2N(B|)APqvld4oMKPAPvwM)qxmwAW3k#hd5{p17FfS;_f2#@HF*d?w+wgsp~8(L~yy1gv*3Q$436& zHPueOZTr4Q$-y~y0euh_F|Hseq^#TZly7?Z6>%4BEXi~kLOP@ugIh7w%Gq+#i&xwR zV&LRw27<`RG$l9RB?CuD7p&EsGSQecGP0A(Pk#5})auu{ue;~UNW8oDi`R8HF3t~! zxS)fa!C7T3zJKRgVYhO=8nM{ske+l^Eqska{eCmKGs*~7bR+F0|Hhc($~0;*RYMSu zenxGosI062OGPFtJ`E5tC?rV8@74I#G%NJ}FuHDE?;-xOUwlN~yz92=^=SDy?1F8l zHrU5QU#b@rF7g| zI`uDu>!9KLo<@pgLbRHu<5UzVTC#sLX8{67v-{5xcg}C8|HGM&OhBqmVTk9hStJrd z@bXHpIvbB;)y>)u?lbm=mCH0^iJZ%7Di4ZY5C&Zel>rd|&)$Wx@aT8HlN7%GmWFV( zx>Sjs&^y&ft+|nh-T1w&5imIpOA&UsrzRW*?*)Am;%3^ z%5!E(_>dzIlen5Fj1F~6NXdBcMwMb*iAWy|YrX};1en3g`_~kcmjkU{f3tYC%jat- z0W?9w@SSI4tj)9gAY(LoEG892^?I}{tfp^4kPer`oh=MfEcL44meWD33z z1k?A#fOuSnivAeM8l) zS00#!J=mFOJiaom+}UG2Dv@V{KgjHF@6zSVzS7eD{JEWck&G4R52AsWOWOmbw%~m? 
zOEN1Jq(lUSTxGUv84dzW(xgv;pKE}hzr5iQfsTufPV?~vN^)stR<|ip9b~18X4}gkViQc zp-+H9hAI>1q_+l3W;+|(agXOn9;i`n6+T_TLIi@&ary*&YqIz^%6qEm1c0@+E8=SY zq&%ZZ*M^1Nr=#Dur~1Ve=f7biM)xOS+&tn9Su|f-=W_cP58KIHg~8MN?A|aUz#4vg zm=RZTVuGy+T@X#)6Ruh}s^+zS5M77Fqe>kE>CD$dzLuG*5Y>$R;0!;!s|rJW)29tV z{mG;Ryyt6{NhmIi@vk8H^)a?-P(;vktJtmUZgFo1gI5<>hEiSH>g4!rvgY)#`Z2$R zf#-#j4*1^%A{+1_o5U{X<%dtop1ENBTQu4v<@UwD^b!y}-Q56&pakrml z@K_~CX0thX_!{jzC39H^;Z7L3%bD4qYNrRC)4?WbA{we9s^%RAgWve1g%M~W_VE6Q z=RBX^E4M4Fk8cW|{h2Os@EC)7r}lF%&sxmy{j(3g`fSKVf2n+Q#_;^#2>UrE+VW4A z*@x)SP!4Bi}cDEG#grGDG6&odtNK|3${Nq+A8+?ieD4TUaO=#zZbJ zwh>L62nP-RS_m0$j`I1=h3*77?AS9{;c(NcZ=-*xTkH`HMzY0TUw9kb`qt^7Wh^b( zt7VqO7JR~e{nOHTrHV3Po)NgmK$ou}Hh`)=#^d>KhMRdC4XWSj@*E^0yFoe86 zP5~+7uqyV74j(W@Xybz6fRB2}+kai6%gYOfXNfYYT?4W8h}?7S&X1zPKVgie^-F#% z=Yi!s6LIAONO|8GINQUwO??KP>+7EntEJ}CiZQSx&D+@npQ4@Hk~#b5zp8om#3ptF z1%Q}kxs6nuZZ%cg?TtS{O9m+F0o*q@|6pXlShYKB{NAjO;vMj}uAk@TnrD{Z{~O^t zT^&2K{M&LLyu=8Dy)(9H8njuF`p`K2+%yR5+VZZIm;Yvhi^@!e^mO3fuM-MH0;x9y z^2AY<>Sf;@17~g2X1q@~`j)up4j zhAs)dW^&*{hW_H9U+)?<<*vO%^d)ls!+B6tq^L7eG`7D07iM*je6~y2_zjZmkaV~f zjG@*&pNYKD>WK1Bu+s7Bc5Bmj8*H3MBez7G3nrDr4tHyjAW5Fs117>9?aOj#G59-T(qCHZ`wyw~5=q8yMwW)M#=NF8;EJJqks#U>~- z6t;dq2X#lXk(3j z1h!qaZ@NOXZ(7D$oWTVp<5V}2+2b%2uCV@rPzv-{5Z9!wL)$tkL z&??|g9o4JQ$oY4IJjERy*#+{hPhE5m+ywte{O4l;gGgZmI_LHVBMr$tAG4)Vr)gn) z5cU)Fld8D$knmT`UW5P>n6jZiL?Q)}3-H}5F{3)F_b*pl=!d{-rlTrCQs6&%s-mq+>8=|&L@4&Eto84H`|UBo|K<)P*9BFn|WlZ3Gq*s;3uN_7S_IDrsge)ZP0BRl=Yc_Ci5$o-^0F6q4=Tk#mYtxS9 zs2iy)QYjH2EbXw0$e<+JeND8k)shXGsLrup< ziTy;DqpvD`j%fO6KnUuZ$j6RkXg7pUtljQMi#6NJ%UO`*&8s|;dP|)z>OIfi+TW=W zzI-S-9S_CU5q0{EOZ5v2#^WJ~4(O zv7~xhAn`i>Xq?Zs=<@urag@zv!et>Z=Pq&z+1vn9i3M#OkMf1KBK93$l=QxMnv8%WpMlx|b?eVGJRpH3q| zZdDs08(eYwnUvTXnelu2mccZH^}8+QJU%?~HlvTv(wgeUR8+%fk431V&0NA+*oGy; z=K0m9_<~5|Kq6q*>C&#v4Y7$!5^EGDd}EU)nS`kzet z!xn*3P+*OwnV}P-9C^kM3*k^<&eJ3=ph@TOb3uT88+YIq22v5?Dq;G44Y-phYozDm zi}wXA2kl2MKq+Qnwge&X^N2;ACgM97yJe1lJg+<3x?cBE-}_5Z{QQ_ zav2Cj`=uW{n5;$3d*g!8bQpDuta-q%{{-Q;)1cGz7ta8+#r@A$iU<#%od?FpSbP?@ 
z?p6j{z1qPM@H&&wi$*6lk(20v^Kw^>ySy0OQd7&4iXV4(cRaxjRmbMT(QmQN$3$`5 zf3kStcV&mrI}J%pCxv30n08`8L)a~{H*NOB!O#FWyk#(=eOak-JW=PY9Yu1?=3jay z<~UKcd!Nxn_INF!6cuuIR{vxKHyrQ?3mYM04g(yecus{(dJx!Z>P+YaWl@GuvTr$M z##_7u8MVP~R#qjvLPA1{LZm1kd_kd`v+ytj>(h^jr>V|qw0*f&&I!XUu*|k^>+JoJ z__0(mPSk;iMyAL6hD3^BsCeQTVe@g@Z~Ys?jKTAtQ|}YS___iELa3~!vbLh@pM86M z#=p9@hZh0(0%xhg@!fBsp{Z2J+>3;?6^52fU(?8Pvef)mepg-ZEo6UwBNz2Px5Rn8 zLF+q7s93(C znkEJ%;m0&(B=r!KMqF**Q8Cq6@gH6K;K=CV$I@Jr`@BAiCv%a@Wb6x5Qna4X2Pg(y z`{?J-`<5q)#FAuJBUNg^C9-Tyv+t`s_0AuEfEu+G zXN=#O6x!}$+@gBLPUYmX=f@SuFLp}XmCIdv)i9N7dH17J_lwQjM?%cy=X<6X)5iJ- zb6R?*0J_5PKZSLZ{EvLA>J--ow~5;~whS|~eIIj>9-ucLj3<;Ja4*Eb>Pm9H4uZEU zoHx8m2a*1PFBJuqY@3!2nA#BUM`Dvw@F|J>lE^c?Z8Q|Mn8*vHSLv!d5^r)_!>ZR* zB`X~pVJjI?ce`XC{S*Kw10)A<~<;?igslUv<)e4>9 zG2kd0Y$*|YXXcPVSvSuNOTQVFHFB=rx*#2l-*iV7=BQ2jZ7YACeinEj;sbAg=RQEp zSzlRmr-+uDHMIFDSe&TDO)!{(y;ml%aZ%pmJtIo5nz0E@u9-s-jyaokbdSG}{l5Wv zKQuv}E!srMs;cwo^O1Awf@A-4smj&N^`{tQ-vOYWIscOh9UTS)4f-_{HhT`h6==aqOv#`?x5kN_#bP) z3jQdJe-rtMvX_jy@fT^7Gg3wwy2{7+~fk>z2H(!>XF)KwVYQNVu zwfc})96?NMEZn26+ixIa3*Gsgzq}XVa_QKIn@EuNS zl-F#{#yfW2d|w3M1v4E2rb2VRqF`!uH|JNI3(}15+6FI7qs;8#kb+}Dst z!?zclkAAR5e1}3h!4g>&imq>1n^PFUu2Oa^&|U_;1U-|js7bUCEOW*Xi_=R#aD{u< z==_ucpf)}~Mb=h2>s`IP#Zsvzqrx2?SNC;4U?D`CKx|{+qKEcOTEeH-htS^*V2*SODI*amrk@@kfAJTct27i4+g5Bhm*I8ISEAh&L=Lf%(=an(! 
zZoffD2E9korfF^FZ=8Gn=aV`vQXOSN{2H^Wo0z;^Cqpx6GybhQ`@Yz_)+o~gdtLsZ_M9+7+;+u-4zV^f)tEiq z>U1E43Zh@i4Tjvjs_idkX7R|sW40>K^C2|MYmp-v4x^+OlR==uvk=|MVPRT8hDctt zg>x*)u#uk{Qn-zLjp8Z|1O|;7**u+1l+Iw;h+Xk6U?}= z&cZH__Cz>!LMj!;g+c`Hn%RUmvIC|n`2TJvZc0Sg^@oG=cuct6ity!$o`F%Q&{?P{3j+sX79_(D?lEu#J%Y^q1?P zzWPUYCnv;@L>~lVo z|LtNL1;@{ZvH)Twroia%>B;h`EL^3!Y(Tn3Mmx{nBgdcRqHNUT2OdXGoYZJ&!LlB$ zP!(A`U@bt2{+cx>t-X0M_`PwV{oXI|_pi>SDyqxhy7Wj%vn_1bHK-?J&V2!S9_cl& zGHUC6S))C?L`i~c3993X@HP>p-kj;aub&o|x@rNB_G<&$&khLE2u0AE#-P>`M?kdxEyyyPE$@G z4(dt-B5~r8L3mtMkQh0!t9`=R2)y5TwzqC0N5;5+BC|q>yh#?Ee|PB^I6;Y4vxQ^s z+rmR)ajH?ZLvD&Z4|ESiebJW}?$iYm{5*wamdrc~R3Jh+AjCj`|0ssnD#AwLnf@CX z6b!sONfb+ej^=qnhXC=>xn4|cIpGXFvq&WMXygO&*bIky)#+s{B#Zj``t14Q8TnG} zFSgrYl;0-?Umq4>s`0ywo!+)uoKyaZ`o?Y~*o-UwA3bT$7+7T?^V$cnAzvEK?O8Gj zl$K&YvYa(I*}5~(wrrYYd}Tz=gNyPUpZ_jA8!0bhpuTvLt!mHo_p(n4{_Igk4*5(^ z30e|?DW1>@HxT+-Zcd zz&YI-`>B0F+iDIk&o%ayvT>k8d=3L(-yOec9h;*x_Lw0mKyZG4)o7Gw@es}gW948Z z#ps;Yd?}G+qd+>g3ZT>$-Cgr*(+OHDJ&nAejF7Tyh1KsDp%Z*H>ai@k41lY3&l&YJ`TVEP z+dEalA4-#dL0%ceLsMB>9@wZd*SfjXY+KpjbR=5|;*O4n2G?CM%!^3CeAOX3nr`n+ zL^=HyX14KR0iZI`R>yjmHM6PZPxj1 zsp=#F(jR6ANf1C2ZkhOc(tNebFCjGfa0l^II6biP^ zxHhmnMDK}C{f^3b%$m5vhxUD*DC{{xQ?4nFx;)T8Ou* zvQH4Av??8ux^RsvgP5lYpN>jHB>^uWQEg=r)-^NjS?Bp7_S3jBmJ@u5N6Lfh7D{&w z&_2M16w9B8W$!3QAx7m~3H*uVBmd5r@83NKEzNvCeA7a@pJF!@GX`eN5a{ob%{7E? 
zf7FY*S_RCM4vEdpNZ&78hTPvlNi12g;iEyOCvr~f%G>lM0_UDYiyQr39CvjnQ2DS` z(f?<-E86A=cPg7UW>nQ+=#j@!ac?nb=0=>8lTQhgnP-wW?02PBTyHu=;F+9{gb%MC zIPONWD5j+{W`=l-81K$on}~ou2VAfPJqznHG)c=3EE=~aA!|YazE^%h{Zf+xfK0y; z!xTASVVh%RgnFXa@`MqQO!+EuId7t%|-to)W^rkxd4f2mGqZy5`l^#R7#%2q{SmAESu=n zbffL2cz5Uz=?Po5>E_6!^Z&5t{xcY*34Bii+1EnxBX=1FG50Q+6fE)NtzVky*@qB| z+Hf16R6cG>YfwpTb7VlsDR0p5w~Lcz5F;hb_P zgD$Bj=tO#M2rkyl1)6lUDBo^)y&3VftGB)>*)ULY{Th0Pi)TKVv$!#CL_KLt_4=iC z(+s+d$hPt+I69_CT%`|sgE+{T2%zqu@%w@lLIZ&=RVDkGt#<4( z_Qo{Pq;LT5o|cJS!R-MgfL(_m|x14z9#gq)9^)vrBX~B8W)AV7qJq{ofN3Ck8KvO5G~bn{ho5`_7Qr z{A$*ek7JdET}p}fiwbrAzm3e$HeJB(frf4u76vd*ia$Db08M3nXUe<*D{LbaDg1Nf zBRglCF}t`3$Fg_Pw<9N7pO0o$y7CulU>RaX6TXDNpEELfyFLcCh96o7@qFkiSh)0f zNMj+C+}9)6iFjbl+0hs0{=&;m64noV#JduD0-n_BK8!xv&C;(9Ze5Lu)FKtsa*+VL z_(+3%uxh$&L078P-!eiQbS-g>q8{Z6mYJF(Xou$5)Wyc00=4V1Xay_KMEwk4`okXq zRE&vr6^`+NEUDl%D*^-dtSD7t9lCV;NkTW&zxI)L2z*U#rWP~lbXk`T`uR`ywO?Tq z|E804GzCDdMoBA}#w7kwHeJ&!A*0j6T$n2j=0914Z}W`a4CW~PsRPV(}AHr>!#0S8-` zpN>yPv48h|-W}-pUqKl->+6R|UiOhZp~a*aLBqw%8?p#uTn9Wq+yroG4->4pq}l;vEaDw3Cn1_*@A62SUDgKkX) z90ebs8+q-3H_bLZ;?5Ob`xCC~1<2J~8ShZt%UXsdwoC<|*Ma;ATr_O_;)@}KC@n%F z=7|80FmJJ7h`L-L_y_qUcr44^?hJbygB|65Wo3nH>`z^n%nVz;BDo+)r;AQ-;u^jH z`b23Apkf%;y7cb^ySJLAu8f@^!+r+=CVrZ(x<=rtvrxx0-M9cUY#4;7zl#3Yz&LAr ztWFYuK$7_P&93!=x5+VC_ixT<)z1KK3Q+DUm%5${(2r26IG9-5$&kAM&r`(7s2|cd4U@3W2N0ykn@>P0iGaQ<1)3~nI1J})pGJc96*$O4C=C^j)yZm|C zD(JJhJHLp}*F1B_e_dFq^Z#O$^wX6Wm5?b>aHcvHd<3F5SS>O;1|MW+#phjqhgs`z z5Pgk37uYd2wDmgGP9Od&=aJwAPtDWtQ$!P2+t8v+Sy@qqH!bL*TVF4)4K%@6)~Ipf z{>sHoYp*ratbBcEP|cmlFa5!go|`>^|N4=*;J(qqkyL?snj{BGx}v#Hrq`LQ5e!?O zD#Tii&N0?@nPD#RAGU;q zJ0L*fj=rtmQLPFBvtLzrR$aneqMCsb5{3bAM1GPxEB1% zi@5sfh@Bw;VMdRrGtBBQ!D!XqBl%ZyjqtXr|J-f$SY(S~8%8{-2MjAlj!RYiXN(<< zh@B(A+z;!iRi*Pv{kfORFjqQH+JQznotSO=?~HdfiQ7`ADt^Q~ORQ@QFDB!_@#Da; z$Th@=>5^6aKP36TpC373jVhbXM3eBClLuLsJcMa9P#a9NFb>(`tjv|+rw;VpGGfV# zs^=Tl31%7P%hXOt)er?C-w?;ti9+fnLWuCI@f9fEbB9bqTmR4N4qm(7BtSt<)!DaO 
zX766hD5rvYI01@Mjjw2L@C+prBhdee*#S^WYR|N1d9m)j5O!y6`lv+wbzWXDKWSNGvRNLN3f^w!?Bv%NRV_jj8F~inv-du$c_o1( zh|Rv=oYFu}CDIfGrD|0Y|M5U#^6K*YKYI@Mk=q)`7nMTjMX0^HsRVtZQ zj@Ei6%|Lv&pch6QrqM1V8~n|=8yFF8dJmXHS-_!%D!&$LzgR2Wl@*oJv~QdHK)s$E zL??oKFck|I(#wHFc;?ZPx$7xgUgK#$cN_j+ke=M-FId>cL_RdEOVS~Eh_mwHh5RVA zk=ehJwFJZME?2*}kLNIc{t?O68Z4?Tlz_Ec#K^a&+&4b}-j~WWy|2#Hw%ErH5EGEV z^XyJi{y^mEIP-&fnGMY))+6){AYrUhZLWh5|2@D~H%@1=5rg)LAo-&7rFWHDTmpjN zd&H(w`!b%1?B=#+DeE#_BP;;8sLOCab`9wG(g+D5DiZiLAG%J!DWWtvKT7Y?(FNPq|>yATepP;@W5Wv-Fscosio?Oz0fu)oapm!(>yOC6JvV)p?Sk z)~*ll-*N1@CDU$w!T<45eo=w+dOZQ?Q4t?Zb^)TmKw92bFh^712>qt-gtzv#jxx@8$$1 z*Lvfgx`tc<>niA~X_tRgyypp+o3F6nEmZrvy`O?58sfg@70zm9yW;GzeT7WCyg{3G zVM%}FUCZ0*tONWX2!@leGFrUSnGJO*@wUk1f%OV3s(?%hc)MD&D;S(gGTE!n348f`ZZX5173;W#{WLZ!o!f} z=i$pkb5lb=;t|HF>abWtWATuoLduQNhrW=PT|Q8Db{e_5W|e#mb#|S3cp*J*Ya2;e zZQ5p5{Lk#5QI|~)vkCqVo>=oQ$vHjk6Mev8HCaB~! zoS;qAq^19+GM9zU)92l!&igIay`+O!!ManWYDrI`-E<$OCnaP6Q#3IO-Ef~`XmN%g z#?t7h3Cvxh(WG%~@XY?9wLJ}Nb5A<)4{duv_Pmeh_b@fO+4A3eAM>gqrBNp=mwf|b zV5d33;>6w24LNdZ2RgfRE$hFg&OkkwMZLI@&^(H28$#n~59Iw|TOpP3<4Ao5`21O) zU`od0!(OH%!u`rBOh3+9stKX;Ju&IX^-=HqvzIM4W&j(@;LU_tz6bfmqx*IJhP&%J znvL*kRQNOZql6Wu0AX!dnOhtwb2Z<<8|SZZxd12DfY=tEsW zNB$LL<80mF#XFX6u=U6v|G7Y^-;_Ah=Kkg9WN46xijbcxkJjz_PU-tWh!tOZ2qp_v z{@5y*9dwB0AN1lOZoi9t-S1LskP2>V=1n2oul{^DIOOWkbAQJQ5L|NGIR1r9LJ(vL z-a4?;+xdHY>mFXX%t#aaefxYn6z(2$bhBb`>EyncG+Ks(L+Nkj#ENmT?ZN ze>RMp$`Neu{V{xwL+Di9CE=^|e-J5=RG?id@B{aJo6eW83*~e^;!X!1u3ybisZhlN zxy+kkc2ZX4*#;Xyfm8QSkH}*|7MfT%LC+krd!CY}g{lzTHhhvaoa>dYNZxD9|9Yhe zgad&<1gh^K&bw`D8Akc|rW8;N8khl5f?wmFTq@tee=TM#Rd_BhLK3~jDN4V%IjSzw z!PZC!2BJ6U_lY~S3**_~moLybX?Y=YgP@;iZ36gVhDCu=Fot@Ith=&v`Il)8wICE)y)AFsqY>ax<3iPekWHf~vczU% z&{|PQEuiicXv?5#k5@-xiu7M|kmg=x*Wt4365nYl9#@El)D28)l7C$ga$i3X-^zVJ zuIov2SN|1;jI;FndO{Tgdqp^O8*8n*=V|P!lVrb3bAlg1JvULz3a3rIy6d4QHy6L z7-7uO1Rf@U+%n8ZdqW}`XWh%HpXm4=(YE85IF}NA=4_qWo?YPUU-SPK8tTek?Dm%! 
zpW-XHCwQN6_9F>FTiZ6rORb7QikX3ep}*t`DGCuGDtNt8A9wP1Nq-%Y-$?X$La3t<7drlhSU zVLFjuE|yAbf60#s&l3yPhxw<)*oNtZbzcn$R|BVA6Ve^dOEc+-Xkh#f$p>39m~K2q zAg;9fi#h2C){OAEL$_D8K~&fN`Pzwx|M4iIstjXu)sjgk@0JZIgzYwgsR@s!U|$<^$pjNXd`CSkpl{^#;3VI%cyaK=UsJIvU4sJdYwx-5^aBC~ zz|jLwpDR9zIBN-aU6=nz509fcU7LNrw==p2gUmC+X#N_zmnU3iBi%!wkJtp@%_V;))uMAZa4=$5ZmK+7;9U=gZ5gv>Z!P)0vAiy{JF^ zogMylJa0^Rbv;Htl94O@<8j`3BUeH)dfSaG{|Pc9gAa5#NnGAytu%R%?(rYL4lI1g z{Z6PQYsk(p3<5dHnF^=wi#QlNXXV*u3I#_$5Tb^qdG#9h`eWG&j=N!ACbJv#o2?O0 zwjy|yMs%p;&tvVy$6kM??mZmFm;0sHNWk^ya{NBFXkAP8DZsFlQDxG=&q-r5EG(aQ zje&jmxmEb}3YVfM+zRNc!a+?WhNLe_&@ssBhvgjZqx`fL`0Cj4!s+coJpqU^M({O>Na zYVj4O_r<@jbIR4#=la#!#a{Tl$ZJjV7szH^e?R?nuRQQnU~?0hBMv|vU*ju?!=+C#8LXoQ1blegjp9|r${9F;-nYjd;^Va+geTI`?s66L&aKbNN zbqQ}!e}+G7@XclpNQ(OObHWd{yU zH)%8Or}EKi^aOyGBezpLQa4iV_52=kieCdpU%Yx@4a3*Ga}6(VP2aX;ol8-ygQg6S zn~khE^w~SW`$JM?!yb;ul7;LyFOh>;6HcRRUT)uL_D*u2H^FXEe%_B0%>VGp~VNjj6R|n=FYlM5d zVw2;Nsr*_@!pOJeGBY$>zuH7K3V6`In;Z{+IV>$PX6q$5{vf-GslO(IK;KZTkqKK=$>1%gVK0`ujX% ztm0|)qCM9^@45Ke{kpdJj_pKln5bbm`{oIdf?Q7)Qvv4Ym%}6a2W!3O8?7#u_+Hy| zH;v)i290@)JCk*+o@nW0s`9dyv_Iri$#062{*mEG<-k!?_`9Q_c&Kd8 zq!9o6XeRnVWOQrIGioyrC2Tn_bZZu_vrH~z|7Fpmb0(&0JLx=;LlTQ{&_~nCFSJ*P zf;VjI`6vl`Nq`INgj>w?I@ubV|IO7c_}3txPPbdhPE&XMlq!M}XN);y3J2}o#{eZcsnHhQ=4PM`<8d7Zzhp=EzL1Nu+o~M_091! 
zG9QNa3hk-N#6vA0<29jH(&`htxYb2_?5)Ar5%N{l3GCf%@KU(Ba~a_BmVoL+hCzjj z!wZ~7))r0Euw)bJqu3;Tuo|%qNWt258P~Ybj4YLmd6Qf=OgG8r8@u2fw%3S#o#>PF zKe4}%oh2-;>m*i3s*+Ir%)2Y*MmT-9i9*bNKn$8Dhr-as*tz#T^KLHduY`|o@!JXbbP`1zX`ee zRFF*i<gxopv)TsNTIa}uTjWMw4ao2_bRwj75v3johfEYe$QFI6) z%(}WG5OuldqT}VHTo0^gSsQ+tFKUt*JWQ%3HENIk=SZ{^{WqnXf=*T?_@?E<=SFWc z;9fws@S>)>6Ry;J?4b*zK{yi@Ka2z~dYx2);}_9q-(OsOPrQX0hHmwG z9QA0tG^0TY0OFfr>6X!a{5WeAd#ImdmHYL+$d9F=sFyWuqjSg{=J!{NW~Jph24?G{ zA(Zq}vTFX@wMU4?_J2y6D1l-y66$#&JvT@8xjQGr%UTQA#{97mC-O>*A2Ru64j7MI z7i4ZX-)<6Dqw@`Iv0SpL%pM(_!{_A{7ikt`lrMyZEKT431x_vO#0lKOJ^IaubfCQN zvPMVm28*0Tl?^Hj({XjPgkufz@pw%4FEbvfwQFuSaBxaLLxbGa=Gab39yg{uny6>E zxU;p~v5sX^rbXO$??1~uU)HaS;i^$YcrZp&?T-pB041bxKifk!VRx5AjE4Y&QC?^v*Uh zge3j>}k^Un3yyr6_t;uRz2oIu#pWyiz=V8XqHrzA+5d z3`QN8RntD-c~GvsOl&rniK|V-V(A5fgMh(hpic+MRMHkl2Y)J9;irKL3D|~{QcE!t zVRfAuf*xg|78eoA!x!A>`#8*@4Nrqh75!IYjei_qMhzdNsLRJUKvYqni4>LA+`$f1;c|*+BcyJO}ZZ#KeI7 za`52^`hRk2LY@(uH-wOinVm0Fc z{&4XUK}nk{erLtC)!&WC6mWMH{d36;6NX45Pl zt$}v=XmRyZ^9b5T%0rONr(=}xD$O4p)-dBpJ=p5Jeca^EDp?|7g5igneq}AMn-`c9 z-5-t-jepMby{ai-a<-wWmDT2H%Ql?wEMh52G#j!n13q6H!8^_+umH)%*4CDjt8Gb# zy5zTHO;P5Tx-&ep(hR1$eLOeb_rgfy!zZq{P@*}KjN&;crjXU+>A~i!C*`rfh&jOiJ-wLjx z$ufd2q(|2_>=fRgK0u>UEbUgh=Y>_pfNq`~(Kw8c(ptIwXFv>BS4?9!UsI`r8ujV@ zite9!33l&bPWC~_3bf}!Lsk@y_aUXvw#}`jj7eOK*z6F5Vkqi_Rb>+Qj-j&Mf(K1I zZQijTQe&Ej0nY{BvoBsvQ9o&|>#H)&P}F?^>`Y(f%5Y2aEuWvtGqISn8VLD^DlqG5 zDmoUjX$*C(zS?$Q!w%e>-{(7Do?4H4yKR#GaHoY^j-e!DRK8^U@mo;~rdw3&BSh&y zd=M3dH^VX60CjY~FAD(qdA#}c+~@^gvOxVCH%S3ctiJ69=B;lZmw~luxF5sk(4xTO zMb5a6x%QYQ_8?_t{zg^IJ6}P{K4c8`w`TuBmn#8o zc!hq!Ud*5LA^N?-owcN;pGPpFgIr=!Lkctj;|3SruJK|Ex9b}e^}^R2NG=t3u{nv` ztc*-IfCFpDe}(@H=KWEYLIWRbln)OMkiYhzGXMAd{7?Bg8N#*5SQ(x5=4Aj38Z^rl z@03AKz+LUZfp5?#HdgAl5$*p7?F^1uf)5Qm$YX4PZ?q50wU;K8a1qXF*l0B|&)ITe z(=eo{sK_k-v6BUL(U-RnH9N&35Wsn*Z=>`NM9emsS!U>gvnoKlOz>z1_lCCE+U=LQ zTghS>3Gy>0>MJvN!g}~xVa_r+SXNeGwXkNp6;l+etuBMG#92Sa$mim%mF2M*py= zHAfKP`!|QMTvqK&aW!YQcCSnV$j3!(*3&aZ@q!w0c400)iRy+V4bjuFfl<9p3F(n@ 
zhGk@|7W=xHZb;k4;mU-Q^K;(i*L3bmL1lroOvRXLN0R~J&p`VVXY(IF>e%@C5sOO= z1FyAo3=Nxhik!^XP@(DVeh!V10Tw;+-j#EO&cn%Vl}voeHReJw^#-cfw)L#9Uyr4IVi{>2E$k)%?Us6Uov|AN|hMRx7&|QadUzIRsMAZ1QH&g7KndIx;yNT#p%S)CKiRPXds1J zbk3A?%Xz0Mc+5Y%l@f&$Al4^epa(%@Tp>^iYfuR)m95~axoOR>kBt0+rk1vf7VoR< z%*4=8s9wqYd;SW};!xXzGSz>IRf6=v5RJVY9ztS3=Nh{2cLu+oR&~KDrnuvna-P1u zgoY=Gh0OpRr@)#wfptVjUq3BIubE_uE1XB6hoL87ipz31KX5tedx{SH-{DJ&Fi=3w zDQ~+sTGv*;A_b0yby_llMOs`0iL$O0A|2b%px>FvW!n@b$16J;ad}Z3yL_{+Y+&$Z z1s`=6ksS$e9gk%7t`5R8l^4PJ{zml9wUCj2>gHPjLZw1oh6P}$V~}1^RV@zazBUPj z_265;K{gFM80g?<`so;R0ZZ87>bl49AQzBma8=%_Pg_j7z3Ys4s#mM^V%7nt<8gZsiNg|e~ z&e4+b+y zGNg|X{^HZvW#oWK)`Rjht=?W|h~JaTf`(72!`b z=_br_)y;X;4qJ;FVq-g#TKx3=P0vQSUs!CHrtE$Az+^0+o!XU^XCrVi^5lR$9>E0@ z7=-GHnM5|(C=QYQw*KeOA1sVK7>Qb#Dhwk0uo2(F(8N1xvUn+yL;Sz0K#P4zt? z*>ed=|De8XiQZgaBEEk0SN!J=W>fSBd3yR%XB~qjFM}{A5irTx0MI9R-!f24u@m*+wL##rg>AY@+hK0Iy`kG3sbx04gIHxxw zNUMkX1!4G_$0~wul(1Anf0^@n3eXZZ4;^LsgMvJp4>_m#~ z=Gy`H`B1FB0zX5Cdh)dPYOAPQ=Rjv<5@)fa>RZwGzr?(pJ264q*>kqH8?yEE2GITI zT!CZ8+b{F#A2P0wRRoZPRFi9{1B)aORV)BqQ^J!WkC<(j z{M!zFh2Sevyj9wNqtAaW*cJqk!pJKrt+4!6kp5XtOf_wn+}Nlp?CbShTgyqlY+5jy zlX3YQNg}|Yq=`sQX+f^2Wh21J?Mbk*xt`&^7lgZ|DYD&iKy8` zm)uID=iCV<2H5fD% zn8~MvTi>pcK5w!pin!K%U3gqNdI^~fNm0hEL|?2dkz|oAe_okWEtE?#TL98Ki}Afi zmC2xXQck}zO9|@AT*^|+Eg_{f0CS2UeX5W1E%SQ$+`j&CS$H60B4C9JTwI=`2x!2L z)Rq<{X|5d{aLmpqj#&>wwaQ#yUS78H2NSk=QxwXg!$f}msh|%8QQ&R4?G+YsMZ|O>U){)2t`L-srS` zIT=axxK}==_f*m>;^YT}yvz}!z3n!-Ut8I*PB#^sN2657l)ko-3;?81V{K z@(E>A!KmJ}%|MEhpKD|W?M#;^A*5Mvmg%!M{av)0TK!GxWONqxh8ogCnkO-RFAe&o z_`2?~X8n;zjXXV=(7|v0M`K` z%ddNVXhOls7z1oK#)s8;awTYf+%^8*21^Jr`J7R5GfIqa;H7z$@ zG{R;w|5ozKVR6zJ4bole+gGDK zYL~ZE+;@IEEd%D6S!CVn1FLXHXTR{xZ)vrI89LPi0(eC8+1@ku>dUBK>7sPntp!*{ zH$i%^8rcwyD?CMKDpay2>2F!=@M_RQs+s~Ia z=!gQGYg-hPwI&;-caRfKa&0D5sQ(>u!sG6~c&%C7o@#W=vXLs{Qd-rR*vdk{j zY$iKOiw#VLScmV|QwsKVsjHl|Xdm=jU%n#J()!e#Af1t~L>RugHN?CAJ(sb;NHl6D z|BH$Nna}|ILf8s(@wzQU_zVDgxImDT0(pwk&w z96{GB`Ero5f&wbP0iMlx7U`#E&x1rPs&+Uk4=E|?hSa%rEK-^SeQtK59IerK?w!mB 
z#WgKnt7jQrt3ZxRu);b_CvVNq#;i!%7(}m8u$yRk)*!jp;?3gu*p1JRON{-#OL%*B z8u+c?Q=P7sx}aQ|fel3l+HbreGGxZH$jjRyPC_Q=gND7*E*T!UGd`8vP;NtwB-}NO z+Z+9SVHx@7O+7JoWkjRl?yPd2fkkeCI}d4|^<28aN5hjv5;(98`U!~eI9t^9Rfwl{K zzK`Eiwj@K?BeQK-`2}Y3KkJT1EJXf)%_CJdPx~LSN+QDYRZeq`)Hx zWN{k*?i9GY5RC)B-8FD0%qLbV0cW-|H)wdm&)Yg>k@i``aw|N3!13tpN5k^%D@Rru zs@3Y;++4X8d`GN`a^M!#vwh8&opkDHDqMGdH)$m~fG*@N{F6u!^A2Clk1+N$wy_ z9$*?A0x;)4{J5~Jg4Bs9pw#BD^`~?Ci|;<)gQ*SNI6fl*N(gTN@C=G_K?f$rvJpL5 ziPC82<2{OvVaV^HwQU925akvnY(?4fR9oiz)~!2>@3f;pwM*LWGz%0QiS4@B!>WO< zZW+nrDOqg2zd&yQ@u#A`@>d-aJ7le!VI-q#O!?ae=WOf#dKbyc#`(&IRAa2)BgKQI zW~h+0{^vI^^~dZ=6wl8ET!he3+G^Z7T*&1M%+|mVpmsc5OZ3=vw?-^raHr{XZDdnv z2uhe7smgK|DJ@r1i6!A^QQDyJ+DkBl-VnR}AVG@!)VaB3LX`VfZw-9i$4mYIO#OiB z=8Dapx`_7|O91ft^66B~X}OJi3*vP4Ml|99=(A}#fJ(bcKM3h-Ew!vH6K8ueA}Qu@ ztFUFg;_va2`4#f+IWWkwe%#qjrT+1P{t*+ci2>7eBtyeJ92P(jLSgAX{o{ zs7;zPHg)eb^7#CR(m{Tid6;hVpkIZE6%3%&=5Abqi{!66}j*5WXQ8i)LKjOAFl zEEGnSimCX4-pwH{gK*QWO-CY4>7#bDxplR469!{m)X<5eM^U+6(TNzy9p7YH6BZ!AkDt{a_U>gBpdu>Zg3$&;rU?&-Ehm}+w^%VK#3qc3b9sln! 
z&3pr{FzcU@9!K98I8*+qLxrnP%%^iW)C6CP{|vQ_yVKWC!7Pd6gjewC?@UcID|%%4 zOn7p25pwI?n0nItFj#76=&IKj>BSTh?&(xsHcI%&b$L(SJ1&Th{?fQX^AqX+?H9Qq zV+7CY=f^ER_sDD*^r3p;qgr@3oJ=&APS^jUVw*fjii}qTBv4a8x0RBekdkEe@T972 zITGHzVo5`T$uDFo2sUE=jo8U82tY%Lo(FOCt%;n8kREJPsh@uSRKO3U&mrws_O(40 z>|oG)jycIi3_V4i+>_d1LpLa#SlQS&Zs@!5`m&kuI9s)@$N#(l6M9P z8fg+yEv)v&$#wU}Ya>@%{t|rKDNMH%8tsMS5)AR$1ELu!wkZjiLK<%g5m^v{0@6yg zhARa?Qy$V779Bh^>m)!VmT&HYnn!tPtM>lh&};|ohH%;-%T)!yV9l#m7h2McgK3{|}J`%UAV^>V7FvgI_54i+w9jH)>;e&tmYsxStYCOrDf zUwgY%c085@kOK3U>*+{YtPw&8(oo1w0wk#Ao=R|kdWMr+{mY2NgIw?7QugT(Srd@Y z6mSNc2kB!V%YNDZyZhbSJ#bfR0pVv8>pxkzz>0gPa(DemeBj^ww~c93nbdov*Pl~c zq`Emdbs>L{sK%j5Ys8zC-v1TOyoM}*ZNDZs$_tw+F;Wt_zR*i`bUmc}d}n&zi-b4S zBD*B3D%{e6oXo3k%@d0r$`XcrUZd%b#2PQ-c>C;S8L(vGe-frMWDqVO>F8g4{ys zKMJ=@8wL-Q!VCP59~|BN%>7vud_|R*7lT25Zl3;jcE-oYKaBJtl=f)yh{45CYuJ+5 z!zb1{5}f0)?05#DNps>%d-{2o`!UBzm1sbf_!Z8_#^{@h?ojsX#)jc2h0xBKY|7@N zzHk8R)8vPRuWYF9toPvkP`?+-JfFWr*{ty=?R9#9P$|~8N?!I1w)+mMWZF*)b!5Xr=nt8Jj}ra zA;yb8*jyccOh;eqk+4X?J^9H1hw&gOCyr3+U`dczb&$a$F|LYMYsk4%TBK+XTb$Kl zn&wKR+oJcGb4YOlyK?~7L#W5a-mFu4W+q2sk z$k$Cw`q8du>yKCI*ErcfNVeAz>QKq`*AWw4{HFxV$=Af2J_!!7luQo5jm$65h}?;- z7{vUAu^^r&Mte*skOmF)3&TcCs}~yGhsxEF+x$ zBN_f4ygBd8j(0tU060;Vqifff`A}`o-5vAlwJdRQJ0VDg#Y1fHxinNYFbYzc|DDSm z3!k&|WSnWUX!$e#J7L)E_HY==eGw;R9p98|Cr%x0y6CG5MTquC~Q2!rOU)>OO`@9S50t?dJERBeCcStu#cXvrQOLt1A zfOJZCH`3imOM`TseZF;mFJS-r%-(a&6*JtJau`Rvloz+F4o!X!ss37}ajH0{5#xT> zR|TO5BWa8aMQi-h`LZc4Gp?wUj9nkwZJ+r3)L?kiuUFOB`&o$e+^p1!YF|i5gD+>R zwz=BSCe*5FaLzX$CfsQ@Ja%GsP73e*e&q!FA2<2gJm_pbE({SWh2E099{d0@DHQKi`fsJFf zxvK*Y@jFj{R3aB=UA@Y$rOXF$!h}(jsn^nzabuqr?YJ;Mj_uSZT4cM~Rn-abgt2zh z^39)C-u92Wv^BcD`vj@05S@Wnr%}EUHZBSmia{lKAi4hR5nF{ zD5EIGoN3Wcm1`u;)oFc-Br4LZE*0{9WasdI5;))J$iJE{7k_!th&=iMKUAfF{r<+D z_#Onmh22AATrX0Vk8# zvsETtv&TW%;QZoQ;g8}c+7r6Wx!XI5cif$o!QDL8+Z25z;-dJ@A)(#MRqtp$xw_(V zta|vDDqQG4x*X@``P(Iv->F~QXUy(#Z|6`0Hcc^KHmBXblcU2M1~HmNxsA5eIPcDA z3a)H4t-mLcmBi12o5`t-fd6uD!F3WF2Gyg$z!=HC55{^9z$MGFukDG{i_Wpn5&}Ky 
zk-0CDP6#{qa-V=Lrt>aCu1VZTgVhiKpxjY}Qt>9D!o+Iq1{#~a& zQ|^2t@Wy+y>$TrD=w)=YdpfaK-!w{Oem_`RI9QJjb1?Aw$!~MZJ){MB|KrJpblVhJ z(!N*~nXsb|kl@0oU|I<*F#cJC2RRw^{y%>Z5b$L7WL8xu^q8X2$VeeHU+V>dcC2+++S1|l4;PT8^_1hz(_Ala3$4~Z7KGkaGP&XO~2z5(x zQDAiQ{4^D|v-AT%t&pcxFR8xiE3mG1VFlT+!S7jP+lW_rQ#``;`nCP38}?bWNS|c> z$o{O%DSyke8FLr1;SqXUy*7H_6$;Yn)rA8>Ga?S`uOmVaw3G2-oEE&fdfChEIu7Gq zT$?!_E0zkcwnb_NEz_l^a(K!07#`nMYK~g8=Vn*%8p^)yKGLYOGywmw{w3t*)wF|9 zODhF}%Q)%LVaO41Ez#ss_^M3AjB>oyhfp?T+aJ~+jW2i4U&+Mxt_>JDH=ku7w&@1F zx_rRS$N6dn$MIWW=2lW@H>IT=-v*xw(~QiAf|#peLwg}Bz01zI(YnV-{4Oe>4E$eH zo41e_dQeSlBHjFcann94iP-m=`V`0@lhB2##M0DK;9!VS0(2Ny7W~fB7=8pd=!#J2%PCfY?K85+=6U3!CgV(S&6@#O#t z?vbc_8eFbEj%sw~U;N8c_-|83GP=^esgO;s`6ellL74ly6LI1oV)N|-CLkyr?w@}G zt@(lo-UW${7VQA@_$9hWCuwB)&C~_6H%9znp({lq4W+k#Y&jH{!NBf0Ne2m4y|1!H zKxQL`UDY_yfu74DW~#(N7aGSAF*GqTd0ME{sjPmET3j*kF1GA)u1 z^=pLe6ZXx`YPQV#Q+ML%-<~~LV%p_u+`=0N!!mkC z9Az_f_*CU8oyxkfX@gZZUiy46CsR&YFC zB+Gsa=q2q!I5(vR1Fb=qL^6_a7AO#l#MTHY&==Xflx@M~+XRcmb^xb)!9^VV%z=?H*W;z$e?M(aQI9miJ2ubn~)Rps-g{!sHfzP=0y9#23A ziEbqrMoKM`z|siV21-euHB#+4rJpEoj5ViSDS9S6chvZCA3Y%(pA4S=a7dGUZN0Fj zEen_}aG-9lw7R_0d%=W_D5zQOIc9ajANHCMp8-3*yTds_HL^yZ+U#X2Wz>QSLEgTL zNLYe!D2W%ij(E-X;BTr^US)9G)&4KXoGttEZ6{9sQ2(Iw?jFq~%enOM*tL3dr+il7 zr>XX9odrzIZ7vo}vIZJ2MNBHJ*EDY-suN$`1~f*j+mj~$rzT(h|4$szQ*6n6`E%~1 zw>3w$?*a<9B5mS!yW}0;25~bdXJtrZKO9=uB0Kv4iSM-E`sbX=x%XROH<$&0@Q4st z8R!s#ED5%<*`|#M_)g%+Ezwx5jcB4J2(@3NMM-x5bd>wJeNI_H#NQBk@Ck3oJeKWQ z&okCYtMP}>YEKOn)1Lw7abjP3aCD+bs6{%je($UKgY{kHpu@AsWi#i<{-)a{#?Z%& z{t2bW>wf)jjFskW(zL`~9wp0lnvDi1D@@N*b4*NuzxRSkwYEse|6)Toc;jj2DCEaj zx2oDcU!4Ap(bK2d8KD))yK1QRt!!;<+=urc(UukiL!Hno9bu2g99+qtr~7se!mMF$ zTAd*NT(=`W6<<=Rgd+mZ&fSLbTlxz?@3J=ZLet&#nF^tIqM_Vy({D<5j}ZH_*JPGalXB~WwzJ?>*(UvQ{%weIb^+YcD
-Fbjq|`TBYjvb$WW*FfuY?u|$H*XJ)f+@Ew+q4>NrV zZsafPTHKwu3Aa;HsIZi)6#~~$#r(1BwIk+E5PWD_n{@l-AdzwHW2J*{xd0#EN0I4g znXg~QHYpYk+O`thiZB#Xu)&3~3hh2*EK6G(9Db_rx=l{R>H&UNC(CuN^@6Q1ZMn-_ z4o~ao+y5Jv3CImM57YWy=fxG*QW}GYu`;0umnxMIMfMhBj`E0 z%l$}}Qifhs4(4G6EN~rffXSzhyRXDSWAi=e1|8vY!9V~tfN``8XR4*PzdQaHY%kO< zTi*UTaG7?g@bukcBiHxN_lKMmRSG+L^z}95VsMiS^iHW8{tJXrY-Z{zN1DigE zV`h`!?x8MqcKt{HNQ)cu%kb$JJNii!-0E|Hh)zzz`{j`fG13dPRF5m~*e#ULGMDgu zqu;9b3^wJJnHVEsmksr>nJ#}+)=zKERN>~f-)bud!hrKC@*%aAe zMWBa2^DW-bc0}K7puuDg2h<2XY6Hwt%|nk6yU>z2p>2ChMa*yt7K={f49#_ngCI%{ zke5M+MIL`4cC782O&N3$^hztr&TSi$mtv4L4zpl&P@#`+4Celv3X$ zPETLJX+YN#yh#SvAV%_-_kp6dxObVd?Ywyu4*d~rR#UoyPJ!*9jM$bT;{8qAt~MMx z2$E#!oE^%{^K?{MPw=bOD~(06n>W!5(J@b2RSYZ>jr8p?52aJ5g5%G?ZDYX%^Fe4g zgCu;lMpSkLst<^BV|w#UiQ#B?%h~&oL!p^?g?m(yS@F1W< zkT6Z-M_px@J!4A-#~k?Sb??(nHVmc3FAd0$P@#9mL#N%ITN&^ zVq4pqElb~k9AT0%_>k@;a6FzFHDIG(2Fayh?R!*RKLM8snS+i^7d7mu^d#91eLypn z30aBU;LqW+r~8AN1(UJytJ*aegi4d$p5qg)XJ!I4<=$vwzlAyPBxz= zCp?}by(Kn*2vq`2gSEzJix86c@91G<@(v*bJ;nMlV?piK)&be}<3FZN9&^?>v*(@8 zR35hojD9VSKvE$;U6BO?vD<7AWVqvaxMO~|v6wKI!Gvb|2?lBjF8~;QRMV~YHeXB? 
zi~W<~_b6KG@x!~G>2JxAWP6>kp`n13JRYg!r^_#Um<-i(^tgVPd;<%2 z)Kw`c_akk=#|SdJ_dLLvCS~;`7gJ_eGo1Wu*69U9c3c0*jO(>ePM#8=r614hwBp6L z^i=LZuSdlf(V}0n+_I z3DZ*b?t@)sgd)W`5616Og-$#72H&KE&DQ>;ynSD!(snKqyIxn`Kf%)8Z_NgNvVKxN zBfq*>iVY|kFy0jx6;S~>$?s3K!aF>#&F@}cp5WTvwJGNCA9X@QGNlE+7h58C?tTaE zSNZOjPhC(G{rtR|^jA_IhD)VMo@87@2srqR7P<{~`a<-+4hle-o?fA_}LylYUd)v>Si5FX~ z*N4*uj4QCJSv^m^WdBPQVE|9+=HXAcd*DvwKx?y`7^M66prtoTx6>rRO&AX`Y={q1 z={2|8B>vxvRmwpaz=Fsn)@=OfchDBWuY!J#(J5; zNGAZ4?`&*{Ev7=27IpS9BVu?@{>UcWY-r+tz|EB!b#v<}5$^PJz?-V;vNxIl;2al8 zzrsw2PM=~k|C@X_QUkruUdd#|DmhI18$9Mh5exefC839Vdr~|VPDUqC(4N07Vn6pN z4mba;*?#_=mb;mkSL>j*_}j4I?nC2`5pdgPU5`*0+2Z;eEaMFRoDj!I2(B$}Obzg|jj0gN!T0?E&6(sB!4h zrc^U9%89|wre)Cmj@vr|qRY+rb=|R?EdgY?1460xZ`oHxOT+VNv1HFx)|t+io$Y_L zrH@mwm)dp`2ra6JjV3D$QT)yTT?6-1^Zwj`p0o2_z4l>#F>;~9W$+na6svlT$TAg` zBP0nU>QlItpZLd5o#gc%4$x6mlk*NwosY)oA))HfZVP_zpo20eN3QFaCP(9+w9N|bT7LTA+!CL21w}cztS@3) z-KwEp*-no^qL1ivXd!ff|D;4E&|yR56RW8Jtr5aKhUlR%+usIZW0rV}&=vKn?~q-Z zQLiZgRzti!p0IBN_!wV_fnP~nQ}B6yv*X$Iq7FcyxRT(3d2?To5!$ zEYdA#AnKji^Y^*2!-x?L_KE*V&@N4Qs~;rCSaZ=FEzxa%i7ii67miN-${R*+R?HG8 zMLEtt42(s5j%s#y)F3cg&)Z4M3DorbEgDY51P75QFgmTGNOIRx2!XRk;-^G`bZA^5 zF1hH|gtd!;?%R@cC?=Vz)?22ikDh>>4llc)TQRK&l$OJB0GTEagCZ; zsQpcB#+Fy#kod(8g0iO(t%@HiYFKIO(C_%S1SiF_8wq?t z&ls_xd~Q8>wQfO;{IS_mwdp=5E)4od^pq4e#3yHtMcyFeUDf2$Xda<(sCPuJkzDF= z{b!hKHV66q!Hc@RQjC9Fg8@ylPnu?XPPegW~X1b$lG=@GfG{1c1Gu% z*q<7(M}0DO?|?b*N;%FUwpEw=Vhhb?u#)UV>iS_G!@t(0PWCm*2Med^LA~W|3Ln+c zYEE*{yOii%MYk9aILq+M0NyezsAIhgz^Kz3<4AgF8W+9X*4w-}MPGa2yXr!2-<9LM zUz4Ov2PV`bUyY-2J$0j4 z@%9aFus!*h(MG#v>elzOSsN9a@<$#cBoxTQ@Rt4ZhNe84yy@m#^(ESubA1%NKoUl7 zj%d~XrTRc6!?q1lMa?Sh2z$n8Qj zym9CHhoX&DC|{#i-i4Q)tDkAUx=_@#WHemU@)tak$i(|`=boR}ik;c7!*Vl+KI3Fl zDs$q)`_2`8|D^Yp#M1;<6IL}@5}azct#kC&h{Twm0&hjY!@^H6SUm+HFuN_;P%U2# z7^Xh5`-sVzPZ`?xW5&^jhKL+hCqDH(A1-^hB2?q~}pSF+<=nfmA5 zFeN^eLXJNyI=+Dfk@=BnuxX+pM3h1nkF105BkZceS{QQ}=bQPUl>8tf?7=1`0I!zI z%ia>Vxv0-5~Osb~XOY%o3q;cRs%ZqEnivL}K}6zQo$e zZv3VMf6VLKl$LKB`SZnx^^r$z<(i^T(-eg5`-Cxb#oQZc&hqF>8yi(M63Svn0momV 
z<)FLBt>|!N8|OudCig2K=Ou$3&MP}NerNC4q-8^QB@x@BB9rn8j-uNu0qzGT;*i3G z3D$>;8U}Xq*YBmnC?DTryOLHbs6xt04=bgL%f!O=wvb-W`0fP3`q?zimNOExpBDaV zWNuB{!joUfV6UCOXxCnMy^cL96t6B9jn9mru5;SiRZrYTvKZ(`@aM|XRFpjM?XB*0 z6k7uKmz`@rH`Pp);wsXfXDM$wGcut^=mP-@2|0TgY9Pozkwc_#Po1-C1_+#D=y_Eu!E6;qmQmFb1-QN6$%Rj`TsV_rd@ZmBr>8F;k*v2(e!|0QudSU1J0SJ@a&drh zwR;~4ri2_T^Nd2>HM=LU+zDesML;J7I643Gyw8E?Ox3n_^T)v$$8(5ro&Z$){sqsC zJQ8QkTi=%+FYXd$EBE;3^UBLUauD)@xYI4KqX4k}o(EbAk09yMTdN0hqGSGWu|?B%nM4J72m zp`9`g-#)>l!t9B!X72mujdq57+o?F0F- z>^vxFj%+vdsL&6EN0oVe2XXtt|ht$M`O6O zIWsje=Db!4S)y8QJ%fa(UVd0W6S_F^oyvP;W(L1Ch|KX|EkcX~$C&G9PMHZq2Fv!V z|4{)!kbkLwCcrG95%pUoGvTvlls|z6tKJOyQq-)!tl!vYUaf#xT_z(35v2B>%WmhF z@YKT=*JE6)So@!*CYe4R!XV<|9Ct5aRI#x734~y zc{_3IBWp*qWCz$;=utm<5i2!QlT7)IZowBm0xshp%^{1VX$n3G$NFxHSxRg8P@G^A z7QosJ{q3?D4KTFX-9xSB{3luxa!E3?spdaG0bc-^#Z~#U{yL5Pm{OwhW5tndy>OAv zN)dyjix7W!aj8`>NkD)p@l^;D+|lE z$Cexi5Opr@uW=hRJ%@StrxY4J3BGu^mlP>zw@AefXL{iq?Z7xtL3ivdLxxKf`N1DK zW%gH0&Z0QY(%ptWIgR!Y#gP1@`Z3*(QN%A!t4GnMuId&%c*0 zg%_AL0FXv`Eay=B)Rnc;HwL+~qBahd)0~AVPknz3^K)WLIW;1vGoOLJ zZ%bDDtziyjB)~FzA+z3!RnD>(zG)KlFQlqbzl1z*{-dwc>~4&j%w)!tgv4pE9?n{! 
zjpT23YbDKi0>(cq|M$r7MFLni!PB|p8?`~-xb$)U-+JnfPr=s#$lfl=nue2x;JCcbfBtfMmp)8~c=hl+kLY|!PC_v5tx)%| zVom4KXS^BpzaJa`Ndj;6dqT&8&lO2|+|M}-<0&BUg#I}}JxZ4ygV=z%rU0=l^Thp+ zO6L{bw|vwbmhJ>yF!OH^oq-aip|;_G+5&R~$}%x@guQ*}V+TsC@&PNLM$RfWz*K=| z{dooJ_|=!xuqlOi(pkXVGZ=4v(0^BV%SOV)HbP=#ZT_78X4i&BFnG*&AF)0ZAq zORT1omgHkqAu3Gn_du^>(MCc`8QDlyDm^Y9hCfO_hrU#%<-N+`Wz+A>jjr`mSv0H4 zUaP2T^RC*(O;EHgQpP-oq-5Ce_-Q2)6xJz7|4s|1466Rjqe6o-@mb9CTWdk9r9cq6 zCKXD08!xf5mVN|rt}4y5MD3BcS=Dl7K6won!aVd6Kv{DGcwCd*aDH+=w+}w5@r%>o z>X&5rO!3Pwgejb~{AnWj^p(z^S|gS$vGCc@{B z>fnTIXOX90bVd?Mx z2LPi%fW%;8gJzFx4qzxv<}6-S5JBZVefPpG?eKlyiMVi9CEXRDy{n1H5?hR5KDx#e`1GiY>eciABHoT7rlHa!|-u5DGK2cr>&Rd<(Q{CsX>()D+d5^(GQi7fLktLzRrVpb#pO}VVT8j+|J|4qM z8~NA_dsAa5iGqc``aXWBx9Y|XKu)tuXY4Vmq5wz>4!Gyp8fLoYkt{UuayhKHoS5hr z_%44avNS330_J~+qi-72Z;7+Y)!<$<6h8Eoj#U;&!DaQH1|D+G>0hEckAO54$*tVz zGHH_4DNZpS(eop!-T(ESq)v6`f5iu#R9?HF zL;$ukLV~;PR)4OzBsP&zPKSG`gXUye^G?5<)N_O8NIzWU(Os@O0`q&wVr7G4Ndc8N zH#hPi=ShzS3%Pc^!lpVCM)oZ9r66HgKvLcr@m-0Q18VO&)vb&V-!9_(;yQeze=w?F zpV#uHD@b*idF;tc*O8YsP1+D0;7*QY@Sh|b*ijpp)eMJmTu+AzJt-S=Pwz4RiFU-J zw>+DIK-`|4eRDCb2 zmCX&`yYs5`AY;ubE?<4uDsf#)A4@|m z!9D#P3@4_rVZ(;QtS$!~<(?5K(ONCdxgV(pdEG%;SWi()Vz|GwtN@rn8W|W@dg_(o zko`~6_+yxGH3p?Yqsc8oY(vwtg5_!ECto8*I7lVo3(_gJo_))Ph_oiYGB{O*nDSxj zqlh)3wCT=|Dr~?Ba=o+jRUJgn9kbsx0|rGdQ6=Z0GS~gt8uUd;D9qrOmvf1FZdQ7F z3#Mw`29C4uor-X-t6SBB$j6q ztV&QeT$KQGsN9oLDv7d3YZCk=Omx`0ReyDVeujt>8qDZr218~j&(m@R$SCA zO>?dRk<&>)m&(D8uiUOmu(-DLD|S5Hc?$Kx({`hnz%0q8i3~*8=>P#B|IIl46MxkN znNp1g$)C&5UdORVY)Dczb`x6Q9K{mUwe@!8NVe`(nYubd+m*_^7aKX8{7rD)TiXXC&) zDrJYYG3Ag*PEZSr)SH}R!IgCG6EKRyt&85sSuRakl zJ9l09f%K~T{y`J8%ll35Px8&014TA*cWeA8t9Fz9x>sj6{XjML`~X_yNNI>M(*7KK z?iy9Ov674)C1tX0TygMLxCb*11;;mPrrw8P#)23tK@JDtQFs-=_C zJMXA#-i{5HtBg&26o}7|sv)!D6ou4y(kM2!UI@pTNxRZ#0S-VcS_#lHqTWif){0k! 
z&kFR}+tispR#1qepH@CEVrEZzBw+rhgS79tG*;9T074D|k~@OvFxXo`|A^hc9Q>bF zQR<&Rd{IeT`(p0cO~gm{#|9cIVz+rNE&VVQooYWE2l$GJU@h=0{V{&tkY^4PjDSNc^0mm!~c65A1 zpj%2*_KM0S^2&?}yD390sIj-VVMV6(s*pIqNuU!RW~SE=5E%0Ewczd(u1iz*9!r0A zNNM&oHPGw%y!!32T2U0}Ldh>l?d~8aC$}ct*%9s!e>}o%g&KRzUlMWBIdCpN?0!r8 ztLt|0Ra=&a_v=~AHwvPEz z3mRRIYS*>Zkdl(nZo(tE_%Oto%4>yKI2DuwuT5{X*YD55qpQ+Eagx6R+RaPJm8>;# zdhl;P$BhiZo&S}B2xqdRW$3>OJDCv$w_pQx-$vm5XX@p$WinEnxN7BNSN&=ONX0BO z8UK_-W)!026mmZ>BZZgoG+?Qxudh~jIWSpMn8;UOTFEnKBbfX@xr#kc>xK=rqzf6`CArbRcO-IW1cKhO>_0rH2_LqzfMy3dBj zNGE*aVc)gGt}Krcu|QeW0O-0GS$4`w8wd>6GYO5*S*(iuBWKbD53Q{3-L=q`Mo8Dn z?fyLr2|@JnS>Hz%O$su*U<8z_gM=Q1Gu^)plp1Z{6*fI8sXGgWyIwwQ*oCeqo>d`l zdy$o1{BMAvmFPao5M8VXL2Pof2F^VCvM?bk-_%C)-4X1IWe zL(mASI|Dp-tgTP)Bt{H`W2iZY_8fz6Uqg zzLXnX_|FcLD51A&AxRspJDS_s;U)v|#uqEoHKv`e9(a$ZSzJG}*Vdd8%7p4)4!2|^ zeU7UF0Py?D>_$_;9sH{4Y7Tohauj-R#Y~x^*++@ zHsmXyYcqK7%y!f|ubAv1i8T5d-)ot^qZ|$(Zm{xhms+Z%AA)~d?h5?ErZz`&5E*e> zM;dApvM|tP>sw^$?^V0>dMZS}Vo9W*Re{$8o@KbCt~om~Nz{ zob40!npG7^!Ky)FE$_Bv>PZ1j@bc{lWbI&000Yq}Tj=O6+z8R%N+B+D|1P+A(`ykE zEv8BAp2pMbvxl#%>pYf^IwEE1HvGo-a|&2y5&}8h2nZ=b`p45Ga1 z!fHO<*Ur`tR7OZB7r>S-O17wVMvrz(vRRD?0PKqel0FPntI=;bZ}8xl1{hwY zM^W}t&3qL`qCK+rJH|rvHkL&-T0gnVxc7UCIFurMFDom1j1GvI>HXy!z4(7o*CA*rj2^@N zQdp!s3V~57n1>J8&%IhF>sqfedNakF7JCqQVu4>*abiC(NS7Ly-MQ-&9wT^vWyq!lHR}Ndv6o8}O#o_hM;K&W(!C&w4-ggc=hsfz>Wl z{uJ>(T?m9ko26dKcPzj}TX^&$a5#FMXiZ0aKLm((;MrhWSRKfNvWDSKPM!X1B# zWwXEaw@#O5Dn1Aekq!-e6Y8F$MXktRPG3Tp+S=6QR5gl^Xlck_A1lKMXn~gV54bA1F-^K%CDxFmej9xe*;L&e zK!RgkS=29SJvOZqN`$?Mxc$WNWGZU=q!6EXJtHVHnq98P>IZm!+j#x`PyenUC_N9w za+es6?eT~Z&mORp*wBzu7vEJqSKHG7(VgRQR{lY~$NaUV0Dq>o(fX`8|MkssA!+G9 zzbc{3BNibLqk!F_PrBxP&K<_rs| zO1$Nl?LTBPk42=~v)NCXx*IAQ@Rj}`&hjq8f;4tDrs3xIV2AgBYK4iKag4%K#M-K$ zs(ircK3=@pSI@4(k14X=Ja}dJvGX_#VF|2GS}ypLXg*V2|2o|xWxIBrXSS6K!gIJN=e}u7o~aU zRaouk$H#2)+*y6Jo;K`n4R3llQ>u^;UGU}>c8pcreona)mP9VR9vD$XIQ`{K;E&h4 zH`->!qM{=8g~df8q33Nh1G*YX9dXw`ocufL$_@qO@l@tBPe^1dxF5fg-jukB58WFv!ob!v1#7E~73hw2^xYKK$}U;?ed5rc 
zAuJk<@2+KqG2K-90Gq#gq>v}({uSXf)J|JDF7IyV6T6@1m(117&PEgjGc0)^W(hN3+p?1Xgn4|?76lQY-w z5CK^UnATc$nOaR z|ANEkK{9NjCjUyyji7g2^oMkLlAWKn?J;EqN~j0~zuN8HO7G;_ryPAGTC|3LdCjqV zl~3n!qAyZR>vdj7T`V=|RbI$>qFkq$bj9$^bTm6DFJu|=5<6Q&B(%rH#YMtKnQPv3 zmkt(Q`^(sO&h2GciZgX59zxI`heSV$RB=OPv&8RZ-De!=;F=WMGVqWDmAH>Bt85Js z@6x4X_G+FePV%AltD9rK8}-0r!dt}LNhtLC$YsA~*$%z0W_4=GtayE>v}%d%nr26e zr(3*NqWVByhG}D}ozTN2m?I<-uM==Zw@09ju$?}z@%v8Esb?raopwd9*Vb2@XH*!-Kr*2NBUcQ{IsPRMe+XZmalj5iY;XRRJVHFK2~V zcYgh{e`|uKW50`#&b!dnDx>(5>TEgnmgWxk#%}~LAxeiA1n33k1%_GumOJ*pTX%no z7V$^>AHNL*42gDoYH5SJT~+LavIjz;Qvxi&nD}}#2r_XM=$7NpL*-{%-XoWh8ZL_#d`se|o1qx2dlL&844Z@^*q-j_m=GuI^Z?r{?r;MdV) zQ)&N*YmN0#{NacVQj&_?q!ennA^|ijj$Y!NDDjeXyAYuQS^H0 zuN8U^-J^3mObx<-Cm-i5^fw~s-nl|`(nXl_Icg| zBsNCSZZ)I|;4mW*M*6)zw3e-5buQ*TvBn<(0akobuwBLwyck9>I$g&a=lIvLm$Bqg z7t{S#FgjrPSVbjal1zjbU?JSSwK&!BPp)*r~Kt4s1nh0ME zS`{N5cVb!*51%p@ZQAn&dpL6aruWoX-K>)+s0}xOjS#J;=~@IO4Ii3bz@CZiY;T$; zE=e34g_#7o8)T9F--cL=UPI=}kMX&}q{ppr*qF~U&Sf@DV)TaD22`&A&vIi+-)BfN zP9@v-Z$`us8;HFl!>WYqPG_}6hZt*^#UCpZh1qUkZzFl3wv!C4Gedj;36I37ak=WJ zl!Tdl^7tL2wgLXnRIw3zv6|=~jZAQuH_`w^+t`0!dHM76~21yZ7l`XWPEj8qwli9T4wCxwxT-@6Vk9&rR1My zsN%4)%R4x0QMU@%F4SB6bKt3}J(H%Zr zSLxZjN|f>AZN=FNb82h=p>jh6p6oh_=AVwg?RswTCHcv*pQSr588@F6)H9N_onJ=l zyqA}w4|1^pFMyQcaaz&ubp`pBPCbV7?7$qX6G<620IPXsP@s`jS+VkIjAa=kf?A3W z)aqCd<-soML4z-gsIXE_dNeP5a%*aF}_6O)&)LlZJm)8^GKN27U@~L%kZnGv!v5PLy@BbhQfFwxdb&LX*d}_6h-2_Fz zPb_n#;tT!u*E8-|N4;jYI`4J9th2Bu68+q}5m>>Bj%-_ZyTOSK*V`Fx+PgcGDIrF= z{wJmMgy(;WH+m)T`WI`%8g;qxrIZ#%ps%_@FyS@cd6A2Yt9Wr?!Q6G4p1X&}A2Bc; zdr3SchjY+5&IXvJ&|xE89oa4rhLZ^MAkeaw`YbS$Tp{>GCmpWGdAgXx@5wq)$NihP zl5ML{a8rAw8oI~WsgG3B7DZtN1}J)TEF|IW7pswU}((OX`IyFTy>2GsP)a*}uu}M#Eoid9Jfa z^s)!A+wC@3ylzTft`fb5@uQi^_EHS&_z6R59S&$1E_q|!QL@>lF;_Fu(HqSRxJ7S^ zX?>T#=|A%b_b%hs)DqM_N4O<9?1w5Do(T9v7WR~)4K10ue@bQ!T>`L(0vimg@SG_Z z%9)OTN)CKvR47biK=Jw0?jdTGYoG7z}@2 z7s_Ci?(QYGL7J!-$E9BI-E@dHSo(eyJb}B|d9C_PhIJd@oeQql45C}UAGZ+1>UHae zl4}29AhOV55zr?#SqYThrbGr1XX91@kODANsS|zE#Ut4f*W*X>LKwp66Y+LDx###7 
zithl?aY+#pIEs-r;br=m`TuZ5#_`pVpk5O;j7GQog_m85O9(g~n6+-Yjt{BLtO)ld zbgJCZ_EfYAv5f{VKb3VP90nY=P8_;GPEXv8&MUsGG!$1x+y;%@%x#`@+-2x@K(*(A z)Xu*@NKBU|YvSUq7UQjS$i6u-ZKnoXXyS-|GwmLL!a%0ViV6lPq|a?zg~0++kno9e zk{L^9U@hwnV`HzYIG)Y`6S?CQp+0&}g&)e~*VztFDPR{=0*#0he6)-`SO+uQnC-T3 z^6{;{_MrJOmt_BXHOS;bfGz|WGPoc~TmNw`$_g%}K8B>jTf_fOhNXR5outaKhg0Kh zsLa>f{C{-41yG!Avn?8i!EKP>?rwpE;BLX)HMqOW;4UG!OCY!hcMa|uJh;2V9lmex zeg1pyzEw|64MS1Ibobk`dOh9#pFiqeU@T*aZCc`kcY$hQFDYV9RzO zy%+TEu744VP83nj+)?P3hIRy9e}k^2Ax)b5**lk~5@V(j9K!$F40E%rFJ8+2k>lQp zp>p%oggkDjNZsPm#3dE4nS5L>ZbIj5yn`4+d+l#a_Jg!yn_K1HAFmJ3j$)1ki(K+R zrP~pW;M1PZZb&w1HkP7h10t3^I`{zO)aI+MeOymOa-ZU+$fi1(zCa0BFi0>)MzKGV z@IAP%uaEi9r>g}V;`fa4a`WNwnQQGKT*N}_UwvRtd3~kacpzQgRqspL*qxQz)ys#C zS#^CIiap8G?hk6e6B63cs!i2}!dfV2aT}+W#r7w=PYQK7k;y`C%P3|Re76(TwIFx5 z(g6Qw&I<%aUO;fuJ}}3t_F0XYBgk4=hHM?f{wev9Lp6)&S0UE* zne}x%tY~_NchZ*-#ntqXtE3T`?|_>y7wl@71e_6sP{wPV_-bsQUTznSN(S<%#|JdT z8`6CKbZW6y72G0>DX&Cl_D(18;yuY1l5d7`RR0AnRnEK?mz_%BA zOiavhV2W-J<+DRMw%{{8*51`1_<4;I_)UBYm9`1gfvMu-#o!V{8}IE2~7}R=XKCe z)q^4{3Apskyrl}7=p+}(1zNmBgY<|qti+3T3B6nWN733RKAl^fh^JV7z})PV^lZiK z-Oem%P}m_F_4mzI=k@s{oO#`veHGh)9bEJ`Ne&=;dxa31jn`Lu=jCKQggtF`^f=p$ zBV_D9KX#ef?K9uFMlQ^4gOA}LLE3ePcg%6N<`k7P8|dI;>{9zZf{fZANs%15t_JmnC(| z2H~WG5I!vuHC+d@c$3AGQ9(OK>dO4j1>k>bz?*zT1$K0XB8+D%@lEFnF7A^dchg(v z;+WNyO?C~mK#=<{6UO$%ESsTttv9|2Hvj{w9s|0kHO$|}Dyee8Z7~@S`;A4|j{t5F zE2>tt9#e}bn$YG#{N*p0$kCM>FWY2;5Hhy#5!C9gMac-2;_qG`#$4GF2T+_*mp%K5D zuWpw$yS^8iD`4bCRD?6Uh*tjHAnoX+Fqwlaw~Q#(|prCspxb|-+4JetU)rp zak-Ns7@htkq4Hp(j<7Hk1U6x5QN6-lk3A5SA%UVlkoBooNRM1X+8PR9*dK_1KiGh$ zEfPbG2G}F4F;?^lF|hNWgH>tl%(AF`E|tNNP6%NwMIU ztQ=8v_O7ZX$cYsSGX)-S5WFQ@SQ7oWeSxfpp0=U=2k+PUil7&;3pHdxN3Su$Ag4|C za_DNywjKZJ*nP;eh7nDx!W@(fY$M5`bkVHY1g7 z85f4ufbfha@KjY>Q&W?x&!)sYE7id&BWXhI8|y)&L*{%BKb3!^%dc_;!n~=_?^rc^ ze0p4aZSV6h^4#%e#e{q(RxkpU0-BK(>3TScF3tBYDHVox_}Z<&eJMgKj2k++dQyes zDr{iZL+t@&r?)WC$Q6Qr>jF^5AlsjxeUBgmRvqniy{h9t#q7YWSPRtnnrT{6+;~CF zDhu=lMMQRBxZ0g(y0&+E%EGfKJ~Mf%9}tz+_N{2Lg~S|)H!mCL%jyed 
z?$m;7gIOts@Q^j?b}C!;>~f#Q68j0`vR+d4HR|*UV^3;g-auDV)|$;PhNP#G9M&6r zhmqHsyncy4TsMk;NX8|b-x=${PD*;NFOxdweawPbaiDUE@R0x56w@(S4ZUr7vGNR) zWJeC1;Yn$)PZ*@qFaE`V(~|}^VlG2x0d1eS$8TR2U?BC^u)b>K`l3(D)$f`Uu^N*^ zSdyNf(R?t-tv&z<8ITAWXs+&(GWGf+(GIzzh#9_bU$=}9 zeK-jk+~KR?j9#k7+$EpnS3{=7ZP(`>J`<`8hT&J7Lser23x1PmLP4s#pGb?A#q z-G4Y*N{@{7WER5)cjLRV|KR8#S#5|Z3CViKe z$lE4LhYHsg#Xw&rW2f=KbCISJ=cWllPhjGw@7*qX4rTvo5c~&w3o+MkqdS_$AJ9j|*UIm2Rvr`e?-QRcwuil*_V}879$ibm z+pSHOkgOdT>!J%%=|64aM85I5o&p8#0(ZuJZ)#QBqxCnu?n~hT9i7tM)TQ1Sr=u8F z;w~YYYy{_Xm|7O*y0YRzq!xj8XZQ>n8w-`JnG~Je3)1zKUHcKs8!?=duIEkutvBx> zTj{c1WrW0tHQ>t1fb6>Sdw6ah8lP`|PYhDRGb*C70>Fu#ixY!tlYond-!*3C{e}Zq zt@#K)W$96uWMf-xk&BAl% zX?>xL%{=yizP7s=HP%-^JQvU@#aCM~9}lFA+M*D|iT}gQvWEd2DzETuqA~o_o_hT- zuj-jQB>|(92tw4#y^}BOHQ9WL<1Gj8$Ez(?RDhRmWPny|Ig{f zMA-AyK{F6=FsVG70bY%4C2tNGW0KT&X{D6Z`NsG>CFxnd83DDu5|E>_#5}nxmPSWF zVt@6AS85><W&2;gRLKx^MK0Qd9$&aLFBuRNFF8cqFXS1dcJT^tsqW=bm`enlM#`V-{u@COza( zd|sa>OD(LQjp4BE!%Zw<5<25s%GH7O>wij*jEqeGdo6RlFww0*-7*N$|AY(GgW`dO zWax5Po$ir~kkaWv4zjbU?yPym6YJF3ieBuCvzf3O)}sAFQs$C%uv5cN1T|Lk;?(Ul z#XpYf^YD@vjl8+<`nO&}%3p%U_TU^{dN(&FpEu~1vk%A!pRt)BP_CM+%KPU~`rTk? 
zfl*{KB?91j4ubTigY|LxlWFyKwuYPZcCL+nxVZoRX0E^tpVn%=-vjm?EtIF?FuXoE4e?Tc)x$zSn6C@Dpb@zL-TzkD!Rk3(S?FUw z{kpI4!z9h7Xdh{K#gSsEvW{0d36U~ahu+AV^5iy!^$Qt5MI9r_A|A8SzkcEDSiL>t z9vGXq@!zz}Rzx05j3@K39?7}L;pAYeUL-9+gpnr=EFpXZNmSBq3A6J`_%v#pEfU(; zr#F82yhOIoR&Qt};w$KYNXLB!rbZDP}c zRQ5n~e!u74YTm~iA)(Fk#+X5+D*OopHof_3I^G=8 zG0w~I+0G1*AfNNHQz4`pu8wsJr$N*4Fs{<`@M5C4{$b-%oSM}G=;^V?Ib9XL{f%el zMcjqm$oi0L^~UAaszn5X*Rw~m<2AI*wR>?D zJ18r95jQWd$Cslc#=zAdIwig#?gydX^M28Hk$?5)8+?`Upfsj#eTA{KEZ}ouLgS}B zdxW-taJwih>Uxg^a$CLAFP|sYRgn<@AH*L+DEc22Joi%wb7JJZ%0%XqNm&P46=Fiy z0S?TtbVyN?+u9@gM&HG5Fcqk8IG)06)hU+0<#0s>39j%jYRaXHOvINZR6Y(a%dFiW zuf5nH6vw`=qK()ZoXA1hc>R1sfx=lF`tfGp3qF)aX1x`Cp(BWm^o!nUL3F`;{HF=> zmU^72;oWj!=F>~idMZss-hIh}g0@ zuK(8n#H9oSiyv-Gh1L^_QP(;wg<4L`P$7S-ER&NM%$#k}fZOOwBqxoimVSUfQM}Zc){9o!Rd{s~`FYIK+;vx^zq-botso^^`dDXQgEzJo z!-nJ6Z=s=m=2_oQQg&>o_#RD2?JZJVV=o6T7>vL|**`#IvbYzHW@J-MmylW47&nBO z7AHuMVyyr+mNaFV=zbQ-FL#>}XCtWTDw^Qet@p8@nu(kzhab{Ns zcg8#T#t^9{n_n;VH=VECe%A1u4n@!a-Lay{%R+%0Ry|9-ACDXIC(!1*c*0&!&Y{+o zOI}1qif_Z2^zy!<#HsI0Y?w%&1mC7K2vEKaP3I2*==)9KP;5paM4GZj9Y_` zBp#jrNqAH%h!y4$jPY#yjm9U+GwBPKRdi29jzPluH^9$U3dT;n?0{PC*0g+g_QJ#o zY&W_Alqr&gm$>_EjQL8vwg;6h)r>0OMtzoYy2bXlwysq2Lx|P0q60&k_4Xt7v!}h+ z*|}JRe>LD?9JO-G;6T+!+^t4$_4<+^ft_$)GMRx}s@hpa(!L_-B$YQLoIVu9l?l!< zUyOy;Kl6JqKh&a!vE;NCwcqcgw^jOHmpbBjN8%L6E~NV+++phFV0~5cN!T|>h*MDm zT%U_}R5Zt$+h-~iS13-2A1%9m&aA46U>1meg}t-YD{wz?}AT^|9uz@D&T#hqHHEpR|_TDz&|Zu z6xYSLHN0!d#g&4_>-f(~Rv!aH-lJ+N=!n7JkhkJ)q7g*fPQ)%1G4U2l8BkU&{C#{G z@V-n=tUoNMWTigCzgG`u`S8p>jR8M4pC(wCigihI%wOSJ;|FUafDOkhHMgOV<|A`7$QbPHudLpP+^;zU~j(WvK75*(pYO<+&h1_v^kWszJqF6()LoU3cIX z!}#{yktZgx6J!49@gr$=e(&lGL?qVm$hO*)Qwc>+0i+_5t10OS7Iw)M8SSJxMCnt_ zq%>ayW7#~(1#$2n;JdH$12#_vvp{mc&Jvn$GuC3H_XXT*SQ@uKur=D#)K68+7o%5^ zp3Nvc3L1-&-sGINKia-*v-o+~`TFX0us)MLAANKHyF{Iv=ZIK?>S4_K{QE{59Xq z{>_s!xS$XKChrxD1bqT)g9S+m(GR?mgZy3UoY&zsvTOj;!tbEa*{OkGlg7+s!^Ey( z9QS6QU=?T+MUbR#Gjy97AwO0y1`=3G1Lk*RFUs8tbIqQqM>8Ig(qs1PefFvM-A3p< 
zi*B;4xv->a!L#O1!sxYZ}t5ocG}hc?UU-G}sgfGZR_(AC1>PJ3y2d58cx_4`WS7frVp zhFt5hA5yNLSsuPfl}M1T?6{0t2DJz0|-! z0*r5II;Bd8zbp0C-K2Ko_k~V8KHL0$u2kR9)YSCS1xI2n$-z|<=ppf6kMM|bRqM@vpx)4@h2_pKeXNuQDR4UfqE`iX7V#7{`#}m8JGN`Sk#RNofeEYhPEqs} zUHG;eROY}0d$=Br;f1nh;XfvO%{49`k3|-);wBI93CSaX=Oh(cZ2dRn&ABe7zG>UJ zV)GU1MA)T#I7ysfGF$#}Bny+?n& zzR3>bYLc3HY?K(a5q=~kRX|Sy!c1!&HF@W|FZB`#3{*O8umke20dw-NN8>Nnk*?2I z0)>MPBtvNGc`XKGm_E`Xj)FwfMN(LVGV9)$3U&l6=FdW)!8+f9kb%~TP678hi=eMJ z-s+P-1gOCWs4^b})5k9Zk)L-YyDn3sw4;uwcru8fY{Ru$loBq&dmHP4Fid-Ps0ERne06=@u@HZ@%uojy)lOxQ-u{s?rG`xG4dR z>deX)ZTyJ)?{CrI_iB#~13J$^=)APMT-)BK_(ioFH7T9ApvEd3&)cD@MY&`+)}=zuVl{73_)PAgS}Ptn>d!7DTc7oOK2rg;3G2K*S=a)1@`L>F}l z2DHdXV3@r5ns8Q-lyE9J?qqG5#P6)MpPyL4Y~-xox9aW970R~6C>2+*+*^#t!B>H9 zaPyD2?^nb=e+D?8Q2FrwGW7^sClR2h#Yx_B`0edJ%jaMNhqx0MkrqwPraxR^Lq1lPS&#iC^7@0o(CYmCYJCms zK+iFsQuxXcZBr7hWkj~)780#r0Z>Jld-&ZczGMMrWI$&@7&f4&@jX$;LAjb0W|wOC zf?X)tA`gZxy1wbd<XY9OHiMIOPo9uRoj9+O95!8?+lL zGJP$ad+W^0Z_|%s*TK0>DtSZ1_}@KxZ4^PYY&m5s;W)@r=JlwLz}~N{@sc1~^dB0A zL{;N8>3^wyJ(1m`fhv6?z>&vh@1YO4uFx$OM`|fW*T!&>8GdUMa&W=P$rzBs_bw-R zF8w5%=P9nZEr{RbWf|Sed-G?X@UmRE9(e_@997b`y|3EM@98f-0 zL@}7Un(8Mypt#wgCCBY1+A1WAw`T}bQg98MlJI9f40wR&_Y1WXubkd6kUFcgMIY_{ z*7t+6>5ETe?De-6?`mBM3CiTIl|A}TsL8UtA^8kk?$EP>elvwsicbwAus8Se$}llZZu{fk|@`HDRGdIKz~MP@rgkKA+RyC=BKj-q#dVy)3^u+$7> z2D-H6`n#EG5FLQFkS``>Sutx+@9=~L>qlSxtCaFO(^qV7o63YtYwsLm1hnWVVXdc< zzh2?;v&`0@%e0*Twj9BgRtq*--$QTXvq`L+oQV+5ZJqRIye)2p71ZZ_1|T{y%VO4CoM>a;e^+Uj{1eW$Fl`f1!<-U zh*-1AD#3&|%CRFK)^jm42gZ+VswR2I=AvHg|HfVq8*+I1cAnVApsseAnJL#*7ZkpE z{dv=Anmpa^tMHR(S$ z^{AldjL&}v_-ke^f}U3|{CTaMIKK$7H$~-F`0!4~E#5sc@d7-5f>|S5*M?EXJ1Dmc zns>7;9RIBWh#pe70q3DEGfOPi%5Db(72W6gV>wK7$Bsvc`+(H9$c+V>Ft2EDK|um= z>PBD4LHQSzZL+ekm1?HWD+gR1P_q^T6MrKqdK>b2?FF@WN zThG&E`y55v(CEk+)r%5MP`pV8OQfwATR?cI+@Im7GT%psoyU(A(E0KSGT)t#aiP5D z1oqM|=-g8i@d>G7>h(L;esqBh54&jc2DoV|E0eym`KNJ$84F`w zM2|oxdWOi?)Vt(flIm3&#+K3MW&PKOc`8_+oj6k1Z}qGX@z|lU*Z{hb!0N3JR#M|S zRCeYN1QrWy?#I{+V|Nya2<+t3hUbl#4H~T^`66jtsV8pM6$ZE_T(Oncuo<^=sY=&r 
zLjKjVe(8lVQ1$m0h%1DqPmKLP6u{*MykfUGpKao?!PGXWAK2ike(%!vPYE-EVD8+4 z4MXq4+Q_{FBapv2TSf_ACI&WB!6KQ1v4FY68THm-2>6)=#ezemcmKpxm&l^FG zpt%Wce{J?h+E02L4EFYu#u^lSMP~fkW?P;Zf|SWy_X=tKxw>Kt%U7hx7mMz`u)f`> zeCa2!E2BXEV^y#93Do%fezR=;=Rz{bSCLYA3AxBfXMDn+TyNBjR1*hLJ0G9sJ#c)Ob>ySiC0bK(R z9xypv_F?zqKGACXuRXf2LJT*!ijkb#d=jl2D*61fWl@am>n&=g8^NvuWo^!MBR)+B z`V{Q1pRvK4ZMUmd5b56~2;5^fnLmRLy-2FILwh?!|K3n+()_g}owNMXD~1mP9ub&> z%5tL;{pxvJ$^^WjT}#!1FFT~jUC!ij`($fGQ$HYG6z%*2WYMxe`c7cEpPUaIGAMUX5b%HM;SC1R9MK z!?93uPsdsg@gh+nD!Sd8d4EG3gPt!M@4?FsmEvoK0VjBx>azSFc$^hHO__u9v7;`N z+JWSd7U068QWF3EU-f`%DHLw|4q}VE>K@&91x2vxY0q+@_P1gYMAJ1u$L#Li3MD7|D%7*uc{`@mDX#WjC_2C)#eZ$4QIiV8L*}_#cxq zdm;ytKeB#TKd!6V+)SKVeP?4Z|OMdmx=1nt>gaBpo{!F0fSRnp|YWI1dH$4odL0mI7uQCgBlid<8 zp@fHMo2?ZhBk(zC=O}=n+}(<0^Fx46u$gT0>N#!Z7YBUc!5aMp_Sf_V8JpM>jw(np zZ(j&JV!x9)e_a1JivIhGDJ)2`e#k|p>c~0925x4eclkH`auEOy0;vFHOgFT%25pa~ zq=Xn8AWlZu?*@H=%Ax)jjF>5=09-w*e2pggRw=Y<=7Iq(_0f-D8Z^|ALWzW^_`+f#}gJW@+^Jru_Xn7-0*tO^UAxZ z+wQ_3e+#O6|6Ics%ANRyYgdW+{3+Ta!+p1i9vvOA-m-ttBN_&0ww!%77#~hK%fs86 z(XA=W?tJi$4nJDnoB<>Ag=UI`4A@%V($W$>$R3d+p^3of{5#=iXVC%!;dXo5o2+Z8 z6t0WD3+1g1rTYxE!p{2ydZhjd@pGrUX*bhWAbD1S@1?j=<>v=N zVk9L-6{$VOc*)_{|E{+n{=j|-cr~RnFwOrf)(ilLFk>@G;)MF`yzVXC%wUGP?szh zU#Z^_$uNHycE&G+>ke~1rz<}1QkhLL#_AXDfjI8aOJiHIH7g*v$BB=^i*w6qSJpk6 zso$F1e&bhpJME2zR`2nRDF_zj<1u;Imuf>gB#-l7@{{?v_GJ-%M!_5SVnGAY#^}Lu zEwmJsLV`(9CS+%!b(s*V>|O2KeRSMALZ3KNpb)v=eR7WUcg)`l#W61$tN9SoEn;*U zRS_q%evuZ%8tlvZ(TFULzX#z+lni!=>zGvwB)}} z20K)H?p|bZAVL%jRK1vH5H3L}$XKW^_#clxO_Srx)qF}&@C)RA^?8Cd=r6Al^vt>F zET#lb-&;$#KfV*LLF9hv$As;X(FSuQq>1jaw<0J%lQ1V84?ZbIQ^e9}OD$p1C4B^3 zeni(G(?NDV2v`|DgO3%y!`MzvFI%iWIcR3WKlhGh@@F~elZ&%>E`&2~%3D!atyX+PND-to5XvD4_&=hC$edRI%8${@ouLtTK&Cwv%K5l&{0~Gs&t*7sI^yf}tS9)Jx5chd4B0W&b2RIgYQIBAy zaK`Zq;J+sDV{ePsZ0#~Hf{Feva@LuCA!AA-vpWgmFD&k6ln#2a9rR?NRZbF48aEcO z{}=(C@bjWP5m|LB2KH#vcc3N$&Qd4wdBaoXk`fkKCr&IsTE+R3b8R^-yi8>OaaqgV zblv4}&ASle z&7cPh$#-!w3FWL-Lz| zLqYlI6>s4Yu7+}eecl4*!M{+C{YT&o+?RK_OgC|JNr074B)%u 
z``w^byc|F%2hHD?emlYS&K^)cz++kPOm3DeO$FZ6u8m|pDy)Jbt_BZt9e@6P<|K@m z5b0QVtz)Ry)CC^SNT`4=;E5Ov7cZ;^aLNLq!z3Z$j4zK~JICLfQ`=`j`cUp!<5|PD z|KZwMnStULWUe~&TaFoC0Lxex-rGb6m^V9LC&Y9&_s{Z_B)l{5za~^a3W^|x`?=5n z1Y>-0hkpnX^lG5CCq@`d!QYPAJg|E~y7I&6;qyd5s9b^}{uD`Gp!ncQ%^G? zx%gr7Gr1>8uQ{2H|AtXD&dOZOS=j2VzhL+eL76r(EtI-Zknc@mrT6yI(b17!T48hL z@8GJjaPQ3_pR=ROFsMae0aA{cTCp*)i5}@(AlPTBf>Iyi(aI1YppWj0#^Ab$Fy`J-`yLe{%PaWPeHuMPx_ z9nGZG-xfE%g4njo#NFu1LV1sEN__e>{`&k{?PG68!-jH_H{v1A#KnuPzAqA|Gw||@ z{?en{2gAQr9y5g!fWYmn2}%kU`~#b>Jm%P1C$IEgH+c(v{ZFYIi@PwL*XMir*oMq` zNTtN(2YCW`{dPyFKK@79^luLH?<=k!AQS<>SLS)=F)jE#2m?Q-B>x{ujLbSZVC}xX z>QUg?cEJ>c$tfGf^DL+Ft*cH8iz~zLD;y@bh>N;e2?2^lfR)L)d2Xqe^!apzwKd)E zZsB@&(z{dzY-5G;3znBnmJ{D&EB%%bQUx`rWY7WU@9_kEqMDQ2RxzeUoEGNCf;Qf9 zuOrsAl@+4HU6fSnZ^M7s7m;uQ$R2^4QyPz4%5rDEI)%%PXnOrFXS@NOpH+fB6v!s+ zqeERsxaO$wn_mXtG7!Q9rs1Q~fQ8kU6n>(%`#m4CD0&$2DQZ)e;4lXypwcCw*RCiS z5V4LJ;8nTM{N46Rs*hyfzj4K%m+$k~N?t?Wp6d`#PgTJ#r(-&%-8rndn&duKAH##> zHcmo<&(nd-Up^y4lfzS9)GV1p44YYQD-4M)y`Gm_#Ws+HBNm;cujK<2CN2_$6A{yA zVNdpE)BoF{4)YJugS1R*EUA#dX{YvFUA2Ow=do)=P zZ>^DQjYUROHWCGFetM218f-5ykq(=^P9$Y7=TsI;VUiB=rwi!hD0}KZ4gPWZykf5F zApJ8#lM{*Y+Qy*iRCx+dn(WZuDyp-CQOJM^aVG4^DI~yW`-nyhj&gKh*5u5W?>Y!8 zkiBtK))h3k4G*A7TJCgQSCV`z4Fk-BU@8o{rKgb!!l0dU3Bv&K9D71SKn7!pAyqad zpFKMAF~j@Ipe|IZAEdl@Uu^wW4Twg&uk=!f@JZD`4)Ho)c?z`MVe|&L*Trem(#bMc z5T!8n_k3`Jn6-0%TlJDi@yZLuw%LR*C)CDGQ|7~{(QacIy&@BNN$5>NTmmmBPDb-o zvj%lRrTvRvzTT{%C;Xl*u5rY$fl(I<$4Y9}LrWnwtIeCE|MlVI;jPsC-%bWB6iT5q zkk*QC(g>}%y^V)gS}=b%0%GkWuMoEQKaJ5CQh(KI=KQ7Q-p8vlH&AVzWb(cgC9<_7 z==O=blmgXP&NLDV8;a`KVlsEk0b0w{vRpN!-tW%mO5#>z*Ugu&8R5EZ|Ld4VL|^+y zqc-hcYT-62)Ju;Giu#v+UlS??BWB)FYDa>MPx$OjpW1~!Jw1f~WS5*5kDd?<{}pV` zjPzU>=#m1d~Ra^+r^XKL}il(TV0kzLdZd?e4F+$Be%@**S`N~k$e z4GZYB%)WL|`EU_f^WCBY1Nx)e&uLf(BB6ENsbw~%bnRR=krn(lT0)r!LazWZaiq`n z)!I$S{ejh4>iNS4mLR`Q;S%XSW!2e9KL{0Pi~*dxn|lY%*jx##?jCrr8Pfi+B+))M ze%Ry_;y4`X6OPe(gfgX@^hMz;`dmu zvO$jcwaPH|=PuZs48)XOBToc+IkppNHxSTp|8hV=dUbnqGZq}&P=by19xtO}{#iJ= 
zMWa!aQX)U`M6SD8DG3;EG9}JP`>EU9Zx~Flp@oH8tfB@@0)InXRgy!Lu_|$&BIA?w z9rc2@TA{gXX5wOxT;n8en;bWuq5|v`U_k+M35g*Ytc(ZsnJT|< z>#m{txm-wAmPB|>EZH*bxpA8h4@AB)lu$?jnv1$-rT;8j|MR_>jNsaml~4{7kpJbY zIjN-v^3T?{bmsg|>kP0k>0OfSgNMKoYl;VCfw8Qp2vTEDro==INX$NvxFBg?hK1IW z8QY|j)j)&Hi_R*j#RG4N0V(&g>rgT0W4*gv{^pA9r#>V7h{JXFY>~#~*#Q0@F73?2 z(-FW$y#PTzyIwQYm=8DDfWv0gqI0bu;JJD0)Dy9W=64a~f=w9A_?MF|_F8c}?p@3~ z6BuKtLEtcG#a3#nBxp9TWJyCD{Ru?dYxS#yk274jh3LD55~FoUp1FZ$ zWjMS&ED@1Wr}vMDKJsE2QQAt0Pze;AnUgu{mU7LO(%~>S<4^OIlR#jUO1i! z$+Y0Vu3e*n9<;T!Gwy4@r}Bx}gp?5XIXtFId(VrPPvlbT%rwraCg#C((gJiufbQT% z!kQP1ec?e6tSLP7FxI32vqMf0xIB)U30MOUolmOJ-N{Y+NyXpuMgc!r96d+p-7V-& z>W_LKc>=Bid8ey0iq0UPJv6$3YwW(4A{MO`xW22+y$lRs-N;}d$d8eDnc>8<%LhE+ zOY{qq4xCK8-H>kX&gNd8mLbSsK{aP;WV1)>#@b3g`!XP$4pX)Ch+^V4uk8Ol>?niY z$#_sp_#I2rd{dEuAj1i9Iz4j9xvJwpRoKu&N(P#mh*1`GSQ^he?;?)MV&Lxge$r!QhV}Y4 z%T&SBHgwjjcKSH1cd{GA&?*FUk}r!R(;oWYDr#G+o6N z-|Es6w}tsyby2%6-9QHvjV)L=b5%F?!#R7V^W%YUn_j>jc z)p-#V+)CoWK0;b-lmvK}N=Z`H*yY>b)^J|wDs!+R1KC(|$rOb>k;gW!S!BIrZq?P* zX_O|e1tlfY``=gMd&V0GvBq2JJ%yRozpDbfbS|k@DV>qSes}4*TZWzz4daaUTaqWD zCnHT3b4HI+`ykX34yu~rU_%5R?t>-m^$7=?gLME!EoBl-s0e5=&V$__QeY5W5+N5= zLQh2wAU7P4`pa9*{P%K24%oj}(H907i=&;N_okrd_*_J()T>9#G2CsaqNn}&j4xH} zCHrm1L!D{ypF~l{K=@7gxioXL|JjZDAIU@B;1U4pN$ttu_9SF9^8~{Qu2!a?RVYFZ zS5i9*KzHvZhrWm{EcVrmvirb_x1K0A=z2!;B63hxrWM%deEOq)jxxTM{To+6mhhi7ovJL#@pLaov6cG!k% zTe%2x=PyIa!3 zt_6Ek-Sp=fV>|k@79ycR^pGfm&9#PlQOeS^GZu!lvV=N^MrL^EMH+GPgy62^wJN%Q zKS>o976JNU<9B=HA;tX?IFxWFsd#b{cJu_z=q@QobX1BQSFM2sstc~TbsDu5Yym9N zUcK`wSw1^y_`hv&22}r;f54jtx@hKA|NqP%QitK2my3s%o|~c(5&LLy2v`2hMeu9k zO-o2l=~TgVzdcmVe%QGc@w50mLh9Hu-g)ye)2c`?fC)(s**$w&kcsxO{ef~WzuAV+ z=W^oX0lNvAES+$X`BP{c>8a3eAZb;gQdQu)9Cs)`aI)5M>ahCj_L@%n!`kzHp`g^< z)hbDQ^7A^J06e5U?a6xpAyMN_L+H3))J}}_4!H?qJ0YWb**<)r_|$Gwb{l?avsoni ztY83Dn33?4Zc56^(b4%-nY*V(^M}kALR@+JVn+Cni`k-qO87(cJGwh|>~?6w;FZkG zOhUH2EFNkG-l>PLAQjJ1lP>c$~v5ceWL) zgu(sE!!&#J)j!d~UG_t&g$2}ZQ2VC$XaTLAlqgk?_&g#hih zb=xf$FV41(%`g;Q1zf;eRVh^{b?cY?l?(MCgnn4yXv1yq74eY~F)6Q9S(C4{ELAmW 
z%<|B0DZDHfC8_i=YKd(}O!co|J$bbJRG+nhU(EIVl=sVJ`AOodk?(HlMz+uHvfGsJ z;;Y$*SFfo@x0b%XNSu~zpBqg#SJz)o+e3+R9cy+Sx2s<4iG`2JJ!}ZAk0&kLkb$SO zzNNF46srliXV}Fj4qmk3{1ijdIR&Mbx%j1#c>+q|&y%sTumxy+>2_^+Ccl`5{Y0W zVSmZ0ZazH>`qLd)aCe&p<<`Jh(HW=vA2`E6^ey#5mptwN-_Z?7>J>xU+4u$P=b%YW z9t>E*-^i5-{yVOH3@uv_Y}G%M@^u9OjvsSfg8%{#vaKEadRq*uasE6KLs?W9eS}t? zpa{`zmlF4|%0n${+DmYsE z{gz09s%=#E-DRqZ93Y|!%KImr^n3b?>_zp*^26IWq@yIMRdI}}$SUM`&0oPTEwW`x zHlWkzdkA&a$JJVBeVUUr_teJP`iPm4@o$@6WnjJFl6g3YV)D6MNKcLc6Sv4-{Tq8Z zW|&wJP|*rqxiD45Dh9tCb`fuGx;E$}wn6jM3_=HrVMK4g7-DGZ4I9aR%Fz<0(&H&P zRw(B@?E=Um3+qUv8N&Q<80D&erNQqZYr8UAhLrXMj_y^s2A|RF``?aGZ;A4H&>N#p*q}}Of6pA= z;um=ilv+3)2kw?V2NX)nLHdtBlw+7UVMa9C`ouY~0dJicOQZHI^&*pKkr;TSTP8=8`4!Q+AQHJE1gr5k4613DveC$=(U53YP;USz zF7wWzq`6V$m~GTuZAgTM2?RrKZ9|IuH=tvlLHKc)B;;ZRon-k9TYOUt+8ew$;9*bq z|1Ej`{e(K%Kg5{h0AkV!Uw!)2to|WfoUwvS)kV&=U+L>E z8Lf@3#s43^zB;JwZtJ>)KogumDW%2TwODa?EACR<-6goRKyiw@yF10*-HK~*Ytb*g z&wJ&)^FH4U!~7EXBXe@jK6|gd*4nBmC)J3NOvc8j5v7IO*(?DFho8q83b9pWCU%Gi zjHD7C@wRskUr!POI@$C@ff7F%`wyruoy3Cc?NpU(@}!2tk709+kmla5NOC^O{c1$AO~!u{1Zq|^9C-{?!EbNZS}*XHlS08+=pYC@#*eP&j=N68tEko_nU z$PNzY;0~U;A`Yy=W-!pNlVJ!j1~bK*RYto-OK7$rB~&Uj#F*iqO~R{IIu#gb96Pgr zee59{4~IPW_p9D#HZHo#sLHOZN2?q}u~xHn17 z<2|T%E0&T2r<3ru&eIcOY@uNJ(rFUVNQ>s`ci>o(K!nS3#gZYRW#-DA{Gb>T%FO=9_rxQVIlS(o%FaHN+ZcxMuJlLz@Isf?Z1`kztcSrMtDNHQ#Y9#% zTVOdVt)FC5=ygZk#J&zuFTI^F=4~hB#n5}E&CW)&VAPx)0q_gsbSWGJ=jFnSVicT5 z38bgZ+Ujq`hXb182t6^t`UnQacvS==hse~v2n>V7l@drPi2TseGZfA!-RUKBCLL&v z!Z&~ir+O%=>fJ9a?UfstR%nTouad4W{7-VTC7T&!-AO<~EMm(WC{zuhnoKE0*fUskw>tg#(+LAn>c#1w zw(c&F48!6E6Rut;gH~t0tOpv@V_S8)a!T3PNMr{JD>#P~Mr%zY)9jMc6&~%W9=H{{L0`rN-dUR$cB z1&V7~|I@Qc3OHXd|IT!CUAkPML>+v1nN!cV1M#oocg0fc%*Jm%kZDf8ig@d;#1Ln7Nb#s zM(w);IbE5X&eZp{?CQ$3nxAkM@gAC3-R`o4j6{bKv}gq{FTW)aDVV44mB|&5s&yUa zu$IEtCsn{CC{n*RA08g)cUN_EbXI?zxOTJ*wx+?*@itqpt+$ZwKS<FyldONM|0rDB`l3qGw=mf%= z^bwJvPQvb!=-wVL!T_|1-<=JNLR95tZ1Gy8)v-mr*36mdk7ij#DcMeS!${2e=jClL z?MDA+r|Db$sa5L>`VjuER!zbH;ED5?5i7uld^ z|Kcg9gemv_07-$6*b9szh99k!TI1 
zie88(vitI&q+K3a-u`apjJCx0nmqH3DHfuChd7$?4f`(s&mUNe z2(@6&UjUpNEUJX1v9VVg7f4o*@hcTW-p(=hqmW+Ca1El3U+6+{MB1pA5U^HF5|nG#zV=uM&S&t>xqu8Jbzn}RuQ7reK?|!tcOtBSJ0!k3cUTgWq?)#o1EGngCMM;?xUJ`~(PyXZ!|eT)nhY^u z8FHu`!Ci#t!myS>ijRE?pKP{(PItUZq7U_r;z!94srj_l>WlI*bY-Ufr8p{#;c(gJ zD=T|el5eU)DaCw=;ZV1)fwdc)#mF^-?U$N_CbX=YZOCDnM*Afp2CWkM+e+`1>&Qo zv1U3SVN^Swgic+KpYKh(*oVHz0^g;GVkU&vSR{{3cSx~{vKjsTq-^a`n%_;>--Wa*zdH|6yVRT-rSG(!&SHDcBk-UmzeOi)s= zzLWg1&C9eUMA(c9-s4Sy`Kg#_VN_+IcEzjCsS8Pzr0s}8p>3RBj#-QDU}7SMfcuD^ zCbL(SN@&Fcp?g&8?r0DIS_E9kkw2)i_X;7QC7rTvfM+Rg>RW$sIdc(;Kc#3v0G;1^Z+qDV>vK8VX(Bx`~fKxuvPZ)w=%wndT zS7q=+ODh*d6{ktFc64t-SjmZ?0x%jP{v4Rwu z99`w^WEdYiGH_p<{sEBo)&g&flj z!E3CZuk_8&?kA)XJV^|hw>IQ%pM9N3$C>}tDT3Be>I<$fX22)@Uj(H;$((*Qc*Y-w z6z@uec{=@Z}2;}C9ycQ0HPdD(~G6} zLxxe7P8C0phs|kTZ>Th76W9%jK#M&~JEp#)zZIwy@JTNCB}xYzyP-j@HTAska{BjC z9-VSZ}=TFC$XfJ+PqjL|Z@;8X8kks+1B`;(~m zCy`()^Ye{6gt5$4?9sK%TCv-Eu<1fI4*4XhB&Gr249#hPIc%CVw=?5KN9Vb4?>1 zD34XA)7D;Lsh?`zh#&sx{m*Up#t4E_?sUs}jB2ZFL&3n{OazU?Dst0Oy$pr0hbcpX zjSMNZa+sfc8GX)8w{2PU<^X?c=`lZn52#!u9AvXQ>}m5ju))fL&-0o5!^M2ZC`c}S zA*H8q*uvxa=f6X2jsbUPDkdZ6tS?7h|GoF1Riv;(ypRTYJ@7YBynYvG0Fo&p4&h!9 zT0fUB2kJopswaFU%5$q^tqzIOKul=>u*2 z!d{UvAvKvVn)tmRvteIkkScR7{_T2chd<~HDFKeLKWUIf-_)CuvfPr-haO9vk%a37EKUr`=@}O!UC9++$5aqeX@uo_)fW0%PHvqCe z$f9@5qV7Lhc)_)kZRVzH{1r($n4D}uwHfk$uV%m?eOQASp#BTj5#olAYUgQU71e;m^j?Hn>-YEz5Ty!6pUDZyifZf_@jS@*M}9ao8V2ao zUzPOwNQGQ>4-+<&;vgPsR_H+PzrE~}3@Ndl6er7aytWvat_W1E$owH|=Nq=u|1Q}i z*sFG1rjXQKDHK(PCOZ37wO1YlxJl9I@ai)zlQ`u^HX1>q56cZ`zjE>A&m97NVSwoV zP0CL?cb(T9624OwbvA9VI5+R&;&M~2+je+l9KIY@;&bK7Ufhqv(q)SIZ;;st}Dbb zVCW`@3Ffo%ua*3-2T~Fu#Q+gOJo&iCcufp5!M9B57@xj~X0wLD>5)l&QqgksOtn;1 zb>xHt)V(!L!ve#TFQn#Xau7tyjZF}@M_i(&h)|eJNBfPKeh=e` z-U#)KRC(@%5=4Hk<~M$P3PQH9X_aUvxe#mgMS@oES>l`8YdY zBl>sVi`-s{!(Ds9vVQ2QE|n<@-H&)hj7DnW_aPIH<ag?%p7Q*@ z`cOFnfA)rKr7Wj>k()pD5Kk>mUBy~?{!#WV*WUJZiHG z?_2#3*ekl5y97 zbMoxc@>Pnv777eGYBT+o`XtKv(QpUCLU((wRHqpKcK{kb%ji(a>=$8hy^?_MQDHVU z0-ON~yEO(JN5od&5h$wLz9)0SG 
z*BpuFfS)=fhp0Wa4sTZSZR1|>t({Yz=RdQV<>d81sW43U=r;ke!)E=7K98=#w>h`M zx9C5R+-K`}Vd|^$f9@_A5}NMEFZ4OR=oThX|4#tmKc2-gqY;?F!DL8rJFhfQV&Aib zbc12W9F|Tn3sabY=3{jC5(LkEvfid7lSfoi*!4$J{c^QSI z8nEPT;xNR90U2ssyX5wAM~~hlO}sXXZ&&fXekupnlp!~9ha63Of~HI&9ZRyMC7r&T;~7#tlB(Nrp? zO{q9t({OC-5wB6`A|2EnN1xcIqzkWyBmaEpbJ_2>zlIiG#|>?F|MOHfVifAS^yjfL zg#3Fc|L1T2EYW1x;ZQN8uwgxfYLa|dKB4{9;|v4z(jTY-%aR$$$t9+1J1hYkae(f% zdz%zJii%4j`%DoPv=FRl6#<=PNaavha{2jC7vxRIG`XL8PE@LKM6g#%z`q%ZE@sRAoj@nK!Dp1Dcc%9dlKmFeeR6}#x(PAHYa z7EIx~UqruksePlRUvX&(t{mA^(Ixt4P*U*45L{UjH4j zLca@aMx_D+@`6v$v)d;Z5AiY3z>ufGr6p0rL@H^hw21FKu!|ZY4PK)P+cV4wR^l$} z|4ED`ChF*k*JU@IH}SJ~H84PdHK4S%Xbvs)PqN+gBJWQZ;t9w82;q%`bm(hi~U!p^u8qTh4lwpk$ zxLSI>7Q`xA!gk%?o*Prb`UPEl6_=VFig79R{>Gg!>xCLB8JvSIUB8JP6HJ$-FO`=s zJFtAOw%RSQTQe%7I64TcXOo$hk8SBGMhzMI2gr5za^C!KpxEZx3)HcC+Byw6e%?6> z)e@ig=E?e``_JomOR$r0tZ$2_5&Q2B`?TxOc}3{8%%Q^Hfkz3F zjehxCPzIM%}s?ZgTCyL@t(%SwGI52DSaz)B(Khz#lUp{J#E_ymB1m5ZTx^f@b%= zRZ@Ead1ok3VU4^kbacd$pKvGlU(2){tj%O@ z3s>IpH_@fj95Uf%NRK|3`9U}K~ShbrN}KYe_p`#STRGa$BKbHk#nl4=H7lpPdP9E z=C*u%G+#0NSl;Z-MIN_DI zG#4Em-zDASbq&5UypSc@dof9fuy2RFn1^~gW$cu+-1qF9=-liC$GdxLHsAK9C!qFr z?I*!F@`#>Ipe{F|$Fbagow{ivA@vCy3ro|nvBk!?)7FB)UU=_WQ zw}&?8-xLW@Fj*#CuC*bI88#tx$>zt&gEE@ijR6OKL5`$N9-dr^5T3js8Mp(dPLwL3 zkb-?5fCGR5o^}h>`V?^MIxw$iDRPWjV^*S6(S04{$1$M=i~J3CLns1wxTktc*RDLd zLs(mRAQTuDxsBI2*%F$vF0F`>q%S6s_1Q2xDPsDG6%PP`$N5D$>X^zK!?OBmJTcT4 zpE>i6FbrV(yfXf{Vw%I1eam74%nr8Q2k&R4H1+uHm&Z7`=(l}z=kb?%7wX3m)+JCD zK*e+-A)D_LHziuWL^EsTFY+hjdm_aFQ+%I7+dZ8CV+sqY;RitjbQGc&03|v-A)h%P z72xXZSh{Y1p!6==hK%*|-~MJvYvG!gXwlM0ZutJDtf=_po`)i5NL~#&&6Q+T8Q;m# z4UWRQxr*;Vmdp(KTBv|hdi`)V`l7OK`bPCvg{+S*|9q7NAY1IF_c!?z?0APnDN1#^z0n7C6EwBVxwM3kA z^Xo8&{^Cl>xb<*m-ViJzqKyp1#@ z>+kvOvtm@QCL=Fek)(lNN63BP09er9wS_K5#7q8s+4yWg0bgGmBzB+!4RrBOrOG!*>{>+n~;A4yk> z6p_MPK>!p%uz3sGDLsJORfJ1q4V2O{#39YhQjkT88LQnsRKQT+X6Ra)mb79ZFXKiC zuh);Q2*sPpL_~TYPgfCg6b^p03>C}G2Mh;Lo>ERE-K=L-em?^?Z1 zp~E6VZbEs6_k|jPe&>yqQuhiU6J8t}c~s=>G-b9AjbF>_|6Yk;4EkM4$go{@hPSsd zT>Im>dJ@Ia;Xr#=F|+B1CYKyAe|zrMt_vH 
z{%u*GOUI8zbNWwOqrGWmY4hDi}w)H(>7$8`u#|C#v9`N7WTf7@C88(D&( zn$UiX38E~-bTaH1Dg}4+cYqCC!7jEixDJ6hCXt+HrGo`$CJZ^M0p6NYmbvn8^oU+O zW~fNg7*x0U&3)wRDq5?ANff7c;Xt3gdEsXo96Wxv#4#g=728_FjuNK66^1$8$SHCc;-f2< zRPPHvw_9}B*B$y{UEPkID0$kun^w?8uzP=^+@pRaZLXrHp;L^dyj)O-1QRAz;IipY z>j&GALgICLE><*89VQ%O)8XHV$@sOC{_Z;s>>IW;$Zx=4aQJ=ngkj{!J~DbyL)_y> zd>R~l_a+TjG_4$}fN{<>eC_yx4sfA14*0O`I&ey3+@hn)u*tKt)tb8k7PW2oYLYsi z>Ku_~$o%s{pf#en^78lxavEQG@Qz7i&o3%|4k{|GES{$bM@>n^8=*;s_E+J~&8cEi ziuJ@EskyqrvFRfY<@^mecCKG2|d^j*YdEZyjK3MVdQhCi#fQW}!;-SD&(_cVz z2Y)Q)g=$5)s)yh4^Z7IC+faD+NyI^<30*?ZNOh!lq1t3jur$A!wF!01LwcK>Zz_*y zTEmH>RO?u(sJjp(fx=mdNOdmuE!SLgJ4m}{`27Rk+@@+u@5&TmyW-l@>@Cyjt{nsT zYR_&t41?m(Q9*9-RVCfC@goAzr}+j^h(GKxTSY(v5EtJm7>6M59+wc!6pmPj4hA_7 zlaQ=@N!(wzZ8Z@MN7kK(jk;}t+)7To7WFAwd6_dKY4_$cQ>6?U3fY%Acyhc?$ro=B z#$9MPh~la0`0jDvNr@MsVL8U0?R9k39k$>f77%Lx+@NR(2`#2pR6%*QUvz{<&QdbY zCCeF3hM6$lWgOOS8Nb8GHZ+&_g#qrDi2OKxlrQy(+hOFm{5(BhLpVt@F=Ui4E z?Vl%Z0i*U5?2@n*EoE?Dts}I_DGel6;N74|<*+$B4-utT)z)7{i*PoOMx{&a0!714-mXR2|{z3Ta1d2)gA^UV)mH#(vu>Nd%Dr-bGICt5Kg_*43>8xTq zy(}|6f{#{2Anhjz1)#odXCZonBl66|bJ@+0f{f^f+7&3Pk!#)g;^-+B!GE5J7%VJM zoteSv2bT;n9!zjXU5>Fj(PCP6x9xL{czfG(X zPrXdVqRhT%{q6m)8);lXV3R*AaGO`g1RW~i)<-6|gdl^2AM`1f>5M!7lFCR>2svTb?`Y?i6M30|Fs6c42ahYDCi80EBPq%l?BJm?v9`e2Q#8IkLR zw!%}8rw^PVC6Oo$ZR0C!_!|isV2FweD<_dA#J?l}Oiv831ggV$aV@c-EOJD(NupdX zhyA`!-M#2O>I|)FO=ceqfsumth`x6iZ4a^z9cd_%w!xlga16IP;T8%)D{N{>`a#GE zoEUKWUO&|U0yA!%#dM1|YehQ+c~t0|cCz<)u zqcEcRe&iOJ{skm)l7EnNRb7z;Br{c0TX4BL);G6wIP-Y5r`jubiYVtPxE?}$oNGWL z`K@BT?SjSiv}C}~6ia#Q!f-Lj=?Vijml5p?D-uFonDv8vz)_I(fN2)llR!EJcNaNsWm0&}*NB^?E6s01I?A$Q6boiIr zTw&^SmckB%9cU%!h|6VDL_Nbi*vf{%N*|{keqit);MICBct3)L@^5m>Ketx^DXdlb zk~h?R6-`*}Wnz)A19_3-cA4Bq)P>bfi>f79WB(@jg*{%5DMJ{eUN3gOhQEMs21=l~ zlJ}2d-Z>!w**xqjiEB)CMo)WlHY?dOEHn&8=vCt|v1GCdSa;x?Bv<;NmIjJ64YN|Y zT^`$SDr}|eIhDt1Y5S{+j7%Y^A~GP9s;i5QAam>nuMK%8tR!XrdDnOTuB=E;SI6w} zW^ScLG4$&QF{Fu01svBdx+nfa^Rn#K>$SCvp|%U1G3i2!yr*2vg@AF_HSb^^UtibP zch`EpcU+jAJ!x#K%qnl4YGzvcR(E&Ln&y3sTR~cGTT;_>_{bVuZ--6}6kOys$)hn= 
zFGdz)i5g(Ox5D~ZsAwMeV^kZn>)wr*_)XJGX^vWDWeq9>z>??@v{C-ca;5VF@|Lb&gW?bv09BS7u-ec~Uu7y_<+Yz5aDCi!e!uP?EVnFQI^ zYCu=ILARoZ0_mS}ZioECLkR%(i)ss$3&-9G=W@Gb@V>RVon%JJ;W zk1T!#-~C>Wre4R3j}F0C`CwEB+itx7)~}hIU}>yaUI3nMux!AqjHH@#C|OJ&0%P&~zcbC2OXJ zf{(DB(EsiV`ru&x*FJ-8iFN9&ziYsAb^kYm^q=|EU!hSK26drHF_GC?USLo$VqO3- z`>m3=dD8`W?OgqJu_F@e4p zD2)>ws6Z_gP@sYIYajOQlIOr?8kmWa>oCa&!6f~@eM$;UvMUM?Ny=59 z-&i&sPQuPXR!{txI79An=E!!MPwBvH*+H*-8h53FpK!d_Ct$f5i79NVAd9+4i%^`P ze*R0~MaHu?!4>N)pNIPFo>{eQ%m(M|+ZRQwn#Zs8&_Yd0oXe^#iYZfe)l`!r)sE_) z81o-W1v=<(ll6VViXNzA*z=Q$IqsS;`kD)dJx3yG{EQpl_Q@;Xn{^Dzz4wj%4R*5j zeM3GN##-8f*Y&7I@Y7W)hG}ZFSK*0d`m{q9_vBgFaw1Q=sJ`uz`DpVS)!e)`m)fRq z#PHJ!W5}-j*u(DPr-Qw*S6@7*12?SMJV~>XWv3m-0<*v1GVQIs#Rfq5I9Fk!URHe` z^4Ad>lLs$#zp1MoW85yClM=8uek?OHO0#iz6c;2rg+dT0cuuer)bp8oi+EmbHZcuT zoa2I`QRwZPgUzCU$1wy|c3sdFo^?YoZ99Tz7FU@+cDF(7l_;sZkVG5d)2G+XnfN0V zkkG9kTWdBkUfavlWHR!^N;rg52Cs>~wxbLw8N5M6l4{%gaQq4m+{zeKm5VtxaiY6} z*d~I^gK&?_!Wj;iF-K9d?Aasl7?DOG=J)!lMW+)uF& z-4q&B?-#V$WG5Z-PHjm{S?}fH0O-%wP)hR{h_H;(uPQM`KMi76G9wsN~Fnwt_?` zE-7RvsXcN=6^6Ekn_N+0MQv3tHV|TjSTnlU`pWg|h@z{c3dBMC>0)@;%d7bLQ)@6% zyyuGf0LbggK!QPj;~MEJBK$+cbT#vv@N*N@R5Rl?4Ip&djXw4x_j~7JRXm!gEJ8DS zDec&t%0l7qBop6grpAs$b7F6*zc?COKB56iMO`#xFDJgbG7x~Sp1rcWX{K7~JUK&@ zW(;VeE_>a9xSV*oR94+^28+T@^PicCXi?RNZc>fN#{18;B=1A-B?(n+r(5-V3aj5q zC-pbVF4vU_Ao>%L?Q2d2A9(e|(hXWJ#t3tbCD*KMfkoTj$Hz9Yn7-A`ANqX2Ik!W@ zO}SRS`JPPbr(HWZ(5E?HemwEF_%7O8|hG+;hjr|rEfRnusQexGGX&C&Su`9hpvt{uFy zJsx~PA=M>4FWeNG)A79biRuS$h%1ev9MhGDnlG+qMaMIt>{ISNrMwbjB1~G)uFbB? 
zr9qZv5+L-Rb>@q;j&k~oSJiJ%Jw^NCvZQCztNo|Q32KwA7Y;Zkq+NG0(ppcOm+!fYWq-#ZP!qR1C^r@sq%B+H^+{ErS%#1|+f{Gy*9DjG0XHs0f`ReZ-)B;v=Q=`Pfl z!oV%!3oAX8y*2jD;G7ii5ZiQkZlCpW4!71tezdmt#WDJ@ub1fU6nKJpyterZyn7(( zUcG*mU}@1#`?B#}0bzhM>uz1-@hso$y-|oMu8zmm6gAf^&N2sUkN>u9#3P4$LJ##G zaXr^^68l1A_ta<^kCnr?&7`&UwM`&6r6ppFsyrtgX$EPH*)YBEMetN;ECml9NW-oF9V+@ zhmNI?5=*3D4KpEVNjkA!9hVapTTf#p)>uJ0RtSHp^fM&xAI8&%%|17$HYuB6Kn6KrW%o8%g&X&YfKH#mbkLp0WD*4#Fvy>+Wka|JU5Z6d2Uh| zv0KHWw7=@Vd%hwJTAS<0toFfMYl$>m*0C&8Q`7Xlz2w zfYb7cz?ihD_)z<}MRfyI_zo>5nFf5%m+STWY?DzgUDZWh;>EC$;6`7an*^41x$s+Eky4Kcb}muo?Fa|n;C5Yqw8)e&%e;1myhoc^<%W~#+ntwn*szLfF{_#F zomaht!jiG>rV{vhj*Bn&t;yRq#^JzsCii%&GO2soNAGIHQpl9;)hUC|@gDD%J|`?2 zJg%a5?XE@%--cLZIG!KQzURt{{RdXxIY|!VCQX)UC64^fg`ocnOyv~^K%|k%w8C+O zZSIRsV%%ov$~N(#&ag8%?! zDm@r-J^ysm5mLs3E{&2p&M`FSG?l_B+c+gBHZVo^H9epl0dpc?yL=li;xQwl_IId7 zn3Oa0w8e=6{DzqLV_1BF!XdR)Q4eN(KPefGq^7EWgp>)UvsR3;T{zWt1SAbkF}TM) zl{EaAY-nGP#iF@Z)}N;=b^{E@5vEn}MZUM^T_!iQ zVjC1@)|7J*`#SOE7LTLPlk_-3vc}UsTX7^^@}jSoZIFRPx+h1CJL0s>B)pcHTrOWu zfM~?0J6t3Db*>zNrW)h%cx}6nw~G$Xoc0w7PH)j6*4?9!#=0n*2b&d~Xa$LP9(6xi zAP$345LVwuegBUYz@R;S(8e7kM1QGU_{|zsPKdzLGVoX=OXGTsTru`Zylgk;3QchG zhAg6L<*}<!j4ttRt2+HZBiCs`$$vq_@utYCkNk-#*3_LC+2H*uq@^#%n{2@ zB(9&`+{hC0z}ztg&e@OhDxH2TwH(?w`uR~7GuOczd)NS>qADWxqk+8zZE5RGjlr>X z7lhFVV~Mc+^|Q^?@3g;ilt?BR${l65(`@!HOyR#CgyjWAgK zm3jZp{ie^x1;nIh8zIEn?YQjV*yf2Pg@N@sr=_%N2snDo>qhV%qn0sSdL63?ryF&S@`$#-DN?B)>!y8lV5S$B_vWtJ ztt)Sq8;OmK_+8SVHeYzD6rW&p*lPaF&Ht|G+9LU9$O#5Q?hxr$z1D<_BFic?x}T#i z?52SVa|GOKB&&G+7KB5X4fA*$JG9|MgRwWW@Tz;HB6+QERp*U$pE>iJ>&su?%Aczy zL5AFNt!hmpR8I}p_EY80K%C!jK2A&G@+R-T5e#};aMtX~A{nDfk{CimwO1Y!u1M)H zRun=?&GHlBKs^EV!ZOR}&r6~WbY`;-T5TR+&F(J86-P#y0;b@~>4xe!o{V#}?R90Xd z^8bT95`_fYNzcf>dj2Hy~rzydPVKE6ZZiGU}0=nV^d$E7`$?HGim@m0bF-3Vpct_*30_Sg_O zkkN<=83Q=AZK;yn_e(FKeckx%O{Ariqbj8&>(_->qimyEqa>Ca1q{|oj7U1+a?~Eb zjv}ZKqwqKpR0=)fK#{S%X;)Bn65Y@uCFe7oBLMoXk~M?yGu>Xir0?nX=)jY`s!sRW4%7J=E&}65j(|liaN2s 
z3#(%91<2_OXvmP*c1qCfi?6TIM*1CTr|t7DR^&%Shz+%sn>NSjROHU@UalWUNus-3Q1{er{bIDLQv#+1uJ%qFb=5R)i=!K}7SkE!q0HP0(=;8^&1i6Snm;dn z6;DHdA5ctE@~%c3nK3#de*?Kev&jAABPzpJ zzuOp+CI$aW@iyAL0s`)QK2iu~2F6zyLO??wQ-ZtMnV8GtFh@9#B?KPi3^#A_f3p1J zALQgIGV4`BP!cNDkT6!KZxp~V?4#r9DC6W$6pxcWC&o=?(-_SYVq6Rn$V4vk&do zWzb)l+_S-d;NiUOrY#{4^c%^1HH&hB4|N$xcRQk zu{G6!cUi@EPriRo{WYf6(~x?SOp3wR-I`#k5@F71#o|~Y{^h2~Ze57MpcX#$aLZ)C zD%Z(0$9evON$g4B*bdftoAPOi+_=R0Sn>oO?Rpq4MwHuY1NUHe_gb4ic21c`@n;{m z{d!z=+$8V5DYX_w5#F=1qe+PrftWRJ?tIMYMk|D;K3b=#fK+>0T<8prXAR{9=OQ%Aipz_I@5K9LTkiJ>pKAkLR;>HuSx(@zPkoH4i{owwr1Rb%z zJKzT7nN0H13mi#3eis;SNG*X}$B}+rFFPjrx7nsM^!fYPF625tp!3qIs*%>@c4M8F#`suWxh($%@L}rm8_$)X5LfT`YuxZ; z2W1^TOe0M3$Y8{cfSp;T%eHr_Zm1(ESj4t;6Nax3On#;LPw@0OyA5+Mf>Yd7b@J}W z%vz#!Jz^#rv1Al^l+1?gSKiN5MQoO}|LD1XazgAbp(fS^AAO78@cDq=u;;^3I`$}p zjeuqw)RCHcVDn2g%8n6z5&XFSl9}iR-=%p#Zu+Xp{JFj)sUwx=^hgCkSV4sR=9LVg zzw&A3@lxWKs>cId$Zc*HB4gDFqA!BZ=%rmwT99#ix`!M0Ujn&z@PE{r0a7lQe|LI= z6Z{ZAZQJi4QlO(b+dzJH*4R;i$eorU3pz&xY^pdm#K2@#Q&iP{$P}$q?%y6j| z4)E3rg8^4w7d@ur0xlx)J!KF)PTR;NJ#jgWXXt42){IJGO4*k+!L|QK*jq+L+4g(G zG7K>EFd!-2NH@|gA&mk`gQSGgJ#Mdv;qJRS26|0)c)VY*d3wxt-m-*Z>`6DgG2X+&Zo>B6b$P0*g=_ z4^8v(Vk(z+)ThMqNG}q#>b-Ck?V~_?hAL<9ZFHOMo2JVy19BV%J0nfHiOxxn=F5|> zBqwKf_ezGqR-iktjaXVQ^XDlfva3L$)-|<7CL3Ddi^KS=8@u{4U-k7ePSG->9hyWe zca;5=b=iSia*%CZLnIX%2Kk3T-UWj5c8a`EF-J*Z3BQ+ft3vTZ!*AbhD7mMltx>AH z!wu*RD6(nGNn)6C8oA-}Ys?LOP%#P+(%A80K66T@SixMz!YD1jGsQJDaE~P~3)hBT zLHXGJ0BT!s=-$9vb4>WREP@S0VHkLt$9uA=EV!@fP>zc^~R^8H`E%71+YuG=Dx zTru5{(am?IHfC3S`{1_}dz>K|ie!g04!G^J)4Gc7=E>g;VYZ}~2wEWs_RDzf%=!j5 zbWTKL4V^w>)6y}+8Vd+KnzuXHzd1L#&%kt(3tM=<-$#yoT1m`i6)SEJ23@}<$SNF9 zxc#n1(J+Vi6iTd}wHq^yX0O?@s>84~h${qM0b8)wb|G5`nR+FZ!9ozwxebug5!~q| zCq?|yuq;xpx1N?BYQ?|ZX)PfCB}~f~FycF5a$n z^Y<>uW619{S#h^P^3KKf?niR(n4BNqY)s;;41O@tKBtNKYJ+;(Uzd zQnMAAEom~g&7J5ZO+8KWFS;^+>>YGlHL*<}>-zCHAQ~?j4 zy?gZb#)mx4B&Vbx!9iy>&dgrNSHCu=*FWF(IV89qAN<$r{9hj;=`)Y@QODQ1;nAxM z7;y<4t|YOygs8KrPRa%`k~B8%D7dTY(*)Bz)?~P|tEuCY9cwImdF2^9%1;+;2%EU* 
zNF>w7bH5VCZk=J}y1zCO!)$&*$^>#K4@iBt9DvwYaZ=NmAbn?G?;uPP)l1C+g_gw6yt7K^0H$0%idZu z5MQ<-R~M0&&C!J6GAZ`_)msF%|6Ce;-S1vY5@oW)F?4#zQfEuKQt z|D_ixiZ*)dKlTLvxaN(KL$4HwtId^~lh!z}e~io*DFsI(73%cxV+}NOI{*4T9g0ZL z|Gv`FOdj^Kk1JCOZb~*gNtJ?+lgm1?UwpUJxO13MTl=&AuR<80NFX5SN$T0O(YpK+ zQxh$@;1^6WlQ6q++TwA8fLwQkE!{n8NtNH|#}eS7!p@yl7Cs1{#8#6=dNh4OR6Iws zn_Kit$Ig!Y&$6L#KS~vEXKL}xhH%EY{0?bOSG4g{ zebnjv3O^m^kl<)}#c)kjx<~Gd*WTL2sp(2$ZKAH=IQA7}I@Qb$xFo|W#T%ta+NfN) zE`p1nCvA@^<2Fl!+gYXKyR0|mc}IbM6i{i?`*Iqv*@%K}0ei7jsH|c&X~hIJ)vX3e zoiGjZvu-5{WFb-=e~8VGAr4VoPOvRntU^FpM%63D$P#GT*R!ggt=fvh&tL?rz}c9u zKF0ev&@2Nond6Ec2+n8dTPdLk9NZv{(_GB*m32{1>{XzFyJ~&-wPMfFCmqT1oz#6l>C_OPa*3p> zpvK(zW;wJY&eCeEipMoK-1c3anbAyo!JB6}1zJ6ETOM0RZo!+g#cvI+$%^oQ3cooh zk%b#nXNdlt`95R96IdZqke{WI+vt}rj30tj$()k(UY=8@;H~NkHKiUnzv|v(pBi;s zt^U;1ux@HAlw4x&=Ut5PQUm&)8w zr#UM`Wb7GgFA(~1bd;w0j}}e0G8(v5q$j%GqkJO>%XwxiU| znE~UMG6}n=`87xSwfFSy*RoeB^W?*z{>)TQ!12w)-^fKm5fGa$REs((B}HKwE81QY&z> zt@Yb{S0QNrYh0Xx@aT&%+Qbr%L-)m^yNJUL#f7PV8h{f*UVe4Lj-BENnJ!da^C2P1 zn}-Ik8s8&YV#94KJP2YFTC^4Q2x(i2g^_mzt1YMFlc;0Hy>zDe>1;({a($n&h3NEJ zJJz1P=b(T(MUTH8_xf%;RrWSf2w4Xpd9=qROOG(X@JvV0i`rZ->ObdOyKWeUv}>$M zCQqR_MpMiv$_M0rq0C?5AQA?vaqfR3c5t%?RsU2lw2l23$MM}_4=?>-&|l__x)9O^ zM*aHz6s~IrVDd@Mrk9`Kh+M7x(5o#M!q-{+78Mz{WXlG&@+s}Zhc+YyLO||1I`Sa! 
zWX@#T+__DH3o{2#Zl##Eu)$NsxP-0fCjGK*MeS0ytj}fCM4B(vD7_EMmI{(O6haro zde%{TXY+H4xvlTbe%bM|d-tWT(ShIa%sXu0Wj4rP?eSUY+fpJoC~kV;C*r~oT+!GX{P+FVT-;F5iIwm0 zT9%6nTLDp=dXrj@QIZg#)~aFgp{#U+^D5} z&fG(ybyrmh)+C{Z0ppV+x3}3gQJ`f3aM8l{;26* z8|wL4uBASllt-LV`bFLL{pF&qjg8HZVZ;+m5y{OCtOeGRjjybqhym66LW!r;_L}P- zSWr*iCDavO6vVHP!nZhSHS7W*WxweZ*%uHXU3`*itii>C zO2DjK3#%#UbF}J9C4>6(U(7!tLH!Ly%7v!p3P7<4o_>f)wf?$%N+l<{&rPhaXPL2a zzjAX_ALgObC!CZbiB)zi8Wb{tEUL5Z-w$~_^ZQaGc0roEnZ=9w>;2h}e5Z|?5|p`# z?^}BQw3~?g?Z|B0&M`ENGJ>~t3@7e(V~Yt&f7epoQBSL}nC*(Z;Ny%}yZ&Z+N$;=6dwM2!)ulJ*99o*o$U;>i`2^+reC3`d znJD&)E9arsR4g(JN++sK9?yCEt*PG#^u9WuYTdQJ{kA>IhxE}Eqq&@D!NXh_L4 zIS)`ZQ0uid`89sF@X=f+Ko3z|=TWgvb&?fBEP{b|MDAfl!eF;xV!|L=&x_w?C2#aS ztF>2*eBhr7eih=Jqrg75tHl|A8IqD<3RI_&8LKMfW>P-itda%auy+z1`baf~QngG@ zUIYGZ?y-=`S}x0juhlL|Z3@WY2!5`CtG4-e_JdX6<5jMSB|?{e>fl`=%;4HUCmo%j4M2g}T6ufyi6#wkbb4pba>FoSAqMDTbZ zQUS)SM1B%LJ+B0%yE0jPFA3}P@f-$Dy9CkAY7stf+c&&`;*YpNnbkp?ViIf$(CICz zV#ObXjeE3Ngod{F?+y9KZ11`a@24L0w7OcCVXO-A1ve9V^nuXNs}-JxY74aoLDWV#{Q8t);j!L)rN9C3QfvRgiWs}$un=WTVy=e*Tc z+P+IC+-LN|{%m%{xYgw^YRj`vpz{P>Hd4d|gr}f4*F@`wwpBJ0rD0GAve4qb2=K3^ z&pu|VGzyIEOc5L_cOX7RQbP{y;`?|^z-^knV&dUh#NLMe0GZasIVY87f$TZe8yXuM z$L(^HM9AEmbsi{fHU8)K-2x7T`$$Qn2^W5~D@8MXE}khA6Je~xrX;qQk=DU#M682n z=87d0&}$qf#0{`#M-o;p8GS>pu&vZwHIPF{*HzU_qc25K$u-mar;F}gF~7?*{CU^o zyfjbZSPXQ?HZH_3QjsAY(SEhH0s;)vyiIQ-7mK>tPEedK_M%|HBINm;UAjh{m=?U) zZ+}?6A%?Mj~HrF z!H<8P3y4Aue(3n4KhM7a4wQlNb8x7x%(Hg)>k{eIj?|0MrVw(}fxL8=J)^>&)i zGZuv(6}-;D?uFXzFdgIN#Ei3~t+?)jSwEMNz@W0OL1a2(YuWVz zIrpo=1r6Wd`&q?}^;G3T(YrqL@IK{>F4SA1AgfLg>k1q3%6=RVrTTSr&AV_+gVoSs zlhJx%Y*RCY%8=G{YserXlzm9teBm9`t4F3p9}(_6bz(-?{-~bmTB+DKHk=gqvUv&f zo$fdDfkK#&4|qTv9?gqGRXm)2U?!c|%td~1pVquaUUFvPYV;>&cgM60 zdt84yTxet$5&7Qz^pMZcPo?#S$4aHgpl#XmqIT@Z@aIwmSIFx~KpW$F&TM`jeq8r1 z)&h^A>++tUM{+>QR}38cVjur>{^9O`bMv3CYJ`sjO8>3aet@E(KkY^g0Y9*szyPBlU2siHE6(DD zO0N$)w=#u|9RTwn%WCZTNK0~9gr(lD-d?|z(vrgxxX*dPHP5XN{29ZwP>z66Ue=JB(-4|~c${wKNpb|1$yneiOx)V_^yG+8dV_oC$ 
zk34wbH#G%sd&N=+mnae5I5^OY$x-<0c?1cDCGHCKkFiUU6Mk%L(0%bV)L9hbEGlHQ z?6>K~rpQ(rQ0aou(4l*b%<6s|L%68V#zTBHE)M7}rR0{Ac19wkcf0U9wR^mL^!ehN z@LK(zrLye=Mm2_{cSXRAEYuazA^k@W3NG@eMrRU*)lyoHQIn1U-r`%zY^-OM?hbTp zHz!mgi1!BVd=1kEEV_`VMOG)65D~v_L zTOlR<{oHJ-X+ryQ%=zqXH{z921}DT?%GISCyNBC51(V&SAoAZ`M$a15$t;Tk(vvOc zmmKWUUmd6R)k|gfrF3|g8rmAckec9W?cDKuXDeWhpu$_%J(9f_z$WuRHLqQoVj?~c!5dHwD z9de3v8Sg9eS=ArM)2=;|OSnTD5rQy6^UN}htM#uar>W=CzoiIyN&C*M1udFSQkt(@ z?-ddxHs_reSqRxk`R%%K$+!BliC)On)FzsA^10NLP$LhZAi22->WcYxbf!E;%9R;5ihY-R)3hq15V&e zmBVz&3@G>e!=u4Q4@MJefqi3^)M@#4Mk%ySM*73j&bRp>z4iS|MfAL+7?d-bpmmny zWxG6C)ckpa8!H46fia*7>MyQuH$Eo&5k5G5m>>D^wBpY@d?pE!q=g6t-szhB!|DkE z7w`|kJYl3MXo%MB+&Mhq{-QEqVT=0GfiEPji{=8?*mWTmYKIg{p?FRiYsQ~6X;CQ7 z=SmcZ(kh;1z(P2#K?gTT z_OdouE=hu0@{BL=&I)KJo8Y%j2g^rn4DYlo11*5H|;f7(&4M~gN40-TPtl3 zwvkh-tuevftXePLUsbQ$3aGdT<1G%EO@(A#hp5f#Uta4K%_s{L#*>k_mw>~+i^*) z{@GdBpqd{k7*)QVPDpI(S0{YyTCD!_ZC)ci#ReL+2SRLj3o!ap$ViHQOy8xpfeUCWz#hbJjq-vnu-QD?uf&x|$;4d?%67m6yaTKxF=?p!1 zG=FdQ2fGSSKH1#100i>P(D&PR-#VrpKF5+G9%61I>STn}5Zq``@$v5bOn`M$@&{_i zOM`qDBc|2UTMtz{+vmx|V|a!JmwjKJyIm3%cC?p$ve+NLUszMC*Z!f3gwN&D;2suV zrHyA8Ki&N$-zxv3ut`$ZjPbzfYX+2P4o&sO)#gC}P3Pe!Vhf*eL07g3%D&=!UNO`a zE7VvPh*7*IOyj!MO?QurIb>qP$~}W?h!#!!LGV^73Ewu}^oA*NKI~_WplphOq>b`$ zjiOpQjT^FwbP39M-7@2V-P%_+vJ$BpTJN_s%OY(prM>fSel0SOUip1n?kWECuZ~9z zbxVQ%{{<#mrlAneY>Sg$qa8^^9#}&dwD5+oUl0p~RZBVf$w`O+V`k_PR<4_&kCS^{ zN5hT+Rm+TJDO@L>P(|GpI=+I9JMd7kGrAoGqp-HStnu2dq1g%71JVAnCw2lAQpU-|4%dhrC{sj~=I>nibLo8~iXm{s4Xpe;Hi$#p@kJ_|$Nm z5kF_Ptg?Vgy~}{r@_mW)C9mWdKT0=`$4hxUm$0Y>+$(4tYYkt7(`ABJdpM(y$=6X#h@othlJV?OfaKf6;!zuvdB<*V#Y9}oF{ipygs?eWyW}@(MM9w zk7pMpAWQpK3i+5iQs}d2Y~#Hb%J~0H9U3p`*quc`m<4NGra|t{xyZRl-)+j zwzDk7xBYoG0s|}X+M^lg-!nFtK-NJk zeeX+g-&;J#t zPeYH*;<_|jja0p8h+3n(e!!M=@{T<)X5r53mgIVqzbZrc_Bk;BOIK7?s#ttpg%p6~ z6|1z)Ie+2W=N9;bV`2^S&HCVVNWjd?$K&0F4f|?Z&ih-`$1WtrDIcdGO?_PFyXPtk zJG^Z>N!CB5IwGHcp88wFc8?7Jon9uC4*hqJ^#7(h1r)kN3SyFZfXEbj(gv+eeWaX2 zu4xxynjQ$j^;|ts4|5nG-Iqr2i-kzM3VFyb3oh6xNheP&Jc!fqCZ?NmuBz@)Q4hc& 
zdq|=j$5&tZeepatzgq~R zZ0xAt>Tn_4uWF+q5QvV8_|3_tOld9@>wotQr+g@Ssh3GJ=s&BZ)$Wv@i}XE#U|m6&u}$nHEc;QCBKiecH3LHzmw-!j&zHq2%4oPm7HI zz484c`|o01w1D(qLZ|;jVF3`v{s_+&&$Y<(SwasDnbLgexn|k3beHAxa)WIi} z;a=u=_8)`RD;3RTWn|VcwT7~@iv79XNc?V-BxPBDu~Ld^(;f4ji@H*M%|#~tz(~!V zqz`s>%7=k_#eh*P1$PY17Ml;3BQ&cNlF4}AglKlpQr2-M(#GDHGv`+?(Y*jaZH%+{ zK_(MFHhlFBTjGXqA?)^04l^omjm?`-#v&=_Z3KJhEr3VOs9d{$+J>-tU)mt%sGm*Q zAY;KAou~fJCA>jCArn7~98h^i6w!G-S(Juxrer^G{NNMV5nJayF7(l{_vW@t@wJ$v zDBk*q4j;b!Ws=8x_v3k${uQ%EaSoNh?gtF(5puJfNq9S%aeOtsM5m#$wz5$QV3Uq0ph$@f$kDfgOT z_v>#Q-QyCL?%MMCOGw zki5qUWbCMFkWXJrR8OYrOYMaP5MhnY&5T8G{`!&IK3WP)_!Ni@xom@!aHl*$BR2jf zQV+=21tamfH^2R2EF-z=9>fxZA|(oWxqsvj>EGTOR`KA-)LRgKtH3D>lm(%#3=NmThv9nU<$c;C zBwE1O4XbTsm zswP8?a_fi+v?oq~y%0_kYe2t-wwupYoYXPYvPv{?XvwPwA+rxEmv7TNALPM4RcRk&T@D;azrC20)Z>;E85O7Dmop1BC^ZkFQI?#scughteS)EW z{P{w;D$?^1}4b ziK&~nk>giF{V)k-?Lk~KX|~=`=L$o(B48V{-+2$q+IOq*$_Sx0o5OR52mp5cT_& za#Eujz1HPumKD`p>f-daK?8@l{BJz3BuK=yNrvF0VE6L$=^8(V(*7IP)A_g$RYC!j zgE^y73b{yBVhzHh>fv^Kgz7I)cH)}_dSGg;!jOizO3l07E(&uq?L8+nS}n=G{d z^$wP_aPrB?y9)MQ}%rtfYs#B_;t>}XzPy}!x9lh=s>R( z;O~LGI?tGL?dD;qmXG}L1LNCKxQWThOfS+t->AEK$I*XdR9vNlkiyVG2*)S52c>IS zpZP=32%f>n0yy54NgyleOP!EjKbw#Os|BF3(q|L0OXDTtZ+RYJB;s}8MBI?s^DS-q z+|C67y7V{X+@e2iiW|iACGdXs{-{1aws)N;nba6;B$)I%C+~2cn1yuNjCmXk2d?eg zCEseKuMwX(;R|#Op2?_VW;XEnYjEY7*YTKF=^x=??NpUW7K+DUdR2e9C3 z+be{Se(;#;rjCBQfSzn$Df)^|5S^}$laNXin(1XrPUJnqlLu=FRL4zMF1|g>t1{TA z`rNGZ@qT^f%m}=st!X{|73;uAf0X{t{JG8X!dMw8M{nd_doKG?!eDZPY{{NtayfB+(HYl@ujvblI= zB!&v+G1tSi=UR2RLM<*WC5VubT=+3wBM)@}kw`AHcWK;PY6!l3wd@cD-jUVT#|RE9 zS1V^3W1SR-FuV#Q3z@*wJ|ul}wb}~?D-0z4{4RHf#g9{A#1x3G6da$nM8qzR3SQgl z8(?|t<-c^>71NI`BsaRoo8pfwp?Ii`5Z=LYE3G_g+kAc)G;yZ*Q*!n?xl3wVt2);X zK`?}{49-`3)gP~BTdBk*C6>Nw5FX1fT*OU~cf9cBsG?c-(!YAi_x zM;i~N%ha*hdf9{VRzWgzGqVjA`KbwiIS?@`sOWU(&N@6&_Vb9tEX|W~&M`Mqt5hdR zF~{DA75eTh-Kryh^e#aItxUbm-%ftA%Mp6izZUhetft&l_i9s~yXgOoVmB58L~V>! zdRepo;n=rCQ)Fh1&2Wp+myfyeUpM({YzG@SD zH?1j;ibmDyKURq=_7;}Zi)aw~gna;jL&j3rJwuABdY|tRcXQBKh~!NMMmXm|ypKr! 
z;op1m#PjpH8`i5a`t5t${INCjt?#+ZWa<4P-x_%sUh=u6P(KpojKbpN>hN-?i^N~- zqrzQmFI~HODc)|-GLH?gWo2@E27_c>Itq9lR-gYW6@u*Oz8p{?g*yb=3*Pb=Hw%a3uV%~OcayE5H5qI+kH;7rD`G=H9Aczx(G zWYnUIc!BWO$^HK&H}i9XEtTOX1{buvaEmm>#Y;ex6ikmhO^Zf1XwUn~YzlF_P z&J%AqfKiK!i}7-Da?p?(gNHZBm>?mxYo0u%Nbuwrr0fXSxay%sfQV-BlQ8QdHna|f z%udUv8S`FPyr!-YW*HpB70NO`(u#LkekosC)O-~>F{@L}YW($G9%aFBn8iSaO_{E7t+Nr3mj4%m}Yhi6330Y7;3Y>c`N`J1gUzT=>!`uT7D z;MgEujDxl7ZkSIE;Z$nUJZKe`%RZ-1K)Zl|O5$O}$^6)2)Y5HqhWz&_-9eZ5^uKzB z|NfQ#9hk--T4kF=5wS}i24b39=jJ^_>{=V?1qX)s92B@u^Xz@vW6D)&ElM22p=Iac z`2v`*VEXfhX~)lhOU%QWGk2YpT*6_wff78IX$-%P{rpw&MCB_$nX?AuF!umH4kuE% zzX&rgPJo&GZ2pr33W#Ee2#GKv;AncPI=MX|uJN0z%`UgS|mh7wfzogj{Y|)m}LnTp$2Hi_c$!~>;${5 zs{pvdN#ZjgRr6KqRUC?79?_j~Xa13C)-pBXDg7%CiF5pzzC;ZA|^?vz#ChpIFLBry+?Vqo* zl;J0LZtMA+36Lp^{n{NK>EjWADA(Lhq9D5_H{ zC+yKTe=e6ESo(ILj5}dmgyfPQTcqf(kk_(I-;&eux1Q2}Y;TsJu;HaBp+rx(XQ&+D zTAz2x5rXtoV!GW)%#IVD2aUS?W~vC9RW6IRN4|tg+(4t`Sz|V~Ysgoj?dKY6lkeIa1FV6&cOLp{6gTAHk0Y>#_`vs-1!&O;V)t9dzFvhqe0+R! zgL-%-@n?emoM{YgXq~l@W!v~QXO4TwZx1V#hUOVuvQtWu(_ZuMvmsa_UuYr~#P6)r zD;A}peEz-o>}HRuMOPmkMEw}kyc>FindK`ggY}3rvu~9qR7@2Auk0Wc0q|p#sn_=Z z#JB(W$DRsaqmoNa(Q2PYvn1ApCN{L+5i5UMf5n>eQk)#!obCCp*GW_E#hpGp#eS(GOi#Yj;dQyn?jG*eWFh8f?v%?) zA7$`qG$2}f`MH}Sm~~FY+FUhgVd~%A-P_&rJ?I9Zi}(;9)BeQ_9L6O)(2suR*1-B7 z*KvRS_zZ%8noI+ZOO(?y;qGCgjBR>U|0w`8YH(Xz6>}|Dgz?7Hr+;XGJ}`d)Qbf>U z<7N6yAB&L9Q+nle4beLP<&na7-By_mp9D;f`pr@D1LFF=LZn9lvgK#z7E>ws^Ix_? 
zfN#(_E*o`9&}}jd{2ma-fyD8weq0+2s%@H^k-j5nj0I2>%5c~8nymx?(=6zDb4I8x z+>9l?}!^_IpPMDpC>M9XdVB_HC?#Kx@mZ5M1uaQNe$aZ*} z{zet3B$=bHStEr8mGD*2{CeziH(W;P4vz@4`s;8U-GO(eiZ@05BL7L;0|?^$FG1%~ z+ebaw9IpJIfY^1#i!d*XhRba0^9Z{|RP@y;`Sd~roP7osH$Z6} zAoeAhc!U|HN^eZjpwUQ~eN{zw_>7&Lps6;WT#fUB2k<6%3OJWh&9N>~Mlpo^lv4}E zmJAD`3NyQ(?s_T=e(#TN6UZ%_^6jXVma>AjOUkvPaj`Oe`gWfW9xh^zmohgum+JL+ zf7va1wd5DxWVO}pI_s*1-Drn+yx3j!T@*Y{Z<;hGaq{!^nw%>BuX0$c!OMxeswERp zceoE2P9(Y)Nh95Xpt+6Ep7^a!u3jVQegGQt&6WJV7XESR*}B-;*}LW2P~JrH z(|r$}f8(daF-OxXk0u1}Dr=44M>_C3ZC%`6$59cbj-}96cNG0qlc)q+@kmzL+^w;h zNAnl9ZEkocGW&&qPtu*r|E*N6UbpSJTYN6C^dt&(jNjH$jl)%}q*?W_%y4^Ae=SV% z?$GfkD$RZT_qVABeCuROO-sKCR!+t}*yUB0Bh@Ks@n#tnlL@)TWAOfn{W_f9uGSGPik+|6- zxjOtA=EJV09&hyZcb)1lapvb%VDyQF%^Kt{erH6sJqrU}W`S06GU z7+u1lyYG&&r-{w6zf&a$!WO>A0k3?NPe(BRX^=ay+e$IrdOfe(=YEI@*CFiDsGgF* z;~%^;$r&LeCnf#heRtdq!@_jDIcmHze#+>|_gJ)uDbSY}YoYc|;@shNM3iq@_P19{ z;&vLo4D`7CykID3(v$_KZR0NWc{iYq*iMyf?Fzn&(|5b4dEUm8UAH*PoUKwO(xLuW zc?29JCSZ7;5!&;|ALu{-wPXRND(Wl_FNPYBQ&^4i%H@6m&xZ@4lNeB!TX<4>o`+V$ zf}Mho2wLXP(<`k>&)xn2=6h3)Bjxiki4Ge+rXBymeecC!_(k3xEN_`fPWidNkLgU9 zQAFw?9+S}X9s(bp{HgNs->2^eewwJ^%Nh|3R@lM{9g$Dt{K4a*dhU7C0 zk~{W5ahh~Kc)Nnwhj+Gw$tw^IqZZigiip-q`c>%Wa>6g~0QRF@8?IlrHh~l9-W*$` zlmAZY=}AD=8Vc_dO#bJ`Ly?JN5&~*&A+(O5RC$W5xrhW{$H95j%3_yKB#Rg2iC;Db z&9^jlI7If~rnYtH0IGDW=|@;U;S@*s*(3omO3<}%PH$v%fNkZGw?U4x^UT?r?VVQ$ z0%|BzLCh6Bj<8Z5d;h1W zzZq7#&rPzIx8zhTcOrQfIdP|?0lKQe>Yldrys*%=vlqvi*EPA*K{)*Y`DT{ZtNu0C zk`J*d&2o%%fZ2flfCr>^)CEv*_zlLxZ{?fwWH(?q1?aBeVXK&rHv^p*l0>{K3 zA-hC0PkTwEJce(Zo%Pk^4h41AIzMB&>dd?~|8wv7oBmiXHaQ-~{9hZ!|9QTi>4H-5 zUZoGyK*n7PrC1WmKWUN5Bx?V4#VrqgrWnhG5WPNo`mq4jJka-zXE`4_L6KN(K70s> zj*jL?YzUl}^{5=(c2Ht3X>=r-(!&))RqY$K!N5)BBY+|4@Os=|9eyfj}MMgRBGS7a8Tv0{s~79z9^Ah{k9w zhXhUiEQC<~$@NKUH_a1`+XQDZRA51%we0KI%%3Edn3A#!K=r>fE+sdzCS$Qk=r_ zdg=>1_)dPj2%*RWb}k+8;E!7CRqmkk{m?fp`O0iZV66c3K@tTBX!98ALSnzfMAeFw z*PRB--_3FI7AB_z@f-RZ(;|nriQx(}V@kdf{dO1~%GH0Gdcdf_s^8?Hb<%7sB3oxz zlonvs5M?vUgkylq`{m=P;`!i|7xEmt 
zv<;xa%55eqk$zgpuXcjxjdr`$-lD}rwD!up0)O|fi7lr%z)^4ouJ_W%CO z&kP0tzRp!6UV-bFz~m3}hu%)9`r!+c^jKOQU|xZV1Q%~Dzai)R!uf@r-GZCAcV%TI>wC-M!=*$Joy%q# zcH3yPBNOxIOY=BW{#OF}tt3lY(LuPs6z~!|H~PY3QJ-0Y@^x_T8B3^r8v8LlA~f~c zAEjr|-b{_zo!UPBzFIAj)KxTF`0J<`@Bsy4uqT*FPta_`SfA#%tE;F z@0Z@%vr0y*iBi5wbar@bKpa}R!8BRXQH!+!m-96weHE)Q{B87Eh!i|QF6@)c8+T#(XQdL;p7 z^Izy`9R4sbPWTfh?W-&jRwIIqkfnUorPYWvzMPz~sVK5x3e7B4rT;`DNE`xsoO^^F zsI)~!>tC7|Ix}ty#$B3~*}n)6tecq*L?TBUFIAYZz1jDXKq7FcKf&m+ z0?u2#lzUTvXCVJkch#lxv=icEd*p3v=-X>UTa3ZK{9+ntfEv1q=BVlavDu(N2+;=C z?pbRBf9}!?xo6d(a?{ym*0_nlVSNvRHBY7^G#ls{wTQNebkl@>d7}tiC?yGRNW-SD*x6wr<^AY4 znI8dD!YvKmkk{w!WmP;xA_Jji!{$=eohAgU!N@T)B13>v_|dL3P5I^L?v+Uq7DNoj+; zAB^St`$Q^6Eko?62&INK?7(Cf6Al9?4uuyB-jDZ#larHpr~1Z=ncdNZW!)N7J9MW> z{({VvTUA`}oA@sfC`U4xbnrtqVHC8raq^x4?H5Uy=1&8EU-@GO<9VX(PoMHmb5ZbNATx) z<%JBcV3skTu*(^1ZaLk|Q>)n!toFe2CBglI6c&Cz`Vm=wjXBUF-^HX{GkHgOo(g&& zm!~5{GLsPeEoHc506BBqM)|~{-}#R@O>yZPZd8l39>P;rElLgAbIX?(C;&w;{w&;(jf1R9y)BB;k7ly5bgh_4cwG! zz9*2Vf3q}E6MXp9+dpdO($(NG@!5hwT|rPy0}>r4z9mUIjGw+c+Gi$f#x&4`XSd!& z0%LkLt0sKDO~foibgk)i#`(_ ziw!ZtNvSC{K$6r`A?TWw((g2uUYTrO{w-=sr_|MpZ_#Q!NJ4XS?Ft47*Y;;cV=#Tj z@`GguhY+E6N^Af^u2$$z3-blu|LgXwwVs%P? 
zWkU=r;L$12A8kc>r%?tpzOHW!?44aFH-;g4`O2*UVzllO1|}vZu?{&CmL==1P2sXF zc&dX$9Riz>d4bOfE&l7mwUZ-4a!-?gXe1g4J-Il-nkK4kKeOpcxrBmG_~j=#wkPCd zwC$&BltU0k63zqQs(6HrVB)(bO7B5jg?EZEP;JA$+3DA+cxl_)+Y5l(UUUgN-hG;L zSNq*Z!Q$L`J92T&6uwb8^HgaUN2UN2c1N%`+(C_ww&B z#K0o~cTfNB7E$K<<;r7kGW>^`cN)gc)773;|DM$TQ8H5YS$*}7=@C%k=y48cDC(t+ zZ2&XLK>*};eLMOz2AwTDhY(9ARnfp4l0PrsqZ5X(PO3ubH-aM-O%WTU#$!+~Y;rry z%a#au2s2o-3p#9WY|OZBsK-VG((0fp+sli3~*S3OAO{GwTefNanXInzSrMS zRdUi=8pQ+&&4(Mc=*6nE!ay<{OE5#=BWHZ|?j4{Ep{2p5OEQ^UiVPkN1(|%5}ZQ`8v<*Y+`l##8xFqKQ?K6 zg@OrQBp1cP89s%sQ55dg$w!N!SI?^$lUi|Zk&^z0H=bS0vIeL;WqR~JUaD#VD6Sa> z@j#6J)vYQsufpk8YqwZiNmoCeiG0wq2|E$LK$l4$d~sY6L21W+F7Np@PfMPtV>3FA znH1aevCRLG$KI+crZjvrhM6j#|LzWt%zW$mfko3$`C&%-5})9t>zeG~=&>1*_wn)BV-6Jr)7pV9Tq|_N==B z^N*S3PzN;AK3_ilF!xACNRK_u-eID?AVVRqQdDhjDQ$?MceAO&XD4;4Ef90i3Zq6`igZHnBq3iVS*De=m zn--waDIrXq36B~qIj3`g&s3jxa$jq_m=B2a%sB34tuTv>dsUoq=vIC7=;JHN4d0Gi z>v-H3&Uq-STycBmKa1Gu^EVg9ry@9Y`u|KK6xTuh+*b3j*CMfdvJ={ru7@J+r*myY z&x_D>Z4_P-L^?OV{K=d9kYYF%@y@zuEa1w=**kacZ@!?B)LF>zYX6YhY1w8Ysqv{M z>dD&u=iuON9(#pBf2#-lg(3aTTpfFM^6KKEG#BXLH%0$izf$!T&c;?5bsfLQT%y;- zK*Jk&F~8#IdlYBRy53&Zr^Bk1!@OH1AtDU(;=55nj+9X=|1RfwJ3qmm3;E! 
zPin-&gL=>?3o#2uOwulz6*EQhy=5fs&FcL+r4%pYOgSW6EwZf3~My`%!z)WB#5luJcm2-xB#r2_bUSa+rTly1I5JbdCjvlk@w(PrfbPJx>H%c{N~*zliN~QrZJM%242Y zREnF=-*X!8TsjVk{ri;Z4ReylQ|-#~#N@UJZo2C}ad}47iT9Ol!|hK+FC11npXqd6{gJlZ7Sz** z>6mZYr4zV*UGB#9=!^U9LWg@Z8?%dkae?z~=W=U}G_WBtZy}i)8(lScX!~>Eb84cf zzLd_DF%A!HJ2u991DnVz-Xiq0APZI_>4h+3XOiU!WkKj*Mq|wj3W@&_fj29NcY6vZW8vrN%QBX`>f^1j*rsBXcz>esAv=!*w`9r znPV=hQc=^f{f|eQEj92=UU+!0@=R+Ie%sUF#AD@czo$x&$)sBJ4Z354&dNjUy1`M?sWlY%Sk`vw({8n+j)SAiAe~Tgw@`ildrDyGkPBI2F}JqBu!vgTbL;UhEu#zSm@KG1{dKT zE-W%6M}08_S^Fk+CVULy`?Ibste)3-w=SdPRy3F)O_s|@LKI@uQ0>Yn?zY*F-13v| z?H0JP9TSB&OP+bLC+utp&yWGqr?}po7^LsJK$cW=h{b6!Lb)=-v-ITk=JfE*uGg)3 z0kk1)w>*X9rS=Za`Noa1a4h>r{+)CBVVjSvwdUXVW%PSF2Z_rdt-W0T0<9S=#xl-) zT;ZPjw(&FiXo*H4f{!jOn%e$->FQFHDp>oCHwx)CBc!0wAzZU{cx|BFd9|m>=C-&$ zKOJ!Y`h*Lcl?Gy)5Eu?hbA zspil~UWH62j#?RZv5v94@O8y+^EPYn6*k3Dzzgi^*W zzoS=We(Cxu@}ky~ub^6{+O@`mj4hYNi#f#2aQl)pD4~TZs&Fg9qxPB@Y5|c*Bw1`F zF}YGqk$YI1E|ljIO{JA8HN5??LS@;7MPtDJ3)jadjl|+)KLF!5w?bDor9vNREWYIm zR0#a6!YoI!&&Z7@Ug3$$rh>wW?&i9nYTX}m(Z=#$^m!#X4syd~dIXF2L`bd@6hJ#} zde5e~)~#^V@p}`vR$J-GOlYG-J>EvFR+14&J1+a+wyCY{YLCn|SbL>}YuA3Ho$(zK zSXg--#9(AGinG=3u~y98{eCN=xL`HLVG+WxPMPFZnUE~F$BNaF`2lI91Eac#ELPxR zw1oEPgF{%uj=`w2dwH&L48ngwqRHjEFdo{O+Wxh$ceMy-jZ|yb81xHAZv;&ao8rT> z;&TvWv(d;*Om}=a7k|X>3u30{k1`|NCH$}bv))zbG<0-39r8~f{x1PrbwP?z?gX1% zOb8&hPf1*8|AtN$q0v2)-YsRehSYR#nS{S1y zL#f>PMV+v!Smq~{PgLsyVe=Fu0f;~kP?xKt*H@NLv;l{$$O9R`ai*b4Sp?8jz)!jx zMCGfanFC|b`x@x>xoC3Qw^o9Yp{#kNxs(s^y2*Tjo62lgj9X7F3I_wERFqm7Q!P}x z0;~+A79I$XJ@n4AyK4AIE8n)W%{p-P%n0reJ32Fz?s}Q-Ib@t~uT2`(W-wK3okFSH z82zK0C3HRTku6yv*DXrtbOdh!qVl&TM-wsk$SyKVJN+Siy`RZjz&!l!87q2t>H>?* zMWqr4&=+P_7Oq27=OZQDN_Gteux6hP&y2c?1vWm0xHWR{gB1l#D$isU z>1jhQs{DEK??KplOGOPQb^m(sKhxoG2 zt>80t83U7K1Pu*~7=;IT!7It!RB5tCV!5vLzwFr-(t88#t8D+$rrYGe_Z>U%n*9i% zsZVS%I4_A%-Ogx_mT7VQINp`nhu-b4RPuF)ddXgYwf#9NhDKwpEilY z9M%S1*RA-vHomi^SY1OFgy&dpE*h`gWx_Vgp+wt>EXMu>YM zuK5jrki@}cyWWA_)z{U754inb0G0xi&VKLbJ<$iyVJEyBTr8`>S7AVN=E=;`s?MctjsW*q<{a330e5>ilO$~iGZvBTj+}F 
zT;%lDmKg#K`}(E%^@AAwJ7Ve>9ncMz1`I_LMP!pT8I-g;C7)Q+j(}G<7*0MkN0x3y zsiJ!?C6=DFo6NTAanfuoFp?L55O0>0p~SJ+DjJ6VP{f5{HD#5d*`s3m?~XAw`qJGV zEM9U_!!8_bwdTEW(lKiN=#4gYqE3_l>UUU4Izxi?;QetkNh|n9hg*E&3?@aH6PDQu!juNrRh?C?*rkCL?gsq)KvO-okg*}e#PNiGR0l3}>SK0& zbbk1@MJ3;<5h9@T?&z6V#*`Q0Y86s&C>`&&!SnUG)UVOY-y3H+_vEvqqtO*lRX5yX zGj73D7sBH$`I+f;%dQ3nny$Ye^5FQi@-{U_ND)GwS6BI^UiSH=iA~9sKv|N-+pft< zk%>+B4;w_wS1z-kd)+GC;{3EydxNw#a*Ryu3HV_~=f2+XO<#8*MM1quXjIH|ZelYX zku`T5-BF;A_kWZ*P(5|qnzv)v_@G8ja^`HEYq1NMTE;_N$#21C!uNN^U!YhKumyd> zOH9A|UlKm0m}Qj99jI#{D3?Mr3Ey#C{c9SNb-?E7mGAluF?5p7oJ@+}?dycs_@2bcONrC!I+)-!RHE(I0h?TCg^-Id)Ey0 zHclgRGJT^`-8M~C03Br^I1L)`mC;nzSSCtx{p_kD8RLpapSlz|mJA$3Ty$miuggTe z*;1=;R!K|ektnw9G;K8(=l`cBhJz_3+{}kp7yoIjY;4E}SHyO;Z9%TSbYaUk>FwIVh z?bK)c5kYE*Y$3gclAo7!wsQgibNacumR*oObhrt1bh}MEGD-lbtJs0uvI_^2SAuOY zwJhO}WB7jQCG)Qx722w@@F$v<799ge#~2xp=+F=!3$#X&swyy!#h%TUnaYUGc1Kke zaJs^HJYYRqZd*Q~^=K>Gj~3d+3OC#h^7?Tj?QC6StlU9g01)C$-x@=MC`hrsf_m4 zLXNt~g8emg;j-thAD+amXZ(CWD5zva(h_3QUPMWC7=}i~v#J=jCmK4r;2J{fz(~p&=4`x%&2N1+LhK%uN?K#7(n&iVAhF4F6cp1p`fa)z}BHFxw&*MO-U&m zW~{Ll%GZyRD=NeKmR0SHJhRuR5Ppi;Gp$Ug8{%XzyT{3FR&u}dxeV{8w^vcCoCo+P zqt0l?9Au&sPb#5N_I8+G7J?deCR@?sn5@unK0MlsKu>Q&YJ2pwZ^trhY6s=9?$(yC zrD;y}I_WgSqO($(dR7a&T!ME@E1c0gk_veyNVkrF8l`E5x^gJ^xvqGRnC+Djn+xA+ zhMk^sUfE1?GoEcvZLS=F!^LJjF@p;qZB!S0>w{UJkJjy4T}X`BerC3wiu+z}Q=Za< z41MG*##eJS{E=GRq!LD;;axi0xD(>ICY-S`+Bk=6dF^`fcc!iQ@NEBqh&RP4OF4A| zT4)Bd_`xMF?nuc>*Lex&1?A)%=k!F5&i^3kA6++|2ZzlW7$zKxOb@0A&PryhVfs{ zmBfUtINr4%O%v4>QB>V)tajtN0rsTsK<%{$95b1uB7JTG?#oa7qy{1P*P9Pm90a)d z+hwYZVxBPYc~ArBkmRFc2+SLr7uA7urSTVBPzO*oT~>n%WX*={-NEU)w8qeYpKMQ= zgvZ0!-p0_O5q9PnNoqiO$(dMReSEvVjMY`~Aq35E*PTb^!rbMZ=_xMYr9jPYNpZuoq@>ZGw*(Bu_S zPdr)my^opfIel42Fb@m~ulPK!C=oKYD<5*^LO9!UZdH6d-6|hh8EJI^B^Ex;jy-%c z#2b$oud&EJerDk%**BW7x*J?f&0jC?rF-Z8x$7MIV2SuH5f5B$UNbl69j{B|ePrME zAyv9A1ZiL5EUKp$m&PhIcyH?68P5Gli)3z;ErtPmg+%guq&V5tjW>R~-owNtQ7+2< z{zX>m6A-qUEYNeHbMyKdTX} zjfL1)j6{SMY_8B+`|XNs?G{<+oLlvH;!V2qb56^GSa}22_&otN5t?w>qu_JS^kYiD 
z74dTtOh_yvycMRJVWbHnr}cRgpy_b`udQp2O={`~C9 z0qyv)#ni-{X3lQkf@guno8`v0qtv4LVpY?hQ3FW+oOHFro(+np5%YC)b1C0yX{n4A z7z!)WRDgxQ-W}AE-dZpl{XXfgvlLOSau!AEsKFhq;i4gR>y^epUwMx5U&a*UYBlvo zG@>W<+`?vGDsM%EGREoe>35`Jh%x1JVy|#G_lb39310KxdV;q0*LlY=)vfi&CvPHB z!~%Nr;?hHlPHsNBW^krv6>SEUZD4sNA!dtPN>MT-|RopmiS%rQL7@=?%eFTG+} zSw%a0w_{khaIG{o(kBFEdic)r&%TS5)ts}k(tK@Lr8;?QnF@Z?4fC6{0bc!%`)iaa zZ9g!P8TZOdrx^8ZB`9q*D`jmz!v0Fb?@{MgME;x2D{J|kkkmW(Mn~hDPYETdhX@nF`#hNqZ!Ek-y~^uq ze6{)v#>)w22ER&8JH^r;&@8W=IGNHd{a%>O1^=;CP3@;$F$SDVAj1CGc2Xel@AIU# zdB09&7cM%9NJVDJ^FB1rp)MfX$&k11ZjPkuiV$N`gf^x})>v%Dvf0df>_enY%Qt*Q zFfnBO1})=sGx=eD{TrQc=&YVD6A$wD?(bx?yV84<^I z2BfQ)1{;;(PY!xgwyS=ddHtcHr=&mGG?+Di#yN_~HSKmUz`7x|CcsR2nvoaHcO<#t z&G?P>k2jKttf;GeYOkq3Y>pJpqx5_dkqI)l)YsHF_gu<*j?en`IpEZxMFNHF02Qdv3b$Ui`PtPtoMK7iegROkaa6toWeNDdL->EtmUoK=BgE z9r`ZY4fPn(J(PF z#o*^cy@Ix<>gwKdNjkRKR0b@4d+qMybH|2>Q{2qYuZxj8^D{yljM^l;w&Ug(t2r>c zT{if~L8anHMSc52OU5PFPmg{xyt&nH)RE)b?wibmxHybX5E=e)^&*{~T_h>%P`|5#7rk8uUyyPBwjX?dhWu_7duK=nSQBg!X)=pc zyh%SN&v55en@gHKgs(&;RwLW~DTM?H zh9=sFe|5pK?+^7&uMpx>RvhSn8VRdq;LA{x5DV}mV{ZBQRZ!xgKlg`T7tcABN;j*V zlVF@{d&KY3gvK+UsgowTj>a9Hi?rm&%SO^K>cnxO(zRjX<2j|P;8d>E{Eb%r`@@mO z2M2E~^2?LAtLJilW@LS~nEZ@fF=SNnP~5BCxbopxrxOUzY>Tog%%Dq?luv2s3*hV7S2A&KY7_`Kk-O>n;vG4{kh6gt8h+o6BL`V;eSQW-B>}w6DZlOQi#} zTZCp&pg3jpghgy~%wX%ZfsQWSGV)zZQe13HQ%68%DjqKBZcpmwQi#O4KsJ_al6xX% z)SJ(LT|R;Xn~}5Q1l&uNAoZmAkv=chjKv*q1#$A99C>G{*n#%?Gdrv5HMyxWMdXF+ zAOuyYPTXmdM%7MVf38=C3dKNvvjmqV(6^a-{F7U=l=s{~w#6ZozEo0eTPiB56Ku=< zZ^7Cb%WzVx+RtA76V>*)U3VluYkirO(C)W2HyV==rZt^WKil_4;EEqBVZ}OUDBma| znDJF>xvBz4SwJ981)>SPTos?FUhEGt4JVVTX?{v42dv7?*vMEjjec!BHA<4DM+Vk=#K4*h8 z-}kiHRJSjI{=>0mCuxK zaI2)%CEbXFq>blhd~WwMq+?cX9ogumadzPdhMszwx9JW@3mP|bYg~NOm0v-SBB}?* z5yq>*DP_S65zLI@uECYR6xzzIX4X+N*z)YN4u#W@_4!aC4rM@NmfntJ+*Vruh77DM z6C3NETV2mR*6IGkf`W;cmdd9h?25+Ab)6WSfAGLw3N&A?Xr?(eIv%^>)I;q(%BW%( z^)i3_O@wk8CfyvW77hb+u-t1$QuSIbyVAm1pz`#WME~;UE-oXwfmDuyG!Pq*mwKgM 
z99C;r8EfAj`ZUD+xz`qRl6jf~DSn$uyNjSw#EDr|TS&6tiG{YxfS2E3!^V82uXO5N_87HF^>^}MgKR;uKYX`dOa9$KD^p*Lha z%M__z)I?tkR)bb^)y^0a3y$kozk$=)0%S!zWp^HKO9KH!6=%(r@a@hFZ2P7K4JX4E zRmg}0(>mY&#ch&qF}c!T_s}x(^8K@ch8P4C{W^a$&-L7L6!%;+u4C6@sP|A!25`AY zDxYV~NV+V;JrN^->7UX&UnUvhNaAm~-ZcJoyLfnlz@gYj)JW2GTy%>cDJco5XMVE$ zF(XDQIo#MVD^YFUZ~x2)%F9zE9JHO6-CMco*9;uw->{Ic%-vK}LHSe7jO%0?2IGs* z>uA(qR%HL$u>tg*gCOv_!Z6K1v3e=#q5w!GipR@_FI#`jDnDI~{ilEIv}1oFo7yhmeJEWZ1o-&&sik&EH9S&*^s+od zpqEwJ2Vc_?)cIH4X^%d}d@L0h@>ZU? zGJ0PHL^um`|6_k^)>4UID-{r?jjwYfXNzUI@#`uy(?o1-Pc4zbyPm)|vwA9(e|MRe z*87%+`aT&-f8IHk!jmGR+~V1e@5t|=NCRr&p2wKU+2zR|D<)e}>lS?k=Wx!=t5Ka# z8OZAz5N!MT8R-$;tf5%7Q)g1x^7PBRD_4k58k$4B*jovWzue!xG-dJT&HARcMP9p} z@cLEBehV9p*;W(DSHbdpM?F)yeh2y4G1QUAEq5rl2D6fgJe6NN3J+%8u3E}p>7I&z z-xi?oVv{i==xN^UCgXvdxTvaUzZz`TyER;56M5OZvhID>I66eDE0$P~3W)$zi2dbi z+dszWlY+5CDofUKs>+qf)~MMlA8Hr0b5x7W%Cms?FmENxXFV&@Z;w0OtBBJzc1saK zNFFzEKG^YbWx4Ao^SY@Ulg_i2`)x{Z0R4^$xB2KxhO9|RLGx)2EdXte^(;W@T>_lH z(1vZU*vnXrDpU>Sz-Go79>+_%Rqa^uIj`ej3t!^U_kJDjIkmpwUlUXY{%I-?4by)C zgB5f>(Owm3-apJ9f=AEUYMTH!LTKUxOL9{`h&fkZ>@z(?hkOsls_Q?}&gl*A3wDh- ztEz8e6g~1)lD^#&xOv=Aak-FnO(85K-4dxa;#!FrAVS{c(GWr7y+PF{iyz>jnvx<5 zwBM=Wu9xJ7H0z>{HM*iY3V@@ z2y6ZarjyE}X^wyQbA@hJC!wB5V4U^r15%$gt-BZhDH?kHWX^G|_LNfVhg|iz_@uA5 z04!k1eO#Wpy!YKXoUe%dJm&?r_<}c|iuT%1pN;gBKYGM4o2gev4sd^zgTTC>Hw&ry zw?5y}XfaRoGM8xM13&OjD$*-QQ(+XBGOK;6m3);B$2$T8&QS;U-P5K+2 zP8h27@C_G(+wYMVMked{gQ0$44w?(_RL-DN945|Hr?GLmS+Q}V!;BZt__%V+ZrlvD+3!)@M|WsHy$fxNvw#o+YgZ$%Ii!_FwJ@D zblx0|8<$C{?5m`D#4cv5rxu!5;q9%#7s&r|>r?-U!TA>nL9D4)7~9vp)S{go*C{4Z zMMfGU4|v|NTQ>_V^h$Q0d9rnz4(^+)lKc_hr;w_`S>ct@lv7f>>E;Z*Ws}ljEaP62?78KRxONFnTG6(9R}|gWRhi#kR|f_wHi=Y^RN>Fo&O{@ z!v4OJ9$U9>8CN=^l;`o`@Q6%l#pZxfKEo&iaMY*+H*B`bM2q<7D6W?e{pB~6*dB;h zb`R(+e)=I*h!@20Y%NnIw{sp_;NNiTcnA#1dLBt$BaGvpJX_fY7Y^#c-bu(BZ$xM> z+x;|DyLwT->l&zV$9X0iX^4mYZYg`cBSuAPFKR4aWPRCIK(xSA+c6()AvOpsk;?v< z?%Yv^Q`w>M$6iTy@=`C#0v9Nh5y`S+d0Tf` zN#rN~@kVy2@ocHQ80NX1SVw324OXu*fr_AMkA)7z(LhJ=itH!DgTrG@pZ)z02_H^= 
zK~E!!&h;km2Wa)6HKzd70+t~!E}}a8#fi-;%X_iDT1ozVAsM5~RH++*)(V#my}uV^ zrfzUg4BQ3B-9IkV;oVeLQv%`oy$+kDUianQheQC^Wi+1E@8g{mKxKJJr}>eHVX_P*3iLz5pG%#8jG(5fQ4c2oTR zm#PHr%0<-&=jb4XdL^$l|M{Y!!JU0OeifIW63Z^Fk*(9CC$f1-k-+%Ka{_+ZV|EPR?uZW*0iTeB$~bn9LLr|flsdX!JFhP`*Nf{Z7d6Fzf( zi%yY8Ep99R8@n@+xeMvCM=8yDJWSyH_4hO~x>n)ImpFvuQ=i>8T7YshFO{|yP!+Wt zv?1*1I_gE4s_yHFqV$K`dj9PxI$impL40Gk#YoNRGCi-_8OVxL zyJ=Q$m-Xhs(!|UA(xCZJ0_t%XI^)xy)sNOK^=@r8ALFer!v<=<*nz_{jt5edo}g|n z#J#xjFM!OJ_KX^MTP}B%G8~{GI%GiGJ=#8`XHNgZt||cTova*j^$UoRUY9_ZNH^1F zZ`~xAJ)ves?)RwWpG~^+cD%1kR=y=d)jOTT4=(70cy%Nu>L!PmPrRe30^8{)Cy-}$ zVH$+=YJ3li1GIzuXD0X~(sV;^%(2h!H!IMJH!-nC^3&-;$Va8xrqFcv4K5mxiqFDF zT0uUBb-NrflH8Gxf@a_1$sK0_2-JJwFM)5AuLv8S1~B)NMYLq{sdc|HW(aT9KK}%7 zwGnC(^Mw7@f?H{2T{}{t>KDdlo;E0r_BS70P>50UK@od@`>o$6q$d zI6t*AM?ugiE#L;8qcESy)3fnd{kGxeV0p|014Gz;n`_H{l$H#;yT*C8)_jE@Hf!`A zKD2FasnoETXo|GbuaH}Ybl8M7g*^|X;}BmD&DUwR3kwGeL$ic^PEBWBg%$If=`h}nPMldpsb5VLb zBmJx;-`AvX>l0{(@D0XS~z&)^@o29yS5=74rRqd(%at>VZIL&4z;MlCXum{(H~ zS-7<<6&|T`;InQ&jOp879;Q|*z^3^~9Y&hyP`0t+m`YLvDtYG)-}=g->m^FS=6j#b zu@`7Yyv~%`enzZZF4mBTk+SctEMo%#-rM%KKW-Z^fIL3L?rZ-oBBo1Cdkr{~Db zEQ7h%w+>}sEzj_M*na_bJ`?y^=F+K73aioW0m+(jqrHdYILK5syK8s-qE25`>3u~F zlW+4`SY4DBXwz|bAY@uc=KQ+@h2i=W-+k6I-Ez0Ja6iy!ZWm8Vj36%y^sNd0EoW2) zZwOwn0)5Ac<&luPqKZV88KZd19x21|w=QVMdxn*z=61=9DwM2e-+ZW)C;c6tCnQ5I z&`C)mat0X)p)_Ze2eDOqZP*LS-%I-(ui;{^rYIPNtr|OhF5iHU)<|1 z>|S5?8=S=ih6Ud9F#RoM9UoZJxhaZqsJ_)=jQ-JR<%qkfUm@J7iO;XYDWMc_y|&uF zW8Es$%%;=ctWC0)X`_C#Hy&i^kryJF$`p20D$0(ZfYm!j64_<0mG@L!j?u!s3^=MM znOhe46&87|6W3GIBT34`KkpZJA_&XDX3(aRwVm?;=dBvY-ji9bG|*`PV@UuwfgYWJ$E%iO0`?Yk{n0$$caxHGEN1giN>op zuQlI#zi;2GQlC+KC(N zQ78+GFv{jl<_~y0dFX$@G`YVl1#0!hITIp;3SajjR_ zS&`qw+Wrk2RPE&7Ic>rD!c6$GYk(-PxUly&`*%X=V~Mu-q3Ti5d82G!)ePF{%rT1L zS8+>S&nd~*{8dD+Pmeh=aHZvaGbmKFJLXJ}_*2BzxubKGgnHz8IBU)q#zW14* zf|!5PVjsx>ThlNHg(iyS6%?QBobk5Oxq#Yv1KUFbVMj;xv%zzY{xX+B;l%Q&gocCu zZSB`@?Plb)tXZfdv>PJ5;-g!;ra@{=*8c3gtkky^$j)l7u{g}{R z&0^eX<* zRWlZBdH=z0_gAegJO_rw-QK|}f6Ct4o$PjSzDmWzG{AE&!6i|uvwuY2L*p9x6G*_9 
zNs&f!N!c7#uj3!<^dbmxTqe2-zvHNAfDZ(1^25S;P{8Md?MqtjlL>=vRcF43Rbh68 zxTD=Yj(Z23GA9!kx%TcXafJ>QaB*=%FgE1Tu;*5`WV2_I2Yr2`Z8cGe>ZA`gHk~Nj zOvB-o0hEvLs#GbaXMcM&+s!v2u--qAjy$`g+U9;Fbc=5&b~7o^6)u{5{u?>eS~9+HHFeZK(<#ok6&|7H5|(VaU)+ac z$Zl>vKaOM+GD6Qjmo^o&ye^J7u6`6(u@Qd~y;_~?=z&5gRkWbk`$t_xzZRM_8S@sq zGLN+(s#A-&#EHB6r-+vDz?4@lk_nhM#>GXwP1CY163TOxGcbnU9l5p^rL5{ckEGTt zoy4bs#?$A8i|HDfI_Cu&qxW_a%?Et7J$8;iOMQBIzW&0RcPSTAmtC5Ees{jcQOp8w zTz8|q-0qRtZ#fwjz7$u+Y!Q09;KxO&lHUa%NXFe;q)9cXnM31X-7?u;hYgVJoZXsd zxJxwcpiPA!f^XPrmvj$T0hzGepA^nOBE{Cmbiyw%Cf|J*o|NlRo$yQC)=5_OJ^n4TprQ665)tVG zbhU=Qa0uJR)dwjav6U~nRWXdH_P5E`< zAA+1Mquk!0B+8H~6Dq2ZHmI#%yc*s26v#?6Pmio55=v?WREgmm0flk#tn*VjD)`#P zGZh{DUuSAtTh3HYm4VnZMB!R${XBJ)2x%o+ITJj#wh&Vh+_DIwc{Zv~TQBu@`Po^( zVAbqjI$SfogNit=u_`TEs8b9%4z@-;M7aeO*>SzNv$+~`T8=>l%WUaEc4r5|H=TYA zEu(tS+q)#0WIxf!!UJ{QvR3>~%PukWvwtkzSxe-V@%Hl$tW-%{o=z%E=0i(?a2p{# zooSq9rreWFS9oE&@!EM2EN;SpB?Q5)IXM`HxMBwu2Qm96<#-x2B2nK+2CSpCE&274*tTXi;0;q=X%P&OQWgvQ-z&P`6~s zG73a2@(9G0K?|LXSSpKnUDM7xeRBy%GHTGA-OaIy;aDWtTfHyR^s%d`8NR$QgT>@j z@6mcnU3r;HbC0;c|6x0F^}0vn&bh?-<_SQn6H;_KT>AaVB zS73Ke!PBjycvj)pX@t)vNxk^!E}A+l;!gTO(0RbDA`7oTHVqZ{nH2x}nrGtE0j)w> zpkc$nSdJF!^?zs+&r+f?@((WK&$du!?>f;ks}Ikcppno4^T?^v5H)`1IXa-K@M@ zYfdVH8^93^V$a@6s}^zpv!HnsINNe-Iis3X*x!K`$U>DKd`fo1Aopj*-d|7YuW@UJ zC=0wHJOb1W#=ZpR)rmRweL$hR5G;K7V7 zj+S!<8wP&T&T`P!o-lHDF@(qZWcKy>v?o@)syiC_Qp$@eqM<4yP42sly(vjCWWj_T zT>-Evxagdrs3R0!z&J>0V^|~nJBOlIA?S)^%G^jC?{C2hR9Yp@=neR>|9ENhqL}m) zKy?{(w&`kVWcS=`;B6HWci1#s=JW{Swysh;{|+AW=rfD4=0?>oqWU9okg=kzxZ<`{ ziA!_uWA5_6(b&%vxCT5{aqAv{+7{bltXbQkl?geb2}YQ*~Cv%t;?E+ z`he<`*fO;fClDt?P%~|&--M?{l8Ke_q!4GnAX#kH4)ZuKJmz+%%%bhCz&MDS?pQZ! 
zbMef)<_6s@YeB%*?{NSdz5!eqHrc%w zMy~PyN5#XZ58sWzw`4&GNs!=zKwa6o*P|gqg?+hS3lUe81(cC%1&j|v$i{g)+wJxV zB||l`1NW=nqPjKIk`NcQ>#~wpu7`){5MD7u41-VOAmqIB61e|Ez*zaVDz$GrmnHtn zVO*qK0OS~mejFJj^J1QyP({(FS}albOqc7EW0!hn|MTfhBQ_v&ZmHSWWmL-QIFQAT@1N`0S>||& z(pTl72rBaQwAlFvIfn0{Mg4?s)kCoB8STq+O8T8^z_JNoY1Ie&@L%{<#@5JR9>U>u zC*~}>Zq4yuE-JW!n(|dUWdPC=ZeLbb0Dvc=%vESs!qSFb*x^(zIR?V8!mg4Ik1Gv& zXJYNLB*iN9AJ$T5Dgg9Mb%E00A5aXaHHdfizk0@^$}1|`kQvV-vl1cY#DmMv*4a*n z5n^oK7A9w#OE>kc4TGVl8(nj;{Z*3;g5K}xI0P}qdJ7&EN$1@@B})08(Bd!;NG)XC z>UwT`c8YOK>@=(f^jKx&(ynG23}@34vr}>c zo3~fLTC>3Szn7*o)jjKqUhZk&UESx;?j^{&Ag&Ip!Oyn0eK!Tzyw>u5{ztUAGZ2gjbeIApRAqvqy zcM{z_J=;B}5CEV{3cj>_FStfag2EsHg#CJ(@R<#X`Q?plG$%A}uVP@{H8XNIW_4q< zInX%Q_9-=d+s=749`Q;Q{T*Y#W@~u01$nWm5YR~9X;Q8^jBn&GqEcmX!zy{8t1Zgy zD8HMhc&9wi&@3Njq>x~YfB&@zGFyhY)t$GxX$t`YO{k+D8N20r-t(JX2jO}{(pJ#n zr7C4p58t-_;%%Ce*kR2uuM|II8LYI-xv(kr)^0oUB-KvFb7BHAcQ%l^9Mz|i829}m zIp#krj1uy;rJEKL`bZsG4Qm-6BUu%T(`ovi z26}Ddch~QQZ8lc)w1MXuk?$p`sBYl?z6*fg>+RFT|LoSiWWZeTT2?$7jSe>cb7$-j z)t*fthWc2b)OkbhV|UXJu_}4FUEFLX`(I(4@qQg|ej;S_ruo2DMfVs#`intX|Lo_z zI&V7=;yL@AokLeqU$40d;HHx45&fdR|C z#+li_c^bc1Rk#`fOPpGNHHU+KgFFvjy)RAuAJJkM3{K0=R-(;4G&D44Zf0gS&aywh zF?S7_5uy5txbK}7^SGTzf&7I=V;({#`~(-K$mG@xz}3IcJLT_WeQo0C=%`{))7${_ z@M@qaY@}1M5@gl&s#}0cr;rnhq<g)@sK|MO z1|nu-CQB>OYmn!t&~ABj>z)cr;O7XomYIs3dr*`l;6hPwDBnMoCKbH$hWEzEO(3D% zrpdQZT2^*#Ek|2~c;Dk$!^_TGC?YJ{TwO#CjDFcSds=3n20iy>`>+1< zWBuedm&Me(?i=PKoafwHUYplt|KoHNwyfEkvu9m$Zm}1wKuRvAUAK z@@c4+{5=IySfffE)A^SYnod-k+APw}<%R9l=I&UWzOmz|kYf}&fbdd5CmxVR;t23s zcUi^YCP&-i6r0aIfkLnI$n45{PK}7}xw?oex64L=m%aWa13Gmh|198KzZPJUr8r^t zgSi^DOmvchb_YV2=(}SGi8i~&`h{%S-0XLK2~yCcLs3c*KLZwAObBZ6C*&8SNS6ta zkkV^t^ebJvm0ln=^F905k@mmQ*viK@-E0;wqxcl$3G5ltD8ETkIwNR}mAbUjRTZeHA$x}1}mFwGvQP-xuHr>b2u z4b=_ur3VTg9i=%$ddZH;of|Yvn}qS<0PY?iAg93WY|^Blh6h-2+Wpdmo6Ry;x2Kf( z-B8?=Jd0`XP#Rje-I~e=2nEipF;93{(y2-k+OC4H0rJt%p37jJKvpK?zC{JgR#whp znezI)y@C{Np>uMaX*1ck5LaIrp|DhyF-nW^4q7tAi%(V-fx5t20^8Wi?>5j8hcvJr 
z%qumKS!bH{jo?*`g&V`1!#63bQ+otXKEt}%B7;k=-qTX*Il_sM*_5({Y2UGHXkob| zmyEPbnENvv)=;S^ldb|2RZib~*#z!oA{3MaP5fSy0h_Y)=NWblhb-V(Z$P$=R`H+V z*KCPDNkay<)tL4oq=)?+M^*|;mOXJRt^mW?rDM^<-QC1$$GaK-u355WCrM-#SUu5$ z-c+by!+^6+%b`J<{}XS)6YnrV&#a(_%H!2bkI29if5XeeS}C91RfDy4>n$0$pZ!CM zvTMr^)P0aIhUH|pSR{>{92p)U9he^jxVjazs)SQA&CI6v(JPa%Vl#Ml`X8!>lXP~)s z72bjdJZ+_9aGajy?P62HaE*)d8O26B>SB6E`L{r!p8lq!6D|+w$@L;Ty!Cc(HwtME z3rf!NlwWw0@3FK*+#BAcytQ?*x>anI`?;;>S^eNgN=_$s~Rh^~8rtUfI@pxoDV?TE= zqPi{DAx@*?3puA3$zTzQss17lp8cqB0>7Jf?ynQd)lu{r+XB*#OhZ1|;OS-oYcEtD zs#R|T+kks!X2Pm4l+3i1?0WwQy~PYFiZ(k8mB$-^-`ecmzvO$|w5XtncR0r%e@oAv7!iPfNr5b;7^bpt$J>EsLF5>8J zk1QVx{^{an+_w#~A&)FgfkBb+-%7{#&Mrsfbl#YyRZ6uHX9BRu-@F5}Mvdokvnixl z0xT9 zkS6pB;UCIXb9`RLf&-+|;-M!5$Y|(+5ni^KgwH&-hU|?Wmb#-;GPQD8c z8>wr?4Tt@1ULAxEbEaeC;hI*3zTa&9F;YfYOl*Hp^t$XQj2*sEn)y1|8AIs8DQXc0 zf%yBsy`HkhJ%V zFOYVum;nl=zHj5a2buDy|xaa$QTV0F`?PX_*eL0T8!=qN*DA$FC%c1N75A)Lb z=3-7Bm7wd$<+Isr2FUla{$S=}cXGRrZ18y9_YbV0qY%Uas|xjbO##ZWBX-iQh?!t8 z^N0_Ua&1{#!CKa_5|J7nV52ZTNm-tHp6W~*H-nl%ZK?xuLY9D~3o8WsVlV%yDb1P5 zgBV#hr(&dgWr5aKD5`2|W^+hM$QMogoPm}HoKsXqJa`L8{9-DvNXy_uaKtKu9UamS z?Js=#KExlm`5qiIvoX0lnP$-wPwfy5%~jgXNqVJjF05Nkw?pkga;j^>*%!euMt+}J z&R&y>ejxr5{WG&lk?MZZ5tv-a+;D&sF{9MSHZ~gsMnW_$RY6KqQQ!&GxOJm07QeaM zv|EKtCQjgX*JnoXsK4q`ZP z^Ls!|`WAX~+^b7o-iRGD0oOM|gJuvm21q7)+{N3iJtj9~AMXRadR|hb&+SSTbae&b z6I%-Mlx!;6ttP)OuZ(mTMw~Sp+)3gLWz}?=E7cU&l^rqB`tj<$CO=zZMpr-@;wc2% zQ+v~AJ1RUCiI3Ph)F55OKB}7Z)ZKm}_Je@#f*Px#b>fuu zZ>|S#GpoH%1(e)HnM5$i;M+l@G~9o$Tg023L&0X`vP1x+v+y$wCGZPcNqX z!Qr26%WZVJLkQ*R8^@41UZc|8hh*eJ&_5!a9wS4Ec{@0uM0pJOuDd|mexnY|mv8qH z@4#Nu1_FgDj@kI(KC;BkoS{Vl2}ufaVGWn2aVLvf>*}06SeKE<(FoY90pri3ALf42 z!>F9NeiSzQq+fM!fs=7^nTRNLET`Ds!83#`y+s}-#sdIAnK&O5U$v}~dyo#cm@3b( zj=wEPY!(Q~wU4BB>)C?D`dIQJiI5~h5$E)?bPWVpXmI3o2rpdI|d>x^FJG0X$UgL W)b}Dk`045&pE=?9Rn?b1Y5xLP(uF1f literal 0 HcmV?d00001 diff --git a/docs/miscellaneous/troubleshooting.md b/docs/miscellaneous/troubleshooting.md new file mode 100644 index 0000000..7c3ee61 --- /dev/null +++ 
b/docs/miscellaneous/troubleshooting.md @@ -0,0 +1,9 @@ +# Troubleshooting and Known Issues + +- If your robomimic training seems to be proceeding slowly (especially for image-based agents), it might be a problem with robomimic and more modern versions of PyTorch. We recommend PyTorch 1.12.1 (on Ubuntu, we used `conda install pytorch==1.12.1 torchvision==0.13.1 torchaudio==0.12.1 cudatoolkit=11.3 -c pytorch`). It is also a good idea to verify that the GPU is being utilized during training. +- In our testing on M1 macbook we ran into the following error when using `imageio-ffmpeg` installed through pip: `RuntimeError: No ffmpeg exe could be found. Install ffmpeg on your system, or set the IMAGEIO_FFMPEG_EXE environment variable.` Using `conda install imageio-ffmpeg` fixed this issue on our end. +- If you run into trouble with installing [egl_probe](https://github.com/StanfordVL/egl_probe) during robomimic installation (e.g. `ERROR: Failed building wheel for egl_probe`) you may need to make sure `cmake` is installed. A simple `pip install cmake` should work. +- If you run into other strange installation issues, one potential fix is to launch a new terminal, activate your conda environment, and try the install commands that are failing once again. One clue that the current terminal state is corrupt and this fix will help is if you see installations going into a different conda environment than the one you have active. +- If you run into rendering issues with the Sawyer robot arm, or have trouble reproducing our results, your MuJoCo version might be the issue. As noted in the [Installation](#installation) section, please use MuJoCo 2.3.2 (`pip install mujoco==2.3.2`). + +If you run into an error not documented above, please search through the [GitHub issues](https://github.com/NVlabs/mimicgen/issues), and create a new one if you cannot find a fix. 
\ No newline at end of file diff --git a/docs/modules/datagen.md b/docs/modules/datagen.md new file mode 100644 index 0000000..6c6a92f --- /dev/null +++ b/docs/modules/datagen.md @@ -0,0 +1,5 @@ +# Datagen + +Overview of modules and classes related to data generation. + +DatagenInfo, Waypoint, Data Generator, Selection Strategy \ No newline at end of file diff --git a/docs/modules/env_interfaces.md b/docs/modules/env_interfaces.md new file mode 100644 index 0000000..1774cd9 --- /dev/null +++ b/docs/modules/env_interfaces.md @@ -0,0 +1,4 @@ +# Environment Interfaces + +TODO: overview of base class, what must be implemented, and subclass, what must be implemented +TODO: how this fits into data generation (action translation, object poses, subtask termination heuristics for parsing source demos) \ No newline at end of file diff --git a/docs/modules/overview.md b/docs/modules/overview.md new file mode 100644 index 0000000..1eb7a02 --- /dev/null +++ b/docs/modules/overview.md @@ -0,0 +1,4 @@ +# Overview + +TODO: codebase structure overview +TODO: links to more detailed pages for certain parts (e.g. 
Datagen - break down each file / class, Env Interfaces) \ No newline at end of file diff --git a/docs/tutorials/datagen_custom.md b/docs/tutorials/datagen_custom.md new file mode 100644 index 0000000..63ba19d --- /dev/null +++ b/docs/tutorials/datagen_custom.md @@ -0,0 +1,3 @@ +# Data Generation for Custom Environments + +TODO: data generation for other simulators and tasks (must implement robomimic wrapper for new simulator class, must have environment interface base class with certain methods, must have specific subclass for object poses, and either plan to annotate subtask termination signals or implement those here) diff --git a/docs/tutorials/debugging_datagen.md b/docs/tutorials/debugging_datagen.md new file mode 100644 index 0000000..6868a5e --- /dev/null +++ b/docs/tutorials/debugging_datagen.md @@ -0,0 +1,12 @@ +# Debugging Data Generation + + +TODO: validating source demos and annotations section + + TODO: visualize src dataset structure + + TODO: visualize subtasks + + TODO: can re-do offsets and subtask signals (annotations or env_interface function) + +TODO: generate dataset args for debugging, including pause subtask, render on-screen or not, etc, main entry point for codebase diff --git a/docs/tutorials/getting_started.md b/docs/tutorials/getting_started.md new file mode 100644 index 0000000..7254f74 --- /dev/null +++ b/docs/tutorials/getting_started.md @@ -0,0 +1,42 @@ +# Getting Started and Pipeline Overview + +
+

Note

+ +This section helps users get started with data generation. If you would just like to download our existing datasets and use them with policy learning methods please see the [Reproducing Experiments](https://mimicgen.github.io/docs/tutorials/reproducing_experiments.html) tutorial for a guide, or the [Datasets](https://mimicgen.github.io/docs/datasets/overview.html) page to get details on the datasets. + +
+ + +## Quick Data Generation Run + +Let's run a quick data generation example. + +Before starting, make sure you are at the base repo path: +```sh +$ cd {/path/to/mimicgen} +``` + +### Step 1: Prepare source human dataset. + +MimicGen requires a handful of human demonstrations to get started. + +TODO: download square +TODO: note that you could collect your own as well, using teleoperation (e.g. link to robosuite / robomimic) - must be in robomimic hdf5 format + +TODO: postprocess square (env interface - where to get information needed during data generation, link to env interface module) + +### Step 2: Prepare data generation config. + +TODO: get config from template (lots of options for us to configure, but for now, 10 attempts) +TODO: note that config follows RoboMimic config style (link to it) + + +### Step 3: View data generation outputs. + +TODO: compatibility with robomimic, can get info (link to robomimic) +TODO: see dataset successes and failures, statistics in json + +## Overview of Typical Data Generation Pipeline + +pipeline overview (collect demo, postprocess demo, optionally annotate demo subtask terminations, run data generation, then run policy training) \ No newline at end of file diff --git a/docs/tutorials/launching_several.md b/docs/tutorials/launching_several.md new file mode 100644 index 0000000..cdb2ac7 --- /dev/null +++ b/docs/tutorials/launching_several.md @@ -0,0 +1,3 @@ +# Launching Several Data Generation Runs + +TODO: describe how we use ConfigGenerator, can run multiple config generation sweeps easily. 
\ No newline at end of file diff --git a/docs/tutorials/reproducing_experiments.md b/docs/tutorials/reproducing_experiments.md new file mode 100644 index 0000000..2b2ac44 --- /dev/null +++ b/docs/tutorials/reproducing_experiments.md @@ -0,0 +1,3 @@ +# Reproducing Published Experiments and Results + +This is a guide on how to reproduce published experiments and results \ No newline at end of file diff --git a/docs/tutorials/subtask_termination_signals.md b/docs/tutorials/subtask_termination_signals.md new file mode 100644 index 0000000..089f94b --- /dev/null +++ b/docs/tutorials/subtask_termination_signals.md @@ -0,0 +1,12 @@ +# Subtask Termination Signals + +how is it used to parse source data, and how to make your own manual annotation +where we get it from (by default, read from environment interface, but can have manual annotation) + +the 0 to 1 thing + +note: only used on source demonstrations + +link to tutorial on visualizing subtasks to see how your annotations lead to the subtask splits + +link to datagen info module as well diff --git a/docs/tutorials/task_visualizations.md b/docs/tutorials/task_visualizations.md new file mode 100644 index 0000000..27af5f8 --- /dev/null +++ b/docs/tutorials/task_visualizations.md @@ -0,0 +1,8 @@ +# Task Visualizations + +We provide a convenience script to write videos for each task's reset distribution at `scripts/get_reset_videos.py`. Set the `OUTPUT_FOLDER` global variable to the folder where you want to write the videos, and set `DATASET_INFOS` appropriately if you would like to limit the environments visualized. Then run the script. 
+ +The environments are also readily compatible with robosuite visualization scripts such as the [demo_random_action.py](https://github.com/ARISE-Initiative/robosuite/blob/b9d8d3de5e3dfd1724f4a0e6555246c460407daa/robosuite/demos/demo_random_action.py) script and the [make_reset_video.py](https://github.com/ARISE-Initiative/robosuite/blob/b9d8d3de5e3dfd1724f4a0e6555246c460407daa/robosuite/scripts/make_reset_video.py) script, but you will need to modify these files to add a `import mimicgen` line to make sure that `robosuite` can find these environments. + + +**Note**: You can find task reset visualizations on the [website](https://mimicgen.github.io), but they may look a little different as they were generated with robosuite v1.2. \ No newline at end of file diff --git a/mimicgen/envs/robosuite/coffee.py b/mimicgen/envs/robosuite/coffee.py index 9ff85f2..f7a72a7 100644 --- a/mimicgen/envs/robosuite/coffee.py +++ b/mimicgen/envs/robosuite/coffee.py @@ -213,8 +213,6 @@ def reward(self, action=None): """ Reward function for the task. - Dense reward: TODO - The sparse reward only consists of the threading component. Note that the final reward is normalized and scaled by @@ -468,7 +466,6 @@ def _create_obj_sensors(self, obj_name, modality="object"): names (list): array of corresponding observable names """ - ### TODO: this was stolen from pick-place - do we want to move this into utils to share it? 
### pf = self.robots[0].robot_model.naming_prefix @sensor(modality=modality) @@ -634,8 +631,6 @@ def _get_partial_task_metrics(self): lid_check = self._check_lid() pod_check = self._check_pod() - # TODO: should probably clean up redundant code below and in @_check_pod - # pod should be in pod holder pod_holder_pos = np.array(self.sim.data.body_xpos[self.obj_body_id["coffee_pod_holder"]]) pod_pos = np.array(self.sim.data.body_xpos[self.obj_body_id["coffee_pod"]]) @@ -1250,7 +1245,7 @@ def _get_partial_task_metrics(self): # new task success includes mug placement metrics["task"] = metrics["task"] and metrics["mug_place"] - # TODO: decide if we want a check on drawer being closed here, to make the task even harder + # can have a check on drawer being closed here, to make the task even harder # print(self.sim.data.qpos[self.cabinet_qpos_addr]) return metrics diff --git a/mimicgen/envs/robosuite/mug_cleanup.py b/mimicgen/envs/robosuite/mug_cleanup.py index e2d4e27..0b9d602 100644 --- a/mimicgen/envs/robosuite/mug_cleanup.py +++ b/mimicgen/envs/robosuite/mug_cleanup.py @@ -224,8 +224,6 @@ def reward(self, action=None): """ Reward function for the task. - Dense reward: TODO - The sparse reward only consists of the threading component. Note that the final reward is normalized and scaled by @@ -543,7 +541,6 @@ def _create_obj_sensors(self, obj_name, modality="object"): names (list): array of corresponding observable names """ - ### TODO: this was stolen from pick-place - do we want to move this into utils to share it? ### pf = self.robots[0].robot_model.naming_prefix @sensor(modality=modality) diff --git a/mimicgen/envs/robosuite/threading.py b/mimicgen/envs/robosuite/threading.py index 622b541..ee6f533 100644 --- a/mimicgen/envs/robosuite/threading.py +++ b/mimicgen/envs/robosuite/threading.py @@ -208,8 +208,6 @@ def reward(self, action=None): """ Reward function for the task. - Dense reward: TODO - The sparse reward only consists of the threading component. 
Note that the final reward is normalized and scaled by @@ -423,7 +421,6 @@ def _create_obj_sensors(self, obj_name, modality="object"): names (list): array of corresponding observable names """ - ### TODO: this was stolen from pick-place - do we want to move this into utils to share it? ### pf = self.robots[0].robot_model.naming_prefix @sensor(modality=modality) diff --git a/mimicgen/envs/robosuite/three_piece_assembly.py b/mimicgen/envs/robosuite/three_piece_assembly.py index 173524b..82d1d21 100644 --- a/mimicgen/envs/robosuite/three_piece_assembly.py +++ b/mimicgen/envs/robosuite/three_piece_assembly.py @@ -223,8 +223,6 @@ def reward(self, action=None): """ Reward function for the task. - Dense reward: TODO - The sparse reward only consists of the threading component. Note that the final reward is normalized and scaled by @@ -612,7 +610,6 @@ def _create_obj_sensors(self, obj_name, modality="object"): names (list): array of corresponding observable names """ - ### TODO: this was stolen from pick-place - do we want to move this into utils to share it? 
### pf = self.robots[0].robot_model.naming_prefix @sensor(modality=modality) diff --git a/mimicgen/models/robosuite/objects/composite/needle.py b/mimicgen/models/robosuite/objects/composite/needle.py index 226a523..f50d9bc 100644 --- a/mimicgen/models/robosuite/objects/composite/needle.py +++ b/mimicgen/models/robosuite/objects/composite/needle.py @@ -22,8 +22,6 @@ def __init__( name, ): - ### TODO: make this object more general (with more args and configuration options) later ### - # Set object attributes self._name = name self.needle_mat_name = "darkwood_mat" diff --git a/mimicgen/models/robosuite/objects/composite/ring_tripod.py b/mimicgen/models/robosuite/objects/composite/ring_tripod.py index c49f613..4c8d573 100644 --- a/mimicgen/models/robosuite/objects/composite/ring_tripod.py +++ b/mimicgen/models/robosuite/objects/composite/ring_tripod.py @@ -23,8 +23,6 @@ def __init__( name, ): - ### TODO: make this object more general (with more args and configuration options) later ### - # Set object attributes self._name = name self.tripod_mat_name = "lightwood_mat" diff --git a/mimicgen/models/robosuite/objects/xml_objects.py b/mimicgen/models/robosuite/objects/xml_objects.py index 56b1ca0..d317c3a 100644 --- a/mimicgen/models/robosuite/objects/xml_objects.py +++ b/mimicgen/models/robosuite/objects/xml_objects.py @@ -222,5 +222,4 @@ def top_offset(self): @property def horizontal_radius(self): - # TODO: this might need to change now return 0.15 diff --git a/mimicgen/scripts/generate_core_configs.py b/mimicgen/scripts/generate_core_configs.py index f61acba..51f750f 100644 --- a/mimicgen/scripts/generate_core_configs.py +++ b/mimicgen/scripts/generate_core_configs.py @@ -24,18 +24,23 @@ # set path to folder containing src datasets SRC_DATA_DIR = os.path.join(mimicgen.__path__[0], "../datasets/source") +# SRC_DATA_DIR = "/workspace/scratch/conda/public/mimicgen_environments/datasets/source" # set base folder for where to copy each base config and generate new config 
files for data generation CONFIG_DIR = "/tmp/core_configs" +# CONFIG_DIR = "/tmp/core_configs_ngc" # set base folder for newly generated datasets OUTPUT_FOLDER = "/tmp/core_datasets" +# OUTPUT_FOLDER = "/workspace/scratch/datasets/mimicgen_public/test_1" # number of trajectories to generate (or attempt to generate) -NUM_TRAJ = 1000 +# NUM_TRAJ = 1000 +NUM_TRAJ = 10 # whether to guarantee that many successful trajectories (e.g. keep running until that many successes, or stop at that many attempts) -GUARANTEE = True +# GUARANTEE = True +GUARANTEE = False # whether to run a quick debug run instead of full generation DEBUG = False diff --git a/mimicgen/scripts/generate_core_training_configs.py b/mimicgen/scripts/generate_core_training_configs.py index ef3754b..1476081 100644 --- a/mimicgen/scripts/generate_core_training_configs.py +++ b/mimicgen/scripts/generate_core_training_configs.py @@ -23,13 +23,16 @@ # set path to folder with mimicgen generated datasets -DATASET_DIR = "/tmp/minimal_datasets" +# DATASET_DIR = "/tmp/core_datasets" +DATASET_DIR = "/workspace/scratch/datasets/mimicgen_public/test_1" # set base folder for where to generate new config files for training runs -CONFIG_DIR = "/tmp/minimal_training_configs" +# CONFIG_DIR = "/tmp/core_train_configs" +CONFIG_DIR = "/tmp/core_train_configs_ngc" # set base folder for training outputs (model checkpoints, videos, logs) -OUTPUT_DIR = "/tmp/minimal_training_results" +# OUTPUT_DIR = "/tmp/core_training_results" +OUTPUT_DIR = "/workspace/scratch/exp_results/mimicgen_public/test_1" # path to base config BASE_CONFIG = os.path.join(robomimic.__path__[0], "exps/templates/bc.json") @@ -44,8 +47,8 @@ def make_generators(base_config, dataset_dir, output_dir): # stack dict( dataset_paths=[ - os.path.join(dataset_dir, "stack", "demo_src_stack_targ_D0/demo.hdf5"), - os.path.join(dataset_dir, "stack", "demo_src_stack_targ_D1/demo.hdf5"), + os.path.join(dataset_dir, "stack", "demo_src_stack_task_D0/demo.hdf5"), + 
os.path.join(dataset_dir, "stack", "demo_src_stack_task_D1/demo.hdf5"), ], dataset_names=[ "stack_D0", @@ -56,8 +59,8 @@ def make_generators(base_config, dataset_dir, output_dir): # stack_three dict( dataset_paths=[ - os.path.join(dataset_dir, "stack_three", "demo_src_stack_three_targ_D0/demo.hdf5"), - os.path.join(dataset_dir, "stack_three", "demo_src_stack_three_targ_D1/demo.hdf5"), + os.path.join(dataset_dir, "stack_three", "demo_src_stack_three_task_D0/demo.hdf5"), + os.path.join(dataset_dir, "stack_three", "demo_src_stack_three_task_D1/demo.hdf5"), ], dataset_names=[ "stack_three_D0", @@ -68,9 +71,9 @@ def make_generators(base_config, dataset_dir, output_dir): # square dict( dataset_paths=[ - os.path.join(dataset_dir, "square", "demo_src_square_targ_D0/demo.hdf5"), - os.path.join(dataset_dir, "square", "demo_src_square_targ_D1/demo.hdf5"), - os.path.join(dataset_dir, "square", "demo_src_square_targ_D2/demo.hdf5"), + os.path.join(dataset_dir, "square", "demo_src_square_task_D0/demo.hdf5"), + os.path.join(dataset_dir, "square", "demo_src_square_task_D1/demo.hdf5"), + os.path.join(dataset_dir, "square", "demo_src_square_task_D2/demo.hdf5"), ], dataset_names=[ "square_D0", @@ -82,9 +85,9 @@ def make_generators(base_config, dataset_dir, output_dir): # threading dict( dataset_paths=[ - os.path.join(dataset_dir, "threading", "demo_src_threading_targ_D0/demo.hdf5"), - os.path.join(dataset_dir, "threading", "demo_src_threading_targ_D1/demo.hdf5"), - os.path.join(dataset_dir, "threading", "demo_src_threading_targ_D2/demo.hdf5"), + os.path.join(dataset_dir, "threading", "demo_src_threading_task_D0/demo.hdf5"), + os.path.join(dataset_dir, "threading", "demo_src_threading_task_D1/demo.hdf5"), + os.path.join(dataset_dir, "threading", "demo_src_threading_task_D2/demo.hdf5"), ], dataset_names=[ "threading_D0", @@ -96,9 +99,9 @@ def make_generators(base_config, dataset_dir, output_dir): # three_piece_assembly dict( dataset_paths=[ - os.path.join(dataset_dir, 
"three_piece_assembly", "demo_src_three_piece_assembly_targ_D0/demo.hdf5"), - os.path.join(dataset_dir, "three_piece_assembly", "demo_src_three_piece_assembly_targ_D1/demo.hdf5"), - os.path.join(dataset_dir, "three_piece_assembly", "demo_src_three_piece_assembly_targ_D2/demo.hdf5"), + os.path.join(dataset_dir, "three_piece_assembly", "demo_src_three_piece_assembly_task_D0/demo.hdf5"), + os.path.join(dataset_dir, "three_piece_assembly", "demo_src_three_piece_assembly_task_D1/demo.hdf5"), + os.path.join(dataset_dir, "three_piece_assembly", "demo_src_three_piece_assembly_task_D2/demo.hdf5"), ], dataset_names=[ "three_piece_assembly_D0", @@ -110,9 +113,9 @@ def make_generators(base_config, dataset_dir, output_dir): # coffee dict( dataset_paths=[ - os.path.join(dataset_dir, "coffee", "demo_src_coffee_targ_D0/demo.hdf5"), - os.path.join(dataset_dir, "coffee", "demo_src_coffee_targ_D1/demo.hdf5"), - os.path.join(dataset_dir, "coffee", "demo_src_coffee_targ_D2/demo.hdf5"), + os.path.join(dataset_dir, "coffee", "demo_src_coffee_task_D0/demo.hdf5"), + os.path.join(dataset_dir, "coffee", "demo_src_coffee_task_D1/demo.hdf5"), + os.path.join(dataset_dir, "coffee", "demo_src_coffee_task_D2/demo.hdf5"), ], dataset_names=[ "coffee_D0", @@ -124,8 +127,8 @@ def make_generators(base_config, dataset_dir, output_dir): # coffee_preparation dict( dataset_paths=[ - os.path.join(dataset_dir, "coffee_preparation", "demo_src_coffee_preparation_targ_D0/demo.hdf5"), - os.path.join(dataset_dir, "coffee_preparation", "demo_src_coffee_preparation_targ_D1/demo.hdf5"), + os.path.join(dataset_dir, "coffee_preparation", "demo_src_coffee_preparation_task_D0/demo.hdf5"), + os.path.join(dataset_dir, "coffee_preparation", "demo_src_coffee_preparation_task_D1/demo.hdf5"), ], dataset_names=[ "coffee_preparation_D0", @@ -136,7 +139,7 @@ def make_generators(base_config, dataset_dir, output_dir): # nut_assembly dict( dataset_paths=[ - os.path.join(dataset_dir, "nut_assembly", 
"demo_src_nut_assembly_targ_D0/demo.hdf5"), + os.path.join(dataset_dir, "nut_assembly", "demo_src_nut_assembly_task_D0/demo.hdf5"), ], dataset_names=[ "nut_assembly_D0", @@ -146,7 +149,7 @@ def make_generators(base_config, dataset_dir, output_dir): # pick_place dict( dataset_paths=[ - os.path.join(dataset_dir, "pick_place", "demo_src_pick_place_targ_D0/demo.hdf5"), + os.path.join(dataset_dir, "pick_place", "demo_src_pick_place_task_D0/demo.hdf5"), ], dataset_names=[ "pick_place_D0", @@ -156,10 +159,10 @@ def make_generators(base_config, dataset_dir, output_dir): # mug_cleanup dict( dataset_paths=[ - os.path.join(dataset_dir, "mug_cleanup", "demo_src_mug_cleanup_targ_D0/demo.hdf5"), - os.path.join(dataset_dir, "mug_cleanup", "demo_src_mug_cleanup_targ_D1/demo.hdf5"), - os.path.join(dataset_dir, "mug_cleanup", "demo_src_mug_cleanup_targ_O1/demo.hdf5"), - os.path.join(dataset_dir, "mug_cleanup", "demo_src_mug_cleanup_targ_O2/demo.hdf5"), + os.path.join(dataset_dir, "mug_cleanup", "demo_src_mug_cleanup_task_D0/demo.hdf5"), + os.path.join(dataset_dir, "mug_cleanup", "demo_src_mug_cleanup_task_D1/demo.hdf5"), + os.path.join(dataset_dir, "mug_cleanup", "demo_src_mug_cleanup_task_O1/demo.hdf5"), + os.path.join(dataset_dir, "mug_cleanup", "demo_src_mug_cleanup_task_O2/demo.hdf5"), ], dataset_names=[ "mug_cleanup_D0", @@ -172,8 +175,8 @@ def make_generators(base_config, dataset_dir, output_dir): # hammer_cleanup dict( dataset_paths=[ - os.path.join(dataset_dir, "hammer_cleanup", "demo_src_hammer_cleanup_targ_D0/demo.hdf5"), - os.path.join(dataset_dir, "hammer_cleanup", "demo_src_hammer_cleanup_targ_D1/demo.hdf5"), + os.path.join(dataset_dir, "hammer_cleanup", "demo_src_hammer_cleanup_task_D0/demo.hdf5"), + os.path.join(dataset_dir, "hammer_cleanup", "demo_src_hammer_cleanup_task_D1/demo.hdf5"), ], dataset_names=[ "hammer_cleanup_D0", @@ -184,8 +187,8 @@ def make_generators(base_config, dataset_dir, output_dir): # kitchen dict( dataset_paths=[ - os.path.join(dataset_dir, 
"kitchen", "demo_src_kitchen_targ_D0/demo.hdf5"), - os.path.join(dataset_dir, "kitchen", "demo_src_kitchen_targ_D1/demo.hdf5"), + os.path.join(dataset_dir, "kitchen", "demo_src_kitchen_task_D0/demo.hdf5"), + os.path.join(dataset_dir, "kitchen", "demo_src_kitchen_task_D1/demo.hdf5"), ], dataset_names=[ "kitchen_D0", @@ -198,6 +201,7 @@ def make_generators(base_config, dataset_dir, output_dir): ret = [] for setting in all_settings: for mod in ["low_dim", "image"]: + # for mod in ["image"]: ret.append(make_gen(os.path.expanduser(base_config), setting, output_dir, mod)) return ret @@ -248,6 +252,8 @@ def make_gen(base_config, settings, output_dir, mod): value_names=settings["dataset_names"], ) + # print(json.dumps(settings["dataset_paths"], indent=4)) + # rollout settings generator.add_param( key="experiment.rollout.horizon", diff --git a/mimicgen/scripts/generate_robot_transfer_configs.py b/mimicgen/scripts/generate_robot_transfer_configs.py index b124cf7..48436da 100644 --- a/mimicgen/scripts/generate_robot_transfer_configs.py +++ b/mimicgen/scripts/generate_robot_transfer_configs.py @@ -25,18 +25,23 @@ # set path to folder containing src datasets SRC_DATA_DIR = os.path.join(mimicgen.__path__[0], "../datasets/source") +# SRC_DATA_DIR = "/workspace/scratch/conda/public/mimicgen_environments/datasets/source" # set base folder for where to copy each base config and generate new config files for data generation CONFIG_DIR = "/tmp/robot_configs" +# CONFIG_DIR = "/tmp/robot_configs_ngc" # set base folder for newly generated datasets OUTPUT_FOLDER = "/tmp/robot_datasets" +# OUTPUT_FOLDER = "/workspace/scratch/datasets/mimicgen_public/test_1_robot" # number of trajectories to generate (or attempt to generate) -NUM_TRAJ = 1000 +# NUM_TRAJ = 1000 +NUM_TRAJ = 10 # whether to guarantee that many successful trajectories (e.g. 
keep running until that many successes, or stop at that many attempts) -GUARANTEE = True +# GUARANTEE = True +GUARANTEE = False # whether to run a quick debug run instead of full generation DEBUG = False diff --git a/mimicgen/scripts/generate_robot_transfer_training_configs.py b/mimicgen/scripts/generate_robot_transfer_training_configs.py new file mode 100644 index 0000000..b9d4be9 --- /dev/null +++ b/mimicgen/scripts/generate_robot_transfer_training_configs.py @@ -0,0 +1,232 @@ +# Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# Licensed under the NVIDIA Source Code License [see LICENSE for details]. + +""" +We utilize robomimic's config generator class to easily generate policy training configs for the +robot transfer set of experiments in the paper, where we use source data collected on the Panda arm +to generate demonstrations for other robot arms. It can be modified easily to generate other +training configs. + +See https://robomimic.github.io/docs/tutorials/hyperparam_scan.html for more info. 
+""" +import os +import json +import shutil +import argparse + +import robomimic +from robomimic.utils.hyperparam_utils import ConfigGenerator + +import mimicgen +import mimicgen.utils.config_utils as ConfigUtils +from mimicgen.utils.file_utils import config_generator_to_script_lines + + +# set path to folder with mimicgen generated datasets +# DATASET_DIR = "/tmp/robot_datasets" +DATASET_DIR = "/workspace/scratch/datasets/mimicgen_public/test_1_robot" + +# set base folder for where to generate new config files for training runs +# CONFIG_DIR = "/tmp/robot_train_configs" +CONFIG_DIR = "/tmp/robot_train_configs_ngc" + +# set base folder for training outputs (model checkpoints, videos, logs) +# OUTPUT_DIR = "/tmp/robot_training_results" +OUTPUT_DIR = "/workspace/scratch/exp_results/mimicgen_public/test_1_robot" + +# path to base config +BASE_CONFIG = os.path.join(robomimic.__path__[0], "exps/templates/bc.json") + + +def make_generators(base_config, dataset_dir, output_dir): + """ + An easy way to make multiple config generators by using different + settings for each. 
+ """ + all_settings = [ + # square + dict( + dataset_paths=[ + os.path.join(dataset_dir, "square", "demo_src_square_task_D0_robot_IIWA_gripper_Robotiq85Gripper/demo.hdf5"), + os.path.join(dataset_dir, "square", "demo_src_square_task_D0_robot_Sawyer_gripper_RethinkGripper/demo.hdf5"), + os.path.join(dataset_dir, "square", "demo_src_square_task_D0_robot_UR5e_gripper_Robotiq85Gripper/demo.hdf5"), + os.path.join(dataset_dir, "square", "demo_src_square_task_D1_robot_IIWA_gripper_Robotiq85Gripper/demo.hdf5"), + os.path.join(dataset_dir, "square", "demo_src_square_task_D1_robot_Sawyer_gripper_RethinkGripper/demo.hdf5"), + os.path.join(dataset_dir, "square", "demo_src_square_task_D1_robot_UR5e_gripper_Robotiq85Gripper/demo.hdf5"), + ], + dataset_names=[ + "square_D0_IIWA", + "square_D0_Sawyer", + "square_D0_UR5e", + "square_D1_IIWA", + "square_D1_Sawyer", + "square_D1_UR5e", + ], + horizon=400, + ), + # threading + dict( + dataset_paths=[ + os.path.join(dataset_dir, "threading", "demo_src_threading_task_D0_robot_IIWA_gripper_Robotiq85Gripper/demo.hdf5"), + os.path.join(dataset_dir, "threading", "demo_src_threading_task_D0_robot_Sawyer_gripper_RethinkGripper/demo.hdf5"), + os.path.join(dataset_dir, "threading", "demo_src_threading_task_D0_robot_UR5e_gripper_Robotiq85Gripper/demo.hdf5"), + os.path.join(dataset_dir, "threading", "demo_src_threading_task_D1_robot_IIWA_gripper_Robotiq85Gripper/demo.hdf5"), + os.path.join(dataset_dir, "threading", "demo_src_threading_task_D1_robot_Sawyer_gripper_RethinkGripper/demo.hdf5"), + os.path.join(dataset_dir, "threading", "demo_src_threading_task_D1_robot_UR5e_gripper_Robotiq85Gripper/demo.hdf5"), + ], + dataset_names=[ + "threading_D0_IIWA", + "threading_D0_Sawyer", + "threading_D0_UR5e", + "threading_D1_IIWA", + "threading_D1_Sawyer", + "threading_D1_UR5e", + ], + horizon=400, + ), + ] + + ret = [] + for setting in all_settings: + for mod in ["low_dim", "image"]: + # for mod in ["image"]: + 
ret.append(make_gen(os.path.expanduser(base_config), setting, output_dir, mod)) + return ret + + +def make_gen(base_config, settings, output_dir, mod): + """ + Specify training configs to generate here. + """ + generator = ConfigGenerator( + base_config_file=base_config, + script_file="", # will be overriden in next step + base_exp_name="bc_rnn_{}".format(mod), + ) + + # set algo settings for bc-rnn + modality = mod + low_dim_keys = settings.get("low_dim_keys", None) + image_keys = settings.get("image_keys", None) + crop_size = settings.get("crop_size", None) + if modality == "low_dim": + if low_dim_keys is None: + low_dim_keys = ["robot0_eef_pos", "robot0_eef_quat", "robot0_gripper_qpos", "object"] + if modality == "image": + if low_dim_keys is None: + low_dim_keys = ["robot0_eef_pos", "robot0_eef_quat", "robot0_gripper_qpos"] + if image_keys is None: + image_keys = ["agentview_image", "robot0_eye_in_hand_image"] + if crop_size is None: + crop_size = [76, 76] + assert len(crop_size) == 2 + + ConfigUtils.set_learning_settings_for_bc_rnn( + generator=generator, + group=-1, + modality=modality, + seq_length=10, + low_dim_keys=low_dim_keys, + image_keys=image_keys, + crop_size=crop_size, + ) + + # set dataset + generator.add_param( + key="train.data", + name="ds", + group=0, + values=settings["dataset_paths"], + value_names=settings["dataset_names"], + ) + + # print(json.dumps(settings["dataset_paths"], indent=4)) + + # rollout settings + generator.add_param( + key="experiment.rollout.horizon", + name="", + group=1, + values=[settings["horizon"]], + ) + + # output path + generator.add_param( + key="train.output_dir", + name="", + group=-1, + values=[ + output_dir, + ], + ) + + # num data workers 4 by default (for both low-dim and image) and cache mode "low_dim" + generator.add_param( + key="train.num_data_workers", + name="", + group=-1, + values=[4], + ) + generator.add_param( + key="train.hdf5_cache_mode", + name="", + group=-1, + values=["low_dim"], + ) + + # seed 
+ generator.add_param( + key="train.seed", + name="seed", + group=100000, + values=[101], + ) + + return generator + + +def main(args): + + # make config generators + generators = make_generators(base_config=BASE_CONFIG, dataset_dir=args.dataset_dir, output_dir=args.output_dir) + + if os.path.exists(args.config_dir): + ans = input("Non-empty dir at {} will be removed.\nContinue (y / n)? \n".format(args.config_dir)) + if ans != "y": + exit() + shutil.rmtree(args.config_dir) + + all_json_files, run_lines = config_generator_to_script_lines(generators, config_dir=args.config_dir) + + run_lines = [line.strip() for line in run_lines] + + print("configs") + print(json.dumps(all_json_files, indent=4)) + print("runs") + print(json.dumps(run_lines, indent=4)) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument( + "--config_dir", + type=str, + default=os.path.expanduser(CONFIG_DIR), + help="set base folder for where to generate new config files for data generation", + ) + parser.add_argument( + "--dataset_dir", + type=str, + default=os.path.expanduser(DATASET_DIR), + help="set path to folder with datasets", + ) + parser.add_argument( + "--output_dir", + type=str, + default=os.path.expanduser(OUTPUT_DIR), + help="set base folder for where to generate new config files for data generation", + ) + + args = parser.parse_args() + main(args) diff --git a/mimicgen/scripts/merge_hdf5.py b/mimicgen/scripts/merge_hdf5.py index 600f782..414e334 100644 --- a/mimicgen/scripts/merge_hdf5.py +++ b/mimicgen/scripts/merge_hdf5.py @@ -19,7 +19,6 @@ import mimicgen import mimicgen.utils.file_utils as MG_FileUtils from mimicgen.configs import config_factory -from mimicgen.scripts.generate_dataset import make_dataset_video, postprocess_motion_planning_dataset def merge_hdf5s(args): diff --git a/mimicgen/scripts/prepare_all_src_datasets.sh b/mimicgen/scripts/prepare_all_src_datasets.sh index aa2ff7f..199f46f 100644 --- 
a/mimicgen/scripts/prepare_all_src_datasets.sh +++ b/mimicgen/scripts/prepare_all_src_datasets.sh @@ -5,72 +5,72 @@ # coffee python prepare_src_dataset.py \ - --dataset ../../../mimicgen_environments/datasets/source/coffee.hdf5 \ + --dataset ../../datasets/source/coffee.hdf5 \ --env_interface MG_Coffee \ --env_interface_type robosuite # coffee_preparation python prepare_src_dataset.py \ - --dataset ../../../mimicgen_environments/datasets/source/coffee_preparation.hdf5 \ + --dataset ../../datasets/source/coffee_preparation.hdf5 \ --env_interface MG_CoffeePreparation \ --env_interface_type robosuite # hammer_cleanup python prepare_src_dataset.py \ - --dataset ../../../mimicgen_environments/datasets/source/hammer_cleanup.hdf5 \ + --dataset ../../datasets/source/hammer_cleanup.hdf5 \ --env_interface MG_HammerCleanup \ --env_interface_type robosuite # kitchen python prepare_src_dataset.py \ - --dataset ../../../mimicgen_environments/datasets/source/kitchen.hdf5 \ + --dataset ../../datasets/source/kitchen.hdf5 \ --env_interface MG_Kitchen \ --env_interface_type robosuite # mug_cleanup python prepare_src_dataset.py \ - --dataset ../../../mimicgen_environments/datasets/source/mug_cleanup.hdf5 \ + --dataset ../../datasets/source/mug_cleanup.hdf5 \ --env_interface MG_MugCleanup \ --env_interface_type robosuite # nut_assembly python prepare_src_dataset.py \ - --dataset ../../../mimicgen_environments/datasets/source/nut_assembly.hdf5 \ + --dataset ../../datasets/source/nut_assembly.hdf5 \ --env_interface MG_NutAssembly \ --env_interface_type robosuite # pick_place python prepare_src_dataset.py \ - --dataset ../../../mimicgen_environments/datasets/source/pick_place.hdf5 \ + --dataset ../../datasets/source/pick_place.hdf5 \ --env_interface MG_PickPlace \ --env_interface_type robosuite # square python prepare_src_dataset.py \ - --dataset ../../../mimicgen_environments/datasets/source/square.hdf5 \ + --dataset ../../datasets/source/square.hdf5 \ --env_interface MG_Square \ 
--env_interface_type robosuite # stack python prepare_src_dataset.py \ - --dataset ../../../mimicgen_environments/datasets/source/stack.hdf5 \ + --dataset ../../datasets/source/stack.hdf5 \ --env_interface MG_Stack \ --env_interface_type robosuite # stack_three python prepare_src_dataset.py \ - --dataset ../../../mimicgen_environments/datasets/source/stack_three.hdf5 \ + --dataset ../../datasets/source/stack_three.hdf5 \ --env_interface MG_StackThree \ --env_interface_type robosuite # threading python prepare_src_dataset.py \ - --dataset ../../../mimicgen_environments/datasets/source/threading.hdf5 \ + --dataset ../../datasets/source/threading.hdf5 \ --env_interface MG_Threading \ --env_interface_type robosuite # three_piece_assembly python prepare_src_dataset.py \ - --dataset ../../../mimicgen_environments/datasets/source/three_piece_assembly.hdf5 \ + --dataset ../../datasets/source/three_piece_assembly.hdf5 \ --env_interface MG_ThreePieceAssembly \ --env_interface_type robosuite diff --git a/setup.py b/setup.py index 48405c4..99d4784 100644 --- a/setup.py +++ b/setup.py @@ -30,9 +30,9 @@ python_requires='>=3', description="MimicGen: A Data Generation System for Scalable Robot Learning using Human Demonstrations", author="Ajay Mandlekar", - url="https://gitlab-master.nvidia.com/srl/mimicgen_environments", - author_email="amandlek@cs.stanford.edu", - version="0.1.0", + url="https://github.com/NVlabs/mimicgen", + author_email="amandlekar@nvidia.com", + version="1.0.0", long_description=long_description, long_description_content_type='text/markdown' ) From 7f29f01179faf430ef1042c642733cd77ef51427 Mon Sep 17 00:00:00 2001 From: Ajay Mandlekar Date: Mon, 8 Jul 2024 14:15:41 -0700 Subject: [PATCH 05/11] mostly finalized docs --- README.md | 10 + docs/api/mimicgen.configs.rst | 1 - docs/api/mimicgen.datagen.rst | 1 - docs/api/mimicgen.env_interfaces.rst | 1 - docs/api/mimicgen.envs.robosuite.rst | 1 - docs/api/mimicgen.envs.rst | 1 + docs/api/mimicgen.rst | 1 + 
docs/api/mimicgen.scripts.rst | 1 - docs/api/mimicgen.utils.rst | 1 - docs/conf.py | 6 +- .../{overview.md => mimicgen_corl_2023.md} | 25 +- docs/index.rst | 3 +- docs/introduction/installation.md | 147 ++++-- docs/introduction/overview.md | 23 +- docs/miscellaneous/troubleshooting.md | 2 +- docs/modules/datagen.md | 317 +++++++++++- docs/modules/env_interfaces.md | 23 +- docs/modules/overview.md | 34 +- docs/modules/task_spec.md | 78 +++ docs/tutorials/datagen_custom.md | 483 +++++++++++++++++- docs/tutorials/debugging_datagen.md | 77 ++- docs/tutorials/getting_started.md | 108 +++- docs/tutorials/launching_several.md | 17 +- docs/tutorials/reproducing_experiments.md | 57 ++- docs/tutorials/subtask_termination_signals.md | 75 ++- docs/tutorials/task_visualizations.md | 9 +- mimicgen/env_interfaces/base.py | 4 + mimicgen/scripts/annotate_subtasks.py | 7 +- mimicgen/scripts/generate_core_configs.py | 9 +- .../scripts/generate_core_training_configs.py | 9 +- mimicgen/scripts/generate_dataset.py | 17 + .../generate_robot_transfer_configs.py | 9 +- ...enerate_robot_transfer_training_configs.py | 9 +- 33 files changed, 1433 insertions(+), 133 deletions(-) rename docs/datasets/{overview.md => mimicgen_corl_2023.md} (76%) create mode 100644 docs/modules/task_spec.md diff --git a/README.md b/README.md index 6cc4331..1f52ad1 100644 --- a/README.md +++ b/README.md @@ -24,6 +24,16 @@ For business inquiries, please submit this form: [NVIDIA Research Licensing](htt ------- +## Useful Documentation Links + +Some helpful suggestions on useful documentation pages to view next: + +- [Getting Started](https://mimicgen.github.io/docs/tutorials/getting_started.html) +- [Launching Several Data Generation Runs](https://mimicgen.github.io/docs/tutorials/launching_several.html) +- [Reproducing Published Experiments and Results](https://mimicgen.github.io/docs/tutorials/reproducing_experiments.html) +- [Data Generation for Custom 
Environments](https://mimicgen.github.io/docs/tutorials/datagen_custom.html) +- [Overview of MimicGen Codebase](https://mimicgen.github.io/docs/modules/overview.html) + ## Troubleshooting Please see the [troubleshooting](https://mimicgen.github.io/docs/miscellaneous/troubleshooting.html) section for common fixes, or submit an issue on our github page. diff --git a/docs/api/mimicgen.configs.rst b/docs/api/mimicgen.configs.rst index a580a9c..631bfcc 100644 --- a/docs/api/mimicgen.configs.rst +++ b/docs/api/mimicgen.configs.rst @@ -28,7 +28,6 @@ mimicgen.configs.task\_spec module :undoc-members: :show-inheritance: - Module contents --------------- diff --git a/docs/api/mimicgen.datagen.rst b/docs/api/mimicgen.datagen.rst index 6f63ef9..6d58d07 100644 --- a/docs/api/mimicgen.datagen.rst +++ b/docs/api/mimicgen.datagen.rst @@ -36,7 +36,6 @@ mimicgen.datagen.waypoint module :undoc-members: :show-inheritance: - Module contents --------------- diff --git a/docs/api/mimicgen.env_interfaces.rst b/docs/api/mimicgen.env_interfaces.rst index 1d9b179..3a153df 100644 --- a/docs/api/mimicgen.env_interfaces.rst +++ b/docs/api/mimicgen.env_interfaces.rst @@ -20,7 +20,6 @@ mimicgen.env\_interfaces.robosuite module :undoc-members: :show-inheritance: - Module contents --------------- diff --git a/docs/api/mimicgen.envs.robosuite.rst b/docs/api/mimicgen.envs.robosuite.rst index ecbc884..8104943 100644 --- a/docs/api/mimicgen.envs.robosuite.rst +++ b/docs/api/mimicgen.envs.robosuite.rst @@ -84,7 +84,6 @@ mimicgen.envs.robosuite.three\_piece\_assembly module :undoc-members: :show-inheritance: - Module contents --------------- diff --git a/docs/api/mimicgen.envs.rst b/docs/api/mimicgen.envs.rst index d7b7226..e7e805a 100644 --- a/docs/api/mimicgen.envs.rst +++ b/docs/api/mimicgen.envs.rst @@ -5,6 +5,7 @@ Subpackages ----------- .. 
toctree:: + :maxdepth: 4 mimicgen.envs.robosuite diff --git a/docs/api/mimicgen.rst b/docs/api/mimicgen.rst index 38537c3..5bce266 100644 --- a/docs/api/mimicgen.rst +++ b/docs/api/mimicgen.rst @@ -5,6 +5,7 @@ Subpackages ----------- .. toctree:: + :maxdepth: 4 mimicgen.configs mimicgen.datagen diff --git a/docs/api/mimicgen.scripts.rst b/docs/api/mimicgen.scripts.rst index a1f4ac6..244657e 100644 --- a/docs/api/mimicgen.scripts.rst +++ b/docs/api/mimicgen.scripts.rst @@ -124,7 +124,6 @@ mimicgen.scripts.visualize\_subtasks module :undoc-members: :show-inheritance: - Module contents --------------- diff --git a/docs/api/mimicgen.utils.rst b/docs/api/mimicgen.utils.rst index 3ffa865..93c064a 100644 --- a/docs/api/mimicgen.utils.rst +++ b/docs/api/mimicgen.utils.rst @@ -44,7 +44,6 @@ mimicgen.utils.robomimic\_utils module :undoc-members: :show-inheritance: - Module contents --------------- diff --git a/docs/conf.py b/docs/conf.py index d740392..55a381e 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -1,3 +1,7 @@ +# Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# Licensed under the NVIDIA Source Code License [see LICENSE for details]. + # Configuration file for the Sphinx documentation builder. # # For the full list of built-in configuration values, see the documentation: @@ -77,7 +81,7 @@ # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. 
-language = None +language = "en" # List of patterns, relative to source directory, that match files and diff --git a/docs/datasets/overview.md b/docs/datasets/mimicgen_corl_2023.md similarity index 76% rename from docs/datasets/overview.md rename to docs/datasets/mimicgen_corl_2023.md index 7ad8370..37db304 100644 --- a/docs/datasets/overview.md +++ b/docs/datasets/mimicgen_corl_2023.md @@ -1,4 +1,13 @@ -# Overview +# MimicGen (CoRL 2023) + +In this section, we provide an overview of the datasets we released with the MimicGen paper. These are the same exact datasets used in our paper (but postprocessed to support a higher version of robosuite). We also show how to easily reproduce policy learning results on this data. + +
+

Note

+ +If you would like to reproduce our data generation results in addition to the policy learning results, please see the [Reproducing Experiments](https://mimicgen.github.io/docs/tutorials/reproducing_experiments.html) tutorial for a comprehensive guide. + +
## Downloading and Using Datasets @@ -14,9 +23,12 @@ The datasets are split into different types: - **robot**: datasets generated with MimicGen for different robots. These correspond to the results in Appendix F of the paper. - **large_interpolation**: datasets generated with MimicGen using much larger interpolation segments. These correspond to the results in Appendix H in the paper. -**Note 1**: All datasets are readily compatible with [robomimic](https://robomimic.github.io/) --- the structure is explained [here](https://robomimic.github.io/docs/datasets/overview.html#dataset-structure). This means that you can use robomimic to [visualize the data](https://robomimic.github.io/docs/tutorials/dataset_contents.html) or train models with different policy learning methods that we did not explore in our paper, such as [BC-Transformer](https://robomimic.github.io/docs/tutorials/training_transformers.html). +
+

Note

+ +We found that the large_interpolation datasets pose a significant challenge for imitation learning, and have substantial room for improvement. -**Note 2**: We found that the large_interpolation datasets pose a significant challenge for imitation learning, and have substantial room for improvement. +
### Dataset Statistics @@ -84,8 +96,9 @@ Then, to reproduce a specific set of training runs for different experiment grou python /path/to/robomimic/scripts/train.py --config /path/to/mimicgen/exps/paper/core/coffee_d0/image/bc_rnn.json ``` -**Note 1**: Another option is to directly run `robomimic/scripts/train.py` with any generated config jsons of interest -- the commands in the shell files do exactly this. +
+

Note

-**Note 2**: See the [robomimic documentation](https://robomimic.github.io/docs/introduction/getting_started.html) for more information on how training works. +In the MimicGen paper, we generated our datasets on versions of environments built on robosuite `v1.2`. Since then, we changed the environments and datasets (through postprocessing) to be based on robosuite `v1.4`. However, `v1.4` has some visual and dynamics differences from `v1.2`, so the learning results may not exactly match up with the ones we reported in the paper. In our testing on these released datasets, we were able to reproduce nearly all of our results, but within 10% of the performance reported in the paper. -**Note 3**: In the MimicGen paper, we generated our datasets on versions of environments built on robosuite `v1.2`. Since then, we changed the environments and datasets (through postprocessing) to be based on robosuite `v1.4`. However, `v1.4` has some visual and dynamics differences from `v1.2`, so the learning results may not exactly match up with the ones we reported in the paper. In our testing on these released datasets, we were able to reproduce nearly all of our results, but within 10% of the performance reported in the paper. +
diff --git a/docs/index.rst b/docs/index.rst index af87537..23af397 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -17,7 +17,7 @@ Welcome to mimicgen's documentation! :maxdepth: 1 :caption: Datasets - datasets/overview + datasets/mimicgen_corl_2023 .. toctree:: :maxdepth: 1 @@ -37,6 +37,7 @@ Welcome to mimicgen's documentation! modules/overview modules/datagen + modules/task_spec modules/env_interfaces .. toctree:: diff --git a/docs/introduction/installation.md b/docs/introduction/installation.md index d27db5a..215799f 100644 --- a/docs/introduction/installation.md +++ b/docs/introduction/installation.md @@ -25,64 +25,125 @@ cd mimicgen pip install -e . ``` -However, there are some additional dependencies that we list below. These are best installed from source: - -- [robosuite](https://robosuite.ai/) - - **Note**: This is optional and only needed if running the examples provided with this repository. The MimicGen source code does not rely on robosuite and can be used with other simulation frameworks. - - **Installation** - ```sh - cd - git clone https://github.com/ARISE-Initiative/robosuite.git - git checkout b9d8d3de5e3dfd1724f4a0e6555246c460407daa - cd robosuite - pip install -e . - ``` - - **Note**: the git checkout command corresponds to the commit we used for testing our policy learning results. In general the `master` branch (`v1.4+`) should be fine. - - For more detailed instructions, see [here](https://robosuite.ai/docs/installation.html) -- [robomimic](https://robomimic.github.io/) - - **Installation** - ```sh - cd - git clone https://github.com/ARISE-Initiative/robomimic.git - git checkout d0b37cf214bd24fb590d182edb6384333f67b661 - cd robomimic - pip install -e . - ``` - - **Note**: the git checkout command corresponds to the commit we used for testing our policy learning results. In general the `master` branch (`v0.3+`) should be fine. 
- - For more detailed instructions, see [here](https://robomimic.github.io/docs/introduction/installation.html) -- [robosuite_task_zoo](https://github.com/ARISE-Initiative/robosuite-task-zoo) - - **Note**: This is optional and only needed for the Kitchen and Hammer Cleanup environments / datasets. - - **Installation** - ```sh - cd - git clone https://github.com/ARISE-Initiative/robosuite-task-zoo - git checkout 74eab7f88214c21ca1ae8617c2b2f8d19718a9ed - cd robosuite_task_zoo - pip install -e . - ``` - -Lastly, if using robosuite, **please downgrade MuJoCo to 2.3.2**: +However, there are some additional dependencies that we list below. + +### Additional Dependencies + +Most of these additional dependencies are best installed from source. + +#### robosuite + +
+

Note

+ +[robosuite](https://robosuite.ai/) is an optional dependency that is only needed if running the examples provided with this repository. The MimicGen source code does not rely on robosuite and can be used with other simulation frameworks. + +
+ +```sh +$ cd +$ git clone https://github.com/ARISE-Initiative/robosuite.git +$ git checkout b9d8d3de5e3dfd1724f4a0e6555246c460407daa +$ cd robosuite +$ pip install -e . +``` + +For more detailed instructions, see the [robosuite installation page](https://robosuite.ai/docs/installation.html). + +
+

Note

+ +The git checkout command corresponds to the commit we used for testing our policy learning results. In general the `master` branch (`v1.4+`) should be fine. + +
+ +#### robomimic + +[robomimic](https://robomimic.github.io/) is a required dependency that provides a standardized dataset format, wrappers around simulation environments, and policy learning utilities. + +```sh +$ cd +$ git clone https://github.com/ARISE-Initiative/robomimic.git +$ git checkout d0b37cf214bd24fb590d182edb6384333f67b661 +$ cd robomimic +$ pip install -e . +``` + +For more detailed instructions, see the [robomimic installation page](https://robomimic.github.io/docs/introduction/installation.html). + +
+

Note

+ +The git checkout command corresponds to the commit we used for testing our policy learning results. In general the `master` branch (`v0.3+`) should be fine, as long as it is after the above commit. + +
+ +#### robosuite_task_zoo + +
+

Note

+ +[robosuite_task_zoo](https://github.com/ARISE-Initiative/robosuite-task-zoo) is an optional dependency that is only needed if running the Kitchen and Hammer Cleanup environments and datasets provided with this repository. + +
+ +```sh +$ cd +$ git clone https://github.com/ARISE-Initiative/robosuite-task-zoo +$ git checkout 74eab7f88214c21ca1ae8617c2b2f8d19718a9ed +$ cd robosuite_task_zoo +$ pip install -e . +``` + +#### mujoco + +If using robosuite, **please downgrade MuJoCo to 2.3.2**: + ```sh -pip install mujoco==2.3.2 +$ pip install mujoco==2.3.2 ```
-

MuJoCo Version

+

Warning

+ +This MuJoCo version (`2.3.2`) can be important -- in our testing, we found that other versions of MuJoCo could be problematic, especially for the Sawyer arm datasets (e.g. `2.3.5` causes problems with rendering and `2.3.7` changes the dynamics of the robot arm significantly from the collected datasets). More modern versions of MuJoCo (e.g. `3.0`+) might be fine. -This MuJoCo version (`2.3.2`) can be important -- in our testing, we found that other versions of MuJoCo could be problematic, especially for the Sawyer arm datasets (e.g. `2.3.5` causes problems with rendering and `2.3.7` changes the dynamics of the robot arm significantly from the collected datasets).
+#### pygame + +If you plan on using our subtask annotation interface (`scripts/annotate_subtasks.py`) you should also install pygame with `pip install pygame`. See the [Subtask Termination Signals](https://mimicgen.github.io/docs/tutorials/subtask_termination_signals.html) page for more information. + ## Test Your Installation The following script can be used to try random actions in one of our custom robosuite tasks. ```sh -cd mimicgen/scripts -python demo_random_action.py +$ cd mimicgen/scripts +$ python demo_random_action.py ```
-

Testing Data Generation

+

Note

To test data generation please move on to the [Getting Started](https://mimicgen.github.io/docs/tutorials/getting_started.html) tutorial.
+ +## Install documentation dependencies + +If you plan to contribute to the repository and add new features, you must install the additional requirements required to build the documentation locally: + +```sh +$ pip install -r requirements-docs.txt +``` + +You can test generating the documentation and viewing it locally in a web browser: +```sh +$ cd docs +$ make clean +$ make apidoc +$ make html +$ cp -r images _build/html/ +``` + +There should be a generated `_build` folder - navigate to `_build/html/` and open `index.html` in a web browser to view the documentation. diff --git a/docs/introduction/overview.md b/docs/introduction/overview.md index 12bd15f..f8fc9df 100644 --- a/docs/introduction/overview.md +++ b/docs/introduction/overview.md @@ -6,23 +6,28 @@ This repository contains the official release of data generation code, simulation environments, and datasets for the [CoRL 2023](https://www.corl2023.org/) paper "MimicGen: A Data Generation System for Scalable Robot Learning using Human Demonstrations". -The released datasets contain over 48,000 task demonstrations across 12 tasks and the MimicGen data generation tool can create as many as you would like. - Website: [https://mimicgen.github.io](https://mimicgen.github.io) Paper: [https://arxiv.org/abs/2310.17596](https://arxiv.org/abs/2310.17596) Documentation: [https://mimicgen.github.io/docs/introduction/overview.html](https://mimicgen.github.io/docs/introduction/overview.html) -For business inquiries, please submit this form: [NVIDIA Research Licensing](https://www.nvidia.com/en-us/research/inquiries/) +
+

Note

+ +For business inquiries, please submit this form: [NVIDIA Research Licensing](https://www.nvidia.com/en-us/research/inquiries/). + +
+ +## Useful Documentation Links -## Useful Links +Some helpful suggestions on useful documentation pages to view next: -TODO: link to tutorial: quick data generation run example and outputs, pipeline overview (collect demo, postprocess demo, optionally annotate demo subtask terminations, run data generation, then run policy training) -TODO: link to tutorial: configuring multiple data generation runs -TODO: link to tutorial: reproducing paper results -TODO: link to tutorial: data generation for custom envs -TODO: link to modules page to understand codebase overview, especially Datagen Info +- [Getting Started](https://mimicgen.github.io/docs/tutorials/getting_started.html) +- [Launching Several Data Generation Runs](https://mimicgen.github.io/docs/tutorials/launching_several.html) +- [Reproducing Published Experiments and Results](https://mimicgen.github.io/docs/tutorials/reproducing_experiments.html) +- [Data Generation for Custom Environments](https://mimicgen.github.io/docs/tutorials/datagen_custom.html) +- [Overview of MimicGen Codebase](https://mimicgen.github.io/docs/modules/overview.html) ## Troubleshooting diff --git a/docs/miscellaneous/troubleshooting.md b/docs/miscellaneous/troubleshooting.md index 7c3ee61..1cd5a97 100644 --- a/docs/miscellaneous/troubleshooting.md +++ b/docs/miscellaneous/troubleshooting.md @@ -4,6 +4,6 @@ - In our testing on M1 macbook we ran into the following error when using `imageio-ffmpeg` installed through pip: `RuntimeError: No ffmpeg exe could be found. Install ffmpeg on your system, or set the IMAGEIO_FFMPEG_EXE environment variable.` Using `conda install imageio-ffmpeg` fixed this issue on our end. - If you run into trouble with installing [egl_probe](https://github.com/StanfordVL/egl_probe) during robomimic installation (e.g. `ERROR: Failed building wheel for egl_probe`) you may need to make sure `cmake` is installed. A simple `pip install cmake` should work. 
- If you run into other strange installation issues, one potential fix is to launch a new terminal, activate your conda environment, and try the install commands that are failing once again. One clue that the current terminal state is corrupt and this fix will help is if you see installations going into a different conda environment than the one you have active. -- If you run into rendering issues with the Sawyer robot arm, or have trouble reproducing our results, your MuJoCo version might be the issue. As noted in the [Installation](#installation) section, please use MuJoCo 2.3.2 (`pip install mujoco==2.3.2`). +- If you run into rendering issues with the Sawyer robot arm, or have trouble reproducing our results, your MuJoCo version might be the issue. As noted in the [Installation](https://mimicgen.github.io/docs/introduction/installation.html) section, please use MuJoCo 2.3.2 (`pip install mujoco==2.3.2`). If you run into an error not documented above, please search through the [GitHub issues](https://github.com/NVlabs/mimicgen/issues), and create a new one if you cannot find a fix. \ No newline at end of file diff --git a/docs/modules/datagen.md b/docs/modules/datagen.md index 6c6a92f..9931bd1 100644 --- a/docs/modules/datagen.md +++ b/docs/modules/datagen.md @@ -1,5 +1,316 @@ -# Datagen +# Data Generation Classes -Overview of modules and classes related to data generation. +This section discusses the key classes related to data generation. -DatagenInfo, Waypoint, Data Generator, Selection Strategy \ No newline at end of file +## Data Generator + +The `DataGenerator` class (`datagen/data_generator.py`) is responsible for generating new demonstration trajectories. First, the internal `_load_dataset` method is used to parse the source dataset (using [Subtask Termination Signals](https://mimicgen.github.io/docs/tutorials/subtask_termination_signals.html)) into source subtask segments. 
Each segment is a sequence of [DatagenInfo](https://mimicgen.github.io/docs/modules/datagen.html#datagen-info) objects. Then, the `generate` method is called repeatedly (by the main script `scripts/generate_dataset.py`) to keep attempting to generate new trajectories using the source subtask segments. During each new attempt, the `select_source_demo` method is used to employ a [SelectionStrategy](https://mimicgen.github.io/docs/modules/datagen.html#selection-strategy) to pick a reference source subtask segment to transform. [WaypointTrajectory](https://mimicgen.github.io/docs/modules/datagen.html#waypoint) objects are used to transform and compose subtask segments together. + +## Datagen Info + +DatagenInfo objects keep track of important information used during data generation. These objects are added to source demonstrations with the `prepare_src_dataset.py` script, or provided directly by an [Environment Interface](https://mimicgen.github.io/docs/modules/env_interfaces.html) object. + +The structure of the object is below: + +```python +class DatagenInfo(object): + """ + Structure of information needed from an environment for data generation. To allow for + flexibility, not all information must be present. + """ + def __init__( + self, + eef_pose=None, + object_poses=None, + subtask_term_signals=None, + target_pose=None, + gripper_action=None, + ): + """ + Args: + eef_pose (np.array or None): robot end effector poses of shape [..., 4, 4] + object_poses (dict or None): dictionary mapping object name to object poses + of shape [..., 4, 4] + subtask_term_signals (dict or None): dictionary mapping subtask name to a binary + indicator (0 or 1) on whether subtask has been completed. Each value in the + dictionary could be an int, float, or np.array of shape [..., 1]. 
+ target_pose (np.array or None): target end effector poses of shape [..., 4, 4] + gripper_action (np.array or None): gripper actions of shape [..., D] where D + is the dimension of the gripper actuation action for the robot arm + """ +``` + +## Selection Strategy + +
+

Note

+ +See Appendix N.3 in the MimicGen paper for a more thorough explanation of source subtask segment selection and some further intuition on when to use different settings. + +
+ +Each data generation attempt requires choosing one or more subtask segments from the source demonstrations to transform -- this is carried out by a SelectionStrategy instance: + +```python +@six.add_metaclass(MG_SelectionStrategyMeta) +class MG_SelectionStrategy(object): + """ + Defines methods and functions for selection strategies to implement. + """ + def __init__(self): + pass + + @property + @classmethod + def NAME(self): + """ + This name (str) will be used to register the selection strategy class in the global + registry. + """ + raise NotImplementedError + + @abc.abstractmethod + def select_source_demo( + self, + eef_pose, + object_pose, + src_subtask_datagen_infos, + ): + """ + Selects source demonstration index using the current robot pose, relevant object pose + for the current subtask, and relevant information from the source demonstrations for the + current subtask. + + Args: + eef_pose (np.array): current 4x4 eef pose + object_pose (np.array): current 4x4 object pose, for the object in this subtask + src_subtask_datagen_infos (list): DatagenInfo instance for the relevant subtask segment + in the source demonstrations + + Returns: + source_demo_ind (int): index of source demonstration - indicates which source subtask segment to use + """ + raise NotImplementedError +``` + +Every SelectionStrategy class must subclass this base class and implement the `NAME` and `select_source_demo` methods. The `NAME` field is used to register the SelectionStrategy class into the global registry, and `select_source_demo` implements the heuristic for selecting a source demonstration index. + +Each data generation config json specifies how source segment selection should be done during data generation. First, `config.experiment.generation.select_src_per_subtask` determines whether to select a different source demonstration for each subtask during data generation, or keep the same source demonstration as the one used for the first subtask. 
This corresponds to the `per-subtask` parameter described in the "Selection Frequency" paragraph of Appendix N.3 in the paper. + +The specific task config (`config.task.task_spec`), which corresponds to the [Task Spec](https://mimicgen.github.io/docs/modules/task_spec.html) object used in data generation, also specifies the selection strategy to use for each subtask via the `selection_strategy` parameter and the `selection_strategy_kwargs` parameter. The `selection_strategy` parameter corresponds to the `NAME` for the SelectionStrategy class, and the `selection_strategy_kwargs` correspond to any additional kwargs to specify when invoking the `select_source_demo` method. + +
+

Note

+ +Note that if `config.experiment.generation.select_src_per_subtask` is False, only the first subtask's selection strategy matters, since the selected source demonstration will be used for the remainder of the data generation attempt. + +
+ +As an example, the `NearestNeighborObjectStrategy` (see implementation below) can be specified by passing `nearest_neighbor_object` for the `selection_strategy` parameter and you can use the `selection_strategy_kwargs` parameter to specify a dictionary containing values for the `pos_weight`, `rot_weight`, and `nn_k` parameters. + +```python +class NearestNeighborObjectStrategy(MG_SelectionStrategy): + """ + Pick source demonstration to be the one with the closest object pose to the object + in the current scene. + """ + + # name for registering this class into registry + NAME = "nearest_neighbor_object" + + def select_source_demo( + self, + eef_pose, + object_pose, + src_subtask_datagen_infos, + pos_weight=1., + rot_weight=1., + nn_k=3, + ): + """ + Selects source demonstration index using the current robot pose, relevant object pose + for the current subtask, and relevant information from the source demonstrations for the + current subtask. + + Args: + eef_pose (np.array): current 4x4 eef pose + object_pose (np.array): current 4x4 object pose, for the object in this subtask + src_subtask_datagen_infos (list): DatagenInfo instance for the relevant subtask segment + in the source demonstrations + pos_weight (float): weight on position for minimizing pose distance + rot_weight (float): weight on rotation for minimizing pose distance + nn_k (int): pick source demo index uniformly at randomly from the top @nn_k nearest neighbors + + Returns: + source_demo_ind (int): index of source demonstration - indicates which source subtask segment to use + """ +``` + +## Waypoint + +### Waypoint Class Variants + +MimicGen uses a collection of convenience classes to represent waypoints and trajectories (`datagen/waypoint.py`). + +The `Waypoint` class represents a single 6-DoF target pose and the gripper action for that timestep: + +```python +class Waypoint(object): + """ + Represents a single desired 6-DoF waypoint, along with corresponding gripper actuation for this point. 
+ """ + def __init__(self, pose, gripper_action, noise=None): +``` + +The `WaypointSequence` class represents a sequence of these `Waypoint` objects: + +```python +class WaypointSequence(object): + """ + Represents a sequence of Waypoint objects. + """ + def __init__(self, sequence=None): +``` + +It can easily be instantiated from a collection of poses (e.g. `WaypointSequence.from_poses`): + +```python + @classmethod + def from_poses(cls, poses, gripper_actions, action_noise): + """ + Instantiate a WaypointSequence object given a sequence of poses, + gripper actions, and action noise. + + Args: + poses (np.array): sequence of pose matrices of shape (T, 4, 4) + gripper_actions (np.array): sequence of gripper actions + that should be applied at each timestep of shape (T, D). + action_noise (float or np.array): sequence of action noise + magnitudes that should be applied at each timestep. If a + single float is provided, the noise magnitude will be + constant over the trajectory. + """ +``` + +Finally, the `WaypointTrajectory` class is a sequence of the `WaypointSequence` objects, and is a convenient way to represent 6-DoF trajectories and execute them: + +```python +class WaypointTrajectory(object): + """ + A sequence of WaypointSequence objects that corresponds to a full 6-DoF trajectory. + """ +``` + +`WaypointSequence` objects can be added directly to a `WaypointTrajectory` object: + +```python + def add_waypoint_sequence(self, sequence): + """ + Directly append sequence to list (no interpolation). + + Args: + sequence (WaypointSequence instance): sequence to add + """ +``` + +Interpolation segments can also be added easily using this helper method: + +```python + def add_waypoint_sequence_for_target_pose( + self, + pose, + gripper_action, + num_steps, + skip_interpolation=False, + action_noise=0., + ): + """ + Adds a new waypoint sequence corresponding to a desired target pose. 
A new WaypointSequence + will be constructed consisting of @num_steps intermediate Waypoint objects. These can either + be constructed with linear interpolation from the last waypoint (default) or be a + constant set of target poses (set @skip_interpolation to True). + + Args: + pose (np.array): 4x4 target pose + + gripper_action (np.array): value for gripper action + + num_steps (int): number of action steps when trying to reach this waypoint. Will + add intermediate linearly interpolated points between the last pose on this trajectory + and the target pose, so that the total number of steps is @num_steps. + + skip_interpolation (bool): if True, keep the target pose fixed and repeat it @num_steps + times instead of using linearly interpolated targets. + + action_noise (float): scale of random gaussian noise to add during action execution (e.g. + when @execute is called) + """ +``` + +The `merge` method is a thin wrapper around the above method, easily allowing for linear interpolation between two `WaypointTrajectory` objects: + +```python + def merge( + self, + other, + num_steps_interp=None, + num_steps_fixed=None, + action_noise=0., + ): + """ + Merge this trajectory with another (@other). + + Args: + other (WaypointTrajectory object): the other trajectory to merge into this one + + num_steps_interp (int or None): if not None, add a waypoint sequence that interpolates + between the end of the current trajectory and the start of @other + + num_steps_fixed (int or None): if not None, add a waypoint sequence that has constant + target poses corresponding to the first target pose in @other + + action_noise (float): noise to use during the interpolation segment + """ +``` + +Finally, the `execute` method makes it easy to execute the waypoint sequences in the simulation environment: + +```python + def execute( + self, + env, + env_interface, + render=False, + video_writer=None, + video_skip=5, + camera_names=None, + ): + """ + Main function to execute the trajectory. 
Will use env_interface.target_pose_to_action to + convert each target pose at each waypoint to an action command, and pass that along to + env.step. + + Args: + env (robomimic EnvBase instance): environment to use for executing trajectory + env_interface (MG_EnvInterface instance): environment interface for executing trajectory + render (bool): if True, render on-screen + video_writer (imageio writer): video writer + video_skip (int): determines rate at which environment frames are written to video + camera_names (list): determines which camera(s) are used for rendering. Pass more than + one to output a video with multiple camera views concatenated horizontally. + + Returns: + results (dict): dictionary with the following items for the executed trajectory: + states (list): simulator state at each timestep + observations (list): observation dictionary at each timestep + datagen_infos (list): datagen_info at each timestep + actions (list): action executed at each timestep + success (bool): whether the trajectory successfully solved the task or not + """ +``` + +### Waypoint Class Usage during Data Generation + +Each data generation attempt consists of executing a particular sequence of waypoints for each subtask. A `WaypointTrajectory` object is constructed and executed for each subtask in the `generate` method of the `DataGenerator`. + +For a given subtask, a `WaypointTrajectory` object is initialized with a single pose (usually the current robot end effector pose, or the last target pose from the previous subtask execution attempt). Next, a reference source subtask segment is selected, and then transformed using the `transform_source_data_segment_using_object_pose` method from `utils/pose_utils.py`. It is then merged into the trajectory object with linear interpolation using the `merge` method. Finally, the `execute` method is used to carry out the subtask. 
This process repeats for each subtask diff --git a/docs/modules/env_interfaces.md b/docs/modules/env_interfaces.md index 1774cd9..1c84247 100644 --- a/docs/modules/env_interfaces.md +++ b/docs/modules/env_interfaces.md @@ -1,4 +1,23 @@ # Environment Interfaces -TODO: overview of base class, what must be implemented, and subclass, what must be implemented -TODO: how this fits into data generation (action translation, object poses, subtask termination heuristics for parsing source demos) \ No newline at end of file +Environment interface objects allow simulation environments to provide MimicGen with [DatagenInfo](https://mimicgen.github.io/docs/modules/datagen.html#datagen-info) class instances, which is a collection of information that MimicGen needs during data generation. + +These environment interface objects are used by `scripts/prepare_src_dataset.py` to add these DatagenInfo instances to the source datasets, along with metadata that stores which environment interface is to be used with this source dataset. They are also used during data generation to provide DatagenInfo instances that correspond to the current timestep of the environment being used for generation (via the `get_datagen_info` method). The environment interface objects are also used to go back and forth between environment actions (used by `env.step`) and target poses for the end effector controller (as described in MimicGen Appendix N.1). + +Every simulation framework should implement a base environment interface class that subclasses the `MG_EnvInterface` abstract base class in `env_interfaces/base.py`. For example, the `RobosuiteInterface` class in `env_interfaces/robosuite.py` is the base environment interface class for robosuite simulation environments. + +
+

Note

+ +The [Generating Data for New Simulation Frameworks](https://mimicgen.github.io/docs/tutorials/datagen_custom.html#generating-data-for-new-simulation-frameworks) tutorial provides a concrete example of how to implement a base environment interface class for a new simulation environment. + +
+ +Every simulation task should also implement a task-specific environment interface class that subclasses the corresponding base environment interface class for that simulation framework. + +
+

Note

+ +The [Generating Data for New Tasks](https://mimicgen.github.io/docs/tutorials/datagen_custom.html#generating-data-for-new-tasks) tutorial provides a concrete example of how to implement a task-specific environment interface class for new tasks. + +
diff --git a/docs/modules/overview.md b/docs/modules/overview.md index 1eb7a02..b069ffa 100644 --- a/docs/modules/overview.md +++ b/docs/modules/overview.md @@ -1,4 +1,32 @@ -# Overview +# Codebase Overview -TODO: codebase structure overview -TODO: links to more detailed pages for certain parts (e.g. Datagen - break down each file / class, Env Interfaces) \ No newline at end of file +## Codebase Structure + +We outline some important folders and files below. + +- `mimicgen/scripts`: utility scripts + - `generate_dataset.py`: main script for data generation +- `mimicgen/exps/templates`: collection of data generation config json templates for each task +- `mimicgen/configs`: implementation of data generation config classes + - `config.py`: base config class + - `task_spec.py`: [TaskSpec](https://mimicgen.github.io/docs/modules/task_spec.html) object for specifying sequence of subtasks for each task + - `robosuite.py`: robosuite-specific config classes +- `mimicgen/env_interfaces`: implementation of [Environment Interface](https://mimicgen.github.io/docs/modules/env_interfaces.html) classes that help simulation environments provide datagen info during data generation +- `mimicgen/datagen`: implementation of core [Data Generation](https://mimicgen.github.io/docs/modules/datagen.html) classes + - `data_generator.py`: [DataGenerator](https://mimicgen.github.io/docs/modules/datagen.html#data-generator) class used to generate new trajectories + - `datagen_info.py`: [DatagenInfo](https://mimicgen.github.io/docs/modules/datagen.html#datagen-info) class to group information from the sim environment needed during data generation + - `selection_strategy.py`: [SelectionStrategy](https://mimicgen.github.io/docs/modules/datagen.html#selection-strategy) classes that contain different heuristics for selecting source demos during each data generation trial + - `waypoint.py`: collection of [Waypoint](https://mimicgen.github.io/docs/modules/datagen.html#waypoint) classes to help end 
effector controllers execute waypoint targets and waypoint sequences +- `mimicgen/envs` and `mimicgen/models`: files containing collection of robosuite simulation environments and assets released with this project +- `mimicgen/utils`: collection of utility functions and classes +- `docs`: files related to documentation + +## Important Modules + +We provide some more guidance on some important modules and how they relate to one another. + +MimicGen starts with a handful of source demonstrations and generates new demonstrations automatically. MimicGen treats each task as a sequence of object-centric subtasks, and attempts to generate trajectories one subtask at a time. MimicGen must parse source demonstrations into contiguous subtask segments -- it uses [Subtask Termination Signals](https://mimicgen.github.io/docs/tutorials/subtask_termination_signals.html) to do this. It also requires object poses at the start of each subtask, both in the source demonstrations and in the current scene during data generation. Information on object poses, subtask termination signals, and other information needed at data generation time is collected into [DatagenInfo](https://mimicgen.github.io/docs/modules/datagen.html#datagen-info) objects, which are read from the source demonstrations, and also read from the current scene. This information is provided through [Environment Interface](https://mimicgen.github.io/docs/modules/env_interfaces.html) classes which connect underlying simulation environments to DatagenInfo objects. + +Data generation is carried out by the [DataGenerator](https://mimicgen.github.io/docs/modules/datagen.html#data-generator) class. Each data generation attempt requires choosing one or more subtask segments from the source demonstrations to transform -- this is carried out by a [SelectionStrategy](https://mimicgen.github.io/docs/modules/datagen.html#selection-strategy) instance. 
The transformation consists of keeping track of a collection of end effector target poses for a controller to execute -- this is managed by [Waypoint](https://mimicgen.github.io/docs/modules/datagen.html#waypoint) classes. + +The sequence of object-centric subtasks and other important data generation settings for each data generation run are communicated to MimicGen through the [TaskSpec object](https://mimicgen.github.io/docs/modules/task_spec.html), which is read as part of the MimicGen config. diff --git a/docs/modules/task_spec.md b/docs/modules/task_spec.md new file mode 100644 index 0000000..77e3be2 --- /dev/null +++ b/docs/modules/task_spec.md @@ -0,0 +1,78 @@ +# Task Spec + +The sequence of object-centric subtasks and other important data generation settings for each data generation run are communicated to MimicGen through the TaskSpec object (`configs/task_spec.py`). The TaskSpec object is instantiated from the MimicGen task config (`config.task.task_spec`) using the `TaskSpec.from_json` method. + +
+

Note

+ +See [this section](https://mimicgen.github.io/docs/tutorials/datagen_custom.html#step-2-implement-task-specific-config) of the Data Generation for Custom Environments tutorial for an example of how to implement a task config for a new task. + +
+ +We describe components of the TaskSpec object in more detail below. The TaskSpec is essentially a list of dictionaries with each dictionary corresponding to exactly one subtask. The method below highlights the important settings that the TaskSpec object holds for each task: + +```python + def add_subtask( + self, + object_ref, + subtask_term_signal, + subtask_term_offset_range=None, + selection_strategy="random", + selection_strategy_kwargs=None, + action_noise=0., + num_interpolation_steps=5, + num_fixed_steps=0, + apply_noise_during_interpolation=False, + ): + """ + Add subtask to this task spec. + + Args: + object_ref (str): each subtask involves manipulation with + respect to a single object frame. This string should + specify the object for this subtask. The name + should be consistent with the "datagen_info" from the + environment interface and dataset. + + subtask_term_signal (str or None): the "datagen_info" from the environment + and dataset includes binary indicators for each subtask + of the task at each timestep. This key should correspond + to the key in "datagen_info" that should be used to + infer when this subtask is finished (e.g. on a 0 to 1 + edge of the binary indicator). Should provide None for the final + subtask. + + subtask_term_offset_range (2-tuple): if provided, specifies time offsets to + be used during data generation when splitting a trajectory into + subtask segments. On each data generation attempt, an offset is sampled + and added to the boundary defined by @subtask_term_signal. 
+ + selection_strategy (str): specifies how the source subtask segment should be + selected during data generation from the set of source human demos + + selection_strategy_kwargs (dict or None): optional keyword arguments for the selection + strategy function used + + action_noise (float): amount of action noise to apply during this subtask + + num_interpolation_steps (int): number of interpolation steps to bridge previous subtask segment + to this one + + num_fixed_steps (int): number of additional steps (with constant target pose of beginning of + this subtask segment) to add to give the robot time to reach the pose needed to carry + out this subtask segment + + apply_noise_during_interpolation (bool): if True, apply action noise during interpolation phase + leading up to this subtask, as well as during the execution of this subtask + """ +``` + +- The `object_ref` for each subtask determines the reference object frame for the motion in that subtask. The name here should be consistent with the `get_object_poses` method of the relevant task-specific [Environment Interface](https://mimicgen.github.io/docs/modules/env_interfaces.html) object. + +- The `subtask_term_signal` and `subtask_term_offset_range` settings determine how subtask segments for this subtask are parsed from the source demonstrations -- see the [Subtask Termination Signals](https://mimicgen.github.io/docs/tutorials/subtask_termination_signals.html) page for more information. + +- The `selection_strategy` and `selection_strategy_kwargs` determine the [SelectionStrategy](https://mimicgen.github.io/docs/modules/datagen.html#selection-strategy) class used to select a source subtask segment at the start of each subtask during data generation. See Appendix N.3 in the MimicGen paper for more details. + +- The `action_noise` setting determines the magnitude of action noise added when executing actions during data generation. See Appendix N.4 of the MimicGen paper for more details. 
+ +- The `num_interpolation_steps` and `num_fixed_steps` settings determine the number of interpolation waypoints in the interpolation segment that bridges this subtask segment and the previous subtask segment during data generation. See Appendix N.2 of the MimicGen paper for more details. diff --git a/docs/tutorials/datagen_custom.md b/docs/tutorials/datagen_custom.md index 63ba19d..177d5f6 100644 --- a/docs/tutorials/datagen_custom.md +++ b/docs/tutorials/datagen_custom.md @@ -1,3 +1,484 @@ # Data Generation for Custom Environments -TODO: data generation for other simulators and tasks (must implement robomimic wrapper for new simulator class, must have environment interface base class with certain methods, must have specific subclass for object poses, and either plan to annotate subtask termination signals or implement those here) +In this section, we provide guidance on using MimicGen to generate data for custom tasks and simulation frameworks. + +
+

Note

+ +We recommend going through the [Getting Started](https://mimicgen.github.io/docs/tutorials/getting_started.html) tutorial first, so that you are familiar with the typical data generation workflow. We will refer back to the steps in the [Data Generation Workflow Overview](https://mimicgen.github.io/docs/tutorials/getting_started.html#overview-of-typical-data-generation-workflow) in this section. + +
+ +## Generating Data for New Tasks + +In this section, we will assume we are trying to generate data for a new task implemented in [robosuite](https://robosuite.ai/), and we will use the robosuite Stack Three task as a running example. The same instructions can be used for any task in any simulation framework, as long as an [Environment Interface](https://mimicgen.github.io/docs/modules/env_interfaces.html) base class already exists for the simulation framework. See the [Generating Data for New Simulation Frameworks](https://mimicgen.github.io/docs/tutorials/datagen_custom.html#generating-data-for-new-simulation-frameworks)) below for guidance on setting up a new simulation framework if this has not happened yet. + +### Step 1: Implement Task-Specific Environment Interface + +The first step is to subclass the appropriate base Environment Interface class -- for robosuite, this is the `RobosuiteInterface` class at the top of `env_interfaces/robosuite.py`. We create a new class as below: + +```python +class MG_StackThree(RobosuiteInterface): + """ + Corresponds to robosuite StackThree task and variants. + """ + pass +``` + +The `MG_EnvInterface` abstract base class in `env_interfaces/base.py` (which `RobosuiteInterface` inherits from) describes the methods that task-specific subclasses must implement. There are two important methods: + +```python + """ + These should be filled out by each simulation domain (e.g. nut assembly, coffee). + """ + @abc.abstractmethod + def get_object_poses(self): + """ + Gets the pose of each object relevant to MimicGen data generation in the current scene. + + Returns: + object_poses (dict): dictionary that maps object name (str) to object pose matrix (4x4 np.array) + """ + raise NotImplementedError + + @abc.abstractmethod + def get_subtask_term_signals(self): + """ + Gets a dictionary of binary flags for each subtask in a task. The flag is 1 + when the subtask has been completed and 0 otherwise. 
MimicGen only uses this
+        when parsing source demonstrations at the start of data generation, and it only
+        uses the first 0 -> 1 transition in this signal to detect the end of a subtask.
+
+        Returns:
+            subtask_term_signals (dict): dictionary that maps subtask name to termination flag (0 or 1)
+        """
+        raise NotImplementedError
+```
+
+Recall that MimicGen generates data by composing object-centric subtask segments together (see the paper for more information). During data generation, MimicGen requires a way to observe the pose of the relevant object at the start of each subtask. The `get_object_poses` method will be used for this purpose - it should return a dictionary mapping object name to a pose matrix.
+
+The `RobosuiteInterface` base class offers a helper method `get_object_pose(self, obj_name, obj_type)` to make retrieving object poses from robosuite easy - we use it below to get the poses of each cube in the `StackThree` task.
+
+```python
+    def get_object_poses(self):
+        """
+        Gets the pose of each object relevant to MimicGen data generation in the current scene.
+
+        Returns:
+            object_poses (dict): dictionary that maps object name (str) to object pose matrix (4x4 np.array)
+        """
+
+        # three relevant objects - three cubes
+        return dict(
+            cubeA=self.get_object_pose(obj_name=self.env.cubeA.root_body, obj_type="body"),
+            cubeB=self.get_object_pose(obj_name=self.env.cubeB.root_body, obj_type="body"),
+            cubeC=self.get_object_pose(obj_name=self.env.cubeC.root_body, obj_type="body"),
+        )
+```
+
+Next we need to implement `get_subtask_term_signals`. This function has only one purpose - it is used to provide [Subtask Termination Signals](https://mimicgen.github.io/docs/tutorials/subtask_termination_signals.html) for each timestep in the source demonstrations (this is part of what happens in `scripts/prepare_src_dataset.py`). 
These signals are used to determine where each subtask ends and the next one starts -- the first 0 to 1 transition in this signal during a source demonstration determines the end of the subtask.
+
+The StackThree task consists of 4 object-centric subtasks:
+
+```
+1. grasping cubeA (motion relative to cubeA)
+2. placing cubeA on cubeB (motion relative to cubeB)
+3. grasping cubeC (motion relative to cubeC)
+4. placing cubeC on cubeA (motion relative to cubeA)
+```
+
+To define the ends of subtasks 1 and 3, we can just check for a successful grasp, and for the end of subtask 2, we can check for a placement (re-using part of the success check for the robosuite StackThree task):
+
+```python
+    def get_subtask_term_signals(self):
+        """
+        Gets a dictionary of binary flags for each subtask in a task. The flag is 1
+        when the subtask has been completed and 0 otherwise. MimicGen only uses this
+        when parsing source demonstrations at the start of data generation, and it only
+        uses the first 0 -> 1 transition in this signal to detect the end of a subtask.
+
+        Returns:
+            subtask_term_signals (dict): dictionary that maps subtask name to termination flag (0 or 1)
+        """
+        signals = dict()
+
+        # first subtask is grasping cubeA (motion relative to cubeA)
+        signals["grasp_1"] = int(self.env._check_grasp(gripper=self.env.robots[0].gripper, object_geoms=self.env.cubeA))
+
+        # second subtask is placing cubeA on cubeB (motion relative to cubeB)
+        signals["stack_1"] = int(self.env._check_cubeA_stacked())
+
+        # third subtask is grasping cubeC (motion relative to cubeC)
+        signals["grasp_2"] = int(self.env._check_grasp(gripper=self.env.robots[0].gripper, object_geoms=self.env.cubeC))
+
+        # final subtask is placing cubeC on cubeA (motion relative to cubeA) - but final subtask signal is not needed
+        return signals
+```
+
+

Warning

+ +The final subtask in a task never requires a subtask termination signal, since its end is determined by the end of the source demonstration. + +
+ +
+

Note

+ +Providing a proper implementation for the `get_subtask_term_signals` function is entirely optional. In most cases it is easy to specify heuristics to define these subtask boundaries as we did above, but sometimes you may want to just directly annotate the boundaries between subtasks. We provide an annotation script (`scripts/annotate_subtasks.py`) for this purpose. If you plan to do this, you can just return an empty dict for the `get_subtask_term_signals` function. + +
+ +### Step 2: Implement Task-Specific Config + +The next step is to implement a task-specific Config object that inherits from the `MG_Config` base class (`configs/config.py`). There are three things for the subclass to implement: + +```python + @property + @classmethod + def NAME(cls): + # must be specified by subclasses + raise NotImplementedError + + @property + @classmethod + def TYPE(cls): + # must be specified by subclasses + raise NotImplementedError + + def task_config(self): + """ + This function populates the `config.task` attribute of the config, + which has settings for each object-centric subtask in a task. + """ + raise NotImplementedError +``` + +The `NAME` and `TYPE` are used to store the new subclass into the config registry, and chiefly determine where the auto-generated config template is stored in the repository (e.g. `mimicgen/exps/templates//.json`). + +The `task_config` is consistent with [TaskSpec objects](https://mimicgen.github.io/docs/modules/task_spec.html) -- there is an entry for each subtask which consists of the arguments to the `add_subtask` function in the TaskSpec object (`configs/task_spec.py`): + +```python + def add_subtask( + self, + object_ref, + subtask_term_signal, + subtask_term_offset_range=None, + selection_strategy="random", + selection_strategy_kwargs=None, + action_noise=0., + num_interpolation_steps=5, + num_fixed_steps=0, + apply_noise_during_interpolation=False, + ): +``` + +We show the implementation for the StackThree config below: + +```python +class StackThree_Config(MG_Config): + """ + Corresponds to robosuite StackThree task and variants. + """ + NAME = "stack_three" + TYPE = "robosuite" + + def task_config(self): + """ + This function populates the `config.task` attribute of the config, + which has settings for each object-centric subtask in a task. Each + dictionary should have kwargs for the @add_subtask method in the + @MG_TaskSpec object. 
+ """ + self.task.task_spec.subtask_1 = dict( + object_ref="cubeA", + subtask_term_signal="grasp_1", + subtask_term_offset_range=(10, 20), + selection_strategy="nearest_neighbor_object", + selection_strategy_kwargs=dict(nn_k=3), + action_noise=0.05, + num_interpolation_steps=5, + num_fixed_steps=0, + apply_noise_during_interpolation=False, + ) + self.task.task_spec.subtask_2 = dict( + object_ref="cubeB", + subtask_term_signal="stack_1", + subtask_term_offset_range=(10, 20), + selection_strategy="nearest_neighbor_object", + selection_strategy_kwargs=dict(nn_k=3), + action_noise=0.05, + num_interpolation_steps=5, + num_fixed_steps=0, + apply_noise_during_interpolation=False, + ) + self.task.task_spec.subtask_3 = dict( + object_ref="cubeC", + subtask_term_signal="grasp_2", + subtask_term_offset_range=(10, 20), + selection_strategy="nearest_neighbor_object", + selection_strategy_kwargs=dict(nn_k=3), + action_noise=0.05, + num_interpolation_steps=5, + num_fixed_steps=0, + apply_noise_during_interpolation=False, + ) + self.task.task_spec.subtask_4 = dict( + object_ref="cubeA", + subtask_term_signal=None, + subtask_term_offset_range=None, + selection_strategy="nearest_neighbor_object", + selection_strategy_kwargs=dict(nn_k=3), + action_noise=0.05, + num_interpolation_steps=5, + num_fixed_steps=0, + apply_noise_during_interpolation=False, + ) + self.task.task_spec.do_not_lock_keys() +``` + +Notice that we set the `object_ref` for each subtask to be consistent with the object names in the `get_object_poses` method in the `MG_StackThree` environment interface we implemented. We also set the `subtask_term_signal` for each subtask to be consistent with the subtask signals in the `get_subtask_term_signals` method in the `MG_StackThree` class as well. Please see the [TaskSpec page](https://mimicgen.github.io/docs/modules/task_spec.html) for more information on the other settings. + +
+

Note

+ +If you used or plan to use `scripts/annotate_subtasks.py` to manually annotate the end of each subtask in the source demos, you should use signal names that are consistent with the `--signals` argument that you will pass to that script that give a name to each subtask. Internally, the annotations are stored as subtask termination signals with those names. + +
+ +
+

Note

+ +You should make sure that the config class you implemented is being imported somewhere in your codebase to make sure it gets registered in the config registry. In the MimicGen codebase, we do this in `mimicgen/configs/__init__.py`. + +
+
+Finally, run `scripts/generate_config_templates.py` to generate a config template for this new task. It should appear under `mimicgen/exps/templates/<type>/<name>.json`. Ensure that the default settings look correct. These settings can be overridden using config generators ([see this tutorial](https://mimicgen.github.io/docs/tutorials/launching_several.html)).
+
+### Step 3: Execute Data Generation Workflow
+
+You are now all set to try data generation. You should be able to follow the steps documented in the [Data Generation Workflow Overview](https://mimicgen.github.io/docs/tutorials/getting_started.html#overview-of-typical-data-generation-workflow) with some minor changes.
+
+
+

Note

+ +Now that you have followed these steps, you can generate datasets for any other variants of this task, as long as the TaskSpec does not change -- e.g. different object placements, different robot arms, and different object instances. The [Reproducing Experiments](https://mimicgen.github.io/docs/tutorials/reproducing_experiments.html) tutorial provides examples of all three variations. + +
+ + +## Generating Data for New Simulation Frameworks + +
+

Note

+ +Before starting, you should ensure that a [robomimic environment wrapper](https://robomimic.github.io/docs/modules/environments.html) exists for the simulation framework you are using. See [this link](https://robomimic.github.io/docs/modules/environments.html#implement-an-environment-wrapper) for guidance on how to create one. The environment metadata in the source hdf5 should point to this environment wrapper. + +
+ +In this section, we will show how to apply MimicGen to new simulation frameworks. The key step is to implement an [Environment Interface](https://mimicgen.github.io/docs/modules/env_interfaces.html) base class for the simulation framework. We will use robosuite as a running example in this section. + +The `MG_EnvInterface` abstract base class in `env_interfaces/base.py` describes the methods that base subclasses for new simulators must implement. There are five important methods: + +```python + """ + These should be filled out by simulator subclasses (e.g. robosuite). + """ + @property + @classmethod + def INTERFACE_TYPE(self): + """ + Returns string corresponding to interface type. This is used to group + all subclasses together in the interface registry (for example, all robosuite + interfaces) and helps avoid name conflicts. + """ + raise NotImplementedError + + @abc.abstractmethod + def get_robot_eef_pose(self): + """ + Get current robot end effector pose. Should be the same frame as used by the robot end-effector controller. + + Returns: + pose (np.array): 4x4 eef pose matrix + """ + raise NotImplementedError + + @abc.abstractmethod + def target_pose_to_action(self, target_pose, relative=True): + """ + Takes a target pose for the end effector controller and returns an action + (usually a normalized delta pose action) to try and achieve that target pose. + + Args: + target_pose (np.array): 4x4 target eef pose + relative (bool): if True, use relative pose actions, else absolute pose actions + + Returns: + action (np.array): action compatible with env.step (minus gripper actuation) + """ + raise NotImplementedError + + @abc.abstractmethod + def action_to_target_pose(self, action, relative=True): + """ + Converts action (compatible with env.step) to a target pose for the end effector controller. + Inverse of @target_pose_to_action. Usually used to infer a sequence of target controller poses + from a demonstration trajectory using the recorded actions. 
+ + Args: + action (np.array): environment action + relative (bool): if True, use relative pose actions, else absolute pose actions + + Returns: + target_pose (np.array): 4x4 target eef pose that @action corresponds to + """ + raise NotImplementedError + + @abc.abstractmethod + def action_to_gripper_action(self, action): + """ + Extracts the gripper actuation part of an action (compatible with env.step). + + Args: + action (np.array): environment action + + Returns: + gripper_action (np.array): subset of environment action for gripper actuation + """ + raise NotImplementedError +``` + +The `INTERFACE_TYPE` method is just used to make sure there are no class name conflicts in the environment interface registry. You should just make sure to choose a name unique to your simulation framework. For robosuite, we simply used `"robosuite"`. + +The `get_robot_eef_pose` method returns the current pose of the robot end effector and corresponds to the same frame used by the end effector controller in the simulation environment. In robosuite, the Operational Space Controller uses a specific MuJoCo site, so we return its pose: + +```python + def get_robot_eef_pose(self): + """ + Get current robot end effector pose. Should be the same frame as used by the robot end-effector controller. + + Returns: + pose (np.array): 4x4 eef pose matrix + """ + + # OSC control frame is a MuJoCo site - just retrieve its current pose + return self.get_object_pose( + obj_name=self.env.robots[0].controller.eef_name, + obj_type="site", + ) +``` + +The next two methods (`target_pose_to_action`, `action_to_target_pose`) are used to translate between simulator actions (e.g. those given to `env.step`) and absolute target poses for the end effector controller. 
This typically consists of simple scaling factors and transformations between different rotation conventions (as described in Appendix N.1 of the MimicGen paper): + +```python + def target_pose_to_action(self, target_pose, relative=True): + """ + Takes a target pose for the end effector controller and returns an action + (usually a normalized delta pose action) to try and achieve that target pose. + + Args: + target_pose (np.array): 4x4 target eef pose + relative (bool): if True, use relative pose actions, else absolute pose actions + + Returns: + action (np.array): action compatible with env.step (minus gripper actuation) + """ + + # version check for robosuite - must be v1.2+, so that we're using the correct controller convention + assert (robosuite.__version__.split(".")[0] == "1") + assert (robosuite.__version__.split(".")[1] >= "2") + + # target position and rotation + target_pos, target_rot = PoseUtils.unmake_pose(target_pose) + + # current position and rotation + curr_pose = self.get_robot_eef_pose() + curr_pos, curr_rot = PoseUtils.unmake_pose(curr_pose) + + # get maximum position and rotation action bounds + max_dpos = self.env.robots[0].controller.output_max[0] + max_drot = self.env.robots[0].controller.output_max[3] + + if relative: + # normalized delta position action + delta_position = target_pos - curr_pos + delta_position = np.clip(delta_position / max_dpos, -1., 1.) + + # normalized delta rotation action + delta_rot_mat = target_rot.dot(curr_rot.T) + delta_quat = T.mat2quat(delta_rot_mat) + delta_rotation = T.quat2axisangle(delta_quat) + delta_rotation = np.clip(delta_rotation / max_drot, -1., 1.) 
+ return np.concatenate([delta_position, delta_rotation]) + + # absolute position and rotation action + target_quat = T.mat2quat(target_rot) + abs_rotation = T.quat2axisangle(target_quat) + return np.concatenate([target_pos, abs_rotation]) + + def action_to_target_pose(self, action, relative=True): + """ + Converts action (compatible with env.step) to a target pose for the end effector controller. + Inverse of @target_pose_to_action. Usually used to infer a sequence of target controller poses + from a demonstration trajectory using the recorded actions. + + Args: + action (np.array): environment action + relative (bool): if True, use relative pose actions, else absolute pose actions + + Returns: + target_pose (np.array): 4x4 target eef pose that @action corresponds to + """ + + # version check for robosuite - must be v1.2+, so that we're using the correct controller convention + assert (robosuite.__version__.split(".")[0] == "1") + assert (robosuite.__version__.split(".")[1] >= "2") + + if (not relative): + # convert absolute action to absolute pose + target_pos = action[:3] + target_quat = T.axisangle2quat(action[3:6]) + target_rot = T.quat2mat(target_quat) + else: + # get maximum position and rotation action bounds + max_dpos = self.env.robots[0].controller.output_max[0] + max_drot = self.env.robots[0].controller.output_max[3] + + # unscale actions + delta_position = action[:3] * max_dpos + delta_rotation = action[3:6] * max_drot + + # current position and rotation + curr_pose = self.get_robot_eef_pose() + curr_pos, curr_rot = PoseUtils.unmake_pose(curr_pose) + + # get pose target + target_pos = curr_pos + delta_position + delta_quat = T.axisangle2quat(delta_rotation) + delta_rot_mat = T.quat2mat(delta_quat) + target_rot = delta_rot_mat.dot(curr_rot) + + target_pose = PoseUtils.make_pose(target_pos, target_rot) + return target_pose +``` + +Finally, the `action_to_gripper_action` extracts the part of the simulator action that corresponds to gripper actuation: + 
+```python + def action_to_gripper_action(self, action): + """ + Extracts the gripper actuation part of an action (compatible with env.step). + + Args: + action (np.array): environment action + + Returns: + gripper_action (np.array): subset of environment action for gripper actuation + """ + + # last dimension is gripper action + return action[-1:] +``` + +Finally, you can follow the instructions in the [Generating Data for New Tasks](https://mimicgen.github.io/docs/tutorials/datagen_custom.html#generating-data-for-new-tasks) section to setup data generation for specific tasks in this simulation framework. diff --git a/docs/tutorials/debugging_datagen.md b/docs/tutorials/debugging_datagen.md index 6868a5e..6e40740 100644 --- a/docs/tutorials/debugging_datagen.md +++ b/docs/tutorials/debugging_datagen.md @@ -1,12 +1,79 @@ # Debugging Data Generation +We provide some useful suggestions for debugging data generation runs. -TODO: validating source demos and annotations section +## Source Demo Validation - TODO: visualize src dataset structure +### Get Source Dataset Information - TODO: visualize subtasks +You can use the `get_source_info.py` script to validate whether the source demonstrations you are using have the expected [DatagenInfo](https://mimicgen.github.io/docs/modules/datagen.html#datagen-info) structure, and are using the correct [Environment Interface](https://mimicgen.github.io/docs/modules/env_interfaces.html): - TODO: can re-do offsets and subtask signals (annotations or env_interface function) +```sh +$ python mimicgen/scripts/get_source_info.py --dataset datasets/source/square.hdf5 +``` -TODO: generate dataset args for debugging, including pause subtask, render on-screen or not, etc, main entry point for codebase +It will print out information that looks like the following. 
This is a good way to also validate the object poses and subtask termination signals present in the file:
+
+```
+Environment Interface: MG_Square
+Environment Interface Type: robosuite
+
+Structure of datagen_info in episode demo_0:
+ eef_pose: shape (127, 4, 4)
+ gripper_action: shape (127, 1)
+ object_poses:
+ square_nut: shape (127, 4, 4)
+ square_peg: shape (127, 4, 4)
+ subtask_term_signals:
+ grasp: shape (127,)
+ target_pose: shape (127, 4, 4)
+```
+
+### Visualizing Subtasks in Source Dataset
+
+You can visualize each subtask segment in a source demonstration using the `visualize_subtasks.py` script.
+
+The script needs to be aware of the order of the subtask signals as well as the maximum termination offsets being used (see the [Subtask Termination Signals](https://mimicgen.github.io/docs/tutorials/subtask_termination_signals.html) page for more information on offsets) -- this can be specified by providing a config json (`--config`) or by providing the sequence of signals (`--signals`) and offsets (`--offsets`) for all except the last subtask. The end of each subtask is defined as the timestep of the first 0 to 1 transition in the corresponding signal added to the maximum offset value.
+
+The script supports either on-screen rendering (`--render`) or off-screen rendering to a video (`--video_path`). If using on-screen rendering, the script will pause after every subtask segment. If using off-screen rendering, the video alternates between no borders and red borders to show each subtask segment. 
+
+```sh
+# render on-screen
+$ python visualize_subtasks.py --dataset /path/to/demo.hdf5 --config /path/to/config.json --render
+
+# render to video
+$ python visualize_subtasks.py --dataset /path/to/demo.hdf5 --config /path/to/config.json --video_path /path/to/video.mp4
+
+# specify subtask information manually instead of using a config
+$ python visualize_subtasks.py --dataset /path/to/demo.hdf5 --signals grasp_1 insert_1 grasp_2 --offsets 10 10 10 --render
+```
+
+The script is useful for checking that the [Subtask Termination Signals](https://mimicgen.github.io/docs/tutorials/subtask_termination_signals.html) specified through the environment interface (the `get_subtask_term_signals` method) or the manual annotations provided through `scripts/annotate_subtasks.py` have defined the subtask segments properly, as well as for tuning the offsets you are using in the data generation config.
+
+
+

Warning

+ +When you change the `get_subtask_term_signals` method, you should re-run the `prepare_src_dataset.py` script on the source data to re-write the subtask termination signals to the dataset. + +
+ +## Visualization during Data Generation + +The main data generation script (`scripts/generate_dataset.py`) also has some useful features for debugging, including on-screen visualization (`--render`), off-screen rendering to video (`--video_path`), running a quick run for debugging (`--debug`), and pausing after each subtask execution (`--pause_subtask`): + +```sh +# run normal data generation +$ python generate_dataset.py --config /path/to/config.json + +# render all data generation attempts on-screen +$ python generate_dataset.py --config /path/to/config.json --render + +# render all data generation attempts to a video +$ python generate_dataset.py --config /path/to/config.json --video_path /path/to/video.mp4 + +# run a quick debug run +$ python generate_dataset.py --config /path/to/config.json --debug + +# pause after every subtask to debug data generation +$ python generate_dataset.py --config /path/to/config.json --render --pause_subtask +``` diff --git a/docs/tutorials/getting_started.md b/docs/tutorials/getting_started.md index 7254f74..d7f23fe 100644 --- a/docs/tutorials/getting_started.md +++ b/docs/tutorials/getting_started.md @@ -1,9 +1,9 @@ -# Getting Started and Pipeline Overview +# Getting Started

Note

-This section helps users get started with data generation. If you would just like to download our existing datasets and use them with policy learning methods please see the [Reproducing Experiments](https://mimicgen.github.io/docs/tutorials/reproducing_experiments.html) tutorial for a guide, or the [Datasets](https://mimicgen.github.io/docs/datasets/overview.html) page to get details on the datasets. +This section helps users get started with data generation. If you would just like to download our existing datasets and use them with policy learning methods please see the [Reproducing Experiments](https://mimicgen.github.io/docs/tutorials/reproducing_experiments.html) tutorial for a guide, or the [Datasets](https://mimicgen.github.io/docs/datasets/mimicgen_corl_2023.html) page to get details on the datasets.
@@ -12,31 +12,109 @@ This section helps users get started with data generation. If you would just lik Let's run a quick data generation example. -Befor starting, make sure you are at the base repo path: +Before starting, make sure you are at the base repo path: ```sh $ cd {/path/to/mimicgen} ``` -### Step 1: Prepare source human dataset. +### Step 1: Prepare source human dataset MimicGen requires a handful of human demonstrations to get started. -TODO: download square -TODO: note that you could collect your own as well, using teleoperation (e.g. link to robosuite / robomimic) - must be in robomimic hdf5 format +Download the source demonstrations for the Square task below: +```sh +$ python mimicgen/scripts/download_datasets.py --dataset_type source --tasks square +``` + +This is a basic robomimic dataset collected via teleoperation in robosuite (see [here](https://robomimic.github.io/docs/datasets/robosuite.html)). We need to add in extra information to the hdf5 to make it compatible with MimicGen: +```sh +$ python mimicgen/scripts/prepare_src_dataset.py --dataset datasets/source/square.hdf5 --env_interface MG_Square --env_interface_type robosuite +``` + +The `--env_interface` and `--env_interface_type` arguments allow the script to find the correct [Environment Interface](https://mimicgen.github.io/docs/modules/env_interfaces.html) class to extract [DatagenInfo objects](https://mimicgen.github.io/docs/modules/datagen.html#datagen-info) at each timestep. In general, each task needs to have an environment interface class to tell MimicGen how to retrieve object poses and other information needed during data generation. + +### Step 2: Prepare data generation config + +Each data generation run requires a config json (similar to robomimic [Configs](https://robomimic.github.io/docs/modules/configs.html)) that allows us to configure different settings. 
Template configs for each task are at `mimicgen/exps/templates` and are auto-generated (with `scripts/generate_config_templates.py`). The repository has easy ways to modify these templates to generate new config jsons.
+
+For now, we will use a script to produce experiment configs consistent with the MimicGen paper. Open `scripts/generate_core_configs.py` and set `NUM_TRAJ = 10` and `GUARANTEE = False` -- this means we will attempt to generate 10 new trajectories. Next, run the script:
+```sh
+$ python mimicgen/scripts/generate_core_configs.py
+```
+
+It generates a set of configs (and prints their paths) and also prints lines that correspond to data generation runs for each config.
+
+### Step 3: Run data generation and view outputs
+
+Next, we run data generation on the Square D1 task (this will take a couple minutes):
+```sh
+$ python mimicgen/scripts/generate_dataset.py --config /tmp/core_configs/demo_src_square_task_D1.json --auto-remove-exp
+```
+
+By default, the data generation folder can be found at `/tmp/core_datasets/square/demo_src_square_task_D1`. The contents of this folder are as follows:
+```
+demo.hdf5 # generated hdf5 containing successful demonstrations
+demo_failed.hdf5 # generated hdf5 containing failed demonstrations, up to a certain limit
+important_stats.json # json file summarizing success rate and other statistics
+log.txt # terminal output
+logs/ # longer experiment runs will regularly sync progress jsons to this folder
+mg_config.json # config used for this experiment
+playback_demo_src_square_task_D1.mp4 # video that shows successful demonstrations, up to a certain limit
+playback_demo_src_square_task_D1_failed.mp4 # video that shows failed demonstrations, up to a certain limit
+```
+
+
+

Note

+ +The generated `demo.hdf5` file is fully compatible with robomimic, which makes it easy to inspect the data (see [here](https://robomimic.github.io/docs/tutorials/dataset_contents.html)) or launch training jobs (we have a helper script at `scripts/generate_core_training_configs.py`). + +
-TODO: postprocess square (env interface - where to get information needed during data generation, link to env interface module) +Next, we outline the typical MimicGen workflow. -### Step 2: Prepare data generation config. -TODO: get config from template (lots of options for us to configure, but for now, 10 attempts) -TODO: note that config follows RoboMimic config style (link to it) +## Overview of Typical Data Generation Workflow +A typical application of MimicGen consists of the following steps. -### Step 3: View data generation outputs. -TODO: compatibility with robomimic, can get info (link to robomimic) -TODO: see dataset successes and failures, statistics in json +### Step 1: Collect source demonstrations + +Collect human demonstrations for a task of interest. Typically, this is done using a teleoperation pipeline. For example, [this](https://robosuite.ai/docs/algorithms/demonstrations.html) is how robosuite demonstrations can be collected. Make sure you end up with an hdf5 [compatible with robomimic](https://robomimic.github.io/docs/datasets/mimicgen_corl_2023.html#dataset-structure) -- this typically involves a postprocessing script (for example [this one](https://robomimic.github.io/docs/datasets/robosuite.html#converting-robosuite-hdf5-datasets) for robosuite). + +
+

Note

+ +You should ensure that a [robomimic environment wrapper](https://robomimic.github.io/docs/modules/environments.html) exists for the simulation framework you are using. See [this link](https://robomimic.github.io/docs/modules/environments.html#implement-an-environment-wrapper) for guidance on how to create one. The environment metadata in the source hdf5 should point to this environment wrapper. + +
+ +### Step 2: Prepare source demonstrations with additional annotations + +Next, information must be added to the source hdf5 to make it compatible with MimicGen. This is typically done with `scripts/prepare_src_dataset.py` just like we did above. However, this requires an [Environment Interface](https://mimicgen.github.io/docs/modules/env_interfaces.html) class to be implemented for your simulation framework (usually a base class) and your task (a subclass of the base class). + +These classes typically specify how to translate between environment actions and target poses for the end effector controller in the environment (see the MimicGen paper for more details on why this is needed). Furthermore, for each task, they provide a dictionary that maps object name to object pose -- recall that MimicGen requires observing an object pose at the start of each object-centric subtask. The structure of the extracted information can be found [here](https://mimicgen.github.io/docs/modules/datagen.html#datagen-info). Finally, the class can optionally provide [subtask termination signals](https://mimicgen.github.io/docs/tutorials/subtask_termination_signals.html) that provide heuristics for splitting source demonstrations into subtask segments. As an example, the `MG_Square` environment interface senses when the nut has been grasped and provides this heuristic in the `grasp` subtask termination signal. + +Instead of using heuristics from the environment interface class, the source demonstrations can be manually segmented into subtasks using the annotation interface in `scripts/annotate_subtasks.py`. This step should be performed after running `scripts/prepare_src_dataset.py`. + +
+

Note

+ +See the [Data Generation for Custom Environments](https://mimicgen.github.io/docs/tutorials/datagen_custom.html) tutorial for a more comprehensive description of implementing environment interfaces for new simulators and new tasks. + +
+ +### Step 3: Run data generation + +Next, you can set up your MimicGen config and launch data generation. See the [Launching Several Data Generation Runs](https://mimicgen.github.io/docs/tutorials/launching_several.html) tutorial to see how data generation configs can be generated with ease. Once ready, run `scripts/generate_dataset.py`. + +
+

Note

+ +Data generation does not necessarily need to run on the exact same task. As described in the paper, you can run it on tasks with possibly different reset distributions, different robot arms, or different object instances. The [Reproducing Experiments](https://mimicgen.github.io/docs/tutorials/reproducing_experiments.html) tutorial provides examples of all three variations. + +
-## Overview of Typical Data Generation Pipeline +### Step 4: Run policy learning on generated data -pipeline overview (collect demo, postprocess demo, optionally annotate demo subtask terminations, run data generation, then run policy training) \ No newline at end of file +You can now run any policy learning algorithm on the generated data to train an agent. A common choice is to run Behavioral Cloning. The generated data is compatible with [robomimic](https://robomimic.github.io/), which is an easy way to train an agent on generated datasets, and compare the performance of different learning methods. The [Reproducing Experiments](https://mimicgen.github.io/docs/tutorials/reproducing_experiments.html) tutorial shows examples of how to train agents on the generated data. diff --git a/docs/tutorials/launching_several.md b/docs/tutorials/launching_several.md index cdb2ac7..a1d5d0a 100644 --- a/docs/tutorials/launching_several.md +++ b/docs/tutorials/launching_several.md @@ -1,3 +1,18 @@ # Launching Several Data Generation Runs -TODO: describe how we use ConfigGenerator, can run multiple config generation sweeps easily. \ No newline at end of file +MimicGen inherits the [Config](https://robomimic.github.io/docs/tutorials/configs.html) system from robomimic. Configs are specified as json files, and support both dictionary and "dot" syntax (e.g. `config.experiment.name` and `config["experiment"]["name"]`). + +MimicGen also uses the `ConfigGenerator` class from robomimic, and can use it to generate several config jsons efficiently. For a tutorial on how this generator works, please see the [tutorial from robomimic](https://robomimic.github.io/docs/tutorials/hyperparam_scan.html). In this repository, we grouped several related settings together into helper functions in `utils/config_utils.py` that operate over `ConfigGenerator` objects. 
+ +We furthermore provide examples of how to create and use multiple `ConfigGenerator` objects in scripts such as `scripts/generate_core_configs.py` and `scripts/generate_core_training_configs.py`. These scripts support using multiple base configs (e.g. one per task, since the task spec for each task will be different), and a user can specify different parameter settings per base config (see the `make_generators` function in these files). There are additional settings that are specified as global variables at the top of the files. These scripts print the file paths for all generated configs, and the commands to launch runs for each config. + +Users can easily modify the following files to generate large amounts of data generation and policy learning configs efficiently. + +Data Generation: +- `scripts/generate_core_configs.py` +- `scripts/generate_robot_transfer_configs.py` + + +Policy Learning: +- `scripts/generate_core_training_configs.py` +- `scripts/generate_robot_transfer_training_configs.py` diff --git a/docs/tutorials/reproducing_experiments.md b/docs/tutorials/reproducing_experiments.md index 2b2ac44..05de63a 100644 --- a/docs/tutorials/reproducing_experiments.md +++ b/docs/tutorials/reproducing_experiments.md @@ -1,3 +1,58 @@ # Reproducing Published Experiments and Results -This is a guide on how to reproduce published experiments and results \ No newline at end of file +There are two options for reproducing the set of results in the MimicGen paper. + +## Option 1: Download datasets and run policy learning + +You can directly download the datasets we generated and used in the MimicGen paper, and then subsequently run policy learning on the downloaded data. See the [Datasets](https://mimicgen.github.io/docs/datasets/mimicgen_corl_2023.html) page for exact instructions on how to do this. 
+ +## Option 2: Run data generation and then policy learning + +In this section, we show how to run MimicGen on the source demonstrations we released to generate datasets equivalent to the ones we produced in our paper, and then subsequently train policies on the generated data. + +
+

Note

+ +We recommend going through the [Getting Started](https://mimicgen.github.io/docs/tutorials/getting_started.html) tutorial first, so that you are familiar with the way data generation works. + +
+ +The steps are very similar to the steps taken for the quick data generation run in the [Getting Started](https://mimicgen.github.io/docs/tutorials/getting_started.html) tutorial. We provide a brief outline of the steps and important changes below. + +Before starting, make sure you are at the base repo path: +```sh +$ cd {/path/to/mimicgen} +``` + +### Step 1: Prepare source human datasets + +Download all source demonstrations of interest. You can download all of them with the command below (optionally provide the `--download_dir` argument to set the download path): +```sh +$ python mimicgen/scripts/download_datasets.py --dataset_type source --tasks all +``` + +We need to prepare each one for data generation. The bash script `scripts/prepare_all_src_datasets.sh` outlines the commands for each source dataset. We provide the command for Coffee below: +```sh +$ python mimicgen/scripts/prepare_src_dataset.py --dataset datasets/source/coffee.hdf5 --env_interface MG_Coffee --env_interface_type robosuite +``` + +### Step 2: Prepare data generation configs + +Open `scripts/generate_core_configs.py` and set `NUM_TRAJ = 1000` and `GUARANTEE = True` -- this means we will keep generating data until we generate 1000 successful trajectories. You can set additional parameters at the top of the file as well, e.g. in case you would like to change where data is generated. + +Next, run the script: +```sh +$ python mimicgen/scripts/generate_core_configs.py +``` + +The generated configs correspond to the **core** dataset type described on the [Datasets](https://mimicgen.github.io/docs/datasets/mimicgen_corl_2023.html) page, and the **object** dataset type as well (Mug Cleanup O1 and O2). + +If you would also like to generate configs for the **robot** dataset type (robot transfer experiments), you can follow the same steps above for `scripts/generate_robot_transfer_configs.py`. 
+ +### Step 3: Run data generation + +The scripts above print lines that correspond to data generation runs for each config. You can pick and choose which ones you would like to run and then launch them with `scripts/generate_dataset.py`. + +### Step 4: Run policy training + +Finally, you can run policy training on the generated data -- to reproduce the paper results, you can run BC-RNN. To make this easy, we provide `scripts/generate_core_training_configs.py` and `scripts/generate_robot_transfer_training_configs.py`. As before, you can configure some settings in global variables at the top of the file, such as where to store policy training results. These scripts generate robomimic training configs that can be run with `scripts/train.py` in the robomimic repository. diff --git a/docs/tutorials/subtask_termination_signals.md b/docs/tutorials/subtask_termination_signals.md index 089f94b..ba1fde7 100644 --- a/docs/tutorials/subtask_termination_signals.md +++ b/docs/tutorials/subtask_termination_signals.md @@ -1,12 +1,75 @@ # Subtask Termination Signals -how is it used to parse source data, and how to make your own manual annotation -where we get it from (by default, read from envirnoment interface, but can have manual annotation) +## What are these signals? -the 0 to 1 thing +MimicGen expects subtask termination signals to be present in each episode of the source demonstrations. Each signal is a flat numpy array with binary entries (e.g. 0 or 1). -note: only used on source demonstrations +The [get_source_info.py script](https://mimicgen.github.io/docs/tutorials/debugging_datagen.html#get-source-dataset-information) can be used to print the signals present in a given source demonstration. For an episode (e.g. `demo_0`) they are expected to be at `f["data/demo_0/datagen_info/subtask_term_signals"]`. Under this hdf5 group, there will be one or more hdf5 datasets with the name of the subtask termination signal, and the corresponding flat numpy array. 
-link to tutorial on visualizing subtasks to see how your annotations lead to the subtask splits +## How are they used? -link to datagen info module as well +These signals are read from the source dataset by the `parse_source_dataset` function (see `mimicgen/utils/file_utils.py`) during data generation and used to split each source demonstration into contiguous subtask segments. Each subtask corresponds to a specific subtask termination signal, with the exception of the final subtask, which ends at the end of the source demonstration. This mapping between the subtask and the corresponding signal is specified through the [TaskSpec](https://mimicgen.github.io/docs/modules/task_spec.html) object that comes from the MimicGen config json. + +The end of each subtask is inferred from the first 0 to 1 transition in the corresponding signal. For example, let us consider the source demonstrations for the robosuite StackThree task. There are 4 subtasks with corresponding subtask termination signals: + +``` +1. (signal: grasp_1) grasping cubeA (motion relative to cubeA) +2. (signal: place_1) placing cubeA on cubeB (motion relative to cubeB) +3. (signal: grasp_2) grasping cubeC (motion relative to cubeC) +4. (signal: None) placing cubeC on cubeA (motion relative to cubeA) +``` + +For the first source demonstration, the first 0 to 1 transition for grasp_1 is 50, for place_1 is 94, and for grasp_2 is 148. This splits the first source demonstration into four subtask segments with start and end indices as follows: + +``` +1. (signal: grasp_1) [0, 50] +2. (signal: place_1) [50, 94] +3. (signal: grasp_2) [94, 148] +4. (signal: None) [148, 210] +``` + +These source subtask segments are subsequently transformed and stitched together through linear interpolation to generate data for new scenes. 
+ +However, MimicGen also supports randomization of subtask boundaries -- this is where the `subtask_term_offset_range` parameter of the `TaskSpec` becomes relevant (see [TaskSpec](https://mimicgen.github.io/docs/modules/task_spec.html)). At the start of each data generation attempt, the subtask boundaries (indices 50, 94, and 148 above) can be randomized with an additive offset uniformly sampled in the given offset range bounds. + +The offset parameter can also be used to ensure that the end of each subtask happens at least N timesteps after the first 0 to 1 transition in the corresponding signal. For example, grasp_1 detects a successful grasp, but perhaps you would like the end of the subtask to be a little after grasping (e.g. when the lift begins). An easy way to do this is to specify an offset_range like (5, 10), so that the true subtask boundary will always occur 5 to 10 timesteps after the 0 to 1 transition. + +## How are they added to the source data? + +There are two ways that subtask termination signals can be added to a source dataset. The first is through the `prepare_src_dataset.py` script -- this will use the task-specific [Environment Interface](https://mimicgen.github.io/docs/modules/env_interfaces.html) (specifically, the `get_subtask_term_signals` method that is used to populate [DatagenInfo](https://mimicgen.github.io/docs/modules/datagen.html#datagen-info) objects) to get subtask termination signals for each timestep in each source demonstration. This relies on some heuristics per task. An example of this process can be found in the [Data Generation for Custom Environments](https://mimicgen.github.io/docs/tutorials/datagen_custom.html) tutorial. + +The second is to use manual human annotations for the end of each subtask in each source demonstration -- this can be done by using `scripts/annotate_subtasks.py`. + +
+

Note

+ +This script requires the `pygame` module to be installed (it will not be installed by default when installing the MimicGen repo). + +
+ +The script plays back demonstrations (using visual observations and the pygame renderer) in order to allow a user to annotate portions of the demonstrations. This is useful to annotate the end of each object-centric subtask in each source demonstration used by MimicGen, as an alternative to implementing subtask termination signals directly in the simulation environment. Some example invocations of the script: + +```sh +# specify the sequence of signals that should be annotated and the dataset images to render on-screen +$ python annotate_subtasks.py --dataset /path/to/demo.hdf5 --signals grasp_1 insert_1 grasp_2 \ + --render_image_names agentview_image robot0_eye_in_hand_image + +# limit annotation to first 2 demos +$ python annotate_subtasks.py --dataset /path/to/demo.hdf5 --signals grasp_1 insert_1 grasp_2 \ + --render_image_names agentview_image robot0_eye_in_hand_image --n 2 + +# limit annotation to demo 2 and 3 +$ python annotate_subtasks.py --dataset /path/to/demo.hdf5 --signals grasp_1 insert_1 grasp_2 \ + --render_image_names agentview_image robot0_eye_in_hand_image --n 2 --start 1 + +# scale up dataset images when rendering to screen by factor of 10 +$ python annotate_subtasks.py --dataset /path/to/demo.hdf5 --signals grasp_1 insert_1 grasp_2 \ + --render_image_names agentview_image robot0_eye_in_hand_image --image_scale 10 +``` + +
+

Note

+ +We provide some utilities to debug your choice of subtask termination signals and offsets -- see the [Debugging Data Generation](https://mimicgen.github.io/docs/tutorials/debugging_datagen.html) tutorial for more information. + +
diff --git a/docs/tutorials/task_visualizations.md b/docs/tutorials/task_visualizations.md index 27af5f8..b815ac2 100644 --- a/docs/tutorials/task_visualizations.md +++ b/docs/tutorials/task_visualizations.md @@ -1,8 +1,13 @@ # Task Visualizations -We provide a convenience script to write videos for each task's reset distribution at `scripts/get_reset_videos.py`. Set the `OUTPUT_FOLDER` global variable to the folder where you want to write the videos, and set `DATASET_INFOS` appropriately if you would like to limit the environments visualized. Then run the script. +We provide a convenience script to write videos for each task's reset distribution at `scripts/get_reset_videos.py` for the robosuite tasks we provide in this repository. Set the `OUTPUT_FOLDER` global variable to the folder where you want to write the videos, and set `DATASET_INFOS` appropriately if you would like to limit the environments visualized. Then run the script. The environments are also readily compatible with robosuite visualization scripts such as the [demo_random_action.py](https://github.com/ARISE-Initiative/robosuite/blob/b9d8d3de5e3dfd1724f4a0e6555246c460407daa/robosuite/demos/demo_random_action.py) script and the [make_reset_video.py](https://github.com/ARISE-Initiative/robosuite/blob/b9d8d3de5e3dfd1724f4a0e6555246c460407daa/robosuite/scripts/make_reset_video.py) script, but you will need to modify these files to add a `import mimicgen` line to make sure that `robosuite` can find these environments. -**Note**: You can find task reset visualizations on the [website](https://mimicgen.github.io), but they may look a little different as they were generated with robosuite v1.2. \ No newline at end of file +
+

Note

+ +You can find task reset visualizations on the [website](https://mimicgen.github.io), but they may look a little different as they were generated with robosuite v1.2. + +
\ No newline at end of file diff --git a/mimicgen/env_interfaces/base.py b/mimicgen/env_interfaces/base.py index 643b4aa..0cf638e 100644 --- a/mimicgen/env_interfaces/base.py +++ b/mimicgen/env_interfaces/base.py @@ -168,6 +168,10 @@ def get_subtask_term_signals(self): """ raise NotImplementedError + """ + This method can be left as-is in most cases, as it calls other implemented methods to provide a + DatagenInfo object. + """ def get_datagen_info(self, action=None): """ Get information needed for data generation, at the current diff --git a/mimicgen/scripts/annotate_subtasks.py b/mimicgen/scripts/annotate_subtasks.py index 290cc30..9847285 100644 --- a/mimicgen/scripts/annotate_subtasks.py +++ b/mimicgen/scripts/annotate_subtasks.py @@ -38,7 +38,12 @@ # for rendering images on-screen import cv2 -import pygame +try: + import pygame +except ImportError as e: + print("Got error: {}".format(e)) + print("") + print("pygame is required. Please install with `pip install pygame`") import robomimic from robomimic.utils.file_utils import get_env_metadata_from_dataset diff --git a/mimicgen/scripts/generate_core_configs.py b/mimicgen/scripts/generate_core_configs.py index 51f750f..f61acba 100644 --- a/mimicgen/scripts/generate_core_configs.py +++ b/mimicgen/scripts/generate_core_configs.py @@ -24,23 +24,18 @@ # set path to folder containing src datasets SRC_DATA_DIR = os.path.join(mimicgen.__path__[0], "../datasets/source") -# SRC_DATA_DIR = "/workspace/scratch/conda/public/mimicgen_environments/datasets/source" # set base folder for where to copy each base config and generate new config files for data generation CONFIG_DIR = "/tmp/core_configs" -# CONFIG_DIR = "/tmp/core_configs_ngc" # set base folder for newly generated datasets OUTPUT_FOLDER = "/tmp/core_datasets" -# OUTPUT_FOLDER = "/workspace/scratch/datasets/mimicgen_public/test_1" # number of trajectories to generate (or attempt to generate) -# NUM_TRAJ = 1000 -NUM_TRAJ = 10 +NUM_TRAJ = 1000 # whether to guarantee 
that many successful trajectories (e.g. keep running until that many successes, or stop at that many attempts) -# GUARANTEE = True -GUARANTEE = False +GUARANTEE = True # whether to run a quick debug run instead of full generation DEBUG = False diff --git a/mimicgen/scripts/generate_core_training_configs.py b/mimicgen/scripts/generate_core_training_configs.py index 1476081..b519116 100644 --- a/mimicgen/scripts/generate_core_training_configs.py +++ b/mimicgen/scripts/generate_core_training_configs.py @@ -23,16 +23,13 @@ # set path to folder with mimicgen generated datasets -# DATASET_DIR = "/tmp/core_datasets" -DATASET_DIR = "/workspace/scratch/datasets/mimicgen_public/test_1" +DATASET_DIR = "/tmp/core_datasets" # set base folder for where to generate new config files for training runs -# CONFIG_DIR = "/tmp/core_train_configs" -CONFIG_DIR = "/tmp/core_train_configs_ngc" +CONFIG_DIR = "/tmp/core_train_configs" # set base folder for training outputs (model checkpoints, videos, logs) -# OUTPUT_DIR = "/tmp/core_training_results" -OUTPUT_DIR = "/workspace/scratch/exp_results/mimicgen_public/test_1" +OUTPUT_DIR = "/tmp/core_training_results" # path to base config BASE_CONFIG = os.path.join(robomimic.__path__[0], "exps/templates/bc.json") diff --git a/mimicgen/scripts/generate_dataset.py b/mimicgen/scripts/generate_dataset.py index 937c73b..65fa22b 100644 --- a/mimicgen/scripts/generate_dataset.py +++ b/mimicgen/scripts/generate_dataset.py @@ -4,6 +4,23 @@ """ Main data generation script. 
+ +Examples: + + # run normal data generation + python generate_dataset.py --config /path/to/config.json + + # render all data generation attempts on-screen + python generate_dataset.py --config /path/to/config.json --render + + # render all data generation attempts to a video + python generate_dataset.py --config /path/to/config.json --video_path /path/to/video.mp4 + + # run a quick debug run + python generate_dataset.py --config /path/to/config.json --debug + + # pause after every subtask to debug data generation + python generate_dataset.py --config /path/to/config.json --render --pause_subtask """ import os diff --git a/mimicgen/scripts/generate_robot_transfer_configs.py b/mimicgen/scripts/generate_robot_transfer_configs.py index 48436da..b124cf7 100644 --- a/mimicgen/scripts/generate_robot_transfer_configs.py +++ b/mimicgen/scripts/generate_robot_transfer_configs.py @@ -25,23 +25,18 @@ # set path to folder containing src datasets SRC_DATA_DIR = os.path.join(mimicgen.__path__[0], "../datasets/source") -# SRC_DATA_DIR = "/workspace/scratch/conda/public/mimicgen_environments/datasets/source" # set base folder for where to copy each base config and generate new config files for data generation CONFIG_DIR = "/tmp/robot_configs" -# CONFIG_DIR = "/tmp/robot_configs_ngc" # set base folder for newly generated datasets OUTPUT_FOLDER = "/tmp/robot_datasets" -# OUTPUT_FOLDER = "/workspace/scratch/datasets/mimicgen_public/test_1_robot" # number of trajectories to generate (or attempt to generate) -# NUM_TRAJ = 1000 -NUM_TRAJ = 10 +NUM_TRAJ = 1000 # whether to guarantee that many successful trajectories (e.g. 
keep running until that many successes, or stop at that many attempts) -# GUARANTEE = True -GUARANTEE = False +GUARANTEE = True # whether to run a quick debug run instead of full generation DEBUG = False diff --git a/mimicgen/scripts/generate_robot_transfer_training_configs.py b/mimicgen/scripts/generate_robot_transfer_training_configs.py index b9d4be9..9fd8401 100644 --- a/mimicgen/scripts/generate_robot_transfer_training_configs.py +++ b/mimicgen/scripts/generate_robot_transfer_training_configs.py @@ -24,16 +24,13 @@ # set path to folder with mimicgen generated datasets -# DATASET_DIR = "/tmp/robot_datasets" -DATASET_DIR = "/workspace/scratch/datasets/mimicgen_public/test_1_robot" +DATASET_DIR = "/tmp/robot_datasets" # set base folder for where to generate new config files for training runs -# CONFIG_DIR = "/tmp/robot_train_configs" -CONFIG_DIR = "/tmp/robot_train_configs_ngc" +CONFIG_DIR = "/tmp/robot_train_configs" # set base folder for training outputs (model checkpoints, videos, logs) -# OUTPUT_DIR = "/tmp/robot_training_results" -OUTPUT_DIR = "/workspace/scratch/exp_results/mimicgen_public/test_1_robot" +OUTPUT_DIR = "/tmp/robot_training_results" # path to base config BASE_CONFIG = os.path.join(robomimic.__path__[0], "exps/templates/bc.json") From 377d5d0271e7a53373589920ef623ea55bf0ddfe Mon Sep 17 00:00:00 2001 From: Ajay Mandlekar Date: Mon, 8 Jul 2024 14:57:59 -0700 Subject: [PATCH 06/11] update docs reqs --- requirements-docs.txt | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/requirements-docs.txt b/requirements-docs.txt index 78b8784..c3e63e5 100644 --- a/requirements-docs.txt +++ b/requirements-docs.txt @@ -1,8 +1,4 @@ # requirements for building sphinx docs -pygments==2.4.1 -sphinx -sphinx_rtd_theme -sphinx_markdown_tables -sphinx_book_theme -recommonmark -nbsphinx \ No newline at end of file +sphinx_book_theme==0.3.3 +sphinx_markdown_tables==0.0.15 +recommonmark==0.7.1 From 8267e1917496a012a1be2b3cbe5a1f2465959d8c Mon 
Sep 17 00:00:00 2001 From: Ajay Mandlekar Date: Mon, 8 Jul 2024 15:14:57 -0700 Subject: [PATCH 07/11] fix horizon for pick place --- mimicgen/scripts/generate_core_training_configs.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mimicgen/scripts/generate_core_training_configs.py b/mimicgen/scripts/generate_core_training_configs.py index b519116..4787d54 100644 --- a/mimicgen/scripts/generate_core_training_configs.py +++ b/mimicgen/scripts/generate_core_training_configs.py @@ -151,7 +151,7 @@ def make_generators(base_config, dataset_dir, output_dir): dataset_names=[ "pick_place_D0", ], - horizon=500, + horizon=1000, ), # mug_cleanup dict( From e5d1c4b42d27677986300687fe0bfb0ae9d86af6 Mon Sep 17 00:00:00 2001 From: Ajay Mandlekar Date: Mon, 8 Jul 2024 15:16:58 -0700 Subject: [PATCH 08/11] get rid of a couple unneeded comments --- mimicgen/scripts/generate_core_training_configs.py | 3 --- mimicgen/scripts/generate_robot_transfer_training_configs.py | 3 --- 2 files changed, 6 deletions(-) diff --git a/mimicgen/scripts/generate_core_training_configs.py b/mimicgen/scripts/generate_core_training_configs.py index 4787d54..436b1d6 100644 --- a/mimicgen/scripts/generate_core_training_configs.py +++ b/mimicgen/scripts/generate_core_training_configs.py @@ -198,7 +198,6 @@ def make_generators(base_config, dataset_dir, output_dir): ret = [] for setting in all_settings: for mod in ["low_dim", "image"]: - # for mod in ["image"]: ret.append(make_gen(os.path.expanduser(base_config), setting, output_dir, mod)) return ret @@ -249,8 +248,6 @@ def make_gen(base_config, settings, output_dir, mod): value_names=settings["dataset_names"], ) - # print(json.dumps(settings["dataset_paths"], indent=4)) - # rollout settings generator.add_param( key="experiment.rollout.horizon", diff --git a/mimicgen/scripts/generate_robot_transfer_training_configs.py b/mimicgen/scripts/generate_robot_transfer_training_configs.py index 9fd8401..2656d5a 100644 --- 
a/mimicgen/scripts/generate_robot_transfer_training_configs.py +++ b/mimicgen/scripts/generate_robot_transfer_training_configs.py @@ -87,7 +87,6 @@ def make_generators(base_config, dataset_dir, output_dir): ret = [] for setting in all_settings: for mod in ["low_dim", "image"]: - # for mod in ["image"]: ret.append(make_gen(os.path.expanduser(base_config), setting, output_dir, mod)) return ret @@ -138,8 +137,6 @@ def make_gen(base_config, settings, output_dir, mod): value_names=settings["dataset_names"], ) - # print(json.dumps(settings["dataset_paths"], indent=4)) - # rollout settings generator.add_param( key="experiment.rollout.horizon", From 7fa351853743d92dd9496947315bf037747fb318 Mon Sep 17 00:00:00 2001 From: Ajay Mandlekar Date: Mon, 8 Jul 2024 15:22:51 -0700 Subject: [PATCH 09/11] update date --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 1f52ad1..e443ed3 100644 --- a/README.md +++ b/README.md @@ -18,7 +18,7 @@ For business inquiries, please submit this form: [NVIDIA Research Licensing](htt ------- ## Latest Updates -- [07/05/2024] **v1.0.0**: Full code release, including data generation code +- [07/09/2024] **v1.0.0**: Full code release, including data generation code - [04/04/2024] **v0.1.1**: Dataset license changed to [CC-BY 4.0](https://creativecommons.org/licenses/by/4.0/), which is less restrictive (see [License](#license)) - [09/28/2023] **v0.1.0**: Initial code and paper release From bec1e492d76529e3fa5d23c1e53e9fae4bc99750 Mon Sep 17 00:00:00 2001 From: Ajay Mandlekar Date: Mon, 8 Jul 2024 15:36:34 -0700 Subject: [PATCH 10/11] small change to reqs --- setup.py | 1 - 1 file changed, 1 deletion(-) diff --git a/setup.py b/setup.py index 99d4784..67bf26e 100644 --- a/setup.py +++ b/setup.py @@ -23,7 +23,6 @@ "imageio-ffmpeg", "gdown", "chardet", - "mujoco==2.3.2", ], eager_resources=['*'], include_package_data=True, From aec4fd5713846f1244549ba658e4877cd60c36d1 Mon Sep 17 00:00:00 2001 From: 
Ajay Mandlekar Date: Mon, 8 Jul 2024 16:26:14 -0700 Subject: [PATCH 11/11] some docs updates --- docs/conf.py | 2 +- docs/introduction/installation.md | 16 +++++++++++---- docs/miscellaneous/troubleshooting.md | 13 +++++++++---- docs/tutorials/getting_started.md | 28 ++++++++++++++++++++++++--- requirements-docs.txt | 1 - 5 files changed, 47 insertions(+), 13 deletions(-) diff --git a/docs/conf.py b/docs/conf.py index 55a381e..74c2c52 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -33,7 +33,7 @@ # ones. extensions = [ 'sphinx.ext.napoleon', - 'sphinx_markdown_tables', + # 'sphinx_markdown_tables', 'sphinx.ext.mathjax', 'sphinx.ext.githubpages', 'sphinx.ext.autodoc', diff --git a/docs/introduction/installation.md b/docs/introduction/installation.md index 215799f..4601cea 100644 --- a/docs/introduction/installation.md +++ b/docs/introduction/installation.md @@ -43,8 +43,8 @@ Most of these additional dependencies are best installed from source. ```sh $ cd $ git clone https://github.com/ARISE-Initiative/robosuite.git -$ git checkout b9d8d3de5e3dfd1724f4a0e6555246c460407daa $ cd robosuite +$ git checkout b9d8d3de5e3dfd1724f4a0e6555246c460407daa $ pip install -e . ``` @@ -64,8 +64,8 @@ The git checkout command corresponds to the commit we used for testing our polic ```sh $ cd $ git clone https://github.com/ARISE-Initiative/robomimic.git -$ git checkout d0b37cf214bd24fb590d182edb6384333f67b661 $ cd robomimic +$ git checkout d0b37cf214bd24fb590d182edb6384333f67b661 $ pip install -e . ``` @@ -87,11 +87,19 @@ The git checkout command corresponds to the commit we used for testing our polic +
+

Warning

+ +We recommend removing the dependencies in the `setup.py` file (the `install_requires` list) before installation, as it uses deprecated dependencies (such as mujoco-py). + +
+ ```sh $ cd $ git clone https://github.com/ARISE-Initiative/robosuite-task-zoo +$ cd robosuite-task-zoo $ git checkout 74eab7f88214c21ca1ae8617c2b2f8d19718a9ed -$ cd robosuite_task_zoo +# NOTE: should remove dependencies in setup.py in the "install_requires" before the last step $ pip install -e . ``` @@ -129,7 +137,7 @@ To test data generation please move on to the [Getting Started](https://mimicgen -## Install documentation dependencies +## Install Documentation Dependencies If you plan to contribute to the repository and add new features, you must install the additional requirements required to build the documentation locally: diff --git a/docs/miscellaneous/troubleshooting.md b/docs/miscellaneous/troubleshooting.md index 1cd5a97..475e7a4 100644 --- a/docs/miscellaneous/troubleshooting.md +++ b/docs/miscellaneous/troubleshooting.md @@ -1,9 +1,14 @@ # Troubleshooting and Known Issues -- If your robomimic training seems to be proceeding slowly (especially for image-based agents), it might be a problem with robomimic and more modern versions of PyTorch. We recommend PyTorch 1.12.1 (on Ubuntu, we used `conda install pytorch==1.12.1 torchvision==0.13.1 torchaudio==0.12.1 cudatoolkit=11.3 -c pytorch`). It is also a good idea to verify that the GPU is being utilized during training. -- In our testing on M1 macbook we ran into the following error when using `imageio-ffmpeg` installed through pip: `RuntimeError: No ffmpeg exe could be found. Install ffmpeg on your system, or set the IMAGEIO_FFMPEG_EXE environment variable.` Using `conda install imageio-ffmpeg` fixed this issue on our end. +If you run into an error not documented below, please search through the [GitHub issues](https://github.com/NVlabs/mimicgen/issues), and create a new one if you cannot find a fix. + +## Installation + +- In our testing on M1 macbook we ran into the following error: `RuntimeError: No ffmpeg exe could be found. 
Install ffmpeg on your system, or set the IMAGEIO_FFMPEG_EXE environment variable.` Using `conda install ffmpeg` fixed this issue on our end. - If you run into trouble with installing [egl_probe](https://github.com/StanfordVL/egl_probe) during robomimic installation (e.g. `ERROR: Failed building wheel for egl_probe`) you may need to make sure `cmake` is installed. A simple `pip install cmake` should work. - If you run into other strange installation issues, one potential fix is to launch a new terminal, activate your conda environment, and try the install commands that are failing once again. One clue that the current terminal state is corrupt and this fix will help is if you see installations going into a different conda environment than the one you have active. -- If you run into rendering issues with the Sawyer robot arm, or have trouble reproducing our results, your MuJoCo version might be the issue. As noted in the [Installation](https://mimicgen.github.io/docs/introduction/installation.html) section, please use MuJoCo 2.3.2 (`pip install mujoco==2.3.2`). -If you run into an error not documented above, please search through the [GitHub issues](https://github.com/NVlabs/mimicgen/issues), and create a new one if you cannot find a fix. \ No newline at end of file +## Policy Learning + +- If your robomimic training seems to be proceeding slowly (especially for image-based agents), it might be a problem with robomimic and more modern versions of PyTorch. We recommend PyTorch 1.12.1 (on Ubuntu, we used `conda install pytorch==1.12.1 torchvision==0.13.1 torchaudio==0.12.1 cudatoolkit=11.3 -c pytorch`). It is also a good idea to verify that the GPU is being utilized during training. +- If you run into rendering issues with the Sawyer robot arm, or have trouble reproducing our results, your MuJoCo version might be the issue. 
As noted in the [Installation](https://mimicgen.github.io/docs/introduction/installation.html) section, please use MuJoCo 2.3.2 (`pip install mujoco==2.3.2`). diff --git a/docs/tutorials/getting_started.md b/docs/tutorials/getting_started.md index d7f23fe..0300873 100644 --- a/docs/tutorials/getting_started.md +++ b/docs/tutorials/getting_started.md @@ -28,7 +28,10 @@ $ python mimicgen/scripts/download_datasets.py --dataset_type source --tasks squ This is a basic robomimic dataset collected via teleoperation in robosuite (see [here](https://robomimic.github.io/docs/datasets/robosuite.html)). We need to add in extra information to the hdf5 to make it compatible with MimicGen: ```sh -$ python mimicgen/scripts/prepare_src_dataset.py --dataset datasets/source/square.hdf5 --env_interface MG_Square --env_interface_type robosuite +$ python mimicgen/scripts/prepare_src_dataset.py \ +--dataset datasets/source/square.hdf5 \ +--env_interface MG_Square \ +--env_interface_type robosuite ``` The `--env_interface` and `--env_interface_type` arguments allow the script to find the correct [Environment Interface](https://mimicgen.github.io/docs/modules/env_interfaces.html) class to extract [DatagenInfo objects](https://mimicgen.github.io/docs/modules/datagen.html#datagen-info) at each timestep. In general, each task needs to have an environment interface class to tell MimicGen how to retrieve object poses and other information needed during data generation. @@ -37,7 +40,17 @@ The `--env_interface` and `--env_interface_type` arguments allow the script to f Each data generation run requires a config json (similar to robomimic [Configs](https://robomimic.github.io/docs/modules/configs.html)) that allows us to configure different settings. Template configs for each task are at `mimicgen/exps/templates` and are auto-generated (with `scripts/generate_config_templates.py`). The repository has easy ways to modify these templates to generate new config jsons. 
-For now, we will use a script to produce experiment configs consistent with the MimicGen paper. Open `scripts/generate_core_configs.py` and set `NUM_TRAJ = 10` and `GUARANTEE = False` -- this means we will attempt to generate 10 new trajectories. Next, run the script: +For now, we will use a script to produce experiment configs consistent with the MimicGen paper. Open `scripts/generate_core_configs.py` and set `NUM_TRAJ = 10` and `GUARANTEE = False` -- this means we will attempt to generate 10 new trajectories. + +
+

Warning

+ +If you do not edit `scripts/generate_core_configs.py` the default settings will run data generation until 1000 successful trajectories have been collected. This is why it is important to set `NUM_TRAJ = 10` and `GUARANTEE = False` for a quick run. Alternatively, pass the `--debug` flag to the command in Step 3, which will run an even smaller data generation run. + +
+ +Next, run the script: + ```sh $ python mimicgen/scripts/generate_core_configs.py ``` @@ -48,9 +61,18 @@ It generates a set of configs (and prints their paths) and also prints lines tha Next, we run data generation on the Square D1 task (this will take a couple minutes): ```sh -$ python mimicgen/scripts/generate_dataset.py --config /tmp/core_configs/demo_src_square_task_D1.json --auto-remove-exp +$ python mimicgen/scripts/generate_dataset.py \ +--config /tmp/core_configs/demo_src_square_task_D1.json \ +--auto-remove-exp ``` +
+

Note

+ +If you run into a `RuntimeError: No ffmpeg exe could be found.` at the end of the script, this means rendering the dataset to video failed. We found that a simple `conda install ffmpeg` fixed the problem on our end (as documented on the [troubleshooting page](https://mimicgen.github.io/docs/miscellaneous/troubleshooting.html)). + +
+ By default, the data generation folder can be found at `/tmp/core_datasets/square/demo_src_square_task_D1`. The contents of this folder are as follows: ``` demo.hdf5 # generated hdf5 containing successful demonstrations diff --git a/requirements-docs.txt b/requirements-docs.txt index c3e63e5..3381b70 100644 --- a/requirements-docs.txt +++ b/requirements-docs.txt @@ -1,4 +1,3 @@ # requirements for building sphinx docs sphinx_book_theme==0.3.3 -sphinx_markdown_tables==0.0.15 recommonmark==0.7.1