🐛 Fix the gdown bug
Skylark0924 committed Jan 22, 2024
1 parent 43fcf55 commit 41d1338
Showing 4 changed files with 14 additions and 6 deletions.
@@ -26,7 +26,9 @@ def train(custom_args):
"headless={}".format(custom_args.headless),
"num_envs={}".format(custom_args.num_envs)]
cfg = get_config('./learning/rl', 'config', args=args_overrides)
- cfg.train.Trainer.maximum_steps = 1000000
+ cfg.task.env.objectType = custom_args.object.lower()
+ cfg.train.Trainer.maximum_steps = 100000
+ cfg.train.Trainer.experiment_name = custom_args.object
cfg_dict = omegaconf_to_dict(cfg.task)

set_seed(cfg.train.Trainer.seed)
@@ -80,7 +82,7 @@ def inference(custom_args):
inference=True)
# load checkpoint
if custom_args.ckpt_path is None:
- custom_args.ckpt_path = model_zoo(name=f"{custom_args.task}RofuncRLPPO.pth")
+ custom_args.ckpt_path = model_zoo(name=f"{custom_args.task}{custom_args.object}RofuncRLPPO.pth")
trainer.agent.load_ckpt(custom_args.ckpt_path)

# Start inference
@@ -100,12 +102,13 @@ def inference(custom_args):
# QbSoftHandGrasp, BiQbSoftHandGraspAndPlace, BiQbSoftHandSynergyGrasp, QbSoftHandSynergyGrasp
# ShadowHandGrasp, CURIQbSoftHandSynergyGrasp
parser.add_argument("--task", type=str, default="CURIQbSoftHandSynergyGrasp")
parser.add_argument("--object", type=str, default="Hammer")
parser.add_argument("--agent", type=str, default="ppo") # Available agents: ppo, sac, td3, a2c
parser.add_argument("--num_envs", type=int, default=1024)
parser.add_argument("--sim_device", type=int, default=0)
parser.add_argument("--rl_device", type=int, default=gpu_id)
parser.add_argument("--headless", type=str, default="False")
parser.add_argument("--inference", action="store_true", help="turn to inference mode while adding this argument")
parser.add_argument("--inference", action="store_false", help="turn to inference mode while adding this argument")
parser.add_argument("--ckpt_path", type=str, default=None)
custom_args = parser.parse_args()

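The new --object argument threads an object name through the whole example: it is lowercased into cfg.task.env.objectType for the task config and appended verbatim to the checkpoint name that model_zoo() resolves. A minimal sketch of that naming convention, using a hypothetical compose_ckpt_name helper rather than the example script's actual wiring:

# Sketch of the checkpoint naming pattern seen above; compose_ckpt_name is a
# hypothetical helper for illustration, not part of rofunc.
def compose_ckpt_name(task: str, object_name: str, agent: str = "ppo") -> str:
    # e.g. "CURIQbSoftHandSynergyGrasp" + "Hammer" + "RofuncRLPPO.pth"
    return f"{task}{object_name}RofuncRL{agent.upper()}.pth"

print(compose_ckpt_name("CURIQbSoftHandSynergyGrasp", "Hammer"))
# -> CURIQbSoftHandSynergyGraspHammerRofuncRLPPO.pth

The result matches the key registered in model_zoo.json below.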
5 changes: 5 additions & 0 deletions rofunc/config/learning/model_zoo.json
@@ -179,6 +179,11 @@
"name": "BiShadowHandTwoCatchUnderarmRofuncRLPPO.pth",
"md5": ""
},
"CURIQbSoftHandSynergyGraspHammerRofuncRLPPO.pth": {
"url": "https://drive.google.com/uc?id=1VxFNboUOclQSzm0nAeA7kMjtNRQAk5-V&export=download",
"name": "CURIQbSoftHandSynergyGraspHammerRofuncRLPPO.pth",
"md5": ""
},
"efficientsam_s_gpu.jit": {
"url": "https://drive.google.com/uc?id=1lMCUFOLeQOlIpuaIA00MB174lL7ZiuoS&export=download",
"name": "efficientsam_s_gpu.jit",
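Each entry in model_zoo.json pairs a checkpoint name with a Google Drive URL that is fetched via gdown, which is exactly the code path affected by the version pin in setup.py below. A rough sketch of that lookup-and-download step, assuming a local copy of model_zoo.json and a cache directory of your choosing (the real rofunc model_zoo helper may organize this differently):

import json
import os

import gdown  # the plain gdown.download(url, output) call used here is standard gdown API


def fetch_pretrained(name, zoo_path="rofunc/config/learning/model_zoo.json",
                     cache_dir=os.path.expanduser("~/.rofunc/ckpts")):
    """Look up a registered checkpoint by name and download it if not cached (sketch)."""
    with open(zoo_path) as f:
        entry = json.load(f)[name]  # raises KeyError if the checkpoint is not registered
    os.makedirs(cache_dir, exist_ok=True)
    output = os.path.join(cache_dir, entry["name"])
    if not os.path.exists(output):
        gdown.download(entry["url"], output, quiet=False)
    return output


# e.g. fetch_pretrained("CURIQbSoftHandSynergyGraspHammerRofuncRLPPO.pth")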
@@ -1243,8 +1243,8 @@ def pre_physics_step(self, actions):
pose.r.y = goal_rot[i, 1]
pose.r.z = goal_rot[i, 2]
pose.r.w = goal_rot[i, 3]
- gymutil.draw_lines(self.axes_geom, self.gym, self.viewer, self.envs[i], pose)
- gymutil.draw_lines(self.sphere_geom, self.gym, self.viewer, self.envs[i], pose)
+ # gymutil.draw_lines(self.axes_geom, self.gym, self.viewer, self.envs[i], pose)
+ # gymutil.draw_lines(self.sphere_geom, self.gym, self.viewer, self.envs[i], pose)

# hand_dist = torch.norm(self.object_pos - hand_pos, p=2, dim=-1)
# self.apply_forces[:, 0, :] = self.actions[:, 0:3] * self.dt * self.transition_scale * 100000
2 changes: 1 addition & 1 deletion setup.py
@@ -33,7 +33,7 @@
'hydra-core==1.3.2',
'opencv-python==4.7.0.72',
'neurokit2==0.2.4',
- 'gdown==4.7.1',
+ 'gdown==4.6.3',
'pytz==2023.3',
'shutup==0.2.0',
'numpy<=1.23.0',
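The dependency change swaps the gdown pin from 4.7.1 to 4.6.3, the release this commit treats as working for the Google Drive downloads above. If downloads still fail, a quick runtime check (a small convenience snippet, not something rofunc ships) confirms whether the installed gdown actually matches the pin:

from importlib.metadata import version  # Python 3.8+

# Compare the installed gdown release against the pin in setup.py.
installed = version("gdown")
if installed != "4.6.3":
    print(f"gdown {installed} is installed, but setup.py pins 4.6.3; "
          "model zoo downloads from Google Drive may misbehave.")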
