forked from quantumiracle/robolite
-
Notifications
You must be signed in to change notification settings - Fork 0
/
demo_gym_functionality.py
101 lines (80 loc) · 2.98 KB
/
demo_gym_functionality.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
"""
This script shows how to adapt an environment to be compatible
with the OpenAI gym API. This is extremely useful when using
learning pipelines that require supporting these APIs.
For instance, this can be used with OpenAI Baselines
(https://github.com/openai/baselines) to train agents
with RL.
We base this script off of some code snippets found
in the "Getting Started with Gym" section of the OpenAI
gym documentation.
The following snippet was used to show how to list all environments.
from gym import envs
print(envs.registry.all())
The following snippet was used to demo basic functionality.
import gym
env = gym.make('CartPole-v0')
for i_episode in range(20):
observation = env.reset()
for t in range(100):
env.render()
print(observation)
action = env.action_space.sample()
observation, reward, done, info = env.step(action)
if done:
print("Episode finished after {} timesteps".format(t+1))
break
"""
"""
The following snippet was used to show how to list all environments.
from gym import envs
print(envs.registry.all())
We demonstrate equivalent functionality below.
"""
import robosuite as suite
# Enumerate every environment registered with the suite, sorted by name.
env_names = sorted(suite.environments.ALL_ENVS)
print("Welcome to Surreal Robotics Suite v{}!".format(suite.__version__))
print(suite.__logo__)
print("Here is a list of environments in the suite:\n")
# Assemble the indexed listing in one pass and emit it as a single block.
listing = "\n".join("[{}] {}".format(idx, name) for idx, name in enumerate(env_names))
print(listing)
print()
"""
The following snippet was used to demo basic functionality.
import gym
env = gym.make('CartPole-v0')
for i_episode in range(20):
observation = env.reset()
for t in range(100):
env.render()
print(observation)
action = env.action_space.sample()
observation, reward, done, info = env.step(action)
if done:
print("Episode finished after {} timesteps".format(t+1))
break
We demonstrate equivalent functionality below.
"""
from robosuite.wrappers import GymWrapper
if __name__ == "__main__":
    # Wrap the robosuite task so it exposes the standard Gym API
    # (reset / step / render / action_space).
    env = GymWrapper(
        suite.make(
            "SawyerLift",
            use_camera_obs=False,  # skip pixel observations entirely
            has_offscreen_renderer=False,  # no offscreen buffer needed without pixels
            has_renderer=True,  # render to an on-screen window
            reward_shaping=True,  # dense rewards instead of sparse success signal
            control_freq=100,  # high control rate keeps the simulation looking smooth
        )
    )
    # Roll out 20 episodes of at most 500 steps, acting at random.
    for episode in range(20):
        observation = env.reset()
        for step in range(500):
            env.render()
            observation, reward, done, info = env.step(env.action_space.sample())
            if done:
                print("Episode finished after {} timesteps".format(step + 1))
                break