import os
import threading
import collections
import ale_py
import ale_py.roms as roms
import elements
import embodied
import numpy as np
from PIL import Image
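

# Atari environment with standard DQN-style preprocessing: action repeat with
# frame pooling, sticky actions, random no-op starts, optional grayscale
# conversion and resizing, and configurable handling of life losses.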
class Atari(embodied.Env):

  LOCK = threading.Lock()
  WEIGHTS = np.array([0.299, 0.587, 1 - (0.299 + 0.587)])
  ACTION_MEANING = (
      'NOOP', 'FIRE', 'UP', 'RIGHT', 'LEFT', 'DOWN', 'UPRIGHT', 'UPLEFT',
      'DOWNRIGHT', 'DOWNLEFT', 'UPFIRE', 'RIGHTFIRE', 'LEFTFIRE', 'DOWNFIRE',
      'UPRIGHTFIRE', 'UPLEFTFIRE', 'DOWNRIGHTFIRE', 'DOWNLEFTFIRE')
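
  # Key constructor options:
  #   repeat: how many times each action is applied to the emulator.
  #   pooling: number of most recent frames aggregated ('max' or 'mean') into
  #     each observation, which removes Atari sprite flicker.
  #   sticky: with probability 0.25 the previous action is repeated instead
  #     of the new one, adding stochasticity (Machado et al., 2018).
  #   noops: up to this many random NOOP steps are applied after each reset.
  #   lives: losing a life is ignored ('unused'), marked terminal without
  #     ending the episode ('discount'), or ends the episode ('reset').
  #   autostart: presses FIRE (and UP) after reset for games that require it.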
  def __init__(
      self, name, repeat=4, size=(84, 84), gray=True, noops=0, lives='unused',
      sticky=True, actions='all', length=108000, pooling=2, aggregate='max',
      resize='pillow', autostart=False, clip_reward=False, seed=None):
    assert lives in ('unused', 'discount', 'reset'), lives
    assert actions in ('all', 'needed'), actions
    assert resize in ('opencv', 'pillow'), resize
    assert aggregate in ('max', 'mean'), aggregate
    assert pooling >= 1, pooling
    assert repeat >= 1, repeat
    if name == 'james_bond':
      name = 'jamesbond'
    self.repeat = repeat
    self.size = size
    self.gray = gray
    self.noops = noops
    self.lives = lives
    self.sticky = sticky
    self.length = length
    self.pooling = pooling
    self.aggregate = aggregate
    self.resize = resize
    self.autostart = autostart
    self.clip_reward = clip_reward
    self.rng = np.random.default_rng(seed)
    with self.LOCK:
      self.ale = ale_py.ALEInterface()
    self.ale.setLoggerMode(ale_py.LoggerMode.Error)
    self.ale.setInt(b'random_seed', self.rng.integers(0, 2 ** 31))
    path = os.environ.get('ALE_ROM_PATH', None)
    if path:
      self.ale.loadROM(os.path.join(path, f'{name}.bin'))
    else:
      self.ale.loadROM(roms.get_rom_path(name))
    self.ale.setFloat('repeat_action_probability', 0.25 if sticky else 0.0)
    self.actionset = {
        'all': self.ale.getLegalActionSet,
        'needed': self.ale.getMinimalActionSet,
    }[actions]()
    # ale-py reports the screen dimensions as (height, width).
    H, W = self.ale.getScreenDims()
    self.buffers = collections.deque(
        [np.zeros((H, W, 3), np.uint8) for _ in range(self.pooling)],
        maxlen=self.pooling)
    self.prevlives = None
    self.duration = None
    self.done = True

  @property
  def obs_space(self):
    return {
        'image': elements.Space(np.uint8, (*self.size, 1 if self.gray else 3)),
        'reward': elements.Space(np.float32),
        'is_first': elements.Space(bool),
        'is_last': elements.Space(bool),
        'is_terminal': elements.Space(bool),
    }

  @property
  def act_space(self):
    return {
        'action': elements.Space(np.int32, (), 0, len(self.actionset)),
        'reset': elements.Space(bool),
    }
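
  # Applies the selected action `repeat` times, summing the rewards, and
  # renders only the final `pooling` frames of the repeat window into the
  # frame buffers. The loop exits early if the game ends, the episode reaches
  # its maximum length, or a life is lost under the 'discount' or 'reset'
  # lives modes.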
  def step(self, action):
    if action['reset'] or self.done:
      self._reset()
      self.prevlives = self.ale.lives()
      self.duration = 0
      self.done = False
      return self._obs(0.0, is_first=True)
    reward = 0.0
    terminal = False
    last = False
    assert 0 <= action['action'] < len(self.actionset), action['action']
    act = self.actionset[action['action']]
    for repeat in range(self.repeat):
      reward += self.ale.act(act)
      self.duration += 1
      if repeat >= self.repeat - self.pooling:
        self._render()
      if self.ale.game_over():
        terminal = True
        last = True
      if self.duration >= self.length:
        last = True
      lives = self.ale.lives()
      if self.lives == 'discount' and 0 < lives < self.prevlives:
        terminal = True
      if self.lives == 'reset' and 0 < lives < self.prevlives:
        terminal = True
        last = True
      self.prevlives = lives
      if terminal or last:
        break
    self.done = last
    obs = self._obs(reward, is_last=last, is_terminal=terminal)
    return obs
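
  # Resets the emulator, performs a random number of no-op steps, and, when
  # autostart is enabled, presses FIRE (and UP) for games that wait for input
  # to begin. If any of these actions ends the game, the emulator is reset
  # again. Afterwards, the first frame is copied into all pooling buffers so
  # the initial observation does not pool over stale frames.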
  def _reset(self):
    with self.LOCK:
      self.ale.reset_game()
    for _ in range(self.rng.integers(self.noops + 1)):
      self.ale.act(self.ACTION_MEANING.index('NOOP'))
      if self.ale.game_over():
        with self.LOCK:
          self.ale.reset_game()
    if self.autostart and self.ACTION_MEANING.index('FIRE') in self.actionset:
      self.ale.act(self.ACTION_MEANING.index('FIRE'))
      if self.ale.game_over():
        with self.LOCK:
          self.ale.reset_game()
      self.ale.act(self.ACTION_MEANING.index('UP'))
      if self.ale.game_over():
        with self.LOCK:
          self.ale.reset_game()
    self._render()
    # Fill all pooling buffers with the first frame of the new episode.
    for i, dst in enumerate(self.buffers):
      if i > 0:
        np.copyto(dst, self.buffers[0])
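
  # The frame buffers form a ring: the oldest buffer rotates to the front and
  # is overwritten in place with the current emulator screen.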
  def _render(self):
    self.buffers.appendleft(self.buffers.pop())
    self.ale.getScreenRGB(self.buffers[0])
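
  # Produces the observation dict: pools the buffered frames by max or mean,
  # resizes via OpenCV or Pillow, and optionally converts to grayscale.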
  def _obs(self, reward, is_first=False, is_last=False, is_terminal=False):
    if self.clip_reward:
      reward = np.sign(reward)
    if self.aggregate == 'max':
      image = np.amax(self.buffers, 0)
    elif self.aggregate == 'mean':
      image = np.mean(self.buffers, 0).astype(np.uint8)
    if self.resize == 'opencv':
      import cv2
      image = cv2.resize(image, self.size, interpolation=cv2.INTER_AREA)
    elif self.resize == 'pillow':
      image = Image.fromarray(image)
      image = image.resize(self.size, Image.BILINEAR)
      image = np.array(image)
    if self.gray:
      # Averaging the channels equally would not work: a fully red object on
      # a fully green background would average to the same color, so weight
      # the channels by luminance instead.
      image = (image * self.WEIGHTS).sum(-1).astype(image.dtype)[:, :, None]
    return dict(
        image=image,
        reward=np.float32(reward),
        is_first=is_first,
        is_last=is_last,
        is_terminal=is_terminal,
    )
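

# Minimal usage sketch, assuming the `elements` and `embodied` packages are
# installed and ale-py can locate the (illustrative) Pong ROM:
#
#   env = Atari('pong', size=(84, 84), gray=True)
#   obs = env.step({'action': 0, 'reset': True})
#   assert obs['image'].shape == (84, 84, 1)
#   obs = env.step({'action': 1, 'reset': False})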