# File: key-data/models/embodied/envs/minecraft_flat.py
# (Hugging Face listing residue — commit "Add embodied module back",
# faa3682, by tostido — kept as a comment so the module parses.)
import logging
import threading
import elements
import embodied
import numpy as np
np.float = float
np.int = int
np.bool = bool
from minerl.herobraine.env_spec import EnvSpec
from minerl.herobraine.hero import handler
from minerl.herobraine.hero import handlers
from minerl.herobraine.hero import mc
from minerl.herobraine.hero.mc import INVERSE_KEYMAP
class Wood(embodied.Wrapper):
def __init__(self, *args, **kwargs):
actions = BASIC_ACTIONS
self.rewards = [
CollectReward('log', repeated=1),
HealthReward(),
]
length = kwargs.pop('length', 36000)
env = MinecraftBase(actions, *args, **kwargs)
env = embodied.wrappers.TimeLimit(env, length)
super().__init__(env)
def step(self, action):
obs = self.env.step(action)
reward = sum([fn(obs, self.env.inventory) for fn in self.rewards])
obs['reward'] = np.float32(reward)
return obs
class Climb(embodied.Wrapper):
def __init__(self, *args, **kwargs):
actions = BASIC_ACTIONS
length = kwargs.pop('length', 36000)
env = MinecraftBase(actions, *args, **kwargs)
env = embodied.wrappers.TimeLimit(env, length)
super().__init__(env)
self._previous = None
self._health_reward = HealthReward()
def step(self, action):
obs = self.env.step(action)
x, y, z = obs['log/player_pos']
height = np.float32(y)
if obs['is_first']:
self._previous = height
reward = (height - self._previous) + self._health_reward(obs)
obs['reward'] = np.float32(reward)
self._previous = height
return obs
class Diamond(embodied.Wrapper):
def __init__(self, *args, **kwargs):
actions = {
**BASIC_ACTIONS,
'craft_planks': dict(craft='planks'),
'craft_stick': dict(craft='stick'),
'craft_crafting_table': dict(craft='crafting_table'),
'place_crafting_table': dict(place='crafting_table'),
'craft_wooden_pickaxe': dict(nearbyCraft='wooden_pickaxe'),
'craft_stone_pickaxe': dict(nearbyCraft='stone_pickaxe'),
'craft_iron_pickaxe': dict(nearbyCraft='iron_pickaxe'),
'equip_stone_pickaxe': dict(equip='stone_pickaxe'),
'equip_wooden_pickaxe': dict(equip='wooden_pickaxe'),
'equip_iron_pickaxe': dict(equip='iron_pickaxe'),
'craft_furnace': dict(nearbyCraft='furnace'),
'place_furnace': dict(place='furnace'),
'smelt_iron_ingot': dict(nearbySmelt='iron_ingot'),
}
self.rewards = [
CollectReward('log', once=1),
CollectReward('planks', once=1),
CollectReward('stick', once=1),
CollectReward('crafting_table', once=1),
CollectReward('wooden_pickaxe', once=1),
CollectReward('cobblestone', once=1),
CollectReward('stone_pickaxe', once=1),
CollectReward('iron_ore', once=1),
CollectReward('furnace', once=1),
CollectReward('iron_ingot', once=1),
CollectReward('iron_pickaxe', once=1),
CollectReward('diamond', once=1),
HealthReward(),
]
length = kwargs.pop('length', 36000)
env = MinecraftBase(actions, *args, **kwargs)
env = embodied.wrappers.TimeLimit(env, length)
super().__init__(env)
def step(self, action):
obs = self.env.step(action)
reward = sum([fn(obs, self.env.inventory) for fn in self.rewards])
obs['reward'] = np.float32(reward)
return obs
BASIC_ACTIONS = {
'noop': dict(),
'attack': dict(attack=1),
'turn_up': dict(camera=(-15, 0)),
'turn_down': dict(camera=(15, 0)),
'turn_left': dict(camera=(0, -15)),
'turn_right': dict(camera=(0, 15)),
'forward': dict(forward=1),
'back': dict(back=1),
'left': dict(left=1),
'right': dict(right=1),
'jump': dict(jump=1, forward=1),
'place_dirt': dict(place='dirt'),
}
class CollectReward:
def __init__(self, item, once=0, repeated=0):
self.item = item
self.once = once
self.repeated = repeated
self.previous = 0
self.maximum = 0
def __call__(self, obs, inventory):
current = inventory[self.item]
if obs['is_first']:
self.previous = current
self.maximum = current
return 0
reward = self.repeated * max(0, current - self.previous)
if self.maximum == 0 and current > 0:
reward += self.once
self.previous = current
self.maximum = max(self.maximum, current)
return reward
class HealthReward:
def __init__(self, scale=0.01):
self.scale = scale
self.previous = None
def __call__(self, obs, inventory=None):
health = obs['health']
if obs['is_first']:
self.previous = health
return 0
reward = self.scale * (health - self.previous)
self.previous = health
return np.float32(reward)
class MinecraftBase(embodied.Env):
LOCK = threading.Lock()
NOOP = dict(
camera=(0, 0), forward=0, back=0, left=0, right=0, attack=0, sprint=0,
jump=0, sneak=0, craft='none', nearbyCraft='none', nearbySmelt='none',
place='none', equip='none')
def __init__(
self, actions,
repeat=1,
size=(64, 64),
break_speed=100.0,
gamma=10.0,
sticky_attack=30,
sticky_jump=10,
pitch_limit=(-60, 60),
log_inv_keys=('log', 'cobblestone', 'iron_ingot', 'diamond'),
logs=False,
):
if logs:
logging.basicConfig(level=logging.DEBUG)
self._repeat = repeat
self._size = size
if break_speed != 1.0:
sticky_attack = 0
# Make env
with self.LOCK:
self._gymenv = MineRLEnv(size, break_speed).make()
from . import from_gym
self._env = from_gym.FromGym(self._gymenv)
self._inventory = {}
# Observations
self._inv_keys = [
k for k in self._env.obs_space if k.startswith('inventory/')
if k != 'inventory/log2']
self._inv_log_keys = [f'inventory/{k}' for k in log_inv_keys]
assert all(k in self._inv_keys for k in self._inv_log_keys), (
self._inv_keys, self._inv_log_keys)
self._step = 0
self._max_inventory = None
self._equip_enum = self._gymenv.observation_space[
'equipped_items']['mainhand']['type'].values.tolist()
self._obs_space = self.obs_space
# Actions
actions = self._insert_defaults(actions)
self._action_names = tuple(actions.keys())
self._action_values = tuple(actions.values())
message = f'Minecraft action space ({len(self._action_values)}):'
print(message, ', '.join(self._action_names))
self._sticky_attack_length = sticky_attack
self._sticky_attack_counter = 0
self._sticky_jump_length = sticky_jump
self._sticky_jump_counter = 0
self._pitch_limit = pitch_limit
self._pitch = 0
@property
def obs_space(self):
return {
'image': elements.Space(np.uint8, self._size + (3,)),
'inventory': elements.Space(np.float32, len(self._inv_keys), 0),
'inventory_max': elements.Space(np.float32, len(self._inv_keys), 0),
'equipped': elements.Space(np.float32, len(self._equip_enum), 0, 1),
'reward': elements.Space(np.float32),
'health': elements.Space(np.float32),
'hunger': elements.Space(np.float32),
'breath': elements.Space(np.float32),
'is_first': elements.Space(bool),
'is_last': elements.Space(bool),
'is_terminal': elements.Space(bool),
**{f'log/{k}': elements.Space(np.int64) for k in self._inv_log_keys},
# 'log/player_pos': elements.Space(np.float32, 3),
}
@property
def act_space(self):
return {
'action': elements.Space(np.int32, (), 0, len(self._action_values)),
'reset': elements.Space(bool),
}
def step(self, action):
action = action.copy()
index = action.pop('action')
action.update(self._action_values[index])
action = self._action(action)
if action['reset']:
obs = self._reset()
else:
following = self.NOOP.copy()
for key in ('attack', 'forward', 'back', 'left', 'right'):
following[key] = action[key]
for act in [action] + ([following] * (self._repeat - 1)):
obs = self._env.step(act)
if self._env.info and 'error' in self._env.info:
obs = self._reset()
break
obs = self._obs(obs)
self._step += 1
assert 'pov' not in obs, list(obs.keys())
return obs
@property
def inventory(self):
return self._inventory
def _reset(self):
with self.LOCK:
obs = self._env.step({'reset': True})
self._step = 0
self._max_inventory = None
self._sticky_attack_counter = 0
self._sticky_jump_counter = 0
self._pitch = 0
self._inventory = {}
return obs
def _obs(self, obs):
obs['inventory/log'] += obs.pop('inventory/log2')
self._inventory = {
k.split('/', 1)[1]: obs[k] for k in self._inv_keys
if k != 'inventory/air'}
inventory = np.array([obs[k] for k in self._inv_keys], np.float32)
if self._max_inventory is None:
self._max_inventory = inventory
else:
self._max_inventory = np.maximum(self._max_inventory, inventory)
index = self._equip_enum.index(obs['equipped_items/mainhand/type'])
equipped = np.zeros(len(self._equip_enum), np.float32)
equipped[index] = 1.0
# player_x = obs['location_stats/xpos']
# player_y = obs['location_stats/ypos']
# player_z = obs['location_stats/zpos']
obs = {
'image': obs['pov'],
'inventory': inventory,
'inventory_max': self._max_inventory.copy(),
'equipped': equipped,
'health': np.float32(obs['life_stats/life'] / 20),
'hunger': np.float32(obs['life_stats/food'] / 20),
'breath': np.float32(obs['life_stats/air'] / 300),
'reward': np.float32(0.0),
'is_first': obs['is_first'],
'is_last': obs['is_last'],
'is_terminal': obs['is_terminal'],
**{f'log/{k}': np.int64(obs[k]) for k in self._inv_log_keys},
# 'log/player_pos': np.array([player_x, player_y, player_z], np.float32),
}
for key, value in obs.items():
space = self._obs_space[key]
if not isinstance(value, np.ndarray):
value = np.array(value)
assert value in space, (key, value, value.dtype, value.shape, space)
return obs
def _action(self, action):
if self._sticky_attack_length:
if action['attack']:
self._sticky_attack_counter = self._sticky_attack_length
if self._sticky_attack_counter > 0:
action['attack'] = 1
action['jump'] = 0
self._sticky_attack_counter -= 1
if self._sticky_jump_length:
if action['jump']:
self._sticky_jump_counter = self._sticky_jump_length
if self._sticky_jump_counter > 0:
action['jump'] = 1
action['forward'] = 1
self._sticky_jump_counter -= 1
if self._pitch_limit and action['camera'][0]:
lo, hi = self._pitch_limit
if not (lo <= self._pitch + action['camera'][0] <= hi):
action['camera'] = (0, action['camera'][1])
self._pitch += action['camera'][0]
return action
def _insert_defaults(self, actions):
actions = {name: action.copy() for name, action in actions.items()}
for key, default in self.NOOP.items():
for action in actions.values():
if key not in action:
action[key] = default
return actions
class MineRLEnv(EnvSpec):
def __init__(self, resolution=(64, 64), break_speed=50):
self.resolution = resolution
self.break_speed = break_speed
super().__init__(name='MineRLEnv-v1')
def create_agent_start(self):
return [BreakSpeedMultiplier(self.break_speed)]
def create_agent_handlers(self):
return []
def create_server_world_generators(self):
return [handlers.DefaultWorldGenerator(force_reset=True)]
def create_server_quit_producers(self):
return [handlers.ServerQuitWhenAnyAgentFinishes()]
def create_server_initial_conditions(self):
return [
handlers.TimeInitialCondition(
allow_passage_of_time=True, start_time=0),
handlers.SpawningInitialCondition(allow_spawning=True),
]
def create_observables(self):
return [
handlers.POVObservation(self.resolution),
handlers.FlatInventoryObservation(mc.ALL_ITEMS),
handlers.EquippedItemObservation(
mc.ALL_ITEMS, _default='air', _other='other'),
handlers.ObservationFromCurrentLocation(),
handlers.ObservationFromLifeStats(),
]
def create_actionables(self):
kw = dict(_other='none', _default='none')
return [
handlers.KeybasedCommandAction('forward', INVERSE_KEYMAP['forward']),
handlers.KeybasedCommandAction('back', INVERSE_KEYMAP['back']),
handlers.KeybasedCommandAction('left', INVERSE_KEYMAP['left']),
handlers.KeybasedCommandAction('right', INVERSE_KEYMAP['right']),
handlers.KeybasedCommandAction('jump', INVERSE_KEYMAP['jump']),
handlers.KeybasedCommandAction('sneak', INVERSE_KEYMAP['sneak']),
handlers.KeybasedCommandAction('attack', INVERSE_KEYMAP['attack']),
handlers.CameraAction(),
handlers.PlaceBlock(['none'] + mc.ALL_ITEMS, **kw),
handlers.EquipAction(['none'] + mc.ALL_ITEMS, **kw),
handlers.CraftAction(['none'] + mc.ALL_ITEMS, **kw),
handlers.CraftNearbyAction(['none'] + mc.ALL_ITEMS, **kw),
handlers.SmeltItemNearby(['none'] + mc.ALL_ITEMS, **kw),
]
def is_from_folder(self, folder):
return folder == 'none'
def get_docstring(self):
return ''
def determine_success_from_rewards(self, rewards):
return True
def create_rewardables(self):
return []
def create_server_decorators(self):
return []
def create_mission_handlers(self):
return []
def create_monitors(self):
return []
class BreakSpeedMultiplier(handler.Handler):
def __init__(self, multiplier=1.0):
self.multiplier = multiplier
def to_string(self):
return f'break_speed({self.multiplier})'
def xml_template(self):
return '<BreakSpeedMultiplier>{{multiplier}}</BreakSpeedMultiplier>'