repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/motion_imitation/envs/gym_envs/__init__.py | motion_imitation/envs/gym_envs/__init__.py | """Setup such that environment can be created using gym.make()."""
from motion_imitation.envs.gym_envs.a1_gym_env import A1GymEnv
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/motion_imitation/envs/utilities/heightfield_randomizer.py | motion_imitation/envs/utilities/heightfield_randomizer.py | """Generates a random bumpy terrain at environment reset."""
import numpy as np
from pybullet_envs.minitaur.envs import env_randomizer_base
class HeightfieldRandomizer(env_randomizer_base.EnvRandomizerBase):
  """Generates an uneven, randomly bumpy terrain in the gym env."""

  def __init__(self, max_height_perturbation=.05):
    """Initializes the randomizer.

    Args:
      max_height_perturbation: Max height of bumps in meters.
    """
    self._max_height_perturbation = max_height_perturbation
    # -1 asks pybullet to create a brand-new heightfield shape on first use;
    # afterwards it holds the shape id so the field is updated in place.
    self._terrain_shape = -1
    self._initial = True
    self._n_rows = 128
    self._n_cols = 128
    self._heightfield_data = [0] * self._n_rows * self._n_cols
    self.terrain = None

  def randomize_env(self, env):
    """Regenerates the bumpy terrain and installs it as the ground plane."""
    data = self._heightfield_data
    stride = self._n_rows
    for row in range(int(self._n_rows / 2)):
      for col in range(int(self._n_cols / 2)):
        bump = np.random.uniform(0, self._max_height_perturbation)
        # Each sampled bump fills a 2x2 patch of heightfield cells.
        base = 2 * col + 2 * row * stride
        data[base] = bump
        data[base + 1] = bump
        data[base + stride] = bump
        data[base + stride + 1] = bump
    client = env.pybullet_client
    # Rendering while loading is slow, so suspend it during the rebuild.
    if env.rendering_enabled:
      client.configureDebugVisualizer(client.COV_ENABLE_RENDERING, 0)
    self._terrain_shape = client.createCollisionShape(
        shapeType=client.GEOM_HEIGHTFIELD,
        flags=client.GEOM_CONCAVE_INTERNAL_EDGE,
        meshScale=[.15, .15, 1],
        heightfieldData=data,
        numHeightfieldRows=self._n_rows,
        numHeightfieldColumns=self._n_cols,
        replaceHeightfieldIndex=self._terrain_shape)
    if self._initial:
      # On the very first randomization, swap the flat ground for the terrain.
      client.removeBody(env.get_ground())
      self.terrain = client.createMultiBody(0, self._terrain_shape)
      env.set_ground(self.terrain)
      self._initial = False
    texture_id = client.loadTexture("checker_blue.png")
    client.changeVisualShape(
        self.terrain, -1, textureUniqueId=texture_id, rgbaColor=(1, 1, 1, 1))
    # Center terrain under robot in case robot is resetting in place.
    x, y, _ = env.robot.GetBasePosition()
    client.resetBasePositionAndOrientation(self.terrain, [x, y, 0],
                                           [0, 0, 0, 1])
    if env.rendering_enabled:
      client.configureDebugVisualizer(client.COV_ENABLE_RENDERING, 1)
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/motion_imitation/envs/utilities/fallen_robot_randomizer.py | motion_imitation/envs/utilities/fallen_robot_randomizer.py | """Drops the robot with random orientation at episode start."""
import os
import inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(os.path.dirname(currentdir))
os.sys.path.insert(0, parentdir)
import numpy as np
from motion_imitation.envs.utilities import env_randomizer_base
class FallenRobotRandomizer(env_randomizer_base.EnvRandomizerBase):
  """Drops the robot with a random orientation at episode start."""

  def __init__(self, max_roll_pitch=np.pi):
    """Initializes the randomizer.

    Args:
      max_roll_pitch: Max magnitude (radians) of the sampled roll and pitch.
    """
    self._max_roll_pitch = max_roll_pitch

  def randomize_env(self, env):
    """Resets the pose, drops the robot from a random orientation, settles."""
    env.robot.ResetPose(add_constraint=False)
    rp = self._max_roll_pitch
    # Roll/pitch are bounded by max_roll_pitch; yaw spans the full circle.
    sampled_euler = np.random.uniform(
        low=[-rp, -rp, -np.pi], high=[rp, rp, np.pi])
    drop_height = np.random.uniform(low=.3, high=.8)
    env.pybullet_client.resetBasePositionAndOrientation(
        bodyUniqueId=env.robot.quadruped,
        posObj=[0, 0, drop_height],
        ornObj=env.pybullet_client.getQuaternionFromEuler(sampled_euler))
    # Step the simulation so the robot lands and comes to rest.
    for _ in range(1000):
      env.pybullet_client.stepSimulation()
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/motion_imitation/envs/utilities/env_randomizer_base.py | motion_imitation/envs/utilities/env_randomizer_base.py | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Abstract base class for environment randomizer."""
import abc
class EnvRandomizerBase(object):
  """Abstract base class for environment randomizer.

  Randomizes physical parameters of the objects in the simulation and adds
  perturbations to the stepping of the simulation.
  """
  # NOTE(review): Python-2 style metaclass declaration. Under Python 3 this
  # is an ordinary class attribute, so @abc.abstractmethod is not enforced
  # and the class remains instantiable. Kept as-is for compatibility.
  __metaclass__ = abc.ABCMeta

  @abc.abstractmethod
  def randomize_env(self, env):
    """Randomize the simulated_objects in the environment.

    Will be called at when env is reset. The physical parameters will be fixed
    for that episode and be randomized again in the next environment.reset().

    Args:
      env: The Minitaur gym environment to be randomized.
    """

  def randomize_step(self, env):
    """Randomize simulation steps.

    Will be called at every timestep. May add random forces/torques to
    Minitaur.

    Args:
      env: The Minitaur gym environment to be randomized.
    """
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/motion_imitation/envs/utilities/minitaur_env_randomizer.py | motion_imitation/envs/utilities/minitaur_env_randomizer.py | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
"""Randomize the minitaur_gym_env when reset() is called."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(os.path.dirname(currentdir))
os.sys.path.insert(0, parentdir)
import random
import numpy as np
from six.moves import range
from motion_imitation.envs.utilities import env_randomizer_base
# Relative range.
# These are fractional errors applied to URDF values: 0.2 means +/-20%.
MINITAUR_BASE_MASS_ERROR_RANGE = (-0.2, 0.2)  # 0.2 means 20%
MINITAUR_LEG_MASS_ERROR_RANGE = (-0.2, 0.2)  # 0.2 means 20%
# Absolute range.
BATTERY_VOLTAGE_RANGE = (14.8, 16.8)  # Unit: Volt
MOTOR_VISCOUS_DAMPING_RANGE = (0, 0.01)  # Unit:N*m*s/rad (torque/angular vel)
MINITAUR_LEG_FRICTION = (0.8, 1.5)  # Unit: dimensionless
class MinitaurEnvRandomizer(env_randomizer_base.EnvRandomizerBase):
  """A randomizer that changes the minitaur_gym_env during every reset."""

  def __init__(self,
               minitaur_base_mass_err_range=MINITAUR_BASE_MASS_ERROR_RANGE,
               minitaur_leg_mass_err_range=MINITAUR_LEG_MASS_ERROR_RANGE,
               battery_voltage_range=BATTERY_VOLTAGE_RANGE,
               motor_viscous_damping_range=MOTOR_VISCOUS_DAMPING_RANGE):
    """Initializes the randomizer.

    Args:
      minitaur_base_mass_err_range: Relative (fractional) error range applied
        to the base masses from the URDF.
      minitaur_leg_mass_err_range: Relative (fractional) error range applied
        to the leg masses from the URDF.
      battery_voltage_range: Absolute voltage range, in volts.
      motor_viscous_damping_range: Absolute damping range, in N*m*s/rad.
    """
    self._minitaur_base_mass_err_range = minitaur_base_mass_err_range
    self._minitaur_leg_mass_err_range = minitaur_leg_mass_err_range
    self._battery_voltage_range = battery_voltage_range
    self._motor_viscous_damping_range = motor_viscous_damping_range

  def randomize_env(self, env):
    """Randomizes the minitaur in `env`; called at every env.reset()."""
    self._randomize_minitaur(env.minitaur)

  def _randomize_minitaur(self, minitaur):
    """Randomize various physical properties of minitaur.

    It randomizes the mass/inertia of the base, mass/inertia of the legs,
    friction coefficient of the feet, the battery voltage and the motor
    damping at each reset() of the environment.

    Args:
      minitaur: the Minitaur instance in minitaur_gym_env environment.
    """
    base_mass = minitaur.GetBaseMassesFromURDF()
    # NOTE(review): random.uniform with array endpoints draws ONE scalar
    # fraction and interpolates every element with it (kept from original).
    randomized_base_mass = random.uniform(
        np.array(base_mass) * (1.0 + self._minitaur_base_mass_err_range[0]),
        np.array(base_mass) * (1.0 + self._minitaur_base_mass_err_range[1]))
    minitaur.SetBaseMasses(randomized_base_mass)

    leg_masses = minitaur.GetLegMassesFromURDF()
    leg_masses_lower_bound = np.array(leg_masses) * (
        1.0 + self._minitaur_leg_mass_err_range[0])
    leg_masses_upper_bound = np.array(leg_masses) * (
        1.0 + self._minitaur_leg_mass_err_range[1])
    randomized_leg_masses = [
        np.random.uniform(leg_masses_lower_bound[i], leg_masses_upper_bound[i])
        for i in range(len(leg_masses))
    ]
    minitaur.SetLegMasses(randomized_leg_masses)

    # Bug fix: use the ranges supplied to the constructor. Previously the
    # module-level constants were used here, so custom `battery_voltage_range`
    # and `motor_viscous_damping_range` arguments were silently ignored.
    randomized_battery_voltage = random.uniform(
        self._battery_voltage_range[0], self._battery_voltage_range[1])
    minitaur.SetBatteryVoltage(randomized_battery_voltage)

    randomized_motor_damping = random.uniform(
        self._motor_viscous_damping_range[0],
        self._motor_viscous_damping_range[1])
    minitaur.SetMotorViscousDamping(randomized_motor_damping)

    # Foot friction has no constructor parameter; the module constant is the
    # single source of truth for its range.
    randomized_foot_friction = random.uniform(MINITAUR_LEG_FRICTION[0],
                                              MINITAUR_LEG_FRICTION[1])
    minitaur.SetFootFriction(randomized_foot_friction)
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/motion_imitation/envs/utilities/env_utils.py | motion_imitation/envs/utilities/env_utils.py | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions to manipulate environment."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from gym import spaces
import numpy as np
def flatten_observations(observation_dict, observation_excluded=()):
  """Flattens the observation dictionary to an array.

  If observation_excluded is passed in, it will still return a dictionary,
  which includes all the (key, observation_dict[key]) in observation_excluded,
  and ('other': the flattened array).

  Args:
    observation_dict: A dictionary of all the observations.
    observation_excluded: A list/tuple of all the keys of the observations to
      be ignored during flattening.

  Returns:
    An array or a dictionary of observations based on whether
      observation_excluded is empty.
  """
  # Normalize a single excluded key into a sequence.
  if not isinstance(observation_excluded, (list, tuple)):
    observation_excluded = [observation_excluded]
  flat_observations = np.concatenate([
      np.asarray(value).flatten()
      for key, value in observation_dict.items()
      if key not in observation_excluded
  ])
  if not observation_excluded:
    return flat_observations
  # Keep excluded entries untouched alongside the flattened remainder.
  split_result = {"other": flat_observations}
  for key in observation_excluded:
    split_result[key] = observation_dict[key]
  return collections.OrderedDict(sorted(split_result.items()))
def flatten_observation_spaces(observation_spaces, observation_excluded=()):
  """Flattens the dictionary observation spaces to gym.spaces.Box.

  If observation_excluded is passed in, it will still return a dictionary,
  which includes all the (key, observation_spaces[key]) in observation_excluded,
  and ('other': the flattened Box space).

  Args:
    observation_spaces: A dictionary of all the observation spaces.
    observation_excluded: A list/tuple of all the keys of the observations to
      be ignored during flattening.

  Returns:
    A box space or a dictionary of observation spaces based on whether
      observation_excluded is empty.
  """
  # Normalize a single excluded key into a sequence.
  if not isinstance(observation_excluded, (list, tuple)):
    observation_excluded = [observation_excluded]
  lows = []
  highs = []
  for key, space in observation_spaces.spaces.items():
    if key in observation_excluded:
      continue
    lows.append(np.asarray(space.low).flatten())
    highs.append(np.asarray(space.high).flatten())
  flattened_space = spaces.Box(
      np.array(np.concatenate(lows)),
      np.array(np.concatenate(highs)),
      dtype=np.float32)
  if not flatten_observation_spaces_excluded_check(observation_excluded):
    return flattened_space
  space_dict = {"other": flattened_space}
  for key in observation_excluded:
    space_dict[key] = observation_spaces[key]
  return spaces.Dict(space_dict)


def flatten_observation_spaces_excluded_check(observation_excluded):
  """Returns True when any keys are excluded (result must stay a dict)."""
  return bool(observation_excluded)
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/motion_imitation/envs/utilities/controllable_env_randomizer_from_config.py | motion_imitation/envs/utilities/controllable_env_randomizer_from_config.py | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A controllable environment randomizer that randomizes physical parameters from config."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(os.path.dirname(currentdir))
os.sys.path.insert(0, parentdir)
import copy
import functools
import numpy as np
import tensorflow as tf
from motion_imitation.envs.utilities import controllable_env_randomizer_base
from motion_imitation.envs.utilities import minitaur_env_randomizer_config
# Physics simulation time step, in seconds.
SIMULATION_TIME_STEP = 0.001
# Number of legs; used to split num_motors into per-leg motor groups.
NUM_LEGS = 4
class ControllableEnvRandomizerFromConfig(
    controllable_env_randomizer_base.ControllableEnvRandomizerBase):
  """A randomizer that changes the minitaur_gym_env during every reset.

  The parameters to randomize and their ranges come from a named function in
  minitaur_env_randomizer_config. Each parameter is sampled in a normalized
  space (self._param_bounds) and linearly mapped onto its configured range,
  so the normalized samples can be saved and re-applied later.
  """

  def __init__(self,
               config=None,
               verbose=True,
               param_bounds=(-1., 1.),
               randomization_seed=None):
    """Initializes the randomizer.

    Args:
      config: Name of a function in minitaur_env_randomizer_config that
        returns the randomization parameter dict; defaults to "all_params".
      verbose: Whether to log every randomized value.
      param_bounds: (low, high) bounds of the normalized sampling space.
      randomization_seed: Optional seed for reproducible randomization.

    Raises:
      ValueError: If `config` does not name a known configuration.
    """
    if config is None:
      config = "all_params"
    try:
      config = getattr(minitaur_env_randomizer_config, config)
    except AttributeError:
      raise ValueError("Config {} is not found.".format(config))
    self._randomization_param_dict = config()
    tf.logging.info("Randomization config is: {}".format(
        self._randomization_param_dict))
    # Maps param name -> last sampled normalized value(s).
    self._randomization_param_value_dict = {}
    self._randomization_seed = randomization_seed
    self._param_bounds = param_bounds
    self._suspend_randomization = False
    self._verbose = verbose
    # Maps param name -> normalized [low, high] region to reject.
    self._rejection_param_range = {}
    self._np_random = np.random.RandomState()

  @property
  def suspend_randomization(self):
    """Whether randomize_env skips sampling and re-applies stored values."""
    return self._suspend_randomization

  @suspend_randomization.setter
  def suspend_randomization(self, suspend_rand):
    self._suspend_randomization = suspend_rand

  @property
  def randomization_seed(self):
    """The seed used for reproducible controllable randomization."""
    return self._randomization_seed

  @randomization_seed.setter
  def randomization_seed(self, seed):
    self._randomization_seed = seed

  def _check_all_randomization_parameter_in_rejection_range(self):
    """Check if current randomized parameters are in the region to be rejected."""
    for param_name, reject_random_range in sorted(
        self._rejection_param_range.items()):
      randomized_value = self._randomization_param_value_dict[param_name]
      # Any component outside the rejection region means the sample is kept.
      if np.any(randomized_value < reject_random_range[0]) or np.any(
          randomized_value > reject_random_range[1]):
        return False
    return True

  def randomize_env(self, env):
    """Randomize various physical properties of the environment.

    It randomizes the physical parameters according to the input configuration.

    Args:
      env: A minitaur gym environment.
    """
    if not self.suspend_randomization:
      # Use a specific seed for controllable randomization.
      if self._randomization_seed is not None:
        self._np_random.seed(self._randomization_seed)
      self._randomization_function_dict = self._build_randomization_function_dict(
          env)
      self._rejection_param_range = {}
      for param_name, random_range in sorted(
          self._randomization_param_dict.items()):
        self._randomization_function_dict[param_name](
            lower_bound=random_range[0], upper_bound=random_range[1])
        # A 4-element range additionally specifies a region to reject.
        if len(random_range) == 4:
          self._rejection_param_range[param_name] = [
              random_range[2], random_range[3]
          ]
      if self._rejection_param_range:
        # Re-sample everything until the sample leaves the rejection region.
        while self._check_all_randomization_parameter_in_rejection_range():
          for param_name, random_range in sorted(
              self._randomization_param_dict.items()):
            self._randomization_function_dict[param_name](
                lower_bound=random_range[0], upper_bound=random_range[1])
    elif self._randomization_param_value_dict:
      # Re-apply the randomization because hard_reset might change previously
      # randomized parameters.
      self.set_env_from_randomization_parameters(
          env, self._randomization_param_value_dict)

  def get_randomization_parameters(self):
    """Returns a deep copy of the last sampled normalized parameter values."""
    return copy.deepcopy(self._randomization_param_value_dict)

  def set_env_from_randomization_parameters(self, env,
                                            randomization_parameters):
    """Applies previously sampled normalized parameters to `env`.

    Args:
      env: A minitaur gym environment.
      randomization_parameters: Dict of param name -> normalized value(s), as
        returned by get_randomization_parameters().
    """
    self._randomization_param_value_dict = randomization_parameters
    # Run the randomization function to propagate the parameters.
    self._randomization_function_dict = self._build_randomization_function_dict(
        env)
    for param_name, random_range in self._randomization_param_dict.items():
      self._randomization_function_dict[param_name](
          lower_bound=random_range[0],
          upper_bound=random_range[1],
          parameters=randomization_parameters[param_name])

  def _get_robot_from_env(self, env):
    """Returns the robot object from a v1 or v2 environment, else None."""
    if hasattr(env, "minitaur"):  # Compatible with v1 envs.
      return env.minitaur
    elif hasattr(env, "robot"):  # Compatible with v2 envs.
      return env.robot
    else:
      return None

  def _build_randomization_function_dict(self, env):
    """Maps each config param name to its bound randomization function."""
    func_dict = {}
    robot = self._get_robot_from_env(env)
    func_dict["mass"] = functools.partial(self._randomize_masses,
                                          minitaur=robot)
    func_dict["individual mass"] = functools.partial(
        self._randomize_individual_masses, minitaur=robot)
    func_dict["base mass"] = functools.partial(self._randomize_basemass,
                                               minitaur=robot)
    func_dict["inertia"] = functools.partial(self._randomize_inertia,
                                             minitaur=robot)
    func_dict["individual inertia"] = functools.partial(
        self._randomize_individual_inertia, minitaur=robot)
    func_dict["latency"] = functools.partial(self._randomize_latency,
                                             minitaur=robot)
    func_dict["joint friction"] = functools.partial(
        self._randomize_joint_friction, minitaur=robot)
    func_dict["motor friction"] = functools.partial(
        self._randomize_motor_friction, minitaur=robot)
    func_dict["restitution"] = functools.partial(
        self._randomize_contact_restitution, minitaur=robot)
    func_dict["lateral friction"] = functools.partial(
        self._randomize_contact_friction, minitaur=robot)
    func_dict["battery"] = functools.partial(self._randomize_battery_level,
                                             minitaur=robot)
    func_dict["motor strength"] = functools.partial(
        self._randomize_motor_strength, minitaur=robot)
    func_dict["global motor strength"] = functools.partial(
        self._randomize_global_motor_strength, minitaur=robot)
    # Setting control step needs access to the environment.
    func_dict["control step"] = functools.partial(self._randomize_control_step,
                                                  env=env)
    func_dict["leg weaken"] = functools.partial(self._randomize_leg_weakening,
                                                minitaur=robot)
    func_dict["single leg weaken"] = functools.partial(
        self._randomize_single_leg_weakening, minitaur=robot)
    return func_dict

  def _randomize_control_step(self,
                              env,
                              lower_bound,
                              upper_bound,
                              parameters=None):
    """Randomizes the env control step within [lower_bound, upper_bound].

    Args:
      env: The environment whose control step is set.
      lower_bound: Lower end of the real-valued target range.
      upper_bound: Upper end of the real-valued target range.
      parameters: Previously sampled normalized value; if None, a fresh
        sample is drawn. (Same convention for all _randomize_* methods.)
    """
    if parameters is None:
      sample = self._np_random.uniform(self._param_bounds[0],
                                       self._param_bounds[1])
    else:
      sample = parameters
    self._randomization_param_value_dict["control step"] = sample
    # Linearly map the normalized sample onto [lower_bound, upper_bound].
    randomized_control_step = (sample - self._param_bounds[0]) / (
        self._param_bounds[1] -
        self._param_bounds[0]) * (upper_bound - lower_bound) + lower_bound
    # NOTE(review): truncated to int — presumably the control step is in
    # whole simulation steps; confirm against env.set_time_step.
    randomized_control_step = int(randomized_control_step)
    env.set_time_step(randomized_control_step)
    if self._verbose:
      tf.logging.info("control step is: {}".format(randomized_control_step))

  def _randomize_masses(self,
                        minitaur,
                        lower_bound,
                        upper_bound,
                        parameters=None):
    """Scales base and leg masses by two shared random ratios."""
    if parameters is None:
      sample = self._np_random.uniform([self._param_bounds[0]] * 2,
                                       [self._param_bounds[1]] * 2)
    else:
      sample = parameters
    self._randomization_param_value_dict["mass"] = sample
    randomized_mass_ratios = (sample - self._param_bounds[0]) / (
        self._param_bounds[1] -
        self._param_bounds[0]) * (upper_bound - lower_bound) + lower_bound
    base_mass = minitaur.GetBaseMassesFromURDF()
    random_base_ratio = randomized_mass_ratios[0]
    randomized_base_mass = random_base_ratio * np.array(base_mass)
    minitaur.SetBaseMasses(randomized_base_mass)
    if self._verbose:
      tf.logging.info("base mass is: {}".format(randomized_base_mass))
    leg_masses = minitaur.GetLegMassesFromURDF()
    random_leg_ratio = randomized_mass_ratios[1]
    randomized_leg_masses = random_leg_ratio * np.array(leg_masses)
    minitaur.SetLegMasses(randomized_leg_masses)
    if self._verbose:
      tf.logging.info("leg mass is: {}".format(randomized_leg_masses))

  def _randomize_individual_masses(self,
                                   minitaur,
                                   lower_bound,
                                   upper_bound,
                                   parameters=None):
    """Scales every base/leg link mass by its own independent random ratio."""
    base_mass = minitaur.GetBaseMassesFromURDF()
    leg_masses = minitaur.GetLegMassesFromURDF()
    param_dim = len(base_mass) + len(leg_masses)
    if parameters is None:
      sample = self._np_random.uniform([self._param_bounds[0]] * param_dim,
                                       [self._param_bounds[1]] * param_dim)
    else:
      sample = parameters
    self._randomization_param_value_dict["individual mass"] = sample
    randomized_mass_ratios = (sample - self._param_bounds[0]) / (
        self._param_bounds[1] -
        self._param_bounds[0]) * (upper_bound - lower_bound) + lower_bound
    random_base_ratio = randomized_mass_ratios[0:len(base_mass)]
    randomized_base_mass = random_base_ratio * np.array(base_mass)
    minitaur.SetBaseMasses(randomized_base_mass)
    if self._verbose:
      tf.logging.info("base mass is: {}".format(randomized_base_mass))
    random_leg_ratio = randomized_mass_ratios[len(base_mass):]
    randomized_leg_masses = random_leg_ratio * np.array(leg_masses)
    minitaur.SetLegMasses(randomized_leg_masses)
    if self._verbose:
      tf.logging.info("randomization dim: {}".format(param_dim))
      tf.logging.info("leg mass is: {}".format(randomized_leg_masses))

  def _randomize_basemass(self,
                          minitaur,
                          lower_bound,
                          upper_bound,
                          parameters=None):
    """Scales all base masses by one shared random ratio."""
    if parameters is None:
      sample = self._np_random.uniform(self._param_bounds[0],
                                       self._param_bounds[1])
    else:
      sample = parameters
    self._randomization_param_value_dict["base mass"] = sample
    randomized_mass_ratios = (sample - self._param_bounds[0]) / (
        self._param_bounds[1] -
        self._param_bounds[0]) * (upper_bound - lower_bound) + lower_bound
    base_mass = minitaur.GetBaseMassesFromURDF()
    random_base_ratio = randomized_mass_ratios
    randomized_base_mass = random_base_ratio * np.array(base_mass)
    minitaur.SetBaseMasses(randomized_base_mass)
    if self._verbose:
      tf.logging.info("base mass is: {}".format(randomized_base_mass))

  def _randomize_individual_inertia(self,
                                    minitaur,
                                    lower_bound,
                                    upper_bound,
                                    parameters=None):
    """Scales each link's 3 inertia components by independent random ratios."""
    base_inertia = minitaur.GetBaseInertiasFromURDF()
    leg_inertia = minitaur.GetLegInertiasFromURDF()
    # 3 ratios (one per principal axis) per link.
    param_dim = (len(base_inertia) + len(leg_inertia)) * 3
    if parameters is None:
      sample = self._np_random.uniform([self._param_bounds[0]] * param_dim,
                                       [self._param_bounds[1]] * param_dim)
    else:
      sample = parameters
    self._randomization_param_value_dict["individual inertia"] = sample
    randomized_inertia_ratios = (sample - self._param_bounds[0]) / (
        self._param_bounds[1] -
        self._param_bounds[0]) * (upper_bound - lower_bound) + lower_bound
    random_base_ratio = np.reshape(
        randomized_inertia_ratios[0:len(base_inertia) * 3],
        (len(base_inertia), 3))
    randomized_base_inertia = random_base_ratio * np.array(base_inertia)
    minitaur.SetBaseInertias(randomized_base_inertia)
    if self._verbose:
      tf.logging.info("base inertia is: {}".format(randomized_base_inertia))
    random_leg_ratio = np.reshape(
        randomized_inertia_ratios[len(base_inertia) * 3:],
        (len(leg_inertia), 3))
    randomized_leg_inertia = random_leg_ratio * np.array(leg_inertia)
    minitaur.SetLegInertias(randomized_leg_inertia)
    if self._verbose:
      tf.logging.info("leg inertia is: {}".format(randomized_leg_inertia))

  def _randomize_inertia(self,
                         minitaur,
                         lower_bound,
                         upper_bound,
                         parameters=None):
    """Scales base and leg inertias by two shared random ratios."""
    if parameters is None:
      sample = self._np_random.uniform([self._param_bounds[0]] * 2,
                                       [self._param_bounds[1]] * 2)
    else:
      sample = parameters
    self._randomization_param_value_dict["inertia"] = sample
    randomized_inertia_ratios = (sample - self._param_bounds[0]) / (
        self._param_bounds[1] -
        self._param_bounds[0]) * (upper_bound - lower_bound) + lower_bound
    base_inertia = minitaur.GetBaseInertiasFromURDF()
    random_base_ratio = randomized_inertia_ratios[0]
    randomized_base_inertia = random_base_ratio * np.array(base_inertia)
    minitaur.SetBaseInertias(randomized_base_inertia)
    if self._verbose:
      tf.logging.info("base inertia is: {}".format(randomized_base_inertia))
    leg_inertia = minitaur.GetLegInertiasFromURDF()
    random_leg_ratio = randomized_inertia_ratios[1]
    randomized_leg_inertia = random_leg_ratio * np.array(leg_inertia)
    minitaur.SetLegInertias(randomized_leg_inertia)
    if self._verbose:
      tf.logging.info("leg inertia is: {}".format(randomized_leg_inertia))

  def _randomize_latency(self,
                         minitaur,
                         lower_bound,
                         upper_bound,
                         parameters=None):
    """Randomizes the control latency within [lower_bound, upper_bound]."""
    if parameters is None:
      sample = self._np_random.uniform(self._param_bounds[0],
                                       self._param_bounds[1])
    else:
      sample = parameters
    self._randomization_param_value_dict["latency"] = sample
    randomized_latency = (sample - self._param_bounds[0]) / (
        self._param_bounds[1] -
        self._param_bounds[0]) * (upper_bound - lower_bound) + lower_bound
    minitaur.SetControlLatency(randomized_latency)
    if self._verbose:
      tf.logging.info("control latency is: {}".format(randomized_latency))

  def _randomize_joint_friction(self,
                                minitaur,
                                lower_bound,
                                upper_bound,
                                parameters=None):
    """Randomizes the friction of each knee joint independently."""
    num_knee_joints = minitaur.GetNumKneeJoints()
    if parameters is None:
      sample = self._np_random.uniform(
          [self._param_bounds[0]] * num_knee_joints,
          [self._param_bounds[1]] * num_knee_joints)
    else:
      sample = parameters
    self._randomization_param_value_dict["joint friction"] = sample
    randomized_joint_frictions = (sample - self._param_bounds[0]) / (
        self._param_bounds[1] -
        self._param_bounds[0]) * (upper_bound - lower_bound) + lower_bound
    minitaur.SetJointFriction(randomized_joint_frictions)
    if self._verbose:
      tf.logging.info(
          "joint friction is: {}".format(randomized_joint_frictions))

  def _randomize_motor_friction(self,
                                minitaur,
                                lower_bound,
                                upper_bound,
                                parameters=None):
    """Randomizes the motor viscous damping."""
    if parameters is None:
      sample = self._np_random.uniform(self._param_bounds[0],
                                       self._param_bounds[1])
    else:
      sample = parameters
    self._randomization_param_value_dict["motor friction"] = sample
    randomized_motor_damping = (sample - self._param_bounds[0]) / (
        self._param_bounds[1] -
        self._param_bounds[0]) * (upper_bound - lower_bound) + lower_bound
    minitaur.SetMotorViscousDamping(randomized_motor_damping)
    if self._verbose:
      tf.logging.info("motor friction is: {}".format(randomized_motor_damping))

  def _randomize_contact_restitution(self,
                                     minitaur,
                                     lower_bound,
                                     upper_bound,
                                     parameters=None):
    """Randomizes the foot contact restitution coefficient."""
    if parameters is None:
      sample = self._np_random.uniform(self._param_bounds[0],
                                       self._param_bounds[1])
    else:
      sample = parameters
    self._randomization_param_value_dict["restitution"] = sample
    randomized_restitution = (sample - self._param_bounds[0]) / (
        self._param_bounds[1] -
        self._param_bounds[0]) * (upper_bound - lower_bound) + lower_bound
    minitaur.SetFootRestitution(randomized_restitution)
    if self._verbose:
      tf.logging.info("foot restitution is: {}".format(randomized_restitution))

  def _randomize_contact_friction(self,
                                  minitaur,
                                  lower_bound,
                                  upper_bound,
                                  parameters=None):
    """Randomizes the foot lateral friction coefficient."""
    if parameters is None:
      sample = self._np_random.uniform(self._param_bounds[0],
                                       self._param_bounds[1])
    else:
      sample = parameters
    self._randomization_param_value_dict["lateral friction"] = sample
    randomized_foot_friction = (sample - self._param_bounds[0]) / (
        self._param_bounds[1] -
        self._param_bounds[0]) * (upper_bound - lower_bound) + lower_bound
    minitaur.SetFootFriction(randomized_foot_friction)
    if self._verbose:
      tf.logging.info("foot friction is: {}".format(randomized_foot_friction))

  def _randomize_battery_level(self,
                               minitaur,
                               lower_bound,
                               upper_bound,
                               parameters=None):
    """Randomizes the battery voltage."""
    if parameters is None:
      sample = self._np_random.uniform(self._param_bounds[0],
                                       self._param_bounds[1])
    else:
      sample = parameters
    self._randomization_param_value_dict["battery"] = sample
    randomized_battery_voltage = (sample - self._param_bounds[0]) / (
        self._param_bounds[1] -
        self._param_bounds[0]) * (upper_bound - lower_bound) + lower_bound
    minitaur.SetBatteryVoltage(randomized_battery_voltage)
    if self._verbose:
      tf.logging.info(
          "battery voltage is: {}".format(randomized_battery_voltage))

  def _randomize_global_motor_strength(self,
                                       minitaur,
                                       lower_bound,
                                       upper_bound,
                                       parameters=None):
    """Applies one shared random strength ratio to all motors."""
    if parameters is None:
      sample = self._np_random.uniform(self._param_bounds[0],
                                       self._param_bounds[1])
    else:
      sample = parameters
    self._randomization_param_value_dict["global motor strength"] = sample
    randomized_motor_strength_ratio = (sample - self._param_bounds[0]) / (
        self._param_bounds[1] -
        self._param_bounds[0]) * (upper_bound - lower_bound) + lower_bound
    minitaur.SetMotorStrengthRatios([randomized_motor_strength_ratio] *
                                    minitaur.num_motors)
    if self._verbose:
      tf.logging.info("global motor strength is: {}".format(
          randomized_motor_strength_ratio))

  def _randomize_motor_strength(self,
                                minitaur,
                                lower_bound,
                                upper_bound,
                                parameters=None):
    """Applies an independent random strength ratio to each motor."""
    if parameters is None:
      sample = self._np_random.uniform(
          [self._param_bounds[0]] * minitaur.num_motors,
          [self._param_bounds[1]] * minitaur.num_motors)
    else:
      sample = parameters
    self._randomization_param_value_dict["motor strength"] = sample
    randomized_motor_strength_ratios = (sample - self._param_bounds[0]) / (
        self._param_bounds[1] -
        self._param_bounds[0]) * (upper_bound - lower_bound) + lower_bound
    minitaur.SetMotorStrengthRatios(randomized_motor_strength_ratios)
    if self._verbose:
      tf.logging.info(
          "motor strength is: {}".format(randomized_motor_strength_ratios))

  def _randomize_leg_weakening(self,
                               minitaur,
                               lower_bound,
                               upper_bound,
                               parameters=None):
    """Weakens the motors of one randomly chosen leg by a random ratio."""
    motor_per_leg = int(minitaur.num_motors / NUM_LEGS)
    if parameters is None:
      # First choose which leg to weaken
      leg_to_weaken = self._np_random.randint(NUM_LEGS)
      # Choose what ratio to randomize
      normalized_ratio = self._np_random.uniform(self._param_bounds[0],
                                                 self._param_bounds[1])
      sample = [leg_to_weaken, normalized_ratio]
    else:
      sample = [parameters[0], parameters[1]]
      leg_to_weaken = sample[0]
      normalized_ratio = sample[1]
    self._randomization_param_value_dict["leg weaken"] = sample
    leg_weaken_ratio = (normalized_ratio - self._param_bounds[0]) / (
        self._param_bounds[1] -
        self._param_bounds[0]) * (upper_bound - lower_bound) + lower_bound
    # All other motors keep full strength; the chosen leg's motors get
    # the weakened ratio.
    motor_strength_ratios = np.ones(minitaur.num_motors)
    motor_strength_ratios[leg_to_weaken * motor_per_leg:(leg_to_weaken + 1) *
                          motor_per_leg] = leg_weaken_ratio
    minitaur.SetMotorStrengthRatios(motor_strength_ratios)
    if self._verbose:
      tf.logging.info("weakening leg {} with ratio: {}".format(
          leg_to_weaken, leg_weaken_ratio))

  def _randomize_single_leg_weakening(self,
                                      minitaur,
                                      lower_bound,
                                      upper_bound,
                                      parameters=None):
    """Weakens the motors of leg 0 only, by a random ratio."""
    motor_per_leg = int(minitaur.num_motors / NUM_LEGS)
    leg_to_weaken = 0
    if parameters is None:
      # Choose what ratio to randomize
      normalized_ratio = self._np_random.uniform(self._param_bounds[0],
                                                 self._param_bounds[1])
    else:
      normalized_ratio = parameters
    self._randomization_param_value_dict["single leg weaken"] = normalized_ratio
    leg_weaken_ratio = (normalized_ratio - self._param_bounds[0]) / (
        self._param_bounds[1] -
        self._param_bounds[0]) * (upper_bound - lower_bound) + lower_bound
    motor_strength_ratios = np.ones(minitaur.num_motors)
    motor_strength_ratios[leg_to_weaken * motor_per_leg:(leg_to_weaken + 1) *
                          motor_per_leg] = leg_weaken_ratio
    minitaur.SetMotorStrengthRatios(motor_strength_ratios)
    if self._verbose:
      tf.logging.info("weakening leg {} with ratio: {}".format(
          leg_to_weaken, leg_weaken_ratio))
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/motion_imitation/envs/utilities/minitaur_env_randomizer_config.py | motion_imitation/envs/utilities/minitaur_env_randomizer_config.py | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A config file for parameters and their ranges in dynamics randomization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
def all_params():
  """Returns the randomization ranges for every physical parameter.

  Dimensionless entries are multiplicative ratios (e.g. 0.8 means 80% of the
  nominal value); the rest are absolute physical quantities in SI units.
  """
  # Multiplicative ratios relative to the nominal values.
  ratio_ranges = {
      "mass": [0.8, 1.2],
      "inertia": [0.5, 1.5],
      "motor strength": [0.8, 1.2],
  }
  # Absolute physical quantities, in SI units.
  physical_ranges = {
      "motor friction": [0, 0.05],  # Viscous damping (Nm s/rad).
      "latency": [0.0, 0.04],  # Time interval (s).
      "lateral friction": [0.5, 1.25],  # Friction coefficient (dimensionless).
      "battery": [14.0, 16.8],  # Voltage (V).
      "joint friction": [0, 0.05],  # Coulomb friction torque (Nm).
  }
  param_range = dict(ratio_ranges)
  param_range.update(physical_ranges)
  return param_range
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/motion_imitation/envs/utilities/controllable_env_randomizer_base.py | motion_imitation/envs/utilities/controllable_env_randomizer_base.py | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base class for controllable environment randomizer."""
import os
import inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(os.path.dirname(currentdir))
os.sys.path.insert(0, parentdir)
from motion_imitation.envs.utilities import env_randomizer_base
class ControllableEnvRandomizerBase(env_randomizer_base.EnvRandomizerBase):
  """Base class for environment randomizer that can be manipulated explicitly.

  Randomizes physical parameters of the objects in the simulation and adds
  perturbations to the stepping of the simulation. Unlike the plain
  randomizer base, subclasses of this interface also expose their current
  randomization state so it can be read back and re-applied.
  """

  def get_randomization_parameters(self):
    """Get the parameters of the randomization.

    Returns:
      The current randomization parameters; the exact structure is defined
      by the concrete subclass.

    Raises:
      NotImplementedError: always, in this abstract base class.
    """
    raise NotImplementedError

  def set_randomization_from_parameters(self, env, randomization_parameters):
    """Set the parameters of the randomization.

    Args:
      env: the environment to apply the randomization to.
      randomization_parameters: parameters in the same structure returned by
        get_randomization_parameters().

    Raises:
      NotImplementedError: always, in this abstract base class.
    """
    raise NotImplementedError
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/motion_imitation/envs/utilities/__init__.py | motion_imitation/envs/utilities/__init__.py | python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false | |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/motion_imitation/envs/utilities/minitaur_env_randomizer_from_config.py | motion_imitation/envs/utilities/minitaur_env_randomizer_from_config.py | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
"""An environment randomizer that randomizes physical parameters from config."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(os.path.dirname(currentdir))
os.sys.path.insert(0, parentdir)
import functools
import random
import numpy as np
import six
import tensorflow.compat.v1 as tf
from motion_imitation.envs.utilities import env_randomizer_base
from motion_imitation.envs.utilities import minitaur_env_randomizer_config
SIMULATION_TIME_STEP = 0.001
class MinitaurEnvRandomizerFromConfig(env_randomizer_base.EnvRandomizerBase):
  """Randomizes the physical parameters of a minitaur env at every reset."""

  def __init__(self, config=None):
    """Loads the randomization ranges from a named config function.

    Args:
      config: name of a function in minitaur_env_randomizer_config that
        returns a parameter-range dict; defaults to "all_params".

    Raises:
      ValueError: if no config function with that name exists.
    """
    config_name = "all_params" if config is None else config
    try:
      config_fn = getattr(minitaur_env_randomizer_config, config_name)
    except AttributeError:
      raise ValueError("Config {} is not found.".format(config_name))
    self._randomization_param_dict = config_fn()
    tf.logging.info("Randomization config is: {}".format(
        self._randomization_param_dict))

  def randomize_env(self, env):
    """Randomize various physical properties of the environment.

    It randomizes the physical parameters according to the input
    configuration supplied at construction time.

    Args:
      env: A minitaur gym environment.
    """
    self._randomization_function_dict = self._build_randomization_function_dict(
        env)
    for param_name, random_range in self._randomization_param_dict.items():
      randomize_fn = self._randomization_function_dict[param_name]
      randomize_fn(lower_bound=random_range[0], upper_bound=random_range[1])

  def _get_robot_from_env(self, env):
    # v1 envs expose the robot as `minitaur`, v2 envs as `robot`.
    for attr_name in ("minitaur", "robot"):
      if hasattr(env, attr_name):
        return getattr(env, attr_name)
    return None

  def _build_randomization_function_dict(self, env):
    """Maps each config key to a bound randomization callable."""
    robot = self._get_robot_from_env(env)
    bind = functools.partial
    return {
        "mass": bind(self._randomize_masses, minitaur=robot),
        "inertia": bind(self._randomize_inertia, minitaur=robot),
        "latency": bind(self._randomize_latency, minitaur=robot),
        "joint friction": bind(self._randomize_joint_friction, minitaur=robot),
        "motor friction": bind(self._randomize_motor_friction, minitaur=robot),
        "restitution": bind(self._randomize_contact_restitution,
                            minitaur=robot),
        "lateral friction": bind(self._randomize_contact_friction,
                                 minitaur=robot),
        "battery": bind(self._randomize_battery_level, minitaur=robot),
        "motor strength": bind(self._randomize_motor_strength, minitaur=robot),
        # Setting the control step needs access to the environment itself.
        "control step": bind(self._randomize_control_step, env=env),
    }

  def _randomize_control_step(self, env, lower_bound, upper_bound):
    new_control_step = random.uniform(lower_bound, upper_bound)
    env.set_time_step(new_control_step)
    tf.logging.info("control step is: {}".format(new_control_step))

  def _randomize_masses(self, minitaur, lower_bound, upper_bound):
    # Base and legs get independent random scale factors.
    base_ratio = random.uniform(lower_bound, upper_bound)
    new_base_masses = base_ratio * np.array(minitaur.GetBaseMassesFromURDF())
    minitaur.SetBaseMasses(new_base_masses)
    tf.logging.info("base mass is: {}".format(new_base_masses))

    leg_ratio = random.uniform(lower_bound, upper_bound)
    new_leg_masses = leg_ratio * np.array(minitaur.GetLegMassesFromURDF())
    minitaur.SetLegMasses(new_leg_masses)
    tf.logging.info("leg mass is: {}".format(new_leg_masses))

  def _randomize_inertia(self, minitaur, lower_bound, upper_bound):
    # Base and legs get independent random scale factors.
    base_ratio = random.uniform(lower_bound, upper_bound)
    new_base_inertia = base_ratio * np.array(minitaur.GetBaseInertiasFromURDF())
    minitaur.SetBaseInertias(new_base_inertia)
    tf.logging.info("base inertia is: {}".format(new_base_inertia))

    leg_ratio = random.uniform(lower_bound, upper_bound)
    new_leg_inertia = leg_ratio * np.array(minitaur.GetLegInertiasFromURDF())
    minitaur.SetLegInertias(new_leg_inertia)
    tf.logging.info("leg inertia is: {}".format(new_leg_inertia))

  def _randomize_latency(self, minitaur, lower_bound, upper_bound):
    new_latency = random.uniform(lower_bound, upper_bound)
    minitaur.SetControlLatency(new_latency)
    tf.logging.info("control latency is: {}".format(new_latency))

  def _randomize_joint_friction(self, minitaur, lower_bound, upper_bound):
    # One independent friction sample per knee joint.
    num_knee_joints = minitaur.GetNumKneeJoints()
    new_joint_frictions = np.random.uniform(
        [lower_bound] * num_knee_joints, [upper_bound] * num_knee_joints)
    minitaur.SetJointFriction(new_joint_frictions)
    tf.logging.info("joint friction is: {}".format(new_joint_frictions))

  def _randomize_motor_friction(self, minitaur, lower_bound, upper_bound):
    new_motor_damping = random.uniform(lower_bound, upper_bound)
    minitaur.SetMotorViscousDamping(new_motor_damping)
    tf.logging.info("motor friction is: {}".format(new_motor_damping))

  def _randomize_contact_restitution(self, minitaur, lower_bound, upper_bound):
    new_restitution = random.uniform(lower_bound, upper_bound)
    minitaur.SetFootRestitution(new_restitution)
    tf.logging.info("foot restitution is: {}".format(new_restitution))

  def _randomize_contact_friction(self, minitaur, lower_bound, upper_bound):
    new_foot_friction = random.uniform(lower_bound, upper_bound)
    minitaur.SetFootFriction(new_foot_friction)
    tf.logging.info("foot friction is: {}".format(new_foot_friction))

  def _randomize_battery_level(self, minitaur, lower_bound, upper_bound):
    new_battery_voltage = random.uniform(lower_bound, upper_bound)
    minitaur.SetBatteryVoltage(new_battery_voltage)
    tf.logging.info("battery voltage is: {}".format(new_battery_voltage))

  def _randomize_motor_strength(self, minitaur, lower_bound, upper_bound):
    # One independent strength ratio per motor.
    new_strength_ratios = np.random.uniform(
        [lower_bound] * minitaur.num_motors,
        [upper_bound] * minitaur.num_motors)
    minitaur.SetMotorStrengthRatios(new_strength_ratios)
    tf.logging.info(
        "motor strength is: {}".format(new_strength_ratios))
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/motion_imitation/envs/sensors/robot_sensors.py | motion_imitation/envs/sensors/robot_sensors.py | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple sensors related to the robot."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(os.path.dirname(currentdir))
os.sys.path.insert(0, parentdir)
import numpy as np
import typing
from robots import minitaur_pose_utils
from motion_imitation.envs.sensors import sensor
_ARRAY = typing.Iterable[float] #pylint: disable=invalid-name
_FLOAT_OR_ARRAY = typing.Union[float, _ARRAY] #pylint: disable=invalid-name
_DATATYPE_LIST = typing.Iterable[typing.Any] #pylint: disable=invalid-name
class MotorAngleSensor(sensor.BoxSpaceSensor):
  """A sensor that reads motor angles from the robot."""

  def __init__(self,
               num_motors: int,
               noisy_reading: bool = True,
               observe_sine_cosine: bool = False,
               lower_bound: _FLOAT_OR_ARRAY = -np.pi,
               upper_bound: _FLOAT_OR_ARRAY = np.pi,
               name: typing.Text = "MotorAngle",
               dtype: typing.Type[typing.Any] = np.float64) -> None:
    """Constructs MotorAngleSensor.

    Args:
      num_motors: the number of motors in the robot.
      noisy_reading: whether the (noisy) observed angles are reported rather
        than the true simulation state.
      observe_sine_cosine: whether to convert readings to sine/cosine values
        for continuity.
      lower_bound: the lower bound of the motor angle.
      upper_bound: the upper bound of the motor angle.
      name: the name of the sensor.
      dtype: data type of sensor value.
    """
    self._num_motors = num_motors
    self._noisy_reading = noisy_reading
    self._observe_sine_cosine = observe_sine_cosine

    if observe_sine_cosine:
      # Sine/cosine doubles the channel count; values live in [-1, 1].
      box_kwargs = dict(
          shape=(num_motors * 2,),
          lower_bound=-np.ones(num_motors * 2),
          upper_bound=np.ones(num_motors * 2))
    else:
      box_kwargs = dict(
          shape=(num_motors,),
          lower_bound=lower_bound,
          upper_bound=upper_bound)
    super(MotorAngleSensor, self).__init__(name=name, dtype=dtype,
                                           **box_kwargs)

  def _get_observation(self) -> _ARRAY:
    angles = (self._robot.GetMotorAngles() if self._noisy_reading
              else self._robot.GetTrueMotorAngles())
    if self._observe_sine_cosine:
      return np.hstack((np.cos(angles), np.sin(angles)))
    return angles
class MinitaurLegPoseSensor(sensor.BoxSpaceSensor):
  """A sensor that reads leg_pose from the Minitaur robot."""

  def __init__(self,
               num_motors: int,
               noisy_reading: bool = True,
               observe_sine_cosine: bool = False,
               lower_bound: _FLOAT_OR_ARRAY = -np.pi,
               upper_bound: _FLOAT_OR_ARRAY = np.pi,
               name: typing.Text = "MinitaurLegPose",
               dtype: typing.Type[typing.Any] = np.float64) -> None:
    """Constructs MinitaurLegPoseSensor.

    Args:
      num_motors: the number of motors in the robot.
      noisy_reading: whether the (noisy) observed angles are used rather
        than the true simulation state.
      observe_sine_cosine: whether to convert readings to sine/cosine values
        for continuity.
      lower_bound: the lower bound of the leg pose angle.
      upper_bound: the upper bound of the leg pose angle.
      name: the name of the sensor.
      dtype: data type of sensor value.
    """
    self._num_motors = num_motors
    self._noisy_reading = noisy_reading
    self._observe_sine_cosine = observe_sine_cosine

    if observe_sine_cosine:
      # Sine/cosine doubles the channel count; values live in [-1, 1].
      box_kwargs = dict(
          shape=(num_motors * 2,),
          lower_bound=-np.ones(num_motors * 2),
          upper_bound=np.ones(num_motors * 2))
    else:
      box_kwargs = dict(
          shape=(num_motors,),
          lower_bound=lower_bound,
          upper_bound=upper_bound)
    super(MinitaurLegPoseSensor, self).__init__(name=name, dtype=dtype,
                                                **box_kwargs)

  def _get_observation(self) -> _ARRAY:
    if self._noisy_reading:
      motor_angles = self._robot.GetMotorAngles()
    else:
      motor_angles = self._robot.GetTrueMotorAngles()
    # Convert raw motor angles into the swing/extension leg-pose space.
    leg_pose = minitaur_pose_utils.motor_angles_to_leg_pose(motor_angles)
    if self._observe_sine_cosine:
      return np.hstack((np.cos(leg_pose), np.sin(leg_pose)))
    return leg_pose
class BaseDisplacementSensor(sensor.BoxSpaceSensor):
  """A sensor that reads displacement of robot base."""

  def __init__(self,
               lower_bound: _FLOAT_OR_ARRAY = -0.1,
               upper_bound: _FLOAT_OR_ARRAY = 0.1,
               convert_to_local_frame: bool = False,
               name: typing.Text = "BaseDisplacement",
               dtype: typing.Type[typing.Any] = np.float64) -> None:
    """Constructs BaseDisplacementSensor.

    Args:
      lower_bound: the lower bound of the base displacement.
      upper_bound: the upper bound of the base displacement.
      convert_to_local_frame: whether to project dx, dy to local frame based
        on the robot's current yaw angle. (Note that it's a projection onto
        the 2D plane, so the roll and pitch of the robot are ignored.)
      name: the name of the sensor.
      dtype: data type of sensor value.
    """
    self._channels = ["x", "y", "z"]
    self._num_channels = len(self._channels)
    super(BaseDisplacementSensor, self).__init__(
        name=name,
        shape=(self._num_channels,),
        lower_bound=np.array([lower_bound] * 3),
        upper_bound=np.array([upper_bound] * 3),
        dtype=dtype)
    # One named entry per displacement axis, e.g. ("BaseDisplacement_x", ...).
    self._datatype = [("{}_{}".format(name, channel), self._dtype)
                      for channel in self._channels]
    self._convert_to_local_frame = convert_to_local_frame
    # Previous/current pose snapshots used to compute per-step deltas.
    self._last_yaw = 0
    self._last_base_position = np.zeros(3)
    self._current_yaw = 0
    self._current_base_position = np.zeros(3)

  def get_channels(self) -> typing.Iterable[typing.Text]:
    """Returns channels (displacement in x, y, z direction)."""
    return self._channels

  def get_num_channels(self) -> int:
    """Returns number of channels."""
    return self._num_channels

  def get_observation_datatype(self) -> _DATATYPE_LIST:
    """See base class."""
    return self._datatype

  def _get_observation(self) -> _ARRAY:
    """See base class."""
    dx, dy, dz = self._current_base_position - self._last_base_position
    if not self._convert_to_local_frame:
      return np.array([dx, dy, dz])
    # 2D rotation of the horizontal displacement into the frame defined by
    # the robot's yaw at the previous step (roll/pitch are ignored).
    cos_yaw = np.cos(self._last_yaw)
    sin_yaw = np.sin(self._last_yaw)
    return np.array([cos_yaw * dx + sin_yaw * dy,
                     -sin_yaw * dx + cos_yaw * dy,
                     dz])

  def on_reset(self, env):
    """See base class."""
    # Initialize both snapshots to the same pose so the first delta is zero.
    self._current_base_position = np.array(self._robot.GetBasePosition())
    self._last_base_position = np.array(self._robot.GetBasePosition())
    self._current_yaw = self._robot.GetBaseRollPitchYaw()[2]
    self._last_yaw = self._robot.GetBaseRollPitchYaw()[2]

  def on_step(self, env):
    """See base class."""
    # Shift the current snapshot into the "last" slot, then refresh it.
    self._last_base_position = self._current_base_position
    self._current_base_position = np.array(self._robot.GetBasePosition())
    self._last_yaw = self._current_yaw
    self._current_yaw = self._robot.GetBaseRollPitchYaw()[2]
class IMUSensor(sensor.BoxSpaceSensor):
  """An IMU sensor that reads orientations and angular velocities."""

  def __init__(self,
               channels: typing.Iterable[typing.Text] = None,
               noisy_reading: bool = True,
               lower_bound: _FLOAT_OR_ARRAY = None,
               upper_bound: _FLOAT_OR_ARRAY = None,
               name: typing.Text = "IMU",
               dtype: typing.Type[typing.Any] = np.float64) -> None:
    """Constructs IMUSensor.

    It generates separate IMU value channels, e.g. IMU_R, IMU_P, IMU_dR, ...

    Args:
      channels: value channels to subscribe to. An upper-case letter denotes
        an orientation angle and a 'd' prefix its angular velocity (e.g.
        ['R', 'P', 'Y', 'dR', 'dP', 'dY'] or ['R', 'P', 'dR', 'dP']);
        'Rcos'/'Rsin'-style channels report the cosine/sine of the angle.
      noisy_reading: whether values are noisy observations rather than the
        true simulation state.
      lower_bound: the lower bound of the IMU values; when both bounds are
        None, per-channel defaults are used (angles: +/-2pi,
        sine/cosine: +/-1, rates: +/-2000pi).
      upper_bound: the upper bound of the IMU values (see lower_bound).
      name: the name of the sensor.
      dtype: data type of sensor value.
    """
    self._channels = channels if channels else ["R", "P", "dR", "dP"]
    self._num_channels = len(self._channels)
    self._noisy_reading = noisy_reading

    # Compute the default lower and upper bounds per channel. Unrecognized
    # channels are skipped, matching the original behavior.
    if lower_bound is None and upper_bound is None:
      lower_bound = []
      upper_bound = []
      for channel in self._channels:
        if channel in ["R", "P", "Y"]:
          magnitude = 2.0 * np.pi
        elif channel in ["Rcos", "Rsin", "Pcos", "Psin", "Ycos", "Ysin"]:
          magnitude = 1.
        elif channel in ["dR", "dP", "dY"]:
          magnitude = 2000.0 * np.pi
        else:
          continue
        lower_bound.append(-magnitude)
        upper_bound.append(magnitude)

    super(IMUSensor, self).__init__(
        name=name,
        shape=(self._num_channels,),
        lower_bound=lower_bound,
        upper_bound=upper_bound,
        dtype=dtype)

    # One named entry per channel, e.g. ("IMU_R", dtype).
    self._datatype = [("{}_{}".format(name, channel), self._dtype)
                      for channel in self._channels]

  def get_channels(self) -> typing.Iterable[typing.Text]:
    return self._channels

  def get_num_channels(self) -> int:
    return self._num_channels

  def get_observation_datatype(self) -> _DATATYPE_LIST:
    """Returns box-shape data type."""
    return self._datatype

  def _get_observation(self) -> _ARRAY:
    if self._noisy_reading:
      rpy = self._robot.GetBaseRollPitchYaw()
      drpy = self._robot.GetBaseRollPitchYawRate()
    else:
      rpy = self._robot.GetTrueBaseRollPitchYaw()
      drpy = self._robot.GetTrueBaseRollPitchYawRate()

    assert len(rpy) >= 3, rpy
    assert len(drpy) >= 3, drpy
    # Map every known channel name to its value; channels that were not
    # recognized read as 0, like in the original if-chain.
    channel_values = {
        "R": rpy[0],
        "Rcos": np.cos(rpy[0]),
        "Rsin": np.sin(rpy[0]),
        "P": rpy[1],
        "Pcos": np.cos(rpy[1]),
        "Psin": np.sin(rpy[1]),
        "Y": rpy[2],
        "Ycos": np.cos(rpy[2]),
        "Ysin": np.sin(rpy[2]),
        "dR": drpy[0],
        "dP": drpy[1],
        "dY": drpy[2],
    }
    observations = np.zeros(self._num_channels)
    for i, channel in enumerate(self._channels):
      observations[i] = channel_values.get(channel, 0.0)
    return observations
class BasePositionSensor(sensor.BoxSpaceSensor):
  """A sensor that reads the base position of the Minitaur robot."""

  def __init__(self,
               lower_bound: _FLOAT_OR_ARRAY = -100,
               upper_bound: _FLOAT_OR_ARRAY = 100,
               name: typing.Text = "BasePosition",
               dtype: typing.Type[typing.Any] = np.float64) -> None:
    """Constructs BasePositionSensor.

    Args:
      lower_bound: the lower bound of the base position of the robot.
      upper_bound: the upper bound of the base position of the robot.
      name: the name of the sensor.
      dtype: data type of sensor value.
    """
    # Three channels: x, y, z of the base in world coordinates.
    super(BasePositionSensor, self).__init__(name=name,
                                             shape=(3,),
                                             lower_bound=lower_bound,
                                             upper_bound=upper_bound,
                                             dtype=dtype)

  def _get_observation(self) -> _ARRAY:
    return self._robot.GetBasePosition()
class PoseSensor(sensor.BoxSpaceSensor):
  """A sensor that reads the (x, y, theta) of a robot."""

  def __init__(self,
               lower_bound: _FLOAT_OR_ARRAY = -100,
               upper_bound: _FLOAT_OR_ARRAY = 100,
               name: typing.Text = "PoseSensor",
               dtype: typing.Type[typing.Any] = np.float64) -> None:
    """Constructs PoseSensor.

    Args:
      lower_bound: the lower bound of the pose of the robot.
      upper_bound: the upper bound of the pose of the robot.
      name: the name of the sensor.
      dtype: data type of sensor value.
    """
    # Three channels: planar position (x, y) plus the yaw orientation.
    super(PoseSensor, self).__init__(name=name,
                                     shape=(3,),
                                     lower_bound=lower_bound,
                                     upper_bound=upper_bound,
                                     dtype=dtype)

  def _get_observation(self) -> _ARRAY:
    planar_position = self._robot.GetBasePosition()[:2]
    yaw = self._robot.GetTrueBaseRollPitchYaw()[2]
    return np.concatenate((planar_position, (yaw,)))
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/motion_imitation/envs/sensors/space_utils.py | motion_imitation/envs/sensors/space_utils.py | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Converts a list of sensors to gym space."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(os.path.dirname(currentdir))
os.sys.path.insert(0, parentdir)
import gym
from gym import spaces
import numpy as np
import typing
from motion_imitation.envs.sensors import sensor
class UnsupportedConversionError(Exception):
"""An exception when the function cannot convert sensors to the gym space."""
class AmbiguousDataTypeError(Exception):
"""An exception when the function cannot determine the data type."""
def convert_sensors_to_gym_space(
    sensors: typing.List[sensor.Sensor]) -> gym.Space:
  """Convert a list of sensors to the corresponding gym space.

  Args:
    sensors: a list of the current sensors

  Returns:
    space: the converted gym space

  Raises:
    UnsupportedConversionError: raises when the function cannot convert the
      given list of sensors.
  """
  # Only uniform lists of 1D box sensors are supported.
  for s in sensors:
    if not (isinstance(s, sensor.BoxSpaceSensor) and s.get_dimension() == 1):
      raise UnsupportedConversionError('sensors = ' + str(sensors))
  return convert_1d_box_sensors_to_gym_space(sensors)
def convert_1d_box_sensors_to_gym_space(
    sensors: typing.List[sensor.Sensor]) -> gym.Space:
  """Convert a list of 1D BoxSpaceSensors to the corresponding gym space.

  Args:
    sensors: a list of the current sensors

  Returns:
    space: the converted gym space

  Raises:
    UnsupportedConversionError: raises when the function cannot convert the
      given list of sensors (including an empty list, which cannot define a
      meaningful observation space).
    AmbiguousDataTypeError: raises when the function cannot determine the
      data types because they are not uniform.
  """
  # An empty list used to slip past the all() check below and crash with an
  # opaque IndexError at sensor_dtypes[0]; reject it explicitly instead.
  if not sensors:
    raise UnsupportedConversionError('sensors = ' + str(sensors))
  # Check if all sensors are 1D BoxSpaceSensors
  if not all([
      isinstance(s, sensor.BoxSpaceSensor) and s.get_dimension() == 1
      for s in sensors
  ]):
    raise UnsupportedConversionError('sensors = ' + str(sensors))

  # Check if all sensors have the same data type
  sensor_dtypes = [s.get_dtype() for s in sensors]
  if sensor_dtypes.count(sensor_dtypes[0]) != len(sensor_dtypes):
    raise AmbiguousDataTypeError('sensor datatypes are inhomogeneous')

  # Concatenate the per-sensor bounds into one flat Box space.
  lower_bound = np.concatenate([s.get_lower_bound() for s in sensors])
  upper_bound = np.concatenate([s.get_upper_bound() for s in sensors])
  observation_space = spaces.Box(np.array(lower_bound),
                                 np.array(upper_bound),
                                 dtype=np.float32)
  return observation_space
def convert_sensors_to_gym_space_dictionary(
    sensors: typing.List[sensor.Sensor]) -> gym.Space:
  """Convert a list of sensors to the corresponding gym space dictionary.

  Args:
    sensors: a list of the current sensors

  Returns:
    space: the converted gym space dictionary

  Raises:
    UnsupportedConversionError: raises when the function cannot convert the
      given list of sensors.
  """
  gym_space_dict = {}
  for s in sensors:
    # Only box-space sensors can be mapped into a gym Box entry.
    if not isinstance(s, sensor.BoxSpaceSensor):
      raise UnsupportedConversionError('sensors = ' + str(sensors))
    gym_space_dict[s.get_name()] = spaces.Box(np.array(s.get_lower_bound()),
                                              np.array(s.get_upper_bound()),
                                              dtype=np.float32)
  return spaces.Dict(gym_space_dict)
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/motion_imitation/envs/sensors/environment_sensors.py | motion_imitation/envs/sensors/environment_sensors.py | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple sensors related to the environment."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(os.path.dirname(currentdir))
os.sys.path.insert(0, parentdir)
import numpy as np
import typing
from motion_imitation.envs.sensors import sensor
_ARRAY = typing.Iterable[float] # pylint:disable=invalid-name
_FLOAT_OR_ARRAY = typing.Union[float, _ARRAY] # pylint:disable=invalid-name
_DATATYPE_LIST = typing.Iterable[typing.Any] # pylint:disable=invalid-name
class LastActionSensor(sensor.BoxSpaceSensor):
  """A sensor that reports the last action taken."""

  def __init__(self,
               num_actions: int,
               lower_bound: _FLOAT_OR_ARRAY = -1.0,
               upper_bound: _FLOAT_OR_ARRAY = 1.0,
               name: typing.Text = "LastAction",
               dtype: typing.Type[typing.Any] = np.float64) -> None:
    """Constructs LastActionSensor.

    Args:
      num_actions: the number of actions to read.
      lower_bound: the lower bound of the actions.
      upper_bound: the upper bound of the actions.
      name: the name of the sensor.
      dtype: data type of sensor value.
    """
    self._num_actions = num_actions
    # The environment is unknown until on_reset() is invoked.
    self._env = None
    super(LastActionSensor, self).__init__(name=name,
                                           shape=(num_actions,),
                                           lower_bound=lower_bound,
                                           upper_bound=upper_bound,
                                           dtype=dtype)

  def on_reset(self, env):
    """From the callback, the sensor remembers the environment.

    Args:
      env: the environment who invokes this callback function.
    """
    self._env = env

  def _get_observation(self) -> _ARRAY:
    """Returns the last action of the environment."""
    return self._env.last_action
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/motion_imitation/envs/sensors/sensor_wrappers.py | motion_imitation/envs/sensors/sensor_wrappers.py | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrapper classes for extending sensor information."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(os.path.dirname(currentdir))
os.sys.path.insert(0, parentdir)
import collections
import numpy as np
import typing
from motion_imitation.envs.sensors import sensor
_ARRAY = typing.Iterable[float] # pylint: disable=invalid-name
class SensorWrapper(sensor.BoxSpaceSensor):
  """A base interface for sensor wrappers.

  A wrapper owns an inner sensor and forwards the standard sensor API
  (robot accessors and environment callbacks) to it. Unknown attribute
  lookups are also delegated to the wrapped sensor.
  """

  def __init__(self, wrapped_sensor: sensor.BoxSpaceSensor, **kwargs) -> None:
    """A base wrapper interface.

    Args:
      wrapped_sensor: an inner sensor that you wrap around.
      **kwargs: keyword arguments forwarded to the BoxSpaceSensor parent
        (name, shape, bounds, dtype).
    """
    super(SensorWrapper, self).__init__(**kwargs)
    self._wrapped_sensor = wrapped_sensor

  def __call__(self, env):
    return self._wrapped_sensor(env)

  def __getattr__(self, attr):
    # __getattr__ is only invoked after normal attribute lookup fails. If
    # `_wrapped_sensor` itself is not set (e.g. during unpickling, before
    # __init__ has run), delegating below would re-enter __getattr__ forever;
    # raise AttributeError instead, as the attribute protocol expects.
    if attr == "_wrapped_sensor":
      raise AttributeError(attr)
    return getattr(self._wrapped_sensor, attr)

  def set_robot(self, robot) -> None:
    """Sets a robot instance on the wrapped sensor."""
    self._wrapped_sensor.set_robot(robot)

  def get_robot(self):
    """Returns the robot instance held by the wrapped sensor."""
    return self._wrapped_sensor.get_robot()

  def on_reset(self, env) -> None:
    """A callback function for the reset event.

    Args:
      env: the environment who invokes this callback function.
    """
    self._wrapped_sensor.on_reset(env)

  def on_step(self, env) -> None:
    """A callback function for the step event.

    Args:
      env: the environment who invokes this callback function.
    """
    self._wrapped_sensor.on_step(env)

  def on_terminate(self, env) -> None:
    """A callback function for the terminate event.

    Args:
      env: the environment who invokes this callback function.
    """
    self._wrapped_sensor.on_terminate(env)
class HistoricSensorWrapper(SensorWrapper):
  """A sensor wrapper that exposes a rolling history of its inner sensor."""

  def __init__(self,
               wrapped_sensor: sensor.BoxSpaceSensor,
               num_history: int,
               append_history_axis: bool = False,
               name: typing.Text = None) -> None:
    """Constructs HistoricSensorWrapper.

    The history is ordered newest-first: index 0 of the buffer holds the
    most recent observation and later indices hold progressively older ones.

    Args:
      wrapped_sensor: the inner sensor whose output is tracked.
      num_history: how many past observations to keep.
      append_history_axis: when True, history is stacked along an extra
        trailing axis of the observation; when False, the historical
        observations are concatenated flat.
      name: label for the sensor. Defaults to
        HistoricSensorWrapper(<wrapped sensor name>).
    """
    self._num_history = num_history
    self._append_history_axis = append_history_axis
    name = name or "HistoricSensorWrapper(%s)" % wrapped_sensor.get_name()

    raw_low = wrapped_sensor.get_lower_bound()
    raw_high = wrapped_sensor.get_upper_bound()
    if self._append_history_axis:
      # Replicate the per-step bounds along a new trailing history axis.
      reps = (1, self._num_history)
      lower_bound = np.tile(np.expand_dims(raw_low, -1), reps)
      upper_bound = np.tile(np.expand_dims(raw_high, -1), reps)
    else:
      # Flat layout: the per-step bounds repeated num_history times.
      lower_bound = np.tile(raw_low, self._num_history)
      upper_bound = np.tile(raw_high, self._num_history)
    shape = lower_bound.shape

    self._history_buffer = None
    super(HistoricSensorWrapper, self).__init__(name=name,
                                                shape=shape,
                                                lower_bound=lower_bound,
                                                upper_bound=upper_bound,
                                                wrapped_sensor=wrapped_sensor)

  def on_reset(self, env) -> None:
    """Resets and pre-fills the history with the current observation.

    Args:
      env: the environment who invokes this callback function (unused).
    """
    super(HistoricSensorWrapper, self).on_reset(env)
    self._history_buffer = collections.deque(maxlen=self._num_history)
    while len(self._history_buffer) < self._num_history:
      self._history_buffer.appendleft(self._wrapped_sensor.get_observation())

  def on_step(self, env):
    """Pushes the newest observation onto the front of the history.

    Args:
      env: the environment who invokes this callback function (unused).
    """
    super(HistoricSensorWrapper, self).on_step(env)
    self._history_buffer.appendleft(self._wrapped_sensor.get_observation())

  def get_observation(self) -> _ARRAY:
    """Returns the buffered history as one array (stacked or concatenated)."""
    if not self._append_history_axis:
      return np.concatenate(self._history_buffer)
    return np.stack(self._history_buffer, axis=-1)

  @property
  def history_buffer(self):
    """Returns the raw history buffer (newest observation first)."""
    return self._history_buffer
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/motion_imitation/envs/sensors/sensor.py | motion_imitation/envs/sensors/sensor.py | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A sensor prototype class.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import typing
_ARRAY = typing.Iterable[float] # pylint: disable=invalid-name
_FLOAT_OR_ARRAY = typing.Union[float, _ARRAY] # pylint: disable=invalid-name
_DATATYPE_LIST = typing.Iterable[typing.Any] # pylint: disable=invalid-name
class Sensor(object):
  """Base class that concrete sensors derive from.

  A sensor is attached to a robot and produces observations for the
  environment. The bound/observation accessors are stubs here and are
  meant to be overridden by subclasses.
  """

  def __init__(self, name: typing.Text):
    """Creates the sensor with no robot attached yet.

    The robot reference starts out as None; the environment is expected to
    attach one via set_robot (typically when it resets the simulation).

    Args:
      name: the name of the sensor.
    """
    self._robot = None
    self._name = name

  def get_name(self) -> typing.Text:
    return self._name

  def get_dtype(self):
    pass

  def get_observation_datatype(self):
    """Returns the data type for the numpy structured array.

    It is recommended to define a list of tuples: (name, datatype, shape)
    Reference: https://docs.scipy.org/doc/numpy-1.15.0/user/basics.rec.html
    Ex:
      return [('motor_angles', np.float64, (8, ))]  # motor angle sensor
      return [('IMU_x', np.float64), ('IMU_z', np.float64), ]  # IMU

    Returns:
      datatype: a list of data types.
    """
    pass

  def get_lower_bound(self):
    """Returns the lower bound of the observation.

    Returns:
      lower_bound: the lower bound of sensor values in np.array format.
    """
    pass

  def get_upper_bound(self):
    """Returns the upper bound of the observation.

    Returns:
      upper_bound: the upper bound of sensor values in np.array format.
    """
    pass

  def get_observation(self):
    """Returns the observation data.

    Returns:
      observation: the observed sensor values in np.array format.
    """
    pass

  def set_robot(self, robot):
    """Sets the robot instance this sensor reads from."""
    self._robot = robot

  def get_robot(self):
    """Returns the attached robot instance (None until set_robot is called)."""
    return self._robot

  def on_reset(self, env):
    """A callback function for the reset event.

    Args:
      env: the environment who invokes this callback function.
    """
    pass

  def on_step(self, env):
    """A callback function for the step event.

    Args:
      env: the environment who invokes this callback function.
    """
    pass

  def on_terminate(self, env):
    """A callback function for the terminate event.

    Args:
      env: the environment who invokes this callback function.
    """
    pass
class BoxSpaceSensor(Sensor):
  """A sensor whose observations live in a Box (bounded array) space."""

  def __init__(self,
               name: typing.Text,
               shape: typing.Tuple[int, ...],
               lower_bound: _FLOAT_OR_ARRAY = -np.pi,
               upper_bound: _FLOAT_OR_ARRAY = np.pi,
               dtype=np.float64) -> None:
    """Constructs a box type sensor.

    Args:
      name: the name of the sensor.
      shape: the shape of the sensor values.
      lower_bound: the lower_bound of sensor value, in float or np.array.
      upper_bound: the upper_bound of sensor value, in float or np.array.
      dtype: data type of sensor value.
    """
    super(BoxSpaceSensor, self).__init__(name)
    self._shape = shape
    self._dtype = dtype

    def _to_bound_array(bound):
      # Scalar bounds are broadcast to the full sensor shape; sequence
      # bounds are converted to an ndarray as given.
      if isinstance(bound, (float, int)):
        return np.full(shape, bound, dtype=dtype)
      return np.array(bound)

    self._lower_bound = _to_bound_array(lower_bound)
    self._upper_bound = _to_bound_array(upper_bound)

  def get_shape(self) -> typing.Tuple[int, ...]:
    return self._shape

  def get_dimension(self) -> int:
    return len(self._shape)

  def get_dtype(self):
    # NOTE(review): intentionally returns None even though self._dtype is
    # stored -- confirm no caller relies on this before changing it.
    pass

  def get_observation_datatype(self) -> _DATATYPE_LIST:
    """Returns box-shape data type."""
    return [(self._name, self._dtype, self._shape)]

  def get_lower_bound(self) -> _ARRAY:
    """Returns the computed lower bound."""
    return self._lower_bound

  def get_upper_bound(self) -> _ARRAY:
    """Returns the computed upper bound."""
    return self._upper_bound

  def _get_observation(self) -> _ARRAY:
    """Returns the raw observation; subclasses must override."""
    raise NotImplementedError()

  def get_observation(self) -> np.ndarray:
    """Returns the raw observation coerced to this sensor's dtype."""
    return np.asarray(self._get_observation(), dtype=self._dtype)
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/motion_imitation/envs/sensors/__init__.py | motion_imitation/envs/sensors/__init__.py | python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false | |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/motion_imitation/learning/ppo_imitation.py | motion_imitation/learning/ppo_imitation.py | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(os.path.dirname(currentdir))
os.sys.path.insert(0, parentdir)
import time
from collections import deque
import numpy as np
import tensorflow as tf
from mpi4py import MPI
from stable_baselines.common import Dataset, explained_variance, fmt_row, zipsame, ActorCriticRLModel, SetVerbosity, \
TensorboardWriter
from stable_baselines import logger
import stable_baselines.common.tf_util as tf_util
from stable_baselines.common.tf_util import total_episode_reward_logger
from stable_baselines.common.policies import ActorCriticPolicy
from stable_baselines.common.mpi_adam import MpiAdam
from stable_baselines.common.mpi_moments import mpi_moments
from stable_baselines.common.misc_util import flatten_lists
from stable_baselines.common.runners import traj_segment_generator
from stable_baselines.trpo_mpi.utils import add_vtarg_and_adv
from stable_baselines.ppo1 import pposgd_simple
from motion_imitation.learning.imitation_runners import traj_segment_generator
def add_vtarg_and_adv(seg, gamma, lam):
    """Computes TD(lambda) value targets and GAE(lambda) advantages in place.

    Adds two keys to ``seg``: "adv" (the GAE advantages) and "tdlamret"
    (advantages plus value predictions, i.e. the TD(lambda) return).

    :param seg: (dict) the current segment of the trajectory; must contain
        "episode_starts", "vpred", "nextvpreds" and "rewards"
    :param gamma: (float) Discount factor
    :param lam: (float) GAE factor
    """
    rewards = seg["rewards"]
    values = seg["vpred"]
    next_values = seg["nextvpreds"]
    # A trailing False marks the slot past the end; it is only read for the
    # last step's nonterminal flag. The next-step value at episode ends is
    # assumed to already be zeroed by the segment generator.
    starts = np.append(seg["episode_starts"], False)
    horizon = len(rewards)
    advantages = np.empty(horizon, 'float32')
    running_gae = 0
    for t in reversed(range(horizon)):
        not_done = 1 - float(starts[t + 1])
        td_error = rewards[t] + gamma * next_values[t] - values[t]
        running_gae = td_error + gamma * lam * not_done * running_gae
        advantages[t] = running_gae
    seg["adv"] = advantages
    seg["tdlamret"] = advantages + values
class PPOImitation(pposgd_simple.PPO1):
    """
    Proximal Policy Optimization algorithm (MPI version).

    Paper: https://arxiv.org/abs/1707.06347

    :param env: (Gym environment or str) The environment to learn from (if registered in Gym, can be str)
    :param policy: (ActorCriticPolicy or str) The policy model to use (MlpPolicy, CnnPolicy, CnnLstmPolicy, ...)
    :param timesteps_per_actorbatch: (int) timesteps per actor per update
    :param clip_param: (float) clipping parameter epsilon
    :param entcoeff: (float) the entropy loss weight
    :param optim_epochs: (float) the optimizer's number of epochs
    :param optim_stepsize: (float) the optimizer's stepsize
    :param optim_batchsize: (int) the optimizer's batch size
    :param gamma: (float) discount factor
    :param lam: (float) advantage estimation
    :param adam_epsilon: (float) the epsilon value for the adam optimizer
    :param schedule: (str) The type of scheduler for the learning rate update ('linear', 'constant',
        'double_linear_con', 'middle_drop' or 'double_middle_drop')
    :param verbose: (int) the verbosity level: 0 none, 1 training information, 2 tensorflow debug
    :param tensorboard_log: (str) the log location for tensorboard (if None, no logging)
    :param _init_setup_model: (bool) Whether or not to build the network at the creation of the instance
    :param policy_kwargs: (dict) additional arguments to be passed to the policy on creation
    :param full_tensorboard_log: (bool) enable additional logging when using tensorboard
        WARNING: this logging can take a lot of space quickly
    :param seed: (int) Seed for the pseudo-random generators (python, numpy, tensorflow).
        If None (default), use random seed. Note that if you want completely deterministic
        results, you must set `n_cpu_tf_sess` to 1.
    :param n_cpu_tf_sess: (int) The number of threads for TensorFlow operations
        If None, the number of cpu of the current machine will be used.
    """

    def __init__(self, policy, env, gamma=0.99, timesteps_per_actorbatch=256, clip_param=0.2, entcoeff=0.01,
                 optim_epochs=4, optim_stepsize=1e-3, optim_batchsize=64, lam=0.95, adam_epsilon=1e-5,
                 schedule='linear', verbose=0, tensorboard_log=None, _init_setup_model=True,
                 policy_kwargs=None, full_tensorboard_log=False, seed=None, n_cpu_tf_sess=1):
        # Thin pass-through to PPO1; the imitation-specific behavior lives in
        # setup_model (fixed-variance policy support) and learn (MPI logging
        # plus periodic checkpointing).
        super().__init__(policy=policy,
                         env=env,
                         gamma=gamma,
                         timesteps_per_actorbatch=timesteps_per_actorbatch,
                         clip_param=clip_param,
                         entcoeff=entcoeff,
                         optim_epochs=optim_epochs,
                         optim_stepsize=optim_stepsize,
                         optim_batchsize=optim_batchsize,
                         lam=lam,
                         adam_epsilon=adam_epsilon,
                         schedule=schedule,
                         verbose=verbose,
                         tensorboard_log=tensorboard_log,
                         _init_setup_model=_init_setup_model,
                         policy_kwargs=policy_kwargs,
                         full_tensorboard_log=full_tensorboard_log,
                         seed=seed,
                         n_cpu_tf_sess=n_cpu_tf_sess)
        return

    def setup_model(self):
        """Builds the TF1 computation graph: policy networks, PPO losses and the MPI Adam optimizer."""
        with SetVerbosity(self.verbose):
            self.graph = tf.Graph()
            with self.graph.as_default():
                self.set_random_seed(self.seed)
                self.sess = tf_util.make_session(num_cpu=self.n_cpu_tf_sess, graph=self.graph)

                # Construct network for new policy
                self.policy_pi = self.policy(self.sess, self.observation_space, self.action_space, self.n_envs, 1,
                                             None, reuse=False, **self.policy_kwargs)

                # Network for old policy (frozen snapshot used in the ratio below)
                with tf.variable_scope("oldpi", reuse=False):
                    old_pi = self.policy(self.sess, self.observation_space, self.action_space, self.n_envs, 1,
                                         None, reuse=False, **self.policy_kwargs)

                with tf.variable_scope("loss", reuse=False):
                    # Target advantage function (if applicable)
                    atarg = tf.placeholder(dtype=tf.float32, shape=[None])

                    # Empirical return
                    ret = tf.placeholder(dtype=tf.float32, shape=[None])

                    # learning rate multiplier, updated with schedule
                    lrmult = tf.placeholder(name='lrmult', dtype=tf.float32, shape=[])

                    # Annealed clipping parameter epsilon (shrinks with the lr schedule)
                    clip_param = self.clip_param * lrmult

                    obs_ph = self.policy_pi.obs_ph
                    action_ph = self.policy_pi.pdtype.sample_placeholder([None])

                    kloldnew = old_pi.proba_distribution.kl(self.policy_pi.proba_distribution)
                    ent = self.policy_pi.proba_distribution.entropy()
                    meankl = tf.reduce_mean(kloldnew)
                    meanent = tf.reduce_mean(ent)
                    pol_entpen = (-self.entcoeff) * meanent

                    # pnew / pold (importance sampling ratio, computed in log space)
                    ratio = tf.exp(self.policy_pi.proba_distribution.logp(action_ph) -
                                   old_pi.proba_distribution.logp(action_ph))

                    # surrogate from conservative policy iteration
                    surr1 = ratio * atarg
                    surr2 = tf.clip_by_value(ratio, 1.0 - clip_param, 1.0 + clip_param) * atarg
                    # Fraction of samples whose ratio was clipped (diagnostic only)
                    clip_frac = tf.reduce_mean(tf.to_float(tf.greater(tf.abs(ratio - 1.0), clip_param)))

                    # PPO's pessimistic surrogate (L^CLIP)
                    pol_surr = - tf.reduce_mean(tf.minimum(surr1, surr2))
                    vf_loss = tf.reduce_mean(tf.square(self.policy_pi.value_flat - ret))
                    total_loss = pol_surr + pol_entpen + vf_loss
                    losses = [pol_surr, pol_entpen, vf_loss, meankl, meanent]
                    self.loss_names = ["pol_surr", "pol_entpen", "vf_loss", "kl", "ent"]

                    tf.summary.scalar('entropy_loss', pol_entpen)
                    tf.summary.scalar('policy_gradient_loss', pol_surr)
                    tf.summary.scalar('value_function_loss', vf_loss)
                    tf.summary.scalar('approximate_kullback-leibler', meankl)
                    tf.summary.scalar('clip_factor', clip_param)
                    tf.summary.scalar('loss', total_loss)
                    tf.summary.scalar('clip_frac', clip_frac)

                    self.params = tf_util.get_trainable_vars("model")

                    # Op that copies the current policy weights into the "oldpi" snapshot
                    self.assign_old_eq_new = tf_util.function(
                        [], [], updates=[tf.assign(oldv, newv) for (oldv, newv) in
                                         zipsame(tf_util.get_globals_vars("oldpi"), tf_util.get_globals_vars("model"))])

                with tf.variable_scope("Adam_mpi", reuse=False):
                    self.adam = MpiAdam(self.params, epsilon=self.adam_epsilon, sess=self.sess)

                with tf.variable_scope("input_info", reuse=False):
                    tf.summary.scalar('discounted_rewards', tf.reduce_mean(ret))
                    tf.summary.scalar('learning_rate', tf.reduce_mean(self.optim_stepsize))
                    tf.summary.scalar('advantage', tf.reduce_mean(atarg))
                    tf.summary.scalar('clip_range', tf.reduce_mean(self.clip_param))

                    if self.full_tensorboard_log:
                        tf.summary.histogram('discounted_rewards', ret)
                        tf.summary.histogram('learning_rate', self.optim_stepsize)
                        tf.summary.histogram('advantage', atarg)
                        tf.summary.histogram('clip_range', self.clip_param)
                        if tf_util.is_image(self.observation_space):
                            tf.summary.image('observation', obs_ph)
                        else:
                            tf.summary.histogram('observation', obs_ph)

                self.step = self.policy_pi.step
                self.proba_step = self.policy_pi.proba_step
                self.initial_state = self.policy_pi.initial_state

                tf_util.initialize(sess=self.sess)

                self.summary = tf.summary.merge_all()

                # lossandgrad returns [summary, flat gradient, *losses];
                # compute_losses returns the losses only (no gradient).
                self.lossandgrad = tf_util.function([obs_ph, old_pi.obs_ph, action_ph, atarg, ret, lrmult],
                                                    [self.summary, tf_util.flatgrad(total_loss, self.params)] + losses)
                self.compute_losses = tf_util.function([obs_ph, old_pi.obs_ph, action_ph, atarg, ret, lrmult],
                                                       losses)
        return

    def learn(self, total_timesteps, callback=None, log_interval=100, tb_log_name="PPO1",
              reset_num_timesteps=True, save_path=None, save_iters=20):
        """Runs MPI-synchronized PPO training with periodic checkpointing.

        :param total_timesteps: (int) total number of environment steps to train for
        :param callback: (BaseCallback or None) callback invoked during training
        :param log_interval: (int) unused here; kept for API compatibility with PPO1.learn
        :param tb_log_name: (str) run name used for tensorboard logging
        :param save_path: (str or None) checkpoint destination passed to self.save
        :param save_iters: (int) checkpoint every this many iterations (rank 0 only)
        :return: (PPOImitation) self
        """
        # Only rank 0 logs and saves; all ranks participate in the MPI collectives.
        is_root = (MPI.COMM_WORLD.Get_rank() == 0)

        new_tb_log = self._init_num_timesteps(reset_num_timesteps)
        callback = self._init_callback(callback)

        with SetVerbosity(self.verbose), TensorboardWriter(self.graph, self.tensorboard_log, tb_log_name, new_tb_log) \
                as writer:
            self._setup_learn()

            assert issubclass(self.policy, ActorCriticPolicy), "Error: the input policy for the PPO1 model must be " \
                                                               "an instance of common.policies.ActorCriticPolicy."

            with self.sess.as_default():
                self.adam.sync()
                callback.on_training_start(locals(), globals())

                # Prepare for rollouts
                seg_gen = traj_segment_generator(self.policy_pi, self.env, self.timesteps_per_actorbatch,
                                                 callback=callback)

                episodes_so_far = 0
                timesteps_so_far = 0
                iters_so_far = 0
                t_start = time.time()

                # rolling buffer for episode lengths
                len_buffer = deque(maxlen=100)
                # rolling buffer for episode rewards
                reward_buffer = deque(maxlen=100)

                while True:
                    if timesteps_so_far >= total_timesteps:
                        break

                    if self.schedule == 'constant':
                        cur_lrmult = 1.0
                    elif self.schedule == 'linear':
                        cur_lrmult = max(1.0 - float(timesteps_so_far) / total_timesteps, 0)
                    else:
                        raise NotImplementedError

                    if is_root:
                        logger.log("********** Iteration %i ************" % iters_so_far)

                    seg = seg_gen.__next__()

                    # Stop training early (triggered by the callback)
                    if not seg.get('continue_training', True):  # pytype: disable=attribute-error
                        break

                    # Uses the module-level override above (TD(lambda) with
                    # pre-zeroed next-step values at episode boundaries).
                    add_vtarg_and_adv(seg, self.gamma, self.lam)

                    # ob, ac, atarg, ret, td1ret = map(np.concatenate, (obs, acs, atargs, rets, td1rets))
                    observations, actions = seg["observations"], seg["actions"]
                    atarg, tdlamret = seg["adv"], seg["tdlamret"]

                    # true_rew is the reward without discount
                    if writer is not None:
                        total_episode_reward_logger(self.episode_reward,
                                                    seg["true_rewards"].reshape((self.n_envs, -1)),
                                                    seg["dones"].reshape((self.n_envs, -1)),
                                                    writer, self.num_timesteps)

                    # predicted value function before update
                    vpredbefore = seg["vpred"]

                    # standardized advantage function estimate
                    atarg = (atarg - atarg.mean()) / atarg.std()
                    dataset = Dataset(dict(ob=observations, ac=actions, atarg=atarg, vtarg=tdlamret),
                                      shuffle=not self.policy.recurrent)
                    optim_batchsize = self.optim_batchsize or observations.shape[0]

                    # set old parameter values to new parameter values
                    self.assign_old_eq_new(sess=self.sess)

                    if is_root:
                        logger.log("Optimizing...")
                        logger.log(fmt_row(13, self.loss_names))

                    # Here we do a bunch of optimization epochs over the data
                    for k in range(self.optim_epochs):
                        # list of tuples, each of which gives the loss for a minibatch
                        losses = []
                        for i, batch in enumerate(dataset.iterate_once(optim_batchsize)):
                            steps = (self.num_timesteps +
                                     k * optim_batchsize +
                                     int(i * (optim_batchsize / len(dataset.data_map))))
                            if writer is not None:
                                # run loss backprop with summary, and save run metadata
                                # (memory, compute time, ...) once every 10 epochs
                                if self.full_tensorboard_log and (1 + k) % 10 == 0:
                                    run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
                                    run_metadata = tf.RunMetadata()
                                    summary, grad, *newlosses = self.lossandgrad(batch["ob"], batch["ob"], batch["ac"],
                                                                                 batch["atarg"], batch["vtarg"],
                                                                                 cur_lrmult, sess=self.sess,
                                                                                 options=run_options,
                                                                                 run_metadata=run_metadata)
                                    writer.add_run_metadata(run_metadata, 'step%d' % steps)
                                else:
                                    summary, grad, *newlosses = self.lossandgrad(batch["ob"], batch["ob"], batch["ac"],
                                                                                 batch["atarg"], batch["vtarg"],
                                                                                 cur_lrmult, sess=self.sess)
                                writer.add_summary(summary, steps)
                            else:
                                _, grad, *newlosses = self.lossandgrad(batch["ob"], batch["ob"], batch["ac"],
                                                                       batch["atarg"], batch["vtarg"], cur_lrmult,
                                                                       sess=self.sess)
                            # MpiAdam averages the gradient across ranks internally.
                            self.adam.update(grad, self.optim_stepsize * cur_lrmult)
                            losses.append(newlosses)
                        if is_root:
                            logger.log(fmt_row(13, np.mean(losses, axis=0)))

                    if is_root:
                        logger.log("Evaluating losses...")
                    losses = []
                    for batch in dataset.iterate_once(optim_batchsize):
                        newlosses = self.compute_losses(batch["ob"], batch["ob"], batch["ac"], batch["atarg"],
                                                        batch["vtarg"], cur_lrmult, sess=self.sess)
                        losses.append(newlosses)
                    # mpi_moments is a collective call: every rank must reach it.
                    mean_losses, _, _ = mpi_moments(losses, axis=0)
                    if is_root:
                        logger.log(fmt_row(13, mean_losses))
                        for (loss_val, name) in zipsame(mean_losses, self.loss_names):
                            logger.record_tabular("loss_" + name, loss_val)
                        logger.record_tabular("ev_tdlam_before", explained_variance(vpredbefore, tdlamret))

                    # local values
                    lrlocal = (seg["ep_lens"], seg["ep_rets"])

                    # list of tuples
                    listoflrpairs = MPI.COMM_WORLD.allgather(lrlocal)
                    lens, rews = map(flatten_lists, zip(*listoflrpairs))
                    len_buffer.extend(lens)
                    reward_buffer.extend(rews)
                    if len(len_buffer) > 0:
                        logger.record_tabular("EpLenMean", np.mean(len_buffer))
                        logger.record_tabular("EpRewMean", np.mean(reward_buffer))
                    logger.record_tabular("EpThisIter", len(lens))
                    episodes_so_far += len(lens)
                    current_it_timesteps = MPI.COMM_WORLD.allreduce(seg["total_timestep"])
                    timesteps_so_far += current_it_timesteps
                    self.num_timesteps += current_it_timesteps

                    if is_root and (save_path is not None) and (iters_so_far % save_iters == 0):
                        self.save(save_path)

                    iters_so_far += 1
                    logger.record_tabular("EpisodesSoFar", episodes_so_far)
                    logger.record_tabular("TimestepsSoFar", self.num_timesteps)
                    logger.record_tabular("TimeElapsed", time.time() - t_start)
                    if self.verbose >= 1 and is_root:
                        logger.dump_tabular()

                callback.on_training_end()

        if is_root:
            # NOTE(review): unlike the periodic checkpoint above, this final
            # save is not guarded by `save_path is not None` -- confirm
            # self.save handles a None path before calling learn without one.
            self.save(save_path)

        return self
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/motion_imitation/learning/imitation_policies.py | motion_imitation/learning/imitation_policies.py | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
import numpy as np
import tensorflow as tf
from stable_baselines.common.distributions import make_proba_dist_type, spaces, \
DiagGaussianProbabilityDistributionType
from stable_baselines.common.policies import FeedForwardPolicy, nature_cnn, mlp_extractor, linear
def make_proba_dist_type(ac_space):
    """
    return an instance of ProbabilityDistributionType for the correct type of action space

    Box action spaces get the fixed-variance diagonal Gaussian defined below;
    every other space type is handled by the stock stable-baselines factory.

    :param ac_space: (Gym Space) the input action space
    :return: (ProbabilityDistributionType) the appropriate instance of a ProbabilityDistributionType
    """
    if isinstance(ac_space, spaces.Box):
        assert len(ac_space.shape) == 1, "Error: the action space must be a vector"
        return DiagGaussianFixedVarProbabilityDistributionType(ac_space.shape[0])
    # This function shadows the `make_proba_dist_type` imported at module
    # level, so a bare recursive call here would loop forever. Import the
    # library module and call its factory explicitly instead.
    from stable_baselines.common import distributions as _sb_distributions
    return _sb_distributions.make_proba_dist_type(ac_space)
class DiagGaussianFixedVarProbabilityDistributionType(DiagGaussianProbabilityDistributionType):
    """Diagonal Gaussian action distribution whose log-std is a fixed, non-trainable constant."""

    def __init__(self, size):
        super(DiagGaussianFixedVarProbabilityDistributionType, self).__init__(size)

    def proba_distribution_from_latent(self, pi_latent_vector, vf_latent_vector,
                                       pi_init_scale=1.0, pi_init_bias=0.0, pi_init_std=1.0,
                                       vf_init_scale=1.0, vf_init_bias=0.0):
        """Builds the fixed-variance policy distribution and the Q-value head from latent features."""
        action_mean = linear(pi_latent_vector, 'pi', self.size,
                             init_scale=pi_init_scale, init_bias=pi_init_bias)
        # The log standard deviation is frozen at log(pi_init_std) and never trained.
        fixed_logstd = tf.get_variable(name='pi/logstd', shape=[1, self.size],
                                       initializer=tf.constant_initializer(np.log(pi_init_std)),
                                       trainable=False)
        # Broadcast the (1, size) logstd to the batch by adding it to mean * 0.
        flat_params = tf.concat([action_mean, action_mean * 0.0 + fixed_logstd], axis=1)
        q_values = linear(vf_latent_vector, 'q', self.size,
                          init_scale=vf_init_scale, init_bias=vf_init_bias)
        return self.proba_distribution_from_flat(flat_params), action_mean, q_values
class ImitationPolicy(FeedForwardPolicy):
    """
    Policy object that implements actor critic, using a feed forward neural network.

    Identical in interface to FeedForwardPolicy, but the action distribution is
    built through the module-level make_proba_dist_type, i.e. with a fixed
    (non-trainable) standard deviation for Box action spaces.

    :param sess: (TensorFlow session) The current TensorFlow session
    :param ob_space: (Gym Space) The observation space of the environment
    :param ac_space: (Gym Space) The action space of the environment
    :param n_env: (int) The number of environments to run
    :param n_steps: (int) The number of steps to run for each environment
    :param n_batch: (int) The number of batch to run (n_envs * n_steps)
    :param reuse: (bool) If the policy is reusable or not
    :param layers: ([int]) (deprecated, use net_arch instead) The size of the Neural network for the policy
        (if None, default to [64, 64])
    :param net_arch: (list) Specification of the actor-critic policy network architecture (see mlp_extractor
        documentation for details).
    :param act_fun: (tf.func) the activation function to use in the neural network.
    :param cnn_extractor: (function (TensorFlow Tensor, ``**kwargs``): (TensorFlow Tensor)) the CNN feature extraction
    :param feature_extraction: (str) The feature extraction type ("cnn" or "mlp")
    :param kwargs: (dict) Extra keyword arguments for the nature CNN feature extraction
    """

    def __init__(self, sess, ob_space, ac_space, n_env, n_steps, n_batch, reuse=False, layers=None, net_arch=None,
                 act_fun=tf.tanh, cnn_extractor=nature_cnn, feature_extraction="mlp", **kwargs):
        # Deliberately skip FeedForwardPolicy.__init__ and call its parent:
        # this class rebuilds the network itself so it can swap in the
        # fixed-variance pdtype before any distribution is constructed.
        super(FeedForwardPolicy, self).__init__(sess, ob_space, ac_space, n_env, n_steps, n_batch, reuse=reuse,
                                                scale=(feature_extraction == "cnn"))

        # Override the default pdtype with the fixed-variance Gaussian factory
        # defined in this module.
        self._pdtype = make_proba_dist_type(ac_space)
        self._kwargs_check(feature_extraction, kwargs)

        if layers is not None:
            warnings.warn("Usage of the `layers` parameter is deprecated! Use net_arch instead "
                          "(it has a different semantics though).", DeprecationWarning)
            if net_arch is not None:
                warnings.warn("The new `net_arch` parameter overrides the deprecated `layers` parameter!",
                              DeprecationWarning)

        if net_arch is None:
            if layers is None:
                layers = [64, 64]
            net_arch = [dict(vf=layers, pi=layers)]

        with tf.variable_scope("model", reuse=reuse):
            if feature_extraction == "cnn":
                pi_latent = vf_latent = cnn_extractor(self.processed_obs, **kwargs)
            else:
                pi_latent, vf_latent = mlp_extractor(tf.layers.flatten(self.processed_obs), net_arch, act_fun)

            self._value_fn = linear(vf_latent, 'vf', 1)

            # pi_init_std=0.125 fixes the policy's exploration noise (see
            # DiagGaussianFixedVarProbabilityDistributionType above).
            self._proba_distribution, self._policy, self.q_value = \
                self.pdtype.proba_distribution_from_latent(pi_latent, vf_latent,
                                                           pi_init_scale=1.0, pi_init_bias=0.0, pi_init_std=0.125,
                                                           vf_init_scale=1.0, vf_init_bias=0.0)

        self._setup_init()
        return
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/motion_imitation/learning/__init__.py | motion_imitation/learning/__init__.py | python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false | |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/motion_imitation/learning/imitation_runners.py | motion_imitation/learning/imitation_runners.py | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gym
import numpy as np
from stable_baselines.common.vec_env import VecEnv
def traj_segment_generator(policy, env, horizon, reward_giver=None, gail=False, callback=None):
"""
Compute target value using TD(lambda) estimator, and advantage with GAE(lambda)
:param policy: (MLPPolicy) the policy
:param env: (Gym Environment) the environment
:param horizon: (int) the number of timesteps to run per batch
:param reward_giver: (TransitionClassifier) the reward predicter from obsevation and action
:param gail: (bool) Whether we are using this generator for standard trpo or with gail
:param callback: (BaseCallback)
:return: (dict) generator that returns a dict with the following keys:
- observations: (np.ndarray) observations
- rewards: (numpy float) rewards (if gail is used it is the predicted reward)
- true_rewards: (numpy float) if gail is used it is the original reward
- vpred: (numpy float) action logits
- dones: (numpy bool) dones (is end of episode, used for logging)
- episode_starts: (numpy bool)
True if first timestep of an episode, used for GAE
- actions: (np.ndarray) actions
- ep_rets: (float) cumulated current episode reward
- ep_lens: (int) the length of the current episode
- ep_true_rets: (float) the real environment reward
- continue_training: (bool) Whether to continue training
or stop early (triggered by the callback)
"""
# Check when using GAIL
assert not (gail and reward_giver is None), "You must pass a reward giver when using GAIL"
# Initialize state variables
step = 0
action = env.action_space.sample() # not used, just so we have the datatype
observation = env.reset()
cur_ep_ret = 0 # return in current episode
current_it_len = 0 # len of current iteration
current_ep_len = 0 # len of current episode
cur_ep_true_ret = 0
ep_true_rets = []
ep_rets = [] # returns of completed episodes in this segment
ep_lens = [] # Episode lengths
# Initialize history arrays
observations = np.array([observation for _ in range(horizon)])
true_rewards = np.zeros(horizon, 'float32')
rewards = np.zeros(horizon, 'float32')
vpreds = np.zeros(horizon, 'float32')
nextvpreds = np.zeros(horizon, 'float32')
episode_starts = np.zeros(horizon, 'bool')
dones = np.zeros(horizon, 'bool')
actions = np.array([action for _ in range(horizon)])
states = policy.initial_state
episode_start = True # marks if we're on first timestep of an episode
done = False
callback.on_rollout_start()
while True:
action, vpred, states, info = policy.step(observation.reshape(-1, *observation.shape), states, done)
# Slight weirdness here because we need value function at time T
# before returning segment [0, T-1] so we get the correct
# terminal value
if step > 0 and step % horizon == 0:
terminated = ("terminated" not in info) or info["terminated"]
if terminated:
last_vpred = 0.0
else:
last_vpred = policy.value(observation.reshape(-1, *observation.shape), states, done)
last_vpred = last_vpred[0]
nextvpreds[i] = last_vpred
callback.on_rollout_end()
yield {
"observations": observations,
"rewards": rewards,
"dones": dones,
"episode_starts": episode_starts,
"true_rewards": true_rewards,
"vpred": vpreds,
"nextvpreds": nextvpreds,
"actions": actions,
"ep_rets": ep_rets,
"ep_lens": ep_lens,
"ep_true_rets": ep_true_rets,
"total_timestep": current_it_len,
'continue_training': True
}
_, vpred, _, info = policy.step(observation.reshape(-1, *observation.shape))
# Be careful!!! if you change the downstream algorithm to aggregate
# several of these batches, then be sure to do a deepcopy
ep_rets = []
ep_true_rets = []
ep_lens = []
# Reset current iteration length
current_it_len = 0
callback.on_rollout_start()
i = step % horizon
observations[i] = observation
vpreds[i] = vpred[0]
actions[i] = action[0]
episode_starts[i] = episode_start
if (not episode_start) and (i > 0):
nextvpreds[i - 1] = vpred[0]
clipped_action = action
# Clip the actions to avoid out of bound error
if isinstance(env.action_space, gym.spaces.Box):
clipped_action = np.clip(action, env.action_space.low, env.action_space.high)
if gail:
reward = reward_giver.get_reward(observation, clipped_action[0])
observation, true_reward, done, info = env.step(clipped_action[0])
else:
observation, reward, done, info = env.step(clipped_action[0])
true_reward = reward
if callback is not None:
if callback.on_step() is False:
# We have to return everything so pytype does not complain
yield {
"observations": observations,
"rewards": rewards,
"dones": dones,
"episode_starts": episode_starts,
"true_rewards": true_rewards,
"vpred": vpreds,
"nextvpreds": nextvpreds,
"actions": actions,
"ep_rets": ep_rets,
"ep_lens": ep_lens,
"ep_true_rets": ep_true_rets,
"total_timestep": current_it_len,
'continue_training': False
}
return
rewards[i] = reward
true_rewards[i] = true_reward
dones[i] = done
episode_start = done
cur_ep_ret += reward
cur_ep_true_ret += true_reward
current_it_len += 1
current_ep_len += 1
if done:
terminated = ("terminated" not in info) or info["terminated"]
if terminated:
last_vpred = 0.0
else:
last_vpred = policy.value(observation.reshape(-1, *observation.shape), states, done)
last_vpred = last_vpred[0]
nextvpreds[i] = last_vpred
# Retrieve unnormalized reward if using Monitor wrapper
maybe_ep_info = info.get('episode')
if maybe_ep_info is not None:
if not gail:
cur_ep_ret = maybe_ep_info['r']
cur_ep_true_ret = maybe_ep_info['r']
ep_rets.append(cur_ep_ret)
ep_true_rets.append(cur_ep_true_ret)
ep_lens.append(current_ep_len)
cur_ep_ret = 0
cur_ep_true_ret = 0
current_ep_len = 0
if not isinstance(env, VecEnv):
observation = env.reset()
step += 1
return
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/motion_imitation/robots/minitaur_motor.py | motion_imitation/robots/minitaur_motor.py | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file implements an accurate motor model."""
import os
import inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(os.path.dirname(currentdir))
os.sys.path.insert(0, parentdir)
import numpy as np
from motion_imitation.robots import robot_config
VOLTAGE_CLIPPING = 50
OBSERVED_TORQUE_LIMIT = 5.7
MOTOR_VOLTAGE = 16.0
MOTOR_RESISTANCE = 0.186
MOTOR_TORQUE_CONSTANT = 0.0954
MOTOR_VISCOUS_DAMPING = 0
MOTOR_SPEED_LIMIT = MOTOR_VOLTAGE / (MOTOR_VISCOUS_DAMPING +
MOTOR_TORQUE_CONSTANT)
NUM_MOTORS = 8
MOTOR_POS_LB = 0.5
MOTOR_POS_UB = 2.5
class MotorModel(object):
"""The accurate motor model, which is based on the physics of DC motors.
The motor model support two types of control: position control and torque
control. In position control mode, a desired motor angle is specified, and a
torque is computed based on the internal motor model. When the torque control
is specified, a pwm signal in the range of [-1.0, 1.0] is converted to the
torque.
The internal motor model takes the following factors into consideration:
pd gains, viscous friction, back-EMF voltage and current-torque profile.
"""
def __init__(self,
kp=1.2,
kd=0,
torque_limits=None,
motor_control_mode=robot_config.MotorControlMode.POSITION):
self._kp = kp
self._kd = kd
self._torque_limits = torque_limits
self._motor_control_mode = motor_control_mode
self._resistance = MOTOR_RESISTANCE
self._voltage = MOTOR_VOLTAGE
self._torque_constant = MOTOR_TORQUE_CONSTANT
self._viscous_damping = MOTOR_VISCOUS_DAMPING
self._current_table = [0, 10, 20, 30, 40, 50, 60]
self._torque_table = [0, 1, 1.9, 2.45, 3.0, 3.25, 3.5]
self._strength_ratios = [1.0] * NUM_MOTORS
def set_strength_ratios(self, ratios):
"""Set the strength of each motors relative to the default value.
Args:
ratios: The relative strength of motor output. A numpy array ranging from
0.0 to 1.0.
"""
self._strength_ratios = np.array(ratios)
def set_motor_gains(self, kp, kd):
"""Set the gains of all motors.
These gains are PD gains for motor positional control. kp is the
proportional gain and kd is the derivative gain.
Args:
kp: proportional gain of the motors.
kd: derivative gain of the motors.
"""
self._kp = kp
self._kd = kd
def set_voltage(self, voltage):
self._voltage = voltage
def get_voltage(self):
return self._voltage
def set_viscous_damping(self, viscous_damping):
self._viscous_damping = viscous_damping
def get_viscous_dampling(self):
return self._viscous_damping
def convert_to_torque(self,
motor_commands,
motor_angle,
motor_velocity,
true_motor_velocity,
motor_control_mode=None):
"""Convert the commands (position control or pwm control) to torque.
Args:
motor_commands: The desired motor angle if the motor is in position
control mode. The pwm signal if the motor is in torque control mode.
motor_angle: The motor angle observed at the current time step. It is
actually the true motor angle observed a few milliseconds ago (pd
latency).
motor_velocity: The motor velocity observed at the current time step, it
is actually the true motor velocity a few milliseconds ago (pd latency).
true_motor_velocity: The true motor velocity. The true velocity is used to
compute back EMF voltage and viscous damping.
motor_control_mode: A MotorControlMode enum.
Returns:
actual_torque: The torque that needs to be applied to the motor.
observed_torque: The torque observed by the sensor.
"""
if not motor_control_mode:
motor_control_mode = self._motor_control_mode
if (motor_control_mode is robot_config.MotorControlMode.TORQUE) or (
motor_control_mode is robot_config.MotorControlMode.HYBRID):
raise ValueError("{} is not a supported motor control mode".format(
motor_control_mode))
kp = self._kp
kd = self._kd
if motor_control_mode is robot_config.MotorControlMode.PWM:
# The following implements a safety controller that softly enforces the
# joint angles to remain within safe region: If PD controller targeting
# the positive (negative) joint limit outputs a negative (positive)
# signal, the corresponding joint violates the joint constraint, so
# we should add the PD output to motor_command to bring it back to the
# safe region.
pd_max = -1 * kp * (motor_angle -
MOTOR_POS_UB) - kd / 2. * motor_velocity
pd_min = -1 * kp * (motor_angle -
MOTOR_POS_LB) - kd / 2. * motor_velocity
pwm = motor_commands + np.minimum(pd_max, 0) + np.maximum(pd_min, 0)
else:
pwm = -1 * kp * (motor_angle - motor_commands) - kd * motor_velocity
pwm = np.clip(pwm, -1.0, 1.0)
return self._convert_to_torque_from_pwm(pwm, true_motor_velocity)
def _convert_to_torque_from_pwm(self, pwm, true_motor_velocity):
"""Convert the pwm signal to torque.
Args:
pwm: The pulse width modulation.
true_motor_velocity: The true motor velocity at the current moment. It is
used to compute the back EMF voltage and the viscous damping.
Returns:
actual_torque: The torque that needs to be applied to the motor.
observed_torque: The torque observed by the sensor.
"""
observed_torque = np.clip(
self._torque_constant *
(np.asarray(pwm) * self._voltage / self._resistance),
-OBSERVED_TORQUE_LIMIT, OBSERVED_TORQUE_LIMIT)
if self._torque_limits is not None:
observed_torque = np.clip(observed_torque, -1.0 * self._torque_limits,
self._torque_limits)
# Net voltage is clipped at 50V by diodes on the motor controller.
voltage_net = np.clip(
np.asarray(pwm) * self._voltage -
(self._torque_constant + self._viscous_damping) *
np.asarray(true_motor_velocity), -VOLTAGE_CLIPPING, VOLTAGE_CLIPPING)
current = voltage_net / self._resistance
current_sign = np.sign(current)
current_magnitude = np.absolute(current)
# Saturate torque based on empirical current relation.
actual_torque = np.interp(current_magnitude, self._current_table,
self._torque_table)
actual_torque = np.multiply(current_sign, actual_torque)
actual_torque = np.multiply(self._strength_ratios, actual_torque)
if self._torque_limits is not None:
actual_torque = np.clip(actual_torque, -1.0 * self._torque_limits,
self._torque_limits)
return actual_torque, observed_torque
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/motion_imitation/robots/minitaur_pose_utils.py | motion_imitation/robots/minitaur_pose_utils.py | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions to calculate Minitaur's pose and motor angles."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import attr
import numpy as np
NUM_MOTORS = 8
NUM_LEGS = 4
MOTOR_SIGNS = (1, 1, -1, -1)
# Constants for the function swing_extend_to_motor_angles
EPS = 0.1
# Range of motion for the legs (does not allow pointing towards the body).
LEG_SWING_LIMIT_LOW = -math.pi / 2 + EPS
LEG_SWING_LIMIT_HIGH = 3 * math.pi / 2 - EPS
# Range of gap between motors for feasibility.
MOTORS_GAP_LIMIT_HIGH = 2 * math.pi - EPS
MOTORS_GAP_LIMIT_LOW = EPS
@attr.s
class MinitaurPose(object):
"""Default pose of the Minitaur."""
swing_angle_0 = attr.ib(type=float, default=0)
swing_angle_1 = attr.ib(type=float, default=0)
swing_angle_2 = attr.ib(type=float, default=0)
swing_angle_3 = attr.ib(type=float, default=0)
extension_angle_0 = attr.ib(type=float, default=0)
extension_angle_1 = attr.ib(type=float, default=0)
extension_angle_2 = attr.ib(type=float, default=0)
extension_angle_3 = attr.ib(type=float, default=0)
def motor_angles_to_leg_pose(motor_angles):
"""Convert motor angles to the leg pose.
A single leg pose is a tuple (swing, extension). The definition can be find
in:
Sim-to-Real: Learning Agile Locomotion For Quadruped Robot
Args:
motor_angles: A numpy array. Contains all eight motor angles for Minitaur.
Returns:
A numpy array. Contains the leg pose for all four legs: [swing_0, swing_1,
swing_2, swing_3, extension_0, extension_1, extension_2, extension_3]
"""
motor_angles = np.array(motor_angles)
swings = 0.5 * np.multiply(
np.array(MOTOR_SIGNS), (motor_angles[1::2] - motor_angles[::2]))
extensions = 0.5 * (motor_angles[::2] + motor_angles[1::2])
return np.concatenate((swings, extensions), axis=None)
def leg_pose_to_motor_angles(leg_pose):
"""Converts the leg pose to the motor angles.
Args:
leg_pose: A numpy array. Contains the leg pose for all four legs: [swing_0,
swing_1, swing_2, swing_3, extension_0, extension_1, extension_2,
extension_3]
Returns:
A numpy array. All eight motor angles.
"""
leg_pose = np.array(leg_pose)
# swings multiplied with the sign array.
signed_swings = np.multiply(np.array(MOTOR_SIGNS), leg_pose[0:NUM_LEGS])
extensions = leg_pose[NUM_LEGS:]
motor_angles = np.zeros(NUM_MOTORS)
motor_angles[1::2] = signed_swings + extensions
motor_angles[::2] = extensions - signed_swings
return motor_angles
# This method also does the same conversion, but 0 swing and 0 extension maps
# to a neutral standing still motor positions with motors at + or - pi. It also
# contains a safety layer so that the legs don't swing or extend too much to hit
# the body of the robot.
def leg_pose_to_motor_angles_with_half_pi_offset_and_safety(leg_pose):
"""Converts the swing extension poses to the motor angles with safety limits.
Args:
leg_pose: A numpy array. Contains the leg pose for all four legs: [swing_0,
extension_0, swing_1, extension_1, swing_2, extension_2, swing_3,
extension_3]
Returns:
A numpy array. All eight motor angles.
"""
motor_angles = []
for idx in range(4):
swing = leg_pose[idx * 2]
extend = leg_pose[idx * 2 + 1]
motor_angles.extend(swing_extend_to_motor_angles(idx, swing, extend))
return motor_angles
def swing_extend_to_motor_angles(leg_id, swing, extension, noise_stdev=0):
"""Swing - extension based leg model for minitaur.
Swing extension leg model calculates motor positions using 2 separate motions:
swing and extension. Swing rotates the whole leg by rotating both motors
equally towards same direction. Extension increases or decreases the length
of the leg by turning both motors equally in opposite direction.
This method also does the same conversion as leg_pose_to_motor_angles, but 0
swing and 0 extension maps to a neutral standing still motor positions with
motors at + or - pi.
Args:
leg_id: The id of the leg that the conversion is made for (0, 1, 2, 3).
swing: Swing degree for the leg (in radians). 0 means perpendicular to the
body).
extension: Extension level (length) of the leg, limited to [-1, 1].
noise_stdev: Standard deviation of the introduced noise at the motor
position level. Noise is turned off by default.
Returns:
motor0: Position for the first motor for that leg.
motor1: Position for the second motor for that leg.
Raises:
ValueError: In case calculated positions are outside the allowed boundaries.
"""
# Check if the leg_id is in valid range
if not 0 <= leg_id <= 3:
raise ValueError('leg {} does not exist for a quadruped.'.format(leg_id))
# Front legs can not swing too much towards the body.
if leg_id % 2 == 0:
swing = np.clip(swing, LEG_SWING_LIMIT_LOW, LEG_SWING_LIMIT_HIGH)
# Back legs can not swing too much towards the body (opposite direction).
else:
swing = np.clip(swing, -LEG_SWING_LIMIT_HIGH, -LEG_SWING_LIMIT_LOW)
# Check if the motors are too close or too far away to make it impossible
# for the physical robot.
gap = math.pi - 2 * extension
if gap < MOTORS_GAP_LIMIT_LOW or gap > MOTORS_GAP_LIMIT_HIGH:
top_extension = (math.pi - MOTORS_GAP_LIMIT_LOW) / 2.0
least_extension = (math.pi - MOTORS_GAP_LIMIT_HIGH) / 2.0
extension = np.clip(extension, least_extension, top_extension)
# Initialization to neutral standing position where both motors point to
# opposite directions
motor0 = math.pi / 2
motor1 = math.pi / 2
# Rotational move
if leg_id in (0, 1):
motor0 += swing
motor1 -= swing
elif leg_id in (2, 3):
motor0 -= swing
motor1 += swing
# Extension
motor0 += extension
motor1 += extension
# Add noise if requested.
if noise_stdev > 0:
motor0 += np.random.normal(0, noise_stdev)
motor1 += np.random.normal(0, noise_stdev)
return motor0, motor1
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/motion_imitation/robots/laikago_motor.py | motion_imitation/robots/laikago_motor.py | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Motor model for laikago."""
import os
import inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(os.path.dirname(currentdir))
os.sys.path.insert(0, parentdir)
import collections
import numpy as np
from motion_imitation.robots import robot_config
NUM_MOTORS = 12
MOTOR_COMMAND_DIMENSION = 5
# These values represent the indices of each field in the motor command tuple
POSITION_INDEX = 0
POSITION_GAIN_INDEX = 1
VELOCITY_INDEX = 2
VELOCITY_GAIN_INDEX = 3
TORQUE_INDEX = 4
class LaikagoMotorModel(object):
"""A simple motor model for Laikago.
When in POSITION mode, the torque is calculated according to the difference
between current and desired joint angle, as well as the joint velocity.
For more information about PD control, please refer to:
https://en.wikipedia.org/wiki/PID_controller.
The model supports a HYBRID mode in which each motor command can be a tuple
(desired_motor_angle, position_gain, desired_motor_velocity, velocity_gain,
torque).
"""
def __init__(self,
kp=60,
kd=1,
torque_limits=None,
motor_control_mode=robot_config.MotorControlMode.POSITION):
self._kp = kp
self._kd = kd
self._torque_limits = torque_limits
if torque_limits is not None:
if isinstance(torque_limits, (collections.Sequence, np.ndarray)):
self._torque_limits = np.asarray(torque_limits)
else:
self._torque_limits = np.full(NUM_MOTORS, torque_limits)
self._motor_control_mode = motor_control_mode
self._strength_ratios = np.full(NUM_MOTORS, 1)
def set_strength_ratios(self, ratios):
"""Set the strength of each motors relative to the default value.
Args:
ratios: The relative strength of motor output. A numpy array ranging from
0.0 to 1.0.
"""
self._strength_ratios = ratios
def set_motor_gains(self, kp, kd):
"""Set the gains of all motors.
These gains are PD gains for motor positional control. kp is the
proportional gain and kd is the derivative gain.
Args:
kp: proportional gain of the motors.
kd: derivative gain of the motors.
"""
self._kp = kp
self._kd = kd
def set_voltage(self, voltage):
pass
def get_voltage(self):
return 0.0
def set_viscous_damping(self, viscous_damping):
pass
def get_viscous_dampling(self):
return 0.0
def convert_to_torque(self,
motor_commands,
motor_angle,
motor_velocity,
true_motor_velocity,
motor_control_mode):
"""Convert the commands (position control or torque control) to torque.
Args:
motor_commands: The desired motor angle if the motor is in position
control mode. The pwm signal if the motor is in torque control mode.
motor_angle: The motor angle observed at the current time step. It is
actually the true motor angle observed a few milliseconds ago (pd
latency).
motor_velocity: The motor velocity observed at the current time step, it
is actually the true motor velocity a few milliseconds ago (pd latency).
true_motor_velocity: The true motor velocity. The true velocity is used to
compute back EMF voltage and viscous damping.
motor_control_mode: A MotorControlMode enum.
Returns:
actual_torque: The torque that needs to be applied to the motor.
observed_torque: The torque observed by the sensor.
"""
del true_motor_velocity
if not motor_control_mode:
motor_control_mode = self._motor_control_mode
if motor_control_mode is robot_config.MotorControlMode.PWM:
raise ValueError(
"{} is not a supported motor control mode".format(motor_control_mode))
# No processing for motor torques
if motor_control_mode is robot_config.MotorControlMode.TORQUE:
assert len(motor_commands) == NUM_MOTORS
motor_torques = self._strength_ratios * motor_commands
return motor_torques, motor_torques
desired_motor_angles = None
desired_motor_velocities = None
kp = None
kd = None
additional_torques = np.full(NUM_MOTORS, 0)
if motor_control_mode is robot_config.MotorControlMode.POSITION:
assert len(motor_commands) == NUM_MOTORS
kp = self._kp
kd = self._kd
desired_motor_angles = motor_commands
desired_motor_velocities = np.full(NUM_MOTORS, 0)
elif motor_control_mode is robot_config.MotorControlMode.HYBRID:
# The input should be a 60 dimension vector
assert len(motor_commands) == MOTOR_COMMAND_DIMENSION * NUM_MOTORS
kp = motor_commands[POSITION_GAIN_INDEX::MOTOR_COMMAND_DIMENSION]
kd = motor_commands[VELOCITY_GAIN_INDEX::MOTOR_COMMAND_DIMENSION]
desired_motor_angles = motor_commands[
POSITION_INDEX::MOTOR_COMMAND_DIMENSION]
desired_motor_velocities = motor_commands[
VELOCITY_INDEX::MOTOR_COMMAND_DIMENSION]
additional_torques = motor_commands[TORQUE_INDEX::MOTOR_COMMAND_DIMENSION]
else:
print("Undefined motor_control_mode=",motor_control_mode)
exit()
motor_torques = -1 * (kp * (motor_angle - desired_motor_angles)) - kd * (
motor_velocity - desired_motor_velocities) + additional_torques
motor_torques = self._strength_ratios * motor_torques
if self._torque_limits is not None:
if len(self._torque_limits) != len(motor_torques):
raise ValueError(
"Torque limits dimension does not match the number of motors.")
motor_torques = np.clip(motor_torques, -1.0 * self._torque_limits,
self._torque_limits)
return motor_torques, motor_torques
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/motion_imitation/robots/robot_config.py | motion_imitation/robots/robot_config.py | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The configuration parameters for our robots."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import enum
class MotorControlMode(enum.Enum):
"""The supported motor control modes."""
POSITION = 1
# Apply motor torques directly.
TORQUE = 2
# Apply a tuple (q, qdot, kp, kd, tau) for each motor. Here q, qdot are motor
# position and velocities. kp and kd are PD gains. tau is the additional
# motor torque. This is the most flexible control mode.
HYBRID = 3
# PWM mode is only availalbe for Minitaur
PWM = 4
class SafetyError(Exception):
pass
# Each hybrid action is a tuple (position, position_gain, velocity,
# velocity_gain, torque)
HYBRID_ACTION_DIMENSION = 5
class HybridActionIndex(enum.Enum):
# The index of each component within the hybrid action tuple.
POSITION = 0
POSITION_GAIN = 1
VELOCITY = 2
VELOCITY_GAIN = 3
TORQUE = 4
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/motion_imitation/robots/action_filter.py | motion_imitation/robots/action_filter.py | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Two types of filters which can be applied to policy output sequences.
1. Simple exponential filter
2. Butterworth filter - lowpass or bandpass
The implementation of the butterworth filter follows scipy's lfilter
https://github.com/scipy/scipy/blob/v1.2.1/scipy/signal/signaltools.py
We re-implement the logic in order to explicitly manage the y states
The filter implements::
a[0]*y[n] = b[0]*x[n] + b[1]*x[n-1] + ... + b[M]*x[n-M]
- a[1]*y[n-1] - ... - a[N]*y[n-N]
We assume M == N.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from absl import logging
import numpy as np
from scipy.signal import butter
ACTION_FILTER_ORDER = 2
ACTION_FILTER_LOW_CUT = 0.0
ACTION_FILTER_HIGH_CUT = 4.0
class ActionFilter(object):
"""Implements a generic lowpass or bandpass action filter."""
def __init__(self, a, b, order, num_joints, ftype='lowpass'):
"""Initializes filter.
Either one per joint or same for all joints.
Args:
a: filter output history coefficients
b: filter input coefficients
order: filter order
num_joints: robot DOF
ftype: filter type. 'lowpass' or 'bandpass'
"""
self.num_joints = num_joints
if isinstance(a, list):
self.a = a
self.b = b
else:
self.a = [a]
self.b = [b]
# Either a set of parameters per joint must be specified as a list
# Or one filter is applied to every joint
if not ((len(self.a) == len(self.b) == num_joints) or (
len(self.a) == len(self.b) == 1)):
raise ValueError('Incorrect number of filter values specified')
# Normalize by a[0]
for i in range(len(self.a)):
self.b[i] /= self.a[i][0]
self.a[i] /= self.a[i][0]
# Convert single filter to same format as filter per joint
if len(self.a) == 1:
self.a *= num_joints
self.b *= num_joints
self.a = np.stack(self.a)
self.b = np.stack(self.b)
if ftype == 'bandpass':
assert len(self.b[0]) == len(self.a[0]) == 2 * order + 1
self.hist_len = 2 * order
elif ftype == 'lowpass':
assert len(self.b[0]) == len(self.a[0]) == order + 1
self.hist_len = order
else:
raise ValueError('%s filter type not supported' % (ftype))
logging.info('Filter shapes: a: %s, b: %s', self.a.shape, self.b.shape)
logging.info('Filter type:%s', ftype)
self.yhist = collections.deque(maxlen=self.hist_len)
self.xhist = collections.deque(maxlen=self.hist_len)
self.reset()
def reset(self):
"""Resets the history buffers to 0."""
self.yhist.clear()
self.xhist.clear()
for _ in range(self.hist_len):
self.yhist.appendleft(np.zeros((self.num_joints, 1)))
self.xhist.appendleft(np.zeros((self.num_joints, 1)))
def filter(self, x):
"""Returns filtered x."""
xs = np.concatenate(list(self.xhist), axis=-1)
ys = np.concatenate(list(self.yhist), axis=-1)
y = np.multiply(x, self.b[:, 0]) + np.sum(
np.multiply(xs, self.b[:, 1:]), axis=-1) - np.sum(
np.multiply(ys, self.a[:, 1:]), axis=-1)
self.xhist.appendleft(x.reshape((self.num_joints, 1)).copy())
self.yhist.appendleft(y.reshape((self.num_joints, 1)).copy())
return y
def init_history(self, x):
x = np.expand_dims(x, axis=-1)
for i in range(self.hist_len):
self.xhist[i] = x
self.yhist[i] = x
class ActionFilterButter(ActionFilter):
  """Butterworth low-pass / band-pass action filter."""

  def __init__(self,
               lowcut=None,
               highcut=None,
               sampling_rate=None,
               order=ACTION_FILTER_ORDER,
               num_joints=None):
    """Initializes a butterworth filter.

    Either one per joint or same for all joints.

    Args:
      lowcut: list of strings defining the low cutoff frequencies.
        The list must contain either 1 element (same filter for all joints)
        or num_joints elements.
        0 for lowpass, > 0 for bandpass. Either all values must be 0
        or all > 0.
      highcut: list of strings defining the high cutoff frequencies.
        The list must contain either 1 element (same filter for all joints)
        or num_joints elements. All must be > 0.
        Defaults to [3.0] Hz when not provided.
      sampling_rate: frequency of samples in Hz.
      order: filter order.
      num_joints: robot DOF.

    Raises:
      ValueError: if the cutoff lists are inconsistent, a highcut is not
        positive, or sampling_rate/num_joints are missing.
    """
    # BUG FIX: the previous code unconditionally executed `highcut = [3.0]`,
    # silently discarding any caller-supplied value and making the parameter
    # a no-op. Keep 3.0 Hz as the default but honor explicit arguments.
    if highcut is None:
      highcut = [3.0]
    self.lowcut = ([float(x) for x in lowcut]
                   if lowcut is not None else [ACTION_FILTER_LOW_CUT])
    self.highcut = [float(x) for x in highcut]
    if len(self.lowcut) != len(self.highcut):
      raise ValueError('Number of lowcut and highcut filter values should '
                       'be the same')
    if sampling_rate is None:
      raise ValueError('sampling_rate should be provided.')
    if num_joints is None:
      raise ValueError('num_joints should be provided.')

    # All-zero lowcuts -> plain lowpass; all-positive -> bandpass. Mixing
    # the two per joint is not supported.
    if np.any(self.lowcut):
      if not np.all(self.lowcut):
        raise ValueError('All the filters must be of the same type: '
                         'lowpass or bandpass')
      self.ftype = 'bandpass'
    else:
      self.ftype = 'lowpass'

    a_coeffs = []
    b_coeffs = []
    for i, (l, h) in enumerate(zip(self.lowcut, self.highcut)):
      if h <= 0.0:
        raise ValueError('Highcut must be > 0')

      b, a = self.butter_filter(l, h, sampling_rate, order)
      logging.info(
          'Butterworth filter: joint: %d, lowcut: %f, highcut: %f, '
          'sampling rate: %d, order: %d, num joints: %d', i, l, h,
          sampling_rate, order, num_joints)
      b_coeffs.append(b)
      a_coeffs.append(a)

    super(ActionFilterButter, self).__init__(
        a_coeffs, b_coeffs, order, num_joints, self.ftype)

  def butter_filter(self, lowcut, highcut, fs, order=5):
    """Returns the coefficients of a butterworth filter.

    If lowcut = 0, the function returns the coefficients of a low pass
    filter. Otherwise, the coefficients of a band pass filter are returned.
    Highcut should be > 0.

    Args:
      lowcut: low cutoff frequency.
      highcut: high cutoff frequency.
      fs: sampling rate.
      order: filter order.

    Returns:
      b, a: parameters of a butterworth filter.
    """
    nyq = 0.5 * fs  # Nyquist frequency; scipy expects cutoffs normalized by it.
    low = lowcut / nyq
    high = highcut / nyq
    if low:
      b, a = butter(order, [low, high], btype='band')
    else:
      b, a = butter(order, [high], btype='low')
    return b, a
class ActionFilterExp(ActionFilter):
  """Filter by way of simple exponential smoothing.

  y = alpha * x + (1 - alpha) * previous_y
  """

  def __init__(self, alpha, num_joints):
    """Initialize the filter.

    Args:
      alpha: list of strings defining the alphas.
        The list must contain either 1 element (same filter for all joints)
        or num_joints elements.
        0 < alpha <= 1
      num_joints: robot DOF
    """
    self.alphas = [float(x) for x in alpha]
    # BUG FIX: '%d' cannot format a list of floats — the logging call raised
    # a formatting error. Use '%s' to log the whole list.
    logging.info('Exponential filter: alpha: %s', self.alphas)

    a_coeffs = []
    b_coeffs = []
    for a in self.alphas:
      # First-order IIR form of exponential smoothing:
      # y[n] = a*x[n] + (1 - a)*y[n-1]  =>  a = [1, a-1], b = [a, 0].
      a_coeffs.append(np.asarray([1., a - 1.]))
      b_coeffs.append(np.asarray([a, 0]))

    order = 1
    self.ftype = 'lowpass'
    super(ActionFilterExp, self).__init__(
        a_coeffs, b_coeffs, order, num_joints, self.ftype)
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/motion_imitation/robots/robot_pose_utils.py | motion_imitation/robots/robot_pose_utils.py | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file implements the robot specific pose tools."""
import os
import inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(os.path.dirname(currentdir))
os.sys.path.insert(0, parentdir)
import math
import attr
import numpy as np
from motion_imitation.robots import laikago_pose_utils
from motion_imitation.robots import minitaur_pose_utils
from motion_imitation.robots import laikago
# Positions of the abduction entries within a 12-element pose vector laid
# out as [abd, swing/hip, extend/knee] * 4 legs.
_ABDUCTION_ACTION_INDEXES = [0, 3, 6, 9]

# The default values used to give a neutral pose for minitaur.
_MINITAUR_DEFAULT_EXTENSION_POS = math.pi / 2
_MINITAUR_DEFAULT_SWING_POS = 0

# Laikago neutral stance angles and the multipliers that map (swing,
# extension) deltas onto its hip/knee joints.
_LAIKAGO_NEUTRAL_POSE_HIP_ANGLE = math.pi / 4
_LAIKAGO_NEUTRAL_POSE_KNEE_ANGLE = -math.pi / 2
_LAIKAGO_EXTENSION_CONVERSION_MULTIPLIER = 1.0
_LAIKAGO_SWING_CONVERSION_MULTIPLIER = -1.0

# Mini-cheetah equivalents; note the signs are mirrored relative to laikago.
_MINI_CHEETAH_NEUTRAL_POSE_HIP_ANGLE = -math.pi / 4
_MINI_CHEETAH_NEUTRAL_POSE_KNEE_ANGLE = math.pi / 2
_MINI_CHEETAH_EXTENSION_CONVERSION_MULTIPLIER = -1.0
_MINI_CHEETAH_SWING_CONVERSION_MULTIPLIER = 1.0
def get_neutral_motor_angles(robot_class):
  """Return a neutral (standing) pose for a given robot type.

  Args:
    robot_class: This returns the class (not the instance) for the robot.
      Currently it supports minitaur, laikago and mini-cheetah.

  Returns:
    Pose object for the given robot. It's either MinitaurPose, LaikagoPose
    or MiniCheetahPose.

  Raises:
    ValueError: If the given robot_class is different than the supported
      robots.
  """
  if str(robot_class) != str(laikago.Laikago):
    # Non-laikago robots expose their own neutral pose helper.
    return robot_class.get_neutral_motor_angles()
  # Assemble the neutral laikago pose: zero abduction plus a fixed hip and
  # knee bend on each of the four legs.
  pose_kwargs = {}
  for leg in range(4):
    pose_kwargs['abduction_angle_%d' % leg] = 0
    pose_kwargs['hip_angle_%d' % leg] = _LAIKAGO_NEUTRAL_POSE_HIP_ANGLE
    pose_kwargs['knee_angle_%d' % leg] = _LAIKAGO_NEUTRAL_POSE_KNEE_ANGLE
  neutral_pose = laikago_pose_utils.LaikagoPose(**pose_kwargs)
  return np.array(attr.astuple(neutral_pose))
def convert_leg_pose_to_motor_angles(robot_class, leg_poses):
  """Convert swing-extend coordinate space to motor angles for a robot type.

  Args:
    robot_class: This returns the class (not the instance) for the robot.
      Currently it supports minitaur, laikago and mini-cheetah.
    leg_poses: A list of leg poses in [swing,extend] or [abduction, swing,
      extend] space for all 4 legs. The order is [abd_0, swing_0, extend_0,
      abd_1, swing_1, extend_1, ...] or [swing_0, extend_0, swing_1,
      extend_1, ...]. Zero swing and zero extend gives a neutral standing
      pose for all the robots. For minitaur, the conversion is fully
      accurate, for laikago and mini-cheetah the conversion is approximate
      where swing is reflected to hip and extend is reflected to both knee
      and the hip.

  Returns:
    List of motor positions for the selected robot. The list include 8 or 12
    motor angles depending on the given robot type as an argument. Currently
    laikago and mini-cheetah has motors for abduction which does not exist
    for minitaur robot.

  Raises:
    ValueError: Conversion fails due to wrong inputs.
  """
  if len(leg_poses) not in [8, 12]:
    raise ValueError("Dimension of the leg pose provided is not 8 or 12.")
  neutral_motor_angles = get_neutral_motor_angles(robot_class)
  # BUG FIX: operate on a copy so the caller's list is not mutated in place
  # (the previous code inserted into / deleted from `leg_poses` directly).
  poses = list(leg_poses)
  # If it is a robot with 12 motors but the provided leg pose does not
  # contain abduction, extend the pose to include (zero) abduction.
  if len(neutral_motor_angles) == 12 and len(poses) == 8:
    for i in _ABDUCTION_ACTION_INDEXES:
      poses.insert(i, 0)
  # If the robot does not have abduction (minitaur) but the input contains
  # them, ignore the abduction angles for the conversion.
  elif len(neutral_motor_angles) == 8 and len(poses) == 12:
    del poses[::3]
  if str(robot_class) == str(laikago.Laikago):
    # For laikago the (abduction, swing, extend) pose maps directly onto the
    # motor angles. The swing/extension scale constants previously assigned
    # here were never used (dead code) and have been removed.
    motor_angles = poses
  else:
    motor_angles = robot_class.convert_leg_pose_to_motor_angles(poses)
  return motor_angles
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/motion_imitation/robots/a1_robot_velocity_estimator.py | motion_imitation/robots/a1_robot_velocity_estimator.py | """Estimates base velocity for A1 robot from accelerometer readings."""
import numpy as np
from filterpy.kalman import KalmanFilter
from motion_imitation.utilities.moving_window_filter import MovingWindowFilter
class VelocityEstimator:
  """Estimates base velocity of A1 robot.

  The velocity estimator consists of 2 parts:
  1) A state estimator for CoM velocity.

  Two sources of information are used:
  The integrated reading of accelerometer and the velocity estimation from
  contact legs. The readings are fused together using a Kalman Filter.

  2) A moving average filter to smooth out velocity readings
  """
  def __init__(self,
               robot,
               accelerometer_variance=0.1,
               sensor_variance=0.1,
               initial_variance=0.1,
               moving_window_filter_size=120):
    """Initiates the velocity estimator.

    See filterpy documentation in the link below for more details.
    https://filterpy.readthedocs.io/en/latest/kalman/KalmanFilter.html

    Args:
      robot: the robot class for velocity estimation.
      accelerometer_variance: noise estimation for accelerometer reading.
      sensor_variance: noise estimation for motor velocity reading.
      initial_variance: covariance estimation of initial state.
      moving_window_filter_size: size of the moving-average window used to
        smooth each component of the estimated velocity.
    """
    self.robot = robot

    # 3-state Kalman filter over base velocity (x, y, z); the control input
    # u is the integrated, gravity-compensated accelerometer reading.
    self.filter = KalmanFilter(dim_x=3, dim_z=3, dim_u=3)
    self.filter.x = np.zeros(3)
    self._initial_variance = initial_variance
    self.filter.P = np.eye(3) * self._initial_variance  # State covariance
    self.filter.Q = np.eye(3) * accelerometer_variance  # Process noise
    self.filter.R = np.eye(3) * sensor_variance  # Measurement noise

    self.filter.H = np.eye(3)  # measurement function (y=H*x)
    self.filter.F = np.eye(3)  # state transition matrix
    self.filter.B = np.eye(3)  # control transition matrix

    self._window_size = moving_window_filter_size
    self.moving_window_filter_x = MovingWindowFilter(
        window_size=self._window_size)
    self.moving_window_filter_y = MovingWindowFilter(
        window_size=self._window_size)
    self.moving_window_filter_z = MovingWindowFilter(
        window_size=self._window_size)
    self._estimated_velocity = np.zeros(3)
    self._last_timestamp = 0

  def reset(self):
    """Resets the Kalman state, the smoothing windows and the timestamp."""
    self.filter.x = np.zeros(3)
    self.filter.P = np.eye(3) * self._initial_variance
    self.moving_window_filter_x = MovingWindowFilter(
        window_size=self._window_size)
    self.moving_window_filter_y = MovingWindowFilter(
        window_size=self._window_size)
    self.moving_window_filter_z = MovingWindowFilter(
        window_size=self._window_size)
    self._last_timestamp = 0

  def _compute_delta_time(self, current_time):
    """Returns seconds elapsed since the previous update.

    Falls back to the robot's nominal time step on the first call.
    """
    if self._last_timestamp == 0.:
      # First timestamp received, return an estimated delta_time.
      delta_time_s = self.robot.time_step
    else:
      delta_time_s = current_time - self._last_timestamp
    self._last_timestamp = current_time
    return delta_time_s

  def update(self, current_time):
    """Propagate current state estimate with new accelerometer reading."""
    delta_time_s = self._compute_delta_time(current_time)
    sensor_acc = self.robot.GetBaseAcceleration()
    base_orientation = self.robot.GetBaseOrientation()
    rot_mat = self.robot.pybullet_client.getMatrixFromQuaternion(
        base_orientation)
    rot_mat = np.array(rot_mat).reshape((3, 3))
    # Rotate the accelerometer reading out of the base frame and remove the
    # gravity component before integrating it as the Kalman control input.
    calibrated_acc = rot_mat.dot(sensor_acc) + np.array([0., 0., -9.8])
    self.filter.predict(u=calibrated_acc * delta_time_s)

    # Correct estimation using contact legs
    observed_velocities = []
    foot_contact = self.robot.GetFootContacts()
    for leg_id in range(4):
      if foot_contact[leg_id]:
        jacobian = self.robot.ComputeJacobian(leg_id)
        # Only pick the jacobian related to joint motors
        joint_velocities = self.robot.GetMotorVelocities()[
            leg_id * 3:(leg_id + 1) * 3]
        leg_velocity_in_base_frame = jacobian.dot(joint_velocities)
        # Stance feet are treated as stationary, so the base moves opposite
        # to the computed foot velocity.
        base_velocity_in_base_frame = -leg_velocity_in_base_frame[:3]
        observed_velocities.append(rot_mat.dot(base_velocity_in_base_frame))

    if observed_velocities:
      # Fuse all stance-leg estimates into a single averaged measurement.
      observed_velocities = np.mean(observed_velocities, axis=0)
      self.filter.update(observed_velocities)

    vel_x = self.moving_window_filter_x.calculate_average(self.filter.x[0])
    vel_y = self.moving_window_filter_y.calculate_average(self.filter.x[1])
    vel_z = self.moving_window_filter_z.calculate_average(self.filter.x[2])
    self._estimated_velocity = np.array([vel_x, vel_y, vel_z])

  @property
  def estimated_velocity(self):
    """Latest smoothed velocity estimate as a length-3 numpy array (copy)."""
    return self._estimated_velocity.copy()
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/motion_imitation/robots/minitaur.py | motion_imitation/robots/minitaur.py | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file implements the functionalities of a minitaur using pybullet."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(os.path.dirname(currentdir))
os.sys.path.insert(0, parentdir)
import collections
import copy
import math
import re
import numpy as np
from motion_imitation.robots import minitaur_constants
from motion_imitation.robots import minitaur_motor
from motion_imitation.robots import robot_config
from motion_imitation.robots import action_filter
from motion_imitation.robots import kinematics
# Anchor points (in the knee link frames) for the point-to-point constraint
# that ties each leg's left and right knee links together; see
# _ResetPoseForLeg.
KNEE_CONSTRAINT_POINT_RIGHT = [0, 0.005, 0.2]
KNEE_CONSTRAINT_POINT_LEFT = [0, 0.01, 0.2]
# Overheat protection thresholds: a motor is shut down after exerting more
# than OVERHEAT_SHUTDOWN_TORQUE for OVERHEAT_SHUTDOWN_TIME seconds.
OVERHEAT_SHUTDOWN_TORQUE = 2.45
OVERHEAT_SHUTDOWN_TIME = 1.0
LEG_POSITION = ["front_left", "back_left", "front_right", "back_right"]
# Each leg has a left ("L") and right ("R") motor; 8 motors total.
MOTOR_NAMES = [
    "motor_front_leftL_joint", "motor_front_leftR_joint",
    "motor_back_leftL_joint", "motor_back_leftR_joint",
    "motor_front_rightL_joint", "motor_front_rightR_joint",
    "motor_back_rightL_joint", "motor_back_rightR_joint"
]
# Regexes used by _BuildUrdfIds to classify URDF joints by name. Note the
# bracket pattern also matches the generic motor pattern, so match order
# matters there.
_CHASSIS_NAME_PATTERN = re.compile(r"chassis\D*center")
_MOTOR_NAME_PATTERN = re.compile(r"motor\D*joint")
_KNEE_NAME_PATTERN = re.compile(r"knee\D*")
_BRACKET_NAME_PATTERN = re.compile(r"motor\D*_bracket_joint")
_LEG_NAME_PATTERN1 = re.compile(r"hip\D*joint")
_LEG_NAME_PATTERN2 = re.compile(r"hip\D*link")
_LEG_NAME_PATTERN3 = re.compile(r"motor\D*link")
# One stddev per observation group; zero = noiseless sensors (see the
# observation_noise_stdev constructor argument for the ordering).
SENSOR_NOISE_STDDEV = (0.0,) * 6
# Sign/offset corrections between simulated and real motor conventions.
MINITAUR_DEFAULT_MOTOR_DIRECTIONS = (-1, -1, -1, -1, 1, 1, 1, 1)
MINITAUR_DEFAULT_MOTOR_OFFSETS = (0, 0, 0, 0, 0, 0, 0, 0)
MINITAUR_NUM_MOTORS = 8
TWO_PI = 2 * math.pi
MINITAUR_DOFS_PER_LEG = 2
_UNIT_QUATERNION = (0, 0, 0, 1)
_GRAVITY_ACCELERATION_OFFSET = (0, 0, 10)
def MapToMinusPiToPi(angles):
  """Maps a list of angles to [-pi, pi].

  Args:
    angles: A list of angles in rad.

  Returns:
    A list of angle mapped to [-pi, pi].
  """
  two_pi = 2.0 * math.pi
  wrapped = copy.deepcopy(angles)
  for index, angle in enumerate(angles):
    remainder = math.fmod(angle, two_pi)
    # fmod keeps the sign of its input, so fold the result back into the
    # [-pi, pi) range explicitly.
    if remainder >= math.pi:
      remainder -= two_pi
    elif remainder < -math.pi:
      remainder += two_pi
    wrapped[index] = remainder
  return wrapped
class Minitaur(object):
  """The minitaur class that simulates a quadruped robot from Ghost Robotics."""

  # Default spawn poses: on the ground for normal runs, raised when the
  # robot is hung on the debug rack (see the `on_rack` constructor flag).
  INIT_POSITION = [0, 0, .2]
  INIT_RACK_POSITION = [0, 0, 1]
  INIT_ORIENTATION = [0, 0, 0, 1]
def __init__(self,
pybullet_client,
num_motors=MINITAUR_NUM_MOTORS,
dofs_per_leg=MINITAUR_DOFS_PER_LEG,
time_step=0.01,
action_repeat=1,
self_collision_enabled=False,
motor_control_mode=robot_config.MotorControlMode.POSITION,
motor_model_class=minitaur_motor.MotorModel,
motor_kp=1.0,
motor_kd=0.02,
motor_torque_limits=None,
pd_latency=0.0,
control_latency=0.0,
observation_noise_stdev=SENSOR_NOISE_STDDEV,
motor_overheat_protection=False,
motor_direction=MINITAUR_DEFAULT_MOTOR_DIRECTIONS,
motor_offset=MINITAUR_DEFAULT_MOTOR_OFFSETS,
on_rack=False,
reset_at_current_position=False,
reset_func_name="_PybulletReset",
sensors=None,
enable_action_interpolation=False,
enable_action_filter=False,
reset_time=-1):
"""Constructs a minitaur and reset it to the initial states.
Args:
pybullet_client: The instance of BulletClient to manage different
simulations.
num_motors: The number of the motors on the robot.
dofs_per_leg: The number of degrees of freedom for each leg.
time_step: The time step of the simulation.
action_repeat: The number of ApplyAction() for each control step.
self_collision_enabled: Whether to enable self collision.
motor_control_mode: Enum. Can either be POSITION, TORQUE, or HYBRID.
motor_model_class: We can choose from simple pd model to more accurate DC
motor models.
motor_kp: proportional gain for the motors.
motor_kd: derivative gain for the motors.
motor_torque_limits: Torque limits for the motors. Can be a single float
or a list of floats specifying different limits for different robots. If
not provided, the default limit of the robot is used.
pd_latency: The latency of the observations (in seconds) used to calculate
PD control. On the real hardware, it is the latency between the
microcontroller and the motor controller.
control_latency: The latency of the observations (in second) used to
calculate action. On the real hardware, it is the latency from the motor
controller, the microcontroller to the host (Nvidia TX2).
observation_noise_stdev: The standard deviation of a Gaussian noise model
for the sensor. It should be an array for separate sensors in the
following order [motor_angle, motor_velocity, motor_torque,
base_roll_pitch_yaw, base_angular_velocity, base_linear_acceleration]
motor_overheat_protection: Whether to shutdown the motor that has exerted
large torque (OVERHEAT_SHUTDOWN_TORQUE) for an extended amount of time
(OVERHEAT_SHUTDOWN_TIME). See ApplyAction() in minitaur.py for more
details.
motor_direction: A list of direction values, either 1 or -1, to compensate
the axis difference of motors between the simulation and the real robot.
motor_offset: A list of offset value for the motor angles. This is used to
compensate the angle difference between the simulation and the real
robot.
on_rack: Whether to place the minitaur on rack. This is only used to debug
the walking gait. In this mode, the minitaur's base is hanged midair so
that its walking gait is clearer to visualize.
reset_at_current_position: Whether to reset the minitaur at the current
position and orientation. This is for simulating the reset behavior in
the real world.
sensors: a list of sensors that are attached to the robot.
enable_action_interpolation: Whether to interpolate the current action
with the previous action in order to produce smoother motions
enable_action_filter: Boolean specifying if a lowpass filter should be
used to smooth actions.
"""
self.num_motors = num_motors
self.num_legs = self.num_motors // dofs_per_leg
self._pybullet_client = pybullet_client
self._action_repeat = action_repeat
self._self_collision_enabled = self_collision_enabled
self._motor_direction = motor_direction
self._motor_offset = motor_offset
self._observed_motor_torques = np.zeros(self.num_motors)
self._applied_motor_torques = np.zeros(self.num_motors)
self._max_force = 3.5
self._pd_latency = pd_latency
self._control_latency = control_latency
self._observation_noise_stdev = observation_noise_stdev
self._observation_history = collections.deque(maxlen=100)
self._control_observation = []
self._chassis_link_ids = [-1]
self._leg_link_ids = []
self._motor_link_ids = []
self._foot_link_ids = []
self._motor_overheat_protection = motor_overheat_protection
self._on_rack = on_rack
self._reset_at_current_position = reset_at_current_position
self._reset_func = getattr(self, reset_func_name)
self.SetAllSensors(sensors if sensors is not None else list())
self._is_safe = True
self._enable_action_interpolation = enable_action_interpolation
self._enable_action_filter = enable_action_filter
self._last_action = None
self._velocity = np.zeros((3,))
self._prev_velocity = np.zeros((3,))
self._accelerometer_reading = np.zeros((3,))
self._last_state_time = 0
if not motor_model_class:
raise ValueError("Must provide a motor model class!")
if self._on_rack and self._reset_at_current_position:
raise ValueError("on_rack and reset_at_current_position "
"cannot be enabled together")
if isinstance(motor_kp, (collections.Sequence, np.ndarray)):
self._motor_kps = np.asarray(motor_kp)
else:
self._motor_kps = np.full(num_motors, motor_kp)
if isinstance(motor_kd, (collections.Sequence, np.ndarray)):
self._motor_kds = np.asarray(motor_kd)
else:
self._motor_kds = np.full(num_motors, motor_kd)
if isinstance(motor_torque_limits, (collections.Sequence, np.ndarray)):
self._motor_torque_limits = np.asarray(motor_torque_limits)
elif motor_torque_limits is None:
self._motor_torque_limits = None
else:
self._motor_torque_limits = motor_torque_limits
self._motor_control_mode = motor_control_mode
self._motor_model = motor_model_class(
kp=motor_kp,
kd=motor_kd,
torque_limits=self._motor_torque_limits,
motor_control_mode=motor_control_mode)
self.time_step = time_step
self._step_counter = 0
# This also includes the time spent during the Reset motion.
self._state_action_counter = 0
_, self._init_orientation_inv = self._pybullet_client.invertTransform(
position=[0, 0, 0], orientation=self._GetDefaultInitOrientation())
if self._enable_action_filter:
self._action_filter = self._BuildActionFilter()
# reset_time=-1.0 means skipping the reset motion.
# See Reset for more details.
self.Reset(reset_time=reset_time)
self.ReceiveObservation()
def GetTimeSinceReset(self):
return self._step_counter * self.time_step
  def _StepInternal(self, action, motor_control_mode):
    """Runs one low-level simulation step: apply action, simulate, observe."""
    self.ApplyAction(action, motor_control_mode)
    self._pybullet_client.stepSimulation()
    self.ReceiveObservation()
    # Counts every simulation substep, including those during Reset.
    self._state_action_counter += 1
def Step(self, action, control_mode=None):
"""Steps simulation."""
if self._enable_action_filter:
action = self._FilterAction(action)
if control_mode==None:
control_mode = self._motor_control_mode
for i in range(self._action_repeat):
proc_action = self.ProcessAction(action, i)
self._StepInternal(proc_action, control_mode)
self._step_counter += 1
self._last_action = action
  def Terminate(self):
    """Shutdown hook; a no-op for the simulated robot."""
    pass
def GetFootLinkIDs(self):
"""Get list of IDs for all foot links."""
return self._foot_link_ids
def _RecordMassInfoFromURDF(self):
"""Records the mass information from the URDF file."""
self._base_mass_urdf = []
for chassis_id in self._chassis_link_ids:
self._base_mass_urdf.append(
self._pybullet_client.getDynamicsInfo(self.quadruped, chassis_id)[0])
self._leg_masses_urdf = []
for leg_id in self._leg_link_ids:
self._leg_masses_urdf.append(
self._pybullet_client.getDynamicsInfo(self.quadruped, leg_id)[0])
for motor_id in self._motor_link_ids:
self._leg_masses_urdf.append(
self._pybullet_client.getDynamicsInfo(self.quadruped, motor_id)[0])
def _RecordInertiaInfoFromURDF(self):
"""Record the inertia of each body from URDF file."""
self._link_urdf = []
num_bodies = self._pybullet_client.getNumJoints(self.quadruped)
for body_id in range(-1, num_bodies): # -1 is for the base link.
inertia = self._pybullet_client.getDynamicsInfo(self.quadruped,
body_id)[2]
self._link_urdf.append(inertia)
# We need to use id+1 to index self._link_urdf because it has the base
# (index = -1) at the first element.
self._base_inertia_urdf = [
self._link_urdf[chassis_id + 1]
for chassis_id in self._chassis_link_ids
]
self._leg_inertia_urdf = [
self._link_urdf[leg_id + 1] for leg_id in self._leg_link_ids
]
self._leg_inertia_urdf.extend(
[self._link_urdf[motor_id + 1] for motor_id in self._motor_link_ids])
def _BuildJointNameToIdDict(self):
num_joints = self._pybullet_client.getNumJoints(self.quadruped)
self._joint_name_to_id = {}
for i in range(num_joints):
joint_info = self._pybullet_client.getJointInfo(self.quadruped, i)
self._joint_name_to_id[joint_info[1].decode("UTF-8")] = joint_info[0]
def _BuildUrdfIds(self):
"""Build the link Ids from its name in the URDF file.
Raises:
ValueError: Unknown category of the joint name.
"""
num_joints = self._pybullet_client.getNumJoints(self.quadruped)
self._chassis_link_ids = [-1]
# The self._leg_link_ids include both the upper and lower links of the leg.
self._leg_link_ids = []
self._motor_link_ids = []
self._foot_link_ids = []
self._bracket_link_ids = []
for i in range(num_joints):
joint_info = self._pybullet_client.getJointInfo(self.quadruped, i)
joint_name = joint_info[1].decode("UTF-8")
joint_id = self._joint_name_to_id[joint_name]
if _CHASSIS_NAME_PATTERN.match(joint_name):
self._chassis_link_ids.append(joint_id)
elif _BRACKET_NAME_PATTERN.match(joint_name):
self._bracket_link_ids.append(joint_id)
elif _MOTOR_NAME_PATTERN.match(joint_name):
self._motor_link_ids.append(joint_id)
elif _KNEE_NAME_PATTERN.match(joint_name):
self._foot_link_ids.append(joint_id)
elif (_LEG_NAME_PATTERN1.match(joint_name)
or _LEG_NAME_PATTERN2.match(joint_name)
or _LEG_NAME_PATTERN3.match(joint_name)):
self._leg_link_ids.append(joint_id)
else:
raise ValueError("Unknown category of joint %s" % joint_name)
self._leg_link_ids.extend(self._foot_link_ids)
self._chassis_link_ids.sort()
self._motor_link_ids.sort()
self._foot_link_ids.sort()
self._leg_link_ids.sort()
self._bracket_link_ids.sort()
def _RemoveDefaultJointDamping(self):
num_joints = self._pybullet_client.getNumJoints(self.quadruped)
for i in range(num_joints):
joint_info = self._pybullet_client.getJointInfo(self.quadruped, i)
self._pybullet_client.changeDynamics(joint_info[0],
-1,
linearDamping=0,
angularDamping=0)
def _BuildMotorIdList(self):
self._motor_id_list = [
self._joint_name_to_id[motor_name]
for motor_name in self._GetMotorNames()
]
def _CreateRackConstraint(self, init_position, init_orientation):
"""Create a constraint that keeps the chassis at a fixed frame.
This frame is defined by init_position and init_orientation.
Args:
init_position: initial position of the fixed frame.
init_orientation: initial orientation of the fixed frame in quaternion
format [x,y,z,w].
Returns:
Return the constraint id.
"""
fixed_constraint = self._pybullet_client.createConstraint(
parentBodyUniqueId=self.quadruped,
parentLinkIndex=-1,
childBodyUniqueId=-1,
childLinkIndex=-1,
jointType=self._pybullet_client.JOINT_FIXED,
jointAxis=[0, 0, 0],
parentFramePosition=[0, 0, 0],
childFramePosition=init_position,
childFrameOrientation=init_orientation)
return fixed_constraint
  def IsObservationValid(self):
    """Whether the observation is valid for the current time step.

    In simulation, observations are always valid. In real hardware, it may
    not be valid from time to time when communication error happens between
    the Nvidia TX2 and the microcontroller.

    Returns:
      Whether the observation is valid for the current time step.
    """
    # Simulated observations never drop, so this is unconditionally True.
    return True
def _PybulletReset(self, default_motor_angles, reset_time):
self._pybullet_client.resetBasePositionAndOrientation(
self.quadruped, self._GetDefaultInitPosition(),
self._GetDefaultInitOrientation())
self._pybullet_client.resetBaseVelocity(self.quadruped, [0, 0, 0],
[0, 0, 0])
self.ResetPose(add_constraint=False)
self._SettleDownForReset(default_motor_angles, reset_time)
def _SafeJointsReset(self, default_motor_angles=None, reset_time=None):
"""Moves joints within bounds."""
del default_motor_angles
del reset_time
self._is_safe = True
self.ReceiveObservation()
target_angles = np.clip(self.GetMotorAngles(),
self._joint_angle_lower_limits + 0.1,
self._joint_angle_upper_limits - 0.1)
upper = self._joint_angle_upper_limits - 0.03
lower = self._joint_angle_lower_limits + 0.03
def _AreAnglesInvalid():
return (any(self.GetTrueMotorAngles() > upper) or
any(self.GetTrueMotorAngles() < lower))
max_steps = 100
steps = 0
while _AreAnglesInvalid() and self._is_safe and steps < max_steps:
# In A1._ValidateMotorStates, invalid torques and velocities will be
# caught but invalid angles ignored because we are currently resetting.
self.Step(target_angles, robot_config.MotorControlMode.POSITION)
steps += 1
  def Reset(self, reload_urdf=True, default_motor_angles=None, reset_time=3.0):
    """Reset the minitaur to its initial states.

    Args:
      reload_urdf: Whether to reload the urdf file. If not, Reset() just
        place the minitaur back to its starting position.
      default_motor_angles: The default motor angles. If it is None,
        minitaur will hold a default pose (motor angle math.pi / 2) for 100
        steps. In torque control mode, the phase of holding the default pose
        is skipped.
      reset_time: The duration (in seconds) to hold the default motor
        angles. If reset_time <= 0 or in torque control mode, the phase of
        holding the default pose is skipped.
    """
    if reload_urdf:
      # Full reset: re-import the model and rebuild every cached id/mass.
      self._LoadRobotURDF()
      if self._on_rack:
        self.rack_constraint = (self._CreateRackConstraint(
            self._GetDefaultInitPosition(), self._GetDefaultInitOrientation()))
      self._BuildJointNameToIdDict()
      self._BuildUrdfIds()
      self._RemoveDefaultJointDamping()
      self._BuildMotorIdList()
      self._RecordMassInfoFromURDF()
      self._RecordInertiaInfoFromURDF()
      self.ResetPose(add_constraint=True)
    else:
      # Lightweight reset: move the already-loaded robot back in place
      # using the reset function chosen at construction time.
      self._reset_func(default_motor_angles, reset_time)

    # Clear per-episode bookkeeping.
    self._overheat_counter = np.zeros(self.num_motors)
    self._motor_enabled_list = [True] * self.num_motors
    self._observation_history.clear()
    self._step_counter = 0
    self._state_action_counter = 0
    self._is_safe = True
    self._last_action = None
    self.ReceiveObservation()
    if self._enable_action_filter:
      self._ResetActionFilter()

    # Remember the reset pose so RelativeTransformSinceReset can report
    # motion relative to it.
    self._position_at_reset, self._quat_at_reset = (
        self._pybullet_client.getBasePositionAndOrientation(self.quadruped))

    self._velocity = np.zeros((3,))
    self._prev_velocity = np.zeros((3,))
    self._accelerometer_reading = np.zeros((3,))
    self._last_state_time = 0
def RelativeTransformSinceReset(self):
"""Returns relative xyz, rpy from robot's position at reset."""
current_pos, current_quat = (
self._pybullet_client.getBasePositionAndOrientation(self.quadruped))
pos, quat = self._pybullet_client.multiplyTransforms(
*self._pybullet_client.invertTransform(self._position_at_reset,
self._quat_at_reset),
current_pos,
current_quat,
)
return pos, self._pybullet_client.getEulerFromQuaternion(quat)
def _LoadRobotURDF(self):
"""Loads the URDF file for the robot."""
urdf_file = self.GetURDFFile()
if self._self_collision_enabled:
self.quadruped = self._pybullet_client.loadURDF(
urdf_file,
self._GetDefaultInitPosition(),
self._GetDefaultInitOrientation(),
flags=self._pybullet_client.URDF_USE_SELF_COLLISION)
else:
self.quadruped = self._pybullet_client.loadURDF(
urdf_file, self._GetDefaultInitPosition(),
self._GetDefaultInitOrientation())
def _SettleDownForReset(self, default_motor_angles, reset_time):
"""Sets the default motor angles and waits for the robot to settle down.
The reset is skipped is reset_time is less than zereo.
Args:
default_motor_angles: A list of motor angles that the robot will achieve
at the end of the reset phase.
reset_time: The time duration for the reset phase.
"""
if reset_time <= 0:
return
# Important to fill the observation buffer.
self.ReceiveObservation()
for _ in range(100):
self._StepInternal(
[math.pi / 2] * self.num_motors,
motor_control_mode=robot_config.MotorControlMode.POSITION)
# Don't continue to reset if a safety error has occurred.
if not self._is_safe:
return
if default_motor_angles is None:
return
num_steps_to_reset = int(reset_time / self.time_step)
for _ in range(num_steps_to_reset):
self._StepInternal(
default_motor_angles,
motor_control_mode=robot_config.MotorControlMode.POSITION)
# Don't continue to reset if a safety error has occurred.
if not self._is_safe:
return
def _SetMotorTorqueById(self, motor_id, torque):
self._pybullet_client.setJointMotorControl2(
bodyIndex=self.quadruped,
jointIndex=motor_id,
controlMode=self._pybullet_client.TORQUE_CONTROL,
force=torque)
def _SetMotorTorqueByIds(self, motor_ids, torques):
self._pybullet_client.setJointMotorControlArray(
bodyIndex=self.quadruped,
jointIndices=motor_ids,
controlMode=self._pybullet_client.TORQUE_CONTROL,
forces=torques)
  def _SetDesiredMotorAngleByName(self, motor_name, desired_angle):
    """Sets a position target for the motor joint with the given name."""
    self._SetDesiredMotorAngleById(self._joint_name_to_id[motor_name],
                                   desired_angle)
def GetURDFFile(self):
return "quadruped/minitaur.urdf"
def ResetPose(self, add_constraint):
"""Reset the pose of the minitaur.
Args:
add_constraint: Whether to add a constraint at the joints of two feet.
"""
for i in range(self.num_legs):
self._ResetPoseForLeg(i, add_constraint)
def _ResetPoseForLeg(self, leg_id, add_constraint):
  """Reset the initial pose for the leg.

  Each minitaur leg has a left (L) and right (R) motor/knee pair; both sides
  are reset to the same nominal angles (up to the per-motor sign in
  self._motor_direction), optionally tied together with a point-to-point
  constraint, and then have pybullet's built-in velocity motors disabled so
  only small friction forces remain at the joints.

  Args:
    leg_id: It should be 0, 1, 2, or 3, which represents the leg at
      front_left, back_left, front_right and back_right.
    add_constraint: Whether to add a constraint at the joints of two feet.
  """
  knee_friction_force = 0
  half_pi = math.pi / 2.0
  # Nominal knee angle in radians for the initial pose.
  knee_angle = -2.1834
  leg_position = LEG_POSITION[leg_id]
  # Reset motor and knee joint states on the left side of the leg...
  self._pybullet_client.resetJointState(
      self.quadruped,
      self._joint_name_to_id["motor_" + leg_position + "L_joint"],
      self._motor_direction[2 * leg_id] * half_pi,
      targetVelocity=0)
  self._pybullet_client.resetJointState(
      self.quadruped,
      self._joint_name_to_id["knee_" + leg_position + "L_link"],
      self._motor_direction[2 * leg_id] * knee_angle,
      targetVelocity=0)
  # ...and on the right side.
  self._pybullet_client.resetJointState(
      self.quadruped,
      self._joint_name_to_id["motor_" + leg_position + "R_joint"],
      self._motor_direction[2 * leg_id + 1] * half_pi,
      targetVelocity=0)
  self._pybullet_client.resetJointState(
      self.quadruped,
      self._joint_name_to_id["knee_" + leg_position + "R_link"],
      self._motor_direction[2 * leg_id + 1] * knee_angle,
      targetVelocity=0)
  if add_constraint:
    # Tie the two knee links together (closes the four-bar linkage).
    self._pybullet_client.createConstraint(
        self.quadruped,
        self._joint_name_to_id["knee_" + leg_position + "R_link"],
        self.quadruped,
        self._joint_name_to_id["knee_" + leg_position + "L_link"],
        self._pybullet_client.JOINT_POINT2POINT, [0, 0, 0],
        KNEE_CONSTRAINT_POINT_RIGHT, KNEE_CONSTRAINT_POINT_LEFT)
  # Disable the default motor in pybullet.
  self._pybullet_client.setJointMotorControl2(
      bodyIndex=self.quadruped,
      jointIndex=(self._joint_name_to_id["motor_" + leg_position +
                                         "L_joint"]),
      controlMode=self._pybullet_client.VELOCITY_CONTROL,
      targetVelocity=0,
      force=knee_friction_force)
  self._pybullet_client.setJointMotorControl2(
      bodyIndex=self.quadruped,
      jointIndex=(self._joint_name_to_id["motor_" + leg_position +
                                         "R_joint"]),
      controlMode=self._pybullet_client.VELOCITY_CONTROL,
      targetVelocity=0,
      force=knee_friction_force)
  self._pybullet_client.setJointMotorControl2(
      bodyIndex=self.quadruped,
      jointIndex=(self._joint_name_to_id["knee_" + leg_position + "L_link"]),
      controlMode=self._pybullet_client.VELOCITY_CONTROL,
      targetVelocity=0,
      force=knee_friction_force)
  self._pybullet_client.setJointMotorControl2(
      bodyIndex=self.quadruped,
      jointIndex=(self._joint_name_to_id["knee_" + leg_position + "R_link"]),
      controlMode=self._pybullet_client.VELOCITY_CONTROL,
      targetVelocity=0,
      force=knee_friction_force)
def GetBasePosition(self):
  """Get the position of minitaur's base.

  Returns:
    The position of minitaur's base (the cached value updated by
    ReceiveObservation; returned by reference, not copied).
  """
  return self._base_position

def GetBaseVelocity(self):
  """Get the linear velocity of minitaur's base.

  Returns:
    The velocity of minitaur's base, as a new numpy array.
  """
  return np.array(self._velocity)

def GetTrueBaseRollPitchYaw(self):
  """Get minitaur's base orientation in euler angle in the world frame.

  Unlike GetBaseRollPitchYaw, this reads the latest (noise-free,
  latency-free) orientation.

  Returns:
    A tuple (roll, pitch, yaw) of the base in world frame.
  """
  orientation = self.GetTrueBaseOrientation()
  roll_pitch_yaw = self._pybullet_client.getEulerFromQuaternion(orientation)
  return np.asarray(roll_pitch_yaw)
def GetBaseRollPitchYaw(self):
  """Get minitaur's base orientation in euler angle in the world frame.

  This function mimics the noisy sensor reading and adds latency.

  Returns:
    A tuple (roll, pitch, yaw) of the base in world frame polluted by noise
    and latency.
  """
  # The delayed observation buffer stores 3 * num_motors entries before the
  # base orientation quaternion (presumably motor angles, velocities and
  # torques — confirm against the code that builds _control_observation).
  delayed_orientation = np.array(
      self._control_observation[3 * self.num_motors:3 * self.num_motors + 4])
  delayed_roll_pitch_yaw = self._pybullet_client.getEulerFromQuaternion(
      delayed_orientation)
  # Index 3 of _observation_noise_stdev is the orientation noise slot.
  roll_pitch_yaw = self._AddSensorNoise(np.array(delayed_roll_pitch_yaw),
                                        self._observation_noise_stdev[3])
  return roll_pitch_yaw
def GetBaseAcceleration(self):
  """Get robot's robot-relative (x, y, z) acceleration in m/s^2.

  The reading is polluted with simulated sensor noise
  (_observation_noise_stdev slot 5).
  """
  return self._AddSensorNoise(self._accelerometer_reading,
                              self._observation_noise_stdev[5])

def GetHipPositionsInBaseFrame(self):
  """Get the hip joint positions of the robot within its base frame.

  Raises:
    NotImplementedError: Always; subclasses with fixed hip layouts override
      this.
  """
  raise NotImplementedError("Not implemented for Minitaur.")
def ComputeMotorAnglesFromFootLocalPosition(self, leg_id,
                                            foot_local_position):
  """Use IK to compute the motor angles, given the foot link's local position.

  Args:
    leg_id: The leg index.
    foot_local_position: The foot link's position in the base frame.

  Returns:
    A tuple. The position indices and the angles for all joints along the
    leg. The position indices is consistent with the joint orders as returned
    by GetMotorAngles API.
  """
  assert len(self._foot_link_ids) == self.num_legs
  toe_id = self._foot_link_ids[leg_id]
  motors_per_leg = self.num_motors // self.num_legs
  # Indices of this leg's joints within the full motor ordering.
  joint_position_idxs = list(
      range(leg_id * motors_per_leg,
            leg_id * motors_per_leg + motors_per_leg))
  joint_angles = kinematics.joint_angles_from_link_position(
      robot=self,
      link_position=foot_local_position,
      link_id=toe_id,
      joint_ids=joint_position_idxs,
  )
  # Joint offset is necessary for Laikago. Map the raw URDF angles into the
  # motor convention: subtract per-joint offsets, then apply sign flips.
  joint_angles = np.multiply(
      np.asarray(joint_angles) -
      np.asarray(self._motor_offset)[joint_position_idxs],
      self._motor_direction[joint_position_idxs])
  # Return the joint index (the same as when calling GetMotorAngles) as well
  # as the angles.
  return joint_position_idxs, joint_angles.tolist()
def ComputeJacobian(self, leg_id):
  """Compute the Jacobian for a given leg.

  Args:
    leg_id: The leg index.

  Returns:
    The rows of the full-body Jacobian that correspond to this leg's motor
    joints.
  """
  # Does not work for Minitaur which has the four bar mechanism for now.
  assert len(self._foot_link_ids) == self.num_legs
  full_jacobian = kinematics.compute_jacobian(
      robot=self,
      link_id=self._foot_link_ids[leg_id],
  )
  motors_per_leg = self.num_motors // self.num_legs
  # The first com_dof entries belong to the floating base (6 DoF); the leg
  # joints follow in motor order.
  com_dof = 6
  return full_jacobian[com_dof + leg_id * motors_per_leg:com_dof +
                       (leg_id + 1) * motors_per_leg]
def MapContactForceToJointTorques(self, leg_id, contact_force):
  """Maps the foot contact force to the leg joint torques.

  Args:
    leg_id: The leg index.
    contact_force: The (x, y, z) contact force applied at the foot.

  Returns:
    A dict mapping each of the leg's joint ids to its torque.
  """
  jacobian = self.ComputeJacobian(leg_id)
  # tau = J^T * F, computed here as the row vector F times J.
  leg_torques = np.matmul(contact_force, jacobian)
  motors_per_leg = self.num_motors // self.num_legs
  first_joint = leg_id * motors_per_leg
  return {
      first_joint + torque_id: leg_torques[torque_id]
      for torque_id in range(motors_per_leg)
  }
def GetFootContacts(self):
  """Get minitaur's foot contact situation with the ground.

  Each leg has two foot links; a leg is reported as in contact if either
  link has a contact point with body 0.

  NOTE(review): bodyA=0 is assumed to be the ground plane — confirm the
  ground is always the first body loaded in the simulation.

  Returns:
    A list of 4 booleans. The ith boolean is True if leg i is in contact with
    ground.
  """
  contacts = []
  for leg_idx in range(MINITAUR_NUM_MOTORS // 2):
    link_id_1 = self._foot_link_ids[leg_idx * 2]
    link_id_2 = self._foot_link_ids[leg_idx * 2 + 1]
    # getContactPoints returns an empty tuple when there is no contact.
    contact_1 = bool(
        self._pybullet_client.getContactPoints(bodyA=0,
                                               bodyB=self.quadruped,
                                               linkIndexA=-1,
                                               linkIndexB=link_id_1))
    contact_2 = bool(
        self._pybullet_client.getContactPoints(bodyA=0,
                                               bodyB=self.quadruped,
                                               linkIndexA=-1,
                                               linkIndexB=link_id_2))
    contacts.append(contact_1 or contact_2)
  return contacts
def GetFootPositionsInBaseFrame(self):
  """Get the robot's foot position in the base frame.

  Returns:
    A (num_legs, 3) numpy array of foot positions, in foot-link order.
  """
  assert len(self._foot_link_ids) == self.num_legs
  return np.array([
      kinematics.link_position_in_base_frame(robot=self, link_id=foot_id)
      for foot_id in self.GetFootLinkIDs()
  ])
def GetTrueMotorAngles(self):
  """Gets the eight motor angles at the current moment, mapped to [-pi, pi].

  Returns:
    Motor angles, mapped to [-pi, pi].
  """
  raw_angles = np.asarray([state[0] for state in self._joint_states])
  offsets = np.asarray(self._motor_offset)
  # Re-zero by the per-joint offsets, then apply the per-motor sign flips.
  return (raw_angles - offsets) * self._motor_direction
def GetMotorAngles(self):
  """Gets the eight motor angles.

  This function mimics the noisy sensor reading and adds latency. The motor
  angles that are delayed, noise polluted, and mapped to [-pi, pi].

  Returns:
    Motor angles polluted by noise and latency, mapped to [-pi, pi].
  """
  # The first num_motors entries of the delayed observation are the angles.
  motor_angles = self._AddSensorNoise(
      np.array(self._control_observation[0:self.num_motors]),
      self._observation_noise_stdev[0])
  return MapToMinusPiToPi(motor_angles)
def GetTrueMotorVelocities(self):
"""Get the velocity of all eight motors.
Returns:
Velocities of all eight motors.
"""
motor_velocities = [state[1] for state in self._joint_states]
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | true |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/motion_imitation/robots/laikago.py | motion_imitation/robots/laikago.py | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(os.path.dirname(currentdir))
os.sys.path.insert(0, parentdir)
"""Pybullet simulation of a Laikago robot."""
import math
import os
import re
import numpy as np
import pybullet as pyb # pytype: disable=import-error
from motion_imitation.robots import laikago_pose_utils
from motion_imitation.robots import laikago_constants
from motion_imitation.robots import laikago_motor
from motion_imitation.robots import minitaur
from motion_imitation.robots import robot_config
from motion_imitation.envs import locomotion_gym_config
# Number of actuated motors and legs on the Laikago.
NUM_MOTORS = 12
NUM_LEGS = 4
# URDF joint names in motor order: FR, FL, RR, RL legs, each with
# (hip abduction, upper leg, lower leg) joints.
MOTOR_NAMES = [
    "FR_hip_motor_2_chassis_joint",
    "FR_upper_leg_2_hip_motor_joint",
    "FR_lower_leg_2_upper_leg_joint",
    "FL_hip_motor_2_chassis_joint",
    "FL_upper_leg_2_hip_motor_joint",
    "FL_lower_leg_2_upper_leg_joint",
    "RR_hip_motor_2_chassis_joint",
    "RR_upper_leg_2_hip_motor_joint",
    "RR_lower_leg_2_upper_leg_joint",
    "RL_hip_motor_2_chassis_joint",
    "RL_upper_leg_2_hip_motor_joint",
    "RL_lower_leg_2_upper_leg_joint",
]
# Base positions at reset: hung on a rack vs. standing on the ground.
INIT_RACK_POSITION = [0, 0, 1]
INIT_POSITION = [0, 0, 0.48]
# Per-motor sign flips (presumably mapping URDF joint directions onto a
# common sign convention — confirm against the URDF).
JOINT_DIRECTIONS = np.array([-1, 1, 1, 1, 1, 1, -1, 1, 1, 1, 1, 1])
# Angle offsets (radians) applied per joint type to re-zero the URDF pose.
HIP_JOINT_OFFSET = 0.0
UPPER_LEG_JOINT_OFFSET = -0.6
KNEE_JOINT_OFFSET = 0.66
DOFS_PER_LEG = 3
JOINT_OFFSETS = np.array(
    [HIP_JOINT_OFFSET, UPPER_LEG_JOINT_OFFSET, KNEE_JOINT_OFFSET] * 4)
PI = math.pi
# Maximum per-step change (radians) used when clipping position commands.
MAX_MOTOR_ANGLE_CHANGE_PER_STEP = 0.2
# Hip positions in the base frame (meters), in leg order FR, FL, RR, RL.
_DEFAULT_HIP_POSITIONS = (
    (0.21, -0.1157, 0),
    (0.21, 0.1157, 0),
    (-0.21, -0.1157, 0),
    (-0.21, 0.1157, 0),
)
# Default PD gains per joint type.
ABDUCTION_P_GAIN = 220.0
ABDUCTION_D_GAIN = 0.3
HIP_P_GAIN = 220.0
HIP_D_GAIN = 2.0
KNEE_P_GAIN = 220.0
KNEE_D_GAIN = 2.0
# Based on the readings from Laikago's default pose.
INIT_MOTOR_ANGLES = np.array([
    laikago_pose_utils.LAIKAGO_DEFAULT_ABDUCTION_ANGLE,
    laikago_pose_utils.LAIKAGO_DEFAULT_HIP_ANGLE,
    laikago_pose_utils.LAIKAGO_DEFAULT_KNEE_ANGLE
] * NUM_LEGS)
# Regexes used by _BuildUrdfIds to classify URDF joints by name.
_CHASSIS_NAME_PATTERN = re.compile(r"\w+_chassis_\w+")
_MOTOR_NAME_PATTERN = re.compile(r"\w+_hip_motor_\w+")
_KNEE_NAME_PATTERN = re.compile(r"\w+_lower_leg_\w+")
_TOE_NAME_PATTERN = re.compile(r"jtoe\d*")
URDF_FILENAME = "laikago/laikago_toes_limits.urdf"
# Indices into the tuples returned by pybullet's getContactPoints().
_BODY_B_FIELD_NUMBER = 2
_LINK_A_FIELD_NUMBER = 3
# Action bounds: +/- 2*pi radians.
UPPER_BOUND = 6.28318548203
LOWER_BOUND = -6.28318548203
class Laikago(minitaur.Minitaur):
  """A simulation for the Laikago robot."""

  # Constants consumed by the MPC-based controllers.
  MPC_BODY_MASS = 215/9.8
  MPC_BODY_INERTIA = (0.07335, 0, 0, 0, 0.25068, 0, 0, 0, 0.25447)
  MPC_BODY_HEIGHT = 0.42

  # One desired-angle action per motor. All motors share the same +/- 2*pi
  # bounds, so build the list programmatically instead of repeating twelve
  # near-identical entries.
  ACTION_CONFIG = [
      locomotion_gym_config.ScalarField(
          name="motor_angle_%d" % field_id,
          upper_bound=UPPER_BOUND,
          lower_bound=LOWER_BOUND) for field_id in range(NUM_MOTORS)
  ]

  def __init__(
      self,
      pybullet_client,
      motor_control_mode,
      urdf_filename=URDF_FILENAME,
      enable_clip_motor_commands=False,
      time_step=0.001,
      action_repeat=33,
      sensors=None,
      control_latency=0.002,
      on_rack=False,
      enable_action_interpolation=True,
      enable_action_filter=False,
      reset_time=-1,
      allow_knee_contact=False,
  ):
    """Initializes the Laikago simulation wrapper.

    Args:
      pybullet_client: The pybullet client instance.
      motor_control_mode: A robot_config.MotorControlMode enum.
      urdf_filename: Path of the Laikago URDF to load.
      enable_clip_motor_commands: Whether to clip position commands around
        the current motor angles before applying them.
      time_step: Simulation time step in seconds.
      action_repeat: Number of simulation steps per control step.
      sensors: Optional list of sensors attached to the robot.
      control_latency: Simulated sensor latency in seconds.
      on_rack: Whether the robot hangs on a rack (for debugging).
      enable_action_interpolation: Whether to interpolate between actions.
      enable_action_filter: Whether to low-pass filter actions.
      reset_time: Duration of the settle phase at reset; <= 0 skips it.
      allow_knee_contact: Whether knee links also count as feet for contact
        reporting.
    """
    self._urdf_filename = urdf_filename
    self._allow_knee_contact = allow_knee_contact
    self._enable_clip_motor_commands = enable_clip_motor_commands
    # Same (abduction, hip, knee) gain triple for all four legs.
    motor_kp = [ABDUCTION_P_GAIN, HIP_P_GAIN, KNEE_P_GAIN] * NUM_LEGS
    motor_kd = [ABDUCTION_D_GAIN, HIP_D_GAIN, KNEE_D_GAIN] * NUM_LEGS
    super(Laikago, self).__init__(
        pybullet_client=pybullet_client,
        time_step=time_step,
        action_repeat=action_repeat,
        num_motors=NUM_MOTORS,
        dofs_per_leg=DOFS_PER_LEG,
        motor_direction=JOINT_DIRECTIONS,
        motor_offset=JOINT_OFFSETS,
        motor_overheat_protection=False,
        motor_control_mode=motor_control_mode,
        motor_model_class=laikago_motor.LaikagoMotorModel,
        sensors=sensors,
        motor_kp=motor_kp,
        motor_kd=motor_kd,
        control_latency=control_latency,
        on_rack=on_rack,
        enable_action_interpolation=enable_action_interpolation,
        enable_action_filter=enable_action_filter,
        reset_time=reset_time)

  def _LoadRobotURDF(self):
    """Loads the Laikago URDF, honoring the self-collision flag."""
    laikago_urdf_path = self.GetURDFFile()
    if self._self_collision_enabled:
      self.quadruped = self._pybullet_client.loadURDF(
          laikago_urdf_path,
          self._GetDefaultInitPosition(),
          self._GetDefaultInitOrientation(),
          flags=self._pybullet_client.URDF_USE_SELF_COLLISION)
    else:
      self.quadruped = self._pybullet_client.loadURDF(
          laikago_urdf_path, self._GetDefaultInitPosition(),
          self._GetDefaultInitOrientation())

  def _SettleDownForReset(self, default_motor_angles, reset_time):
    """Drives the robot to the default pose and lets it settle.

    Args:
      default_motor_angles: Optional pose to hold after the initial settle.
      reset_time: Duration for holding default_motor_angles; <= 0 skips the
        whole settle phase.
    """
    self.ReceiveObservation()
    if reset_time <= 0:
      return
    # First settle into the default standing pose for a fixed 500 steps.
    for _ in range(500):
      self._StepInternal(
          INIT_MOTOR_ANGLES,
          motor_control_mode=robot_config.MotorControlMode.POSITION)
    # Then hold the requested pose for the remaining reset duration.
    if default_motor_angles is not None:
      num_steps_to_reset = int(reset_time / self.time_step)
      for _ in range(num_steps_to_reset):
        self._StepInternal(
            default_motor_angles,
            motor_control_mode=robot_config.MotorControlMode.POSITION)

  def GetHipPositionsInBaseFrame(self):
    """Returns the fixed hip positions in the base frame (FR, FL, RR, RL)."""
    return _DEFAULT_HIP_POSITIONS

  def GetFootContacts(self):
    """Returns 4 booleans; True if the corresponding toe touches anything
    other than the robot itself."""
    all_contacts = self._pybullet_client.getContactPoints(bodyA=self.quadruped)
    contacts = [False, False, False, False]
    for contact in all_contacts:
      # Ignore self contacts
      if contact[_BODY_B_FIELD_NUMBER] == self.quadruped:
        continue
      try:
        toe_link_index = self._foot_link_ids.index(
            contact[_LINK_A_FIELD_NUMBER])
        contacts[toe_link_index] = True
      except ValueError:
        # Contact on a non-foot link; ignore.
        continue
    return contacts

  def ComputeJacobian(self, leg_id):
    """Compute the Jacobian for a given leg."""
    # Because of the default rotation in the Laikago URDF, we need to reorder
    # the rows in the Jacobian matrix.
    return super(Laikago, self).ComputeJacobian(leg_id)[(2, 0, 1), :]

  def ResetPose(self, add_constraint):
    """Resets every motor joint to its initial angle.

    Args:
      add_constraint: Unused; kept for interface compatibility with
        minitaur.Minitaur.
    """
    del add_constraint
    # Disable pybullet's built-in velocity motors so joints can be set freely.
    for name in self._joint_name_to_id:
      joint_id = self._joint_name_to_id[name]
      self._pybullet_client.setJointMotorControl2(
          bodyIndex=self.quadruped,
          jointIndex=(joint_id),
          controlMode=self._pybullet_client.VELOCITY_CONTROL,
          targetVelocity=0,
          force=0)
    for i, name in enumerate(MOTOR_NAMES):
      if "hip_motor_2_chassis_joint" in name:
        angle = INIT_MOTOR_ANGLES[i] + HIP_JOINT_OFFSET
      elif "upper_leg_2_hip_motor_joint" in name:
        angle = INIT_MOTOR_ANGLES[i] + UPPER_LEG_JOINT_OFFSET
      elif "lower_leg_2_upper_leg_joint" in name:
        angle = INIT_MOTOR_ANGLES[i] + KNEE_JOINT_OFFSET
      else:
        raise ValueError("The name %s is not recognized as a motor joint." %
                         name)
      self._pybullet_client.resetJointState(self.quadruped,
                                            self._joint_name_to_id[name],
                                            angle,
                                            targetVelocity=0)

  def GetURDFFile(self):
    """Returns the URDF path passed in at construction."""
    return self._urdf_filename

  def _BuildUrdfIds(self):
    """Build the link Ids from its name in the URDF file.

    Raises:
      ValueError: Unknown category of the joint name.
    """
    num_joints = self._pybullet_client.getNumJoints(self.quadruped)
    self._chassis_link_ids = [-1]
    self._leg_link_ids = []
    self._motor_link_ids = []
    self._knee_link_ids = []
    self._foot_link_ids = []
    for i in range(num_joints):
      joint_info = self._pybullet_client.getJointInfo(self.quadruped, i)
      joint_name = joint_info[1].decode("UTF-8")
      joint_id = self._joint_name_to_id[joint_name]
      if _CHASSIS_NAME_PATTERN.match(joint_name):
        self._chassis_link_ids.append(joint_id)
      elif _MOTOR_NAME_PATTERN.match(joint_name):
        self._motor_link_ids.append(joint_id)
      # We either treat the lower leg or the toe as the foot link, depending
      # on the urdf version used.
      elif _KNEE_NAME_PATTERN.match(joint_name):
        self._knee_link_ids.append(joint_id)
      elif _TOE_NAME_PATTERN.match(joint_name):
        self._foot_link_ids.append(joint_id)
      else:
        raise ValueError("Unknown category of joint %s" % joint_name)
    self._leg_link_ids.extend(self._knee_link_ids)
    self._leg_link_ids.extend(self._foot_link_ids)
    if self._allow_knee_contact:
      self._foot_link_ids.extend(self._knee_link_ids)
    self._chassis_link_ids.sort()
    self._motor_link_ids.sort()
    self._foot_link_ids.sort()
    self._leg_link_ids.sort()

  def _GetMotorNames(self):
    """Returns the URDF joint names of the motors, in motor order."""
    return MOTOR_NAMES

  def _GetDefaultInitPosition(self):
    """Returns the rack or ground initial base position."""
    if self._on_rack:
      return INIT_RACK_POSITION
    return INIT_POSITION

  def _GetDefaultInitOrientation(self):
    """Returns the initial base orientation quaternion."""
    # The Laikago URDF assumes the initial pose of heading towards z axis,
    # and belly towards y axis. The following transformation is to transform
    # the Laikago initial orientation to our commonly used orientation:
    # heading towards -x direction, and z axis is the up direction.
    init_orientation = pyb.getQuaternionFromEuler(
        [math.pi / 2.0, 0, math.pi / 2.0])
    return init_orientation

  def GetDefaultInitPosition(self):
    """Get default initial base position."""
    return self._GetDefaultInitPosition()

  def GetDefaultInitOrientation(self):
    """Get default initial base orientation."""
    return self._GetDefaultInitOrientation()

  def GetDefaultInitJointPose(self):
    """Get default initial joint pose (in raw URDF joint coordinates)."""
    joint_pose = (INIT_MOTOR_ANGLES + JOINT_OFFSETS) * JOINT_DIRECTIONS
    return joint_pose

  def ApplyAction(self, motor_commands, motor_control_mode):
    """Clips and then apply the motor commands using the motor model.

    Args:
      motor_commands: np.array. Can be motor angles, torques, hybrid
        commands, or motor pwms (for Minitaur only).
      motor_control_mode: A MotorControlMode enum.
    """
    if self._enable_clip_motor_commands:
      motor_commands = self._ClipMotorCommands(motor_commands)
    super(Laikago, self).ApplyAction(motor_commands, motor_control_mode)

  def _ClipMotorCommands(self, motor_commands):
    """Clips motor commands.

    Args:
      motor_commands: np.array. Can be motor angles, torques, hybrid
        commands, or motor pwms (for Minitaur only).

    Returns:
      Clipped motor commands.
    """
    # Clamp the motor command near the current angles, in case weird things
    # happen.
    max_angle_change = MAX_MOTOR_ANGLE_CHANGE_PER_STEP
    current_motor_angles = self.GetMotorAngles()
    motor_commands = np.clip(motor_commands,
                             current_motor_angles - max_angle_change,
                             current_motor_angles + max_angle_change)
    return motor_commands

  @classmethod
  def GetConstants(cls):
    """Returns the module holding Laikago-specific constants."""
    del cls
    return laikago_constants
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/motion_imitation/robots/a1_robot.py | motion_imitation/robots/a1_robot.py | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pytype: disable=attribute-error
"""Real robot interface of A1 robot."""
import os
import inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(os.path.dirname(currentdir))
os.sys.path.insert(0, parentdir)
from absl import logging
import math
import re
import multiprocessing
import numpy as np
import time
from motion_imitation.robots import laikago_pose_utils
from motion_imitation.robots import a1
from motion_imitation.robots import minitaur
from motion_imitation.robots import robot_config
from motion_imitation.envs import locomotion_gym_config
# from robot_interface import RobotInterface # pytype: disable=import-error
# Number of actuated motors and legs on the A1.
NUM_MOTORS = 12
NUM_LEGS = 4
# URDF joint names in motor order: FR, FL, RR, RL legs, each with
# (hip, upper, lower) joints.
MOTOR_NAMES = [
    "FR_hip_joint",
    "FR_upper_joint",
    "FR_lower_joint",
    "FL_hip_joint",
    "FL_upper_joint",
    "FL_lower_joint",
    "RR_hip_joint",
    "RR_upper_joint",
    "RR_lower_joint",
    "RL_hip_joint",
    "RL_upper_joint",
    "RL_lower_joint",
]
# Base positions at reset: hung on a rack vs. standing on the ground.
INIT_RACK_POSITION = [0, 0, 1]
INIT_POSITION = [0, 0, 0.48]
# Unlike Laikago, the A1 needs no per-joint sign flips or angle offsets.
JOINT_DIRECTIONS = np.ones(12)
HIP_JOINT_OFFSET = 0.0
UPPER_LEG_JOINT_OFFSET = 0.0
KNEE_JOINT_OFFSET = 0.0
DOFS_PER_LEG = 3
JOINT_OFFSETS = np.array(
    [HIP_JOINT_OFFSET, UPPER_LEG_JOINT_OFFSET, KNEE_JOINT_OFFSET] * 4)
PI = math.pi
# Hip positions in the base frame (meters), in leg order FR, FL, RR, RL.
_DEFAULT_HIP_POSITIONS = (
    (0.17, -0.135, 0),
    (0.17, 0.13, 0),
    (-0.195, -0.135, 0),
    (-0.195, 0.13, 0),
)
# Default PD gains per joint type, expanded to per-motor lists below.
ABDUCTION_P_GAIN = 100.0
ABDUCTION_D_GAIN = 1.0
HIP_P_GAIN = 100.0
HIP_D_GAIN = 2.0
KNEE_P_GAIN = 100.0
KNEE_D_GAIN = 2.0
MOTOR_KPS = [ABDUCTION_P_GAIN, HIP_P_GAIN, KNEE_P_GAIN] * 4
MOTOR_KDS = [ABDUCTION_D_GAIN, HIP_D_GAIN, KNEE_D_GAIN] * 4
# If any motor is above this temperature (Celsius), a warning will be printed.
# At 60C, Unitree will shut down a motor until it cools off.
MOTOR_WARN_TEMP_C = 50.0
# LCM channel names used by the Unitree low-level interface.
COMMAND_CHANNEL_NAME = 'LCM_Low_Cmd'
STATE_CHANNEL_NAME = 'LCM_Low_State'
# Based on the readings from Laikago's default pose.
INIT_MOTOR_ANGLES = np.array([
    laikago_pose_utils.LAIKAGO_DEFAULT_ABDUCTION_ANGLE,
    laikago_pose_utils.LAIKAGO_DEFAULT_HIP_ANGLE,
    laikago_pose_utils.LAIKAGO_DEFAULT_KNEE_ANGLE
] * NUM_LEGS)
# Regexes used to classify URDF joints by name.
HIP_NAME_PATTERN = re.compile(r"\w+_hip_\w+")
UPPER_NAME_PATTERN = re.compile(r"\w+_upper_\w+")
LOWER_NAME_PATTERN = re.compile(r"\w+_lower_\w+")
TOE_NAME_PATTERN = re.compile(r"\w+_toe\d*")
IMU_NAME_PATTERN = re.compile(r"imu\d*")
URDF_FILENAME = "a1/a1.urdf"
# Indices into the tuples returned by pybullet's getContactPoints().
_BODY_B_FIELD_NUMBER = 2
_LINK_A_FIELD_NUMBER = 3
class A1Robot(a1.A1):
  """Interface for real A1 robot."""

  # Constants consumed by the MPC-based controllers.
  MPC_BODY_MASS = 108 / 9.8
  MPC_BODY_INERTIA = np.array((0.24, 0, 0, 0, 0.80, 0, 0, 0, 1.00))
  MPC_BODY_HEIGHT = 0.24

  # Per-joint action bounds in radians, matching the A1 joint limits.
  ACTION_CONFIG = [
      locomotion_gym_config.ScalarField(name="FR_hip_motor",
                                        upper_bound=0.802851455917,
                                        lower_bound=-0.802851455917),
      locomotion_gym_config.ScalarField(name="FR_upper_joint",
                                        upper_bound=4.18879020479,
                                        lower_bound=-1.0471975512),
      locomotion_gym_config.ScalarField(name="FR_lower_joint",
                                        upper_bound=-0.916297857297,
                                        lower_bound=-2.69653369433),
      locomotion_gym_config.ScalarField(name="FL_hip_motor",
                                        upper_bound=0.802851455917,
                                        lower_bound=-0.802851455917),
      locomotion_gym_config.ScalarField(name="FL_upper_joint",
                                        upper_bound=4.18879020479,
                                        lower_bound=-1.0471975512),
      locomotion_gym_config.ScalarField(name="FL_lower_joint",
                                        upper_bound=-0.916297857297,
                                        lower_bound=-2.69653369433),
      locomotion_gym_config.ScalarField(name="RR_hip_motor",
                                        upper_bound=0.802851455917,
                                        lower_bound=-0.802851455917),
      locomotion_gym_config.ScalarField(name="RR_upper_joint",
                                        upper_bound=4.18879020479,
                                        lower_bound=-1.0471975512),
      locomotion_gym_config.ScalarField(name="RR_lower_joint",
                                        upper_bound=-0.916297857297,
                                        lower_bound=-2.69653369433),
      locomotion_gym_config.ScalarField(name="RL_hip_motor",
                                        upper_bound=0.802851455917,
                                        lower_bound=-0.802851455917),
      locomotion_gym_config.ScalarField(name="RL_upper_joint",
                                        upper_bound=4.18879020479,
                                        lower_bound=-1.0471975512),
      locomotion_gym_config.ScalarField(name="RL_lower_joint",
                                        upper_bound=-0.916297857297,
                                        lower_bound=-2.69653369433),
  ]

  # Strictly enforce joint limits on the real robot, for safety.
  JOINT_EPSILON = 0.0
def __init__(self,
             pybullet_client,
             time_step=0.001,
             enable_clip_motor_commands=True,
             reset_func_name='_StandupReset',
             **kwargs):
  """Initializes the robot class.

  Args:
    pybullet_client: The pybullet client used for the companion simulation.
    time_step: Low-level control time step in seconds.
    enable_clip_motor_commands: Whether to clip position commands around the
      current motor angles before sending them.
    reset_func_name: Name of the bound method used to reset the robot pose.
    **kwargs: Forwarded to a1.A1.__init__; 'velocity_source' is removed and
      forced to IMU_FOOT_CONTACT below.
  """
  # Initialize pd gain vector
  self._pybullet_client = pybullet_client
  self.time_step = time_step
  # Robot state variables
  self._init_complete = False
  self._base_position = np.zeros((3,))
  self._base_orientation = None
  self._last_position_update_time = time.time()
  self._raw_state = None
  self._last_raw_state = None
  self._motor_angles = np.zeros(12)
  self._motor_velocities = np.zeros(12)
  self._motor_temperatures = np.zeros(12)
  self._joint_states = None
  self._last_reset_time = time.time()
  # Initiate UDP for robot state and actions
  # NOTE(review): RobotInterface's import is commented out at the top of
  # this file, so this line raises NameError unless it is re-enabled.
  self._robot_interface = RobotInterface()
  self._robot_interface.send_command(np.zeros(60, dtype=np.float32))
  # Re-entrant lock to ensure one process commands the robot at a time.
  self._robot_command_lock = multiprocessing.RLock()
  # Pipe/process used by HoldCurrentPose()/ReleasePose().
  self._pipe = None
  self._child_pipe = None
  self._hold_process = None
  # The real robot always estimates velocity from IMU + foot contacts.
  if 'velocity_source' in kwargs:
    del kwargs['velocity_source']
  super(A1Robot, self).__init__(
      pybullet_client,
      time_step=time_step,
      enable_clip_motor_commands=enable_clip_motor_commands,
      velocity_source=a1.VelocitySource.IMU_FOOT_CONTACT,
      reset_func_name=reset_func_name,
      **kwargs)
  self._init_complete = True
def ReceiveObservation(self):
  """Receives the latest low-level state from the robot and caches it.

  Reads one state packet from the robot interface and updates the cached
  base orientation, accelerometer reading, motor angles/velocities/torques
  and temperatures. Once initialization is complete, also advances the
  velocity estimator and the dead-reckoned base position.
  """
  state = self._robot_interface.receive_observation()
  self._raw_state = state
  # Convert quaternion from wxyz to xyzw, which is default for Pybullet.
  q = state.imu.quaternion
  self._base_orientation = np.array([q[1], q[2], q[3], q[0]])
  self._accelerometer_reading = np.array(state.imu.accelerometer)
  self._motor_angles = np.array([motor.q for motor in state.motorState[:12]])
  self._motor_velocities = np.array(
      [motor.dq for motor in state.motorState[:12]])
  # (angle, velocity) pairs, mirroring pybullet's getJointStates layout.
  self._joint_states = np.array(
      list(zip(self._motor_angles, self._motor_velocities)))
  self._observed_motor_torques = np.array(
      [motor.tauEst for motor in state.motorState[:12]])
  self._motor_temperatures = np.array(
      [motor.temperature for motor in state.motorState[:12]])
  if self._init_complete:
    # self._SetRobotStateInSim(self._motor_angles, self._motor_velocities)
    # state.tick is presumably in milliseconds — confirm against the SDK.
    self._velocity_estimator.update(state.tick / 1000.)
    self._UpdatePosition()
def _CheckMotorTemperatures(self):
  """Prints a per-motor temperature warning when any motor runs hot."""
  if not any(self._motor_temperatures > MOTOR_WARN_TEMP_C):
    return
  print("WARNING: Motors are getting hot. Temperatures:")
  for name, temp in zip(MOTOR_NAMES, self._motor_temperatures.astype(int)):
    print(f"{name}: {temp} C")
def _UpdatePosition(self):
now = time.time()
self._base_position += self.GetBaseVelocity() * (now - self._last_position_update_time)
self._last_position_update_time = now
def _SetRobotStateInSim(self, motor_angles, motor_velocities):
self._pybullet_client.resetBasePositionAndOrientation(
self.quadruped, self.GetBasePosition(), self.GetBaseOrientation())
for i, motor_id in enumerate(self._motor_id_list):
self._pybullet_client.resetJointState(self.quadruped, motor_id,
motor_angles[i],
motor_velocities[i])
def GetTrueMotorAngles(self):
  """Returns a copy of the raw (unwrapped) measured motor angles."""
  return self._motor_angles.copy()

def GetMotorAngles(self):
  """Returns the measured motor angles, wrapped to [-pi, pi]."""
  return minitaur.MapToMinusPiToPi(self._motor_angles).copy()

def GetMotorVelocities(self):
  """Returns a copy of the measured motor velocities."""
  return self._motor_velocities.copy()

def GetBasePosition(self):
  """Returns a copy of the dead-reckoned base position."""
  return self._base_position.copy()

def GetBaseRollPitchYaw(self):
  """Returns the base orientation as (roll, pitch, yaw)."""
  return self._pybullet_client.getEulerFromQuaternion(self._base_orientation)

def GetTrueBaseRollPitchYaw(self):
  """Same as GetBaseRollPitchYaw; the real robot has no simulated noise."""
  return self._pybullet_client.getEulerFromQuaternion(self._base_orientation)

def GetBaseRollPitchYawRate(self):
  """Returns the base angular velocity from the IMU gyroscope."""
  return self.GetTrueBaseRollPitchYawRate()

def GetTrueBaseRollPitchYawRate(self):
  """Returns a copy of the raw IMU gyroscope reading."""
  return np.array(self._raw_state.imu.gyroscope).copy()

def GetBaseVelocity(self):
  """Returns a copy of the estimated base linear velocity."""
  return self._velocity_estimator.estimated_velocity.copy()

def GetFootContacts(self):
  """Returns 4 booleans; True where foot force exceeds the 20 N threshold."""
  return np.array(self._raw_state.footForce) > 20

def GetTimeSinceReset(self):
  """Returns wall-clock seconds elapsed since the last Reset()."""
  return time.time() - self._last_reset_time

def GetBaseOrientation(self):
  """Returns a copy of the base orientation quaternion (xyzw)."""
  return self._base_orientation.copy()

@property
def motor_velocities(self):
  """Copy of the measured motor velocities."""
  return self._motor_velocities.copy()

@property
def motor_temperatures(self):
  """Copy of the measured motor temperatures in Celsius."""
  return self._motor_temperatures.copy()
def ApplyAction(self, motor_commands, motor_control_mode=None):
  """Clips and then apply the motor commands using the motor model.

  Args:
    motor_commands: np.array. Can be motor angles, torques, hybrid commands,
      or motor pwms (for Minitaur only).
    motor_control_mode: A MotorControlMode enum. Defaults to the robot's
      configured control mode when None.

  Raises:
    ValueError: If motor_control_mode is not POSITION, TORQUE or HYBRID.
  """
  if motor_control_mode is None:
    motor_control_mode = self._motor_control_mode
  motor_commands = self._ClipMotorCommands(motor_commands, motor_control_mode)
  # The low-level command packs 5 floats per motor. From the usage below:
  # slot 0 = position target, slot 1 = Kp, slot 3 = Kd, slot 4 = torque.
  # Slot 2 is left at zero here (presumably a velocity target — confirm
  # against the Unitree SDK).
  command = np.zeros(60, dtype=np.float32)
  if motor_control_mode == robot_config.MotorControlMode.POSITION:
    for motor_id in range(NUM_MOTORS):
      command[motor_id * 5] = motor_commands[motor_id]
      command[motor_id * 5 + 1] = MOTOR_KPS[motor_id]
      command[motor_id * 5 + 3] = MOTOR_KDS[motor_id]
  elif motor_control_mode == robot_config.MotorControlMode.TORQUE:
    for motor_id in range(NUM_MOTORS):
      command[motor_id * 5 + 4] = motor_commands[motor_id]
  elif motor_control_mode == robot_config.MotorControlMode.HYBRID:
    # Hybrid commands are already in the packed 60-float layout.
    command = np.array(motor_commands, dtype=np.float32)
  else:
    raise ValueError('Unknown motor control mode for A1 robot: {}.'.format(
        motor_control_mode))
  with self._robot_command_lock:
    self._robot_interface.send_command(command)
def _HoldPose(self, pose, pipe):
  """Continually sends position command `pose` until `pipe` has a message.

  This method is intended to be run in its own process by HoldCurrentPose().

  Args:
    pose: Motor angles to hold, sent as POSITION commands each iteration.
    pipe: multiprocessing connection; any message signals this loop to stop.
      Before exiting, the encountered SafetyError (or None) is sent back.
  """
  # Clear self._hold_process to make ReleasePose() a no-op in this process
  # (it must be called in the parent process). This does not affect the parent
  # process's self._hold_process.
  self._hold_process = None
  error = None
  with self._robot_command_lock:
    while not pipe.poll():
      self._Nap()
      # If a safety error has been encountered, spin without applying actions
      # until signalled to stop. This way self._robot_command_lock is retained
      # to avoid another process accidentally commanding the robot.
      if error is not None:
        continue
      try:
        self._ValidateMotorStates()
      except (robot_config.SafetyError) as e:
        error = e
        continue
      self.ApplyAction(
          pose, motor_control_mode=robot_config.MotorControlMode.POSITION)
  # Report the outcome to the parent process (None means no safety error).
  pipe.send(error)
def HoldCurrentPose(self):
  """Starts a process to continually command the A1's current joint angles.

  Calling Step(), Brake(), or ReleasePose() will kill the subprocess and stop
  holding the pose. Ending the main python process (for example with a normal
  return or ctrl-c) will also kill the subprocess.

  No-op if a hold process is already running.
  """
  if self._hold_process is not None:
    return
  # Set self._child_pipe to prevent its being garbage collected.
  self._pipe, self._child_pipe = multiprocessing.Pipe()
  self._hold_process = multiprocessing.Process(
      target=self._HoldPose, args=(self.GetMotorAngles(), self._child_pipe))
  self._hold_process.start()
def ReleasePose(self):
  """If a subprocess is holding a pose, stops the subprocess."""
  if self._hold_process is None:
    return
  # Any message ends the subprocess's hold loop; it replies with the
  # SafetyError it hit while holding, or None if all was well.
  self._pipe.send(None)
  self._hold_process.join()
  maybe_error = self._pipe.recv()
  if maybe_error is not None:
    print(maybe_error)
    # A safety violation happened while holding; stop commanding the robot.
    self._is_safe = False
  # Tear down pipes/process handles and reset state for the next hold.
  self._pipe.close()
  self._child_pipe.close()
  self._hold_process.close()
  self._pipe = None
  self._child_pipe = None
  self._hold_process = None
def Step(self, action, control_mode=None):
  """Steps the robot: stops any held pose, acts, then checks temperatures."""
  # A held pose owns the command lock; release it before commanding.
  self.ReleasePose()
  super().Step(action, control_mode)
  self._CheckMotorTemperatures()
def _StandupReset(self, default_motor_angles, reset_time):
  """Blends from the current pose to a standing pose on the real robot.

  Args:
    default_motor_angles: Target joint angles (12,) to stand up into, or
      None/empty to use a1.INIT_MOTOR_ANGLES. May be a list or numpy array.
    reset_time: If <= 0 the standup is skipped entirely; otherwise the
      standup always takes `standup_time` seconds regardless of this value.
  """
  if reset_time <= 0:
    return
  # Stand up in 1.5 seconds, and keep the behavior in this way.
  standup_time = 1.5
  # `not arr` raises ValueError on multi-element numpy arrays, so check
  # None/empty explicitly instead of relying on truthiness.
  if default_motor_angles is None or len(default_motor_angles) == 0:
    default_motor_angles = a1.INIT_MOTOR_ANGLES
  # Coerce to numpy so the blend arithmetic below also works when the caller
  # passes a plain Python list (float * list would raise TypeError).
  target_motor_angles = np.asarray(default_motor_angles, dtype=np.float64)
  current_motor_angles = np.asarray(self.GetMotorAngles(), dtype=np.float64)
  for t in np.arange(0, standup_time, self.time_step * self._action_repeat):
    blend_ratio = min(t / standup_time, 1)
    action = blend_ratio * target_motor_angles + (
        1 - blend_ratio) * current_motor_angles
    self.Step(action, robot_config.MotorControlMode.POSITION)
def Reset(self, reload_urdf=True, default_motor_angles=None, reset_time=3.0):
  """Reset the robot to default motor angles.

  Args:
    reload_urdf: Forwarded to the parent Reset.
    default_motor_angles: Pose passed on to the configured reset function.
    reset_time: Duration budget passed on to the configured reset function.
  """
  # The real robot's height is not directly observed; assume it starts at
  # ground level and re-stamp the position-update clock.
  self._base_position[2] = 0
  self._last_position_update_time = time.time()
  # reset_time=-1 skips the parent's own settle-down stepping; the actual
  # reset motion is delegated to self._reset_func below.
  super(A1Robot, self).Reset(reload_urdf=reload_urdf,
                             default_motor_angles=default_motor_angles,
                             reset_time=-1)
  # Joint-limit checks are relaxed while resetting (see _ValidateMotorStates).
  self._currently_resetting = True
  self._reset_func(default_motor_angles, reset_time)
  if self._enable_action_filter:
    self._ResetActionFilter()
  self._velocity_estimator.reset()
  self._state_action_counter = 0
  self._step_counter = 0
  self._last_reset_time = time.time()
  self._currently_resetting = False
  self._last_action = None
def Terminate(self):
  """Brakes the motors and marks this robot object as no longer alive."""
  self.Brake()
  self._is_alive = False
def _StepInternal(self, action, motor_control_mode=None):
  """Applies one low-level command, reads state, and enforces safety limits."""
  # Only command the robot while it is in a safe state.
  if self._is_safe:
    self.ApplyAction(action, motor_control_mode)
  # Always refresh the state, even when unsafe, so observers stay current.
  self.ReceiveObservation()
  self._state_action_counter += 1
  if not self._is_safe:
    return
  try:
    self._ValidateMotorStates()
  except(robot_config.SafetyError) as e:
    print(e)
    if self.running_reset_policy:
      # Let the resetter handle retries.
      raise e
    self._is_safe = False
    return
  # Sleep out the remainder of the control period.
  self._Nap()
def _Nap(self):
"""Sleep for the remainder of self.time_step."""
now = time.time()
sleep_time = self.time_step - (now - self._last_step_time_wall)
if self._timesteps is not None:
self._timesteps.append(now - self._last_step_time_wall)
self._last_step_time_wall = now
if sleep_time >= 0:
time.sleep(sleep_time)
def Brake(self):
  """Stops holding any pose and engages the hardware motor brake."""
  self.ReleasePose()
  self._robot_interface.brake()
  self.LogTimesteps()
  self._Nap()
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/motion_imitation/robots/laikago_constants.py | motion_imitation/robots/laikago_constants.py | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Defines the laikago robot related constants and URDF specs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import pybullet as pyb # pytype: disable=import-error
# Laikago: 4 legs, 3 actuated joints per leg.
NUM_MOTORS = 12
NUM_LEGS = 4
MOTORS_PER_LEG = 3
# Base spawn poses: on-rack (suspended) vs standing on the ground.
INIT_RACK_POSITION = [0, 0, 1]
INIT_POSITION = [0, 0, 0.48]
# Will be default to (0, 0, 0, 1) once the new laikago_toes_zup.urdf checked in.
INIT_ORIENTATION = pyb.getQuaternionFromEuler([math.pi / 2.0, 0, math.pi / 2.0])
# Can be different from the motors, although for laikago they are the same list.
JOINT_NAMES = (
    # front right leg
    "FR_hip_motor_2_chassis_joint",
    "FR_upper_leg_2_hip_motor_joint",
    "FR_lower_leg_2_upper_leg_joint",
    # front left leg
    "FL_hip_motor_2_chassis_joint",
    "FL_upper_leg_2_hip_motor_joint",
    "FL_lower_leg_2_upper_leg_joint",
    # rear right leg
    "RR_hip_motor_2_chassis_joint",
    "RR_upper_leg_2_hip_motor_joint",
    "RR_lower_leg_2_upper_leg_joint",
    # rear left leg
    "RL_hip_motor_2_chassis_joint",
    "RL_upper_leg_2_hip_motor_joint",
    "RL_lower_leg_2_upper_leg_joint",
)
# Default standing pose in SDK convention (radians).
INIT_ABDUCTION_ANGLE = 0
INIT_HIP_ANGLE = 0.67
INIT_KNEE_ANGLE = -1.25
# Note this matches the Laikago SDK/control convention, but is different from
# URDF's internal joint angles which needs to be computed using the joint
# offsets and directions. The conversion formula is (sdk_joint_angle + offset) *
# joint direction.
INIT_JOINT_ANGLES = collections.OrderedDict(
    zip(JOINT_NAMES,
        (INIT_ABDUCTION_ANGLE, INIT_HIP_ANGLE, INIT_KNEE_ANGLE) * NUM_LEGS))
# Used to convert the robot SDK joint angles to URDF joint angles.
JOINT_DIRECTIONS = collections.OrderedDict(
    zip(JOINT_NAMES, (-1, 1, 1, 1, 1, 1, -1, 1, 1, 1, 1, 1)))
HIP_JOINT_OFFSET = 0.0
UPPER_LEG_JOINT_OFFSET = -0.6
KNEE_JOINT_OFFSET = 0.66
# Used to convert the robot SDK joint angles to URDF joint angles.
JOINT_OFFSETS = collections.OrderedDict(
    zip(JOINT_NAMES,
        [HIP_JOINT_OFFSET, UPPER_LEG_JOINT_OFFSET, KNEE_JOINT_OFFSET] *
        NUM_LEGS))
LEG_NAMES = (
    "front_right",
    "front_left",
    "rear_right",
    "rear_left",
)
# NOTE(review): uses "back_*" where LEG_NAMES uses "rear_*" — presumably
# intentional for a different consumer; confirm before unifying.
LEG_ORDER = (
    "front_right",
    "front_left",
    "back_right",
    "back_left",
)
# Toe joint names in FR, FL, RR, RL order.
END_EFFECTOR_NAMES = (
    "jtoeFR",
    "jtoeFL",
    "jtoeRR",
    "jtoeRL",
)
MOTOR_NAMES = JOINT_NAMES
# Joints grouped per leg, keyed by leg name.
MOTOR_GROUP = collections.OrderedDict((
    (LEG_NAMES[0], JOINT_NAMES[0:3]),
    (LEG_NAMES[1], JOINT_NAMES[3:6]),
    (LEG_NAMES[2], JOINT_NAMES[6:9]),
    (LEG_NAMES[3], JOINT_NAMES[9:12]),
))
# Regulates the joint angle change when in position control mode.
MAX_MOTOR_ANGLE_CHANGE_PER_STEP = 0.12
# The hip joint location in the CoM frame.
HIP_POSITIONS = collections.OrderedDict((
    (LEG_NAMES[0], (0.21, -0.1157, 0)),
    (LEG_NAMES[1], (0.21, 0.1157, 0)),
    (LEG_NAMES[2], (-0.21, -0.1157, 0)),
    (LEG_NAMES[3], (-0.21, 0.1157, 0)),
))
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/motion_imitation/robots/minitaur_constants.py | motion_imitation/robots/minitaur_constants.py | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Defines the minitaur robot related constants and URDF specs."""
# Leg naming order used by the minitaur robot class.
LEG_ORDER = ["front_left", "back_left", "front_right", "back_right"]
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/motion_imitation/robots/__init__.py | motion_imitation/robots/__init__.py | python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false | |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/motion_imitation/robots/laikago_pose_utils.py | motion_imitation/robots/laikago_pose_utils.py | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions to calculate Laikago's pose and motor angles."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import attr
# Nominal standing joint angles (radians) for each Laikago leg.
LAIKAGO_DEFAULT_ABDUCTION_ANGLE = 0
LAIKAGO_DEFAULT_HIP_ANGLE = 0.67
LAIKAGO_DEFAULT_KNEE_ANGLE = -1.25


@attr.s
class LaikagoPose(object):
  """Default pose of the Laikago.

  Leg order:
    0 -> Front Right.
    1 -> Front Left.
    2 -> Rear Right.
    3 -> Rear Left.
  """
  # Per-leg (abduction, hip, knee) angles in radians; all default to zero.
  abduction_angle_0 = attr.ib(type=float, default=0)
  hip_angle_0 = attr.ib(type=float, default=0)
  knee_angle_0 = attr.ib(type=float, default=0)
  abduction_angle_1 = attr.ib(type=float, default=0)
  hip_angle_1 = attr.ib(type=float, default=0)
  knee_angle_1 = attr.ib(type=float, default=0)
  abduction_angle_2 = attr.ib(type=float, default=0)
  hip_angle_2 = attr.ib(type=float, default=0)
  knee_angle_2 = attr.ib(type=float, default=0)
  abduction_angle_3 = attr.ib(type=float, default=0)
  hip_angle_3 = attr.ib(type=float, default=0)
  knee_angle_3 = attr.ib(type=float, default=0)
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/motion_imitation/robots/kinematics.py | motion_imitation/robots/kinematics.py | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The inverse kinematic utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import typing
# Unit quaternion (x, y, z, w) meaning "no rotation".
_IDENTITY_ORIENTATION = (0, 0, 0, 1)


def joint_angles_from_link_position(
    robot: typing.Any,
    link_position: typing.Sequence[float],
    link_id: int,
    joint_ids: typing.Sequence[int],
    base_translation: typing.Sequence[float] = (0, 0, 0),
    base_rotation: typing.Sequence[float] = (0, 0, 0, 1)):
  """Uses Inverse Kinematics to calculate joint angles.

  Args:
    robot: A robot instance.
    link_position: The (x, y, z) of the link in the body frame. This local
      frame is transformed relative to the COM frame using the given
      translation and rotation.
    link_id: The link id as returned from loadURDF.
    joint_ids: The positional index of the joints. This can be different from
      the joint unique ids.
    base_translation: Additional base translation.
    base_rotation: Additional base rotation.

  Returns:
    A list of joint angles for the requested joints.
  """
  # Compose the robot's base transform with the extra offset to obtain the
  # frame the target point is expressed in.
  com_pos, com_orn = robot.GetBasePosition(), robot.GetBaseOrientation()
  frame_pos, frame_orn = robot.pybullet_client.multiplyTransforms(
      com_pos, com_orn, base_translation, base_rotation)
  # Map the target link position into world coordinates.
  target_world_pos, _ = robot.pybullet_client.multiplyTransforms(
      frame_pos, frame_orn, link_position, _IDENTITY_ORIENTATION)
  ik_solver = 0
  all_joint_angles = robot.pybullet_client.calculateInverseKinematics(
      robot.quadruped, link_id, target_world_pos, solver=ik_solver)
  # Keep only the requested joints out of the full IK solution.
  return [all_joint_angles[i] for i in joint_ids]
def link_position_in_base_frame(
    robot: typing.Any,
    link_id: int,
):
  """Computes the link's local position in the robot frame.

  Args:
    robot: A robot instance.
    link_id: The link to calculate its relative position.

  Returns:
    The relative position of the link as a numpy array.
  """
  # Invert the base transform so world coordinates can be mapped into the
  # base frame.
  base_pos, base_orn = robot.GetBasePosition(), robot.GetBaseOrientation()
  inv_trans, inv_rot = robot.pybullet_client.invertTransform(
      base_pos, base_orn)
  # The link's world-frame position is the first entry of its link state.
  world_pos = robot.pybullet_client.getLinkState(robot.quadruped, link_id)[0]
  local_pos, _ = robot.pybullet_client.multiplyTransforms(
      inv_trans, inv_rot, world_pos, (0, 0, 0, 1))
  return np.array(local_pos)
def compute_jacobian(
    robot: typing.Any,
    link_id: int,
):
  """Computes the Jacobian matrix for the given link.

  Args:
    robot: A robot instance.
    link_id: The link id as returned from loadURDF.

  Returns:
    The 3 x N translational Jacobian, where N is the total DoFs of the
    robot. For a quadruped, the first 6 columns correspond to the CoM
    translation and rotation; the columns for leg `leg_id` can be extracted
    with indices [6 + leg_id * 3: 6 + leg_id * 3 + 3].
  """
  joint_positions = [state[0] for state in robot.joint_states]
  # Zero joint velocities/accelerations: we only want the position Jacobian.
  zeros = [0] * len(joint_positions)
  translational_jacobian, _ = robot.pybullet_client.calculateJacobian(
      robot.quadruped, link_id, (0, 0, 0), joint_positions, zeros, zeros)
  return np.array(translational_jacobian)
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/motion_imitation/robots/a1.py | motion_imitation/robots/a1.py | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pybullet simulation of a Laikago robot."""
import os
import inspect
# Make the repository root importable when this module is run directly.
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(os.path.dirname(currentdir))
os.sys.path.insert(0, parentdir)
import enum
import math
import re
import numba
import numpy as np
import pybullet as pyb # pytype: disable=import-error
import time
from motion_imitation.robots import a1_robot_velocity_estimator
from motion_imitation.robots import laikago_constants
from motion_imitation.robots import laikago_motor
from motion_imitation.robots import minitaur
from motion_imitation.robots import robot_config
from motion_imitation.envs import locomotion_gym_config
# A1: 4 legs, 3 actuated joints per leg.
NUM_MOTORS = 12
NUM_LEGS = 4
MOTOR_NAMES = [
    "FR_hip_joint",
    "FR_upper_joint",
    "FR_lower_joint",
    "FL_hip_joint",
    "FL_upper_joint",
    "FL_lower_joint",
    "RR_hip_joint",
    "RR_upper_joint",
    "RR_lower_joint",
    "RL_hip_joint",
    "RL_upper_joint",
    "RL_lower_joint",
]
# A1's URDF needs no per-joint sign flips or offsets (all identity).
JOINT_DIRECTIONS = np.ones(12)
HIP_JOINT_OFFSET = 0.0
UPPER_LEG_JOINT_OFFSET = 0.0
KNEE_JOINT_OFFSET = 0.0
DOFS_PER_LEG = 3
JOINT_OFFSETS = np.array(
    [HIP_JOINT_OFFSET, UPPER_LEG_JOINT_OFFSET, KNEE_JOINT_OFFSET] * 4)
PI = math.pi
MAX_MOTOR_ANGLE_CHANGE_PER_STEP = 0.2
# TODO: Find appropriate limits.
MAX_JOINT_VELOCITY = np.inf  # rad/s (was 11)
MAX_TORQUE = 35.5  # N-m
# Nominal hip locations (x, y, z) in the base frame: FR, FL, RR, RL.
_DEFAULT_HIP_POSITIONS = (
    (0.17, -0.135, 0),
    (0.17, 0.13, 0),
    (-0.195, -0.135, 0),
    (-0.195, 0.13, 0),
)
COM_OFFSET = -np.array([0.012731, 0.002186, 0.000515])
# Hip offsets relative to the CoM frame (geometry plus COM correction).
HIP_OFFSETS = np.array([[0.183, -0.047, 0.], [0.183, 0.047, 0.],
                        [-0.183, -0.047, 0.], [-0.183, 0.047, 0.]
                        ]) + COM_OFFSET
# Position-control PD gains per joint type.
ABDUCTION_P_GAIN = 100.0
ABDUCTION_D_GAIN = 1.
HIP_P_GAIN = 100.0
HIP_D_GAIN = 2.0
KNEE_P_GAIN = 100.0
KNEE_D_GAIN = 2.0
# Bases on the readings from Laikago's default pose.
INIT_MOTOR_ANGLES = np.array([0, 0.9, -1.8] * NUM_LEGS)
# Patterns used to classify URDF joints by name in _BuildUrdfIds().
HIP_NAME_PATTERN = re.compile(r"\w+_hip_\w+")
UPPER_NAME_PATTERN = re.compile(r"\w+_upper_\w+")
LOWER_NAME_PATTERN = re.compile(r"\w+_lower_\w+")
TOE_NAME_PATTERN = re.compile(r"\w+_toe\d*")
IMU_NAME_PATTERN = re.compile(r"imu\d*")
URDF_FILENAME = os.path.join(parentdir, "motion_imitation/utilities/a1/a1.urdf")
# Tuple indices of bodyUniqueIdB / linkIndexA in pybullet contact points.
_BODY_B_FIELD_NUMBER = 2
_LINK_A_FIELD_NUMBER = 3
# Empirical values from real A1.
ACCELEROMETER_VARIANCE = 0.03059
JOINT_VELOCITY_VARIANCE = 0.006206
class VelocitySource(enum.Enum):
  """Which estimator backs GetBaseVelocity()."""
  # Ground-truth velocity read from the simulator.
  PYBULLET = 0
  # Kalman-filtered estimate from IMU + foot-contact kinematics.
  IMU_FOOT_CONTACT = 1
# Found that these numba.jit decorators slow down the timestep from 1ms without
# to 5ms with decorators.
# @numba.jit(nopython=True, cache=True)
def foot_position_in_hip_frame_to_joint_angle(foot_position, l_hip_sign=1):
  """Analytic IK: converts a hip-frame foot position to leg joint angles.

  Args:
    foot_position: (x, y, z) of the foot in the hip frame.
    l_hip_sign: 1 for a left leg, -1 for a right leg (sign of the hip
      link's lateral offset).

  Returns:
    np.array([abduction, hip, knee]) joint angles in radians.
  """
  upper_len = 0.2
  lower_len = 0.2
  hip_offset = 0.08505 * l_hip_sign
  x, y, z = foot_position[0], foot_position[1], foot_position[2]
  # Law of cosines: knee bend needed to reach the target distance.
  knee = -np.arccos(
      (x**2 + y**2 + z**2 - hip_offset**2 - lower_len**2 - upper_len**2) /
      (2 * lower_len * upper_len))
  # Effective hip-to-foot length for that knee bend.
  leg_len = np.sqrt(upper_len**2 + lower_len**2 +
                    2 * upper_len * lower_len * np.cos(knee))
  hip = np.arcsin(-x / leg_len) - knee / 2
  swing = hip + knee / 2
  # Abduction from the lateral components of the target.
  cos_comp = hip_offset * y - leg_len * np.cos(swing) * z
  sin_comp = leg_len * np.cos(swing) * y + hip_offset * z
  abduction = np.arctan2(sin_comp, cos_comp)
  return np.array([abduction, hip, knee])
# @numba.jit(nopython=True, cache=True)
def foot_position_in_hip_frame(angles, l_hip_sign=1):
  """Analytic FK: maps (abduction, hip, knee) angles to a hip-frame foot position.

  Args:
    angles: Sequence of (abduction, hip, knee) joint angles in radians.
    l_hip_sign: 1 for a left leg, -1 for a right leg (sign of the hip
      link's lateral offset).

  Returns:
    np.array([x, y, z]) foot position in the hip frame.
  """
  abduction, hip, knee = angles[0], angles[1], angles[2]
  upper_len = 0.2
  lower_len = 0.2
  hip_offset = 0.08505 * l_hip_sign
  # Hip-to-foot distance for the current knee bend (law of cosines).
  leg_len = np.sqrt(upper_len**2 + lower_len**2 +
                    2 * upper_len * lower_len * np.cos(knee))
  swing = hip + knee / 2
  # Foot position in the leg's sagittal plane, before abduction.
  x = -leg_len * np.sin(swing)
  z_plane = -leg_len * np.cos(swing)
  y_plane = hip_offset
  # Rotate the (y, z) components about the x axis by the abduction angle.
  y = np.cos(abduction) * y_plane - np.sin(abduction) * z_plane
  z = np.sin(abduction) * y_plane + np.cos(abduction) * z_plane
  return np.array([x, y, z])
# @numba.jit(nopython=True, cache=True)
def analytical_leg_jacobian(leg_angles, leg_id):
  """Computes the analytical Jacobian of the foot position for one leg.

  Args:
    leg_angles: a list of 3 numbers for current abduction, hip and knee angle.
    leg_id: which leg (0-3); determines whether the hip offset is to the
      left (+) or right (-) of the body via (-1)**(leg_id + 1).

  Returns:
    A 3x3 numpy array J mapping joint velocities to foot velocity in the
    hip frame.
  """
  l_up = 0.2
  l_low = 0.2
  l_hip = 0.08505 * (-1)**(leg_id + 1)
  t1, t2, t3 = leg_angles[0], leg_angles[1], leg_angles[2]
  # Effective leg length and swing angle for the current knee bend.
  l_eff = np.sqrt(l_up**2 + l_low**2 + 2 * l_up * l_low * np.cos(t3))
  t_eff = t2 + t3 / 2
  J = np.zeros((3, 3))
  J[0, 0] = 0
  J[0, 1] = -l_eff * np.cos(t_eff)
  J[0, 2] = l_low * l_up * np.sin(t3) * np.sin(t_eff) / l_eff - l_eff * np.cos(
      t_eff) / 2
  J[1, 0] = -l_hip * np.sin(t1) + l_eff * np.cos(t1) * np.cos(t_eff)
  J[1, 1] = -l_eff * np.sin(t1) * np.sin(t_eff)
  J[1, 2] = -l_low * l_up * np.sin(t1) * np.sin(t3) * np.cos(
      t_eff) / l_eff - l_eff * np.sin(t1) * np.sin(t_eff) / 2
  J[2, 0] = l_hip * np.cos(t1) + l_eff * np.sin(t1) * np.cos(t_eff)
  J[2, 1] = l_eff * np.sin(t_eff) * np.cos(t1)
  J[2, 2] = l_low * l_up * np.sin(t3) * np.cos(t1) * np.cos(
      t_eff) / l_eff + l_eff * np.sin(t_eff) * np.cos(t1) / 2
  return J
# For JIT compilation
# NOTE(review): the numba decorators above are commented out, so these calls
# only warm the pure-Python path (results are discarded). A random position
# may fall outside the reachable workspace — presumably harmless; confirm.
foot_position_in_hip_frame_to_joint_angle(np.random.uniform(size=3), 1)
foot_position_in_hip_frame_to_joint_angle(np.random.uniform(size=3), -1)
# @numba.jit(nopython=True, cache=True, parallel=True)
def foot_positions_in_base_frame(foot_angles):
  """Maps all 12 joint angles to the four foot positions in the base frame."""
  per_leg_angles = foot_angles.reshape((4, 3))
  # Legs alternate right (-1) / left (+1) lateral hip offsets.
  hip_relative = np.stack([
      foot_position_in_hip_frame(per_leg_angles[i], l_hip_sign=(-1)**(i + 1))
      for i in range(4)
  ])
  return hip_relative + HIP_OFFSETS
class A1(minitaur.Minitaur):
  """A simulation for the A1 robot."""
  # At high replanning frequency, inaccurate values of BODY_MASS/INERTIA
  # doesn't seem to matter much. However, these values should be better tuned
  # when the replan frequency is low (e.g. using a less beefy CPU).
  MPC_BODY_MASS = 108 / 9.8
  MPC_BODY_INERTIA = np.array((0.017, 0, 0, 0, 0.057, 0, 0, 0, 0.064)) * 4.
  MPC_BODY_HEIGHT = 0.24
  MPC_VELOCITY_MULTIPLIER = 0.5
  # Per-joint position bounds (radians), in MOTOR_NAMES order.
  ACTION_CONFIG = [
      locomotion_gym_config.ScalarField(name="FR_hip_motor",
                                        upper_bound=0.802851455917,
                                        lower_bound=-0.802851455917),
      locomotion_gym_config.ScalarField(name="FR_upper_joint",
                                        upper_bound=4.18879020479,
                                        lower_bound=-1.0471975512),
      locomotion_gym_config.ScalarField(name="FR_lower_joint",
                                        upper_bound=-0.916297857297,
                                        lower_bound=-2.69653369433),
      locomotion_gym_config.ScalarField(name="FL_hip_motor",
                                        upper_bound=0.802851455917,
                                        lower_bound=-0.802851455917),
      locomotion_gym_config.ScalarField(name="FL_upper_joint",
                                        upper_bound=4.18879020479,
                                        lower_bound=-1.0471975512),
      locomotion_gym_config.ScalarField(name="FL_lower_joint",
                                        upper_bound=-0.916297857297,
                                        lower_bound=-2.69653369433),
      locomotion_gym_config.ScalarField(name="RR_hip_motor",
                                        upper_bound=0.802851455917,
                                        lower_bound=-0.802851455917),
      locomotion_gym_config.ScalarField(name="RR_upper_joint",
                                        upper_bound=4.18879020479,
                                        lower_bound=-1.0471975512),
      locomotion_gym_config.ScalarField(name="RR_lower_joint",
                                        upper_bound=-0.916297857297,
                                        lower_bound=-2.69653369433),
      locomotion_gym_config.ScalarField(name="RL_hip_motor",
                                        upper_bound=0.802851455917,
                                        lower_bound=-0.802851455917),
      locomotion_gym_config.ScalarField(name="RL_upper_joint",
                                        upper_bound=4.18879020479,
                                        lower_bound=-1.0471975512),
      locomotion_gym_config.ScalarField(name="RL_lower_joint",
                                        upper_bound=-0.916297857297,
                                        lower_bound=-2.69653369433),
  ]
  # Base spawn poses: suspended on a rack vs standing on the ground.
  INIT_RACK_POSITION = [0, 0, 1]
  INIT_POSITION = [0, 0, 0.25870023]
  INIT_ORIENTATION = (0, 0, 0, 1)
  # Joint angles are allowed to be JOINT_EPSILON outside their nominal range.
  # This accounts for imprecision seen in either pybullet's enforcement of joint
  # limits or its reporting of joint angles.
  JOINT_EPSILON = 0.02
def __init__(
    self,
    pybullet_client,
    urdf_filename=URDF_FILENAME,
    enable_clip_motor_commands=False,
    time_step=0.001,
    action_repeat=10,
    self_collision_enabled=False,
    sensors=None,
    control_latency=0.002,
    on_rack=False,
    reset_at_current_position=False,
    reset_func_name="_PybulletReset",
    enable_action_interpolation=True,
    enable_action_filter=False,
    motor_control_mode=None,
    motor_torque_limits=MAX_TORQUE,
    reset_time=1,
    allow_knee_contact=False,
    log_time_per_step=False,
    observation_noise_stdev=(0.0,) * 6,
    velocity_source=VelocitySource.PYBULLET,
):
  """Constructor.

  Args:
    observation_noise_stdev: The standard deviation of a Gaussian noise model
      for the sensor. It should be an array for separate sensors in the
      following order [motor_angle, motor_velocity, motor_torque,
      base_roll_pitch_yaw, base_angular_velocity, base_linear_acceleration]
    velocity_source: How to determine the velocity returned by
      self.GetBaseVelocity().

  Remaining arguments are forwarded to minitaur.Minitaur.__init__ (see that
  constructor for their semantics).
  """
  self.running_reset_policy = False
  self._urdf_filename = urdf_filename
  self._allow_knee_contact = allow_knee_contact
  self._enable_clip_motor_commands = enable_clip_motor_commands
  # PD gains per joint, repeated for the four legs in MOTOR_NAMES order.
  motor_kp = [
      ABDUCTION_P_GAIN, HIP_P_GAIN, KNEE_P_GAIN, ABDUCTION_P_GAIN,
      HIP_P_GAIN, KNEE_P_GAIN, ABDUCTION_P_GAIN, HIP_P_GAIN, KNEE_P_GAIN,
      ABDUCTION_P_GAIN, HIP_P_GAIN, KNEE_P_GAIN
  ]
  motor_kd = [
      ABDUCTION_D_GAIN, HIP_D_GAIN, KNEE_D_GAIN, ABDUCTION_D_GAIN,
      HIP_D_GAIN, KNEE_D_GAIN, ABDUCTION_D_GAIN, HIP_D_GAIN, KNEE_D_GAIN,
      ABDUCTION_D_GAIN, HIP_D_GAIN, KNEE_D_GAIN
  ]
  # Cache per-joint bounds from ACTION_CONFIG for fast clipping.
  self._joint_angle_lower_limits = np.array(
      [field.lower_bound for field in self.ACTION_CONFIG])
  self._joint_angle_upper_limits = np.array(
      [field.upper_bound for field in self.ACTION_CONFIG])
  # Optional wall-clock timing log, consumed by LogTimesteps().
  if log_time_per_step:
    self._timesteps = []
  else:
    self._timesteps = None
  self._last_step_time_wall = 0
  self._currently_resetting = False
  self._max_vel = 0
  self._max_tau = 0
  self._velocity_estimator = None
  if velocity_source is VelocitySource.IMU_FOOT_CONTACT:
    self._velocity_estimator = a1_robot_velocity_estimator.VelocityEstimator(
        robot=self,
        accelerometer_variance=ACCELEROMETER_VARIANCE,
        sensor_variance=JOINT_VELOCITY_VARIANCE)
  super(A1, self).__init__(
      pybullet_client=pybullet_client,
      time_step=time_step,
      action_repeat=action_repeat,
      self_collision_enabled=self_collision_enabled,
      num_motors=NUM_MOTORS,
      dofs_per_leg=DOFS_PER_LEG,
      motor_direction=JOINT_DIRECTIONS,
      motor_offset=JOINT_OFFSETS,
      motor_overheat_protection=False,
      motor_control_mode=motor_control_mode,
      motor_model_class=laikago_motor.LaikagoMotorModel,
      motor_torque_limits=motor_torque_limits,
      sensors=sensors,
      motor_kp=motor_kp,
      motor_kd=motor_kd,
      control_latency=control_latency,
      observation_noise_stdev=observation_noise_stdev,
      on_rack=on_rack,
      reset_at_current_position=reset_at_current_position,
      reset_func_name=reset_func_name,
      enable_action_interpolation=enable_action_interpolation,
      enable_action_filter=enable_action_filter,
      reset_time=reset_time)
def __del__(self):
  # Dump any collected per-step timing statistics when the object dies.
  self.LogTimesteps()
def _LoadRobotURDF(self):
  """Loads the A1 URDF, optionally with self-collision enabled."""
  a1_urdf_path = self.GetURDFFile()
  if self._self_collision_enabled:
    self.quadruped = self._pybullet_client.loadURDF(
        a1_urdf_path,
        self._GetDefaultInitPosition(),
        self._GetDefaultInitOrientation(),
        flags=self._pybullet_client.URDF_USE_SELF_COLLISION)
  else:
    self.quadruped = self._pybullet_client.loadURDF(
        a1_urdf_path, self._GetDefaultInitPosition(),
        self._GetDefaultInitOrientation())
def _SettleDownForReset(self, default_motor_angles, reset_time):
  """Position-controls the robot into a stable pose after a reset."""
  self.ReceiveObservation()
  if reset_time <= 0:
    return
  # First drive toward the canonical init pose for a fixed 500 steps...
  for _ in range(500):
    self._StepInternal(
        INIT_MOTOR_ANGLES,
        motor_control_mode=robot_config.MotorControlMode.POSITION)
  # ...then, if a pose was supplied, hold it for reset_time seconds.
  if default_motor_angles is not None:
    num_steps_to_reset = int(reset_time / self.time_step)
    for _ in range(num_steps_to_reset):
      self._StepInternal(
          default_motor_angles,
          motor_control_mode=robot_config.MotorControlMode.POSITION)
def GetHipPositionsInBaseFrame(self):
  """Returns the nominal (x, y, z) of each hip in the base frame."""
  return _DEFAULT_HIP_POSITIONS
def GetFootContacts(self):
  """Returns, per foot, whether it touches anything other than the robot.

  Returns:
    A list of four booleans in self._foot_link_ids order.
  """
  contact_points = self._pybullet_client.getContactPoints(bodyA=self.quadruped)
  foot_in_contact = [False] * 4
  for point in contact_points:
    # Ignore self contacts.
    if point[_BODY_B_FIELD_NUMBER] == self.quadruped:
      continue
    link_a = point[_LINK_A_FIELD_NUMBER]
    # Only contacts on a known toe link count.
    if link_a in self._foot_link_ids:
      foot_in_contact[self._foot_link_ids.index(link_a)] = True
  return foot_in_contact
def _SafeJointsReset(self, default_motor_angles=None, reset_time=None):
  """Moves joints back within bounds, then holds the resulting pose."""
  super()._SafeJointsReset(default_motor_angles, reset_time)
  self.HoldCurrentPose()
def ResetPose(self, add_constraint):
  """Snaps joints to the init pose and disables pybullet's default motors.

  Args:
    add_constraint: Unused; kept for interface compatibility.
  """
  del add_constraint
  # Zero out pybullet's built-in velocity motors so later commands are not
  # fought by the default joint controllers.
  for name in self._joint_name_to_id:
    joint_id = self._joint_name_to_id[name]
    self._pybullet_client.setJointMotorControl2(
        bodyIndex=self.quadruped,
        jointIndex=(joint_id),
        controlMode=self._pybullet_client.VELOCITY_CONTROL,
        targetVelocity=0,
        force=0)
  # Teleport each joint to its init angle (plus its per-type offset).
  for name, i in zip(MOTOR_NAMES, range(len(MOTOR_NAMES))):
    if "hip_joint" in name:
      angle = INIT_MOTOR_ANGLES[i] + HIP_JOINT_OFFSET
    elif "upper_joint" in name:
      angle = INIT_MOTOR_ANGLES[i] + UPPER_LEG_JOINT_OFFSET
    elif "lower_joint" in name:
      angle = INIT_MOTOR_ANGLES[i] + KNEE_JOINT_OFFSET
    else:
      raise ValueError("The name %s is not recognized as a motor joint." %
                       name)
    self._pybullet_client.resetJointState(self.quadruped,
                                          self._joint_name_to_id[name],
                                          angle,
                                          targetVelocity=0)
def GetURDFFile(self):
  """Returns the path of the URDF file this robot is built from."""
  return self._urdf_filename
def _BuildUrdfIds(self):
  """Build the link Ids from its name in the URDF file.

  Classifies every joint by matching its name against the module-level
  regex patterns and populates the per-category id lists.

  Raises:
    ValueError: Unknown category of the joint name.
  """
  num_joints = self.pybullet_client.getNumJoints(self.quadruped)
  # -1 is the base link in pybullet.
  self._hip_link_ids = [-1]
  self._leg_link_ids = []
  self._motor_link_ids = []
  self._lower_link_ids = []
  self._foot_link_ids = []
  self._imu_link_ids = []
  for i in range(num_joints):
    joint_info = self.pybullet_client.getJointInfo(self.quadruped, i)
    joint_name = joint_info[1].decode("UTF-8")
    joint_id = self._joint_name_to_id[joint_name]
    if HIP_NAME_PATTERN.match(joint_name):
      self._hip_link_ids.append(joint_id)
    elif UPPER_NAME_PATTERN.match(joint_name):
      self._motor_link_ids.append(joint_id)
    # We either treat the lower leg or the toe as the foot link, depending on
    # the urdf version used.
    elif LOWER_NAME_PATTERN.match(joint_name):
      self._lower_link_ids.append(joint_id)
    elif TOE_NAME_PATTERN.match(joint_name):
      #assert self._urdf_filename == URDF_WITH_TOES
      self._foot_link_ids.append(joint_id)
    elif IMU_NAME_PATTERN.match(joint_name):
      self._imu_link_ids.append(joint_id)
    else:
      raise ValueError("Unknown category of joint %s" % joint_name)
  self._leg_link_ids.extend(self._lower_link_ids)
  self._leg_link_ids.extend(self._foot_link_ids)
  #assert len(self._foot_link_ids) == NUM_LEGS
  self._hip_link_ids.sort()
  self._motor_link_ids.sort()
  self._lower_link_ids.sort()
  self._foot_link_ids.sort()
  self._leg_link_ids.sort()
def _GetMotorNames(self):
  """Returns the 12 actuated joint names in URDF order."""
  return MOTOR_NAMES
def GetDefaultInitPosition(self):
  """Get default initial base position."""
  # Public wrapper over the protected accessor.
  return self._GetDefaultInitPosition()
def GetDefaultInitOrientation(self):
  """Get default initial base orientation."""
  return self._GetDefaultInitOrientation()
def GetDefaultInitJointPose(self):
  """Get default initial joint pose."""
  # Convert SDK-convention init angles to URDF convention using the
  # per-joint offsets and directions.
  joint_pose = (INIT_MOTOR_ANGLES + JOINT_OFFSETS) * JOINT_DIRECTIONS
  return joint_pose
def ApplyAction(self, motor_commands, motor_control_mode=None):
  """Clips and then apply the motor commands using the motor model.

  Args:
    motor_commands: np.array. Can be motor angles, torques, hybrid commands,
      or motor pwms (for Minitaur only).
    motor_control_mode: A MotorControlMode enum; defaults to the robot's
      configured control mode when None.
  """
  if motor_control_mode is None:
    motor_control_mode = self._motor_control_mode
  motor_commands = self._ClipMotorCommands(motor_commands, motor_control_mode)
  super(A1, self).ApplyAction(motor_commands, motor_control_mode)
def Reset(self, reload_urdf=True, default_motor_angles=None, reset_time=3.0):
  """Resets the robot; joint-limit checks are suspended while resetting."""
  self._currently_resetting = True
  super().Reset(
      reload_urdf=reload_urdf,
      default_motor_angles=default_motor_angles,
      reset_time=reset_time)
  self._currently_resetting = False
def _CollapseReset(self, default_motor_angles, reset_time):
  """Sets joint torques to 0, then moves joints within bounds.

  Args:
    default_motor_angles: Unused by this reset strategy.
    reset_time: Unused by this reset strategy.
  """
  del default_motor_angles
  del reset_time
  # Important to fill the observation buffer.
  self.ReceiveObservation()
  # Spend 1 second collapsing.
  half_steps_to_reset = int(0.5 / self.time_step)
  for _ in range(half_steps_to_reset):
    self.Brake()
  # Then half a second of explicit zero-torque stepping.
  for _ in range(half_steps_to_reset):
    self._StepInternal(
        np.zeros((self.num_motors,)),
        motor_control_mode=robot_config.MotorControlMode.TORQUE)
  self._SafeJointsReset()
def _ClipMotorAngles(self, desired_angles, current_angles):
if self._enable_clip_motor_commands:
angle_ub = np.minimum(self._joint_angle_upper_limits,
current_angles + MAX_MOTOR_ANGLE_CHANGE_PER_STEP)
angle_lb = np.maximum(self._joint_angle_lower_limits,
current_angles - MAX_MOTOR_ANGLE_CHANGE_PER_STEP)
else:
angle_ub = self._joint_angle_upper_limits
angle_lb = self._joint_angle_lower_limits
return np.clip(desired_angles, angle_lb, angle_ub)
def _ClipMotorCommands(self, motor_commands, motor_control_mode):
  """Clips commands to respect any set joint angle and torque limits.

  Always clips position to be within ACTION_CONFIG. If
  self._enable_clip_motor_commands, also clips positions to be within
  MAX_MOTOR_ANGLE_CHANGE_PER_STEP of current positions.
  Always clips torques to be within self._motor_torque_limits (but the torque
  limits can be infinity).

  Args:
    motor_commands: np.array. Can be motor angles, torques, or hybrid.
    motor_control_mode: A MotorControlMode enum.

  Returns:
    Clipped motor commands. NOTE: in HYBRID mode the input array is
    modified in place and returned.
  """
  if motor_control_mode == robot_config.MotorControlMode.TORQUE:
    return np.clip(motor_commands, -1 * self._motor_torque_limits, self._motor_torque_limits)
  if motor_control_mode == robot_config.MotorControlMode.POSITION:
    return self._ClipMotorAngles(
        desired_angles=motor_commands,
        current_angles=self.GetTrueMotorAngles())
  if motor_control_mode == robot_config.MotorControlMode.HYBRID:
    # Hybrid commands pack 5 values per motor; offset 0 is the target angle.
    # Clip angles
    angles = motor_commands[np.array(range(NUM_MOTORS)) * 5]
    clipped_positions = self._ClipMotorAngles(
        desired_angles=angles,
        current_angles=self.GetTrueMotorAngles())
    motor_commands[np.array(range(NUM_MOTORS)) * 5] = clipped_positions
    # Clip torques (offset 4 is the feedforward torque).
    torques = motor_commands[np.array(range(NUM_MOTORS)) * 5 + 4]
    clipped_torques = np.clip(torques, -1 * self._motor_torque_limits, self._motor_torque_limits)
    motor_commands[np.array(range(NUM_MOTORS)) * 5 + 4] = clipped_torques
    return motor_commands
def Brake(self):
# Braking on the real robot has more resistance than this.
# Call super to avoid doing safety checks while braking.
super()._StepInternal(
np.zeros((self.num_motors,)),
motor_control_mode=robot_config.MotorControlMode.TORQUE)
self.LogTimesteps()
def HoldCurrentPose(self):
"""For compatibility with A1Robot."""
pass
def _ValidateMotorStates(self):
# Check torque.
if any(np.abs(self.GetTrueMotorTorques()) > self._motor_torque_limits):
raise robot_config.SafetyError(
"Torque limits exceeded\ntorques: {}".format(
self.GetTrueMotorTorques()))
# Check joint velocities.
if any(np.abs(self.GetTrueMotorVelocities()) > MAX_JOINT_VELOCITY):
raise robot_config.SafetyError(
"Velocity limits exceeded\nvelocities: {}".format(
self.GetTrueMotorVelocities()))
# Joints often start out of bounds (in sim they're 0 and on real they're
# slightly out of bounds), so we don't check angles during reset.
if self._currently_resetting or self.running_reset_policy:
return
# Check joint positions.
if (any(self.GetTrueMotorAngles() > (self._joint_angle_upper_limits +
self.JOINT_EPSILON)) or
any(self.GetTrueMotorAngles() < (self._joint_angle_lower_limits -
self.JOINT_EPSILON))):
raise robot_config.SafetyError(
"Joint angle limits exceeded\nangles: {}".format(
self.GetTrueMotorAngles()))
def _StepInternal(self, action, motor_control_mode=None):
if self._timesteps is not None:
now = time.time()
self._timesteps.append(now - self._last_step_time_wall)
self._last_step_time_wall = now
if not self._is_safe:
return
super()._StepInternal(action, motor_control_mode)
# real world
try:
self._ValidateMotorStates()
except robot_config.SafetyError as e:
print(e)
self.Brake()
self._is_safe = False
def ReceiveObservation(self):
super().ReceiveObservation()
if self._velocity_estimator:
self._velocity_estimator.update(self.GetTimeSinceReset())
def GetBaseVelocity(self):
if self._velocity_estimator:
return self._velocity_estimator.estimated_velocity
return super().GetBaseVelocity()
def LogTimesteps(self):
if self._timesteps is None or not len(self._timesteps):
return
timesteps = np.asarray(self._timesteps[1:])
print('=====\nTimestep stats (secs)\nlen: ', len(timesteps), '\nmean: ',
np.mean(timesteps), "\nmin: ", np.min(timesteps), "\nmax: ",
np.max(timesteps), "\nstd: ", np.std(timesteps), "\n=====")
@classmethod
def GetConstants(cls):
del cls
return laikago_constants
def ComputeMotorAnglesFromFootLocalPosition(self, leg_id,
foot_local_position):
"""Use IK to compute the motor angles, given the foot link's local position.
Args:
leg_id: The leg index.
foot_local_position: The foot link's position in the base frame.
Returns:
A tuple. The position indices and the angles for all joints along the
leg. The position indices is consistent with the joint orders as returned
by GetMotorAngles API.
"""
assert len(self._foot_link_ids) == self.num_legs
motors_per_leg = self.num_motors // self.num_legs
joint_position_idxs = list(
range(leg_id * motors_per_leg,
leg_id * motors_per_leg + motors_per_leg))
joint_angles = foot_position_in_hip_frame_to_joint_angle(
foot_local_position - HIP_OFFSETS[leg_id],
l_hip_sign=(-1)**(leg_id + 1))
# Joint offset is necessary for Laikago.
joint_angles = np.multiply(
np.asarray(joint_angles) -
np.asarray(self._motor_offset)[joint_position_idxs],
self._motor_direction[joint_position_idxs])
# Return the joing index (the same as when calling GetMotorAngles) as well
# as the angles.
return joint_position_idxs, joint_angles.tolist()
def GetFootPositionsInBaseFrame(self):
"""Get the robot's foot position in the base frame."""
motor_angles = self.GetMotorAngles()
return foot_positions_in_base_frame(motor_angles)
def ComputeJacobian(self, leg_id):
"""Compute the Jacobian for a given leg."""
# Does not work for Minitaur which has the four bar mechanism for now.
motor_angles = self.GetMotorAngles()[leg_id * 3:(leg_id + 1) * 3]
return analytical_leg_jacobian(motor_angles, leg_id)
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/motion_imitation/robots/gamepad/gamepad_reader.py | motion_imitation/robots/gamepad/gamepad_reader.py | from absl import app
from absl import flags
from inputs import get_gamepad
import threading
import time
FLAGS = flags.FLAGS
MAX_ABS_RX = 32768
MAX_ABS_RY = 32768
def _interpolate(raw_reading, max_raw_reading, new_scale):
return raw_reading / max_raw_reading * new_scale
class Gamepad:
"""Interface for reading commands from Logitech F710 Gamepad.
The control works as following:
1) Press LB+RB at any time for emergency stop
2) Use the left joystick for forward/backward/left/right walking.
3) Use the right joystick for rotation around the z-axis.
"""
def __init__(self, vel_scale_x=.4, vel_scale_y=.4, vel_scale_rot=1.):
"""Initialize the gamepad controller.
Args:
vel_scale_x: maximum absolute x-velocity command.
vel_scale_y: maximum absolute y-velocity command.
vel_scale_rot: maximum absolute yaw-dot command.
"""
self._vel_scale_x = vel_scale_x
self._vel_scale_y = vel_scale_y
self._vel_scale_rot = vel_scale_rot
self._lb_pressed = False
self._rb_pressed = False
# Controller states
self.vx, self.vy, self.wz = 0., 0., 0.
self.estop_flagged = False
self.is_running = True
self.read_thread = threading.Thread(target=self.read_loop)
self.read_thread.start()
def read_loop(self):
"""The read loop for events.
This funnction should be executed in a separate thread for continuous
event recording.
"""
while self.is_running and not self.estop_flagged:
events = get_gamepad()
for event in events:
self.update_command(event)
def update_command(self, event):
"""Update command based on event readings."""
if event.ev_type == 'Key' and event.code == 'BTN_TL':
self._lb_pressed = bool(event.state)
elif event.ev_type == 'Key' and event.code == 'BTN_TR':
self._rb_pressed = bool(event.state)
elif event.ev_type == 'Absolute' and event.code == 'ABS_X':
# Left Joystick L/R axis
self.vy = _interpolate(-event.state, MAX_ABS_RX, self._vel_scale_y)
elif event.ev_type == 'Absolute' and event.code == 'ABS_Y':
# Left Joystick F/B axis; need to flip sign for consistency
self.vx = _interpolate(-event.state, MAX_ABS_RY, self._vel_scale_x)
elif event.ev_type == 'Absolute' and event.code == 'ABS_RX':
self.wz = _interpolate(event.state, MAX_ABS_RX, self._vel_scale_rot)
if self._lb_pressed and self._rb_pressed:
self.estop_flagged = True
self.vx, self.vy, self.wz = 0., 0., 0.
def get_command(self, time_since_reset):
del time_since_reset # unused
return (self.vx, self.vy, 0), self.wz, self.estop_flagged
def stop(self):
self.is_running = False
def main(_):
gamepad = Gamepad()
while True:
print("Vx: {}, Vy: {}, Wz: {}, Estop: {}".format(gamepad.vx, gamepad.vy,
gamepad.wz,
gamepad.estop_flagged))
time.sleep(0.1)
if __name__ == "__main__":
app.run(main)
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/motion_imitation/examples/test_robot_interface.py | motion_imitation/examples/test_robot_interface.py | """Test the C++ robot interface.
Follow the
"""
from robot_interface import RobotInterface # pytype: disable=import-error
i = RobotInterface()
o = i.receive_observation()
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/motion_imitation/examples/mpc_example.py | motion_imitation/examples/mpc_example.py |
from __future__ import absolute_import
from __future__ import division
#from __future__ import google_type_annotations
from __future__ import print_function
import os
import inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(os.path.dirname(currentdir))
os.sys.path.insert(0, parentdir)
from absl import app
from absl import flags
import scipy.interpolate
import numpy as np
import pybullet_data as pd
from pybullet_utils import bullet_client
import time
import pybullet
import random
from motion_imitation.envs import env_builder as env_builder
from motion_imitation.robots import robot_config
from mpc_controller import com_velocity_estimator
from mpc_controller import gait_generator as gait_generator_lib
from mpc_controller import locomotion_controller
from mpc_controller import openloop_gait_generator
from mpc_controller import raibert_swing_leg_controller
from mpc_controller import torque_stance_leg_controller
from mpc_controller import laikago_sim
FLAGS = flags.FLAGS
_NUM_SIMULATION_ITERATION_STEPS = 300
_BODY_HEIGHT = 0.42
_STANCE_DURATION_SECONDS = [
0.3
] * 4 # For faster trotting (v > 1.5 ms reduce this to 0.13s).
_DUTY_FACTOR = [0.6] * 4
_INIT_PHASE_FULL_CYCLE = [0.9, 0, 0, 0.9]
_MAX_TIME_SECONDS = 25
_MOTOR_KD = [1.0, 2.0, 2.0] * 4
LAIKAGO_STANDING = (
gait_generator_lib.LegState.STANCE,
gait_generator_lib.LegState.STANCE,
gait_generator_lib.LegState.STANCE,
gait_generator_lib.LegState.STANCE,
)
def _generate_example_linear_angular_speed(t):
"""Creates an example speed profile based on time for demo purpose."""
vx = 0.6
vy = 0.2
wz = 0.8
time_points = (0, 5, 10, 15, 20, 25,30)
speed_points = ((0, 0, 0, 0), (0, 0, 0, wz), (vx, 0, 0, 0), (0, 0, 0, -wz), (0, -vy, 0, 0),
(0, 0, 0, 0), (0, 0, 0, wz))
speed = scipy.interpolate.interp1d(
time_points,
speed_points,
kind="previous",
fill_value="extrapolate",
axis=0)(
t)
return speed[0:3], speed[3]
def _setup_controller(robot):
"""Demonstrates how to create a locomotion controller."""
desired_speed = (0, 0)
desired_twisting_speed = 0
gait_generator = openloop_gait_generator.OpenloopGaitGenerator(
robot,
stance_duration=_STANCE_DURATION_SECONDS,
duty_factor=_DUTY_FACTOR,
initial_leg_phase=_INIT_PHASE_FULL_CYCLE)
state_estimator = com_velocity_estimator.COMVelocityEstimator(robot)
sw_controller = raibert_swing_leg_controller.RaibertSwingLegController(
robot,
gait_generator,
state_estimator,
desired_speed=desired_speed,
desired_twisting_speed=desired_twisting_speed,
desired_height=_BODY_HEIGHT,
foot_clearance=0.01
)
st_controller = torque_stance_leg_controller.TorqueStanceLegController(
robot,
gait_generator,
state_estimator,
desired_speed=desired_speed,
desired_twisting_speed=desired_twisting_speed,
desired_body_height=_BODY_HEIGHT,
body_mass=215 / 9.8,
body_inertia=(0.07335, 0, 0, 0, 0.25068, 0, 0, 0, 0.25447),
)
controller = locomotion_controller.LocomotionController(
robot=robot,
gait_generator=gait_generator,
state_estimator=state_estimator,
swing_leg_controller=sw_controller,
stance_leg_controller=st_controller,
clock=robot.GetTimeSinceReset)
return controller
def _update_controller_params(controller, lin_speed, ang_speed):
controller.swing_leg_controller.desired_speed = lin_speed
controller.swing_leg_controller.desired_twisting_speed = ang_speed
controller.stance_leg_controller.desired_speed = lin_speed
controller.stance_leg_controller.desired_twisting_speed = ang_speed
def _run_example(max_time=_MAX_TIME_SECONDS):
"""Runs the locomotion controller example."""
env = env_builder.build_laikago_env( motor_control_mode = robot_config.MotorControlMode.HYBRID, enable_rendering=True)
env.reset()
controller = _setup_controller(env.robot)
controller.reset()
current_time = env.robot.GetTimeSinceReset()
while current_time < max_time:
# Updates the controller behavior parameters.
lin_speed, ang_speed = _generate_example_linear_angular_speed(current_time)
_update_controller_params(controller, lin_speed, ang_speed)
# Needed before every call to get_action().
controller.update()
hybrid_action, info = controller.get_action()
env.step(hybrid_action)
current_time = env.robot.GetTimeSinceReset()
def main(argv):
del argv
_run_example()
if __name__ == "__main__":
app.run(main)
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/motion_imitation/examples/a1_robot_sim_to_real.py | motion_imitation/examples/a1_robot_sim_to_real.py | """Apply the same action to the simulated and real A1 robot.
As a basic debug tool, this script allows you to execute the same action
(which you choose from the pybullet GUI) on the simulation and real robot
simultaneouly. Make sure to put the real robot on rack before testing.
"""
import os
import inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(os.path.dirname(currentdir))
os.sys.path.insert(0, parentdir)
from absl import app
from absl import logging
import numpy as np
import pybullet as p # pytype: disable=import-error
from motion_imitation.envs import env_builder
from motion_imitation.robots import a1
from motion_imitation.robots import a1_robot
from motion_imitation.robots import robot_config
def main(_):
logging.info("WARNING: this code executes low-level controller on the robot.")
logging.info("Make sure the robot is hang on rack before proceeding.")
input("Press enter to continue...")
# Construct sim env and real robot
sim_env = env_builder.build_regular_env(
robot_class=a1.A1,
motor_control_mode=robot_config.MotorControlMode.POSITION,
on_rack=True,
enable_rendering=True,
wrap_trajectory_generator=False)
real_env = env_builder.build_regular_env(
robot_class=a1_robot.A1Robot,
motor_control_mode=robot_config.MotorControlMode.POSITION,
on_rack=False,
enable_rendering=False,
wrap_trajectory_generator=False)
# Add debug sliders
action_low, action_high = sim_env.action_space.low, sim_env.action_space.high
dim_action = action_low.shape[0]
action_selector_ids = []
robot_motor_angles = real_env.robot.GetMotorAngles()
for dim in range(dim_action):
action_selector_id = p.addUserDebugParameter(
paramName='dim{}'.format(dim),
rangeMin=action_low[dim],
rangeMax=action_high[dim],
startValue=robot_motor_angles[dim])
action_selector_ids.append(action_selector_id)
# Visualize debug slider in sim
for _ in range(10000):
# Get user action input
action = np.zeros(dim_action)
for dim in range(dim_action):
action[dim] = sim_env.pybullet_client.readUserDebugParameter(
action_selector_ids[dim])
real_env.step(action)
sim_env.step(action)
real_env.Terminate()
if __name__ == '__main__':
app.run(main)
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/motion_imitation/examples/a1_print_angles.py | motion_imitation/examples/a1_print_angles.py | """Reads and prints joint angles from A1 robot without powering them.
By default prints all joint angles. To select specific joints:
`python a1_print_angles.py --joint FR_hip_motor --joint RL_upper_joint`
"""
import inspect
import os
currentdir = os.path.dirname(
os.path.abspath(inspect.getfile(inspect.currentframe())))
grandparentdir = os.path.dirname(os.path.dirname(currentdir))
os.sys.path.insert(0, grandparentdir)
from absl import app
from absl import flags
from motion_imitation.robots import a1_robot
import pybullet
import pybullet_data
from pybullet_utils import bullet_client
JOINT_DICT = {
"FR_hip_motor": 0, "FR_upper_joint": 1, "FR_lower_joint": 2,
"FL_hip_motor": 3, "FL_upper_joint": 4, "FL_lower_joint": 5,
"RR_hip_motor": 6, "RR_upper_joint": 7, "RR_lower_joint": 8,
"RL_hip_motor": 9, "RL_upper_joint": 10, "RL_lower_joint": 11,
}
flags.DEFINE_multi_string("joint", JOINT_DICT.keys(),
"Names of joints to measure.")
FLAGS = flags.FLAGS
def main(_):
p = bullet_client.BulletClient(connection_mode=pybullet.DIRECT)
p.setAdditionalSearchPath(pybullet_data.getDataPath())
robot = a1_robot.A1Robot(pybullet_client=p, action_repeat=1)
while True:
robot.ReceiveObservation()
angles = robot.GetTrueMotorAngles()
if len(angles) != 12:
continue
print_list = [
"{}:{:9.6f}".format(joint, angles[JOINT_DICT[joint]])
for joint in FLAGS.joint
]
print(", ".join(print_list))
if __name__ == "__main__":
app.run(main)
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/motion_imitation/examples/replay_actions.py | motion_imitation/examples/replay_actions.py | """Replays pre-recorded actions on the robot."""
from absl import app
from absl import flags
import numpy as np
import pybullet # pytype:disable=import-error
import pybullet_data
from pybullet_utils import bullet_client
import time
# from motion_imitation.robots import a1
from motion_imitation.robots import a1_robot
# from motion_imitation.envs import env_builder
from motion_imitation.robots import robot_config
flags.DEFINE_string('traj_dir', None, 'directory of trajectory file.')
FLAGS = flags.FLAGS
def main(_):
traj = dict(np.load(FLAGS.traj_dir))
p = bullet_client.BulletClient(connection_mode=pybullet.DIRECT)
p.setPhysicsEngineParameter(numSolverIterations=30)
p.setTimeStep(0.001)
p.setGravity(0, 0, -10)
p.setPhysicsEngineParameter(enableConeFriction=0)
p.setAdditionalSearchPath(pybullet_data.getDataPath())
p.loadURDF("plane.urdf")
robot = a1_robot.A1Robot(
p,
motor_control_mode=robot_config.MotorControlMode.HYBRID,
enable_action_interpolation=False,
reset_time=2)
# env = env_builder.build_regular_env(
# a1.A1,
# motor_control_mode=robot_config.MotorControlMode.HYBRID,
# enable_rendering=True,
# on_rack=False,
# wrap_trajectory_generator=False)
# robot = env.robot
input("Press Enter Key to Start...")
for action in traj['action'][:100]:
robot.Step(action)
time.sleep(0.01)
robot.Terminate()
if __name__ == "__main__":
app.run(main)
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/motion_imitation/examples/example.py | motion_imitation/examples/example.py | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(os.path.dirname(currentdir))
os.sys.path.insert(0, parentdir)
import argparse
import numpy as np
import os
import random
import time
from motion_imitation.envs import env_builder as env_builder
from motion_imitation.robots import robot_config
from motion_imitation.robots import laikago
def test(env):
o = env.reset()
while 1:
a = laikago.INIT_MOTOR_ANGLES
o, r, done, info = env.step(a)
if done:
o = env.reset()
return
def main():
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument("--seed", dest="seed", type=int, default=None)
arg_parser.add_argument("--visualize", dest="visualize", action="store_true", default=True)
args = arg_parser.parse_args()
env = env_builder.build_laikago_env( motor_control_mode = robot_config.MotorControlMode.POSITION, enable_rendering=args.visualize)
test(env=env)
return
if __name__ == '__main__':
main()
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/motion_imitation/examples/whole_body_controller_example.py | motion_imitation/examples/whole_body_controller_example.py | """Example of whole body controller on A1 robot."""
import os
import inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(os.path.dirname(currentdir))
os.sys.path.insert(0, parentdir)
from absl import app
from absl import flags
from absl import logging
from datetime import datetime
import numpy as np
import os
import scipy.interpolate
import time
import pybullet_data
from pybullet_utils import bullet_client
import pybullet # pytype:disable=import-error
from mpc_controller import com_velocity_estimator
from mpc_controller import gait_generator as gait_generator_lib
from mpc_controller import locomotion_controller
from mpc_controller import openloop_gait_generator
from mpc_controller import raibert_swing_leg_controller
#from mpc_controller import torque_stance_leg_controller
from mpc_controller import torque_stance_leg_controller_quadprog as torque_stance_leg_controller
from motion_imitation.robots import a1
from motion_imitation.robots import robot_config
from motion_imitation.robots.gamepad import gamepad_reader
flags.DEFINE_string("logdir", None, "where to log trajectories.")
flags.DEFINE_bool("use_gamepad", False,
"whether to use gamepad to provide control input.")
flags.DEFINE_bool("use_real_robot", False,
"whether to use real robot or simulation")
flags.DEFINE_bool("show_gui", False, "whether to show GUI.")
flags.DEFINE_float("max_time_secs", 1., "maximum time to run the robot.")
FLAGS = flags.FLAGS
_NUM_SIMULATION_ITERATION_STEPS = 300
_MAX_TIME_SECONDS = 30.
_STANCE_DURATION_SECONDS = [
0.3
] * 4 # For faster trotting (v > 1.5 ms reduce this to 0.13s).
# Standing
# _DUTY_FACTOR = [1.] * 4
# _INIT_PHASE_FULL_CYCLE = [0., 0., 0., 0.]
# _INIT_LEG_STATE = (
# gait_generator_lib.LegState.STANCE,
# gait_generator_lib.LegState.STANCE,
# gait_generator_lib.LegState.STANCE,
# gait_generator_lib.LegState.STANCE,
# )
# Tripod
# _DUTY_FACTOR = [.8] * 4
# _INIT_PHASE_FULL_CYCLE = [0., 0.25, 0.5, 0.]
# _INIT_LEG_STATE = (
# gait_generator_lib.LegState.STANCE,
# gait_generator_lib.LegState.STANCE,
# gait_generator_lib.LegState.STANCE,
# gait_generator_lib.LegState.SWING,
# )
# Trotting
_DUTY_FACTOR = [0.6] * 4
_INIT_PHASE_FULL_CYCLE = [0.9, 0, 0, 0.9]
_INIT_LEG_STATE = (
gait_generator_lib.LegState.SWING,
gait_generator_lib.LegState.STANCE,
gait_generator_lib.LegState.STANCE,
gait_generator_lib.LegState.SWING,
)
def _generate_example_linear_angular_speed(t):
"""Creates an example speed profile based on time for demo purpose."""
vx = 0.6
vy = 0.2
wz = 0.8
time_points = (0, 5, 10, 15, 20, 25, 30)
speed_points = ((0, 0, 0, 0), (0, 0, 0, wz), (vx, 0, 0, 0), (0, 0, 0, -wz),
(0, -vy, 0, 0), (0, 0, 0, 0), (0, 0, 0, wz))
speed = scipy.interpolate.interp1d(time_points,
speed_points,
kind="previous",
fill_value="extrapolate",
axis=0)(t)
return speed[0:3], speed[3], False
def _setup_controller(robot):
"""Demonstrates how to create a locomotion controller."""
desired_speed = (0, 0)
desired_twisting_speed = 0
gait_generator = openloop_gait_generator.OpenloopGaitGenerator(
robot,
stance_duration=_STANCE_DURATION_SECONDS,
duty_factor=_DUTY_FACTOR,
initial_leg_phase=_INIT_PHASE_FULL_CYCLE,
initial_leg_state=_INIT_LEG_STATE)
window_size = 20 if not FLAGS.use_real_robot else 1
state_estimator = com_velocity_estimator.COMVelocityEstimator(
robot, window_size=window_size)
sw_controller = raibert_swing_leg_controller.RaibertSwingLegController(
robot,
gait_generator,
state_estimator,
desired_speed=desired_speed,
desired_twisting_speed=desired_twisting_speed,
desired_height=robot.MPC_BODY_HEIGHT,
foot_clearance=0.01)
st_controller = torque_stance_leg_controller.TorqueStanceLegController(
robot,
gait_generator,
state_estimator,
desired_speed=desired_speed,
desired_twisting_speed=desired_twisting_speed,
desired_body_height=robot.MPC_BODY_HEIGHT)
controller = locomotion_controller.LocomotionController(
robot=robot,
gait_generator=gait_generator,
state_estimator=state_estimator,
swing_leg_controller=sw_controller,
stance_leg_controller=st_controller,
clock=robot.GetTimeSinceReset)
return controller
def _update_controller_params(controller, lin_speed, ang_speed):
controller.swing_leg_controller.desired_speed = lin_speed
controller.swing_leg_controller.desired_twisting_speed = ang_speed
controller.stance_leg_controller.desired_speed = lin_speed
controller.stance_leg_controller.desired_twisting_speed = ang_speed
def main(argv):
"""Runs the locomotion controller example."""
del argv # unused
# Construct simulator
if FLAGS.show_gui and not FLAGS.use_real_robot:
p = bullet_client.BulletClient(connection_mode=pybullet.GUI)
else:
p = bullet_client.BulletClient(connection_mode=pybullet.DIRECT)
p.setPhysicsEngineParameter(numSolverIterations=30)
p.setTimeStep(0.001)
p.setGravity(0, 0, -9.8)
p.setPhysicsEngineParameter(enableConeFriction=0)
p.setAdditionalSearchPath(pybullet_data.getDataPath())
p.loadURDF("plane.urdf")
# Construct robot class:
if FLAGS.use_real_robot:
from motion_imitation.robots import a1_robot
robot = a1_robot.A1Robot(
pybullet_client=p,
motor_control_mode=robot_config.MotorControlMode.HYBRID,
enable_action_interpolation=False,
log_time_per_step=True,
time_step=0.002,
action_repeat=1)
else:
robot = a1.A1(p,
motor_control_mode=robot_config.MotorControlMode.HYBRID,
enable_action_interpolation=False,
reset_time=2,
log_time_per_step=True,
time_step=0.002,
action_repeat=1)
controller = _setup_controller(robot)
controller.reset()
if FLAGS.use_gamepad:
gamepad = gamepad_reader.Gamepad()
command_function = gamepad.get_command
else:
command_function = _generate_example_linear_angular_speed
if FLAGS.logdir:
logdir = os.path.join(FLAGS.logdir,
datetime.now().strftime('%Y_%m_%d_%H_%M_%S'))
os.makedirs(logdir)
start_time = robot.GetTimeSinceReset()
current_time = start_time
com_vels, imu_rates, actions = [], [], []
while current_time - start_time < FLAGS.max_time_secs:
#time.sleep(0.0008) #on some fast computer, works better with sleep on real A1?
start_time_robot = current_time
start_time_wall = time.time()
# Updates the controller behavior parameters.
lin_speed, ang_speed, e_stop = command_function(current_time)
# print(lin_speed)
if e_stop:
logging.info("E-stop kicked, exiting...")
break
_update_controller_params(controller, lin_speed, ang_speed)
controller.update()
hybrid_action, _ = controller.get_action()
com_vels.append(np.array(robot.GetBaseVelocity()).copy())
imu_rates.append(np.array(robot.GetBaseRollPitchYawRate()).copy())
actions.append(hybrid_action)
robot.Step(hybrid_action)
current_time = robot.GetTimeSinceReset()
if not FLAGS.use_real_robot:
expected_duration = current_time - start_time_robot
actual_duration = time.time() - start_time_wall
if actual_duration < expected_duration:
time.sleep(expected_duration - actual_duration)
if FLAGS.use_gamepad:
gamepad.stop()
if FLAGS.logdir:
np.savez(os.path.join(logdir, 'action.npz'),
action=actions,
com_vels=com_vels,
imu_rates=imu_rates)
logging.info("logged to: {}".format(logdir))
if __name__ == "__main__":
app.run(main)
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/motion_imitation/examples/a1_test_imu.py | motion_imitation/examples/a1_test_imu.py | """Executes scripted motions and logs IMU readings.
Pitches the robot forward and backward and rolls it slightly left and right with
fixed joint positions. By running this in both sim and real, the real robot's
IMU readings can be compared to simulation.
"""
import inspect
import os
currentdir = os.path.dirname(
os.path.abspath(inspect.getfile(inspect.currentframe())))
grandparentdir = os.path.dirname(os.path.dirname(currentdir))
os.sys.path.insert(0, grandparentdir)
from absl import app
from absl import flags
from absl import logging
import numpy as np
import time
from tqdm import tqdm
from motion_imitation.envs import env_builder
from motion_imitation.robots import a1_robot
from motion_imitation.robots import a1
from motion_imitation.robots import robot_config
FREQ = 0.5
flags.DEFINE_bool('real_robot', True,
'Whether to control a real robot or simulated.')
FLAGS = flags.FLAGS
def main(_):
if FLAGS.real_robot:
robot_class = a1_robot.A1Robot
logging.info('WARNING: this code executes low-level control on the robot.')
input('Press enter to continue...')
else:
robot_class = a1.A1
env = env_builder.build_regular_env(
robot_class,
motor_control_mode=robot_config.MotorControlMode.POSITION,
enable_rendering=not FLAGS.real_robot,
on_rack=False,
wrap_trajectory_generator=False)
robot = env.robot
# Move the motors slowly to initial position
robot.ReceiveObservation()
current_motor_angle = np.array(robot.GetMotorAngles())
desired_motor_angle = np.array([0., 0.9, -1.8] * 4)
for t in tqdm(range(300)):
blend_ratio = np.minimum(t / 200., 1)
action = (1 - blend_ratio
) * current_motor_angle + blend_ratio * desired_motor_angle
robot.Step(action, robot_config.MotorControlMode.POSITION)
logging.info(robot.GetTrueBaseRollPitchYaw())
time.sleep(0.005)
# Pitch up
for t in tqdm(range(200)):
angle_hip = 0.25
angle_hip_2 = 1.5
angle_calf = -1
angle_calf_2 = -2
action = np.array([
0., angle_hip, angle_calf, 0., angle_hip, angle_calf, 0., angle_hip_2,
angle_calf_2, 0., angle_hip_2, angle_calf_2
])
robot.Step(action, robot_config.MotorControlMode.POSITION)
time.sleep(0.007)
logging.info(robot.GetTrueBaseRollPitchYaw())
logging.info('pitch up: %f', robot.GetTrueBaseRollPitchYaw()[1])
pitch_up = robot.GetTrueBaseRollPitchYaw()[1]
# Pitch down
for t in tqdm(range(200)):
angle_hip = 0.25
angle_hip_2 = 0.8
angle_calf = -1
angle_calf_2 = -2.4
action = np.array([
0., angle_hip_2, angle_calf_2, 0., angle_hip_2, angle_calf_2, 0.,
angle_hip, angle_calf, 0., angle_hip, angle_calf
])
robot.Step(action, robot_config.MotorControlMode.POSITION)
time.sleep(0.007)
logging.info(robot.GetTrueBaseRollPitchYaw())
logging.info('pitch down: %f', robot.GetTrueBaseRollPitchYaw()[1])
pitch_down = robot.GetTrueBaseRollPitchYaw()[1]
# Roll right
angle_hip = 0.5
angle_hip_2 = 0.9
angle_calf = -1.5
angle_calf_2 = -1.8
action = np.array([
0., angle_hip_2, angle_calf_2, 0., angle_hip, angle_calf, 0., angle_hip_2,
angle_calf_2, 0., angle_hip, angle_calf
])
for t in tqdm(range(200)):
robot.Step(action, robot_config.MotorControlMode.POSITION)
time.sleep(0.007)
logging.info(robot.GetTrueBaseRollPitchYaw())
logging.info('roll right: %f', robot.GetTrueBaseRollPitchYaw()[0])
roll_right = robot.GetTrueBaseRollPitchYaw()[0]
# Roll left
action = np.array([
0.,
angle_hip,
angle_calf,
0.,
angle_hip_2,
angle_calf_2,
0.,
angle_hip,
angle_calf,
0.,
angle_hip_2,
angle_calf_2,
])
for t in tqdm(range(200)):
robot.Step(action, robot_config.MotorControlMode.POSITION)
time.sleep(0.007)
logging.info(robot.GetTrueBaseRollPitchYaw())
logging.info('roll left: %f', robot.GetTrueBaseRollPitchYaw()[0])
roll_left = robot.GetTrueBaseRollPitchYaw()[0]
robot.Terminate()
logging.info(
'\npitch up: %f \npitch down: %f \nroll right: %f \nroll left: %f\n',
pitch_up, pitch_down, roll_right, roll_left)
if __name__ == '__main__':
app.run(main)
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/motion_imitation/examples/__init__.py | motion_imitation/examples/__init__.py | python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false | |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/motion_imitation/examples/random_action.py | motion_imitation/examples/random_action.py | """Simple script for executing random actions on A1 robot."""
import os
import inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(os.path.dirname(currentdir))
os.sys.path.insert(0, parentdir)
from absl import app
from absl import flags
from tqdm import tqdm
from motion_imitation.envs import env_builder
from motion_imitation.robots import a1
from motion_imitation.robots import laikago
from motion_imitation.robots import robot_config
FLAGS = flags.FLAGS
# Command-line configuration: which robot model to load, which low-level
# motor control mode to use, and whether to suspend the robot on a rack.
flags.DEFINE_enum('robot_type', 'A1', ['A1', 'Laikago'], 'Robot Type.')
flags.DEFINE_enum('motor_control_mode', 'Torque',
                  ['Torque', 'Position', 'Hybrid'], 'Motor Control Mode.')
flags.DEFINE_bool('on_rack', False, 'Whether to put the robot on rack.')
# Maps the string flag values to the robot classes and motor-control enums.
ROBOT_CLASS_MAP = {'A1': a1.A1, 'Laikago': laikago.Laikago}
MOTOR_CONTROL_MODE_MAP = {
    'Torque': robot_config.MotorControlMode.TORQUE,
    'Position': robot_config.MotorControlMode.POSITION,
    'Hybrid': robot_config.MotorControlMode.HYBRID
}
def main(_):
  """Builds the configured robot env and runs 1000 uniformly random actions."""
  robot_class = ROBOT_CLASS_MAP[FLAGS.robot_type]
  control_mode = MOTOR_CONTROL_MODE_MAP[FLAGS.motor_control_mode]
  env = env_builder.build_regular_env(
      robot_class,
      motor_control_mode=control_mode,
      enable_rendering=True,
      on_rack=FLAGS.on_rack)
  env.reset()
  # Step with random actions until the episode terminates or 1000 steps pass.
  for _ in tqdm(range(1000)):
    step_result = env.step(env.action_space.sample())
    done = step_result[2]
    if done:
      break
if __name__ == "__main__":
  app.run(main)
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/motion_imitation/examples/test_env_gui.py | motion_imitation/examples/test_env_gui.py | """Simple script for executing random actions on A1 robot."""
import os
import inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(os.path.dirname(currentdir))
os.sys.path.insert(0, parentdir)
from absl import app
from absl import flags
import numpy as np
from tqdm import tqdm
import pybullet as p # pytype: disable=import-error
from motion_imitation.envs import env_builder
from motion_imitation.robots import a1
from motion_imitation.robots import laikago
from motion_imitation.robots import robot_config
FLAGS = flags.FLAGS
# Command-line configuration: robot model, low-level motor control mode,
# rack suspension, and an optional video capture path.
flags.DEFINE_enum('robot_type', 'A1', ['A1', 'Laikago'], 'Robot Type.')
flags.DEFINE_enum('motor_control_mode', 'Torque',
                  ['Torque', 'Position', 'Hybrid'], 'Motor Control Mode.')
flags.DEFINE_bool('on_rack', False, 'Whether to put the robot on rack.')
flags.DEFINE_string('video_dir', None,
                    'Where to save video (or None for not saving).')
# Maps the string flag values to the robot classes and motor-control enums.
ROBOT_CLASS_MAP = {'A1': a1.A1, 'Laikago': laikago.Laikago}
MOTOR_CONTROL_MODE_MAP = {
    'Torque': robot_config.MotorControlMode.TORQUE,
    'Position': robot_config.MotorControlMode.POSITION,
    'Hybrid': robot_config.MotorControlMode.HYBRID
}
def main(_):
  """Opens the env GUI with one debug slider per action dimension.

  Each simulation step reads the current slider values and applies them as
  the action; optionally records an MP4 of the run.
  """
  robot_class = ROBOT_CLASS_MAP[FLAGS.robot_type]
  control_mode = MOTOR_CONTROL_MODE_MAP[FLAGS.motor_control_mode]
  env = env_builder.build_regular_env(robot_class,
                                      motor_control_mode=control_mode,
                                      enable_rendering=True,
                                      on_rack=FLAGS.on_rack,
                                      wrap_trajectory_generator=False)
  low, high = env.action_space.low, env.action_space.high
  midpoint = (low + high) / 2.
  num_dims = low.shape[0]
  # One debug slider per action dimension, starting at the range midpoint.
  slider_ids = [
      p.addUserDebugParameter(paramName='dim{}'.format(axis),
                              rangeMin=low[axis],
                              rangeMax=high[axis],
                              startValue=midpoint[axis])
      for axis in range(num_dims)
  ]
  if FLAGS.video_dir:
    log_id = p.startStateLogging(p.STATE_LOGGING_VIDEO_MP4, FLAGS.video_dir)
  for _ in tqdm(range(800)):
    action = np.zeros(num_dims)
    for axis in range(num_dims):
      action[axis] = env.pybullet_client.readUserDebugParameter(
          slider_ids[axis])
    env.step(action)
  if FLAGS.video_dir:
    p.stopStateLogging(log_id)
if __name__ == "__main__":
  app.run(main)
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/motion_imitation/examples/a1_robot_exercise.py | motion_imitation/examples/a1_robot_exercise.py | """Commands A1 robot to raise and lower its legs so it crouches and stands up.
Can be run in sim by setting --real_robot=False.
"""
import inspect
import os
currentdir = os.path.dirname(
os.path.abspath(inspect.getfile(inspect.currentframe())))
grandparentdir = os.path.dirname(os.path.dirname(currentdir))
os.sys.path.insert(0, grandparentdir)
from absl import app
from absl import flags
from absl import logging
import numpy as np
import time
from tqdm import tqdm
from motion_imitation.envs import env_builder
from motion_imitation.robots import a1_robot
from motion_imitation.robots import a1
from motion_imitation.robots import robot_config
# Frequency (Hz) of the sinusoidal leg motion in main().
FREQ = 0.5
flags.DEFINE_bool(
    "real_robot", True, "Whether to control a real robot or simulated.")
FLAGS = flags.FLAGS
def main(_):
  """Blends the robot into a standing pose, then bobs the legs sinusoidally.

  Controls either the real A1 (default) or a simulated one
  (--real_robot=False). Runs position control throughout.
  """
  if FLAGS.real_robot:
    robot_class = a1_robot.A1Robot
    # Low-level control on real hardware: require explicit confirmation.
    logging.info("WARNING: this code executes low-level control on the robot.")
    input("Press enter to continue...")
  else:
    robot_class = a1.A1
  env = env_builder.build_regular_env(
      robot_class,
      motor_control_mode=robot_config.MotorControlMode.POSITION,
      enable_rendering=not FLAGS.real_robot,
      on_rack=False,
      wrap_trajectory_generator=False)
  robot = env.robot
  # Move the motors slowly to initial position
  robot.ReceiveObservation()
  current_motor_angle = np.array(robot.GetMotorAngles())
  # One [0., 0.9, -1.8] triple per leg; matches the [0., hip, calf] layout
  # used in the sinusoid loop below.
  desired_motor_angle = np.array([0., 0.9, -1.8] * 4)
  for t in tqdm(range(300)):
    print(robot.GetBaseOrientation())
    # Linearly blend from the measured pose to the target over the first 200
    # steps; blend_ratio saturates at 1 and holds the target afterwards.
    blend_ratio = np.minimum(t / 200., 1)
    action = (1 - blend_ratio
              ) * current_motor_angle + blend_ratio * desired_motor_angle
    robot.Step(action, robot_config.MotorControlMode.POSITION)
    time.sleep(0.005)
  # Move the legs in a sinusoidal curve
  for t in tqdm(range(1000)):
    print(robot.GetBaseOrientation())
    # Hip oscillates around 0.9 rad at FREQ Hz (the phase advances 0.01 per
    # step); the calf command is always -2x the hip command.
    angle_hip = 0.9 + 0.2 * np.sin(2 * np.pi * FREQ * 0.01 * t)
    angle_calf = -2 * angle_hip
    action = np.array([0., angle_hip, angle_calf] * 4)
    robot.Step(action, robot_config.MotorControlMode.POSITION)
    time.sleep(0.007)
  robot.Terminate()
if __name__ == "__main__":
  app.run(main)
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/motion_imitation/utilities/motion_util.py | motion_imitation/utilities/motion_util.py | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for processing motion clips."""
import os
import inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(os.path.dirname(currentdir))
os.sys.path.insert(0, parentdir)
import numpy as np
from motion_imitation.utilities import pose3d
from pybullet_utils import transformations
def standardize_quaternion(q):
  """Returns an equivalent quaternion whose scalar part is non-negative.

  q and -q encode the same rotation; this picks the representative with
  q.w >= 0 to remove that redundancy.

  Args:
    q: A quaternion [x, y, z, w] to be standardized.

  Returns:
    A quaternion with q.w >= 0.
  """
  return -q if q[-1] < 0 else q
def normalize_rotation_angle(theta):
  """Returns a rotation angle normalized between [-pi, pi].

  Args:
    theta: angle of rotation (radians).

  Returns:
    An angle of rotation normalized between [-pi, pi].
  """
  # Reduce to (-2*pi, 2*pi); np.fmod keeps the sign of theta.
  norm_theta = np.fmod(theta, 2 * np.pi)
  # Shift into [-pi, pi] only when the remainder is actually out of range.
  # The previous implementation shifted unconditionally whenever
  # |theta| > pi, which produced out-of-range results for |theta| >= 2*pi
  # (e.g. 2.5*pi was mapped to -1.5*pi instead of 0.5*pi).
  if norm_theta > np.pi:
    norm_theta -= 2 * np.pi
  elif norm_theta < -np.pi:
    norm_theta += 2 * np.pi
  return norm_theta
def calc_heading(q):
  """Returns the heading of a rotation q, specified as a quaternion.

  The heading is the rotational component of q about the vertical (z) axis,
  measured by rotating the world x axis with q and reading off the yaw of
  the result.

  Args:
    q: A quaternion that the heading is to be computed from.

  Returns:
    An angle representing the rotation about the z axis.
  """
  rotated_x = pose3d.QuaternionRotatePoint(np.array([1, 0, 0]), q)
  return np.arctan2(rotated_x[1], rotated_x[0])
def calc_heading_rot(q):
  """Returns a quaternion containing only the heading (yaw) component of q.

  Args:
    q: A quaternion that the heading is to be computed from.

  Returns:
    A quaternion representing the rotation about the z axis.
  """
  yaw = calc_heading(q)
  return transformations.quaternion_about_axis(yaw, [0, 0, 1])
def to_matrix(position, roll_pitch_yaw):
  """Builds a 3x3 homogeneous planar transform from xy position and yaw.

  Only position[0], position[1] and roll_pitch_yaw[2] (yaw) are used.

  Args:
    position: Sequence whose first two entries are the x, y translation.
    roll_pitch_yaw: Sequence whose third entry is the yaw angle (radians).

  Returns:
    A 3x3 numpy array [[cos, -sin, x], [sin, cos, y], [0, 0, 1]].
  """
  yaw = roll_pitch_yaw[2]
  c = np.cos(yaw)
  s = np.sin(yaw)
  return np.array([[c, -s, position[0]],
                   [s, c, position[1]],
                   [0.0, 0.0, 1.0]])
def normalize_angle(theta):
  """Wraps an angle (radians) into the interval [-pi, pi)."""
  shifted = np.mod(theta + np.pi, 2 * np.pi)
  return shifted - np.pi
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/motion_imitation/utilities/motion_data.py | motion_imitation/utilities/motion_data.py | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Motion data class for processing motion clips."""
import os
import inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(os.path.dirname(currentdir))
os.sys.path.insert(0, parentdir)
import json
import logging
import math
import enum
import numpy as np
from motion_imitation.utilities import pose3d
from motion_imitation.utilities import motion_util
from pybullet_utils import transformations
class LoopMode(enum.Enum):
  """Specifies if a motion should loop or stop at the last frame."""
  # Hold the final frame once the motion reaches its end.
  Clamp = 0
  # Wrap back to the start and keep cycling.
  Wrap = 1
class MotionData(object):
  """Motion data representing a pose trajectory for a character.

  The pose includes:
  [root position, root orientation, joint poses (e.g. rotations)]

  Frames are flat arrays laid out as
  [root pos (3), root rot quaternion (4, [x, y, z, w]), joint poses (rest)].
  Frame velocities are laid out as
  [root lin vel (3), root ang vel (3), joint velocities (rest)].
  """
  # Number of values occupied by each component of a frame / frame velocity.
  POS_SIZE = 3
  ROT_SIZE = 4
  VEL_SIZE = 3
  ANG_VEL_SIZE = 3
  # Keys expected in the JSON motion file.
  _LOOP_MODE_KEY = "LoopMode"
  _FRAME_DURATION_KEY = "FrameDuration"
  _FRAMES_KEY = "Frames"
  _ENABLE_CYCLE_OFFSET_POSITION_KEY = "EnableCycleOffsetPosition"
  _ENABLE_CYCLE_OFFSET_ROTATION_KEY = "EnableCycleOffsetRotation"
  def __init__(self, motion_file):
    """Initialize motion data.

    Args:
      motion_file: The path to the motion data file.
    """
    self._loop_mode = LoopMode.Clamp
    self._frame_duration = 0
    self._frames = None
    self._frame_vels = None
    self.load(motion_file)
    # precompute the net changes in root position and rotation over the course
    # of the motion
    self._cycle_delta_pos = self._calc_cycle_delta_pos()
    self._cycle_delta_heading = self._calc_cycle_delta_heading()
    return
  def load(self, motion_file):
    """Load motion data from file.

    The file must be in JSON format.

    Args:
      motion_file: The path to the motion data file.
    """
    logging.info("Loading motion from: {:s}".format(motion_file))
    with open(motion_file, "r") as f:
      motion_json = json.load(f)
      self._loop_mode = LoopMode[motion_json[self._LOOP_MODE_KEY]]
      self._frame_duration = float(motion_json[self._FRAME_DURATION_KEY])
      # Cycle-offset switches are optional in the file; default to disabled.
      if self._ENABLE_CYCLE_OFFSET_POSITION_KEY in motion_json:
        self._enable_cycle_offset_pos = bool(
            motion_json[self._ENABLE_CYCLE_OFFSET_POSITION_KEY])
      else:
        self._enable_cycle_offset_pos = False
      if self._ENABLE_CYCLE_OFFSET_ROTATION_KEY in motion_json:
        self._enable_cycle_offset_rot = bool(
            motion_json[self._ENABLE_CYCLE_OFFSET_ROTATION_KEY])
      else:
        self._enable_cycle_offset_rot = False
      self._frames = np.array(motion_json[self._FRAMES_KEY])
      self._postprocess_frames(self._frames)
      self._frame_vels = self._calc_frame_vels()
      assert (self._frames.shape[0] > 0), "Must have at least 1 frame."
      assert (self._frames.shape[1] > self.POS_SIZE +
              self.ROT_SIZE), "Frames have too few degrees of freedom."
      assert (self._frame_duration > 0), "Frame duration must be positive."
    logging.info("Loaded motion from {:s}.".format(motion_file))
    return
  def get_num_frames(self):
    """Get the number of frames in the motion data.

    Returns:
      Number of frames in motion data.
    """
    return self._frames.shape[0]
  def get_frame_size(self):
    """Get the size of each frame.

    Returns:
      Size of each frame in motion data.
    """
    return self._frames.shape[-1]
  def get_frame_vel_size(self):
    """Get the size of each frame-velocity vector.

    Same as the frame size, but with the 4-value root quaternion replaced by
    a 3-value angular velocity.

    Returns:
      Size of each frame velocity.
    """
    return self.get_frame_size() - self.POS_SIZE - self.ROT_SIZE \
        + self.VEL_SIZE + self.ANG_VEL_SIZE
  def get_frame_duration(self):
    """Get the duration (seconds) of a single frame.

    Returns:
      The duration of a frame.
    """
    return self._frame_duration
  def get_frame(self, f):
    """Get a specific frame that represents the character's pose at that point
    in time.

    Args:
      f: Index of the frame.

    Returns:
      The selected frame.
    """
    return self._frames[f, :]
  def get_frame_vel(self, f):
    """Get the velocities of each joint at a specific frame.

    Args:
      f: Index of the frame.

    Returns:
      The selected frame velocity.
    """
    return self._frame_vels[f, :]
  def get_frame_time(self, f):
    """Get the start time of a specified frame.

    Args:
      f: Index of the frame.

    Returns:
      Start time of the frame.
    """
    return f * self.get_frame_duration()
  def get_frames(self):
    """Get all frames.

    Returns:
      All frames in reference motion.
    """
    return self._frames
  def get_duration(self):
    """Get the duration (seconds) of the entire motion.

    Returns:
      The duration of the motion.
    """
    frame_dur = self.get_frame_duration()
    num_frames = self.get_num_frames()
    # N frames span N - 1 inter-frame intervals.
    motion_dur = frame_dur * (num_frames - 1)
    return motion_dur
  def calc_phase(self, time):
    """Calculates the phase for a given point in time.

    The phase is a scalar value between [0, 1], with 0 denoting the start of
    a motion, and 1 the end of a motion.

    Args:
      time: The time to be used when computing the phase.

    Returns:
      The phase at the given point in time.
    """
    dur = self.get_duration()
    phase = time / dur
    if self.enable_loop():
      # Looping motions wrap the phase back into [0, 1).
      phase -= np.floor(phase)
    else:
      phase = np.clip(phase, 0.0, 1.0)
    return phase
  def calc_cycle_count(self, time):
    """Calculates the number of cycles completed of a motion for a given amount
    of time.

    Args:
      time: The time elapsed since the motion began.

    Returns:
      The number of motion cycles.
    """
    dur = self.get_duration()
    phases = time / dur
    count = int(math.floor(phases))
    if not self.enable_loop():
      # A non-looping motion plays at most one cycle.
      count = np.clip(count, 0, 1)
    return count
  def enable_loop(self):
    """Check if looping is enabled for the motion.

    Returns:
      Boolean indicating if looping is enabled.
    """
    loop = (self._loop_mode is LoopMode.Wrap)
    return loop
  def is_over(self, time):
    """Check if a motion has ended after a specific point in time.

    Args:
      time: Time elapsed since the motion began.

    Returns:
      Boolean indicating if the motion is over.
    """
    over = (not self.enable_loop()) and (time >= self.get_duration())
    return over
  def get_frame_root_pos(self, frame):
    """Get the root position from a frame.

    Args:
      frame: Frame from which the root position is to be extracted.

    Returns:
      Root position (copy) from the given frame.
    """
    root_pos = frame[:self.POS_SIZE].copy()
    return root_pos
  def set_frame_root_pos(self, root_pos, out_frame):
    """Set the root position for a frame.

    Args:
      root_pos: Root position to be set for a frame
      out_frame: Frame in which the root position is to be set.
    """
    out_frame[:self.POS_SIZE] = root_pos
    return
  def get_frame_root_rot(self, frame):
    """Get the root rotation from a frame.

    Args:
      frame: Frame from which the root rotation is to be extracted.

    Returns:
      Root rotation (quaternion, copy) from the given frame.
    """
    root_rot = frame[self.POS_SIZE:(self.POS_SIZE + self.ROT_SIZE)].copy()
    return root_rot
  def set_frame_root_rot(self, root_rot, out_frame):
    """Set the root rotation for a frame.

    Args:
      root_rot: Root rotation to be set for a frame
      out_frame: Frame in which the root rotation is to be set.
    """
    out_frame[self.POS_SIZE:(self.POS_SIZE + self.ROT_SIZE)] = root_rot
    return
  def get_frame_joints(self, frame):
    """Get the pose of each joint from a frame.

    Args:
      frame: Frame from which the joint pose is to be extracted.

    Returns:
      Array (copy) containing the pose of each joint in the given frame.
    """
    joints = frame[(self.POS_SIZE + self.ROT_SIZE):].copy()
    return joints
  def set_frame_joints(self, joints, out_frame):
    """Set the joint pose for a frame.

    Args:
      joints: Pose of each joint to be set for a frame.
      out_frame: Frame in which the joint poses is to be set.
    """
    out_frame[(self.POS_SIZE + self.ROT_SIZE):] = joints
    return
  def get_frame_root_vel(self, frame):
    """Get the root linear velocity from a frame velocity vector.

    Args:
      frame: Frame velocity from which the root linear velocity is extracted.

    Returns:
      Root linear velocity (copy) from the given frame velocity.
    """
    root_vel = frame[:self.VEL_SIZE].copy()
    return root_vel
  def set_frame_root_vel(self, root_vel, out_frame):
    """Set the root linear velocity for a frame velocity vector.

    Args:
      root_vel: Root linear velocity to be set for a frame.
      out_frame: Frame velocity in which the linear velocity is to be set.
    """
    out_frame[:self.VEL_SIZE] = root_vel
    return
  def get_frame_root_ang_vel(self, frame):
    """Get the root angular velocity from a frame velocity vector.

    Args:
      frame: Frame velocity from which the root angular velocity is extracted.

    Returns:
      Root angular velocity (copy) from the given frame velocity.
    """
    root_ang_vel = frame[self.VEL_SIZE:(self.VEL_SIZE
                                        + self.ANG_VEL_SIZE)].copy()
    return root_ang_vel
  def set_frame_root_ang_vel(self, root_ang_vel, out_frame):
    """Set the root angular velocity for a frame velocity vector.

    Args:
      root_ang_vel: Root angular velocity to be set for a frame.
      out_frame: Frame velocity in which the angular velocity is to be set.
    """
    out_frame[self.VEL_SIZE:(self.VEL_SIZE + self.ANG_VEL_SIZE)] = root_ang_vel
    return
  def get_frame_joints_vel(self, frame):
    """Get the velocity of each joint from a frame velocity vector.

    Args:
      frame: Frame velocity from which the joint velocities are extracted.

    Returns:
      Array (copy) containing the velocity of each joint.
    """
    vel = frame[(self.VEL_SIZE + self.ANG_VEL_SIZE):].copy()
    return vel
  def set_frame_joints_vel(self, vel, out_frame):
    """Set the joint velocities for a frame velocity vector.

    Args:
      vel: Joint velocities to be set for a frame.
      out_frame: Frame velocity in which the joint velocities are to be set.
    """
    out_frame[(self.VEL_SIZE + self.ANG_VEL_SIZE):] = vel
    return
  def calc_frame(self, time):
    """Calculates the frame for a given point in time.

    Interpolates between the two frames bracketing `time`, then applies the
    accumulated per-cycle root translation/rotation offsets so looping
    motions keep advancing through space.

    Args:
      time: Time at which the frame is to be computed.

    Return: An array containing the frame for the given point in time,
      specifying the pose of the character.
    """
    f0, f1, blend = self.calc_blend_idx(time)
    frame0 = self.get_frame(f0)
    frame1 = self.get_frame(f1)
    blend_frame = self.blend_frames(frame0, frame1, blend)
    blend_root_pos = self.get_frame_root_pos(blend_frame)
    blend_root_rot = self.get_frame_root_rot(blend_frame)
    cycle_count = self.calc_cycle_count(time)
    cycle_offset_pos = self._calc_cycle_offset_pos(cycle_count)
    cycle_offset_rot = self._calc_cycle_offset_rot(cycle_count)
    # Rotate the within-cycle pose by the accumulated heading, then shift by
    # the accumulated translation.
    blend_root_pos = pose3d.QuaternionRotatePoint(blend_root_pos,
                                                  cycle_offset_rot)
    blend_root_pos += cycle_offset_pos
    blend_root_rot = transformations.quaternion_multiply(
        cycle_offset_rot, blend_root_rot)
    blend_root_rot = motion_util.standardize_quaternion(blend_root_rot)
    self.set_frame_root_pos(blend_root_pos, blend_frame)
    self.set_frame_root_rot(blend_root_rot, blend_frame)
    return blend_frame
  def calc_frame_vel(self, time):
    """Calculates the frame velocity for a given point in time.

    Args:
      time: Time at which the velocities are to be computed.

    Return: An array containing the frame velocity for the given point in time,
      specifying the velocity of the root and all joints.
    """
    f0, f1, blend = self.calc_blend_idx(time)
    frame_vel0 = self.get_frame_vel(f0)
    frame_vel1 = self.get_frame_vel(f1)
    blend_frame_vel = self.blend_frame_vels(frame_vel0, frame_vel1, blend)
    root_vel = self.get_frame_root_vel(blend_frame_vel)
    root_ang_vel = self.get_frame_root_ang_vel(blend_frame_vel)
    cycle_count = self.calc_cycle_count(time)
    cycle_offset_rot = self._calc_cycle_offset_rot(cycle_count)
    # Root velocities are rotated into the current cycle's heading frame.
    root_vel = pose3d.QuaternionRotatePoint(root_vel, cycle_offset_rot)
    root_ang_vel = pose3d.QuaternionRotatePoint(root_ang_vel, cycle_offset_rot)
    self.set_frame_root_vel(root_vel, blend_frame_vel)
    self.set_frame_root_ang_vel(root_ang_vel, blend_frame_vel)
    return blend_frame_vel
  def blend_frames(self, frame0, frame1, blend):
    """Linearly interpolate between two frames.

    Positions and joint poses are lerped; the root quaternion is slerped.

    Args:
      frame0: First frame to be blended corresponds to (blend = 0).
      frame1: Second frame to be blended corresponds to (blend = 1).
      blend: Float between [0, 1], specifying the interpolation between
        the two frames.

    Returns:
      An interpolation of the two frames.
    """
    root_pos0 = self.get_frame_root_pos(frame0)
    root_pos1 = self.get_frame_root_pos(frame1)
    root_rot0 = self.get_frame_root_rot(frame0)
    root_rot1 = self.get_frame_root_rot(frame1)
    joints0 = self.get_frame_joints(frame0)
    joints1 = self.get_frame_joints(frame1)
    blend_root_pos = (1.0 - blend) * root_pos0 + blend * root_pos1
    blend_root_rot = transformations.quaternion_slerp(root_rot0, root_rot1,
                                                      blend)
    blend_joints = (1.0 - blend) * joints0 + blend * joints1
    blend_root_rot = motion_util.standardize_quaternion(blend_root_rot)
    blend_frame = np.zeros(self.get_frame_size())
    self.set_frame_root_pos(blend_root_pos, blend_frame)
    self.set_frame_root_rot(blend_root_rot, blend_frame)
    self.set_frame_joints(blend_joints, blend_frame)
    return blend_frame
  def blend_frame_vels(self, frame_vel0, frame_vel1, blend):
    """Linearly interpolate between two frame velocities.

    Args:
      frame_vel0: First frame velocities to be blended corresponds to
        (blend = 0).
      frame_vel1: Second frame velocities to be blended corresponds to
        (blend = 1).
      blend: Float between [0, 1], specifying the interpolation between
        the two frames.

    Returns:
      An interpolation of the two frame velocities.
    """
    blend_frame_vel = (1.0 - blend) * frame_vel0 + blend * frame_vel1
    return blend_frame_vel
  def _postprocess_frames(self, frames):
    """Postprocesses frames in place to ensure they satisfy certain properties.

    Re-centers the trajectory so the first frame starts at x = y = 0 (height
    is preserved) and normalizes/standardizes every root quaternion.

    Args:
      frames: Array containing frames to be processed. Each row of the array
        should represent a frame. Modified in place; returns None.
    """
    num_frames = frames.shape[0]
    if num_frames > 0:
      # NOTE(review): reads self._frames rather than the `frames` argument;
      # identical when called from load(), but surprising for other callers.
      first_frame = self._frames[0]
      pos_start = self.get_frame_root_pos(first_frame)
      for f in range(num_frames):
        curr_frame = frames[f]
        root_pos = self.get_frame_root_pos(curr_frame)
        root_pos[0] -= pos_start[0]
        root_pos[1] -= pos_start[1]
        root_rot = self.get_frame_root_rot(curr_frame)
        root_rot = pose3d.QuaternionNormalize(root_rot)
        root_rot = motion_util.standardize_quaternion(root_rot)
        self.set_frame_root_pos(root_pos, curr_frame)
        self.set_frame_root_rot(root_rot, curr_frame)
    return
  def _calc_cycle_delta_pos(self):
    """Calculates the net change in the root position after a cycle.

    Returns:
      Net translation of the root position.
    """
    first_frame = self._frames[0]
    last_frame = self._frames[-1]
    pos_start = self.get_frame_root_pos(first_frame)
    pos_end = self.get_frame_root_pos(last_frame)
    cycle_delta_pos = pos_end - pos_start
    cycle_delta_pos[2] = 0  # only translate along horizontal plane
    return cycle_delta_pos
  def _calc_cycle_delta_heading(self):
    """Calculates the net change in the root heading after a cycle.

    Returns:
      Net change in heading.
    """
    first_frame = self._frames[0]
    last_frame = self._frames[-1]
    rot_start = self.get_frame_root_rot(first_frame)
    rot_end = self.get_frame_root_rot(last_frame)
    # Relative rotation from start to end, reduced to its yaw component.
    inv_rot_start = transformations.quaternion_conjugate(rot_start)
    drot = transformations.quaternion_multiply(rot_end, inv_rot_start)
    cycle_delta_heading = motion_util.calc_heading(drot)
    return cycle_delta_heading
  def _calc_cycle_offset_pos(self, num_cycles):
    """Calculates change in the root position after a given number of cycles.

    Args:
      num_cycles: Number of cycles since the start of the motion.

    Returns:
      Net translation of the root position.
    """
    if not self._enable_cycle_offset_pos:
      cycle_offset_pos = np.zeros(3)
    else:
      if not self._enable_cycle_offset_rot:
        # Without heading accumulation the offset is a simple multiple.
        cycle_offset_pos = num_cycles * self._cycle_delta_pos
      else:
        # Each cycle's displacement is rotated by the heading accumulated
        # over all previous cycles before being summed.
        cycle_offset_pos = np.zeros(3)
        for i in range(num_cycles):
          curr_heading = i * self._cycle_delta_heading
          rot = transformations.quaternion_about_axis(curr_heading, [0, 0, 1])
          curr_offset = pose3d.QuaternionRotatePoint(self._cycle_delta_pos, rot)
          cycle_offset_pos += curr_offset
    return cycle_offset_pos
  def _calc_cycle_offset_rot(self, num_cycles):
    """Calculates change in the root rotation after a given number of cycles.

    Args:
      num_cycles: Number of cycles since the start of the motion.

    Returns:
      Net rotation of the root orientation (quaternion, identity when
      cycle rotation offsets are disabled).
    """
    if not self._enable_cycle_offset_rot:
      cycle_offset_rot = np.array([0, 0, 0, 1])
    else:
      heading_offset = num_cycles * self._cycle_delta_heading
      cycle_offset_rot = transformations.quaternion_from_euler(
          0, 0, heading_offset)
    return cycle_offset_rot
  def _calc_frame_vels(self):
    """Calculates the frame velocity of each frame in the motion (self._frames).

    Velocities are forward finite differences between consecutive frames;
    the last frame copies the velocity of the one before it.

    Return:
      An array containing velocities at each frame in self._frames.
    """
    num_frames = self.get_num_frames()
    frame_vel_size = self.get_frame_vel_size()
    dt = self.get_frame_duration()
    frame_vels = np.zeros([num_frames, frame_vel_size])
    for f in range(num_frames - 1):
      frame0 = self.get_frame(f)
      frame1 = self.get_frame(f + 1)
      root_pos0 = self.get_frame_root_pos(frame0)
      root_pos1 = self.get_frame_root_pos(frame1)
      root_rot0 = self.get_frame_root_rot(frame0)
      root_rot1 = self.get_frame_root_rot(frame1)
      joints0 = self.get_frame_joints(frame0)
      joints1 = self.get_frame_joints(frame1)
      root_vel = (root_pos1 - root_pos0) / dt
      # Angular velocity from the relative rotation's axis-angle form.
      root_rot_diff = transformations.quaternion_multiply(
          root_rot1, transformations.quaternion_conjugate(root_rot0))
      root_rot_diff_axis, root_rot_diff_angle = \
          pose3d.QuaternionToAxisAngle(root_rot_diff)
      root_ang_vel = (root_rot_diff_angle / dt) * root_rot_diff_axis
      joints_vel = (joints1 - joints0) / dt
      curr_frame_vel = np.zeros(frame_vel_size)
      self.set_frame_root_vel(root_vel, curr_frame_vel)
      self.set_frame_root_ang_vel(root_ang_vel, curr_frame_vel)
      self.set_frame_joints_vel(joints_vel, curr_frame_vel)
      frame_vels[f, :] = curr_frame_vel
    # replicate the velocity at the last frame
    if num_frames > 1:
      frame_vels[-1, :] = frame_vels[-2, :]
    return frame_vels
  def calc_blend_idx(self, time):
    """Calculate the indices of the two frames and the interpolation value that
    should be used when computing the frame at a given point in time.

    Args:
      time: Time at which the frame is to be computed.

    Return:
      f0: Start framed used for blending.
      f1: End frame used for blending.
      blend: Interpolation value used to blend between the two frames.
    """
    dur = self.get_duration()
    num_frames = self.get_num_frames()
    if not self.enable_loop() and time <= 0:
      f0 = 0
      f1 = 0
      blend = 0
    elif not self.enable_loop() and time >= dur:
      f0 = num_frames - 1
      f1 = num_frames - 1
      blend = 0
    else:
      # Here phase < 1, so f0 < num_frames - 1 and f1 = f0 + 1; hence
      # time1 > time0 and the division below cannot divide by zero.
      phase = self.calc_phase(time)
      f0 = int(phase * (num_frames - 1))
      f1 = min(f0 + 1, num_frames - 1)
      norm_time = phase * dur
      time0 = self.get_frame_time(f0)
      time1 = self.get_frame_time(f1)
      assert (norm_time >= time0 - 1e-5) and (norm_time <= time1 + 1e-5)
      blend = (norm_time - time0) / (time1 - time0)
    return f0, f1, blend
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/motion_imitation/utilities/moving_window_filter.py | motion_imitation/utilities/moving_window_filter.py | """Moving window filter to smooth out sensor readings."""
import collections
class MovingWindowFilter(object):
  """A stable O(1) moving filter for incoming data streams.

  We implement the Neumaier's algorithm to calculate the moving window average,
  which is numerically stable.
  """
  def __init__(self, window_size: int):
    """Initializes the class.

    Args:
      window_size: The moving window size.
    """
    assert window_size > 0
    self._window_size = window_size
    # Deque evicts the oldest sample automatically once full; the evicted
    # value is subtracted from the running sum in calculate_average().
    self._value_deque = collections.deque(maxlen=window_size)
    # The moving window sum.
    self._sum = 0
    # The correction term to compensate numerical precision loss during
    # calculation.
    self._correction = 0
  def _neumaier_sum(self, value: float):
    """Update the moving window sum using Neumaier's algorithm.

    For more details please refer to:
    https://en.wikipedia.org/wiki/Kahan_summation_algorithm#Further_enhancements

    Args:
      value: The new value to be added to the window.
    """
    new_sum = self._sum + value
    if abs(self._sum) >= abs(value):
      # If self._sum is bigger, low-order digits of value are lost.
      self._correction += (self._sum - new_sum) + value
    else:
      # low-order digits of sum are lost
      self._correction += (value - new_sum) + self._sum
    self._sum = new_sum
  def calculate_average(self, new_value: float) -> float:
    """Computes the moving window average in O(1) time.

    Args:
      new_value: The new value to enter the moving window.

    Returns:
      The average of the values in the window.
    """
    deque_len = len(self._value_deque)
    if deque_len < self._value_deque.maxlen:
      pass
    else:
      # The left most value to be subtracted from the moving sum.
      self._neumaier_sum(-self._value_deque[0])
    self._neumaier_sum(new_value)
    self._value_deque.append(new_value)
    # NOTE(review): the divisor is always the full window size, so before the
    # window fills up the missing samples are effectively counted as zeros --
    # confirm this warm-up behavior is intended by callers.
    return (self._sum + self._correction) / self._window_size
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/motion_imitation/utilities/pose3d.py | motion_imitation/utilities/pose3d.py | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for 3D pose conversion."""
import math
import numpy as np
from pybullet_utils import transformations
# Common constant vectors: zero, ones, and the canonical x/y/z basis (float64).
VECTOR3_0 = np.zeros(3, dtype=np.float64)
VECTOR3_1 = np.ones(3, dtype=np.float64)
VECTOR3_X = np.array([1, 0, 0], dtype=np.float64)
VECTOR3_Y = np.array([0, 1, 0], dtype=np.float64)
VECTOR3_Z = np.array([0, 0, 1], dtype=np.float64)
# QUATERNION_IDENTITY is the multiplicative identity 1.0 + 0i + 0j + 0k.
# When interpreted as a rotation, it is the identity rotation.
# Quaternions in this module are stored [x, y, z, w] (scalar last).
QUATERNION_IDENTITY = np.array([0.0, 0.0, 0.0, 1.0], dtype=np.float64)
def Vector3RandomNormal(sigma, mu=VECTOR3_0):
  """Returns a random 3D vector from a normal distribution.

  Each component is selected independently from a normal distribution.

  Args:
    sigma: Scale (or stddev) of distribution for all variables.
    mu: Mean of distribution for each variable.

  Returns:
    A 3D vector in a numpy array.
  """
  return mu + np.random.normal(scale=sigma, size=3)
def Vector3RandomUniform(low=VECTOR3_0, high=VECTOR3_1):
  """Samples a 3D vector uniformly from the axis-aligned box [low, high].

  Args:
    low: The min-value corner of the box.
    high: The max-value corner of the box.

  Returns:
    A 3D vector in a numpy array.
  """
  # One independent uniform draw per axis, in x/y/z order.
  coords = [np.random.uniform(low=low[axis], high=high[axis])
            for axis in range(3)]
  return np.array(coords)
def Vector3RandomUnit():
  """Samples a unit-length 3D vector uniformly from the sphere.

  Uses the standard longitude / sin(latitude) parameterization, which is
  area-preserving on the sphere.

  Returns:
    A normalized 3D vector in a numpy array.
  """
  lon = np.random.uniform(low=-math.pi, high=math.pi)
  sin_lat = np.random.uniform(low=-1.0, high=1.0)
  cos_lat = math.sqrt(1.0 - sin_lat * sin_lat)
  return np.array(
      [math.cos(lon) * cos_lat, math.sin(lon) * cos_lat, sin_lat],
      dtype=np.float64)
def QuaternionNormalize(q):
  """Scales quaternion q to unit magnitude.

  Args:
    q: A quaternion to be normalized.

  Raises:
    ValueError: If the quaternion's magnitude is (near) zero.

  Returns:
    A quaternion with magnitude 1 in a numpy array [x, y, z, w].
  """
  magnitude = np.linalg.norm(q)
  if np.isclose(magnitude, 0.0):
    raise ValueError(
        'Quaternion may not be zero in QuaternionNormalize: |q| = %f, q = %s' %
        (magnitude, q))
  return q / magnitude
def QuaternionFromAxisAngle(axis, angle):
  """Builds the unit quaternion sin(angle/2) * axis_hat + cos(angle/2).

  Args:
    axis: Axis of rotation, a 3D vector in a numpy array (need not be unit).
    angle: The angle of rotation (radians).

  Raises:
    ValueError: If `axis` is not a normalizable 3D vector.

  Returns:
    A unit quaternion [x, y, z, w] in a numpy array.
  """
  if len(axis) != 3:
    raise ValueError('Axis vector should have three components: %s' % axis)

  norm = np.linalg.norm(axis)
  if np.isclose(norm, 0.0):
    raise ValueError('Axis vector may not have zero length: |v| = %f, v = %s' %
                     (norm, axis))

  half_angle = angle * 0.5
  quat = np.zeros(4, dtype=np.float64)
  # Vector part: normalized axis scaled by sin(angle / 2).
  quat[0:3] = axis
  quat[0:3] *= math.sin(half_angle) / norm
  quat[3] = math.cos(half_angle)
  return quat
def QuaternionToAxisAngle(quat, default_axis=VECTOR3_Z, direction_axis=None):
  """Recovers the axis and angle of the rotation performed by a quaternion.

  Args:
    quat: Unit quaternion [x, y, z, w] in a numpy array.
    default_axis: Unit 3D axis reported when the rotation is near zero (the
      true axis is then ill-defined, and any axis is equivalent).
    direction_axis: Optional vector used to disambiguate direction; the
      returned axis has a non-negative inner product with it.

  Raises:
    ValueError: If `quat` is not a normalized 4-vector, or the fallback
      `default_axis` is used and is not a unit 3-vector.

  Returns:
    axis: Unit axis of rotation.
    angle: Angle in radians.
  """
  if len(quat) != 4:
    raise ValueError(
        'Quaternion should have four components [x, y, z, w]: %s' % quat)
  if not np.isclose(1.0, np.linalg.norm(quat)):
    raise ValueError('Quaternion should have unit length: |q| = %f, q = %s' %
                     (np.linalg.norm(quat), quat))

  axis = quat[:3].copy()
  # For a unit quaternion, |vector part| == sin(angle / 2).
  sin_half_angle = np.linalg.norm(axis)

  if sin_half_angle < 1e-8:
    # Degenerate (near-identity) rotation: fall back to the caller's axis.
    axis = default_axis
    if len(default_axis) != 3:
      raise ValueError('Axis vector should have three components: %s' % axis)
    if not np.isclose(np.linalg.norm(axis), 1.0):
      raise ValueError('Axis vector should have unit length: |v| = %f, v = %s' %
                       (np.linalg.norm(axis), axis))
  else:
    axis /= sin_half_angle

  if direction_axis is not None and np.inner(axis, direction_axis) < 0:
    sin_half_angle = -sin_half_angle
    axis = -axis

  angle = 2 * math.atan2(sin_half_angle, quat[3])
  return axis, angle
def QuaternionRandomRotation(max_angle=math.pi):
  """Samples a random rotation: uniform axis, uniform angle in [0, max_angle].

  Args:
    max_angle: The maximum angle of rotation (radians).

  Returns:
    A unit quaternion in a numpy array.
  """
  # Draw the angle first, then the axis, to keep the RNG stream stable.
  sampled_angle = np.random.uniform(low=0, high=max_angle)
  sampled_axis = Vector3RandomUnit()
  return QuaternionFromAxisAngle(sampled_axis, sampled_angle)
def QuaternionRotatePoint(point, quat):
  """Rotates `point` by `quat` via q * p * q^-1, without a rotation matrix.

  Args:
    point: The 3D point to be rotated.
    quat: The rotation represented as a quaternion [x, y, z, w].

  Returns:
    A 3D vector in a numpy array.
  """
  # Embed the point as a pure quaternion (zero scalar part).
  pure_quat = np.array([point[0], point[1], point[2], 0.0])
  inv_quat = transformations.quaternion_inverse(quat)
  rotated = transformations.quaternion_multiply(quat, pure_quat)
  rotated = transformations.quaternion_multiply(rotated, inv_quat)
  return rotated[:3]
def IsRotationMatrix(m):
  """Returns true if the upper-left 3x3 submatrix of `m` is orthogonal.

  Args:
    m: A transformation matrix of size at least 3x3.

  Raises:
    ValueError: If the input is not a matrix of size at least 3x3.

  Returns:
    True if rot @ rot.T is (approximately) the identity.
  """
  if len(m.shape) != 2 or m.shape[0] < 3 or m.shape[1] < 3:
    raise ValueError('Matrix should be 3x3 or 4x4: %s\n %s' % (m.shape, m))

  rot = m[:3, :3]
  product = np.matmul(rot, rot.T)
  return np.isclose(product, np.identity(3), atol=1e-4).all()
def PoseTransformPoint(point, position, quat):
  """Applies the rigid transform (position, quat) to `point`: rotate then translate."""
  rotated_point = QuaternionRotatePoint(point, quat)
  return rotated_point + position
# def ZAxisAlignedRobotPoseTool(robot_pose_tool):
# """Returns the current gripper pose rotated for alignment with the z-axis.
# Args:
# robot_pose_tool: a pose3d.Pose3d() instance.
# Returns:
# An instance of pose.Transform representing the current gripper pose
# rotated for alignment with the z-axis.
# """
# # Align the current pose to the z-axis.
# robot_pose_tool.quaternion = transformations.quaternion_multiply(
# RotationBetween(
# robot_pose_tool.matrix4x4[0:3, 0:3].dot(np.array([0, 0, 1])),
# np.array([0.0, 0.0, -1.0])), robot_pose_tool.quaternion)
# return robot_pose_tool
# def RotationBetween(a_translation_b, a_translation_c):
# """Computes the rotation from one vector to another.
# The computed rotation has the property that:
# a_translation_c = a_rotation_b_to_c * a_translation_b
# Args:
# a_translation_b: vec3, vector to rotate from
# a_translation_c: vec3, vector to rotate to
# Returns:
# a_rotation_b_to_c: new Orientation
# """
# rotation = rotation3.Rotation3.rotation_between(
# a_translation_b, a_translation_c, err_msg='RotationBetween')
# return rotation.quaternion.xyzw
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/motion_imitation/utilities/__init__.py | motion_imitation/utilities/__init__.py | python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false | |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/motion_imitation/utilities/a1/a1.py | motion_imitation/utilities/a1/a1.py | import pybullet as p
import time
import pybullet_data as pd
import numpy as np
# Standalone demo: load the Unitree A1 into a PyBullet GUI session, move all
# leg joints to the default standing pose, and step the simulation forever.
p.connect(p.GUI)
p.setAdditionalSearchPath(pd.getDataPath())

# Timestep used only to pace the viewer loop at ~240 Hz.
dt = 1./240.

# Disable rendering while the assets load (faster), then re-enable it.
p.configureDebugVisualizer(p.COV_ENABLE_RENDERING,0)
p.loadURDF("plane.urdf")
robot = p.loadURDF("a1/a1.urdf",[0,0,0.5])
p.configureDebugVisualizer(p.COV_ENABLE_RENDERING,1)
p.setGravity(0,0,-9.8)

# Default standing pose: abduction / hip / knee angle, repeated for 4 legs.
A1_DEFAULT_ABDUCTION_ANGLE = 0
A1_DEFAULT_HIP_ANGLE = 0.9
A1_DEFAULT_KNEE_ANGLE = -1.8
NUM_LEGS = 4
INIT_MOTOR_ANGLES = np.array([
    A1_DEFAULT_ABDUCTION_ANGLE,
    A1_DEFAULT_HIP_ANGLE,
    A1_DEFAULT_KNEE_ANGLE
] * NUM_LEGS)

# Actuated joint names in the URDF, grouped per leg (FR, FL, RR, RL).
MOTOR_NAMES = [
    "FR_hip_joint",
    "FR_upper_joint",
    "FR_lower_joint",
    "FL_hip_joint",
    "FL_upper_joint",
    "FL_lower_joint",
    "RR_hip_joint",
    "RR_upper_joint",
    "RR_lower_joint",
    "RL_hip_joint",
    "RL_upper_joint",
    "RL_lower_joint",
]

# Map motor names to PyBullet joint indices by scanning the loaded model.
motor_ids = []
for j in range (p.getNumJoints(robot)):
    joint_info = p.getJointInfo(robot,j)
    name = joint_info[1].decode('utf-8')
    print("joint_info[1]=",name)
    if name in MOTOR_NAMES:
        motor_ids.append(j)

# Command the default angles and also teleport each joint there so the robot
# starts in (rather than falls into) the standing pose.
for index in range (12):
    joint_id = motor_ids[index]
    p.setJointMotorControl2(robot, joint_id, p.POSITION_CONTROL, INIT_MOTOR_ANGLES[index])
    p.resetJointState(robot, joint_id, INIT_MOTOR_ANGLES[index])
print("motor_ids=",motor_ids)

# Step the simulation in (approximately) real time until the GUI is closed.
while p.isConnected():
    p.stepSimulation()
    time.sleep(dt)
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/motion_imitation/data/__init__.py | motion_imitation/data/__init__.py | python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false | |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/sac_dev/run.py | sac_dev/run.py | import argparse
import gym
import numpy as np
import os
import random
import sys
import tensorflow as tf
import time
import sac_configs
import learning.sac_agent as sac_agent
import util.mpi_util as mpi_util
arg_parser = None
def parse_args(args):
    """Parses command-line options for training/testing.

    Args:
        args: Full argv list; args[0] is the program name and is skipped.

    Returns:
        argparse.Namespace with the run configuration.
    """
    parser = argparse.ArgumentParser(description="Train or test control policies.")
    parser.add_argument("--env", dest="env", default="")
    # --train / --test toggle the same flag (train defaults to True).
    parser.add_argument("--train", dest="train", action="store_true", default=True)
    parser.add_argument("--test", dest="train", action="store_false", default=True)
    parser.add_argument("--max_samples", dest="max_samples", type=int, default=np.inf)
    parser.add_argument("--test_episodes", dest="test_episodes", type=int, default=32)
    parser.add_argument("--output_dir", dest="output_dir", default="output")
    parser.add_argument("--output_iters", dest="output_iters", type=int, default=20)
    parser.add_argument("--model_file", dest="model_file", default="")
    parser.add_argument("--visualize", dest="visualize", action="store_true", default=False)
    parser.add_argument("--gpu", dest="gpu", default="")
    # BUG FIX: previously ignored the `args` parameter and always parsed
    # sys.argv; now honors the caller-supplied argv.
    return parser.parse_args(args[1:])
def enable_gpus(gpu_str):
    """Restricts CUDA to the given device list; "" disables GPUs entirely.

    Args:
        gpu_str: Comma-separated device ids (e.g. "0,1"), or "" for CPU-only.
    """
    # BUG FIX: was `gpu_str is not ""` — an identity test against a string
    # literal, which only works by accident of CPython interning.
    if (gpu_str != ""):
        os.environ["CUDA_VISIBLE_DEVICES"] = gpu_str
    else:
        # CUDA treats -1 as "no visible devices".
        os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
    return
def build_env(env_id):
    """Creates the gym environment named by `env_id`.

    Args:
        env_id: Registered gym environment id; must be non-empty.

    Returns:
        The constructed gym environment.
    """
    # BUG FIX: was `env_id is not ""` — identity, not equality.
    assert env_id != "", "Unspecified environment."
    env = gym.make(env_id)
    return env
def build_agent(env):
    """Constructs a SACAgent for `env` in a fresh TF graph and session.

    Reads the module-level `arg_parser` (set by main()) for the env id and
    looks up per-environment hyperparameters in sac_configs.SAC_CONFIGS;
    unknown envs fall back to the agent's defaults (empty config dict).
    """
    env_id = arg_parser.env
    agent_configs = {}
    if (env_id in sac_configs.SAC_CONFIGS):
        agent_configs = sac_configs.SAC_CONFIGS[env_id]

    graph = tf.Graph()
    sess = tf.Session(graph=graph)
    agent = sac_agent.SACAgent(env=env, sess=sess, **agent_configs)
    return agent
def set_rand_seed(seed):
    """Seeds all RNGs (TF when importable, numpy, random), offset per MPI rank."""
    # Distinct per-process seeds so parallel workers don't correlate.
    seed = seed + 97 * mpi_util.get_proc_rank()

    try:
        import tensorflow as tf
    except ImportError:
        pass
    else:
        tf.set_random_seed(seed)

    np.random.seed(seed)
    random.seed(seed)
    return
def main(args):
    """Entry point: parse args, build env and agent, then train or evaluate.

    Args:
        args: Full argv list (typically sys.argv).
    """
    global arg_parser
    arg_parser = parse_args(args)
    enable_gpus(arg_parser.gpu)
    set_rand_seed(int(time.time()))

    env = build_env(arg_parser.env)
    agent = build_agent(env)
    agent.visualize = arg_parser.visualize

    # BUG FIX: was `arg_parser.model_file is not ""` — identity test on a
    # string literal; replaced with an equality check.
    if (arg_parser.model_file != ""):
        agent.load_model(arg_parser.model_file)

    if (arg_parser.train):
        agent.train(max_samples=arg_parser.max_samples,
                    test_episodes=arg_parser.test_episodes,
                    output_dir=arg_parser.output_dir,
                    output_iters=arg_parser.output_iters)
    else:
        agent.eval(num_episodes=arg_parser.test_episodes)
    return
# Script entry point: forward the full argv (program name included) to main().
if __name__ == "__main__":
    main(sys.argv)
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/sac_dev/sac_configs.py | sac_dev/sac_configs.py | SAC_CONFIGS = {
"Ant-v2":
{
"actor_net": "fc_2layers_256units",
"critic_net": "fc_2layers_256units",
"actor_stepsize": 0.0003,
"actor_init_output_scale": 0.01,
"actor_batch_size": 256,
"actor_steps": 256,
"action_std": 0.2,
"critic_stepsize": 0.0003,
"critic_batch_size": 256,
"critic_steps": 256,
"discount": 0.99,
"samples_per_iter": 512,
"replay_buffer_size": 1000000,
"normalizer_samples": 300000,
"num_action_samples": 1,
"tar_stepsize": 0.01,
"steps_per_tar_update": 1,
"init_samples": 25000
},
"Hopper-v2":
{
"actor_net": "fc_2layers_256units",
"critic_net": "fc_2layers_256units",
"actor_stepsize": 0.0003,
"actor_init_output_scale": 0.01,
"actor_batch_size": 256,
"actor_steps": 256,
"action_std": 0.2,
"critic_stepsize": 0.0003,
"critic_batch_size": 256,
"critic_steps": 256,
"discount": 0.99,
"samples_per_iter": 512,
"replay_buffer_size": 1000000,
"normalizer_samples": 300000,
"num_action_samples": 1,
"tar_stepsize": 0.01,
"steps_per_tar_update": 1,
"init_samples": 25000
},
"HalfCheetah-v2":
{
"actor_net": "fc_2layers_256units",
"critic_net": "fc_2layers_256units",
"actor_stepsize": 0.0003,
"actor_init_output_scale": 0.01,
"actor_batch_size": 256,
"actor_steps": 256,
"action_std": 0.2,
"critic_stepsize": 0.0003,
"critic_batch_size": 256,
"critic_steps": 256,
"discount": 0.99,
"samples_per_iter": 512,
"replay_buffer_size": 1000000,
"normalizer_samples": 300000,
"num_action_samples": 1,
"tar_stepsize": 0.01,
"steps_per_tar_update": 1,
"init_samples": 25000
},
"Walker2d-v2":
{
"actor_net": "fc_2layers_256units",
"critic_net": "fc_2layers_256units",
"actor_stepsize": 0.0003,
"actor_init_output_scale": 0.01,
"actor_batch_size": 256,
"actor_steps": 256,
"action_std": 0.2,
"critic_stepsize": 0.0003,
"critic_batch_size": 256,
"critic_steps": 256,
"discount": 0.99,
"samples_per_iter": 512,
"replay_buffer_size": 1000000,
"normalizer_samples": 300000,
"num_action_samples": 1,
"tar_stepsize": 0.01,
"steps_per_tar_update": 1,
"init_samples": 25000
},
"Humanoid-v2":
{
"actor_net": "fc_2layers_256units",
"critic_net": "fc_2layers_256units",
"actor_stepsize": 0.0003,
"actor_init_output_scale": 0.01,
"actor_batch_size": 256,
"actor_steps": 256,
"action_std": 0.2,
"critic_stepsize": 0.0003,
"critic_batch_size": 256,
"critic_steps": 256,
"discount": 0.99,
"samples_per_iter": 512,
"replay_buffer_size": 1000000,
"normalizer_samples": 300000,
"num_action_samples": 1,
"tar_stepsize": 0.01,
"steps_per_tar_update": 1,
"init_samples": 25000
},
"A1-Motion-Imitation-REDQ-Pretrain":
{
"actor_net": "fc_2layers_512units",
"critic_net": "fc_2layers_512units",
"use_MPI_solver": False,
"parallel_ensemble": True,
"actor_stepsize": 0.0003,
"actor_init_output_scale": 0.01,
"actor_batch_size": 512,
"actor_steps": 512,
"action_std": 0.15,
"num_critic_nets": 10,
"critic_stepsize": 0.0003,
"critic_batch_size": 512,
"critic_steps": 20*512,
"num_ensemble_subset": 2,
"discount": 0.95,
"samples_per_iter": 512,
"replay_buffer_size": int(1e6),
"normalizer_samples": 30000,
"enable_val_norm": False,
"num_action_samples": 1,
"tar_stepsize": 5e-3,
"steps_per_tar_update": 1,
"init_samples": 20000
},
"A1-Motion-Imitation-REDQ-Finetune":
{
"actor_net": "fc_2layers_512units",
"critic_net": "fc_2layers_512units",
"use_MPI_solver": False,
"parallel_ensemble": True,
"actor_stepsize": 0.0001,
"actor_init_output_scale": 0.01,
"actor_batch_size": 256,
"actor_steps": 256,
"action_std": 0.15,
"num_critic_nets": 10,
"critic_stepsize": 0.0001,
"critic_batch_size": 256,
"critic_steps": 2*256,
"num_ensemble_subset": 2,
"discount": 0.95,
"samples_per_iter": 512,
"replay_buffer_size": int(1e6),
"normalizer_samples": 0,
"enable_val_norm": False,
"num_action_samples": 1,
"tar_stepsize": 1e-3,
"steps_per_tar_update": 1,
"init_samples": 5000
},
"A1-Motion-Imitation-Vanilla-SAC-Pretrain":
{
"actor_net": "fc_2layers_512units",
"critic_net": "fc_2layers_512units",
"use_MPI_solver": False,
"parallel_ensemble": True,
"actor_stepsize": 0.0003,
"actor_init_output_scale": 0.01,
"actor_batch_size": 512,
"actor_steps": 512,
"action_std": 0.15,
"num_critic_nets": 2,
"critic_stepsize": 0.0003,
"critic_batch_size": 512,
"critic_steps": 512,
"num_ensemble_subset": 2,
"discount": 0.95,
"samples_per_iter": 512,
"replay_buffer_size": int(1e6),
"normalizer_samples": 30000,
"enable_val_norm": False,
"num_action_samples": 1,
"tar_stepsize": 5e-3,
"steps_per_tar_update": 1,
"init_samples": 20000
},
"A1-Motion-Imitation-Vanilla-SAC-Finetune":
{
"actor_net": "fc_2layers_512units",
"critic_net": "fc_2layers_512units",
"use_MPI_solver": False,
"parallel_ensemble": True,
"actor_stepsize": 0.0001,
"actor_init_output_scale": 0.01,
"actor_batch_size": 256,
"actor_steps": 256,
"action_std": 0.15,
"num_critic_nets": 2,
"critic_stepsize": 0.0001,
"critic_batch_size": 256,
"critic_steps": 256,
"num_ensemble_subset": 2,
"discount": 0.95,
"samples_per_iter": 512,
"replay_buffer_size": int(1e6),
"normalizer_samples": 0,
"enable_val_norm": False,
"num_action_samples": 1,
"tar_stepsize": 1e-3,
"steps_per_tar_update": 1,
"init_samples": 5000
}
} | python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/sac_dev/util/mpi_util.py | sac_dev/util/mpi_util.py | import copy
import numpy as np
# from mpi4py import MPI
ROOT_PROC_RANK = 0
class MockMPI(object):
    """Single-process stand-in for mpi4py's MPI module.

    Mimics only the attributes this package touches (COMM_WORLD plus the
    reduction-op constants) so the code runs without an MPI installation.
    """

    class MockCommWorld(object):
        """Drop-in for MPI.COMM_WORLD when there is exactly one process."""

        def Get_size(self):
            return 1

        def Get_rank(self):
            return 0

        def Bcast(self, data, root):
            # With one process a broadcast is a no-op; echo the buffer back.
            return data

        def Allreduce(self, src, dest, op):
            # Reducing over one process is just a copy; `op` is ignored.
            np.copyto(dest, src)

        def Allgather(self, src, dest):
            np.copyto(dest, src)

    def __init__(self):
        self.COMM_WORLD = self.MockCommWorld()
        # Op constants are placeholders; the mock communicator never reads them.
        self.SUM = None
        self.PROD = None
        self.MIN = None
        self.MAX = None
# Module-level "communicator": the single-process mock defined above.
MPI = MockMPI()

def get_num_procs():
    """Number of MPI processes (always 1 with the mock)."""
    return MPI.COMM_WORLD.Get_size()

def get_proc_rank():
    """Rank of this process within the communicator."""
    return MPI.COMM_WORLD.Get_rank()

def is_root_proc():
    """True if this process is the root (rank ROOT_PROC_RANK)."""
    rank = get_proc_rank()
    return rank == ROOT_PROC_RANK

def bcast(x):
    """Broadcasts `x` in place from the root process to all others."""
    MPI.COMM_WORLD.Bcast(x, root=ROOT_PROC_RANK)
    return

def reduce_sum(x):
    """Element-wise sum of `x` across all processes."""
    return reduce_all(x, MPI.SUM)

def reduce_sum_inplace(x, destination):
    """Sums `x` across processes, writing the result into `destination`."""
    MPI.COMM_WORLD.Allreduce(x, destination, op=MPI.SUM)

def reduce_prod(x):
    """Element-wise product of `x` across all processes."""
    return reduce_all(x, MPI.PROD)

def reduce_mean(x):
    """Element-wise mean of `x` across all processes."""
    buffer = reduce_sum(x)
    buffer /= get_num_procs()
    return buffer

def reduce_min(x):
    """Element-wise minimum of `x` across all processes."""
    return reduce_all(x, MPI.MIN)

def reduce_max(x):
    """Element-wise maximum of `x` across all processes."""
    return reduce_all(x, MPI.MAX)
def reduce_all(x, op):
    """Applies the MPI reduction `op` to `x` (scalar or ndarray) across ranks.

    Scalars are wrapped in a length-1 array for the collective call and
    unwrapped before returning.
    """
    if isinstance(x, np.ndarray):
        send_buf = x
    else:
        send_buf = np.array([x])

    recv_buf = np.zeros_like(send_buf)
    MPI.COMM_WORLD.Allreduce(send_buf, recv_buf, op=op)

    if isinstance(x, np.ndarray):
        return recv_buf
    return recv_buf[0]
def gather_all(x):
    """Gathers one value from every rank; returns them as a list."""
    send_buf = np.array([x])
    # One receive slot per process, zero-initialized.
    recv_buf = np.repeat(np.zeros_like(send_buf), get_num_procs(), axis=0)
    MPI.COMM_WORLD.Allgather(send_buf, recv_buf)
    return list(recv_buf)
def reduce_dict_mean(local_dict):
    """Returns a copy of `local_dict` with each value averaged across ranks.

    Keys are sorted so that every rank reduces values in the same order.
    """
    keys = sorted(local_dict.keys())
    stacked = np.array([local_dict[k] for k in keys])
    averaged = reduce_mean(stacked)

    result = copy.deepcopy(local_dict)
    for position, key in enumerate(keys):
        result[key] = averaged[position]
    return result
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/sac_dev/util/rl_path.py | sac_dev/util/rl_path.py | import enum
import numpy as np
import time
class Terminate(enum.Enum):
    """How an episode ended: Null = no failure recorded, Fail = failure."""
    Null = 0
    Fail = 1

class RLPath(object):
    """One rollout: parallel lists of states, actions, log-probs, rewards
    and per-step max torques, plus a termination flag."""

    def __init__(self):
        self.states = []
        self.actions = []
        self.logps = []
        self.rewards = []
        self.max_torques = []
        self.terminate = Terminate.Null
        self.clear()
        return

    def pathlength(self):
        """Number of transitions (one fewer than the number of states)."""
        return len(self.actions)

    def is_valid(self):
        """Checks that all per-step buffers agree in length.

        An empty path is always considered valid.
        """
        steps = self.pathlength()
        if steps == 0:
            return True
        expected = {
            'states': steps + 1,
            'actions': steps,
            'logps': steps,
            'rewards': steps,
            'max_torques': steps,
        }
        return all(len(getattr(self, name)) == want
                   for name, want in expected.items())

    def check_vals(self):
        """Returns False if any stored list contains a non-finite value."""
        for vals in vars(self).values():
            if isinstance(vals, list) and len(vals) > 0:
                for v in vals:
                    if not np.isfinite(v).all():
                        return False
        return True

    def clear(self):
        """Empties every list attribute and resets the terminate flag."""
        for vals in vars(self).values():
            if isinstance(vals, list):
                vals.clear()
        self.terminate = Terminate.Null
        return

    def calc_return(self):
        """Total (undiscounted) reward accumulated along the path."""
        return sum(self.rewards)

    def terminated(self):
        # NOTE(review): returns True when terminate == Null, i.e. when the
        # path did NOT end in failure — the name suggests the opposite.
        # Semantics preserved as-is for existing callers; confirm intent.
        return self.terminate == Terminate.Null

    def calc_max_torque(self):
        """Largest per-step max torque recorded along the path."""
        return max(self.max_torques)
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/sac_dev/util/saved_policy.py | sac_dev/util/saved_policy.py | """Run inference with a saved policy."""
import numpy as np
import tensorflow.compat.v1 as tf
class SavedPolicy(object):
  """Load a policy saved with sac_agent.py and run inference with it."""

  def __init__(self, export_dir):
    """Constructor.

    Args:
      export_dir: Directory and model identifier for reloading policy. Must
        contain a metagraph file, checkpoint index, and data. E.g. if
        initialized as SavedPolicy('policies/model.ckpt'), then 'policies/'
        must contain 'model.ckpt.meta', 'model.ckpt.index', and
        'model.ckpt.data-00000-of-00001'.
    """
    # Restore the full graph + weights into a fresh session.
    self._sess = tf.Session()
    saver = tf.train.import_meta_graph("{}.meta".format(export_dir))
    saver.restore(self._sess, export_dir)

  def __call__(self, state):
    """Runs inference with the policy.

    Makes strong assumptions about the names of tensors in the policy.

    Args:
      state: Array of floats, the input to the policy. It is assumed that only
        one state is being passed in and that the state is 1-dimensional.

    Returns:
      Action from the policy as a 1D np array.
    """
    state = np.reshape(state, (1, -1))
    # NOTE(review): tensor names "add_1:0" (output) and "s:0" (input) are
    # assumed to match the graph exported by sac_agent.py — confirm if the
    # export format ever changes.
    action = self._sess.run("add_1:0", feed_dict={"s:0": state})
    return np.reshape(action, (-1,))
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/sac_dev/util/logger.py | sac_dev/util/logger.py | """
Some simple logging functionality, inspired by rllab's logging.
Assumes that each diagnostic gets logged each iteration
Call logz.configure_output_file() to start logging to a
tab-separated-values file (some_file_name.txt)
To load the learning curves, you can do, for example
A = np.genfromtxt('/tmp/expt_1468984536/log.txt',delimiter='\t',dtype=None, names=True)
A['EpRewMean']
"""
import os.path as osp, shutil, time, atexit, os, subprocess
import sac_dev.util.mpi_util as mpi_util
import json
import errno
class MyEncoder(json.JSONEncoder):
    """JSON encoder that also serializes types, enum members and callables.

    Each is encoded as a small tagged dict (e.g. {'$class': 'pkg.Name'}) so
    experiment variants containing them remain JSON-serializable.
    """

    def default(self, o):
        # BUG FIX: `Enum` was referenced without being imported anywhere in
        # this module, so encoding an enum member raised NameError.
        from enum import Enum
        if isinstance(o, type):
            return {'$class': o.__module__ + "." + o.__name__}
        elif isinstance(o, Enum):
            return {
                '$enum': o.__module__ + "." + o.__class__.__name__ + '.' + o.name
            }
        elif callable(o):
            return {
                '$function': o.__module__ + "." + o.__name__
            }
        # Anything else falls through to the default (raises TypeError).
        return json.JSONEncoder.default(self, o)
def dict_to_safe_json(d):
    """
    Convert each value in the dictionary into a JSON'able primitive.

    Values that are already JSON-safe (None, bool, int, float, or
    lists/tuples/dicts composed of those) are kept as-is; nested dicts are
    converted recursively; anything else is stringified.

    :param d: dictionary to sanitize.
    :return: new dictionary containing only JSON-serializable values.
    """
    # BUG FIX: the helper `safe_json` was called but never defined in this
    # module (NameError at runtime); it is supplied here, following rlkit's
    # logging utilities.
    def safe_json(data):
        if data is None:
            return True
        elif isinstance(data, (bool, int, float)):
            return True
        elif isinstance(data, (tuple, list)):
            return all(safe_json(x) for x in data)
        elif isinstance(data, dict):
            return all(
                isinstance(k, str) and safe_json(v) for k, v in data.items())
        return False

    new_d = {}
    for key, item in d.items():
        if safe_json(item):
            new_d[key] = item
        else:
            if isinstance(item, dict):
                new_d[key] = dict_to_safe_json(item)
            else:
                new_d[key] = str(item)
    return new_d
def mkdir_p(path):
    """Create `path` and any missing parent directories, like `mkdir -p`.

    Succeeds silently if the directory already exists; still raises OSError
    if the path exists but is not a directory or creation fails.
    """
    # Python 3's exist_ok replaces the old Python-2-era errno.EEXIST dance.
    os.makedirs(path, exist_ok=True)
class Logger:
    """Tabular diagnostics logger (rllab-style).

    Collect key/value diagnostics once per iteration with log_tabular(),
    then print_tabular() to the console and/or dump_tabular() to a
    tab-separated text file. Only the MPI root process prints or writes.
    """

    class Entry:
        """A single logged value; `quiet` suppresses it from console output."""

        def __init__(self, val, quiet=False):
            self.val = val
            self.quiet = quiet
            return

    def print(str):
        """Print a message on the root process only. Call as Logger.print(msg)."""
        if (Logger.is_root()):
            print(str)
        return

    def is_root():
        """True on the MPI root process. Call as Logger.is_root()."""
        return mpi_util.is_root_proc()

    def __init__(self):
        self.output_file = None        # open log file handle (root only)
        self.first_row = True          # header not yet written
        self.log_headers = []          # column names, frozen after first row
        self.log_current_row = {}      # key -> Entry for current iteration
        self._dump_str_template = ""
        self._max_key_len = 0
        return

    def reset(self):
        """Forget all headers/values and truncate the output file, if any."""
        self.first_row = True
        self.log_headers = []
        self.log_current_row = {}
        if self.output_file is not None:
            # BUG FIX: previously reopened an undefined name `output_path`
            # (NameError). Reopen our own file, truncating it.
            path = self.output_file.name
            self.output_file.close()
            self.output_file = open(path, 'w')
        return

    def configure_output_file(self, filename=None, variant=None):
        """Start logging to `filename` (default: output/log_<time>.txt).

        If `variant` is given, it is also saved as variant.json next to the
        log file, encoded with MyEncoder. File I/O happens on the root only.
        """
        self.first_row = True
        self.log_headers = []
        self.log_current_row = {}

        output_path = filename or "output/log_%i.txt" % int(time.time())

        out_dir = os.path.dirname(output_path)
        if not os.path.exists(out_dir) and Logger.is_root():
            os.makedirs(out_dir)

        if (Logger.is_root()):
            self.output_file = open(output_path, 'w')
            assert osp.exists(output_path)
            atexit.register(self.output_file.close)
            Logger.print("Logging data to " + self.output_file.name)

            if variant is not None:
                variant_log_path = osp.join(out_dir, "variant.json")
                mkdir_p(out_dir)
                with open(variant_log_path, "w") as f:
                    json.dump(variant, f, indent=2, sort_keys=True, cls=MyEncoder)
        return

    def log_tabular(self, key, val, quiet=False):
        """Record one diagnostic value for the current iteration.

        Every key must be introduced during the first iteration; later
        iterations may only log already-known keys.
        """
        if self.first_row and key not in self.log_headers:
            self.log_headers.append(key)
            self._max_key_len = max(self._max_key_len, len(key))
        else:
            assert key in self.log_headers, "Trying to introduce a new key %s that you didn't include in the first iteration" % key
        self.log_current_row[key] = Logger.Entry(val, quiet)
        return

    def get_num_keys(self):
        """Number of registered columns."""
        return len(self.log_headers)

    def print_tabular(self):
        """Pretty-print the current iteration's non-quiet diagnostics (root only)."""
        if (Logger.is_root()):
            key_spacing = self._max_key_len
            format_str = "| %" + str(key_spacing) + "s | %15s |"

            Logger.print("-" * (22 + key_spacing))
            for key in self.log_headers:
                # BUG FIX: a key with no value this iteration used to yield a
                # bare "" whose `.quiet` access raised AttributeError; such
                # keys are now skipped.
                entry = self.log_current_row.get(key)
                if entry is None or entry.quiet:
                    continue
                val = entry.val
                if isinstance(val, float):
                    valstr = "%8.3g" % val
                elif isinstance(val, int):
                    valstr = str(val)
                else:
                    valstr = val
                Logger.print(format_str % (key, valstr))
            Logger.print("-" * (22 + key_spacing))
        return

    def dump_tabular(self):
        """Write the current iteration's values to the log file, then clear them."""
        if (Logger.is_root()):
            if (self.first_row):
                self._dump_str_template = self._build_str_template()

            vals = []
            for key in self.log_headers:
                # BUG FIX: missing keys previously crashed on `"".val`;
                # they are now written as empty fields.
                entry = self.log_current_row.get(key)
                vals.append("" if entry is None else entry.val)

            if self.output_file is not None:
                if self.first_row:
                    header_str = self._dump_str_template.format(*self.log_headers)
                    self.output_file.write(header_str + "\r\n")

                val_str = self._dump_str_template.format(*map(str, vals))
                self.output_file.write(val_str + "\r\n")
                self.output_file.flush()

        self.log_current_row.clear()
        self.first_row = False
        return

    def has_key(self, key):
        """Whether `key` is a registered column."""
        return key in self.log_headers

    def get_current_val(self, key):
        """Value logged for `key` this iteration, or None if absent."""
        val = None
        if (key in self.log_current_row.keys()):
            entry = self.log_current_row[key]
            val = entry.val
        return val

    def _build_str_template(self):
        # One fixed-width (25-char) left-aligned field per column.
        num_keys = self.get_num_keys()
        template = "{:<25}" * num_keys
        return template
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/sac_dev/util/net_util.py | sac_dev/util/net_util.py | import tensorflow as tf
def build_fc_net(input_tfs, layers,
                 activation=tf.nn.relu,
                 weight_init=tf.contrib.layers.xavier_initializer(),
                 reuse=False):
    """Stacks fully-connected layers on top of the concatenated inputs.

    Args:
        input_tfs: List of input tensors, concatenated along the last axis.
        layers: Iterable of hidden-layer widths.
        activation: Nonlinearity applied after every layer.
        weight_init: Kernel initializer (default: Xavier).
        reuse: Whether to reuse variables in each per-layer scope.

    Returns:
        The output tensor of the final layer.
    """
    net_tf = tf.concat(axis=-1, values=input_tfs)
    # Each layer lives in its own numeric variable scope ("0", "1", ...).
    for layer_idx, layer_size in enumerate(layers):
        with tf.variable_scope(str(layer_idx), reuse=reuse):
            net_tf = tf.layers.dense(inputs=net_tf,
                                     units=layer_size,
                                     kernel_initializer=weight_init,
                                     activation=activation)
    return net_tf
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/sac_dev/util/__init__.py | sac_dev/util/__init__.py | from . import * | python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/sac_dev/util/tf_util.py | sac_dev/util/tf_util.py | import tensorflow as tf
import numpy as np
def var_shape(x):
    """Returns the fully-known static shape of a TF1 tensor as a list of ints."""
    shape = [dim.value for dim in x.get_shape()]
    assert all(isinstance(d, int) for d in shape), "shape function assumes that shape is fully known"
    return shape
def intprod(x):
    """Product of the entries of `x`, returned as a Python int."""
    product = np.prod(x)
    return int(product)
def numel(x):
    """Total number of elements in tensor `x` (product of its static shape)."""
    n = intprod(var_shape(x))
    return n
class SetFromFlat(object):
    """Writes a flat parameter vector back into a list of TF variables.

    At construction time, builds a single grouped assign op that splits a
    flat placeholder into consecutive per-variable slices, each reshaped to
    that variable's static shape.
    """

    def __init__(self, sess, var_list):
        shapes = list(map(var_shape, var_list))
        total_size = np.sum([intprod(shape) for shape in shapes])

        self._sess = sess
        # One flat placeholder holding all parameters laid out end-to-end,
        # typed after the first variable (assumes a homogeneous dtype).
        self.theta_ph = tf.placeholder(var_list[0].dtype,[total_size])
        start=0
        assigns = []
        for (shape,v) in zip(shapes,var_list):
            size = intprod(shape)
            assigns.append(tf.assign(v, tf.reshape(self.theta_ph[start:start+size],shape)))
            start+=size
        self.op = tf.group(*assigns)

    def __call__(self, theta):
        # Runs all assigns, feeding `theta` as the flattened values.
        self._sess.run(self.op, feed_dict={self.theta_ph:theta})
class GetFlat(object):
    """Reads a list of TF variables as one flattened, concatenated vector."""

    def __init__(self, sess, var_list):
        self._sess = sess
        # Flatten each variable and concatenate; evaluation happens on call.
        self.op = tf.concat(axis=0, values=[tf.reshape(v, [numel(v)]) for v in var_list])

    def __call__(self):
        return self._sess.run(self.op)
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/sac_dev/util/replay_buffer.py | sac_dev/util/replay_buffer.py | import numpy as np
import copy
import sac_dev.util.logger as logger
import sac_dev.util.rl_path as rl_path
INVALID_IDX = -1
class ReplayBuffer(object):
TERMINATE_KEY = "terminate"
PATH_START_KEY = "path_start"
PATH_END_KEY = "path_end"
    def __init__(self, buffer_size):
        """Ring buffer of experience, stored as per-key parallel arrays.

        Args:
            buffer_size: Maximum number of timesteps held before old data
                is overwritten. Must be positive.
        """
        assert buffer_size > 0
        self._buffer_size = buffer_size
        self._total_count = 0            # total samples ever stored
        self._buffer_head = 0            # next write position
        self._buffer_tail = INVALID_IDX  # oldest valid position (INVALID_IDX when empty)
        self._num_paths = 0
        # Per-key storage arrays, allocated lazily on the first store().
        self._buffers = None
        self.clear()
        return
def sample(self, n, filter_end=True):
curr_size = self.get_current_size()
assert curr_size > 0
if (filter_end):
idx = np.empty(n, dtype=int)
# makes sure that the end states are not sampled
for i in range(n):
while True:
curr_idx = np.random.randint(0, curr_size, size=1)[0]
curr_idx += self._buffer_tail
curr_idx = np.mod(curr_idx, self._buffer_size)
if not self.is_path_end(curr_idx):
break
idx[i] = curr_idx
else:
idx = np.random.randint(0, curr_size, size=n)
idx += self._buffer_tail
idx = np.mod(idx, self._buffer_size)
return idx
    def get(self, key, idx):
        """Returns buffer values for `key` at index/indices `idx`."""
        return self._buffers[key][idx]

    def get_all(self, key):
        """Returns the whole storage array for `key` (including stale slots)."""
        return self._buffers[key]

    def get_unrolled_indices(self):
        """Valid buffer indices in insertion order (oldest to newest)."""
        indices = None
        if self._buffer_tail == INVALID_IDX:
            # Empty buffer.
            indices = []
        elif self._buffer_tail < self._buffer_head:
            indices = list(range(self._buffer_tail, self._buffer_head))
        else:
            # Contents wrap around the end of the ring.
            indices = list(range(self._buffer_tail, self._buffer_size))
            indices += list(range(0, self._buffer_head))
        return indices

    def get_path_start(self, idx):
        """Start index of the path containing `idx`."""
        return self._buffers[self.PATH_START_KEY][idx]

    def get_path_end(self, idx):
        """End index of the path containing `idx`."""
        return self._buffers[self.PATH_END_KEY][idx]
    def get_subpath_indices(self, idx):
        """Indices from `idx` to its path's end (inclusive), following wraparound.

        NOTE(review): this definition is shadowed by a second, nearly
        identical `get_subpath_indices` defined later in this class (without
        the int assert); only the later definition is ever called. One of the
        two should be removed.
        """
        assert(isinstance(idx, int))

        start_idx = idx
        end_idx = self.get_path_end(idx)
        if (start_idx <= end_idx):
            path_indices = list(range(start_idx, end_idx + 1))
        else:
            # Path wraps past the end of the ring buffer.
            path_indices = list(range(start_idx, self._buffer_size))
            path_indices += list(range(0, end_idx + 1))
        return path_indices
def get_pathlen(self, idx):
is_array = isinstance(idx, np.ndarray) or isinstance(idx, list)
if not is_array:
idx = [idx]
n = len(idx)
start_idx = self.get_path_start(idx)
end_idx = self.get_path_end(idx)
pathlen = np.empty(n, dtype=int)
for i in range(n):
curr_start = start_idx[i]
curr_end = end_idx[i]
if curr_start < curr_end:
curr_len = curr_end - curr_start
else:
curr_len = self._buffer_size - curr_start + curr_end
pathlen[i] = curr_len
if not is_array:
pathlen = pathlen[0]
return pathlen
def get_subpath_indices(self, start_idx):
end_idx = self.get_path_end(start_idx)
if start_idx <= end_idx:
path_indices = list(range(start_idx, end_idx + 1))
else:
path_indices = list(range(start_idx, self._buffer_size))
path_indices += list(range(0, end_idx + 1))
return path_indices
def is_valid_path(self, idx):
start_idx = self.get_path_start(idx)
valid = start_idx != INVALID_IDX
return valid
def store(self, path):
start_idx = INVALID_IDX
n = path.pathlength()
if (n > 0):
assert path.is_valid()
if path.check_vals():
if self._buffers is None:
self._init_buffers(path)
idx = self._request_idx(n + 1)
self._store_path(path, idx)
self._num_paths += 1
self._total_count += n + 1
start_idx = idx[0]
else:
logger.Logger.print("Invalid path data value detected")
return start_idx
def clear(self):
self._buffer_head = 0
self._buffer_tail = INVALID_IDX
self._num_paths = 0
return
def get_total_count(self):
return self._total_count
def get_prev_idx(self, idx):
prev_idx = idx - 1
prev_idx[prev_idx < 0] += self._buffer_size
is_start = self.is_path_start(idx)
prev_idx[is_start] = idx[is_start]
return prev_idx
def get_next_idx(self, idx):
next_idx = np.mod(idx + 1, self._buffer_size)
is_end = self.is_path_end(idx)
next_idx[is_end] = idx[is_end]
return next_idx
def is_terminal_state(self, idx):
terminate_flags = self._buffers[self.TERMINATE_KEY][idx]
terminate = terminate_flags != rl_path.Terminate.Null.value
is_end = self.is_path_end(idx)
terminal_state = np.logical_and(terminate, is_end)
return terminal_state
def check_terminal_flag(self, idx, flag):
terminate_flags = self._buffers[self.TERMINATE_KEY][idx]
terminate = (terminate_flags == flag.value)
return terminate
def is_path_start(self, idx):
is_end = self._buffers[self.PATH_START_KEY][idx] == idx
return is_end
def is_path_end(self, idx):
is_end = self._buffers[self.PATH_END_KEY][idx] == idx
return is_end
def get_current_size(self):
if self._buffer_tail == INVALID_IDX:
return 0
elif self._buffer_tail < self._buffer_head:
return self._buffer_head - self._buffer_tail
else:
return self._buffer_size - self._buffer_tail + self._buffer_head
def get_valid_idx(self):
valid_idx = np.argwhere(self._buffers[self.PATH_START_KEY] != INVALID_IDX)
is_end = self.is_path_end(valid_idx)
valid_idx = valid_idx[np.logical_not(is_end)]
return valid_idx
def _init_buffers(self, path):
self._buffers = dict()
self._buffers[self.PATH_START_KEY] = INVALID_IDX * np.ones(self._buffer_size, dtype=int);
self._buffers[self.PATH_END_KEY] = INVALID_IDX * np.ones(self._buffer_size, dtype=int);
self._buffers[self.TERMINATE_KEY] = np.zeros(shape=[self._buffer_size], dtype=int)
for key, val in vars(path).items():
if type(val) is list:
val_type = type(val[0])
is_array = val_type == np.ndarray
if is_array:
shape = [self._buffer_size, val[0].shape[0]]
dtype = val[0].dtype
else:
shape = [self._buffer_size]
dtype = val_type
self._buffers[key] = np.zeros(shape, dtype=dtype)
return
def _request_idx(self, n):
assert n + 1 < self._buffer_size # bad things can happen if path is too long
remainder = n
idx = []
start_idx = self._buffer_head
while remainder > 0:
end_idx = np.minimum(start_idx + remainder, self._buffer_size)
remainder -= (end_idx - start_idx)
free_idx = list(range(start_idx, end_idx))
self._free_idx(free_idx)
idx += free_idx
start_idx = 0
self._buffer_head = (self._buffer_head + n) % self._buffer_size
return idx
def _free_idx(self, idx):
assert(idx[0] <= idx[-1])
n = len(idx)
if self._buffer_tail != INVALID_IDX:
update_tail = idx[0] <= idx[-1] and idx[0] <= self._buffer_tail and idx[-1] >= self._buffer_tail
update_tail |= idx[0] > idx[-1] and (idx[0] <= self._buffer_tail or idx[-1] >= self._buffer_tail)
if update_tail:
i = 0
while i < n:
curr_idx = idx[i]
if self.is_valid_path(curr_idx):
start_idx = self.get_path_start(curr_idx)
end_idx = self.get_path_end(curr_idx)
pathlen = self.get_pathlen(curr_idx)
if start_idx < end_idx:
self._buffers[self.PATH_START_KEY][start_idx:end_idx + 1] = INVALID_IDX
else:
self._buffers[self.PATH_START_KEY][start_idx:self._buffer_size] = INVALID_IDX
self._buffers[self.PATH_START_KEY][0:end_idx + 1] = INVALID_IDX
self._num_paths -= 1
i += pathlen + 1
self._buffer_tail = (end_idx + 1) % self._buffer_size;
else:
i += 1
else:
self._buffer_tail = idx[0]
return
def _store_path(self, path, idx):
n = path.pathlength()
for key, data in self._buffers.items():
if key != self.PATH_START_KEY and key != self.PATH_END_KEY and key != self.TERMINATE_KEY:
val = getattr(path, key)
val_len = len(val)
assert val_len == n or val_len == n + 1
data[idx[:val_len]] = val
self._buffers[self.TERMINATE_KEY][idx[:-1]] = rl_path.Terminate.Null.value
self._buffers[self.TERMINATE_KEY][idx[-1]] = path.terminate.value
self._buffers[self.PATH_START_KEY][idx] = idx[0]
self._buffers[self.PATH_END_KEY][idx] = idx[-1]
return | python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
import copy
import numpy as np
import tensorflow as tf
from sac_dev.util.logger import Logger
import sac_dev.util.mpi_util as mpi_util
class Normalizer(object):
    """Running mean/std normalizer with mirrored TF variables.

    Samples are accumulated locally via record() and folded into the running
    statistics by update(), which also synchronizes the new counts across MPI
    workers.  The mean/std are mirrored into TF variables so they can be used
    inside the graph (normalize_tf / unnormalize_tf) and restored from
    checkpoints (load()).
    """
    CHECK_SYNC_COUNT = 50000  # check synchronization after a certain number of entries

    def __init__(self,
                 sess,
                 scope,
                 size,
                 init_mean=None,
                 init_std=None,
                 eps=0.01,
                 clip=np.inf):
        """
        Args:
            sess: TF session used to run the mirror-variable update ops.
            scope: variable scope name for the TF mirror variables.
            size: dimensionality of the vectors being normalized.
            init_mean: optional initial mean (scalar allowed when size == 1).
            init_std: optional initial std (scalar allowed when size == 1).
            eps: lower bound on the std to avoid division by zero.
            clip: normalized values are clipped to [-clip, clip].
        """
        self._sess = sess
        self._scope = scope
        self._eps = eps
        self._clip = clip
        self._mean = np.zeros(size)
        self._std = np.ones(size)
        self._count = 0

        if init_mean is not None:
            if not isinstance(init_mean, np.ndarray):
                assert(size == 1)
                init_mean = np.array([init_mean])

            assert init_mean.size == size, \
            Logger.print('Normalizer init mean shape mismatch, expecting size {:d}, but got {:d}'.format(size, init_mean.size))
            self._mean = init_mean

        if init_std is not None:
            if not isinstance(init_std, np.ndarray):
                assert(size == 1)
                init_std = np.array([init_std])

            assert init_std.size == size, \
            Logger.print('Normalizer init std shape mismatch, expecting size {:d}, but got {:d}'.format(size, init_std.size))
            self._std = init_std

        self._mean_sq = self.calc_mean_sq(self._mean, self._std)

        # accumulators for samples recorded since the last update()
        self._new_count = 0
        self._new_sum = np.zeros_like(self._mean)
        self._new_sum_sq = np.zeros_like(self._mean_sq)

        with tf.variable_scope(self._scope):
            self._build_resource_tf()
        return

    def record(self, x):
        """Accumulates sample(s) x into the pending statistics.

        Accepts a scalar (when size == 1) or an array whose last dimension
        equals the normalizer size.
        """
        size = self.get_size()
        is_array = isinstance(x, np.ndarray)
        if not is_array:
            assert(size == 1)
            x = np.array([[x]])

        assert x.shape[-1] == size, \
            Logger.print('Normalizer shape mismatch, expecting size {:d}, but got {:d}'.format(size, x.shape[-1]))
        x = np.reshape(x, [-1, size])

        self._new_count += x.shape[0]
        self._new_sum += np.sum(x, axis=0)
        self._new_sum_sq += np.sum(np.square(x), axis=0)
        return

    def update(self):
        """Folds pending samples (reduced across MPI workers) into the running
        mean/std and pushes the result to the TF mirror variables."""
        new_count = mpi_util.reduce_sum(self._new_count)
        new_sum = mpi_util.reduce_sum(self._new_sum)
        new_sum_sq = mpi_util.reduce_sum(self._new_sum_sq)

        if (new_count > 0):
            new_total = self._count + new_count
            # periodically verify that all workers agree on the statistics
            if (self._count // self.CHECK_SYNC_COUNT != new_total // self.CHECK_SYNC_COUNT):
                assert self._check_synced(), Logger.print("Normalizer parameters desynchronized")

            new_mean = new_sum / new_count
            new_mean_sq = new_sum_sq / new_count
            w_old = float(self._count) / new_total
            w_new = float(new_count) / new_total

            self._mean = w_old * self._mean + w_new * new_mean
            self._mean_sq = w_old * self._mean_sq + w_new * new_mean_sq
            self._count = new_total
            self._std = self.calc_std(self._mean, self._mean_sq)

            self._new_count = 0
            self._new_sum.fill(0)
            self._new_sum_sq.fill(0)

            self._update_resource_tf()
        return

    def get_size(self):
        """Returns the dimensionality of the normalized vectors."""
        return self._mean.size

    def set_mean_std(self, mean, std):
        """Overwrites the running mean/std and syncs the TF mirrors."""
        size = self.get_size()
        is_array = isinstance(mean, np.ndarray) and isinstance(std, np.ndarray)

        if not is_array:
            assert(size == 1)
            mean = np.array([mean])
            std = np.array([std])

        assert len(mean) == size and len(std) == size, \
            Logger.print('Normalizer shape mismatch, expecting size {:d}, but got {:d} and {:d}'.format(size, len(mean), len(std)))

        self._mean = mean
        self._std = std
        self._mean_sq = self.calc_mean_sq(self._mean, self._std)
        self._update_resource_tf()
        return

    def normalize(self, x):
        """Returns (x - mean) / std, clipped to [-clip, clip]."""
        norm_x = (x - self._mean) / self._std
        norm_x = np.clip(norm_x, -self._clip, self._clip)
        return norm_x

    def unnormalize(self, norm_x):
        """Inverse of normalize() (without undoing the clipping)."""
        x = norm_x * self._std + self._mean
        return x

    def calc_std(self, mean, mean_sq):
        """Returns the std implied by the given mean and E[x^2], floored at
        eps."""
        var = mean_sq - np.square(mean)
        # some time floating point errors can lead to small negative numbers
        var = np.maximum(var, 0)
        std = np.sqrt(var)
        std = np.maximum(std, self._eps)
        return std

    def calc_mean_sq(self, mean, std):
        """Returns E[x^2] implied by the given mean and std."""
        # bug fix: previously squared self._mean instead of the mean argument
        return np.square(std) + np.square(mean)

    def load(self):
        """Pulls count/mean/std from the TF mirror variables (e.g. after
        restoring a checkpoint)."""
        count, mean, std = self._sess.run([self._count_tf, self._mean_tf, self._std_tf])
        self._count = count[0]
        self._mean = mean
        self._std = std
        self._mean_sq = self.calc_mean_sq(self._mean, self._std)
        return

    def normalize_tf(self, x):
        """Graph-side equivalent of normalize()."""
        norm_x = (x - self._mean_tf) / self._std_tf
        norm_x = tf.clip_by_value(norm_x, -self._clip, self._clip)
        return norm_x

    def unnormalize_tf(self, norm_x):
        """Graph-side equivalent of unnormalize()."""
        x = norm_x * self._std_tf + self._mean_tf
        return x

    def need_update(self):
        """Returns whether there are recorded samples pending an update()."""
        return self._new_count > 0

    def _build_resource_tf(self):
        """Creates the TF mirror variables, the staging variables used to feed
        new values, and the grouped assign op."""
        self._count_tf = tf.get_variable(dtype=tf.int32, name="count", initializer=np.array([self._count], dtype=np.int32), trainable=False)
        self._mean_tf = tf.get_variable(dtype=tf.float32, name="mean", initializer=self._mean.astype(np.float32), trainable=False)
        self._std_tf = tf.get_variable(dtype=tf.float32, name="std", initializer=self._std.astype(np.float32), trainable=False)

        self._count_ph = tf.get_variable(dtype=tf.int32, name="count_ph", shape=[1])
        self._mean_ph = tf.get_variable(dtype=tf.float32, name="mean_ph", shape=self._mean.shape)
        self._std_ph = tf.get_variable(dtype=tf.float32, name="std_ph", shape=self._std.shape)

        self._update_op = tf.group(
            self._count_tf.assign(self._count_ph),
            self._mean_tf.assign(self._mean_ph),
            self._std_tf.assign(self._std_ph)
        )
        return

    def _update_resource_tf(self):
        """Pushes the current numpy statistics into the TF mirror variables."""
        feed = {
            self._count_ph: np.array([self._count], dtype=np.int32),
            self._mean_ph: self._mean,
            self._std_ph: self._std
        }
        self._sess.run(self._update_op, feed_dict=feed)
        return

    def _check_synced(self):
        """Returns whether this worker's mean/std match the root process's
        (broadcast comparison; always True on the root)."""
        synced = True
        if (mpi_util.is_root_proc()):
            vars = np.concatenate([self._mean, self._std])
            mpi_util.bcast(vars)
        else:
            vars_local = np.concatenate([self._mean, self._std])
            vars_root = np.empty_like(vars_local)
            mpi_util.bcast(vars_root)
            synced = (vars_local == vars_root).all()
        return synced
import abc
import collections
import copy
import gym
import numpy as np
import os
import tensorflow as tf
import time
import sac_dev.util.logger as logger
import sac_dev.util.mpi_util as mpi_util
import sac_dev.util.normalizer as normalizer
import sac_dev.util.replay_buffer as replay_buffer
import sac_dev.util.rl_path as rl_path
import pybullet as p
import moviepy.editor as mpy
import pickle
'''
Reinforcement Learning Agent
'''
class RLAgent(abc.ABC):
    """Base class for reinforcement-learning agents.

    Handles rollout collection, the replay buffer, state/action/value
    normalizers, logging/checkpointing, and the MPI-distributed training
    loop.  Subclasses provide the networks and update rule via the abstract
    methods (sample_action, _build_nets, _build_losses, _build_solvers,
    _update).
    """
    MAIN_SCOPE = "main"
    TARGET_SCOPE = "target"
    ACTOR_SCOPE = "actor"
    CRITIC_SCOPE = "critic"
    SOLVER_SCOPE = "solver"
    RESOURCE_SCOPE = "resource"

    def __init__(self,
                 env,
                 sess,
                 discount=0.99,
                 samples_per_iter=2048,
                 replay_buffer_size=50000,
                 normalizer_samples=100000,
                 enable_val_norm=False,
                 visualize=False):
        """
        Args:
            env: gym-style environment to collect experience from.
            sess: TF session.
            discount: reward discount factor.
            samples_per_iter: total samples per training iteration, split
                across MPI workers.
            replay_buffer_size: total replay capacity, split across workers.
            normalizer_samples: stop updating the state normalizer after this
                many total samples.
            enable_val_norm: whether to normalize value targets.
            visualize: whether to render the env during rollouts.
        """
        self._env = env
        self._sess = sess
        self._discount = discount
        self._samples_per_iter = samples_per_iter
        self._normalizer_samples = normalizer_samples
        self._enable_val_norm = enable_val_norm

        num_procs = mpi_util.get_num_procs()
        local_replay_buffer_size = int(np.ceil(replay_buffer_size / num_procs))
        self._replay_buffer = self._build_replay_buffer(local_replay_buffer_size)
        self.visualize = visualize
        self._logger = None
        self._tf_writer = None  # created lazily on the root process in train()

        with self._sess.as_default(), self._sess.graph.as_default():
            with tf.variable_scope(self.RESOURCE_SCOPE):
                self._build_normalizers()

            self._build_nets()

            with tf.variable_scope(self.SOLVER_SCOPE):
                self._build_losses()
                self._build_solvers()

            self._init_vars()
            self._build_saver()
        return

    def get_state_size(self):
        """Returns the flattened observation dimensionality."""
        state_size = np.prod(self._env.observation_space.shape)
        return state_size

    def get_action_size(self):
        """Returns the action dimensionality (1 for discrete actions)."""
        action_size = 0
        action_space = self.get_action_space()
        if (isinstance(action_space, gym.spaces.Box)):
            action_size = np.prod(action_space.shape)
        elif (isinstance(action_space, gym.spaces.Discrete)):
            action_size = 1
        else:
            assert False, "Unsupported action space: " + str(self._env.action_space)
        return action_size

    def get_action_space(self):
        """Returns the environment's action space."""
        return self._env.action_space

    def get_total_samples(self):
        """Returns the lifetime sample count summed across MPI workers."""
        total_samples = self._replay_buffer.get_total_count()
        total_samples = int(mpi_util.reduce_sum(total_samples))
        return total_samples

    def eval(self, num_episodes):
        """Evaluates the agent; episodes are split across MPI workers."""
        num_procs = mpi_util.get_num_procs()
        local_num_episodes = int(np.ceil(num_episodes / num_procs))
        # bug fix: previously the global episode count was passed here, so
        # every worker ran all num_episodes instead of its local share
        test_return, test_path_count = self._rollout_test(local_num_episodes, print_info=True)
        test_return = mpi_util.reduce_mean(test_return)
        test_path_count = mpi_util.reduce_sum(test_path_count)

        logger.Logger.print("Test_Return: {:.3f}".format(test_return))
        logger.Logger.print("Test_Paths: {:.3f}".format(test_path_count))
        return

    def _log(self, info_dict, iter):
        """Writes metrics to the tabular logger and (on root) tensorboard."""
        for label, value in info_dict.items():
            self._logger.log_tabular(os.path.basename(label), value)

        if self._tf_writer is not None:
            value_list = []
            for label, value in info_dict.items():
                # Catch-all category so tensorboard will show a grid rather than
                # a single column of graphs.
                if "/" not in label:
                    label = "Metrics/" + label
                value_list.append(tf.Summary.Value(tag=label, simple_value=value))
            self._tf_writer.add_summary(tf.Summary(value=value_list), iter)

    def train(self, max_samples, test_episodes, output_dir, output_iters, variant=None):
        """Runs the main training loop until max_samples have been collected.

        Args:
            max_samples: total number of environment samples to collect.
            test_episodes: number of evaluation episodes per test pass.
            output_dir: directory for logs, checkpoints, and videos.
            output_iters: evaluate/checkpoint every this many iterations.
            variant: optional experiment config recorded in the log file.
        """
        log_file = os.path.join(output_dir, "log.txt")
        self._logger = logger.Logger()
        self._logger.configure_output_file(log_file, variant=variant)
        video_dir = os.path.join(output_dir, "videos")

        # only the root process writes checkpoints, videos, and tensorboard
        if (mpi_util.is_root_proc()):
            os.makedirs(video_dir, exist_ok=True)
            model_dir = os.path.join(output_dir, "train")
            os.makedirs(model_dir, exist_ok=True)
            self._tf_writer = tf.summary.FileWriter(
                os.path.join(output_dir, "tensorboard"), graph=self._sess.graph)

        iter = 0
        total_train_path_count = 0
        test_return = 0
        total_test_path_count = 0
        start_time = time.time()

        self._init_train()

        num_procs = mpi_util.get_num_procs()
        local_samples_per_iter = int(np.ceil(self._samples_per_iter / num_procs))
        local_test_episodes = int(np.ceil(test_episodes / num_procs))
        total_samples = 0
        print("Training")
        while (total_samples < max_samples):
            update_normalizer = self._enable_normalizer_update(total_samples)
            train_return, train_path_count, new_sample_count, metrics = self._rollout_train(local_samples_per_iter, update_normalizer)
            train_return = mpi_util.reduce_mean(train_return)
            train_path_count = mpi_util.reduce_sum(train_path_count)
            new_sample_count = mpi_util.reduce_sum(new_sample_count)

            total_train_path_count += train_path_count
            total_samples = self.get_total_samples()
            wall_time = time.time() - start_time
            wall_time /= 60 * 60  # store time in hours

            log_dict = {
                "Iteration": iter,
                "Wall_Time": wall_time,
                "Samples": total_samples,
                "Train_Return": train_return,
                "Train_Paths": total_train_path_count,
                "Test_Return": test_return,
                "Test_Paths": total_test_path_count}
            for metric_name in sorted(metrics.keys()):
                value = metrics[metric_name]
                if metric_name == "max_torque":
                    log_dict["Max_Torque"] = mpi_util.reduce_max(value)
                    continue
                log_dict[metric_name] = mpi_util.reduce_mean(value)
            self._log(log_dict, iter)

            # make sure normalizer stats are applied before the first update
            if (self._need_normalizer_update() and iter == 0):
                self._update_normalizers()

            self._update(iter, new_sample_count)

            if (self._need_normalizer_update()):
                self._update_normalizers()

            if (iter % output_iters == 0):
                test_return, test_path_count = self._rollout_test(local_test_episodes, print_info=False)
                test_return = mpi_util.reduce_mean(test_return)
                total_test_path_count += mpi_util.reduce_sum(test_path_count)
                self._log({
                    "Test_Return": test_return,
                    "Test_Paths": total_test_path_count,
                }, iter)

                if (mpi_util.is_root_proc()):
                    model_file = os.path.join(model_dir, "model-{:06d}.ckpt".format(iter))
                    self.save_model(model_file)
                    self.save_video(os.path.join(video_dir, "iter-" + str(iter) + ".gif"))
                    # persist the replay buffer alongside the checkpoint so
                    # training can be resumed via load_model()
                    buffer_file = os.path.join(model_dir, "buffer.pkl")
                    with open(buffer_file, "wb") as f:
                        pickle.dump(self._replay_buffer, f)

                    self._logger.print_tabular()
                    self._logger.dump_tabular()
                else:
                    self._logger.print_tabular()
            iter += 1

        # bug fix: the writer only exists on the root process; closing None
        # raised AttributeError on all other workers
        if self._tf_writer is not None:
            self._tf_writer.close()
        self._tf_writer = None
        return

    def save_model(self, out_path):
        """Saves model parameters to out_path (best effort, never raises)."""
        try:
            save_path = self._saver.save(self._sess, out_path, write_meta_graph=False, write_state=False)
            logger.Logger.print("Model saved to: " + save_path)
        except Exception:
            # best-effort: a failed checkpoint should not kill training
            logger.Logger.print("Failed to save model to: " + out_path)
        return

    def save_video(self, out_path):
        """Renders one test rollout and writes it as a gif (best effort)."""
        try:
            _, video_frames, _ = self._rollout_path(test=True, return_video=True)
            # pad with black frames so the gif pauses before looping
            video_frames.extend([np.zeros_like(video_frames[0]) for _ in range(15)])
            clip = mpy.ImageSequenceClip(video_frames, fps=(1/(.033)))
            clip.write_gif(out_path)
            logger.Logger.print("Video saved to: " + out_path)
        except Exception:
            logger.Logger.print("Failed to save video to: " + out_path)
        return

    def load_model(self, in_path):
        """Restores model parameters from a checkpoint and, if present, the
        pickled replay buffer saved next to it."""
        self._saver.restore(self._sess, in_path)
        # load in pickled buffer; the checkpoint filename
        # "model-XXXXXX.ckpt" is 17 chars, stripping it yields the model dir
        try:
            self._replay_buffer = pickle.load(open(in_path[:-17]+"buffer.pkl", "rb"))
        except Exception:
            logger.Logger.print("NO REPLAY BUFFER FOUND")
        self._load_normalizers()
        self._sync_tar_vars()
        logger.Logger.print("Model loaded from: " + in_path)
        return

    def get_state_bound_min(self):
        """Returns the observation-space lower bounds."""
        return self._env.observation_space.low

    def get_state_bound_max(self):
        """Returns the observation-space upper bounds."""
        return self._env.observation_space.high

    def get_action_bound_min(self):
        """Returns the action lower bounds (-inf for discrete actions)."""
        action_space = self.get_action_space()
        if (isinstance(action_space, gym.spaces.Box)):
            bound_min = self._env.action_space.low
        else:
            bound_min = -np.inf * np.ones(1)
        return bound_min

    def get_action_bound_max(self):
        """Returns the action upper bounds (+inf for discrete actions)."""
        action_space = self.get_action_space()
        if (isinstance(action_space, gym.spaces.Box)):
            bound_max = self._env.action_space.high
        else:
            bound_max = np.inf * np.ones(1)
        return bound_max

    def render_env(self):
        """Renders the environment."""
        self._env.render()
        return

    def _build_normalizers(self):
        """Builds the state, action, and value normalizers."""
        self._s_norm = self._build_normalizer_state()
        self._a_norm = self._build_normalizer_action()
        self._val_norm = self._build_normalizer_val()
        return

    def _need_normalizer_update(self):
        """Returns whether the state normalizer has pending samples."""
        return self._s_norm.need_update()

    def _build_normalizer_state(self):
        """Builds a state normalizer initialized from the observation bounds;
        unbounded dimensions default to mean 0, std 1."""
        size = self.get_state_size()
        high = self.get_state_bound_max().copy()
        low = self.get_state_bound_min().copy()
        inf_mask = np.logical_or((high >= np.finfo(np.float32).max), (low <= np.finfo(np.float32).min))
        high[inf_mask] = 1.0
        low[inf_mask] = -1.0
        mean = 0.5 * (high + low)
        std = 0.5 * (high - low)
        norm = normalizer.Normalizer(sess=self._sess, scope="s_norm", size=size, init_mean=mean, init_std=std)
        return norm

    def _build_normalizer_action(self):
        """Builds an action normalizer from the (required finite) bounds."""
        size = self.get_action_size()
        high = self.get_action_bound_max().copy()
        low = self.get_action_bound_min().copy()
        inf_mask = np.logical_or((high >= np.finfo(np.float32).max), (low <= np.finfo(np.float32).min))
        assert(not any(inf_mask)), "actions must be bounded"
        mean = 0.5 * (high + low)
        std = 0.5 * (high - low)
        norm = normalizer.Normalizer(sess=self._sess, scope="a_norm", size=size, init_mean=mean, init_std=std)
        return norm

    def _build_normalizer_val(self):
        """Builds the value normalizer; when enabled the std is scaled by the
        discounted-return horizon 1 / (1 - discount)."""
        mean = 0.0
        if (self._enable_val_norm):
            std = 1.0 / (1.0 - self._discount)
        else:
            std = 1.0
        norm = normalizer.Normalizer(sess=self._sess, scope="val_norm", size=1, init_mean=mean, init_std=std)
        return norm

    def _build_replay_buffer(self, buffer_size):
        """Builds this worker's local replay buffer."""
        buffer = replay_buffer.ReplayBuffer(buffer_size=buffer_size)
        return buffer

    @abc.abstractmethod
    def sample_action(self, s, test):
        """Returns (action, log-probability) for state s."""
        pass

    @abc.abstractmethod
    def _build_nets(self):
        """Builds the actor/critic networks."""
        pass

    @abc.abstractmethod
    def _build_losses(self):
        """Builds the training losses."""
        pass

    @abc.abstractmethod
    def _build_solvers(self):
        """Builds the optimizers."""
        pass

    @abc.abstractmethod
    def _update(self, iter, new_sample_count):
        """Performs one iteration of parameter updates."""
        pass

    def _init_vars(self):
        """Initializes all TF variables."""
        self._sess.run(tf.global_variables_initializer())
        return

    def _build_saver(self):
        """Builds a saver over all non-solver variables."""
        vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
        vars = [v for v in vars if self.SOLVER_SCOPE + '/' not in v.name]
        assert len(vars) > 0
        self._saver = tf.train.Saver(vars, max_to_keep=0)
        return

    def _init_train(self):
        """Resets per-run training state."""
        self._replay_buffer.clear()
        return

    def _rollout_train(self, num_samples, update_normalizer):
        """Collects at least num_samples training samples into the buffer.

        Returns (avg_return, path_count, new_sample_count, aggregate_metrics).
        """
        new_sample_count = 0
        total_return = 0
        path_count = 0
        all_metrics = collections.defaultdict(list)
        while (new_sample_count < num_samples):
            path, _, metrics = self._rollout_path(test=False)
            path_id = self._replay_buffer.store(path)
            valid_path = path_id != replay_buffer.INVALID_IDX
            if not valid_path:
                assert False, "Invalid path detected"

            path_return = path.calc_return()
            if (update_normalizer):
                self._record_normalizers(path)

            # each metric entry appears to be a (value, aggregate_fn) pair
            # -- TODO confirm against the env's info["metrics"] producer
            for metric_name in metrics:
                all_metrics[metric_name].append(metrics[metric_name][0])
            all_metrics["max_torque"].append(path.calc_max_torque())
            new_sample_count += path.pathlength()
            total_return += path_return
            path_count += 1

        avg_return = total_return / path_count
        # register an aggregator for the synthetic max_torque metric on the
        # last rollout's metrics dict, which serves as the fn lookup below
        metrics["max_torque"] = (None, np.max)
        aggregate_metrics = {}
        for metric_name, val_list in all_metrics.items():
            aggregate_fn = metrics[metric_name][1]
            aggregate_metrics[metric_name] = aggregate_fn(val_list)

        return avg_return, path_count, new_sample_count, aggregate_metrics

    def _rollout_test(self, num_episodes, print_info=False):
        """Runs num_episodes deterministic rollouts.

        Returns (avg_return, num_episodes).
        """
        total_return = 0
        for e in range(num_episodes):
            path, _, _ = self._rollout_path(test=True)
            path_return = path.calc_return()
            total_return += path_return

            if (print_info):
                logger.Logger.print("Episode: {:d}".format(e))
                logger.Logger.print("Curr_Return: {:.3f}".format(path_return))
                logger.Logger.print("Avg_Return: {:.3f}\n".format(total_return / (e + 1)))

        avg_return = total_return / num_episodes
        return avg_return, num_episodes

    def _rollout_path(self, test, return_video=False):
        """Rolls out a single episode.

        Returns (path, video_frames, metrics) where metrics comes from the
        final step's info dict (empty dict when absent).
        """
        path = rl_path.RLPath()

        s = self._env.reset()
        s = np.array(s)
        path.states.append(s)
        video_frames = []
        done = False
        while not done:
            a, logp = self.sample_action(s, test)
            s, r, done, info = self._step_env(a)
            s = np.array(s)

            path.states.append(s)
            path.actions.append(a)
            path.rewards.append(r)
            path.max_torques.append(info['max_torque'])
            path.logps.append(logp)

            if (self.visualize):
                self.render_env()
            if return_video:
                video_frames.append(self._env.render(mode="rgb_array"))

        path.terminate = self._check_env_termination()

        return path, video_frames, info.get("metrics", {})

    def _step_env(self, a):
        """Steps the env, converting array actions to int for discrete spaces."""
        if (isinstance(self._env.action_space, gym.spaces.Discrete)):
            a = int(a[0])
        output = self._env.step(a)
        return output

    def _check_env_termination(self):
        """Classifies the episode end: hitting the step limit counts as a
        non-failure (Null) truncation, anything else as a failure."""
        if (self._env._env_step_counter >= self._env._max_episode_steps):
            term = rl_path.Terminate.Null
        else:
            term = rl_path.Terminate.Fail
        return term

    def _record_normalizers(self, path):
        """Records the path's states into the state normalizer."""
        states = np.array(path.states)
        self._s_norm.record(states)
        return

    def _update_normalizers(self):
        """Applies pending normalizer statistics."""
        self._s_norm.update()
        return

    def _load_normalizers(self):
        """Reloads normalizer statistics from their TF variables."""
        self._s_norm.load()
        self._a_norm.load()
        self._val_norm.load()
        return

    def _build_action_pd(self, input_tf, init_output_scale, mean_activation=None, reuse=False):
        """Builds the action distribution head on top of input_tf.

        Continuous (Box) spaces get a fixed-std diagonal Gaussian; the std
        comes from self._action_std, which is expected to be set by the
        subclass before this is called (set in SACAgent.__init__).
        Discrete spaces get a Categorical over logits.
        """
        action_space = self.get_action_space()
        if (isinstance(action_space, gym.spaces.Box)):
            output_size = self.get_action_size()
            mean_kernel_init = tf.random_uniform_initializer(minval=-init_output_scale, maxval=init_output_scale)
            mean_bias_init = tf.zeros_initializer()
            logstd_kernel_init = tf.random_uniform_initializer(minval=-init_output_scale, maxval=init_output_scale)
            logstd_bias_init = np.log(self._action_std) * np.ones(output_size)
            logstd_bias_init = logstd_bias_init.astype(np.float32)

            with tf.variable_scope("mean", reuse=reuse):
                mean_tf = tf.layers.dense(inputs=input_tf, units=output_size,
                                          kernel_initializer=mean_kernel_init,
                                          bias_initializer=mean_bias_init,
                                          activation=None)
                if (mean_activation is not None):
                    mean_tf = mean_activation(mean_tf)
            with tf.variable_scope("logstd", reuse=reuse):
                # non-trainable: the policy std stays fixed at action_std
                logstd_tf = tf.get_variable(dtype=tf.float32, name="bias", initializer=logstd_bias_init,
                                            trainable=False)
                logstd_tf = tf.broadcast_to(logstd_tf, tf.shape(mean_tf))
            std_tf = tf.exp(logstd_tf)
            a_pd_tf = tf.contrib.distributions.MultivariateNormalDiag(loc=mean_tf, scale_diag=std_tf)
        elif (isinstance(action_space, gym.spaces.Discrete)):
            output_size = self._env.action_space.n
            kernel_init = tf.random_uniform_initializer(minval=-init_output_scale, maxval=init_output_scale)
            bias_init = tf.zeros_initializer()
            with tf.variable_scope("logits", reuse=reuse):
                logits_tf = tf.layers.dense(inputs=input_tf, units=output_size,
                                            kernel_initializer=kernel_init,
                                            bias_initializer=bias_init,
                                            activation=None)
            a_pd_tf = tf.contrib.distributions.Categorical(logits=logits_tf)
        else:
            assert False, "Unsupported action space: " + str(self._env.action_space)

        return a_pd_tf

    def _tf_vars(self, scope=""):
        """Returns the trainable variables under scope (must be non-empty)."""
        vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope)
        assert len(vars) > 0
        return vars

    def _enable_normalizer_update(self, total_samples):
        """Returns whether the normalizers should still be updated."""
        enable_update = total_samples < self._normalizer_samples
        return enable_update

    def _action_l2_loss(self, a_pd_tf):
        """L2 regularization on the policy mean (Box) or logits (Discrete)."""
        action_space = self.get_action_space()
        if (isinstance(action_space, gym.spaces.Box)):
            val = a_pd_tf.mean()
        elif (isinstance(action_space, gym.spaces.Discrete)):
            val = a_pd_tf.logits
        else:
            assert False, "Unsupported action space: " + str(self._env.action_space)

        loss = tf.reduce_sum(tf.square(val), axis=-1)
        loss = 0.5 * tf.reduce_mean(loss)
        return loss

    def _action_bound_loss(self, a_pd_tf):
        """Quadratic penalty on the policy mean exceeding the (normalized)
        action bounds; zero for discrete action spaces."""
        action_space = self.get_action_space()
        if (isinstance(action_space, gym.spaces.Box)):
            axis = -1
            a_bound_min = self.get_action_bound_min()
            a_bound_max = self.get_action_bound_max()
            assert(np.all(np.isfinite(a_bound_min)) and np.all(np.isfinite(a_bound_max))), "Actions must be bounded."

            norm_a_bound_min = self._a_norm.normalize(a_bound_min)
            norm_a_bound_max = self._a_norm.normalize(a_bound_max)

            val = a_pd_tf.mean()
            violation_min = tf.minimum(val - norm_a_bound_min, 0)
            violation_max = tf.maximum(val - norm_a_bound_max, 0)
            violation = tf.reduce_sum(tf.square(violation_min), axis=axis) \
                        + tf.reduce_sum(tf.square(violation_max), axis=axis)
            a_bound_loss = 0.5 * tf.reduce_mean(violation)
        else:
            a_bound_loss = tf.zeros(shape=[])
        return a_bound_loss

    def _action_entropy(self, a_pd_tf):
        """Mean entropy of the action distribution."""
        loss = a_pd_tf.entropy()
        loss = tf.reduce_mean(loss)
        return loss
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
import gym
import numpy as np
import tensorflow as tf
import time
import sac_dev.learning.nets.net_builder as net_builder
import sac_dev.learning.rl_agent as rl_agent
import sac_dev.learning.mpi_solver as mpi_solver
import sac_dev.util.rl_path as rl_path
import sac_dev.util.mpi_util as mpi_util
import sac_dev.util.net_util as net_util
import tqdm
'''
Soft Actor-Critic Agent
'''
class SACAgent(rl_agent.RLAgent):
ADV_EPS = 1e-5
def __init__(self,
env,
sess,
actor_net="fc_2layers_256units",
critic_net="fc_2layers_256units",
use_MPI_solver=False,
parallel_ensemble=False,
profile=False,
actor_stepsize=0.0003,
actor_init_output_scale=0.01,
actor_batch_size=256,
actor_steps=256,
action_std=0.2,
action_l2_weight=0.0,
action_entropy_weight=0.0,
num_critic_nets=2,
critic_stepsize=0.0003,
critic_batch_size=256,
critic_steps=256,
num_ensemble_subset=2,
discount=0.99,
samples_per_iter=512,
replay_buffer_size=50000,
normalizer_samples=300000,
enable_val_norm=False,
num_action_samples=1,
tar_stepsize=0.01,
steps_per_tar_update=1,
init_samples=25000,
visualize=False):
self._actor_net = actor_net
self._critic_net = critic_net
self._use_MPI_solver = use_MPI_solver
self._parallel_ensemble = parallel_ensemble
self._profile = profile
self._actor_stepsize = actor_stepsize
self._actor_init_output_scale = actor_init_output_scale
self._actor_batch_size = actor_batch_size
self._actor_steps = actor_steps
self._action_std = action_std
self._action_l2_weight = action_l2_weight
self._action_entropy_weight = action_entropy_weight
self._num_critic_nets = num_critic_nets
self._critic_stepsize = critic_stepsize
self._critic_batch_size = critic_batch_size
self._critic_steps = critic_steps
self._num_ensemble_subset = num_ensemble_subset
self._num_action_samples = num_action_samples
self._tar_stepsize = tar_stepsize
self._steps_per_tar_update = steps_per_tar_update
self._init_samples = init_samples
self._actor_bound_loss_weight = 10.0
super().__init__(env=env,
sess=sess,
discount=discount,
samples_per_iter=samples_per_iter,
replay_buffer_size=replay_buffer_size,
normalizer_samples=normalizer_samples,
enable_val_norm=enable_val_norm,
visualize=visualize)
return
def sample_action(self, s, test):
n = len(s.shape)
s = np.reshape(s, [-1, self.get_state_size()])
feed = {
self._s_ph : s
}
if (test):
run_tfs = [self._mode_a_tf, self._mode_a_logp_tf]
else:
run_tfs = [self._sample_a_tf, self._sample_a_logp_tf]
a, logp = self._sess.run(run_tfs, feed_dict=feed)
if n == 1:
a = a[0]
logp = logp[0]
return a, logp
def get_critic_steps(self):
if self._use_MPI_solver:
return self._critic_solver.get_iters()
else:
return self._critic_updates
def get_actor_steps(self):
if self._use_MPI_solver:
return self._actor_solver.get_iters()
else:
return self._actor_updates
def _build_nets(self):
s_size = self.get_state_size()
a_size = self.get_action_size()
action_space = self.get_action_space()
self._build_dataset(s_size, a_size)
self._s_ph = tf.placeholder(tf.float32, shape=[None, s_size], name="s")
self._a_ph = tf.placeholder(tf.float32, shape=[None, a_size], name="a")
self._tar_val_ph = tf.placeholder(tf.float32, shape=[None], name="tar_val")
self._r_ph = tf.placeholder(tf.float32, shape=[None], name="r")
self._next_s_ph = tf.placeholder(tf.float32, shape=[None, s_size], name="next_s")
self._terminate_ph = tf.placeholder(tf.int32, shape=[None], name="terminate")
norm_s_tf = self._s_norm.normalize_tf(self._s_ph)
norm_a_tf = self._a_norm.normalize_tf(self._a_ph)
norm_next_s_tf = self._s_norm.normalize_tf(self._next_s_ph)
with tf.variable_scope(self.MAIN_SCOPE):
self._norm_a_pd_tf = self._build_net_actor(net_name=self._actor_net, input_tfs=[norm_s_tf])
self._sample_norm_a_tfs = [self._norm_a_pd_tf.sample() for _ in range(self._num_action_samples)]
self._sample_norm_a_tfs = tf.stack(self._sample_norm_a_tfs, axis=-2)
self._norm_critic_tf, self._norm_critic_tfs = self._build_net_critic(net_name=self._critic_net, input_tfs=[norm_s_tf, norm_a_tf],
num_nets=self._num_critic_nets)
self._critic_tf = self._val_norm.unnormalize_tf(self._norm_critic_tf)
critic_sample_input_tfs = [tf.stack([norm_s_tf] * self._num_action_samples, axis=-2), self._sample_norm_a_tfs]
self._sample_norm_critic_tf, _ = self._build_net_critic(net_name=self._critic_net, input_tfs=critic_sample_input_tfs,
num_nets=self._num_critic_nets, reuse=True)
with tf.variable_scope(self.TARGET_SCOPE):
self._next_norm_a_pd_tf = self._build_net_actor(net_name=self._actor_net, input_tfs=[norm_next_s_tf])
self._sample_next_norm_a_tfs = [self._next_norm_a_pd_tf.sample() for _ in range(self._num_action_samples)]
self._sample_next_norm_a_tfs = tf.stack(self._sample_next_norm_a_tfs, axis=-2)
critic_sample_next_input_tfs = [tf.stack([norm_next_s_tf] * self._num_action_samples, axis=-2), self._sample_next_norm_a_tfs]
self._sample_next_norm_critic_tar_tf, _ = self._build_net_critic(net_name=self._critic_net, input_tfs=critic_sample_next_input_tfs,
num_nets=self._num_critic_nets)
sample_norm_a_tf = self._norm_a_pd_tf.sample()
self._sample_a_tf = self._a_norm.unnormalize_tf(sample_norm_a_tf)
self._sample_a_logp_tf = self._norm_a_pd_tf.log_prob(sample_norm_a_tf)
mode_norm_a_tf = self._norm_a_pd_tf.mode()
self._mode_a_tf = self._a_norm.unnormalize_tf(mode_norm_a_tf)
self._mode_a_logp_tf = self._norm_a_pd_tf.log_prob(mode_norm_a_tf)
main_critic_vars = self._tf_vars(self.MAIN_SCOPE + "/" + self.CRITIC_SCOPE)
main_actor_vars = self._tf_vars(self.MAIN_SCOPE + "/" + self.ACTOR_SCOPE)
tar_critic_vars = self._tf_vars(self.TARGET_SCOPE + "/" + self.CRITIC_SCOPE)
tar_actor_vars = self._tf_vars(self.TARGET_SCOPE + "/" + self.ACTOR_SCOPE)
assert len(main_critic_vars) == len(tar_critic_vars)
assert len(main_actor_vars) == len(tar_actor_vars)
self._sync_tar_vars_op = list(map(lambda v : v[0].assign(v[1]), zip(tar_critic_vars + tar_actor_vars, main_critic_vars + main_actor_vars)))
self._update_critic_tar_vars_op = list(map(lambda v : v[0].assign((1.0 - self._tar_stepsize) * v[0] + self._tar_stepsize * v[1]), zip(tar_critic_vars, main_critic_vars)))
self._update_actor_tar_vars_op = list(map(lambda v : v[0].assign((1.0 - self._tar_stepsize) * v[0] + self._tar_stepsize * v[1]), zip(tar_actor_vars, main_actor_vars)))
return
    def _build_dataset(self, s_size, a_size):
        """Builds the tf.data input pipeline over the full replay buffer.

        Creates placeholders for all transitions (s, a, r, s', terminate) and a
        repeating, shuffled pipeline that yields per-process minibatches via
        `self._input_op`.

        Args:
            s_size: Dimension of the state/observation vector.
            a_size: Dimension of the action vector.
        """
        self._all_s_ph = tf.placeholder(tf.float32, shape=[None, s_size], name="all_s")
        self._all_a_ph = tf.placeholder(tf.float32, shape=[None, a_size], name="all_a")
        self._all_r_ph = tf.placeholder(tf.float32, shape=[None], name="all_r")
        self._all_next_s_ph = tf.placeholder(tf.float32, shape=[None, s_size], name="all_next_s")
        self._all_terminate_ph = tf.placeholder(tf.int32, shape=[None], name="all_terminate")
        train_dataset = tf.data.Dataset.from_tensor_slices((
            self._all_s_ph, self._all_a_ph, self._all_r_ph, self._all_next_s_ph, self._all_terminate_ph))
        # Reshuffle the whole buffer each pass so minibatches stay decorrelated.
        train_dataset = train_dataset.repeat().shuffle(buffer_size=self._replay_buffer._buffer_size, reshuffle_each_iteration=True)
        # Split the global batch size evenly across MPI processes.
        num_procs = mpi_util.get_num_procs()
        local_batch_size = int(np.ceil(self._critic_batch_size / num_procs))
        train_dataset = train_dataset.batch(local_batch_size, drop_remainder=True)
        train_dataset = train_dataset.prefetch(tf.data.experimental.AUTOTUNE)
        self._iterator = tf.compat.v1.data.make_initializable_iterator(train_dataset)
        self._input_op = self._iterator.get_next()
    def _build_losses(self):
        """Builds critic and actor loss tensors.

        Critic: each ensemble member regresses onto the one-step TD target
        computed from the target networks. Actor: maximize the (min-over-
        ensemble) critic value of sampled actions, with optional action-bound,
        L2, and entropy regularizers.
        """
        # Failure terminations bootstrap with a fixed value of 0.
        val_fail = 0.0
        # Average the target-critic ensemble over the sampled next actions.
        norm_next_val_tf = tf.reduce_mean(self._sample_next_norm_critic_tar_tf, axis=-1)
        next_val_tf = self._val_norm.unnormalize_tf(norm_next_val_tf)
        next_val_tf = tf.where(tf.math.equal(self._terminate_ph, rl_path.Terminate.Fail.value), val_fail * tf.ones_like(next_val_tf), next_val_tf)
        # No gradient flows through the bootstrap target.
        next_val_tf = tf.stop_gradient(next_val_tf)
        tar_val_tf = self._r_ph + self._discount * next_val_tf
        norm_tar_val_tf = self._val_norm.normalize_tf(tar_val_tf)
        # Broadcast the single target against every critic in the ensemble.
        norm_tar_val_tf = tf.expand_dims(norm_tar_val_tf, axis=0)
        norm_val_diff = norm_tar_val_tf - self._norm_critic_tfs
        self._critic_loss_tf = 0.5 * tf.reduce_mean(tf.reduce_sum(tf.square(norm_val_diff), axis=0))
        self._actor_loss_tf = -tf.reduce_mean(self._sample_norm_critic_tf)
        if (self._actor_bound_loss_weight != 0.0):
            self._actor_loss_tf += self._actor_bound_loss_weight * self._action_bound_loss(self._norm_a_pd_tf)
        if (self._action_l2_weight != 0):
            self._actor_loss_tf += self._action_l2_weight * self._action_l2_loss(self._norm_a_pd_tf)
        self._entropy_tf = self._action_entropy(self._norm_a_pd_tf)
        if (self._action_entropy_weight != 0):
            # Entropy bonus: subtracting entropy from the loss encourages exploration.
            self._actor_loss_tf += -self._action_entropy_weight * self._entropy_tf
        return
    def _build_solvers(self):
        """Builds Adam optimizers for actor and critic.

        With MPI enabled, gradient tensors are exposed and wrapped in
        MPISolver so gradients are averaged across workers; otherwise plain
        in-graph minimize ops are used.
        """
        critic_vars = self._tf_vars(self.MAIN_SCOPE + "/" + self.CRITIC_SCOPE)
        critic_opt = tf.train.AdamOptimizer(learning_rate=self._critic_stepsize)
        actor_vars = self._tf_vars(self.MAIN_SCOPE + "/" + self.ACTOR_SCOPE)
        actor_opt = tf.train.AdamOptimizer(learning_rate=self._actor_stepsize)
        if self._use_MPI_solver:
            # Gradients are computed locally and averaged across workers by the solver.
            self._critic_grad_tf = tf.gradients(self._critic_loss_tf, critic_vars)
            self._critic_solver = mpi_solver.MPISolver(self._sess, critic_opt, critic_vars)
            self._actor_grad_tf = tf.gradients(self._actor_loss_tf, actor_vars)
            self._actor_solver = mpi_solver.MPISolver(self._sess, actor_opt, actor_vars)
        else:
            # Single-process path: counters track updates (MPISolver counts its own).
            self._critic_train_op = critic_opt.minimize(self._critic_loss_tf, var_list=critic_vars)
            self._actor_train_op = actor_opt.minimize(self._actor_loss_tf, var_list=actor_vars)
            self._critic_updates = 0
            self._actor_updates = 0
        return
    def _build_net_actor(self, net_name, input_tfs, reuse=False):
        """Builds the actor network and its normalized action distribution.

        Args:
            net_name: Name of the network architecture to build.
            input_tfs: List of input tensors (e.g. the normalized state).
            reuse: Whether to reuse variables already created in the actor scope.

        Returns:
            Distribution over normalized actions; the mean is tanh-squashed.
        """
        with tf.variable_scope(self.ACTOR_SCOPE, reuse=reuse):
            h = net_builder.build_net(net_name=net_name, input_tfs=input_tfs, reuse=reuse)
            norm_a_pd_tf = self._build_action_pd(input_tf=h, init_output_scale=self._actor_init_output_scale,
                                                 mean_activation=tf.math.tanh, reuse=reuse)
        return norm_a_pd_tf
def _build_net_critic(self, net_name, input_tfs, num_nets, reuse=False):
out_size = 1
norm_val_tfs = []
with tf.variable_scope(self.CRITIC_SCOPE, reuse=reuse):
for i in range(num_nets):
with tf.variable_scope(str(i), reuse=reuse):
h = net_builder.build_net(net_name=net_name, input_tfs=input_tfs, reuse=reuse)
curr_norm_val_tf = tf.layers.dense(inputs=h, units=out_size, activation=None,
kernel_initializer=tf.contrib.layers.xavier_initializer(),
reuse=reuse);
curr_norm_val_tf = tf.squeeze(curr_norm_val_tf, axis=-1)
norm_val_tfs.append(curr_norm_val_tf)
norm_val_tfs = tf.stack(norm_val_tfs)
ensemble_subset = norm_val_tfs
if self._num_ensemble_subset < num_nets:
redq_idxs = tf.range(num_nets)
redq_ridxs = tf.random.shuffle(redq_idxs)[:self._num_ensemble_subset]
ensemble_subset = tf.gather(ensemble_subset, redq_ridxs)
norm_val_tf = tf.reduce_min(ensemble_subset, axis=0)
return norm_val_tf, norm_val_tfs
def _init_vars(self):
super()._init_vars()
if self._use_MPI_solver:
self._sync_solvers()
self._sync_tar_vars()
return
    def _sync_solvers(self):
        """Broadcasts actor and critic parameters from the root MPI worker."""
        self._actor_solver.sync()
        self._critic_solver.sync()
        return
    def _sync_tar_vars(self):
        """Hard-copies main network parameters into the target networks."""
        self._sess.run(self._sync_tar_vars_op)
        return
    def _update_critic_tar_vars(self):
        """Polyak-averages the target critic towards the main critic."""
        self._sess.run(self._update_critic_tar_vars_op)
        return
    def _update_actor_tar_vars(self):
        """Polyak-averages the target actor towards the main actor."""
        self._sess.run(self._update_actor_tar_vars_op)
        return
def _init_train(self):
super()._init_train()
self._collect_init_samples(self._init_samples)
return
def _collect_init_samples(self, max_samples):
print("Collecting {} initial samples".format(max_samples))
sample_count = 0
next_benchmark = 1000
update_normalizer = self._enable_normalizer_update(sample_count)
start_time = time.time()
while (sample_count < max_samples):
_, _, new_sample_count, _ = self._rollout_train(1, update_normalizer)
new_sample_count = mpi_util.reduce_sum(new_sample_count)
sample_count += new_sample_count
if (self._need_normalizer_update()):
self._update_normalizers()
print("samples: {}/{}".format(sample_count, max_samples))
if sample_count >= next_benchmark:
print("Collected {} initial samples in {} sec".format(
sample_count, time.time() - start_time))
next_benchmark += 1000
return sample_count
def _update(self, iter, new_sample_count):
assert self._critic_batch_size == self._actor_batch_size
num_procs = mpi_util.get_num_procs()
local_batch_size = int(np.ceil(self._critic_batch_size / num_procs))
all_idx = self._replay_buffer.get_valid_idx()
all_next_idx = self._replay_buffer.get_next_idx(all_idx)
all_s = self._replay_buffer.get("states", all_idx)
all_a = self._replay_buffer.get("actions", all_idx)
all_r = self._replay_buffer.get("rewards", all_idx)
all_next_s = self._replay_buffer.get("states", all_next_idx)
all_terminate = self._replay_buffer.get("terminate", all_next_idx)
feed = {
self._all_s_ph: all_s,
self._all_a_ph: all_a,
self._all_r_ph: all_r,
self._all_next_s_ph: all_next_s,
self._all_terminate_ph: all_terminate
}
self._sess.run(self._iterator.initializer, feed_dict=feed)
critic_steps = int(np.ceil(self._critic_steps * new_sample_count / self._critic_batch_size))
critic_info = self._update_critic(critic_steps, self._input_op)
actor_steps = int(np.ceil(self._actor_steps * new_sample_count / self._actor_batch_size))
actor_info = self._update_actor(actor_steps, self._input_op)
critic_info = mpi_util.reduce_dict_mean(critic_info)
actor_info = mpi_util.reduce_dict_mean(actor_info)
self._log(
{
"Critic/Critic_Loss": critic_info["loss"],
"Critic/Critic_Steps": self.get_critic_steps(),
"Critic/Critic_Time_Per_Update": critic_info["update (s)"],
"Actor/Actor_Loss": actor_info["loss"],
"Actor/Actor_Entropy": actor_info["entropy"],
"Actor/Actor_Steps": self.get_actor_steps(),
}, iter)
info = {"critic_info": critic_info, "actor_info": actor_info}
return info
def _update_critic(self, steps, input_op):
num_procs = mpi_util.get_num_procs()
local_batch_size = int(np.ceil(self._critic_batch_size / num_procs))
info = None
start = time.time()
for b in tqdm.trange(steps):
s, a, r, next_s, terminate = self._sess.run(self._input_op)
curr_info = self._step_critic(s=s, a=a, r=r, next_s=next_s, terminate=terminate)
if (self.get_critic_steps() % self._steps_per_tar_update == 0):
self._update_critic_tar_vars()
if (info is None):
info = curr_info
else:
for k, v in curr_info.items():
info[k] += v
critic_update_time = time.time() - start
info["update (s)"] = critic_update_time
for k in info.keys():
info[k] /= steps
return info
def _update_actor(self, steps, input_op):
info = None
num_procs = mpi_util.get_num_procs()
local_batch_size = int(np.ceil(self._actor_batch_size / num_procs))
for b in tqdm.trange(steps):
s, _, _, _, _ = self._sess.run(self._input_op)
curr_info = self._step_actor(s=s)
if (self.get_actor_steps() % self._steps_per_tar_update == 0):
self._update_actor_tar_vars()
if (info is None):
info = curr_info
else:
for k, v in curr_info.items():
info[k] += v
for k in info.keys():
info[k] /= steps
return info
def _step_critic(self, s, a, r, next_s, terminate):
feed = {
self._s_ph: s,
self._a_ph: a,
self._r_ph: r,
self._next_s_ph: next_s,
self._terminate_ph: terminate
}
if self._use_MPI_solver:
run_tfs = [self._critic_grad_tf, self._critic_loss_tf]
else:
run_tfs = [self._critic_train_op, self._critic_loss_tf]
results = self._sess.run(run_tfs, feed)
if self._use_MPI_solver:
self._critic_solver.update(results[0])
else:
self._critic_updates += 1
info = {"loss": results[1]}
return info
def _step_actor(self, s):
feed = {
self._s_ph: s,
}
if self._use_MPI_solver:
run_tfs = [self._actor_grad_tf, self._actor_loss_tf, self._entropy_tf]
else:
run_tfs = [self._actor_train_op, self._actor_loss_tf, self._entropy_tf]
results = self._sess.run(run_tfs, feed)
if self._use_MPI_solver:
self._actor_solver.update(results[0])
else:
self._actor_updates += 1
info = {
"loss": results[1],
"entropy": results[2],
}
return info
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/sac_dev/learning/__init__.py | sac_dev/learning/__init__.py | from . import * | python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/sac_dev/learning/mpi_solver.py | sac_dev/learning/mpi_solver.py | import tensorflow as tf
import numpy as np
import sac_dev.util.tf_util as TFUtil
import sac_dev.util.mpi_util as MPIUtil
from sac_dev.util.logger import Logger
class MPISolver():
    """Data-parallel wrapper around a TF optimizer.

    Each MPI worker computes local gradients; `update` averages them across
    all processes and applies the mean, so every worker takes identical steps
    and parameters stay replicated.
    """

    # How often (in updates) to verify parameters match across workers.
    CHECK_SYNC_ITERS = 1000

    def __init__(self, sess, optimizer, vars):
        """Builds gradient placeholders, the apply op, and flat accessors.

        Args:
            sess: TF session used to run the update ops.
            optimizer: TF optimizer whose apply_gradients op is used.
            vars: List of TF variables to optimize.
        """
        self._vars = vars
        self._sess = sess
        self._optimizer = optimizer
        self._build_grad_feed(vars)
        self._update = optimizer.apply_gradients(zip(self._grad_ph_list, self._vars))
        # Helpers to read/write all variables as one flat vector (for sync).
        self._set_flat_vars = TFUtil.SetFromFlat(sess, self._vars)
        self._get_flat_vars = TFUtil.GetFlat(sess, self._vars)
        self._iters = 0
        grad_dim = self._calc_grad_dim()
        # Preallocated buffers for the local and globally averaged gradients.
        self._flat_grad = np.zeros(grad_dim, dtype=np.float32)
        self._global_flat_grad = np.zeros(grad_dim, dtype=np.float32)
        self.reset()
        return

    def get_stepsize(self):
        """Returns the learning rate of the wrapped optimizer.

        Reads private optimizer attributes, so only Momentum and Adam are
        supported.
        """
        stepsize = None
        if (isinstance(self._optimizer, tf.train.MomentumOptimizer)):
            stepsize = self._optimizer._learning_rate
        elif (isinstance(self._optimizer, tf.train.AdamOptimizer)):
            stepsize = self._optimizer._lr
        else:
            assert False, "Unsupported optimizer"
        return stepsize

    def update(self, grads, grad_scale=1.0):
        """Flattens per-variable grads and applies one distributed update."""
        self._flat_grad = np.concatenate([np.reshape(g, [-1]) for g in grads], axis=0)
        return self.update_flatgrad(self._flat_grad, grad_scale)

    def update_flatgrad(self, flat_grad, grad_scale=1.0):
        """Averages a flat gradient across workers and applies it.

        Args:
            flat_grad: 1D gradient vector over all variables; scaled in place
                when grad_scale != 1.
            grad_scale: Optional scaling applied before averaging.
        """
        # Periodically verify no worker has drifted out of sync.
        if self._iters % self.CHECK_SYNC_ITERS == 0:
            # NOTE(review): Logger.print presumably returns None, so the
            # assert message is None; the call still prints on failure.
            assert self._check_synced(), Logger.print("Network parameters desynchronized")
        if grad_scale != 1.0:
            flat_grad *= grad_scale
        # Sum across workers, then divide by the worker count to get the mean.
        MPIUtil.reduce_sum_inplace(flat_grad,
                                   destination=self._global_flat_grad)
        self._global_flat_grad /= MPIUtil.get_num_procs()
        self._load_flat_grad(self._global_flat_grad)
        self._sess.run([self._update], self._grad_feed)
        self._iters += 1
        return

    def reset(self):
        """Resets the update counter."""
        self._iters = 0
        return

    def get_iters(self):
        """Returns the number of updates applied so far."""
        return self._iters

    def sync(self):
        """Broadcasts the root worker's parameters to all other workers."""
        vars = self._get_flat_vars()
        MPIUtil.bcast(vars)
        self._set_flat_vars(vars)
        assert(self._check_synced()), Logger.print("Network parameters desynchronized")
        return

    def _is_root(self):
        # True on the designated root MPI process.
        return MPIUtil.is_root_proc()

    def _build_grad_feed(self, vars):
        """Builds per-variable gradient placeholders, numpy buffers, and the
        feed dict used when running the apply-gradients op."""
        self._grad_ph_list = []
        self._grad_buffers = []
        for v in self._vars:
            shape = v.get_shape()
            grad = np.zeros(shape)
            grad_ph = tf.placeholder(tf.float32, shape=shape)
            self._grad_buffers.append(grad)
            self._grad_ph_list.append(grad_ph)
        self._grad_feed = dict({g_tf: g for g_tf, g in zip(self._grad_ph_list, self._grad_buffers)})
        return

    def _calc_grad_dim(self):
        """Returns the total number of gradient elements over all variables."""
        grad_dim = 0
        for grad in self._grad_buffers:
            grad_dim += grad.size
        return grad_dim

    def _load_flat_grad(self, flat_grad):
        """Unflattens flat_grad into the per-variable buffers (in place)."""
        start = 0
        for g in self._grad_buffers:
            size = g.size
            np.copyto(g, np.reshape(flat_grad[start:start + size], g.shape))
            start += size
        return

    def _check_synced(self):
        """Returns True if this worker's parameters match the root's.

        The root broadcasts its parameters and always reports True; other
        workers compare element-wise against the received vector.
        """
        synced = True
        if self._is_root():
            vars = self._get_flat_vars()
            MPIUtil.bcast(vars)
        else:
            vars_local = self._get_flat_vars()
            vars_root = np.empty_like(vars_local)
            MPIUtil.bcast(vars_root)
            synced = (vars_local == vars_root).all()
        return synced
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/sac_dev/learning/nets/fc_2layers_256units.py | sac_dev/learning/nets/fc_2layers_256units.py | import tensorflow as tf
import sac_dev.util.net_util as net_util
NAME = "fc_2layers_256units"
def build_net(input_tfs, reuse=False):
    """Builds a fully-connected torso with 256- and 128-unit ReLU layers."""
    return net_util.build_fc_net(input_tfs=input_tfs, layers=[256, 128],
                                 activation=tf.nn.relu, reuse=reuse)
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/sac_dev/learning/nets/net_builder.py | sac_dev/learning/nets/net_builder.py | import sac_dev.learning.nets.fc_2layers_256units as fc_2layers_256units
import sac_dev.learning.nets.fc_2layers_512units as fc_2layers_512units
def build_net(net_name, input_tfs, reuse=False):
    """Builds the network architecture registered under net_name."""
    builders = {
        fc_2layers_256units.NAME: fc_2layers_256units.build_net,
        fc_2layers_512units.NAME: fc_2layers_512units.build_net,
    }
    assert net_name in builders, 'Unsupported net: ' + net_name
    return builders[net_name](input_tfs, reuse)
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/sac_dev/learning/nets/fc_2layers_512units.py | sac_dev/learning/nets/fc_2layers_512units.py | import tensorflow as tf
import sac_dev.util.net_util as net_util
NAME = "fc_2layers_512units"
def build_net(input_tfs, reuse=False):
    """Builds a fully-connected torso with 512- and 256-unit ReLU layers."""
    return net_util.build_fc_net(input_tfs=input_tfs, layers=[512, 256],
                                 activation=tf.nn.relu, reuse=reuse)
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/sac_dev/learning/nets/__init__.py | sac_dev/learning/nets/__init__.py | from . import * | python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/phasespace/grpc_stream_client.py | phasespace/grpc_stream_client.py | """Basic python library to receive motion capture data over GRPC.
Basic usage:
client = grpc_stream_client.GrpcStreamClient(server_address_and_port)
stream = client.get_marker_data()
for data in stream:
# Do something with marker data in data.
# This loop should be able to run at least at the streaming frequency (960Hz).
"""
import os
import inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
os.sys.path.insert(0, parentdir)
from absl import app
from absl import flags
from absl import logging
import grpc
from phasespace import marker_data_pb2
from phasespace import marker_data_pb2_grpc
# --mocap_grpc_server selects which MarkerTracker server to stream from.
flags.DEFINE_string(
    'mocap_grpc_server', 'localhost:12345',
    'Hostname and port of the gRPC server serving marker_data protos.')
FLAGS = flags.FLAGS
class GrpcStreamClient(object):
  """Class to connect to GRPC server with MarkersData."""

  def __init__(self, server=None):
    """Creates a GrpcStreamClient instance that connects to the given server.

    Args:
      server: hostname and port (string) of the gRPC server to connect to
        (e.g. "localhost:12345").
    """
    logging.set_verbosity(logging.INFO)
    logging.info('Connecting to stream from %s', server)
    logging.warning('Using insecure GRPC channel.')
    self.channel = grpc.insecure_channel(server)
    # Blocks until the channel is connected; no timeout is given, so this
    # can wait indefinitely if the server is unreachable.
    grpc.channel_ready_future(self.channel).result()
    self.stub = marker_data_pb2_grpc.MarkerTrackerStub(self.channel)
    # Empty request params proto, reused for every RPC.
    self.request = marker_data_pb2.MarkerTrackerParams()
    logging.info('Connected')

  def get_marker_data(self):
    """This is a generator. Every call will yield a new MarkerData object.

    This generator should be called at least as fast as the streaming frequency
    (typically 960Hz) to ensure the latest MarkerData object is returned.
    See grpc_stream_client_multiprocessing.py for an easy to use wrapper that
    automatically provides the most recent MarkerData object without hogging the
    main loop.

    Yields:
      MarkerData: A MarkerData proto streamed from the server.

    Raises:
      grpc.RpcError: When the server dies/gets disconnected.
    """
    logging.info('Getting GRPC response')
    response = self.stub.TrackMarkers(self.request)
    for markers in response:
      yield markers

  def get_marker_data_adhoc(self):
    """This function uses an alternative MarkerTracker API.

    Retrieves MarkerData on an adhoc basis, as opposed to streaming.
    Blocking until data is received from the server.

    Returns:
      A single proto of the most recent MarkerData the server has.
    """
    return self.stub.GetLatestMarkerData(self.request)
def main(argv):
  """Connects to the configured mocap server and prints each streamed proto."""
  del argv  # Unused.
  stream_client = GrpcStreamClient(server=FLAGS.mocap_grpc_server)
  for markers_proto in stream_client.get_marker_data():
    print(markers_proto)


if __name__ == '__main__':
  app.run(main)
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/phasespace/marker_data_pb2_grpc.py | phasespace/marker_data_pb2_grpc.py | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from phasespace import marker_data_pb2 as marker__data__pb2
class MarkerTrackerStub(object):
    """Client-side stub for the MarkerTracker gRPC service.

    Generated code: exposes TrackMarkers (server-streaming) and
    GetLatestMarkerData (unary), both taking MarkerTrackerParams and
    returning MarkersData protos.
    """

    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        self.TrackMarkers = channel.unary_stream(
                '/robotics.infrastructure.motion_capture.phasespace.MarkerTracker/TrackMarkers',
                request_serializer=marker__data__pb2.MarkerTrackerParams.SerializeToString,
                response_deserializer=marker__data__pb2.MarkersData.FromString,
                )
        self.GetLatestMarkerData = channel.unary_unary(
                '/robotics.infrastructure.motion_capture.phasespace.MarkerTracker/GetLatestMarkerData',
                request_serializer=marker__data__pb2.MarkerTrackerParams.SerializeToString,
                response_deserializer=marker__data__pb2.MarkersData.FromString,
                )
class MarkerTrackerServicer(object):
    """Server-side interface for the MarkerTracker gRPC service.

    Generated code: subclass and override the methods below to implement
    the service; the defaults report UNIMPLEMENTED.
    """

    def TrackMarkers(self, request, context):
        """Streams MarkerData protobufs.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def GetLatestMarkerData(self, request, context):
        """Retrieves the latest singular instance of MarkerData.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_MarkerTrackerServicer_to_server(servicer, server):
    """Registers the servicer's RPC handlers with a grpc.Server (generated)."""
    rpc_method_handlers = {
            'TrackMarkers': grpc.unary_stream_rpc_method_handler(
                    servicer.TrackMarkers,
                    request_deserializer=marker__data__pb2.MarkerTrackerParams.FromString,
                    response_serializer=marker__data__pb2.MarkersData.SerializeToString,
            ),
            'GetLatestMarkerData': grpc.unary_unary_rpc_method_handler(
                    servicer.GetLatestMarkerData,
                    request_deserializer=marker__data__pb2.MarkerTrackerParams.FromString,
                    response_serializer=marker__data__pb2.MarkersData.SerializeToString,
            ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
            'robotics.infrastructure.motion_capture.phasespace.MarkerTracker', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class MarkerTracker(object):
    """Static, stub-free helpers for invoking MarkerTracker RPCs.

    Generated code, part of gRPC's EXPERIMENTAL single-call API: each
    method dials `target` and performs one RPC.
    """

    @staticmethod
    def TrackMarkers(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_stream(request, target, '/robotics.infrastructure.motion_capture.phasespace.MarkerTracker/TrackMarkers',
            marker__data__pb2.MarkerTrackerParams.SerializeToString,
            marker__data__pb2.MarkersData.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def GetLatestMarkerData(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/robotics.infrastructure.motion_capture.phasespace.MarkerTracker/GetLatestMarkerData',
            marker__data__pb2.MarkerTrackerParams.SerializeToString,
            marker__data__pb2.MarkersData.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/phasespace/phasespace_robot_tracker.py | phasespace/phasespace_robot_tracker.py | """Class to read in motion capture data and output position and orientation."""
from typing import Sequence, Text
import numpy as np
from phasespace import grpc_stream_client
from pybullet_utils import transformations
_ARRAY = Sequence[float]
MILLIMETER_TO_METER = 0.001
# ID that Phasespace has assigned to each marker.
FRONT_LEFT_ID = 3
FRONT_RIGHT_ID = 7
BACK_LEFT_ID = 5
BACK_RIGHT_ID = 1
MARKER_DICT = {
FRONT_LEFT_ID: 0,
FRONT_RIGHT_ID: 1,
BACK_LEFT_ID: 2,
BACK_RIGHT_ID: 3
}
class PhaseSpaceRobotTracker(object):
  """Reads in motion capture data and outputs position and orientation.

  Uses a rigid-body track if the server provides one; otherwise derives the
  base pose from four individual markers on the robot's base.
  """

  def __init__(self, server: Text = "localhost:12345"):
    """Constructor.

    Args:
      server: Hostname and port of the gRPC server outputting marker data protos
        (e.g. "localhost:12345").
    """
    self._client = grpc_stream_client.GrpcStreamClient(server)
    # If the markers have been configured as a rigid body, set this.
    self.rigid = None
    num_markers = 4 # Assume four markers are attached to the robot's base.
    # (x, y, z) -> 3 coordinates for each marker.
    self._current_marker_positions = np.zeros(num_markers * 3)

  def update(self) -> None:
    """Push the current marker positions to the last and update the current."""
    marker_data = self._client.get_marker_data_adhoc()
    if marker_data is not None:
      # Rigid-body data takes precedence over individual markers when present.
      if marker_data.rigids:
        self.rigid = marker_data.rigids[0]
      else:
        self._update_current_marker_positions(marker_data)

  def _update_current_marker_positions(self, marker_data):
    """Updates the current marker positions by marker indices."""
    for marker in marker_data.markers:
      # Each marker occupies a fixed 3-element slot determined by MARKER_DICT.
      index = MARKER_DICT[marker.marker_id] * 3
      pos = np.array((marker.position_mm.x, marker.position_mm.y,
                      marker.position_mm.z)) * MILLIMETER_TO_METER
      self._current_marker_positions[index:index + len(pos)] = pos

  def get_base_position(self) -> _ARRAY:
    """Returns the base position of the robot in meters."""
    if self.rigid:
      pos = np.array((self.rigid.position_mm.x, self.rigid.position_mm.y,
                      self.rigid.position_mm.z)) * MILLIMETER_TO_METER
    else:
      # Centroid of the four base markers.
      pos = np.mean(self._current_marker_positions.reshape(-1, 3), axis=0)
    return pos

  def _get_rotation_matrix(self) -> _ARRAY:
    """Returns robot's current orientation.

    Builds an orthonormal (forward, left, up) frame from the midpoints of
    the marker pairs and returns it as a rotation matrix (columns are the
    frame axes).
    """
    front_left_id = MARKER_DICT[FRONT_LEFT_ID]
    front_right_id = MARKER_DICT[FRONT_RIGHT_ID]
    back_left_id = MARKER_DICT[BACK_LEFT_ID]
    back_right_id = MARKER_DICT[BACK_RIGHT_ID]
    assert self._current_marker_positions.shape[0] >= 4 * 3
    front_left_pos = self._current_marker_positions[
        (front_left_id * 3):((front_left_id + 1) * 3)]
    front_right_pos = self._current_marker_positions[
        (front_right_id * 3):((front_right_id + 1) * 3)]
    back_left_pos = self._current_marker_positions[
        (back_left_id * 3):((back_left_id + 1) * 3)]
    back_right_pos = self._current_marker_positions[
        (back_right_id * 3):((back_right_id + 1) * 3)]
    # Forward: front midpoint minus back midpoint; left: left minus right.
    forward = 0.5 * (front_left_pos + front_right_pos) \
        - 0.5 * (back_left_pos + back_right_pos)
    left = 0.5 * (front_left_pos + back_left_pos) \
        - 0.5 * (front_right_pos + back_right_pos)
    # Cross products re-orthogonalize the frame before normalization.
    up = np.cross(forward, left)
    left = np.cross(up, forward)
    forward /= np.linalg.norm(forward)
    up /= np.linalg.norm(up)
    left /= np.linalg.norm(left)
    return np.transpose(np.array([forward, left, up]))

  def get_base_orientation(self) -> _ARRAY:
    """Returns base orientation of the robot as quaternion."""
    if self.rigid:
      # Rigid-body path returns components ordered (x, y, z, w).
      return np.array((self.rigid.quat.x, self.rigid.quat.y, self.rigid.quat.z,
                       self.rigid.quat.w))
    # NOTE(review): confirm transformations.quaternion_from_matrix uses the
    # same (x, y, z, w) component order as the rigid-body path above.
    return transformations.quaternion_from_matrix(self._get_rotation_matrix())

  def get_base_roll_pitch_yaw(self) -> _ARRAY:
    """Returns base orientation of the robot in radians."""
    if self.rigid:
      return transformations.euler_from_quaternion(self.get_base_orientation())
    return transformations.euler_from_matrix(self._get_rotation_matrix())
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
lauramsmith/fine-tuning-locomotion | https://github.com/lauramsmith/fine-tuning-locomotion/blob/583f1de43e91cdd24d632d783872528eb1337480/phasespace/marker_data_pb2.py | phasespace/marker_data_pb2.py | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: marker_data.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='marker_data.proto',
package='robotics.infrastructure.motion_capture.phasespace',
syntax='proto3',
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x11marker_data.proto\x12\x31robotics.infrastructure.motion_capture.phasespace\"+\n\tTimestamp\x12\x0f\n\x07seconds\x18\x01 \x01(\x03\x12\r\n\x05nanos\x18\x02 \x01(\x05\"6\n\x08Vector4f\x12\t\n\x01x\x18\x01 \x01(\x02\x12\t\n\x01y\x18\x02 \x01(\x02\x12\t\n\x01z\x18\x03 \x01(\x02\x12\t\n\x01w\x18\x04 \x01(\x02\"+\n\x08Vector3f\x12\t\n\x01x\x18\x01 \x01(\x02\x12\t\n\x01y\x18\x02 \x01(\x02\x12\t\n\x01z\x18\x03 \x01(\x02\"\xb6\x01\n\nMarkerData\x12\x11\n\tmarker_id\x18\x02 \x01(\x05\x12P\n\x0bposition_mm\x18\x03 \x01(\x0b\x32;.robotics.infrastructure.motion_capture.phasespace.Vector3f\x12\x16\n\x0einternal_flags\x18\x04 \x01(\r\x12\x15\n\rinternal_time\x18\x05 \x01(\x03\x12\x14\n\x0cinternalcond\x18\x06 \x01(\x02\"\x81\x02\n\tRigidData\x12\x12\n\ntracker_id\x18\x02 \x01(\x05\x12P\n\x0bposition_mm\x18\x03 \x01(\x0b\x32;.robotics.infrastructure.motion_capture.phasespace.Vector3f\x12I\n\x04quat\x18\x07 \x01(\x0b\x32;.robotics.infrastructure.motion_capture.phasespace.Vector4f\x12\x16\n\x0einternal_flags\x18\x04 \x01(\r\x12\x15\n\rinternal_time\x18\x05 \x01(\x03\x12\x14\n\x0cinternalcond\x18\x06 \x01(\x02\"\x93\x02\n\x0bMarkersData\x12O\n\ttimestamp\x18\x01 \x01(\x0b\x32<.robotics.infrastructure.motion_capture.phasespace.Timestamp\x12N\n\x07markers\x18\x02 \x03(\x0b\x32=.robotics.infrastructure.motion_capture.phasespace.MarkerData\x12L\n\x06rigids\x18\x04 \x03(\x0b\x32<.robotics.infrastructure.motion_capture.phasespace.RigidData\x12\x15\n\rinternal_time\x18\x03 \x01(\x03\"\x15\n\x13MarkerTrackerParams2\xce\x02\n\rMarkerTracker\x12\x9a\x01\n\x0cTrackMarkers\x12\x46.robotics.infrastructure.motion_capture.phasespace.MarkerTrackerParams\x1a>.robotics.infrastructure.motion_capture.phasespace.MarkersData\"\x00\x30\x01\x12\x9f\x01\n\x13GetLatestMarkerData\x12\x46.robotics.infrastructure.motion_capture.phasespace.MarkerTrackerParams\x1a>.robotics.infrastructure.motion_capture.phasespace.MarkersData\"\x00\x62\x06proto3'
)
_TIMESTAMP = _descriptor.Descriptor(
name='Timestamp',
full_name='robotics.infrastructure.motion_capture.phasespace.Timestamp',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='seconds', full_name='robotics.infrastructure.motion_capture.phasespace.Timestamp.seconds', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='nanos', full_name='robotics.infrastructure.motion_capture.phasespace.Timestamp.nanos', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=72,
serialized_end=115,
)
_VECTOR4F = _descriptor.Descriptor(
name='Vector4f',
full_name='robotics.infrastructure.motion_capture.phasespace.Vector4f',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='x', full_name='robotics.infrastructure.motion_capture.phasespace.Vector4f.x', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='y', full_name='robotics.infrastructure.motion_capture.phasespace.Vector4f.y', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='z', full_name='robotics.infrastructure.motion_capture.phasespace.Vector4f.z', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='w', full_name='robotics.infrastructure.motion_capture.phasespace.Vector4f.w', index=3,
number=4, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=117,
serialized_end=171,
)
_VECTOR3F = _descriptor.Descriptor(
name='Vector3f',
full_name='robotics.infrastructure.motion_capture.phasespace.Vector3f',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='x', full_name='robotics.infrastructure.motion_capture.phasespace.Vector3f.x', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='y', full_name='robotics.infrastructure.motion_capture.phasespace.Vector3f.y', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='z', full_name='robotics.infrastructure.motion_capture.phasespace.Vector3f.z', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=173,
serialized_end=216,
)
_MARKERDATA = _descriptor.Descriptor(
name='MarkerData',
full_name='robotics.infrastructure.motion_capture.phasespace.MarkerData',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='marker_id', full_name='robotics.infrastructure.motion_capture.phasespace.MarkerData.marker_id', index=0,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='position_mm', full_name='robotics.infrastructure.motion_capture.phasespace.MarkerData.position_mm', index=1,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='internal_flags', full_name='robotics.infrastructure.motion_capture.phasespace.MarkerData.internal_flags', index=2,
number=4, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='internal_time', full_name='robotics.infrastructure.motion_capture.phasespace.MarkerData.internal_time', index=3,
number=5, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='internalcond', full_name='robotics.infrastructure.motion_capture.phasespace.MarkerData.internalcond', index=4,
number=6, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=219,
serialized_end=401,
)
_RIGIDDATA = _descriptor.Descriptor(
name='RigidData',
full_name='robotics.infrastructure.motion_capture.phasespace.RigidData',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='tracker_id', full_name='robotics.infrastructure.motion_capture.phasespace.RigidData.tracker_id', index=0,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='position_mm', full_name='robotics.infrastructure.motion_capture.phasespace.RigidData.position_mm', index=1,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='quat', full_name='robotics.infrastructure.motion_capture.phasespace.RigidData.quat', index=2,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='internal_flags', full_name='robotics.infrastructure.motion_capture.phasespace.RigidData.internal_flags', index=3,
number=4, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='internal_time', full_name='robotics.infrastructure.motion_capture.phasespace.RigidData.internal_time', index=4,
number=5, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='internalcond', full_name='robotics.infrastructure.motion_capture.phasespace.RigidData.internalcond', index=5,
number=6, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=404,
serialized_end=661,
)
_MARKERSDATA = _descriptor.Descriptor(
name='MarkersData',
full_name='robotics.infrastructure.motion_capture.phasespace.MarkersData',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='timestamp', full_name='robotics.infrastructure.motion_capture.phasespace.MarkersData.timestamp', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='markers', full_name='robotics.infrastructure.motion_capture.phasespace.MarkersData.markers', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='rigids', full_name='robotics.infrastructure.motion_capture.phasespace.MarkersData.rigids', index=2,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='internal_time', full_name='robotics.infrastructure.motion_capture.phasespace.MarkersData.internal_time', index=3,
number=3, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=664,
serialized_end=939,
)
_MARKERTRACKERPARAMS = _descriptor.Descriptor(
name='MarkerTrackerParams',
full_name='robotics.infrastructure.motion_capture.phasespace.MarkerTrackerParams',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=941,
serialized_end=962,
)
_MARKERDATA.fields_by_name['position_mm'].message_type = _VECTOR3F
_RIGIDDATA.fields_by_name['position_mm'].message_type = _VECTOR3F
_RIGIDDATA.fields_by_name['quat'].message_type = _VECTOR4F
_MARKERSDATA.fields_by_name['timestamp'].message_type = _TIMESTAMP
_MARKERSDATA.fields_by_name['markers'].message_type = _MARKERDATA
_MARKERSDATA.fields_by_name['rigids'].message_type = _RIGIDDATA
DESCRIPTOR.message_types_by_name['Timestamp'] = _TIMESTAMP
DESCRIPTOR.message_types_by_name['Vector4f'] = _VECTOR4F
DESCRIPTOR.message_types_by_name['Vector3f'] = _VECTOR3F
DESCRIPTOR.message_types_by_name['MarkerData'] = _MARKERDATA
DESCRIPTOR.message_types_by_name['RigidData'] = _RIGIDDATA
DESCRIPTOR.message_types_by_name['MarkersData'] = _MARKERSDATA
DESCRIPTOR.message_types_by_name['MarkerTrackerParams'] = _MARKERTRACKERPARAMS
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Timestamp = _reflection.GeneratedProtocolMessageType('Timestamp', (_message.Message,), {
'DESCRIPTOR' : _TIMESTAMP,
'__module__' : 'marker_data_pb2'
# @@protoc_insertion_point(class_scope:robotics.infrastructure.motion_capture.phasespace.Timestamp)
})
_sym_db.RegisterMessage(Timestamp)
Vector4f = _reflection.GeneratedProtocolMessageType('Vector4f', (_message.Message,), {
'DESCRIPTOR' : _VECTOR4F,
'__module__' : 'marker_data_pb2'
# @@protoc_insertion_point(class_scope:robotics.infrastructure.motion_capture.phasespace.Vector4f)
})
_sym_db.RegisterMessage(Vector4f)
Vector3f = _reflection.GeneratedProtocolMessageType('Vector3f', (_message.Message,), {
'DESCRIPTOR' : _VECTOR3F,
'__module__' : 'marker_data_pb2'
# @@protoc_insertion_point(class_scope:robotics.infrastructure.motion_capture.phasespace.Vector3f)
})
_sym_db.RegisterMessage(Vector3f)
MarkerData = _reflection.GeneratedProtocolMessageType('MarkerData', (_message.Message,), {
'DESCRIPTOR' : _MARKERDATA,
'__module__' : 'marker_data_pb2'
# @@protoc_insertion_point(class_scope:robotics.infrastructure.motion_capture.phasespace.MarkerData)
})
_sym_db.RegisterMessage(MarkerData)
RigidData = _reflection.GeneratedProtocolMessageType('RigidData', (_message.Message,), {
'DESCRIPTOR' : _RIGIDDATA,
'__module__' : 'marker_data_pb2'
# @@protoc_insertion_point(class_scope:robotics.infrastructure.motion_capture.phasespace.RigidData)
})
_sym_db.RegisterMessage(RigidData)
MarkersData = _reflection.GeneratedProtocolMessageType('MarkersData', (_message.Message,), {
'DESCRIPTOR' : _MARKERSDATA,
'__module__' : 'marker_data_pb2'
# @@protoc_insertion_point(class_scope:robotics.infrastructure.motion_capture.phasespace.MarkersData)
})
_sym_db.RegisterMessage(MarkersData)
MarkerTrackerParams = _reflection.GeneratedProtocolMessageType('MarkerTrackerParams', (_message.Message,), {
'DESCRIPTOR' : _MARKERTRACKERPARAMS,
'__module__' : 'marker_data_pb2'
# @@protoc_insertion_point(class_scope:robotics.infrastructure.motion_capture.phasespace.MarkerTrackerParams)
})
_sym_db.RegisterMessage(MarkerTrackerParams)
_MARKERTRACKER = _descriptor.ServiceDescriptor(
name='MarkerTracker',
full_name='robotics.infrastructure.motion_capture.phasespace.MarkerTracker',
file=DESCRIPTOR,
index=0,
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_start=965,
serialized_end=1299,
methods=[
_descriptor.MethodDescriptor(
name='TrackMarkers',
full_name='robotics.infrastructure.motion_capture.phasespace.MarkerTracker.TrackMarkers',
index=0,
containing_service=None,
input_type=_MARKERTRACKERPARAMS,
output_type=_MARKERSDATA,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='GetLatestMarkerData',
full_name='robotics.infrastructure.motion_capture.phasespace.MarkerTracker.GetLatestMarkerData',
index=1,
containing_service=None,
input_type=_MARKERTRACKERPARAMS,
output_type=_MARKERSDATA,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_MARKERTRACKER)
DESCRIPTOR.services_by_name['MarkerTracker'] = _MARKERTRACKER
# @@protoc_insertion_point(module_scope)
| python | Apache-2.0 | 583f1de43e91cdd24d632d783872528eb1337480 | 2026-01-05T07:14:12.892242Z | false |
ali-vilab/FreeScale | https://github.com/ali-vilab/FreeScale/blob/7161bfcf2c180818db6a9f04f65e5b3963582ac7/run_freescale.py | run_freescale.py | import os
import torch
from PIL import Image
from pipeline_freescale import StableDiffusionXLPipeline
from utils import load_prompts
from free_lunch_utils import register_free_upblock2d, register_free_crossattn_upblock2d
# ----- Experiment configuration ------------------------------------------
checkpoint = "stabilityai/stable-diffusion-xl-base-1.0"
prompt_path = 'prompts/demo.txt'
all_prompts = load_prompts(prompt_path)
# all_prompts = ['Astronaut on Mars During sunset.']
neg_prompt = "blurry, ugly, duplicate, poorly drawn, deformed, mosaic"
output_dir = 'release_4k'
# Coarse-to-fine [height, width] schedule; the last entry is the final output size.
scale_schedule = [[1024, 1024],
                  [2048, 2048],
                  [4096, 4096]]
cos_scale = 2.0
freeu_disabled = 0
use_fast_mode = False

# ----- Pipeline setup -----------------------------------------------------
pipe = StableDiffusionXLPipeline.from_pretrained(checkpoint, torch_dtype=torch.float16)
pipe = pipe.to("cuda")
if not freeu_disabled:
    # FreeU scaling on both the plain and the cross-attention up-blocks.
    register_free_upblock2d(pipe, b1=1.1, b2=1.2, s1=0.6, s2=0.4)
    register_free_crossattn_upblock2d(pipe, b1=1.1, b2=1.2, s1=0.6, s2=0.4)

generator = torch.Generator(device='cuda').manual_seed(123)
os.makedirs(output_dir, exist_ok=True)

# ----- Generation loop ----------------------------------------------------
for idx, text in enumerate(all_prompts):
    print(f"prompt {idx}:")
    print(text)
    results = pipe(text, negative_prompt=neg_prompt, generator=generator,
                   num_inference_steps=50, guidance_scale=7.5,
                   resolutions_list=scale_schedule, fast_mode=use_fast_mode,
                   cosine_scale=cos_scale,
                   )
    # One result per scale stage; file name carries the stage's output height.
    for stage, stage_result in enumerate(results):
        stage_result.images[0].save(f"{output_dir}/img{idx}_{scale_schedule[stage][0]}.png")
ali-vilab/FreeScale | https://github.com/ali-vilab/FreeScale/blob/7161bfcf2c180818db6a9f04f65e5b3963582ac7/gradio_app.py | gradio_app.py | import gradio as gr
import torch
from free_lunch_utils import register_free_upblock2d, register_free_crossattn_upblock2d
from pipeline_freescale import StableDiffusionXLPipeline
from pipeline_freescale_turbo import StableDiffusionXLPipeline_Turbo
# Global inference settings: half precision on a single CUDA device.
dtype = torch.float16
device = "cuda"
# Base SDXL for the high-quality path; SDXL-Turbo for the fast path.
model_ckpt = "stabilityai/stable-diffusion-xl-base-1.0"
model_ckpt_turbo = "stabilityai/sdxl-turbo"
pipe = StableDiffusionXLPipeline.from_pretrained(model_ckpt, torch_dtype=dtype).to(device)
pipe_turbo = StableDiffusionXLPipeline_Turbo.from_pretrained(model_ckpt_turbo, torch_dtype=dtype).to(device)
# Patch FreeU backbone/skip scaling into both pipelines' up-blocks once at startup.
register_free_upblock2d(pipe, b1=1.1, b2=1.2, s1=0.6, s2=0.4)
register_free_crossattn_upblock2d(pipe, b1=1.1, b2=1.2, s1=0.6, s2=0.4)
register_free_upblock2d(pipe_turbo, b1=1.1, b2=1.2, s1=0.6, s2=0.4)
register_free_crossattn_upblock2d(pipe_turbo, b1=1.1, b2=1.2, s1=0.6, s2=0.4)
# Drop any allocation slack left over from model loading.
torch.cuda.empty_cache()
def infer_gpu_part(seed, prompt, negative_prompt, ddim_steps, guidance_scale, resolutions_list, fast_mode, cosine_scale, restart_steps):
    """Run the base (non-turbo) SDXL FreeScale pipeline and return one PIL image."""
    gen = torch.Generator(device='cuda').manual_seed(seed)
    output = pipe(
        prompt,
        negative_prompt=negative_prompt,
        generator=gen,
        num_inference_steps=ddim_steps,
        guidance_scale=guidance_scale,
        resolutions_list=resolutions_list,
        fast_mode=fast_mode,
        cosine_scale=cosine_scale,
        restart_steps=restart_steps,
    )
    return output.images[0]
def infer_gpu_part_turbo(seed, prompt, negative_prompt, ddim_steps, guidance_scale, resolutions_list, fast_mode, cosine_scale, restart_steps):
    """Run the SDXL-Turbo FreeScale pipeline and return one PIL image."""
    gen = torch.Generator(device='cuda').manual_seed(seed)
    output = pipe_turbo(
        prompt,
        negative_prompt=negative_prompt,
        generator=gen,
        num_inference_steps=ddim_steps,
        guidance_scale=guidance_scale,
        resolutions_list=resolutions_list,
        fast_mode=fast_mode,
        cosine_scale=cosine_scale,
        restart_steps=restart_steps,
    )
    return output.images[0]
def infer(prompt, output_size, ddim_steps, guidance_scale, cosine_scale, seed, options, negative_prompt):
    """Dispatch a UI generation request to the base or turbo pipeline.

    Args:
        prompt: Text prompt.
        output_size: One of "2048 x 2048", "1024 x 2048", "2048 x 1024" (H x W).
        ddim_steps: Number of denoising steps.
        guidance_scale: CFG scale (effectively unused by the turbo pipeline).
        cosine_scale: FreeScale cosine guidance strength.
        seed: Random seed for the generator.
        options: Selected checkboxes; "Disable Turbo" selects the base SDXL path.
        negative_prompt: Negative text prompt.

    Returns:
        The generated PIL image.

    Raises:
        ValueError: If ``output_size`` is not one of the supported choices.
        (The original if/elif chain left ``resolutions_list`` unbound on an
        unknown size and crashed later with ``UnboundLocalError``.)
    """
    print(prompt)
    print(negative_prompt)
    disable_turbo = 'Disable Turbo' in options

    if disable_turbo:
        # Base SDXL: two-stage schedule; fast_mode offsets the slower pipeline.
        fast_mode = True
        schedules = {
            "2048 x 2048": [[1024, 1024], [2048, 2048]],
            "1024 x 2048": [[512, 1024], [1024, 2048]],
            "2048 x 1024": [[1024, 512], [2048, 1024]],
        }
        restart_steps = [int(ddim_steps * 0.3)]
        runner = infer_gpu_part
    else:
        # Turbo: three-stage schedule starting from a smaller base resolution.
        fast_mode = False
        schedules = {
            "2048 x 2048": [[512, 512], [1024, 1024], [2048, 2048]],
            "1024 x 2048": [[256, 512], [512, 1024], [1024, 2048]],
            "2048 x 1024": [[512, 256], [1024, 512], [2048, 1024]],
        }
        restart_steps = [int(ddim_steps * 0.5)] * 2
        runner = infer_gpu_part_turbo

    try:
        resolutions_list = schedules[output_size]
    except KeyError:
        raise ValueError(
            f"Unsupported output size: {output_size!r}; "
            f"expected one of {sorted(schedules)}"
        ) from None

    result = runner(seed, prompt, negative_prompt, ddim_steps, guidance_scale,
                    resolutions_list, fast_mode, cosine_scale, restart_steps)
    return result
# Example prompts surfaced in the UI through gr.Examples (first element fills
# the prompt textbox; remaining example inputs use their component defaults).
examples = [
    ["A cute and adorable fluffy puppy wearing a witch hat in a Halloween autumn evening forest, falling autumn leaves, brown acorns on the ground, Halloween pumpkins spiderwebs, bats, and a witch’s broom.",],
    ["Brunette pilot girl in a snowstorm, full body, moody lighting, intricate details, depth of field, outdoors, Fujifilm XT3, RAW, 8K UHD, film grain, Unreal Engine 5, ray tracing.",],
    ["A panda walking and munching bamboo in a bamboo forest.",],
]

# Page-level CSS: center the main column and cap its width at 768px.
css = """
#col-container {max-width: 768px; margin-left: auto; margin-right: auto;}
"""
def mode_update(options):
    """Reconfigure the UI when the "Disable Turbo" checkbox is toggled.

    Returns updated [steps slider, guidance slider, negative-prompt row]:
    the base-SDXL path gets wider step/CFG ranges and a visible negative
    prompt; the turbo path gets narrow ranges and hides the row.
    """
    turbo_disabled = 'Disable Turbo' in options
    if turbo_disabled:
        steps = gr.Slider(minimum=5,
                          maximum=60,
                          value=50)
        cfg = gr.Slider(minimum=1.0,
                        maximum=20.0,
                        value=7.5)
        neg_row = gr.Row(visible=True)
    else:
        steps = gr.Slider(minimum=2,
                          maximum=6,
                          value=4)
        cfg = gr.Slider(minimum=0.0,
                        maximum=1.0,
                        value=0.0)
        neg_row = gr.Row(visible=False)
    return [steps, cfg, neg_row]
# ---- Gradio UI definition -------------------------------------------------
# Layout: centered column with a header, prompt box, collapsible advanced
# settings, a generate button, and the output image.
with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        # Title and paper/project/code links.
        gr.Markdown(
            """
            <h1 style="text-align: center;">FreeScale (unleash the resolution of SDXL)</h1>
            <p style="text-align: center;">
            FreeScale: Unleashing the Resolution of Diffusion Models via Tuning-Free Scale Fusion
            </p>
            <p style="text-align: center;">
            <a href="https://arxiv.org/abs/2412.09626" target="_blank"><b>[arXiv]</b></a>
            <a href="http://haonanqiu.com/projects/FreeScale.html" target="_blank"><b>[Project Page]</b></a>
            <a href="https://github.com/ali-vilab/FreeScale" target="_blank"><b>[Code]</b></a>
            </p>
            """
        )
        prompt_in = gr.Textbox(label="Prompt", placeholder="A panda walking and munching bamboo in a bamboo forest.")
        with gr.Row():
            with gr.Accordion('Advanced Settings', open=False):
                with gr.Row():
                    output_size = gr.Dropdown(["2048 x 2048", "1024 x 2048", "2048 x 1024"], value="2048 x 2048", label="Output Size (H x W)", info="Due to GPU constraints, run the demo locally for higher resolutions.")
                    options = gr.CheckboxGroup(['Disable Turbo'], label="Options", info="Disable Turbo will get better results but cost more time.")
                with gr.Row():
                    # Defaults assume the turbo path; mode_update widens them
                    # when turbo is disabled.
                    ddim_steps = gr.Slider(label='DDIM Steps',
                                           minimum=2,
                                           maximum=6,
                                           step=1,
                                           value=4)
                    guidance_scale = gr.Slider(label='Guidance Scale (Disabled in Turbo)',
                                               minimum=0.0,
                                               maximum=1.0,
                                               step=0.1,
                                               value=0.0)
                with gr.Row():
                    cosine_scale = gr.Slider(label='Cosine Scale',
                                             minimum=0,
                                             maximum=10,
                                             step=0.1,
                                             value=2.0)
                    seed = gr.Slider(label='Random Seed',
                                     minimum=0,
                                     maximum=10000,
                                     step=1,
                                     value=111)
                # Hidden by default; mode_update reveals it on the non-turbo path.
                with gr.Row() as row_neg:
                    negative_prompt = gr.Textbox(label='Negative Prompt', value='blurry, ugly, duplicate, poorly drawn, deformed, mosaic', visible=False)
        # Toggling the checkbox rewires slider ranges and row visibility.
        options.change(mode_update, options, [ddim_steps, guidance_scale, row_neg])
        submit_btn = gr.Button("Generate", variant='primary')
        image_result = gr.Image(label="Image Output")
        gr.Examples(examples=examples, inputs=[prompt_in, output_size, ddim_steps, guidance_scale, cosine_scale, seed, options, negative_prompt])
    # Wire the button to the dispatcher; exposed as the "freescalehf" API endpoint.
    submit_btn.click(fn=infer,
                     inputs=[prompt_in, output_size, ddim_steps, guidance_scale, cosine_scale, seed, options, negative_prompt],
                     outputs=[image_result],
                     api_name="freescalehf")

if __name__ == "__main__":
    # Queue limits concurrent requests so the single GPU is not oversubscribed.
    demo.queue(max_size=8).launch()
ali-vilab/FreeScale | https://github.com/ali-vilab/FreeScale/blob/7161bfcf2c180818db6a9f04f65e5b3963582ac7/pipeline_freescale_imgen.py | pipeline_freescale_imgen.py | import inspect
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers.image_processor import VaeImageProcessor
from diffusers.loaders import FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.models.attention_processor import (
AttnProcessor2_0,
LoRAAttnProcessor2_0,
LoRAXFormersAttnProcessor,
XFormersAttnProcessor,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import (
is_accelerate_available,
is_accelerate_version,
is_invisible_watermark_available,
logging,
randn_tensor,
replace_example_docstring,
)
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion_xl import StableDiffusionXLPipelineOutput
if is_invisible_watermark_available():
from .watermark import StableDiffusionXLWatermarker
from inspect import isfunction
from functools import partial
import numpy as np
import torch.nn.functional as F
from diffusers.models.attention import BasicTransformerBlock
from scale_attention import ori_forward, scale_forward
from PIL import Image
import torchvision.transforms as transforms
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = """
Examples:
```py
>>> import torch
>>> from diffusers import StableDiffusionXLPipeline
>>> pipe = StableDiffusionXLPipeline.from_pretrained(
... "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
... )
>>> pipe = pipe.to("cuda")
>>> prompt = "a photo of an astronaut riding a horse on mars"
>>> image = pipe(prompt).images[0]
```
"""
def process_image_to_tensor(image_path):
    """Load an RGB image and return a float tensor (C, H, W) normalized to [-1, 1]."""
    rgb = Image.open(image_path).convert("RGB")
    pipeline = transforms.Compose(
        [
            # NOTE(review): an optional Resize((1024, 1024)) step was disabled
            # upstream; the image keeps its native resolution.
            transforms.ToTensor(),
            # Map [0, 1] -> [-1, 1] per channel, matching diffusion VAE input range.
            transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
        ]
    )
    return pipeline(rgb)
def process_image_to_bitensor(image_path):
    """Load an image as grayscale and binarize it: nonzero pixels -> 1.0, zero -> 0.0."""
    gray = Image.open(image_path).convert("L")
    gray_tensor = transforms.ToTensor()(gray)
    mask = torch.where(gray_tensor != 0, torch.tensor(1.0), torch.tensor(0.0))
    return mask
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg
def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
    """
    Rescale `noise_cfg` toward the per-sample standard deviation of
    `noise_pred_text`, then blend with the original by `guidance_rescale`.
    Based on findings of [Common Diffusion Noise Schedules and Sample Steps
    are Flawed](https://arxiv.org/pdf/2305.08891.pdf), Section 3.4; fixes the
    overexposure introduced by classifier-free guidance.
    """
    # Per-sample std over all non-batch dimensions, kept broadcastable.
    std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
    std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
    rescaled = noise_cfg * (std_text / std_cfg)
    # Interpolate between rescaled and original guidance to avoid "plain
    # looking" images at full rescale strength.
    return guidance_rescale * rescaled + (1 - guidance_rescale) * noise_cfg
class StableDiffusionXLPipeline(DiffusionPipeline, FromSingleFileMixin, LoraLoaderMixin):
r"""
Pipeline for text-to-image generation using Stable Diffusion XL.
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
In addition the pipeline inherits the following loading methods:
- *LoRA*: [`StableDiffusionXLPipeline.load_lora_weights`]
- *Ckpt*: [`loaders.FromSingleFileMixin.from_single_file`]
as well as the following saving methods:
- *LoRA*: [`loaders.StableDiffusionXLPipeline.save_lora_weights`]
Args:
vae ([`AutoencoderKL`]):
Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
text_encoder ([`CLIPTextModel`]):
Frozen text-encoder. Stable Diffusion XL uses the text portion of
[CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
text_encoder_2 ([` CLIPTextModelWithProjection`]):
Second frozen text-encoder. Stable Diffusion XL uses the text and pool portion of
[CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection),
specifically the
[laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k)
variant.
tokenizer (`CLIPTokenizer`):
Tokenizer of class
[CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
tokenizer_2 (`CLIPTokenizer`):
Second Tokenizer of class
[CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
scheduler ([`SchedulerMixin`]):
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
"""
def __init__(
self,
vae: AutoencoderKL,
text_encoder: CLIPTextModel,
text_encoder_2: CLIPTextModelWithProjection,
tokenizer: CLIPTokenizer,
tokenizer_2: CLIPTokenizer,
unet: UNet2DConditionModel,
scheduler: KarrasDiffusionSchedulers,
force_zeros_for_empty_prompt: bool = True,
add_watermarker: Optional[bool] = None,
):
super().__init__()
self.register_modules(
vae=vae,
text_encoder=text_encoder,
text_encoder_2=text_encoder_2,
tokenizer=tokenizer,
tokenizer_2=tokenizer_2,
unet=unet,
scheduler=scheduler,
)
self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt)
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
self.default_sample_size = self.unet.config.sample_size
add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available()
if add_watermarker:
self.watermark = StableDiffusionXLWatermarker()
else:
self.watermark = None
self.vae.enable_tiling()
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_slicing
def enable_vae_slicing(self):
r"""
Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
"""
self.vae.enable_slicing()
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_slicing
def disable_vae_slicing(self):
r"""
Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
computing decoding in one step.
"""
self.vae.disable_slicing()
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_tiling
def enable_vae_tiling(self):
r"""
Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
processing larger images.
"""
self.vae.enable_tiling()
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_tiling
def disable_vae_tiling(self):
r"""
Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to
computing decoding in one step.
"""
self.vae.disable_tiling()
    def enable_model_cpu_offload(self, gpu_id=0):
        r"""
        Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared
        to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward`
        method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with
        `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`.

        Args:
            gpu_id (`int`, *optional*, defaults to 0): index of the CUDA device models are moved onto on demand.

        Raises:
            ImportError: if `accelerate` >= 0.17.0 is not installed (hook-based offload needs it).
        """
        # Hook-based offloading only exists in accelerate >= 0.17.
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
        device = torch.device(f"cuda:{gpu_id}")
        # Start from a clean CPU state so the freed GPU memory is visible.
        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        # Chain the models in execution order (text encoders -> unet -> vae);
        # each hook offloads the previous model when the next one starts.
        model_sequence = (
            [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2]
        )
        model_sequence.extend([self.unet, self.vae])
        hook = None
        for cpu_offloaded_model in model_sequence:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
        # We'll offload the last model manually.
        self.final_offload_hook = hook
    def encode_prompt(
        self,
        prompt: str,
        prompt_2: Optional[str] = None,
        device: Optional[torch.device] = None,
        num_images_per_prompt: int = 1,
        do_classifier_free_guidance: bool = True,
        negative_prompt: Optional[str] = None,
        negative_prompt_2: Optional[str] = None,
        prompt_embeds: Optional[torch.FloatTensor] = None,
        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
        pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
        negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
        lora_scale: Optional[float] = None,
    ):
        r"""
        Encodes the prompt into text encoder hidden states.

        Args:
            prompt (`str` or `List[str]`, *optional*):
                prompt to be encoded
            prompt_2 (`str` or `List[str]`, *optional*):
                The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
                used in both text-encoders
            device: (`torch.device`):
                torch device
            num_images_per_prompt (`int`):
                number of images that should be generated per prompt
            do_classifier_free_guidance (`bool`):
                whether to use classifier free guidance or not
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
                less than `1`).
            negative_prompt_2 (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
                `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
            prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                provided, text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                argument.
            pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
                If not provided, pooled text embeddings will be generated from `prompt` input argument.
            negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
                input argument.
            lora_scale (`float`, *optional*):
                A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.

        Returns:
            A 4-tuple `(prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds,
            negative_pooled_prompt_embeds)`. The negative entries may be `None` when
            classifier-free guidance is disabled and no negative embeddings were passed in.
        """
        device = device or self._execution_device
        # set lora scale so that monkey patched LoRA
        # function of text encoder can correctly access it
        if lora_scale is not None and isinstance(self, LoraLoaderMixin):
            self._lora_scale = lora_scale
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]
        # Define tokenizers and text encoders
        tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2]
        text_encoders = (
            [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2]
        )
        if prompt_embeds is None:
            prompt_2 = prompt_2 or prompt
            # textual inversion: process multi-vector tokens if necessary
            prompt_embeds_list = []
            prompts = [prompt, prompt_2]
            # NOTE(review): this loop rebinds `prompt`; after it, `prompt` holds the
            # last entry (prompt_2), which the negative-prompt type check below uses.
            for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders):
                if isinstance(self, TextualInversionLoaderMixin):
                    prompt = self.maybe_convert_prompt(prompt, tokenizer)
                text_inputs = tokenizer(
                    prompt,
                    padding="max_length",
                    max_length=tokenizer.model_max_length,
                    truncation=True,
                    return_tensors="pt",
                )
                text_input_ids = text_inputs.input_ids
                untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
                # Warn when the prompt was truncated by the tokenizer's max length.
                if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
                    text_input_ids, untruncated_ids
                ):
                    removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1])
                    logger.warning(
                        "The following part of your input was truncated because CLIP can only handle sequences up to"
                        f" {tokenizer.model_max_length} tokens: {removed_text}"
                    )
                prompt_embeds = text_encoder(
                    text_input_ids.to(device),
                    output_hidden_states=True,
                )
                # We are only ALWAYS interested in the pooled output of the final text encoder
                pooled_prompt_embeds = prompt_embeds[0]
                # SDXL conditions on the penultimate hidden layer, not the last.
                prompt_embeds = prompt_embeds.hidden_states[-2]
                prompt_embeds_list.append(prompt_embeds)
            # Concatenate the two encoders' embeddings along the feature axis.
            prompt_embeds = torch.concat(prompt_embeds_list, dim=-1)
        # get unconditional embeddings for classifier free guidance
        zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt
        if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt:
            # Config-driven shortcut: the empty negative prompt becomes all-zeros.
            negative_prompt_embeds = torch.zeros_like(prompt_embeds)
            negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds)
        elif do_classifier_free_guidance and negative_prompt_embeds is None:
            negative_prompt = negative_prompt or ""
            negative_prompt_2 = negative_prompt_2 or negative_prompt
            uncond_tokens: List[str]
            if prompt is not None and type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt, negative_prompt_2]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = [negative_prompt, negative_prompt_2]
            negative_prompt_embeds_list = []
            for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders):
                if isinstance(self, TextualInversionLoaderMixin):
                    negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer)
                # Pad the negative side to the positive side's sequence length.
                max_length = prompt_embeds.shape[1]
                uncond_input = tokenizer(
                    negative_prompt,
                    padding="max_length",
                    max_length=max_length,
                    truncation=True,
                    return_tensors="pt",
                )
                negative_prompt_embeds = text_encoder(
                    uncond_input.input_ids.to(device),
                    output_hidden_states=True,
                )
                # We are only ALWAYS interested in the pooled output of the final text encoder
                negative_pooled_prompt_embeds = negative_prompt_embeds[0]
                negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2]
                negative_prompt_embeds_list.append(negative_prompt_embeds)
            negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1)
        prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
        bs_embed, seq_len, _ = prompt_embeds.shape
        # duplicate text embeddings for each generation per prompt, using mps friendly method
        prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
        prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
        if do_classifier_free_guidance:
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
            # NOTE(review): the negative side reshapes with `batch_size` while the
            # positive side uses `bs_embed`; these agree whenever the prompt inputs
            # are consistent — confirm against upstream diffusers if touched.
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
        pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
            bs_embed * num_images_per_prompt, -1
        )
        if do_classifier_free_guidance:
            negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
                bs_embed * num_images_per_prompt, -1
            )
        return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
def prepare_extra_step_kwargs(self, generator, eta):
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
extra_step_kwargs = {}
if accepts_eta:
extra_step_kwargs["eta"] = eta
# check if the scheduler accepts generator
accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
if accepts_generator:
extra_step_kwargs["generator"] = generator
return extra_step_kwargs
    def check_inputs(
        self,
        prompt,
        prompt_2,
        height,
        width,
        callback_steps,
        negative_prompt=None,
        negative_prompt_2=None,
        prompt_embeds=None,
        negative_prompt_embeds=None,
        pooled_prompt_embeds=None,
        negative_pooled_prompt_embeds=None,
    ):
        """
        Validate the prompt/embedding argument combination passed to `__call__`.

        Enforces: divisible-by-8 image dimensions, a positive integer
        `callback_steps`, mutual exclusivity of raw prompts and pre-computed
        embeddings (positive and negative sides), matching embedding shapes,
        and that pooled embeddings accompany their sequence embeddings.

        Raises:
            ValueError: on any inconsistent combination.
        """
        # Latent-space pipelines need pixel dimensions divisible by the VAE stride.
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )
        # `prompt`/`prompt_2` and `prompt_embeds` are mutually exclusive inputs.
        if prompt is not None and prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
                " only forward one of the two."
            )
        elif prompt_2 is not None and prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
                " only forward one of the two."
            )
        elif prompt is None and prompt_embeds is None:
            raise ValueError(
                "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
            )
        elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
        elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)):
            raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}")
        # Same exclusivity rules for the negative (unconditional) side.
        if negative_prompt is not None and negative_prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
                f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
            )
        elif negative_prompt_2 is not None and negative_prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:"
                f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
            )
        # Positive and negative embeddings must be shape-compatible for CFG concat.
        if prompt_embeds is not None and negative_prompt_embeds is not None:
            if prompt_embeds.shape != negative_prompt_embeds.shape:
                raise ValueError(
                    "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
                    f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
                    f" {negative_prompt_embeds.shape}."
                )
        # SDXL additionally conditions on pooled embeddings; they must come along.
        if prompt_embeds is not None and pooled_prompt_embeds is None:
            raise ValueError(
                "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`."
            )
        if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None:
            raise ValueError(
                "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`."
            )
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
if isinstance(generator, list) and len(generator) != batch_size:
raise ValueError(
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
)
if latents is None:
latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
else:
latents = latents.to(device)
# scale the initial noise by the standard deviation required by the scheduler
latents = latents * self.scheduler.init_noise_sigma
return latents
def _get_add_time_ids(self, original_size, crops_coords_top_left, target_size, dtype):
add_time_ids = list(original_size + crops_coords_top_left + target_size)
passed_add_embed_dim = (
self.unet.config.addition_time_embed_dim * len(add_time_ids) + self.text_encoder_2.config.projection_dim
)
expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features
if expected_add_embed_dim != passed_add_embed_dim:
raise ValueError(
f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`."
)
add_time_ids = torch.tensor([add_time_ids], dtype=dtype)
return add_time_ids
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae
def upcast_vae(self):
dtype = self.vae.dtype
self.vae.to(dtype=torch.float32)
use_torch_2_0_or_xformers = isinstance(
self.vae.decoder.mid_block.attentions[0].processor,
(
AttnProcessor2_0,
XFormersAttnProcessor,
LoRAXFormersAttnProcessor,
LoRAAttnProcessor2_0,
),
)
# if xformers or torch_2_0 is used attention block does not need
# to be in float32 which can save lots of memory
if use_torch_2_0_or_xformers:
self.vae.post_quant_conv.to(dtype)
self.vae.decoder.conv_in.to(dtype)
self.vae.decoder.mid_block.to(dtype)
@torch.no_grad()
@replace_example_docstring(EXAMPLE_DOC_STRING)
def __call__(
self,
prompt: Union[str, List[str]] = None,
prompt_2: Optional[Union[str, List[str]]] = None,
height: Optional[int] = None,
width: Optional[int] = None,
num_inference_steps: int = 50,
denoising_end: Optional[float] = None,
guidance_scale: float = 5.0,
negative_prompt: Optional[Union[str, List[str]]] = None,
negative_prompt_2: Optional[Union[str, List[str]]] = None,
num_images_per_prompt: Optional[int] = 1,
eta: float = 0.0,
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
latents: Optional[torch.FloatTensor] = None,
prompt_embeds: Optional[torch.FloatTensor] = None,
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
output_type: Optional[str] = "pil",
return_dict: bool = True,
callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
callback_steps: int = 1,
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
guidance_rescale: float = 0.0,
original_size: Optional[Tuple[int, int]] = None,
crops_coords_top_left: Tuple[int, int] = (0, 0),
target_size: Optional[Tuple[int, int]] = None,
resolutions_list: Optional[Union[int, List[int]]] = None,
restart_steps: Optional[Union[int, List[int]]] = None,
cosine_scale: float = 2.0,
cosine_scale_bg: float = 1.0,
dilate_tau: int = 35,
fast_mode: bool = False,
img_path: Optional[str] = "",
mask_path: Optional[str] = "",
):
r"""
Function invoked when calling the pipeline for generation.
Args:
prompt (`str` or `List[str]`, *optional*):
The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`.
instead.
prompt_2 (`str` or `List[str]`, *optional*):
The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
used in both text-encoders
height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
The height in pixels of the generated image.
width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
The width in pixels of the generated image.
num_inference_steps (`int`, *optional*, defaults to 50):
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
expense of slower inference.
denoising_end (`float`, *optional*):
When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be
completed before it is intentionally prematurely terminated. As a result, the returned sample will
still retain a substantial amount of noise as determined by the discrete timesteps selected by the
scheduler. The denoising_end parameter should ideally be utilized when this pipeline forms a part of a
"Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image
Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output)
guidance_scale (`float`, *optional*, defaults to 5.0):
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
`guidance_scale` is defined as `w` of equation 2. of [Imagen
Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
usually at the expense of lower image quality.
negative_prompt (`str` or `List[str]`, *optional*):
The prompt or prompts not to guide the image generation. If not defined, one has to pass
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
less than `1`).
| python | Apache-2.0 | 7161bfcf2c180818db6a9f04f65e5b3963582ac7 | 2026-01-05T07:14:43.712841Z | true |
ali-vilab/FreeScale | https://github.com/ali-vilab/FreeScale/blob/7161bfcf2c180818db6a9f04f65e5b3963582ac7/pipeline_freescale_turbo.py | pipeline_freescale_turbo.py | import inspect
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers.image_processor import VaeImageProcessor
from diffusers.loaders import FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.models.attention_processor import (
AttnProcessor2_0,
LoRAAttnProcessor2_0,
LoRAXFormersAttnProcessor,
XFormersAttnProcessor,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import (
is_accelerate_available,
is_accelerate_version,
is_invisible_watermark_available,
logging,
randn_tensor,
replace_example_docstring,
)
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion_xl import StableDiffusionXLPipelineOutput
if is_invisible_watermark_available():
from .watermark import StableDiffusionXLWatermarker
from inspect import isfunction
from functools import partial
import numpy as np
import torch.nn.functional as F
from diffusers.models.attention import BasicTransformerBlock
from scale_attention_turbo import ori_forward, scale_forward
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = """
Examples:
```py
>>> import torch
>>> from diffusers import StableDiffusionXLPipeline
>>> pipe = StableDiffusionXLPipeline.from_pretrained(
... "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
... )
>>> pipe = pipe.to("cuda")
>>> prompt = "a photo of an astronaut riding a horse on mars"
>>> image = pipe(prompt).images[0]
```
"""
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg
def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
    """
    Variance rescaling from "Common Diffusion Noise Schedules and Sample Steps
    are Flawed" (https://arxiv.org/pdf/2305.08891.pdf, Section 3.4).

    The classifier-free-guidance output is rescaled so its per-sample standard
    deviation matches the text-conditioned prediction's (fixes overexposure),
    then blended with the unrescaled output by `guidance_rescale` to avoid
    "plain looking" images.
    """
    text_dims = list(range(1, noise_pred_text.ndim))
    cfg_dims = list(range(1, noise_cfg.ndim))
    std_text = noise_pred_text.std(dim=text_dims, keepdim=True)
    std_cfg = noise_cfg.std(dim=cfg_dims, keepdim=True)
    rescaled = noise_cfg * (std_text / std_cfg)
    return guidance_rescale * rescaled + (1 - guidance_rescale) * noise_cfg
class StableDiffusionXLPipeline_Turbo(DiffusionPipeline, FromSingleFileMixin, LoraLoaderMixin):
r"""
Pipeline for text-to-image generation using Stable Diffusion XL.
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
In addition the pipeline inherits the following loading methods:
- *LoRA*: [`StableDiffusionXLPipeline.load_lora_weights`]
- *Ckpt*: [`loaders.FromSingleFileMixin.from_single_file`]
as well as the following saving methods:
- *LoRA*: [`loaders.StableDiffusionXLPipeline.save_lora_weights`]
Args:
vae ([`AutoencoderKL`]):
Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
text_encoder ([`CLIPTextModel`]):
Frozen text-encoder. Stable Diffusion XL uses the text portion of
[CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
        text_encoder_2 ([`CLIPTextModelWithProjection`]):
Second frozen text-encoder. Stable Diffusion XL uses the text and pool portion of
[CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection),
specifically the
[laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k)
variant.
tokenizer (`CLIPTokenizer`):
Tokenizer of class
[CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
tokenizer_2 (`CLIPTokenizer`):
Second Tokenizer of class
[CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
scheduler ([`SchedulerMixin`]):
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
"""
def __init__(
self,
vae: AutoencoderKL,
text_encoder: CLIPTextModel,
text_encoder_2: CLIPTextModelWithProjection,
tokenizer: CLIPTokenizer,
tokenizer_2: CLIPTokenizer,
unet: UNet2DConditionModel,
scheduler: KarrasDiffusionSchedulers,
force_zeros_for_empty_prompt: bool = True,
add_watermarker: Optional[bool] = None,
):
super().__init__()
self.register_modules(
vae=vae,
text_encoder=text_encoder,
text_encoder_2=text_encoder_2,
tokenizer=tokenizer,
tokenizer_2=tokenizer_2,
unet=unet,
scheduler=scheduler,
)
self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt)
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
self.default_sample_size = self.unet.config.sample_size
add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available()
if add_watermarker:
self.watermark = StableDiffusionXLWatermarker()
else:
self.watermark = None
self.vae.enable_tiling()
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_slicing
def enable_vae_slicing(self):
r"""
Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
"""
self.vae.enable_slicing()
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_slicing
def disable_vae_slicing(self):
r"""
Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
computing decoding in one step.
"""
self.vae.disable_slicing()
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_tiling
def enable_vae_tiling(self):
r"""
Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
processing larger images.
"""
self.vae.enable_tiling()
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_tiling
def disable_vae_tiling(self):
r"""
Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to
computing decoding in one step.
"""
self.vae.disable_tiling()
    def enable_model_cpu_offload(self, gpu_id=0):
        r"""
        Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared
        to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward`
        method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with
        `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`.

        Args:
            gpu_id (`int`, *optional*, defaults to 0): index of the CUDA device models are moved onto on demand.

        Raises:
            ImportError: if `accelerate` >= 0.17.0 is not installed (hook-based offload needs it).
        """
        # Hook-based offloading only exists in accelerate >= 0.17.
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
        device = torch.device(f"cuda:{gpu_id}")
        # Start from a clean CPU state so the freed GPU memory is visible.
        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        # Chain the models in execution order (text encoders -> unet -> vae);
        # each hook offloads the previous model when the next one starts.
        model_sequence = (
            [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2]
        )
        model_sequence.extend([self.unet, self.vae])
        hook = None
        for cpu_offloaded_model in model_sequence:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
        # We'll offload the last model manually.
        self.final_offload_hook = hook
    def encode_prompt(
        self,
        prompt: str,
        prompt_2: Optional[str] = None,
        device: Optional[torch.device] = None,
        num_images_per_prompt: int = 1,
        do_classifier_free_guidance: bool = True,
        negative_prompt: Optional[str] = None,
        negative_prompt_2: Optional[str] = None,
        prompt_embeds: Optional[torch.FloatTensor] = None,
        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
        pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
        negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
        lora_scale: Optional[float] = None,
    ):
        r"""
        Encodes the prompt into text encoder hidden states.
        Args:
            prompt (`str` or `List[str]`, *optional*):
                prompt to be encoded
            prompt_2 (`str` or `List[str]`, *optional*):
                The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
                used in both text-encoders
            device: (`torch.device`):
                torch device
            num_images_per_prompt (`int`):
                number of images that should be generated per prompt
            do_classifier_free_guidance (`bool`):
                whether to use classifier free guidance or not
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
                less than `1`).
            negative_prompt_2 (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
                `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
            prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                provided, text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                argument.
            pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
                If not provided, pooled text embeddings will be generated from `prompt` input argument.
            negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
                input argument.
            lora_scale (`float`, *optional*):
                A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
        """
        device = device or self._execution_device
        # set lora scale so that monkey patched LoRA
        # function of text encoder can correctly access it
        if lora_scale is not None and isinstance(self, LoraLoaderMixin):
            self._lora_scale = lora_scale
        # Derive the batch size from the prompt; when only precomputed embeddings are
        # supplied, fall back to their leading dimension.
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]
        # Define tokenizers and text encoders
        # (SDXL uses two; the first pair may be absent in refiner-style checkpoints).
        tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2]
        text_encoders = (
            [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2]
        )
        if prompt_embeds is None:
            prompt_2 = prompt_2 or prompt
            # textual inversion: process multi-vector tokens if necessary
            prompt_embeds_list = []
            prompts = [prompt, prompt_2]
            # NOTE: the loop variable rebinds `prompt` to the per-encoder prompt below.
            for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders):
                if isinstance(self, TextualInversionLoaderMixin):
                    prompt = self.maybe_convert_prompt(prompt, tokenizer)
                text_inputs = tokenizer(
                    prompt,
                    padding="max_length",
                    max_length=tokenizer.model_max_length,
                    truncation=True,
                    return_tensors="pt",
                )
                text_input_ids = text_inputs.input_ids
                untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
                # Warn when the prompt exceeded the tokenizer window and was clipped.
                if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
                    text_input_ids, untruncated_ids
                ):
                    removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1])
                    logger.warning(
                        "The following part of your input was truncated because CLIP can only handle sequences up to"
                        f" {tokenizer.model_max_length} tokens: {removed_text}"
                    )
                prompt_embeds = text_encoder(
                    text_input_ids.to(device),
                    output_hidden_states=True,
                )
                # We are only ALWAYS interested in the pooled output of the final text encoder
                pooled_prompt_embeds = prompt_embeds[0]
                # The penultimate hidden state is used as the conditioning features.
                prompt_embeds = prompt_embeds.hidden_states[-2]
                prompt_embeds_list.append(prompt_embeds)
            # Concatenate both encoders' features along the channel dimension.
            prompt_embeds = torch.concat(prompt_embeds_list, dim=-1)
        # get unconditional embeddings for classifier free guidance
        zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt
        if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt:
            # Config opts into all-zero unconditional embeddings for empty negatives.
            negative_prompt_embeds = torch.zeros_like(prompt_embeds)
            negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds)
        elif do_classifier_free_guidance and negative_prompt_embeds is None:
            negative_prompt = negative_prompt or ""
            negative_prompt_2 = negative_prompt_2 or negative_prompt
            uncond_tokens: List[str]
            if prompt is not None and type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt, negative_prompt_2]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = [negative_prompt, negative_prompt_2]
            negative_prompt_embeds_list = []
            for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders):
                if isinstance(self, TextualInversionLoaderMixin):
                    negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer)
                # Pad the negative prompt to the same sequence length as the positive one.
                max_length = prompt_embeds.shape[1]
                uncond_input = tokenizer(
                    negative_prompt,
                    padding="max_length",
                    max_length=max_length,
                    truncation=True,
                    return_tensors="pt",
                )
                negative_prompt_embeds = text_encoder(
                    uncond_input.input_ids.to(device),
                    output_hidden_states=True,
                )
                # We are only ALWAYS interested in the pooled output of the final text encoder
                negative_pooled_prompt_embeds = negative_prompt_embeds[0]
                negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2]
                negative_prompt_embeds_list.append(negative_prompt_embeds)
            negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1)
        prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
        bs_embed, seq_len, _ = prompt_embeds.shape
        # duplicate text embeddings for each generation per prompt, using mps friendly method
        prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
        prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
        if do_classifier_free_guidance:
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
        # Pooled embeddings are 2D (batch, dim); tile them the same way.
        pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
            bs_embed * num_images_per_prompt, -1
        )
        if do_classifier_free_guidance:
            negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
                bs_embed * num_images_per_prompt, -1
            )
        return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
def prepare_extra_step_kwargs(self, generator, eta):
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
extra_step_kwargs = {}
if accepts_eta:
extra_step_kwargs["eta"] = eta
# check if the scheduler accepts generator
accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
if accepts_generator:
extra_step_kwargs["generator"] = generator
return extra_step_kwargs
    def check_inputs(
        self,
        prompt,
        prompt_2,
        height,
        width,
        callback_steps,
        negative_prompt=None,
        negative_prompt_2=None,
        prompt_embeds=None,
        negative_prompt_embeds=None,
        pooled_prompt_embeds=None,
        negative_pooled_prompt_embeds=None,
    ):
        """
        Validate the user-facing `__call__` arguments and raise early on misuse.

        Checks: `height`/`width` divisibility by 8, positive integer `callback_steps`,
        mutual exclusivity of `prompt`/`prompt_2` with `prompt_embeds` (and the negative
        variants), prompt types, matching embedding shapes, and that pooled embeddings
        accompany raw embeddings.

        Raises:
            ValueError: on any failed check.
        """
        # Latent-space models require spatial dims divisible by the VAE downscale factor.
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )
        # Prompts and precomputed prompt embeddings are mutually exclusive.
        if prompt is not None and prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
                " only forward one of the two."
            )
        elif prompt_2 is not None and prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
                " only forward one of the two."
            )
        elif prompt is None and prompt_embeds is None:
            raise ValueError(
                "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
            )
        elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
        elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)):
            raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}")
        # Same exclusivity rule for the negative prompts.
        if negative_prompt is not None and negative_prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
                f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
            )
        elif negative_prompt_2 is not None and negative_prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:"
                f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
            )
        if prompt_embeds is not None and negative_prompt_embeds is not None:
            if prompt_embeds.shape != negative_prompt_embeds.shape:
                raise ValueError(
                    "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
                    f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
                    f" {negative_prompt_embeds.shape}."
                )
        # SDXL conditioning also needs the pooled embeddings whenever raw ones are given.
        if prompt_embeds is not None and pooled_prompt_embeds is None:
            raise ValueError(
                "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`."
            )
        if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None:
            raise ValueError(
                "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`."
            )
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
if isinstance(generator, list) and len(generator) != batch_size:
raise ValueError(
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
)
if latents is None:
latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
else:
latents = latents.to(device)
# scale the initial noise by the standard deviation required by the scheduler
latents = latents * self.scheduler.init_noise_sigma
return latents
def _get_add_time_ids(self, original_size, crops_coords_top_left, target_size, dtype):
add_time_ids = list(original_size + crops_coords_top_left + target_size)
passed_add_embed_dim = (
self.unet.config.addition_time_embed_dim * len(add_time_ids) + self.text_encoder_2.config.projection_dim
)
expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features
if expected_add_embed_dim != passed_add_embed_dim:
raise ValueError(
f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`."
)
add_time_ids = torch.tensor([add_time_ids], dtype=dtype)
return add_time_ids
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae
def upcast_vae(self):
dtype = self.vae.dtype
self.vae.to(dtype=torch.float32)
use_torch_2_0_or_xformers = isinstance(
self.vae.decoder.mid_block.attentions[0].processor,
(
AttnProcessor2_0,
XFormersAttnProcessor,
LoRAXFormersAttnProcessor,
LoRAAttnProcessor2_0,
),
)
# if xformers or torch_2_0 is used attention block does not need
# to be in float32 which can save lots of memory
if use_torch_2_0_or_xformers:
self.vae.post_quant_conv.to(dtype)
self.vae.decoder.conv_in.to(dtype)
self.vae.decoder.mid_block.to(dtype)
@torch.no_grad()
@replace_example_docstring(EXAMPLE_DOC_STRING)
def __call__(
self,
prompt: Union[str, List[str]] = None,
prompt_2: Optional[Union[str, List[str]]] = None,
height: Optional[int] = None,
width: Optional[int] = None,
num_inference_steps: int = 50,
denoising_end: Optional[float] = None,
guidance_scale: float = 5.0,
negative_prompt: Optional[Union[str, List[str]]] = None,
negative_prompt_2: Optional[Union[str, List[str]]] = None,
num_images_per_prompt: Optional[int] = 1,
eta: float = 0.0,
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
latents: Optional[torch.FloatTensor] = None,
prompt_embeds: Optional[torch.FloatTensor] = None,
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
output_type: Optional[str] = "pil",
return_dict: bool = True,
callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
callback_steps: int = 1,
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
guidance_rescale: float = 0.0,
original_size: Optional[Tuple[int, int]] = None,
crops_coords_top_left: Tuple[int, int] = (0, 0),
target_size: Optional[Tuple[int, int]] = None,
resolutions_list: Optional[Union[int, List[int]]] = None,
restart_steps: Optional[Union[int, List[int]]] = None,
cosine_scale: float = 2.0,
dilate_tau: int = 35,
fast_mode: bool = False,
):
r"""
Function invoked when calling the pipeline for generation.
Args:
prompt (`str` or `List[str]`, *optional*):
The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`.
instead.
prompt_2 (`str` or `List[str]`, *optional*):
The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
used in both text-encoders
height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
The height in pixels of the generated image.
width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
The width in pixels of the generated image.
num_inference_steps (`int`, *optional*, defaults to 50):
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
expense of slower inference.
denoising_end (`float`, *optional*):
When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be
completed before it is intentionally prematurely terminated. As a result, the returned sample will
still retain a substantial amount of noise as determined by the discrete timesteps selected by the
scheduler. The denoising_end parameter should ideally be utilized when this pipeline forms a part of a
"Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image
Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output)
guidance_scale (`float`, *optional*, defaults to 5.0):
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
`guidance_scale` is defined as `w` of equation 2. of [Imagen
Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
usually at the expense of lower image quality.
negative_prompt (`str` or `List[str]`, *optional*):
The prompt or prompts not to guide the image generation. If not defined, one has to pass
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
less than `1`).
negative_prompt_2 (`str` or `List[str]`, *optional*):
The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
`text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
num_images_per_prompt (`int`, *optional*, defaults to 1):
The number of images to generate per prompt.
eta (`float`, *optional*, defaults to 0.0):
Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
[`schedulers.DDIMScheduler`], will be ignored for others.
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
| python | Apache-2.0 | 7161bfcf2c180818db6a9f04f65e5b3963582ac7 | 2026-01-05T07:14:43.712841Z | true |
ali-vilab/FreeScale | https://github.com/ali-vilab/FreeScale/blob/7161bfcf2c180818db6a9f04f65e5b3963582ac7/free_lunch_utils.py | free_lunch_utils.py | import torch
import torch.fft as fft
from diffusers.models.unet_2d_condition import logger
from diffusers.utils import is_torch_version
from typing import Any, Dict, List, Optional, Tuple, Union
""" Borrowed from https://github.com/ChenyangSi/FreeU/blob/main/demo/free_lunch_utils.py
"""
def isinstance_str(x: object, cls_name: str):
    """
    Return True if any class *named* ``cls_name`` appears in ``x``'s class ancestry.

    Matches purely by class name, so it does not need access to the class's
    implementation — useful when monkey-patching third-party modules.
    """
    return any(ancestor.__name__ == cls_name for ancestor in x.__class__.__mro__)
def Fourier_filter(x, threshold, scale):
    """
    Scale the low-frequency band of ``x`` in Fourier space (FreeU skip-feature filter).

    A centered 2D FFT is taken over the last two dims; frequencies inside a
    ``2*threshold``-sized square around the spectrum center are multiplied by ``scale``
    and the result is transformed back.

    Args:
        x: Tensor of shape (B, C, H, W); computation runs in float32, the original
            dtype is restored on return.
        threshold: Half-size (in frequency bins) of the low-frequency square.
        scale: Multiplier applied to the low-frequency band.

    Returns:
        Tensor with the same shape and dtype as ``x``.
    """
    dtype = x.dtype
    x = x.type(torch.float32)
    # FFT
    x_freq = fft.fftn(x, dim=(-2, -1))
    x_freq = fft.fftshift(x_freq, dim=(-2, -1))
    B, C, H, W = x_freq.shape
    # Build the mask on the input's device instead of hard-coding .cuda(), so the
    # filter also works on CPU and avoids an implicit cross-device transfer.
    mask = torch.ones((B, C, H, W), device=x.device, dtype=torch.float32)
    crow, ccol = H // 2, W // 2
    mask[..., crow - threshold:crow + threshold, ccol - threshold:ccol + threshold] = scale
    x_freq = x_freq * mask
    # IFFT
    x_freq = fft.ifftshift(x_freq, dim=(-2, -1))
    x_filtered = fft.ifftn(x_freq, dim=(-2, -1)).real
    x_filtered = x_filtered.type(dtype)
    return x_filtered
def register_upblock2d(model):
    """
    Replace the ``forward`` of every plain ``UpBlock2D`` in ``model.unet.up_blocks``
    with an equivalent closure (baseline hook, no FreeU modifications).
    """
    def make_forward(block):
        def forward(hidden_states, res_hidden_states_tuple, temb=None, upsample_size=None):
            for resnet in block.resnets:
                # Consume skip-connection features from the end of the tuple.
                res_hidden_states = res_hidden_states_tuple[-1]
                res_hidden_states_tuple = res_hidden_states_tuple[:-1]
                hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)
                if block.training and block.gradient_checkpointing:
                    def create_custom_forward(module):
                        def custom_forward(*inputs):
                            return module(*inputs)
                        return custom_forward
                    if is_torch_version(">=", "1.11.0"):
                        hidden_states = torch.utils.checkpoint.checkpoint(
                            create_custom_forward(resnet), hidden_states, temb, use_reentrant=False
                        )
                    else:
                        hidden_states = torch.utils.checkpoint.checkpoint(
                            create_custom_forward(resnet), hidden_states, temb
                        )
                else:
                    hidden_states = resnet(hidden_states, temb)
            if block.upsamplers is not None:
                for upsampler in block.upsamplers:
                    hidden_states = upsampler(hidden_states, upsample_size)
            return hidden_states
        return forward

    for upsample_block in model.unet.up_blocks:
        if isinstance_str(upsample_block, "UpBlock2D"):
            upsample_block.forward = make_forward(upsample_block)
def register_free_upblock2d(model, b1=1.2, b2=1.4, s1=0.9, s2=0.2):
    """
    Patch every plain ``UpBlock2D`` in ``model.unet.up_blocks`` with a FreeU forward.

    FreeU re-weights half of the backbone channels by ``b1``/``b2`` and damps the skip
    connections' low frequencies via ``Fourier_filter`` with ``s1``/``s2`` on the first
    two (coarsest) up stages. The scaling factors are stored as attributes on each
    patched block so the closure can read them at call time.

    Args:
        model: Pipeline-like object exposing ``model.unet.up_blocks``.
        b1: Backbone feature multiplier for the 1280-channel stage.
        b2: Backbone feature multiplier for the 640-channel stage.
        s1: Fourier low-frequency scale for the skip features of the first stage.
        s2: Fourier low-frequency scale for the skip features of the second stage.
    """
    def up_forward(self):
        def forward(hidden_states, res_hidden_states_tuple, temb=None, upsample_size=None):
            for resnet in self.resnets:
                # pop res hidden states
                res_hidden_states = res_hidden_states_tuple[-1]
                res_hidden_states_tuple = res_hidden_states_tuple[:-1]
                #print(f"in free upblock2d, hidden states shape: {hidden_states.shape}")
                # --------------- FreeU code -----------------------
                # Only operate on the first two stages
                # NOTE(review): the 1280/640 channel counts assume SD/SDXL UNet widths —
                # confirm before reusing with a differently sized UNet.
                if hidden_states.shape[1] == 1280:
                    # In-place scaling of the first half of the backbone channels.
                    hidden_states[:,:640] = hidden_states[:,:640] * self.b1
                    res_hidden_states = Fourier_filter(res_hidden_states, threshold=1, scale=self.s1)
                if hidden_states.shape[1] == 640:
                    hidden_states[:,:320] = hidden_states[:,:320] * self.b2
                    res_hidden_states = Fourier_filter(res_hidden_states, threshold=1, scale=self.s2)
                # ---------------------------------------------------------
                hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)
                if self.training and self.gradient_checkpointing:
                    def create_custom_forward(module):
                        def custom_forward(*inputs):
                            return module(*inputs)
                        return custom_forward
                    if is_torch_version(">=", "1.11.0"):
                        hidden_states = torch.utils.checkpoint.checkpoint(
                            create_custom_forward(resnet), hidden_states, temb, use_reentrant=False
                        )
                    else:
                        hidden_states = torch.utils.checkpoint.checkpoint(
                            create_custom_forward(resnet), hidden_states, temb
                        )
                else:
                    hidden_states = resnet(hidden_states, temb)
            if self.upsamplers is not None:
                for upsampler in self.upsamplers:
                    hidden_states = upsampler(hidden_states, upsample_size)
            return hidden_states
        return forward
    for i, upsample_block in enumerate(model.unet.up_blocks):
        if isinstance_str(upsample_block, "UpBlock2D"):
            upsample_block.forward = up_forward(upsample_block)
            # Stash the FreeU factors on the block for the closure above.
            setattr(upsample_block, 'b1', b1)
            setattr(upsample_block, 'b2', b2)
            setattr(upsample_block, 's1', s1)
            setattr(upsample_block, 's2', s2)
def register_crossattn_upblock2d(model):
    """
    Replace the ``forward`` of every ``CrossAttnUpBlock2D`` in ``model.unet.up_blocks``
    with an equivalent closure (baseline hook, no FreeU modifications).
    """
    def up_forward(self):
        def forward(
            hidden_states: torch.FloatTensor,
            res_hidden_states_tuple: Tuple[torch.FloatTensor, ...],
            temb: Optional[torch.FloatTensor] = None,
            encoder_hidden_states: Optional[torch.FloatTensor] = None,
            cross_attention_kwargs: Optional[Dict[str, Any]] = None,
            upsample_size: Optional[int] = None,
            attention_mask: Optional[torch.FloatTensor] = None,
            encoder_attention_mask: Optional[torch.FloatTensor] = None,
        ):
            for resnet, attn in zip(self.resnets, self.attentions):
                # pop res hidden states
                #print(f"in crossatten upblock2d, hidden states shape: {hidden_states.shape}")
                res_hidden_states = res_hidden_states_tuple[-1]
                res_hidden_states_tuple = res_hidden_states_tuple[:-1]
                hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)
                if self.training and self.gradient_checkpointing:
                    # Recompute activations in backward to trade compute for memory.
                    def create_custom_forward(module, return_dict=None):
                        def custom_forward(*inputs):
                            if return_dict is not None:
                                return module(*inputs, return_dict=return_dict)
                            else:
                                return module(*inputs)
                        return custom_forward
                    ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {}
                    hidden_states = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(resnet),
                        hidden_states,
                        temb,
                        **ckpt_kwargs,
                    )
                    hidden_states = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(attn, return_dict=False),
                        hidden_states,
                        encoder_hidden_states,
                        None, # timestep
                        None, # class_labels
                        cross_attention_kwargs,
                        attention_mask,
                        encoder_attention_mask,
                        **ckpt_kwargs,
                    )[0]
                else:
                    hidden_states = resnet(hidden_states, temb)
                    hidden_states = attn(
                        hidden_states,
                        encoder_hidden_states=encoder_hidden_states,
                        cross_attention_kwargs=cross_attention_kwargs,
                        attention_mask=attention_mask,
                        encoder_attention_mask=encoder_attention_mask,
                        return_dict=False,
                    )[0]
            if self.upsamplers is not None:
                for upsampler in self.upsamplers:
                    hidden_states = upsampler(hidden_states, upsample_size)
            return hidden_states
        return forward
    for i, upsample_block in enumerate(model.unet.up_blocks):
        if isinstance_str(upsample_block, "CrossAttnUpBlock2D"):
            upsample_block.forward = up_forward(upsample_block)
def register_free_crossattn_upblock2d(model, b1=1.2, b2=1.4, s1=0.9, s2=0.2):
    """
    Patch every ``CrossAttnUpBlock2D`` in ``model.unet.up_blocks`` with a FreeU forward.

    Same FreeU re-weighting as ``register_free_upblock2d`` (backbone channels scaled
    by ``b1``/``b2``, skip low frequencies damped by ``s1``/``s2``), applied to the
    cross-attention up blocks. The factors are stored as attributes on each patched
    block so the closure can read them at call time.

    Args:
        model: Pipeline-like object exposing ``model.unet.up_blocks``.
        b1: Backbone feature multiplier for the 1280-channel stage.
        b2: Backbone feature multiplier for the 640-channel stage.
        s1: Fourier low-frequency scale for the skip features of the first stage.
        s2: Fourier low-frequency scale for the skip features of the second stage.
    """
    def up_forward(self):
        def forward(
            hidden_states: torch.FloatTensor,
            res_hidden_states_tuple: Tuple[torch.FloatTensor, ...],
            temb: Optional[torch.FloatTensor] = None,
            encoder_hidden_states: Optional[torch.FloatTensor] = None,
            cross_attention_kwargs: Optional[Dict[str, Any]] = None,
            upsample_size: Optional[int] = None,
            attention_mask: Optional[torch.FloatTensor] = None,
            encoder_attention_mask: Optional[torch.FloatTensor] = None,
        ):
            for resnet, attn in zip(self.resnets, self.attentions):
                # pop res hidden states
                #print(f"in free crossatten upblock2d, hidden states shape: {hidden_states.shape}")
                res_hidden_states = res_hidden_states_tuple[-1]
                res_hidden_states_tuple = res_hidden_states_tuple[:-1]
                # --------------- FreeU code -----------------------
                # Only operate on the first two stages
                # NOTE(review): the 1280/640 channel counts assume SD/SDXL UNet widths —
                # confirm before reusing with a differently sized UNet.
                if hidden_states.shape[1] == 1280:
                    hidden_states[:,:640] = hidden_states[:,:640] * self.b1
                    res_hidden_states = Fourier_filter(res_hidden_states, threshold=1, scale=self.s1)
                if hidden_states.shape[1] == 640:
                    hidden_states[:,:320] = hidden_states[:,:320] * self.b2
                    res_hidden_states = Fourier_filter(res_hidden_states, threshold=1, scale=self.s2)
                # ---------------------------------------------------------
                hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)
                if self.training and self.gradient_checkpointing:
                    def create_custom_forward(module, return_dict=None):
                        def custom_forward(*inputs):
                            if return_dict is not None:
                                return module(*inputs, return_dict=return_dict)
                            else:
                                return module(*inputs)
                        return custom_forward
                    ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {}
                    hidden_states = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(resnet),
                        hidden_states,
                        temb,
                        **ckpt_kwargs,
                    )
                    hidden_states = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(attn, return_dict=False),
                        hidden_states,
                        encoder_hidden_states,
                        None, # timestep
                        None, # class_labels
                        cross_attention_kwargs,
                        attention_mask,
                        encoder_attention_mask,
                        **ckpt_kwargs,
                    )[0]
                else:
                    hidden_states = resnet(hidden_states, temb)
                    # hidden_states = attn(
                    #     hidden_states,
                    #     encoder_hidden_states=encoder_hidden_states,
                    #     cross_attention_kwargs=cross_attention_kwargs,
                    #     encoder_attention_mask=encoder_attention_mask,
                    #     return_dict=False,
                    # )[0]
                    # NOTE(review): unlike register_crossattn_upblock2d, this call drops
                    # attention_mask/encoder_attention_mask (see commented-out variant
                    # above) — presumably intentional here, but confirm.
                    hidden_states = attn(
                        hidden_states,
                        encoder_hidden_states=encoder_hidden_states,
                        cross_attention_kwargs=cross_attention_kwargs,
                    )[0]
            if self.upsamplers is not None:
                for upsampler in self.upsamplers:
                    hidden_states = upsampler(hidden_states, upsample_size)
            return hidden_states
        return forward
    for i, upsample_block in enumerate(model.unet.up_blocks):
        if isinstance_str(upsample_block, "CrossAttnUpBlock2D"):
            upsample_block.forward = up_forward(upsample_block)
            # Stash the FreeU factors on the block for the closure above.
            setattr(upsample_block, 'b1', b1)
            setattr(upsample_block, 'b2', b2)
            setattr(upsample_block, 's1', s1)
            setattr(upsample_block, 's2', s2)
ali-vilab/FreeScale | https://github.com/ali-vilab/FreeScale/blob/7161bfcf2c180818db6a9f04f65e5b3963582ac7/run_sdxl.py | run_sdxl.py | import os
import torch
from PIL import Image
from pipeline_sdxl import StableDiffusionXLPipeline
from utils import load_prompts
from free_lunch_utils import register_free_upblock2d, register_free_crossattn_upblock2d
# ---- configuration ----------------------------------------------------------
model_ckpt = "stabilityai/stable-diffusion-xl-base-1.0"
prompts_file = 'prompts/imgen.txt'
prompts = load_prompts(prompts_file)
# prompts = ['Astronaut on Mars During sunset.']
negative_prompt = "blurry, ugly, duplicate, poorly drawn, deformed, mosaic"
folder_name = 'release_4k_imgen'
height=1024
width=1024
# 0 (falsy) enables the FreeU patching below; set to 1 to run the vanilla UNet.
disable_freeu = 0
# ---- pipeline setup ---------------------------------------------------------
pipe = StableDiffusionXLPipeline.from_pretrained(model_ckpt, torch_dtype=torch.float16)
pipe = pipe.to("cuda")
if not disable_freeu:
    # Patch the UNet up-blocks with FreeU backbone/skip re-weighting.
    register_free_upblock2d(pipe, b1=1.1, b2=1.2, s1=0.6, s2=0.4)
    register_free_crossattn_upblock2d(pipe, b1=1.1, b2=1.2, s1=0.6, s2=0.4)
# Fixed seed so repeated runs produce the same images.
generator = torch.Generator(device='cuda')
generator = generator.manual_seed(123)
os.makedirs(folder_name, exist_ok=True)
# ---- generation loop: one image per prompt ----------------------------------
for index, prompt in enumerate(prompts):
    print("prompt {}:".format(index))
    print(prompt)
    image = pipe(prompt, negative_prompt=negative_prompt, generator=generator,
                num_inference_steps=50, guidance_scale=7.5,
                height=height, width=width,
                ).images[0]
    image.save("{}/img{}_{}.png".format(folder_name, index, height))
def load_prompts(prompt_file):
    """
    Read prompts from a text file, one prompt per line.

    Lines are stripped of surrounding whitespace; blank lines are skipped.

    Args:
        prompt_file: Path to a text file containing one prompt per line.

    Returns:
        list[str]: Stripped, non-empty lines in file order.
    """
    # Use a context manager so the handle is closed even if reading raises
    # (the original opened the file without `with` and leaked it on error).
    with open(prompt_file, 'r') as f:
        return [line.strip() for line in f if line.strip()]
ali-vilab/FreeScale | https://github.com/ali-vilab/FreeScale/blob/7161bfcf2c180818db6a9f04f65e5b3963582ac7/scale_attention.py | scale_attention.py | from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from einops import rearrange
import random
def gaussian_kernel(kernel_size=3, sigma=1.0, channels=3):
    """
    Build a depthwise 3D Gaussian kernel of shape (channels, 1, k, k, k).

    The kernel is the outer product of a normalized 1D Gaussian with itself along
    three axes, repeated once per channel for use with grouped ``conv3d``.
    """
    coords = torch.arange(kernel_size)
    center = (kernel_size - 1) / 2
    g1d = torch.exp(-(coords - center) ** 2 / (2 * sigma ** 2))
    g1d = g1d / g1d.sum()
    g3d = g1d[:, None, None] * g1d[None, :, None] * g1d[None, None, :]
    return g3d[None, None].repeat(channels, 1, 1, 1, 1)
def gaussian_filter(latents, kernel_size=3, sigma=1.0):
    """
    Depthwise 3D Gaussian blur of a (C, D, H, W) latent tensor.

    A per-channel Gaussian kernel is built on the fly (matching the input's device
    and dtype) and applied with grouped ``conv3d`` using same-size padding, so the
    output shape equals the input shape.
    """
    num_channels = latents.shape[0]
    weight = gaussian_kernel(kernel_size, sigma, num_channels).to(latents.device, latents.dtype)
    smoothed = F.conv3d(latents.unsqueeze(0), weight, padding=kernel_size // 2, groups=num_channels)
    return smoothed[0]
def get_views(height, width, h_window_size=128, w_window_size=128, scale_factor=8, random_jitter=True):
    """Tile a (height, width) latent grid with half-overlapping windows.

    Args:
        height, width: grid size in latent units.
        h_window_size, w_window_size: window size in pixel units; divided by
            `scale_factor` to get the latent-space window.
        scale_factor: pixels per latent unit at the current UNet level.
        random_jitter: when True (the historical default behavior), each window
            is randomly shifted by up to 1/8 of its size, and all coordinates
            are expressed on a canvas zero-padded by that same 1/8 margin on
            every side (callers pad their tensors accordingly). Pass False for
            a deterministic, unpadded tiling.

    Returns:
        List of (h_start, h_end, w_start, w_end) tuples.
    """
    height = int(height)
    width = int(width)
    # Windows overlap by half: stride is half the window size (pixel units).
    h_window_stride = h_window_size // 2
    w_window_stride = w_window_size // 2
    # Convert pixel-space sizes/strides to latent-space units.
    h_window_size = int(h_window_size / scale_factor)
    w_window_size = int(w_window_size / scale_factor)
    h_window_stride = int(h_window_stride / scale_factor)
    w_window_stride = int(w_window_stride / scale_factor)
    # Windows needed per axis; the last one is clamped back to the edge below.
    num_blocks_height = int((height - h_window_size) / h_window_stride - 1e-6) + 2 if height > h_window_size else 1
    num_blocks_width = int((width - w_window_size) / w_window_stride - 1e-6) + 2 if width > w_window_size else 1
    total_num_blocks = int(num_blocks_height * num_blocks_width)
    # Jitter amplitude is 1/8 of the latent-space window; loop-invariant, so hoisted.
    h_jitter_range = h_window_size // 8
    w_jitter_range = w_window_size // 8
    views = []
    for i in range(total_num_blocks):
        h_start = int((i // num_blocks_width) * h_window_stride)
        h_end = h_start + h_window_size
        w_start = int((i % num_blocks_width) * w_window_stride)
        w_end = w_start + w_window_size
        # Clamp windows that run past the bottom/right edge back inside the grid.
        if h_end > height:
            h_start = int(h_start + height - h_end)
            h_end = int(height)
        if w_end > width:
            w_start = int(w_start + width - w_end)
            w_end = int(width)
        if h_start < 0:
            h_end = int(h_end - h_start)
            h_start = 0
        if w_start < 0:
            w_end = int(w_end - w_start)
            w_start = 0
        if random_jitter:
            h_jitter = 0
            w_jitter = 0
            # Interior edges jitter both ways; windows touching a border may
            # only move inward so they never leave the padded canvas.
            if (w_start != 0) and (w_end != width):
                w_jitter = random.randint(-w_jitter_range, w_jitter_range)
            elif (w_start == 0) and (w_end != width):
                w_jitter = random.randint(-w_jitter_range, 0)
            elif (w_start != 0) and (w_end == width):
                w_jitter = random.randint(0, w_jitter_range)
            if (h_start != 0) and (h_end != height):
                h_jitter = random.randint(-h_jitter_range, h_jitter_range)
            elif (h_start == 0) and (h_end != height):
                h_jitter = random.randint(-h_jitter_range, 0)
            elif (h_start != 0) and (h_end == height):
                h_jitter = random.randint(0, h_jitter_range)
            # Coordinates are relative to a canvas padded by (h_jitter_range,
            # w_jitter_range) on each side, so shift everything by the margin.
            h_start += (h_jitter + h_jitter_range)
            h_end += (h_jitter + h_jitter_range)
            w_start += (w_jitter + w_jitter_range)
            w_end += (w_jitter + w_jitter_range)
        views.append((h_start, h_end, w_start, w_end))
    return views
def scale_forward(
    self,
    hidden_states: torch.FloatTensor,
    attention_mask: Optional[torch.FloatTensor] = None,
    encoder_hidden_states: Optional[torch.FloatTensor] = None,
    encoder_attention_mask: Optional[torch.FloatTensor] = None,
    timestep: Optional[torch.LongTensor] = None,
    cross_attention_kwargs: Dict[str, Any] = None,
    class_labels: Optional[torch.LongTensor] = None,
):
    """Transformer-block forward with FreeScale's fused local/global self-attention.

    `hidden_states` is a flattened token sequence of shape (B', h*w, d); B' is
    presumably batch (x heads) — confirm against the attention implementation.
    Self-attention is computed twice: locally over overlapping 128-px windows
    (`get_views`) and globally (full or dilated attention, see `self.fast_mode`),
    then fused as: blurred local output + high-frequency residual of the global
    output. The remaining steps (ada-norm, GLIGEN, cross-attention, feed-forward)
    mirror diffusers' BasicTransformerBlock.forward.
    """
    # Notice that normalization is always applied before the real computation in the following blocks.
    # Number of 1024-px tiles per axis at the current target resolution.
    if self.current_hw:
        current_scale_num_h, current_scale_num_w = self.current_hw[0] // 1024, self.current_hw[1] // 1024
    else:
        current_scale_num_h, current_scale_num_w = 1, 1
    # 0. Self-Attention
    if self.use_ada_layer_norm:
        norm_hidden_states = self.norm1(hidden_states, timestep)
    elif self.use_ada_layer_norm_zero:
        # adaLN-Zero: norm1 also emits per-sample gates/shifts for attn and MLP.
        norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(
            hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype
        )
    else:
        norm_hidden_states = self.norm1(hidden_states)
    # 2. Prepare GLIGEN inputs
    cross_attention_kwargs = cross_attention_kwargs.copy() if cross_attention_kwargs is not None else {}
    gligen_kwargs = cross_attention_kwargs.pop("gligen", None)
    # Recover the 2-D latent geometry (latent_h x latent_w) from the flattened
    # sequence length and the target aspect ratio.
    ratio_hw = current_scale_num_h / current_scale_num_w
    latent_h = int((norm_hidden_states.shape[1] * ratio_hw) ** 0.5)
    latent_w = int(latent_h / ratio_hw)
    # Pixels per latent token at this UNet level (128 latents per 1024-px tile).
    scale_factor = 128 * current_scale_num_h / latent_h
    # Window size in pixel units; the shorter axis shrinks for non-square targets.
    if ratio_hw > 1:
        sub_h = 128
        sub_w = int(128 / ratio_hw)
    else:
        sub_h = int(128 * ratio_hw)
        sub_w = 128
    # Padding margin (latent units) matching get_views' jitter range (1/8 window).
    h_jitter_range = int(sub_h / scale_factor // 8)
    w_jitter_range = int(sub_w / scale_factor // 8)
    views = get_views(latent_h, latent_w, sub_h, sub_w, scale_factor = scale_factor)
    current_scale_num = max(current_scale_num_h, current_scale_num_w)
    # Phase offsets for the dilated (strided) global sampling in fast mode.
    global_views = [[h, w] for h in range(current_scale_num_h) for w in range(current_scale_num_w)]
    # fast_mode swaps the full-resolution global pass for cheaper dilated attention.
    if self.fast_mode:
        four_window = False
        fourg_window = True
    else:
        four_window = True
        fourg_window = False
    if four_window:
        # ---- local pass: windowed attention, accumulated/averaged on a padded canvas ----
        norm_hidden_states_ = rearrange(norm_hidden_states, 'bh (h w) d -> bh h w d', h = latent_h)
        # F.pad pads last-dim-first: d untouched, then w and h by the jitter margins.
        norm_hidden_states_ = F.pad(norm_hidden_states_, (0, 0, w_jitter_range, w_jitter_range, h_jitter_range, h_jitter_range), 'constant', 0)
        value = torch.zeros_like(norm_hidden_states_)
        count = torch.zeros_like(norm_hidden_states_)
        for index, view in enumerate(views):
            h_start, h_end, w_start, w_end = view
            local_states = norm_hidden_states_[:, h_start:h_end, w_start:w_end, :]
            local_states = rearrange(local_states, 'bh h w d -> bh (h w) d')
            local_output = self.attn1(
                local_states,
                encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
                attention_mask=attention_mask,
                **cross_attention_kwargs,
            )
            local_output = rearrange(local_output, 'bh (h w) d -> bh h w d', h = int(sub_h / scale_factor))
            value[:, h_start:h_end, w_start:w_end, :] += local_output * 1
            count[:, h_start:h_end, w_start:w_end, :] += 1
        # Strip the jitter margin, then average overlapping window contributions.
        value = value[:, h_jitter_range:-h_jitter_range, w_jitter_range:-w_jitter_range, :]
        count = count[:, h_jitter_range:-h_jitter_range, w_jitter_range:-w_jitter_range, :]
        attn_output = torch.where(count>0, value/count, value)
        gaussian_local = gaussian_filter(attn_output, kernel_size=(2*current_scale_num-1), sigma=1.0)
        # ---- global pass: one full self-attention over the whole sequence ----
        attn_output_global = self.attn1(
            norm_hidden_states,
            encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
            attention_mask=attention_mask,
            **cross_attention_kwargs,
        )
        attn_output_global = rearrange(attn_output_global, 'bh (h w) d -> bh h w d', h = latent_h)
        gaussian_global = gaussian_filter(attn_output_global, kernel_size=(2*current_scale_num-1), sigma=1.0)
        # Fuse: blurred local result + (global result minus its blur).
        attn_output = gaussian_local + (attn_output_global - gaussian_global)
        attn_output = rearrange(attn_output, 'bh h w d -> bh (h w) d')
    elif fourg_window:
        # ---- local pass (identical to above) + dilated global pass ----
        norm_hidden_states = rearrange(norm_hidden_states, 'bh (h w) d -> bh h w d', h = latent_h)
        norm_hidden_states_ = F.pad(norm_hidden_states, (0, 0, w_jitter_range, w_jitter_range, h_jitter_range, h_jitter_range), 'constant', 0)
        value = torch.zeros_like(norm_hidden_states_)
        count = torch.zeros_like(norm_hidden_states_)
        for index, view in enumerate(views):
            h_start, h_end, w_start, w_end = view
            local_states = norm_hidden_states_[:, h_start:h_end, w_start:w_end, :]
            local_states = rearrange(local_states, 'bh h w d -> bh (h w) d')
            local_output = self.attn1(
                local_states,
                encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
                attention_mask=attention_mask,
                **cross_attention_kwargs,
            )
            local_output = rearrange(local_output, 'bh (h w) d -> bh h w d', h = int(sub_h / scale_factor))
            value[:, h_start:h_end, w_start:w_end, :] += local_output * 1
            count[:, h_start:h_end, w_start:w_end, :] += 1
        value = value[:, h_jitter_range:-h_jitter_range, w_jitter_range:-w_jitter_range, :]
        count = count[:, h_jitter_range:-h_jitter_range, w_jitter_range:-w_jitter_range, :]
        attn_output = torch.where(count>0, value/count, value)
        gaussian_local = gaussian_filter(attn_output, kernel_size=(2*current_scale_num-1), sigma=1.0)
        # Dilated global pass: attend over strided sub-grids (one per phase
        # offset) and scatter the results back — cheaper than full attention.
        value = torch.zeros_like(norm_hidden_states)
        count = torch.zeros_like(norm_hidden_states)
        for index, global_view in enumerate(global_views):
            h, w = global_view
            global_states = norm_hidden_states[:, h::current_scale_num_h, w::current_scale_num_w, :]
            global_states = rearrange(global_states, 'bh h w d -> bh (h w) d')
            global_output = self.attn1(
                global_states,
                encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
                attention_mask=attention_mask,
                **cross_attention_kwargs,
            )
            # NOTE(review): assumes each strided sub-grid is square — confirm for
            # non-square targets.
            global_output = rearrange(global_output, 'bh (h w) d -> bh h w d', h = int(global_output.shape[1] ** 0.5))
            value[:, h::current_scale_num_h, w::current_scale_num_w, :] += global_output * 1
            count[:, h::current_scale_num_h, w::current_scale_num_w, :] += 1
        attn_output_global = torch.where(count>0, value/count, value)
        gaussian_global = gaussian_filter(attn_output_global, kernel_size=(2*current_scale_num-1), sigma=1.0)
        # Fuse: blurred local result + (global result minus its blur).
        attn_output = gaussian_local + (attn_output_global - gaussian_global)
        attn_output = rearrange(attn_output, 'bh h w d -> bh (h w) d')
    else:
        # Plain self-attention (no scale fusion) — currently unreachable since
        # fast_mode selects one of the two branches above.
        attn_output = self.attn1(
            norm_hidden_states,
            encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
            attention_mask=attention_mask,
            **cross_attention_kwargs,
        )
    if self.use_ada_layer_norm_zero:
        attn_output = gate_msa.unsqueeze(1) * attn_output
    hidden_states = attn_output + hidden_states
    # 2.5 GLIGEN Control
    if gligen_kwargs is not None:
        hidden_states = self.fuser(hidden_states, gligen_kwargs["objs"])
    # 2.5 ends
    # 3. Cross-Attention
    if self.attn2 is not None:
        norm_hidden_states = (
            self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)
        )
        attn_output = self.attn2(
            norm_hidden_states,
            encoder_hidden_states=encoder_hidden_states,
            attention_mask=encoder_attention_mask,
            **cross_attention_kwargs,
        )
        hidden_states = attn_output + hidden_states
    # 4. Feed-forward
    norm_hidden_states = self.norm3(hidden_states)
    if self.use_ada_layer_norm_zero:
        norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
    if self._chunk_size is not None:
        # "feed_forward_chunk_size" can be used to save memory
        if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
            raise ValueError(
                f"`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`."
            )
        num_chunks = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
        ff_output = torch.cat(
            [
                self.ff(hid_slice)
                for hid_slice in norm_hidden_states.chunk(num_chunks, dim=self._chunk_dim)
            ],
            dim=self._chunk_dim,
        )
    else:
        ff_output = self.ff(norm_hidden_states)
    if self.use_ada_layer_norm_zero:
        ff_output = gate_mlp.unsqueeze(1) * ff_output
    hidden_states = ff_output + hidden_states
    return hidden_states
def ori_forward(
    self,
    hidden_states: torch.FloatTensor,
    attention_mask: Optional[torch.FloatTensor] = None,
    encoder_hidden_states: Optional[torch.FloatTensor] = None,
    encoder_attention_mask: Optional[torch.FloatTensor] = None,
    timestep: Optional[torch.LongTensor] = None,
    cross_attention_kwargs: Dict[str, Any] = None,
    class_labels: Optional[torch.LongTensor] = None,
):
    """Unmodified transformer-block forward (diffusers BasicTransformerBlock
    style): norm1 -> self-attention -> optional GLIGEN fuser -> norm2 ->
    cross-attention -> norm3 -> (optionally chunked) feed-forward, each step
    with a residual connection. Kept as the non-FreeScale fallback path.
    """
    # Notice that normalization is always applied before the real computation in the following blocks.
    # 0. Self-Attention
    if self.use_ada_layer_norm:
        norm_hidden_states = self.norm1(hidden_states, timestep)
    elif self.use_ada_layer_norm_zero:
        # adaLN-Zero: norm1 also emits per-sample gates/shifts for attn and MLP.
        norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(
            hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype
        )
    else:
        norm_hidden_states = self.norm1(hidden_states)
    # 2. Prepare GLIGEN inputs
    cross_attention_kwargs = cross_attention_kwargs.copy() if cross_attention_kwargs is not None else {}
    gligen_kwargs = cross_attention_kwargs.pop("gligen", None)
    attn_output = self.attn1(
        norm_hidden_states,
        encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
        attention_mask=attention_mask,
        **cross_attention_kwargs,
    )
    if self.use_ada_layer_norm_zero:
        attn_output = gate_msa.unsqueeze(1) * attn_output
    hidden_states = attn_output + hidden_states
    # 2.5 GLIGEN Control
    if gligen_kwargs is not None:
        hidden_states = self.fuser(hidden_states, gligen_kwargs["objs"])
    # 2.5 ends
    # 3. Cross-Attention
    if self.attn2 is not None:
        norm_hidden_states = (
            self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)
        )
        attn_output = self.attn2(
            norm_hidden_states,
            encoder_hidden_states=encoder_hidden_states,
            attention_mask=encoder_attention_mask,
            **cross_attention_kwargs,
        )
        hidden_states = attn_output + hidden_states
    # 4. Feed-forward
    norm_hidden_states = self.norm3(hidden_states)
    if self.use_ada_layer_norm_zero:
        norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
    if self._chunk_size is not None:
        # "feed_forward_chunk_size" can be used to save memory
        if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
            raise ValueError(
                f"`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`."
            )
        num_chunks = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
        ff_output = torch.cat(
            [
                self.ff(hid_slice)
                for hid_slice in norm_hidden_states.chunk(num_chunks, dim=self._chunk_dim)
            ],
            dim=self._chunk_dim,
        )
    else:
        ff_output = self.ff(norm_hidden_states)
    if self.use_ada_layer_norm_zero:
        ff_output = gate_mlp.unsqueeze(1) * ff_output
    hidden_states = ff_output + hidden_states
    return hidden_states | python | Apache-2.0 | 7161bfcf2c180818db6a9f04f65e5b3963582ac7 | 2026-01-05T07:14:43.712841Z | false
ali-vilab/FreeScale | https://github.com/ali-vilab/FreeScale/blob/7161bfcf2c180818db6a9f04f65e5b3963582ac7/scale_attention_turbo.py | scale_attention_turbo.py | from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from einops import rearrange
import random
def gaussian_kernel(kernel_size=3, sigma=1.0, channels=3):
    """Return a normalized 3-D Gaussian kernel of shape (channels, 1, k, k, k)."""
    half_width = (kernel_size - 1) / 2
    axis = torch.arange(kernel_size) - half_width
    profile = torch.exp(-(axis ** 2) / (2 * sigma ** 2))
    profile = profile / profile.sum()
    # Separable product over the three spatial axes -> (k, k, k).
    volume = profile[:, None, None] * profile[None, :, None] * profile[None, None, :]
    # One depthwise filter per channel for a grouped conv3d.
    return volume[None, None].repeat(channels, 1, 1, 1, 1)
def gaussian_filter(latents, kernel_size=3, sigma=1.0):
    """Apply a depthwise 3-D Gaussian blur over the trailing three dims of `latents`."""
    n_ch = latents.shape[0]
    blur = gaussian_kernel(kernel_size, sigma, n_ch).to(latents.device, latents.dtype)
    # Add a batch dim for conv3d; 'same' padding and grouped channels keep shape.
    out = F.conv3d(latents.unsqueeze(0), blur, padding=kernel_size // 2, groups=n_ch)
    return out.squeeze(0)
def get_views(height, width, h_window_size=64, w_window_size=64, scale_factor=8, random_jitter=True):
    """Tile a (height, width) latent grid with half-overlapping 64-px windows.

    Args:
        height, width: grid size in latent units.
        h_window_size, w_window_size: window size in pixel units; divided by
            `scale_factor` to get the latent-space window.
        scale_factor: pixels per latent unit at the current UNet level.
        random_jitter: when True (the historical default behavior), each window
            is randomly shifted by up to 1/8 of its size, and all coordinates
            are expressed on a canvas zero-padded by that same 1/8 margin on
            every side (callers pad their tensors accordingly). Pass False for
            a deterministic, unpadded tiling.

    Returns:
        List of (h_start, h_end, w_start, w_end) tuples.
    """
    height = int(height)
    width = int(width)
    # Windows overlap by half: stride is half the window size (pixel units).
    h_window_stride = h_window_size // 2
    w_window_stride = w_window_size // 2
    # Convert pixel-space sizes/strides to latent-space units.
    h_window_size = int(h_window_size / scale_factor)
    w_window_size = int(w_window_size / scale_factor)
    h_window_stride = int(h_window_stride / scale_factor)
    w_window_stride = int(w_window_stride / scale_factor)
    # Windows needed per axis; the last one is clamped back to the edge below.
    num_blocks_height = int((height - h_window_size) / h_window_stride - 1e-6) + 2 if height > h_window_size else 1
    num_blocks_width = int((width - w_window_size) / w_window_stride - 1e-6) + 2 if width > w_window_size else 1
    total_num_blocks = int(num_blocks_height * num_blocks_width)
    # Jitter amplitude is 1/8 of the latent-space window; loop-invariant, so hoisted.
    h_jitter_range = h_window_size // 8
    w_jitter_range = w_window_size // 8
    views = []
    for i in range(total_num_blocks):
        h_start = int((i // num_blocks_width) * h_window_stride)
        h_end = h_start + h_window_size
        w_start = int((i % num_blocks_width) * w_window_stride)
        w_end = w_start + w_window_size
        # Clamp windows that run past the bottom/right edge back inside the grid.
        if h_end > height:
            h_start = int(h_start + height - h_end)
            h_end = int(height)
        if w_end > width:
            w_start = int(w_start + width - w_end)
            w_end = int(width)
        if h_start < 0:
            h_end = int(h_end - h_start)
            h_start = 0
        if w_start < 0:
            w_end = int(w_end - w_start)
            w_start = 0
        if random_jitter:
            h_jitter = 0
            w_jitter = 0
            # Interior edges jitter both ways; windows touching a border may
            # only move inward so they never leave the padded canvas.
            if (w_start != 0) and (w_end != width):
                w_jitter = random.randint(-w_jitter_range, w_jitter_range)
            elif (w_start == 0) and (w_end != width):
                w_jitter = random.randint(-w_jitter_range, 0)
            elif (w_start != 0) and (w_end == width):
                w_jitter = random.randint(0, w_jitter_range)
            if (h_start != 0) and (h_end != height):
                h_jitter = random.randint(-h_jitter_range, h_jitter_range)
            elif (h_start == 0) and (h_end != height):
                h_jitter = random.randint(-h_jitter_range, 0)
            elif (h_start != 0) and (h_end == height):
                h_jitter = random.randint(0, h_jitter_range)
            # Coordinates are relative to a canvas padded by (h_jitter_range,
            # w_jitter_range) on each side, so shift everything by the margin.
            h_start += (h_jitter + h_jitter_range)
            h_end += (h_jitter + h_jitter_range)
            w_start += (w_jitter + w_jitter_range)
            w_end += (w_jitter + w_jitter_range)
        views.append((h_start, h_end, w_start, w_end))
    return views
def scale_forward(
    self,
    hidden_states: torch.FloatTensor,
    attention_mask: Optional[torch.FloatTensor] = None,
    encoder_hidden_states: Optional[torch.FloatTensor] = None,
    encoder_attention_mask: Optional[torch.FloatTensor] = None,
    timestep: Optional[torch.LongTensor] = None,
    cross_attention_kwargs: Dict[str, Any] = None,
    class_labels: Optional[torch.LongTensor] = None,
):
    """Turbo variant of the FreeScale fused local/global self-attention forward.

    Identical in structure to the non-turbo version but tuned for a 512-px base
    tile and 64-px attention windows. `hidden_states` is a flattened token
    sequence of shape (B', h*w, d); B' is presumably batch (x heads) — confirm
    against the attention implementation. Self-attention runs locally over
    overlapping windows and globally (full or dilated), fused as: blurred local
    output + high-frequency residual of the global output.
    """
    # Notice that normalization is always applied before the real computation in the following blocks.
    # Number of 512-px tiles per axis at the current target resolution.
    if self.current_hw:
        current_scale_num_h, current_scale_num_w = self.current_hw[0] // 512, self.current_hw[1] // 512
    else:
        current_scale_num_h, current_scale_num_w = 1, 1
    # 0. Self-Attention
    if self.use_ada_layer_norm:
        norm_hidden_states = self.norm1(hidden_states, timestep)
    elif self.use_ada_layer_norm_zero:
        # adaLN-Zero: norm1 also emits per-sample gates/shifts for attn and MLP.
        norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(
            hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype
        )
    else:
        norm_hidden_states = self.norm1(hidden_states)
    # 2. Prepare GLIGEN inputs
    cross_attention_kwargs = cross_attention_kwargs.copy() if cross_attention_kwargs is not None else {}
    gligen_kwargs = cross_attention_kwargs.pop("gligen", None)
    # Recover the 2-D latent geometry (latent_h x latent_w) from the flattened
    # sequence length and the target aspect ratio.
    ratio_hw = current_scale_num_h / current_scale_num_w
    latent_h = int((norm_hidden_states.shape[1] * ratio_hw) ** 0.5)
    latent_w = int(latent_h / ratio_hw)
    # Pixels per latent token at this UNet level (64 latents per 512-px tile).
    scale_factor = 64 * current_scale_num_h / latent_h
    # Window size in pixel units; the shorter axis shrinks for non-square targets.
    if ratio_hw > 1:
        sub_h = 64
        sub_w = int(64 / ratio_hw)
    else:
        sub_h = int(64 * ratio_hw)
        sub_w = 64
    # Padding margin (latent units) matching get_views' jitter range (1/8 window).
    h_jitter_range = int(sub_h / scale_factor // 8)
    w_jitter_range = int(sub_w / scale_factor // 8)
    views = get_views(latent_h, latent_w, sub_h, sub_w, scale_factor = scale_factor)
    current_scale_num = max(current_scale_num_h, current_scale_num_w)
    # Phase offsets for the dilated (strided) global sampling in fast mode.
    global_views = [[h, w] for h in range(current_scale_num_h) for w in range(current_scale_num_w)]
    # fast_mode swaps the full-resolution global pass for cheaper dilated attention.
    if self.fast_mode:
        four_window = False
        fourg_window = True
    else:
        four_window = True
        fourg_window = False
    if four_window:
        # ---- local pass: windowed attention, accumulated/averaged on a padded canvas ----
        norm_hidden_states_ = rearrange(norm_hidden_states, 'bh (h w) d -> bh h w d', h = latent_h)
        # F.pad pads last-dim-first: d untouched, then w and h by the jitter margins.
        norm_hidden_states_ = F.pad(norm_hidden_states_, (0, 0, w_jitter_range, w_jitter_range, h_jitter_range, h_jitter_range), 'constant', 0)
        value = torch.zeros_like(norm_hidden_states_)
        count = torch.zeros_like(norm_hidden_states_)
        for index, view in enumerate(views):
            h_start, h_end, w_start, w_end = view
            local_states = norm_hidden_states_[:, h_start:h_end, w_start:w_end, :]
            local_states = rearrange(local_states, 'bh h w d -> bh (h w) d')
            local_output = self.attn1(
                local_states,
                encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
                attention_mask=attention_mask,
                **cross_attention_kwargs,
            )
            local_output = rearrange(local_output, 'bh (h w) d -> bh h w d', h = int(sub_h / scale_factor))
            value[:, h_start:h_end, w_start:w_end, :] += local_output * 1
            count[:, h_start:h_end, w_start:w_end, :] += 1
        # Strip the jitter margin, then average overlapping window contributions.
        value = value[:, h_jitter_range:-h_jitter_range, w_jitter_range:-w_jitter_range, :]
        count = count[:, h_jitter_range:-h_jitter_range, w_jitter_range:-w_jitter_range, :]
        attn_output = torch.where(count>0, value/count, value)
        gaussian_local = gaussian_filter(attn_output, kernel_size=(2*current_scale_num-1), sigma=1.0)
        # ---- global pass: one full self-attention over the whole sequence ----
        attn_output_global = self.attn1(
            norm_hidden_states,
            encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
            attention_mask=attention_mask,
            **cross_attention_kwargs,
        )
        attn_output_global = rearrange(attn_output_global, 'bh (h w) d -> bh h w d', h = latent_h)
        gaussian_global = gaussian_filter(attn_output_global, kernel_size=(2*current_scale_num-1), sigma=1.0)
        # Fuse: blurred local result + (global result minus its blur).
        attn_output = gaussian_local + (attn_output_global - gaussian_global)
        attn_output = rearrange(attn_output, 'bh h w d -> bh (h w) d')
    elif fourg_window:
        # ---- local pass (identical to above) + dilated global pass ----
        norm_hidden_states = rearrange(norm_hidden_states, 'bh (h w) d -> bh h w d', h = latent_h)
        norm_hidden_states_ = F.pad(norm_hidden_states, (0, 0, w_jitter_range, w_jitter_range, h_jitter_range, h_jitter_range), 'constant', 0)
        value = torch.zeros_like(norm_hidden_states_)
        count = torch.zeros_like(norm_hidden_states_)
        for index, view in enumerate(views):
            h_start, h_end, w_start, w_end = view
            local_states = norm_hidden_states_[:, h_start:h_end, w_start:w_end, :]
            local_states = rearrange(local_states, 'bh h w d -> bh (h w) d')
            local_output = self.attn1(
                local_states,
                encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
                attention_mask=attention_mask,
                **cross_attention_kwargs,
            )
            local_output = rearrange(local_output, 'bh (h w) d -> bh h w d', h = int(sub_h / scale_factor))
            value[:, h_start:h_end, w_start:w_end, :] += local_output * 1
            count[:, h_start:h_end, w_start:w_end, :] += 1
        value = value[:, h_jitter_range:-h_jitter_range, w_jitter_range:-w_jitter_range, :]
        count = count[:, h_jitter_range:-h_jitter_range, w_jitter_range:-w_jitter_range, :]
        attn_output = torch.where(count>0, value/count, value)
        gaussian_local = gaussian_filter(attn_output, kernel_size=(2*current_scale_num-1), sigma=1.0)
        # Dilated global pass: attend over strided sub-grids (one per phase
        # offset) and scatter the results back — cheaper than full attention.
        value = torch.zeros_like(norm_hidden_states)
        count = torch.zeros_like(norm_hidden_states)
        for index, global_view in enumerate(global_views):
            h, w = global_view
            global_states = norm_hidden_states[:, h::current_scale_num_h, w::current_scale_num_w, :]
            global_states = rearrange(global_states, 'bh h w d -> bh (h w) d')
            global_output = self.attn1(
                global_states,
                encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
                attention_mask=attention_mask,
                **cross_attention_kwargs,
            )
            # NOTE(review): assumes each strided sub-grid is square — confirm for
            # non-square targets.
            global_output = rearrange(global_output, 'bh (h w) d -> bh h w d', h = int(global_output.shape[1] ** 0.5))
            value[:, h::current_scale_num_h, w::current_scale_num_w, :] += global_output * 1
            count[:, h::current_scale_num_h, w::current_scale_num_w, :] += 1
        attn_output_global = torch.where(count>0, value/count, value)
        gaussian_global = gaussian_filter(attn_output_global, kernel_size=(2*current_scale_num-1), sigma=1.0)
        # Fuse: blurred local result + (global result minus its blur).
        attn_output = gaussian_local + (attn_output_global - gaussian_global)
        attn_output = rearrange(attn_output, 'bh h w d -> bh (h w) d')
    else:
        # Plain self-attention (no scale fusion) — currently unreachable since
        # fast_mode selects one of the two branches above.
        attn_output = self.attn1(
            norm_hidden_states,
            encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
            attention_mask=attention_mask,
            **cross_attention_kwargs,
        )
    if self.use_ada_layer_norm_zero:
        attn_output = gate_msa.unsqueeze(1) * attn_output
    hidden_states = attn_output + hidden_states
    # 2.5 GLIGEN Control
    if gligen_kwargs is not None:
        hidden_states = self.fuser(hidden_states, gligen_kwargs["objs"])
    # 2.5 ends
    # 3. Cross-Attention
    if self.attn2 is not None:
        norm_hidden_states = (
            self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)
        )
        attn_output = self.attn2(
            norm_hidden_states,
            encoder_hidden_states=encoder_hidden_states,
            attention_mask=encoder_attention_mask,
            **cross_attention_kwargs,
        )
        hidden_states = attn_output + hidden_states
    # 4. Feed-forward
    norm_hidden_states = self.norm3(hidden_states)
    if self.use_ada_layer_norm_zero:
        norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
    if self._chunk_size is not None:
        # "feed_forward_chunk_size" can be used to save memory
        if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
            raise ValueError(
                f"`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`."
            )
        num_chunks = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
        ff_output = torch.cat(
            [
                self.ff(hid_slice)
                for hid_slice in norm_hidden_states.chunk(num_chunks, dim=self._chunk_dim)
            ],
            dim=self._chunk_dim,
        )
    else:
        ff_output = self.ff(norm_hidden_states)
    if self.use_ada_layer_norm_zero:
        ff_output = gate_mlp.unsqueeze(1) * ff_output
    hidden_states = ff_output + hidden_states
    return hidden_states
def ori_forward(
    self,
    hidden_states: torch.FloatTensor,
    attention_mask: Optional[torch.FloatTensor] = None,
    encoder_hidden_states: Optional[torch.FloatTensor] = None,
    encoder_attention_mask: Optional[torch.FloatTensor] = None,
    timestep: Optional[torch.LongTensor] = None,
    cross_attention_kwargs: Dict[str, Any] = None,
    class_labels: Optional[torch.LongTensor] = None,
):
    """Unmodified transformer-block forward (diffusers BasicTransformerBlock
    style): norm1 -> self-attention -> optional GLIGEN fuser -> norm2 ->
    cross-attention -> norm3 -> (optionally chunked) feed-forward, each step
    with a residual connection. Kept as the non-FreeScale fallback path.
    """
    # Notice that normalization is always applied before the real computation in the following blocks.
    # 0. Self-Attention
    if self.use_ada_layer_norm:
        norm_hidden_states = self.norm1(hidden_states, timestep)
    elif self.use_ada_layer_norm_zero:
        # adaLN-Zero: norm1 also emits per-sample gates/shifts for attn and MLP.
        norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(
            hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype
        )
    else:
        norm_hidden_states = self.norm1(hidden_states)
    # 2. Prepare GLIGEN inputs
    cross_attention_kwargs = cross_attention_kwargs.copy() if cross_attention_kwargs is not None else {}
    gligen_kwargs = cross_attention_kwargs.pop("gligen", None)
    attn_output = self.attn1(
        norm_hidden_states,
        encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
        attention_mask=attention_mask,
        **cross_attention_kwargs,
    )
    if self.use_ada_layer_norm_zero:
        attn_output = gate_msa.unsqueeze(1) * attn_output
    hidden_states = attn_output + hidden_states
    # 2.5 GLIGEN Control
    if gligen_kwargs is not None:
        hidden_states = self.fuser(hidden_states, gligen_kwargs["objs"])
    # 2.5 ends
    # 3. Cross-Attention
    if self.attn2 is not None:
        norm_hidden_states = (
            self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)
        )
        attn_output = self.attn2(
            norm_hidden_states,
            encoder_hidden_states=encoder_hidden_states,
            attention_mask=encoder_attention_mask,
            **cross_attention_kwargs,
        )
        hidden_states = attn_output + hidden_states
    # 4. Feed-forward
    norm_hidden_states = self.norm3(hidden_states)
    if self.use_ada_layer_norm_zero:
        norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
    if self._chunk_size is not None:
        # "feed_forward_chunk_size" can be used to save memory
        if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
            raise ValueError(
                f"`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`."
            )
        num_chunks = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
        ff_output = torch.cat(
            [
                self.ff(hid_slice)
                for hid_slice in norm_hidden_states.chunk(num_chunks, dim=self._chunk_dim)
            ],
            dim=self._chunk_dim,
        )
    else:
        ff_output = self.ff(norm_hidden_states)
    if self.use_ada_layer_norm_zero:
        ff_output = gate_mlp.unsqueeze(1) * ff_output
    hidden_states = ff_output + hidden_states
    return hidden_states | python | Apache-2.0 | 7161bfcf2c180818db6a9f04f65e5b3963582ac7 | 2026-01-05T07:14:43.712841Z | false
ali-vilab/FreeScale | https://github.com/ali-vilab/FreeScale/blob/7161bfcf2c180818db6a9f04f65e5b3963582ac7/pipeline_sdxl.py | pipeline_sdxl.py | # Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers.image_processor import VaeImageProcessor
from diffusers.loaders import FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.models.attention_processor import (
AttnProcessor2_0,
LoRAAttnProcessor2_0,
LoRAXFormersAttnProcessor,
XFormersAttnProcessor,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import (
is_accelerate_available,
is_accelerate_version,
is_invisible_watermark_available,
logging,
randn_tensor,
replace_example_docstring,
)
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion_xl import StableDiffusionXLPipelineOutput
if is_invisible_watermark_available():
from .watermark import StableDiffusionXLWatermarker
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = """
Examples:
```py
>>> import torch
>>> from diffusers import StableDiffusionXLPipeline
>>> pipe = StableDiffusionXLPipeline.from_pretrained(
... "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
... )
>>> pipe = pipe.to("cuda")
>>> prompt = "a photo of an astronaut riding a horse on mars"
>>> image = pipe(prompt).images[0]
```
"""
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg
def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
    """
    Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
    Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4
    """
    # Per-sample std over every non-batch dimension.
    text_std = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
    cfg_std = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
    # Rescale the guided prediction so its std matches the text prediction's
    # (fixes overexposure).
    rescaled = noise_cfg * (text_std / cfg_std)
    # Blend rescaled and raw guidance to avoid "plain looking" images.
    return guidance_rescale * rescaled + (1 - guidance_rescale) * noise_cfg
class StableDiffusionXLPipeline(DiffusionPipeline, FromSingleFileMixin, LoraLoaderMixin):
r"""
Pipeline for text-to-image generation using Stable Diffusion XL.
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
In addition the pipeline inherits the following loading methods:
- *LoRA*: [`StableDiffusionXLPipeline.load_lora_weights`]
- *Ckpt*: [`loaders.FromSingleFileMixin.from_single_file`]
as well as the following saving methods:
- *LoRA*: [`loaders.StableDiffusionXLPipeline.save_lora_weights`]
Args:
vae ([`AutoencoderKL`]):
Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
text_encoder ([`CLIPTextModel`]):
Frozen text-encoder. Stable Diffusion XL uses the text portion of
[CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
text_encoder_2 ([` CLIPTextModelWithProjection`]):
Second frozen text-encoder. Stable Diffusion XL uses the text and pool portion of
[CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection),
specifically the
[laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k)
variant.
tokenizer (`CLIPTokenizer`):
Tokenizer of class
[CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
tokenizer_2 (`CLIPTokenizer`):
Second Tokenizer of class
[CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
scheduler ([`SchedulerMixin`]):
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
"""
def __init__(
self,
vae: AutoencoderKL,
text_encoder: CLIPTextModel,
text_encoder_2: CLIPTextModelWithProjection,
tokenizer: CLIPTokenizer,
tokenizer_2: CLIPTokenizer,
unet: UNet2DConditionModel,
scheduler: KarrasDiffusionSchedulers,
force_zeros_for_empty_prompt: bool = True,
add_watermarker: Optional[bool] = None,
):
super().__init__()
self.register_modules(
vae=vae,
text_encoder=text_encoder,
text_encoder_2=text_encoder_2,
tokenizer=tokenizer,
tokenizer_2=tokenizer_2,
unet=unet,
scheduler=scheduler,
)
self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt)
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
self.default_sample_size = self.unet.config.sample_size
add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available()
if add_watermarker:
self.watermark = StableDiffusionXLWatermarker()
else:
self.watermark = None
self.vae.enable_tiling()
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_slicing
def enable_vae_slicing(self):
r"""
Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
"""
self.vae.enable_slicing()
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_slicing
def disable_vae_slicing(self):
r"""
Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
computing decoding in one step.
"""
self.vae.disable_slicing()
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_tiling
def enable_vae_tiling(self):
r"""
Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
processing larger images.
"""
self.vae.enable_tiling()
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_tiling
def disable_vae_tiling(self):
r"""
Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to
computing decoding in one step.
"""
self.vae.disable_tiling()
def enable_model_cpu_offload(self, gpu_id=0):
r"""
Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared
to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward`
method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with
`enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`.
"""
if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
device = torch.device(f"cuda:{gpu_id}")
if self.device.type != "cpu":
self.to("cpu", silence_dtype_warnings=True)
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
model_sequence = (
[self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2]
)
model_sequence.extend([self.unet, self.vae])
hook = None
for cpu_offloaded_model in model_sequence:
_, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
# We'll offload the last model manually.
self.final_offload_hook = hook
    def encode_prompt(
        self,
        prompt: str,
        prompt_2: Optional[str] = None,
        device: Optional[torch.device] = None,
        num_images_per_prompt: int = 1,
        do_classifier_free_guidance: bool = True,
        negative_prompt: Optional[str] = None,
        negative_prompt_2: Optional[str] = None,
        prompt_embeds: Optional[torch.FloatTensor] = None,
        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
        pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
        negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
        lora_scale: Optional[float] = None,
    ):
        r"""
        Encodes the prompt into text encoder hidden states.
        Args:
            prompt (`str` or `List[str]`, *optional*):
                prompt to be encoded
            prompt_2 (`str` or `List[str]`, *optional*):
                The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
                used in both text-encoders
            device: (`torch.device`):
                torch device
            num_images_per_prompt (`int`):
                number of images that should be generated per prompt
            do_classifier_free_guidance (`bool`):
                whether to use classifier free guidance or not
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
                less than `1`).
            negative_prompt_2 (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
                `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
            prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                provided, text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                argument.
            pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
                If not provided, pooled text embeddings will be generated from `prompt` input argument.
            negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
                input argument.
            lora_scale (`float`, *optional*):
                A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.

        Returns:
            Tuple of `(prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds,
            negative_pooled_prompt_embeds)`; the negative entries are only meaningful
            when `do_classifier_free_guidance` is True.
        """
        device = device or self._execution_device
        # set lora scale so that monkey patched LoRA
        # function of text encoder can correctly access it
        if lora_scale is not None and isinstance(self, LoraLoaderMixin):
            self._lora_scale = lora_scale
        # Batch size follows `prompt` when given, otherwise the precomputed embeddings.
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]
        # Define tokenizers and text encoders
        tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2]
        text_encoders = (
            [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2]
        )
        if prompt_embeds is None:
            prompt_2 = prompt_2 or prompt
            # textual inversion: process multi-vector tokens if necessary
            prompt_embeds_list = []
            prompts = [prompt, prompt_2]
            # NOTE: this loop rebinds `prompt`; after it, `prompt` refers to the last entry.
            for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders):
                if isinstance(self, TextualInversionLoaderMixin):
                    prompt = self.maybe_convert_prompt(prompt, tokenizer)
                text_inputs = tokenizer(
                    prompt,
                    padding="max_length",
                    max_length=tokenizer.model_max_length,
                    truncation=True,
                    return_tensors="pt",
                )
                text_input_ids = text_inputs.input_ids
                untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
                # Warn when the prompt had to be truncated to the tokenizer's max length.
                if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
                    text_input_ids, untruncated_ids
                ):
                    removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1])
                    logger.warning(
                        "The following part of your input was truncated because CLIP can only handle sequences up to"
                        f" {tokenizer.model_max_length} tokens: {removed_text}"
                    )
                prompt_embeds = text_encoder(
                    text_input_ids.to(device),
                    output_hidden_states=True,
                )
                # We are only ALWAYS interested in the pooled output of the final text encoder
                pooled_prompt_embeds = prompt_embeds[0]
                # Penultimate hidden layer is the SDXL convention for text conditioning.
                prompt_embeds = prompt_embeds.hidden_states[-2]
                prompt_embeds_list.append(prompt_embeds)
            # Concatenate the two encoders' features along the channel dimension.
            prompt_embeds = torch.concat(prompt_embeds_list, dim=-1)
        # get unconditional embeddings for classifier free guidance
        zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt
        if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt:
            # SDXL option: represent an absent negative prompt as all-zero embeddings.
            negative_prompt_embeds = torch.zeros_like(prompt_embeds)
            negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds)
        elif do_classifier_free_guidance and negative_prompt_embeds is None:
            negative_prompt = negative_prompt or ""
            negative_prompt_2 = negative_prompt_2 or negative_prompt
            uncond_tokens: List[str]
            if prompt is not None and type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt, negative_prompt_2]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = [negative_prompt, negative_prompt_2]
            negative_prompt_embeds_list = []
            for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders):
                if isinstance(self, TextualInversionLoaderMixin):
                    negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer)
                # Pad/truncate the negative prompt to the positive embeddings' sequence length.
                max_length = prompt_embeds.shape[1]
                uncond_input = tokenizer(
                    negative_prompt,
                    padding="max_length",
                    max_length=max_length,
                    truncation=True,
                    return_tensors="pt",
                )
                negative_prompt_embeds = text_encoder(
                    uncond_input.input_ids.to(device),
                    output_hidden_states=True,
                )
                # We are only ALWAYS interested in the pooled output of the final text encoder
                negative_pooled_prompt_embeds = negative_prompt_embeds[0]
                negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2]
                negative_prompt_embeds_list.append(negative_prompt_embeds)
            negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1)
        prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
        bs_embed, seq_len, _ = prompt_embeds.shape
        # duplicate text embeddings for each generation per prompt, using mps friendly method
        prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
        prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
        if do_classifier_free_guidance:
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
        pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
            bs_embed * num_images_per_prompt, -1
        )
        if do_classifier_free_guidance:
            negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
                bs_embed * num_images_per_prompt, -1
            )
        return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
def prepare_extra_step_kwargs(self, generator, eta):
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
extra_step_kwargs = {}
if accepts_eta:
extra_step_kwargs["eta"] = eta
# check if the scheduler accepts generator
accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
if accepts_generator:
extra_step_kwargs["generator"] = generator
return extra_step_kwargs
    def check_inputs(
        self,
        prompt,
        prompt_2,
        height,
        width,
        callback_steps,
        negative_prompt=None,
        negative_prompt_2=None,
        prompt_embeds=None,
        negative_prompt_embeds=None,
        pooled_prompt_embeds=None,
        negative_pooled_prompt_embeds=None,
    ):
        """Validate `__call__` arguments before running the expensive pipeline.

        Enforces: image dimensions divisible by 8, a positive integer
        `callback_steps`, mutual exclusivity of raw prompts vs. precomputed
        embeddings, matching shapes for positive/negative embeddings, and that
        pooled embeddings accompany their sequence embeddings.

        Raises:
            ValueError: on any violated constraint (TypeError is not raised here).
        """
        # Latents are downsampled 8x by the VAE, so pixel dims must be multiples of 8.
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )
        # `prompt`/`prompt_2` and `prompt_embeds` are mutually exclusive inputs,
        # but exactly one of the two forms must be supplied.
        if prompt is not None and prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
                " only forward one of the two."
            )
        elif prompt_2 is not None and prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
                " only forward one of the two."
            )
        elif prompt is None and prompt_embeds is None:
            raise ValueError(
                "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
            )
        elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
        elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)):
            raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}")
        # Same exclusivity rule for the negative prompts.
        if negative_prompt is not None and negative_prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
                f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
            )
        elif negative_prompt_2 is not None and negative_prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:"
                f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
            )
        if prompt_embeds is not None and negative_prompt_embeds is not None:
            if prompt_embeds.shape != negative_prompt_embeds.shape:
                raise ValueError(
                    "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
                    f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
                    f" {negative_prompt_embeds.shape}."
                )
        # SDXL needs the pooled embeddings alongside the sequence embeddings.
        if prompt_embeds is not None and pooled_prompt_embeds is None:
            raise ValueError(
                "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`."
            )
        if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None:
            raise ValueError(
                "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`."
            )
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
if isinstance(generator, list) and len(generator) != batch_size:
raise ValueError(
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
)
if latents is None:
latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
else:
latents = latents.to(device)
# scale the initial noise by the standard deviation required by the scheduler
latents = latents * self.scheduler.init_noise_sigma
return latents
def _get_add_time_ids(self, original_size, crops_coords_top_left, target_size, dtype):
add_time_ids = list(original_size + crops_coords_top_left + target_size)
passed_add_embed_dim = (
self.unet.config.addition_time_embed_dim * len(add_time_ids) + self.text_encoder_2.config.projection_dim
)
expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features
if expected_add_embed_dim != passed_add_embed_dim:
raise ValueError(
f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`."
)
add_time_ids = torch.tensor([add_time_ids], dtype=dtype)
return add_time_ids
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae
def upcast_vae(self):
dtype = self.vae.dtype
self.vae.to(dtype=torch.float32)
use_torch_2_0_or_xformers = isinstance(
self.vae.decoder.mid_block.attentions[0].processor,
(
AttnProcessor2_0,
XFormersAttnProcessor,
LoRAXFormersAttnProcessor,
LoRAAttnProcessor2_0,
),
)
# if xformers or torch_2_0 is used attention block does not need
# to be in float32 which can save lots of memory
if use_torch_2_0_or_xformers:
self.vae.post_quant_conv.to(dtype)
self.vae.decoder.conv_in.to(dtype)
self.vae.decoder.mid_block.to(dtype)
@torch.no_grad()
@replace_example_docstring(EXAMPLE_DOC_STRING)
def __call__(
self,
prompt: Union[str, List[str]] = None,
prompt_2: Optional[Union[str, List[str]]] = None,
height: Optional[int] = None,
width: Optional[int] = None,
num_inference_steps: int = 50,
denoising_end: Optional[float] = None,
guidance_scale: float = 5.0,
negative_prompt: Optional[Union[str, List[str]]] = None,
negative_prompt_2: Optional[Union[str, List[str]]] = None,
num_images_per_prompt: Optional[int] = 1,
eta: float = 0.0,
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
latents: Optional[torch.FloatTensor] = None,
prompt_embeds: Optional[torch.FloatTensor] = None,
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
output_type: Optional[str] = "pil",
return_dict: bool = True,
callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
callback_steps: int = 1,
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
guidance_rescale: float = 0.0,
original_size: Optional[Tuple[int, int]] = None,
crops_coords_top_left: Tuple[int, int] = (0, 0),
target_size: Optional[Tuple[int, int]] = None,
):
r"""
Function invoked when calling the pipeline for generation.
Args:
prompt (`str` or `List[str]`, *optional*):
The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`.
instead.
prompt_2 (`str` or `List[str]`, *optional*):
The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
used in both text-encoders
height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
The height in pixels of the generated image.
width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
The width in pixels of the generated image.
num_inference_steps (`int`, *optional*, defaults to 50):
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
expense of slower inference.
denoising_end (`float`, *optional*):
When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be
completed before it is intentionally prematurely terminated. As a result, the returned sample will
still retain a substantial amount of noise as determined by the discrete timesteps selected by the
scheduler. The denoising_end parameter should ideally be utilized when this pipeline forms a part of a
"Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image
Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output)
guidance_scale (`float`, *optional*, defaults to 5.0):
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
`guidance_scale` is defined as `w` of equation 2. of [Imagen
Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
usually at the expense of lower image quality.
negative_prompt (`str` or `List[str]`, *optional*):
The prompt or prompts not to guide the image generation. If not defined, one has to pass
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
less than `1`).
negative_prompt_2 (`str` or `List[str]`, *optional*):
The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
`text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
num_images_per_prompt (`int`, *optional*, defaults to 1):
The number of images to generate per prompt.
eta (`float`, *optional*, defaults to 0.0):
Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
[`schedulers.DDIMScheduler`], will be ignored for others.
| python | Apache-2.0 | 7161bfcf2c180818db6a9f04f65e5b3963582ac7 | 2026-01-05T07:14:43.712841Z | true |
ali-vilab/FreeScale | https://github.com/ali-vilab/FreeScale/blob/7161bfcf2c180818db6a9f04f65e5b3963582ac7/pipeline_freescale.py | pipeline_freescale.py | import inspect
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers.image_processor import VaeImageProcessor
from diffusers.loaders import FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.models.attention_processor import (
AttnProcessor2_0,
LoRAAttnProcessor2_0,
LoRAXFormersAttnProcessor,
XFormersAttnProcessor,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import (
is_accelerate_available,
is_accelerate_version,
is_invisible_watermark_available,
logging,
randn_tensor,
replace_example_docstring,
)
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion_xl import StableDiffusionXLPipelineOutput
if is_invisible_watermark_available():
from .watermark import StableDiffusionXLWatermarker
from inspect import isfunction
from functools import partial
import numpy as np
import torch.nn.functional as F
from diffusers.models.attention import BasicTransformerBlock
from scale_attention import ori_forward, scale_forward
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = """
Examples:
```py
>>> import torch
>>> from diffusers import StableDiffusionXLPipeline
>>> pipe = StableDiffusionXLPipeline.from_pretrained(
... "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
... )
>>> pipe = pipe.to("cuda")
>>> prompt = "a photo of an astronaut riding a horse on mars"
>>> image = pipe(prompt).images[0]
```
"""
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg
def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
    """
    Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
    Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4
    """
    # Per-sample standard deviations over all non-batch dimensions.
    std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
    std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
    # Match the guided prediction's std to the text branch's (fixes overexposure)...
    rescaled = noise_cfg * (std_text / std_cfg)
    # ...then blend with the unrescaled prediction to avoid "plain looking" images.
    return guidance_rescale * rescaled + (1.0 - guidance_rescale) * noise_cfg
class StableDiffusionXLPipeline(DiffusionPipeline, FromSingleFileMixin, LoraLoaderMixin):
r"""
Pipeline for text-to-image generation using Stable Diffusion XL.
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
In addition the pipeline inherits the following loading methods:
- *LoRA*: [`StableDiffusionXLPipeline.load_lora_weights`]
- *Ckpt*: [`loaders.FromSingleFileMixin.from_single_file`]
as well as the following saving methods:
- *LoRA*: [`loaders.StableDiffusionXLPipeline.save_lora_weights`]
Args:
vae ([`AutoencoderKL`]):
Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
text_encoder ([`CLIPTextModel`]):
Frozen text-encoder. Stable Diffusion XL uses the text portion of
[CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
        text_encoder_2 ([`CLIPTextModelWithProjection`]):
Second frozen text-encoder. Stable Diffusion XL uses the text and pool portion of
[CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection),
specifically the
[laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k)
variant.
tokenizer (`CLIPTokenizer`):
Tokenizer of class
[CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
tokenizer_2 (`CLIPTokenizer`):
Second Tokenizer of class
[CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
scheduler ([`SchedulerMixin`]):
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
"""
def __init__(
self,
vae: AutoencoderKL,
text_encoder: CLIPTextModel,
text_encoder_2: CLIPTextModelWithProjection,
tokenizer: CLIPTokenizer,
tokenizer_2: CLIPTokenizer,
unet: UNet2DConditionModel,
scheduler: KarrasDiffusionSchedulers,
force_zeros_for_empty_prompt: bool = True,
add_watermarker: Optional[bool] = None,
):
super().__init__()
self.register_modules(
vae=vae,
text_encoder=text_encoder,
text_encoder_2=text_encoder_2,
tokenizer=tokenizer,
tokenizer_2=tokenizer_2,
unet=unet,
scheduler=scheduler,
)
self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt)
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
self.default_sample_size = self.unet.config.sample_size
add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available()
if add_watermarker:
self.watermark = StableDiffusionXLWatermarker()
else:
self.watermark = None
self.vae.enable_tiling()
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_slicing
def enable_vae_slicing(self):
r"""
Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
"""
self.vae.enable_slicing()
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_slicing
def disable_vae_slicing(self):
r"""
Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
computing decoding in one step.
"""
self.vae.disable_slicing()
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_tiling
def enable_vae_tiling(self):
r"""
Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
processing larger images.
"""
self.vae.enable_tiling()
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_tiling
def disable_vae_tiling(self):
r"""
Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to
computing decoding in one step.
"""
self.vae.disable_tiling()
def enable_model_cpu_offload(self, gpu_id=0):
r"""
Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared
to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward`
method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with
`enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`.
"""
if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
device = torch.device(f"cuda:{gpu_id}")
if self.device.type != "cpu":
self.to("cpu", silence_dtype_warnings=True)
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
model_sequence = (
[self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2]
)
model_sequence.extend([self.unet, self.vae])
hook = None
for cpu_offloaded_model in model_sequence:
_, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
# We'll offload the last model manually.
self.final_offload_hook = hook
    def encode_prompt(
        self,
        prompt: str,
        prompt_2: Optional[str] = None,
        device: Optional[torch.device] = None,
        num_images_per_prompt: int = 1,
        do_classifier_free_guidance: bool = True,
        negative_prompt: Optional[str] = None,
        negative_prompt_2: Optional[str] = None,
        prompt_embeds: Optional[torch.FloatTensor] = None,
        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
        pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
        negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
        lora_scale: Optional[float] = None,
    ):
        r"""
        Encodes the prompt into text encoder hidden states.
        Args:
            prompt (`str` or `List[str]`, *optional*):
                prompt to be encoded
            prompt_2 (`str` or `List[str]`, *optional*):
                The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
                used in both text-encoders
            device: (`torch.device`):
                torch device
            num_images_per_prompt (`int`):
                number of images that should be generated per prompt
            do_classifier_free_guidance (`bool`):
                whether to use classifier free guidance or not
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
                less than `1`).
            negative_prompt_2 (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
                `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
            prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                provided, text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                argument.
            pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
                If not provided, pooled text embeddings will be generated from `prompt` input argument.
            negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
                input argument.
            lora_scale (`float`, *optional*):
                A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
        """
        device = device or self._execution_device
        # set lora scale so that monkey patched LoRA
        # function of text encoder can correctly access it
        if lora_scale is not None and isinstance(self, LoraLoaderMixin):
            self._lora_scale = lora_scale
        # Infer the batch size from whichever prompt form was supplied.
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]
        # Define tokenizers and text encoders
        # (fall back to the second pair alone when the first tokenizer/encoder is absent).
        tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2]
        text_encoders = (
            [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2]
        )
        if prompt_embeds is None:
            prompt_2 = prompt_2 or prompt
            # textual inversion: process multi-vector tokens if necessary
            prompt_embeds_list = []
            prompts = [prompt, prompt_2]
            # Encode each prompt with its matching tokenizer/encoder pair.
            # NOTE: the loop variable deliberately shadows `prompt`.
            for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders):
                if isinstance(self, TextualInversionLoaderMixin):
                    prompt = self.maybe_convert_prompt(prompt, tokenizer)
                text_inputs = tokenizer(
                    prompt,
                    padding="max_length",
                    max_length=tokenizer.model_max_length,
                    truncation=True,
                    return_tensors="pt",
                )
                text_input_ids = text_inputs.input_ids
                untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
                # Warn when the prompt was silently truncated to the model maximum.
                if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
                    text_input_ids, untruncated_ids
                ):
                    removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1])
                    logger.warning(
                        "The following part of your input was truncated because CLIP can only handle sequences up to"
                        f" {tokenizer.model_max_length} tokens: {removed_text}"
                    )
                prompt_embeds = text_encoder(
                    text_input_ids.to(device),
                    output_hidden_states=True,
                )
                # We are only ALWAYS interested in the pooled output of the final text encoder
                pooled_prompt_embeds = prompt_embeds[0]
                # The penultimate hidden state is used as the sequence embedding.
                prompt_embeds = prompt_embeds.hidden_states[-2]
                prompt_embeds_list.append(prompt_embeds)
            # Concatenate both encoders' embeddings along the feature axis.
            prompt_embeds = torch.concat(prompt_embeds_list, dim=-1)
        # get unconditional embeddings for classifier free guidance
        zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt
        if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt:
            # Config asks for all-zero unconditional embeddings instead of encoding "".
            negative_prompt_embeds = torch.zeros_like(prompt_embeds)
            negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds)
        elif do_classifier_free_guidance and negative_prompt_embeds is None:
            negative_prompt = negative_prompt or ""
            negative_prompt_2 = negative_prompt_2 or negative_prompt
            uncond_tokens: List[str]
            if prompt is not None and type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt, negative_prompt_2]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = [negative_prompt, negative_prompt_2]
            negative_prompt_embeds_list = []
            # NOTE: the loop variable deliberately shadows `negative_prompt`.
            for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders):
                if isinstance(self, TextualInversionLoaderMixin):
                    negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer)
                # Pad the negative prompt to the positive embedding's sequence length.
                max_length = prompt_embeds.shape[1]
                uncond_input = tokenizer(
                    negative_prompt,
                    padding="max_length",
                    max_length=max_length,
                    truncation=True,
                    return_tensors="pt",
                )
                negative_prompt_embeds = text_encoder(
                    uncond_input.input_ids.to(device),
                    output_hidden_states=True,
                )
                # We are only ALWAYS interested in the pooled output of the final text encoder
                negative_pooled_prompt_embeds = negative_prompt_embeds[0]
                negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2]
                negative_prompt_embeds_list.append(negative_prompt_embeds)
            negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1)
        prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
        bs_embed, seq_len, _ = prompt_embeds.shape
        # duplicate text embeddings for each generation per prompt, using mps friendly method
        prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
        prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
        if do_classifier_free_guidance:
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
        pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
            bs_embed * num_images_per_prompt, -1
        )
        if do_classifier_free_guidance:
            negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
                bs_embed * num_images_per_prompt, -1
            )
        return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
def prepare_extra_step_kwargs(self, generator, eta):
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
extra_step_kwargs = {}
if accepts_eta:
extra_step_kwargs["eta"] = eta
# check if the scheduler accepts generator
accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
if accepts_generator:
extra_step_kwargs["generator"] = generator
return extra_step_kwargs
def check_inputs(
self,
prompt,
prompt_2,
height,
width,
callback_steps,
negative_prompt=None,
negative_prompt_2=None,
prompt_embeds=None,
negative_prompt_embeds=None,
pooled_prompt_embeds=None,
negative_pooled_prompt_embeds=None,
):
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(callback_steps)}."
)
if prompt is not None and prompt_embeds is not None:
raise ValueError(
f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
" only forward one of the two."
)
elif prompt_2 is not None and prompt_embeds is not None:
raise ValueError(
f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
" only forward one of the two."
)
elif prompt is None and prompt_embeds is None:
raise ValueError(
"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
)
elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)):
raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}")
if negative_prompt is not None and negative_prompt_embeds is not None:
raise ValueError(
f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
)
elif negative_prompt_2 is not None and negative_prompt_embeds is not None:
raise ValueError(
f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:"
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
)
if prompt_embeds is not None and negative_prompt_embeds is not None:
if prompt_embeds.shape != negative_prompt_embeds.shape:
raise ValueError(
"`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
f" {negative_prompt_embeds.shape}."
)
if prompt_embeds is not None and pooled_prompt_embeds is None:
raise ValueError(
"If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`."
)
if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None:
raise ValueError(
"If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`."
)
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
if isinstance(generator, list) and len(generator) != batch_size:
raise ValueError(
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
)
if latents is None:
latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
else:
latents = latents.to(device)
# scale the initial noise by the standard deviation required by the scheduler
latents = latents * self.scheduler.init_noise_sigma
return latents
def _get_add_time_ids(self, original_size, crops_coords_top_left, target_size, dtype):
add_time_ids = list(original_size + crops_coords_top_left + target_size)
passed_add_embed_dim = (
self.unet.config.addition_time_embed_dim * len(add_time_ids) + self.text_encoder_2.config.projection_dim
)
expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features
if expected_add_embed_dim != passed_add_embed_dim:
raise ValueError(
f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`."
)
add_time_ids = torch.tensor([add_time_ids], dtype=dtype)
return add_time_ids
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae
def upcast_vae(self):
dtype = self.vae.dtype
self.vae.to(dtype=torch.float32)
use_torch_2_0_or_xformers = isinstance(
self.vae.decoder.mid_block.attentions[0].processor,
(
AttnProcessor2_0,
XFormersAttnProcessor,
LoRAXFormersAttnProcessor,
LoRAAttnProcessor2_0,
),
)
# if xformers or torch_2_0 is used attention block does not need
# to be in float32 which can save lots of memory
if use_torch_2_0_or_xformers:
self.vae.post_quant_conv.to(dtype)
self.vae.decoder.conv_in.to(dtype)
self.vae.decoder.mid_block.to(dtype)
@torch.no_grad()
@replace_example_docstring(EXAMPLE_DOC_STRING)
def __call__(
self,
prompt: Union[str, List[str]] = None,
prompt_2: Optional[Union[str, List[str]]] = None,
height: Optional[int] = None,
width: Optional[int] = None,
num_inference_steps: int = 50,
denoising_end: Optional[float] = None,
guidance_scale: float = 5.0,
negative_prompt: Optional[Union[str, List[str]]] = None,
negative_prompt_2: Optional[Union[str, List[str]]] = None,
num_images_per_prompt: Optional[int] = 1,
eta: float = 0.0,
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
latents: Optional[torch.FloatTensor] = None,
prompt_embeds: Optional[torch.FloatTensor] = None,
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
output_type: Optional[str] = "pil",
return_dict: bool = True,
callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
callback_steps: int = 1,
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
guidance_rescale: float = 0.0,
original_size: Optional[Tuple[int, int]] = None,
crops_coords_top_left: Tuple[int, int] = (0, 0),
target_size: Optional[Tuple[int, int]] = None,
resolutions_list: Optional[Union[int, List[int]]] = None,
restart_steps: Optional[Union[int, List[int]]] = None,
cosine_scale: float = 2.0,
dilate_tau: int = 35,
fast_mode: bool = False,
):
r"""
Function invoked when calling the pipeline for generation.
Args:
prompt (`str` or `List[str]`, *optional*):
The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`.
instead.
prompt_2 (`str` or `List[str]`, *optional*):
The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
used in both text-encoders
height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
The height in pixels of the generated image.
width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
The width in pixels of the generated image.
num_inference_steps (`int`, *optional*, defaults to 50):
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
expense of slower inference.
denoising_end (`float`, *optional*):
When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be
completed before it is intentionally prematurely terminated. As a result, the returned sample will
still retain a substantial amount of noise as determined by the discrete timesteps selected by the
scheduler. The denoising_end parameter should ideally be utilized when this pipeline forms a part of a
"Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image
Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output)
guidance_scale (`float`, *optional*, defaults to 5.0):
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
`guidance_scale` is defined as `w` of equation 2. of [Imagen
Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
usually at the expense of lower image quality.
negative_prompt (`str` or `List[str]`, *optional*):
The prompt or prompts not to guide the image generation. If not defined, one has to pass
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
less than `1`).
negative_prompt_2 (`str` or `List[str]`, *optional*):
The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
`text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
num_images_per_prompt (`int`, *optional*, defaults to 1):
The number of images to generate per prompt.
eta (`float`, *optional*, defaults to 0.0):
Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
[`schedulers.DDIMScheduler`], will be ignored for others.
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
| python | Apache-2.0 | 7161bfcf2c180818db6a9f04f65e5b3963582ac7 | 2026-01-05T07:14:43.712841Z | true |
ali-vilab/FreeScale | https://github.com/ali-vilab/FreeScale/blob/7161bfcf2c180818db6a9f04f65e5b3963582ac7/run_freescale_turbo.py | run_freescale_turbo.py | import os
import torch
from PIL import Image
from pipeline_freescale_turbo import StableDiffusionXLPipeline_Turbo
from utils import load_prompts
from free_lunch_utils import register_free_upblock2d, register_free_crossattn_upblock2d
# --- configuration --------------------------------------------------------
# SDXL-Turbo checkpoint and the prompt list to render.
model_ckpt = "stabilityai/sdxl-turbo"
prompts_file = 'prompts/demo.txt'
prompts = load_prompts(prompts_file)
# prompts = ['Astronaut on Mars During sunset.']
negative_prompt = "blurry, ugly, duplicate, poorly drawn, deformed, mosaic"
folder_name = 'release_2k_turbo'
# FreeScale cascade: the pipeline renders at each resolution in turn, up to 2K.
resolutions_list = [[512, 512],
                    [1024, 1024],
                    [2048, 2048]]
cosine_scale = 2.0
disable_freeu = 0
fast_mode = False
# --- model setup ----------------------------------------------------------
pipe = StableDiffusionXLPipeline_Turbo.from_pretrained(model_ckpt, torch_dtype=torch.float16)
pipe = pipe.to("cuda")
if not disable_freeu:
    # FreeU scaling of backbone (b1/b2) and skip (s1/s2) features.
    register_free_upblock2d(pipe, b1=1.1, b2=1.2, s1=0.6, s2=0.4)
    register_free_crossattn_upblock2d(pipe, b1=1.1, b2=1.2, s1=0.6, s2=0.4)
# Fixed seed for reproducible sampling.
generator = torch.Generator(device='cuda')
generator = generator.manual_seed(123)
os.makedirs(folder_name, exist_ok=True)
# --- generation loop ------------------------------------------------------
# Turbo settings: 4 steps, no classifier-free guidance (guidance_scale=0.0).
for index, prompt in enumerate(prompts):
    print("prompt {}:".format(index))
    print(prompt)
    resuls = pipe(prompt, negative_prompt=negative_prompt, generator=generator,
                  num_inference_steps=4, guidance_scale=0.0,
                  resolutions_list=resolutions_list, fast_mode=fast_mode, cosine_scale=cosine_scale,
                  )
    for i, result in enumerate(resuls):
        # One result per cascade stage; save each at its native resolution.
        image = result.images[0]
image.save("{}/img{}_{}.png".format(folder_name, index, resolutions_list[i][0])) | python | Apache-2.0 | 7161bfcf2c180818db6a9f04f65e5b3963582ac7 | 2026-01-05T07:14:43.712841Z | false |
ali-vilab/FreeScale | https://github.com/ali-vilab/FreeScale/blob/7161bfcf2c180818db6a9f04f65e5b3963582ac7/run_freescale_imgen.py | run_freescale_imgen.py | import os
import torch
from PIL import Image
from pipeline_freescale_imgen import StableDiffusionXLPipeline
from utils import load_prompts
from free_lunch_utils import register_free_upblock2d, register_free_crossattn_upblock2d
# Intermediate image + mask consumed by the img-gen pipeline.
img_path = 'imgen_intermediates/tmp_img.png'
mask_path = 'imgen_intermediates/tmp_mask.png'
# --- configuration --------------------------------------------------------
model_ckpt = "stabilityai/stable-diffusion-xl-base-1.0"
prompts_file = 'prompts/imgen.txt'
prompts = load_prompts(prompts_file)
# prompts = ['Astronaut on Mars During sunset.']
negative_prompt = "blurry, ugly, duplicate, poorly drawn, deformed, mosaic"
folder_name = 'release_4k_imgen'
# FreeScale cascade: the pipeline renders at each resolution in turn, up to 4K.
resolutions_list = [[1024, 1024],
                    [2048, 2048],
                    [4096, 4096]]
cosine_scale = 3.0
# Separate (weaker) cosine scale for background regions selected by the mask.
cosine_scale_bg = 0.5
disable_freeu = 0
fast_mode = False
# --- model setup ----------------------------------------------------------
pipe = StableDiffusionXLPipeline.from_pretrained(model_ckpt, torch_dtype=torch.float16)
pipe = pipe.to("cuda")
if not disable_freeu:
    # FreeU scaling of backbone (b1/b2) and skip (s1/s2) features.
    register_free_upblock2d(pipe, b1=1.1, b2=1.2, s1=0.6, s2=0.4)
    register_free_crossattn_upblock2d(pipe, b1=1.1, b2=1.2, s1=0.6, s2=0.4)
# Fixed seed for reproducible sampling.
generator = torch.Generator(device='cuda')
generator = generator.manual_seed(123)
os.makedirs(folder_name, exist_ok=True)
# --- generation loop ------------------------------------------------------
for index, prompt in enumerate(prompts):
    print("prompt {}:".format(index))
    print(prompt)
    resuls = pipe(prompt, negative_prompt=negative_prompt, generator=generator,
                  num_inference_steps=50, guidance_scale=7.5,
                  resolutions_list=resolutions_list, fast_mode=fast_mode, cosine_scale=cosine_scale,
                  cosine_scale_bg=cosine_scale_bg, img_path=img_path, mask_path=mask_path,
                  )
    for i, result in enumerate(resuls):
        # One result per cascade stage; save each at its native resolution.
        image = result.images[0]
        image.save("{}/img{}_{}.png".format(folder_name, index, resolutions_list[i][0]))
| python | Apache-2.0 | 7161bfcf2c180818db6a9f04f65e5b3963582ac7 | 2026-01-05T07:14:43.712841Z | false |
furgoose/Pocket-Casts | https://github.com/furgoose/Pocket-Casts/blob/54db3425a99a412a1fd3784c8e0247cd44356f3a/setup.py | setup.py | import pocketcasts
from setuptools import setup, find_packages
# Package metadata (version, description, url, author) is sourced from the
# dunder attributes of pocketcasts.api so it lives in a single place.
setup(
    name="pocketcasts-api",
    version=pocketcasts.api.__version__,
    description=pocketcasts.api.__doc__,
    url=pocketcasts.api.__url__,
    author=pocketcasts.api.__author__,
    author_email='ferguslongley@live.com',
    license='MIT',
    classifiers=[
        'Development Status :: 4 - Beta',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3.5',
    ],
    # Ship every package except the test suite.
    packages=find_packages(exclude=['testing']),
    keywords='podcasts pocketcasts',
    install_requires=['requests'],
)
| python | MIT | 54db3425a99a412a1fd3784c8e0247cd44356f3a | 2026-01-05T07:14:47.067072Z | false |
furgoose/Pocket-Casts | https://github.com/furgoose/Pocket-Casts/blob/54db3425a99a412a1fd3784c8e0247cd44356f3a/pocketcasts/api.py | pocketcasts/api.py | """Unofficial API for pocketcasts.com"""
import requests
from .podcast import Podcast
from .episode import Episode
__version__ = "0.2.3"
__author__ = "Fergus Longley"
__url__ = "https://github.com/exofudge/Pocket-Casts"
class Pocketcasts(object):
"""The main class for making getting and setting information from the server"""
def __init__(self, email, password):
"""
Args:
email (str): email of user
password (str): password of user
"""
self._username = email
self._password = password
self._session = requests.Session()
self._login()
def _make_req(self, url, method='GET', data=None):
"""Makes a HTTP GET/POST request
Args:
url (str): The URL to make the request to
method (str, optional): The method to use. Defaults to 'GET'
data (dict): data to send with a POST request. Defaults to None.
Returns:
requests.response.models.Response: A response object
"""
if method == 'JSON':
req = requests.Request('POST', url, json=data, cookies=self._session.cookies)
elif method == 'POST' or data:
req = requests.Request('POST', url, data=data, cookies=self._session.cookies)
elif method == 'GET':
req = requests.Request('GET', url, cookies=self._session.cookies)
else:
raise Exception("Invalid method")
prepped = req.prepare()
return self._session.send(prepped)
def _login(self):
"""Authenticate using "https://play.pocketcasts.com/users/sign_in"
Returns:
bool: True is successful
Raises:
Exception: If login fails
:return:
"""
login_url = "https://play.pocketcasts.com/users/sign_in"
data = {"[user]email": self._username, "[user]password": self._password}
attempt = self._make_req(login_url, data=data)
# TODO Find a more robust way to check if login failed
if "Invalid email or password" in attempt.text:
raise Exception("Login Failed")
else:
return True
def get_top_charts(self):
"""Get the top podcasts
Returns:
list: A list of the top 100 podcasts as Podcast objects
Raises:
Exception: If the top charts cannot be obtained
"""
page = self._make_req("https://static.pocketcasts.com/discover/json/popular_world.json").json()
results = []
for podcast in page['result']['podcasts']:
uuid = podcast.pop('uuid')
results.append(Podcast(uuid, self, **podcast))
return results
def get_featured(self):
"""Get the featured podcasts
Returns:
list: A list of the 30 featured podcasts as Podcast objects
Raises:
Exception: If the featured podcasts cannot be obtained
"""
page = self._make_req("https://static.pocketcasts.com/discover/json/featured.json").json()
results = []
for podcast in page['result']['podcasts']:
uuid = podcast.pop('uuid')
results.append(Podcast(uuid, self, **podcast))
return results
def get_trending(self):
"""Get the trending podcasts
Returns:
list: A list of the 100 trending podcasts as Podcast objects
Raises:
Exception: If the trending podcasts cannot be obtained
"""
page = self._make_req("https://static.pocketcasts.com/discover/json/trending.json").json()
results = []
for podcast in page['result']['podcasts']:
uuid = podcast.pop('uuid')
results.append(Podcast(uuid, self, **podcast))
return results
def get_episode(self, pod, e_uuid):
# TODO figure out what id is/does
"""Returns an episode object corresponding to the uuid's provided
Args:
pod (class): The podcast class
e_uuid (str): The episode UUID
Returns:
class: An Episode class with all information about an episode
Examples:
>>> p = Pocketcasts(email='email@email.com')
>>> pod = p.get_podcast('12012c20-0423-012e-f9a0-00163e1b201c')
>>> p.get_episode(pod, 'a35748e0-bb4d-0134-10a8-25324e2a541d')
<class 'episode.Episode'> ({
'_size': 10465287,
'_is_video': False,
'_url': 'http://.../2017-01-12-sysk-watersheds.mp3?awCollectionId=1003&awEpisodeId=923109',
'_id': None,
'_duration': '1934',
'_is_deleted': '',
'_title': 'How Watersheds Work',
'_file_type': 'audio/mpeg',
'_played_up_to': 1731,
'_published_at': '2017-01-12 08:00:00',
'_podcast': <class 'podcast.Podcast'> (...),
'_playing_status': 2,
'_starred': False,
'_uuid': 'a35748e0-bb4d-0134-10a8-25324e2a541d'})
"""
data = {
'uuid': pod.uuid,
'episode_uuid': e_uuid
}
attempt = self._make_req('https://play.pocketcasts.com/web/podcasts/podcast.json', data=data).json()['episode']
attempt.pop('uuid')
episode = Episode(e_uuid, pod, **attempt)
return episode
def get_podcast(self, uuid):
"""Get a podcast from it's UUID
Args:
uuid (str): The UUID of the podcast
Returns:
pocketcasts.Podcast: A podcast object corresponding to the UUID provided.
"""
data = {
'uuid': uuid
}
attempt = self._make_req('https://play.pocketcasts.com/web/podcasts/podcast.json', data=data).json()['podcast']
attempt.pop('uuid')
podcast = Podcast(uuid, self, **attempt)
return podcast
def get_podcast_episodes(self, pod, sort=Podcast.SortOrder.NewestToOldest):
"""Get all episodes of a podcasts
Args:
pod (class): The podcast class
sort (int): The sort order, 3 for Newest to oldest, 2 for Oldest to newest. Defaults to 3.
Returns:
list: A list of Episode classes.
Examples:
>>> p = Pocketcasts('email@email.com')
>>> pod = p.get_podcast('12012c20-0423-012e-f9a0-00163e1b201c')
>>> p.get_podcast_episodes(pod)
[<class 'episode.Episode'> ({
'_size': 17829778,
'_is_video': False,
'_url': 'http://.../2017-02-21-sysk-death-tax-final.mp3?awCollectionId=1003&awEpisodeId=923250',
'_id': None,
'_duration': '3161',
'_is_deleted': 0,
'_title': 'The ins and outs of the DEATH TAX',
'_file_type': 'audio/mpeg',
'_played_up_to': 0,
'_published_at': '2017-02-21 08:00:00',
'_podcast': <class 'podcast.Podcast'> (...),
'_playing_status': 0,
'_starred': False,
'_uuid': '9189eba0-da79-0134-ebdd-4114446340cb'}),
...]
"""
page = 1
more_pages = True
episodes = []
while more_pages:
data = {
'uuid': pod.uuid,
'page': page,
'sort': sort
}
attempt = self._make_req('https://play.pocketcasts.com/web/episodes/find_by_podcast.json', data=data).json()
for epi in attempt['result']['episodes']:
uuid = epi.pop('uuid')
episodes.append(Episode(uuid, podcast=pod, **epi))
if attempt['result']['total'] > len(episodes):
page += 1
else:
more_pages = False
return episodes
def get_episode_notes(self, episode_uuid):
"""Get the notes for an episode
Args:
episode_uuid (str): The episode UUID
Returns:
str: The notes for the episode UUID provided
"""
data = {
'uuid': episode_uuid
}
return self._make_req('https://play.pocketcasts.com/web/episodes/show_notes.json', data=data) \
.json()['show_notes']
def get_subscribed_podcasts(self):
"""Get the user's subscribed podcasts
Returns:
List[pocketcasts.podcast.Podcast]: A list of podcasts
"""
attempt = self._make_req('https://play.pocketcasts.com/web/podcasts/all.json', method='POST').json()
results = []
for podcast in attempt['podcasts']:
uuid = podcast.pop('uuid')
results.append(Podcast(uuid, self, **podcast))
return results
def get_new_releases(self):
"""Get newly released podcasts from a user's subscriptions
Returns:
List[pocketcasts.episode.Episode]: A list of episodes
"""
attempt = self._make_req('https://play.pocketcasts.com/web/episodes/new_releases_episodes.json', method='POST')
results = []
podcasts = {}
for episode in attempt.json()['episodes']:
pod_uuid = episode['podcast_uuid']
if pod_uuid not in podcasts:
podcasts[pod_uuid] = self.get_podcast(pod_uuid)
uuid = episode.pop('uuid')
results.append(Episode(uuid, podcasts[pod_uuid], **episode))
return results
def get_in_progress(self):
"""Get all in progress episodes
Returns:
List[pocketcasts.episode.Episode]: A list of episodes
"""
attempt = self._make_req('https://play.pocketcasts.com/web/episodes/in_progress_episodes.json', method='POST')
results = []
podcasts = {}
for episode in attempt.json()['episodes']:
pod_uuid = episode['podcast_uuid']
if pod_uuid not in podcasts:
podcasts[pod_uuid] = self.get_podcast(pod_uuid)
uuid = episode.pop('uuid')
results.append(Episode(uuid, podcasts[pod_uuid], **episode))
return results
def get_starred(self):
"""Get all starred episodes
Returns:
List[pocketcasts.episode.Episode]: A list of episodes
"""
attempt = self._make_req('https://play.pocketcasts.com/web/episodes/starred_episodes.json', method='POST')
results = []
podcasts = {}
for episode in attempt.json()['episodes']:
pod_uuid = episode['podcast_uuid']
if pod_uuid not in podcasts:
podcasts[pod_uuid] = self.get_podcast(pod_uuid)
uuid = episode.pop('uuid')
results.append(Episode(uuid, podcasts[pod_uuid], **episode))
return results
def update_starred(self, podcast, episode, starred):
"""Star or unstar an episode
Args:
podcast (pocketcasts.Podcast): A podcast class
episode (pocketcasts.Episode): An episode class to be updated
starred (int): 1 for starred, 0 for unstarred
"""
data = {
'starred': starred,
'podcast_uuid': podcast.uuid,
'uuid': episode.uuid
}
self._make_req("https://play.pocketcasts.com/web/episodes/update_episode_star.json", data=data)
# TODO Check if successful or not
def update_playing_status(self, podcast, episode, status=Episode.PlayingStatus.Unplayed):
"""Update the playing status of an episode
Args:
podcast (pocketcasts.Podcast): A podcast class
episode (pocketcasts.Episode): An episode class to be updated
status (int): 0 for unplayed, 2 for playing, 3 for played. Defaults to 0.
"""
if status not in [0, 2, 3]:
raise Exception('Invalid status.')
data = {
'playing_status': status,
'podcast_uuid': podcast.uuid,
'uuid': episode.uuid
}
self._make_req("https://play.pocketcasts.com/web/episodes/update_episode_position.json", data=data)
def update_played_position(self, podcast, episode, position):
"""Update the current play duration of an episode
Args:
podcast (pocketcasts.Podcast): A podcast class
episode (pocketcasts.Episode): An episode class to be updated
position (int): A time in seconds
Returns:
bool: True if update is successful
Raises:
Exception: If update fails
"""
data = {
'playing_status': episode.playing_status,
'podcast_uuid': podcast.uuid,
'uuid': episode.uuid,
'duration': episode.duration,
'played_up_to': position
}
attempt = self._make_req("https://play.pocketcasts.com/web/episodes/update_episode_position.json",
method='JSON', data=data)
if attempt.json()['status'] != 'ok':
raise Exception('Sorry your update failed.')
return True
def subscribe_podcast(self, podcast):
"""Subscribe to a podcast
Args:
podcast (pocketcasts.Podcast): The podcast to subscribe to
"""
data = {
'uuid': podcast.uuid
}
self._make_req("https://play.pocketcasts.com/web/podcasts/subscribe.json", data=data)
def unsubscribe_podcast(self, podcast):
"""Unsubscribe from a podcast
Args:
podcast (pocketcasts.Podcast): The podcast to unsubscribe from
"""
data = {
'uuid': podcast.uuid
}
self._make_req("https://play.pocketcasts.com/web/podcasts/unsubscribe.json", data=data)
def search_podcasts(self, search_str):
"""Search for podcasts
Args:
search_str (str): The string to search for
Returns:
List[pocketcasts.podcast.Podcast]: A list of podcasts matching the search string
"""
data = {
'term': search_str
}
attempt = self._make_req("https://play.pocketcasts.com/web/podcasts/search.json", data=data)
results = []
for podcast in attempt.json()['podcasts']:
uuid = podcast.pop('uuid')
results.append(Podcast(uuid, self, **podcast))
return results
| python | MIT | 54db3425a99a412a1fd3784c8e0247cd44356f3a | 2026-01-05T07:14:47.067072Z | false |
furgoose/Pocket-Casts | https://github.com/furgoose/Pocket-Casts/blob/54db3425a99a412a1fd3784c8e0247cd44356f3a/pocketcasts/episode.py | pocketcasts/episode.py | from datetime import datetime
class Episode(object):
    """A single podcast episode, as returned by the Pocket Casts API.

    Wraps the raw API fields and pushes state changes (starred status,
    playing status, play position) back through the owning API object.
    """

    class PlayingStatus(object):
        """Constants for the API's ``playing_status`` field."""
        Unplayed = 0
        Playing = 2
        Played = 3

    def __init__(self, uuid, podcast, **kwargs):
        """
        Args:
            uuid (str): Episode UUID
            podcast (pocketcasts.Podcast): Podcast the episode belongs to
            **kwargs: Remaining episode attributes as returned by the API
        """
        self._podcast = podcast
        self._api = podcast.api
        self._uuid = uuid
        self._id = kwargs.get('id', '')
        self._is_deleted = kwargs.get('is_deleted', '')
        self._is_video = bool(kwargs.get('is_video', ''))
        self._file_type = kwargs.get('file_type', '')
        self._size = kwargs.get('size', '')
        self._title = kwargs.get('title', '')
        self._url = kwargs.get('url', '')
        self._duration = kwargs.get('duration', '')
        # Fix: the previous code fed a '' fallback straight into strptime(),
        # which unconditionally raises ValueError whenever the API omits
        # 'published_at'.  A missing timestamp is now represented as None.
        published = kwargs.get('published_at')
        self._published_at = (
            datetime.strptime(published, '%Y-%m-%d %H:%M:%S')
            if published else None)
        self._starred = bool(kwargs.get('starred', ''))
        self._playing_status = kwargs.get('playing_status', Episode.PlayingStatus.Unplayed)
        self._played_up_to = kwargs.get('played_up_to', '')

    def __repr__(self):
        return "%s (%r)" % (self.__class__, self.__dict__)

    @property
    def podcast(self):
        """pocketcasts.Podcast: The podcast this episode belongs to."""
        return self._podcast

    @property
    def uuid(self):
        """str: The episode UUID."""
        return self._uuid

    @property
    def id(self):
        """The episode ID."""
        return self._id

    @property
    def is_deleted(self):
        """Whether the episode is flagged as deleted by the API."""
        return self._is_deleted

    @property
    def is_video(self):
        """bool: Whether the episode is a video episode."""
        return self._is_video

    @property
    def file_type(self):
        """The media file type (MIME type string from the API)."""
        return self._file_type

    @property
    def size(self):
        """The media file size."""
        return self._size

    @property
    def title(self):
        """str: The episode title."""
        return self._title

    @property
    def url(self):
        """str: The media URL of the episode."""
        return self._url

    @property
    def duration(self):
        """The episode duration (seconds, as reported by the API)."""
        return self._duration

    @property
    def published_at(self):
        """datetime or None: Publish time, or None if the API omitted it."""
        return self._published_at

    @property
    def starred(self):
        """bool: Starred status; assigning pushes the change to the server."""
        return self._starred

    @starred.setter
    def starred(self, starred):
        # The API expects 1/0 rather than True/False.
        star = 1 if starred else 0
        self._api.update_starred(self._podcast, self, star)
        self._starred = starred

    @property
    def playing_status(self):
        """int: Playing status (see PlayingStatus); assigning pushes the change."""
        return self._playing_status

    @playing_status.setter
    def playing_status(self, status):
        self._api.update_playing_status(self._podcast, self, status)
        if status == self.PlayingStatus.Unplayed:
            # Marking as unplayed also rewinds the play position server-side.
            self._api.update_played_position(self._podcast, self, 0)
        self._playing_status = status

    @property
    def played_up_to(self):
        """Play position in seconds; assigning pushes the change to the server."""
        return self._played_up_to

    @played_up_to.setter
    def played_up_to(self, position):
        self._api.update_played_position(self._podcast, self, position)
        self._played_up_to = position
| python | MIT | 54db3425a99a412a1fd3784c8e0247cd44356f3a | 2026-01-05T07:14:47.067072Z | false |
furgoose/Pocket-Casts | https://github.com/furgoose/Pocket-Casts/blob/54db3425a99a412a1fd3784c8e0247cd44356f3a/pocketcasts/__init__.py | pocketcasts/__init__.py | from .api import Pocketcasts
from .episode import Episode
from .podcast import Podcast | python | MIT | 54db3425a99a412a1fd3784c8e0247cd44356f3a | 2026-01-05T07:14:47.067072Z | false |
class Podcast(object):
    """Represents a Pocket Casts podcast and its metadata.

    Holds the fields returned by the API and exposes them through
    read-only properties; the ``subscribed`` property is read/write and
    talks to the server through the owning API object.
    """

    class SortOrder(object):
        """Episode sort-order constants used by the API."""
        NewestToOldest = 3
        OldestToNewest = 2

    def __init__(self, uuid, api, **kwargs):
        """
        Args:
            uuid (str): Podcast UUID
            api (pocketcasts.Pocketcasts): API instance used for requests
            **kwargs: Remaining podcast attributes as returned by the API
        """
        self._api = api
        self._uuid = uuid
        # Plain string fields all default to ''.
        for field in ('id', 'title', 'author', 'description', 'url'):
            setattr(self, '_' + field, kwargs.get(field, ''))
        self._episodes_sort_order = kwargs.get('episodes_sort_order', Podcast.SortOrder.NewestToOldest)
        self._language = kwargs.get('language', '')
        # The API packs category names into one newline-separated string.
        self._categories = str(kwargs.get('category', '')).split('\n')
        self._thumbnail_url_src = kwargs.get('thumbnail_url', '')
        # Fixed-size artwork URLs are derived from the UUID.
        self._thumbnail_url_small = "http://static.pocketcasts.com/discover/images/130/{}.jpg".format(uuid)
        self._thumbnail_url_medium = "http://static.pocketcasts.com/discover/images/200/{}.jpg".format(uuid)
        self._thumbnail_url_large = "http://static.pocketcasts.com/discover/images/280/{}.jpg".format(uuid)
        self._media_type = kwargs.get('media_type', '')

    def __repr__(self):
        return "%s (%r)" % (self.__class__, self.__dict__)

    @property
    def api(self):
        """pocketcasts.Pocketcasts: The API object this podcast belongs to."""
        return self._api

    @property
    def uuid(self):
        """str: The podcast UUID."""
        return self._uuid

    @property
    def id(self):
        """The podcast ID."""
        return self._id

    @property
    def title(self):
        """str: The podcast title."""
        return self._title

    @property
    def author(self):
        """str: The podcast author."""
        return self._author

    @property
    def description(self):
        """str: The podcast description."""
        return self._description

    @property
    def url(self):
        """str: The podcast URL."""
        return self._url

    @property
    def sort_order(self):
        """int: Episode sort order (see SortOrder)."""
        return self._episodes_sort_order

    @property
    def language(self):
        """str: The podcast language."""
        return self._language

    @property
    def categories(self):
        """list[str]: The podcast categories."""
        return self._categories

    @property
    def thumbnail_url_src(self):
        """str: The source podcast image URL."""
        return self._thumbnail_url_src

    @property
    def thumbnail_url_small(self):
        """str: The small podcast image URL (130x130)."""
        return self._thumbnail_url_small

    @property
    def thumbnail_url_medium(self):
        """str: The medium podcast image URL (200x200)."""
        return self._thumbnail_url_medium

    @property
    def thumbnail_url_large(self):
        """str: The large podcast image URL (280x280)."""
        return self._thumbnail_url_large

    @property
    def subscribed(self):
        """bool: Whether the user is subscribed; assigning changes it server-side."""
        return any(sub.uuid == self.uuid
                   for sub in self._api.get_subscribed_podcasts())

    @subscribed.setter
    def subscribed(self, status):
        if status:
            self._api.subscribe_podcast(self)
        else:
            self._api.unsubscribe_podcast(self)
| python | MIT | 54db3425a99a412a1fd3784c8e0247cd44356f3a | 2026-01-05T07:14:47.067072Z | false |
furgoose/Pocket-Casts | https://github.com/furgoose/Pocket-Casts/blob/54db3425a99a412a1fd3784c8e0247cd44356f3a/tests/test_api.py | tests/test_api.py | import os
import unittest
import pocketcasts
# Credentials come from the environment; without valid values the
# class-level Pocketcasts(...) login below will fail at import time.
USERNAME = os.environ.get('POCKETCAST_USER')
PASSWORD = os.environ.get('POCKETCAST_PASSWORD')
class PocketcastTest(unittest.TestCase):
    """Integration tests for the Pocketcasts client.

    These hit the live Pocket Casts service: they require network access
    and valid POCKETCAST_USER / POCKETCAST_PASSWORD environment variables
    (the login happens once, at class-definition time).
    """

    pocket = pocketcasts.Pocketcasts(USERNAME, PASSWORD)

    def test_invalid_method(self):
        self.assertRaises(Exception, self.pocket._make_req, 'test', method='INVALID')

    def test_invalid_login(self):
        self.assertRaises(Exception, pocketcasts.Pocketcasts, 'test', 'INVALID')

    def test_get_top_charts(self):
        self.pocket.get_top_charts()

    def test_get_featured(self):
        self.pocket.get_featured()

    def test_get_trending(self):
        self.pocket.get_trending()

    def test_get_podcast(self):
        self.pocket.get_podcast('12012c20-0423-012e-f9a0-00163e1b201c')

    def test_get_podcast_episodes(self):
        self.pocket.get_podcast_episodes(self.pocket.get_trending()[0])

    def test_get_episode(self):
        pod = self.pocket.get_podcast("12012c20-0423-012e-f9a0-00163e1b201c")
        self.pocket.get_episode(pod, "7b28c700-d4f1-0134-ebdd-4114446340cb")

    def test_get_starred(self):
        self.pocket.get_starred()

    def test_search_podcasts(self):
        self.pocket.search_podcasts('test')

    def test_subscribe_functions(self):
        pod = self.pocket.get_podcast("da9bb800-e230-0132-0bd1-059c869cc4eb")
        pod.subscribed = True
        pod.subscribed = False

    def test_get_episode_notes(self):
        self.pocket.get_episode_notes('a35748e0-bb4d-0134-10a8-25324e2a541d')

    def test_get_subscribed_podcasts(self):
        self.pocket.get_subscribed_podcasts()

    def test_get_new_releases(self):
        self.pocket.get_new_releases()

    def test_get_in_progress(self):
        self.pocket.get_in_progress()

    def test_update_playing_status(self):
        pod = self.pocket.get_podcast("12012c20-0423-012e-f9a0-00163e1b201c")
        epi = self.pocket.get_podcast_episodes(pod)[-1]
        epi.playing_status = 3

    def test_invalid_update_playing_status(self):
        pod = self.pocket.get_podcast("12012c20-0423-012e-f9a0-00163e1b201c")
        epi = self.pocket.get_podcast_episodes(pod)[-1]
        with self.assertRaises(Exception) as context:
            epi.playing_status = 'invalid'
        # Fix: update_playing_status() rejects non-0/2/3 values locally with
        # 'Invalid status.' before any request is made, and `in` on the
        # exception object itself raises TypeError -- compare against str().
        self.assertIn('Invalid status.', str(context.exception))

    def test_update_played_position(self):
        pod = self.pocket.get_podcast("12012c20-0423-012e-f9a0-00163e1b201c")
        epi = self.pocket.get_podcast_episodes(pod)[-1]
        epi.played_up_to = 2

    def test_invalid_played_position(self):
        pod = self.pocket.get_podcast("12012c20-0423-012e-f9a0-00163e1b201c")
        epi = self.pocket.get_podcast_episodes(pod)[-1]
        with self.assertRaises(Exception) as context:
            epi.played_up_to = 'invalid'
        # Fix: membership test must be against str(context.exception);
        # Exception instances do not support the `in` operator.
        self.assertIn('Sorry your update failed.', str(context.exception))

    def test_update_starred(self):
        pod = self.pocket.get_podcast("12012c20-0423-012e-f9a0-00163e1b201c")
        epi = self.pocket.get_podcast_episodes(pod)[-1]
        epi.starred = True
        epi.starred = False


if __name__ == '__main__':
    unittest.main()
| python | MIT | 54db3425a99a412a1fd3784c8e0247cd44356f3a | 2026-01-05T07:14:47.067072Z | false |
cqparts/cqparts | https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/src/cqparts_fasteners/params.py | src/cqparts_fasteners/params.py | import six
from cqparts.params import Parameter, ParametricObject
# Types of things... not parts on their own, but utilised in many
from .solidtypes import fastener_heads
from .solidtypes import screw_drives
from .solidtypes import threads
# --------- Custom Parameter types ---------
class FastenerComponentParam(Parameter):
    """
    Custom fastener component as a parameter.
    (not to be confused with a *Part*)

    Subclasses configure three class attributes:

    - ``name``: human-readable component name used in error messages
    - ``finder_callback``: registry lookup mapping a name string to a class
    - ``component_base``: base class every accepted component must inherit
    """
    name = None
    finder_callback = None
    component_base = None

    def type(self, value):
        """Coerce ``value`` into a ``component_base`` instance.

        Accepts either a ready-made component instance, or a 2-tuple/list of
        ``(component_type, params)`` where ``component_type`` is a registered
        name string or a ``component_base`` subclass and ``params`` is a dict
        of constructor arguments.

        Raises:
            ParameterError: if ``value`` cannot be interpreted.
        """
        # NOTE(review): ParameterError is raised throughout but is not among
        # the visible imports (only Parameter / ParametricObject) -- confirm
        # it is brought into scope elsewhere in this module.
        if isinstance(value, self.component_base):
            # component's instance explicitly provided.
            return value
        elif isinstance(value, (tuple, list)):
            # split value
            if len(value) != 2:
                raise ParameterError("given tuple must have 2 elements ({name}_type, dict(params)): {val!r}".format(
                    name=self.name, val=value
                ))
            (component_type, params) = value

            # Get component's class (or raise an exception trying)
            component_class = None
            # Fix: isinstance(x, type) instead of `type(x) is type`, so that
            # classes with a custom metaclass (e.g. ABCMeta) are recognised
            # as classes rather than falling through to the string branch.
            if isinstance(component_type, type):  # given value is a class
                if issubclass(component_type, self.component_base):
                    # component class is explicitly defined
                    component_class = component_type
                else:
                    # Fix: report the offending class itself, not its
                    # metaclass (the old code formatted type(component_type)).
                    raise ParameterError(
                        "given {name} type class {cls!r} does not inherit from {base!r}".format(
                            name=self.name, cls=component_type, base=self.component_base
                        )
                    )
            elif isinstance(component_type, six.string_types):
                # name of component type given, use callback to find it
                try:
                    component_class = self.finder_callback(name=component_type)
                except ValueError:
                    raise ParameterError(
                        ("{name} type of '{type}' cannot be found. ".format(name=self.name, type=component_type)) +
                        "is it spelt correctly (case sensitive)?, has the library defining it been imported?"
                    )
            else:
                raise ParameterError(
                    "{name} type {val!r} must be a str, or a class inheriting from {base!r}".format(
                        name=self.name, val=component_type, base=self.component_base
                    )
                )

            # Verify parameters (very loosely)
            if not isinstance(params, dict):
                raise ParameterError("parameters must be a dict: {!r}".format(params))

            # Create instance & return it
            return component_class(**params)
        # NOTE(review): any other value type silently yields None, preserving
        # the original fall-through behaviour -- consider raising instead.
        return None

    # Serialize / Deserialize
    @classmethod
    def serialize(cls, value):
        """Serialize a component value (delegates to the component itself)."""
        if value is None:
            return value
        return value.serialize()  # divert to ParametricObject's serialize()

    @classmethod
    def deserialize(cls, value):
        """Deserialize a component value via ParametricObject."""
        if value is None:
            return value
        return ParametricObject.deserialize(value)
class HeadType(FastenerComponentParam):
    """Parameter accepting a male fastener *head* component (see :meth:`type`)."""
    name = 'head'  # label used in ParameterError messages
    finder_callback = staticmethod(fastener_heads.find)  # registry lookup by name
    component_base = fastener_heads.FastenerHead  # accepted base class
    _doc_type = "``value`` for :meth:`HeadType.type <cqparts_fasteners.params.HeadType.type>`"
    def type(self, value):
        """
        :param value: defined type of male fastener head
        :type value: see below

        ``value`` can be any of:

        - :class:`FastenerHead <cqparts.solidtypes.fastener_heads.FastenerHead>` instance
        - :class:`tuple` of (``head type``, ``parameters``) where:

            - ``head type`` is one of

                - :class:`str` name of fastener head (registered with :meth:`register <cqparts.solidtypes.fastener_heads.register>`)
                - :class:`FastenerHead <cqparts.solidtypes.fastener_heads.FastenerHead>` subclass

            - ``parameters`` is a :class:`dict`
        """
        return super(HeadType, self).type(value)
class DriveType(FastenerComponentParam):
    """Parameter accepting a *screw-drive* component (see :meth:`type`)."""
    name = 'drive'  # label used in ParameterError messages
    finder_callback = staticmethod(screw_drives.find)  # registry lookup by name
    component_base = screw_drives.ScrewDrive  # accepted base class
    _doc_type = "``value`` for :meth:`DriveType.type <cqparts_fasteners.params.DriveType.type>`"
    def type(self, value):
        """
        :param value: defined type of screw-drive
        :type value: see below

        ``value`` can be any of:

        - :class:`ScrewDrive <cqparts.solidtypes.screw_drives.ScrewDrive>` instance
        - :class:`tuple` of (``drive type``, ``parameters``) where

            - ``drive type`` is one of

                - ``str``: name of screw-drive (registered with :meth:`register <cqparts.solidtypes.screw_drives.register>`)
                - :class:`ScrewDrive <cqparts.solidtypes.screw_drives.ScrewDrive>` subclass

            - ``parameters`` is a :class:`dict`
        """
        return super(DriveType, self).type(value)
class ThreadType(FastenerComponentParam):
    """Parameter accepting a *thread* component (see :meth:`type`)."""
    name = 'thread'  # label used in ParameterError messages
    finder_callback = staticmethod(threads.find)  # registry lookup by name
    component_base = threads.Thread  # accepted base class
    _doc_type = "``value`` for :meth:`ThreadType.type <cqparts_fasteners.params.ThreadType.type>`"
    def type(self, value):
        """
        :param value: defined type of thread
        :type value: see below

        ``value`` can be any of:

        - :class:`Thread <cqparts.solidtypes.threads.Thread>` instance
        - :class:`tuple` of (``thread type``, ``parameters``) where:

            - ``thread type`` is one of

                - ``str``: name of thread type (registered with :meth:`register <cqparts.solidtypes.threads.register>`)
                - :class:`Thread <cqparts.solidtypes.threads.Thread>` subclass

            - ``parameters`` is a :class:`dict`
        """
        return super(ThreadType, self).type(value)
| python | Apache-2.0 | 018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53 | 2026-01-05T07:14:41.025281Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.