code
stringlengths
17
6.64M
def get_named_beta_schedule(schedule_name, num_diffusion_timesteps): '\n Get a pre-defined beta schedule for the given name.\n\n The beta schedule library consists of beta schedules which remain similar\n in the limit of num_diffusion_timesteps.\n Beta schedules may be added, but should not be removed or chan...
def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999): '\n Create a beta schedule that discretizes the given alpha_t_bar function,\n which defines the cumulative product of (1-beta) over time from t = [0,1].\n\n :param num_diffusion_timesteps: the number of betas to produce.\n :param alp...
class ModelMeanType(enum.Enum):
    """Enumerates which quantity the diffusion model's network predicts."""

    PREVIOUS_X = enum.auto()  # predict x_{t-1} (name suggests the previous sample)
    START_X = enum.auto()     # predict x_0 (the clean starting sample)
    EPSILON = enum.auto()     # predict the added noise
class ModelVarType(enum.Enum): "\n What is used as the model's output variance.\n\n The LEARNED_RANGE option has been added to allow the model to predict\n values between FIXED_SMALL and FIXED_LARGE, making its job easier.\n " LEARNED = enum.auto() FIXED_SMALL = enum.auto() FIXED_LARGE = enum.auto...
class LossType(enum.Enum):
    """Training objectives available for the diffusion model."""

    MSE = enum.auto()
    RESCALED_MSE = enum.auto()
    KL = enum.auto()
    RESCALED_KL = enum.auto()

    def is_vb(self):
        """Return True for the variational-bound (KL-style) objectives."""
        return self in (LossType.KL, LossType.RESCALED_KL)
class GaussianDiffusion(): '\n Utilities for training and sampling diffusion models.\n\n Ported directly from here, and adapted over time to further experimentation.\n https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/diffusion_utils_2.py#L42\n\n :param betas...
def normal_kl(mean1, logvar1, mean2, logvar2): '\n Compute the KL divergence between two gaussians.\n Shapes are automatically broadcasted, so batches can be compared to\n scalars, among other use cases.\n ' tensor = None for obj in (mean1, logvar1, mean2, logvar2): if isinstance(obj, np.ndarr...
def approx_standard_normal_cdf(x):
    """Fast tanh-based approximation of the standard normal CDF.

    Uses the classic 0.5 * (1 + tanh(sqrt(2/pi) * (x + 0.044715 x^3)))
    approximation; `x` may be a scalar or a numpy array.
    """
    inner = np.sqrt(2.0 / np.pi) * (x + 0.044715 * np.power(x, 3))
    return 0.5 * (1.0 + np.tanh(inner))
def discretized_gaussian_log_likelihood(x, *, means, log_scales): '\n Compute the log-likelihood of a Gaussian distribution discretizing to a\n given image.\n :param x: the target images. It is assumed that this was uint8 values,\n rescaled to the range [-1, 1].\n :param means: the Gaussian mean Te...
def _extract_into_tensor(arr, timesteps, broadcast_shape): '\n Extract values from a 1-D numpy array for a batch of indices.\n\n :param arr: the 1-D numpy array.\n :param timesteps: a tensor of indices into the array to extract.\n :param broadcast_shape: a larger shape of K dimensions with the batch\n ...
class NoiseScheduleVP(): def __init__(self, schedule='discrete', betas=None, alphas_cumprod=None, continuous_beta_0=0.1, continuous_beta_1=20.0): "Create a wrapper class for the forward SDE (VP type).\n\n ***\n Update: We support discrete-time diffusion models by implementing a picewise lin...
def model_wrapper(model, noise_schedule, model_type='noise', model_kwargs={}, guidance_type='uncond', condition=None, unconditional_condition=None, guidance_scale=1.0, classifier_fn=None, classifier_kwargs={}): 'Create a wrapper function for the noise prediction model.\n\n DPM-Solver needs to solve the continu...
class DPM_Solver(): def __init__(self, model_fn, noise_schedule, predict_x0=False, thresholding=False, max_val=1.0): 'Construct a DPM-Solver.\n\n We support both the noise prediction model ("predicting epsilon") and the data prediction model ("predicting x0").\n If `predict_x0` is False, we...
def interpolate_fn(x, xp, yp): '\n A piecewise linear function y = f(x), using xp and yp as keypoints.\n We implement f(x) in a differentiable way (i.e. applicable for autograd).\n The function f(x) is well-defined for all x-axis. (For x beyond the bounds of xp, we use the outmost points of xp to define ...
def expand_dims(v, dims): '\n Expand the tensor `v` to the dim `dims`.\n\n Args:\n `v`: a jnp.DeviceArray with shape [N].\n `dim`: a `int`.\n Returns:\n a jnp.DeviceArray with shape [N, 1, 1, ..., 1] and the total dimension is `dims`.\n ' return v[((...,) + ((None,) * (dims - ...
def to_sparse_list(l): sparse_l = [] num = 0 last_item = None for item in l: if (last_item is None): last_item = item num = 1 elif (item != last_item): sparse_l.append((last_item, num)) last_item = item num = 1 else: ...
class SamplerPolicy(object): def __init__(self, policy, qf=None, mean=0, std=1, ensemble=False, act_method='ddpm'): self.policy = policy self.qf = qf self.mean = mean self.std = std self.num_samples = 50 self.act_method = act_method def update_params(self, par...
class DiffusionTrainer(): def __init__(self): self._cfgs = absl.flags.FLAGS self._algo = DiffusionQL self._algo_type = 'DiffusionQL' self._cfgs.algo_cfg.max_grad_norm = hyperparameters[self._cfgs.env]['gn'] self._cfgs.algo_cfg.lr_decay_steps = (self._cfgs.n_epochs * self._...
def to_arch(string):
    """Parse a dash-separated architecture spec like '256-256' into a tuple of ints."""
    parts = string.split('-')
    return tuple([int(p) for p in parts])
def _build_rodent_escape_env(): 'Build environment where a rodent escapes from a bowl.' walker = walkers.Rat(observable_options={'egocentric_camera': dict(enabled=True)}) arena = arenas.bowl.Bowl(size=(20.0, 20.0), aesthetic='outdoor_natural') locomotion_task = tasks.escape.Escape(walker=walker, arena...
def _build_rodent_maze_env(): 'Build environment where a rodent runs to targets.' walker = walkers.Rat(observable_options={'egocentric_camera': dict(enabled=True)}) wall_textures = arenas.labmaze_textures.WallTextures(style='style_01') arena = arenas.mazes.RandomMazeWithTargets(x_cells=11, y_cells=11,...
def _build_rodent_corridor_gaps(): 'Build environment where a rodent runs over gaps.' walker = walkers.Rat(observable_options={'egocentric_camera': dict(enabled=True)}) platform_length = distributions.Uniform(low=0.4, high=0.8) gap_length = distributions.Uniform(low=0.05, high=0.2) arena = arenas....
def _build_rodent_two_touch_env(): 'Build environment where a rodent touches targets.' walker = walkers.Rat(observable_options={'egocentric_camera': dict(enabled=True)}) arena_floor = arenas.floors.Floor(size=(10.0, 10.0), aesthetic='outdoor_natural') task_reach = tasks.reach.TwoTouch(walker=walker, a...
def _build_humanoid_walls_env(): 'Build humanoid walker walls environment.' walker = walkers.CMUHumanoidPositionControlled(name='walker', observable_options={'egocentric_camera': dict(enabled=True)}) wall_width = distributions.Uniform(low=1, high=7) wall_height = distributions.Uniform(low=2.5, high=4....
def _build_humanoid_corridor_env(): 'Build humanoid walker walls environment.' walker = walkers.CMUHumanoidPositionControlled(name='walker', observable_options={'egocentric_camera': dict(enabled=True)}) arena = arenas.EmptyCorridor(corridor_width=10, corridor_length=100) humanoid_task = tasks.RunThrou...
def _build_humanoid_corridor_gaps(): 'Build humanoid walker walls environment.' walker = walkers.CMUHumanoidPositionControlled(name='walker', observable_options={'egocentric_camera': dict(enabled=True)}) platform_length = distributions.Uniform(low=0.3, high=2.5) gap_length = distributions.Uniform(low=...
class MujocoActionNormalizer(wrappers.EnvironmentWrapper): 'Rescale actions to [-1, 1] range for mujoco physics engine.\n\n For control environments whose actions have bounded range in [-1, 1], this\n adaptor rescale actions to the desired range. This allows actor network to\n output unscaled actions for b...
class NormilizeActionSpecWrapper(wrappers.EnvironmentWrapper): 'Turn each dimension of the actions into the range of [-1, 1].' def __init__(self, environment): super().__init__(environment) action_spec = environment.action_spec() self._scale = (action_spec.maximum - action_spec.minimu...
class FilterObservationsWrapper(wrappers.EnvironmentWrapper): 'Filter out all the observations not specified to this wrapper.' def __init__(self, environment, observations_to_keep): super().__init__(environment) self._observations_to_keep = observations_to_keep spec = self._environmen...
class ControlSuite(): 'Create bits needed to run agents on an Control Suite dataset.' def __init__(self, task_name='humanoid_run'): 'Initializes datasets/environments for the Deepmind Control suite.\n\n Args:\n task_name: take name. Must be one of,\n finger_turn_hard, manipulator_inser...
class CmuThirdParty(): 'Create bits needed to run agents on an locomotion humanoid dataset.' def __init__(self, task_name='humanoid_walls'): self._task_name = task_name self._pixel_keys = self.get_pixel_keys() self._uint8_features = set(['observation/walker/egocentric_camera']) ...
class Rodent(): 'Create bits needed to run agents on an Rodent dataset.' def __init__(self, task_name='rodent_gaps'): self._task_name = task_name self._pixel_keys = self.get_pixel_keys() self._uint8_features = set(['observation/walker/egocentric_camera']) self._proprio_keys = ...
def _parse_seq_tf_example(example, uint8_features, shapes): 'Parse tf.Example containing one or two episode steps.' def to_feature(key, shape): if (key in uint8_features): return tf.io.FixedLenSequenceFeature(shape=[], dtype=tf.string, allow_missing=True) else: return ...
def _build_sequence_example(sequences): 'Convert raw sequences into a Reverb sequence sample.' data = adders.Step(observation=sequences['observation'], action=sequences['action'], reward=sequences['reward'], discount=sequences['discount'], start_of_episode=(), extras=()) info = reverb.SampleInfo(key=tf.co...
def _build_sarsa_example(sequences): 'Convert raw sequences into a Reverb n-step SARSA sample.' o_tm1 = tree.map_structure((lambda t: t[0]), sequences['observation']) o_t = tree.map_structure((lambda t: t[1]), sequences['observation']) a_tm1 = tree.map_structure((lambda t: t[0]), sequences['action']) ...
def _padded_batch(example_ds, batch_size, shapes, drop_remainder=False): 'Batch data while handling unequal lengths.' padded_shapes = {} padded_shapes['observation'] = {} for (k, v) in shapes.items(): if ('observation' in k): padded_shapes['observation'][k.replace('observation/', '...
def dataset(root_path: str, data_path: str, shapes: Dict[(str, Tuple[int])], num_threads: int, batch_size: int, uint8_features: Optional[Set[str]]=None, num_shards: int=100, shuffle_buffer_size: int=100000, sarsa: bool=True) -> tf.data.Dataset: 'Create tf dataset for training.' uint8_features = (uint8_feature...
def get_version() -> str:
    """Read the package version string out of ``diffusion/__init__.py``.

    Expects a whitespace-separated assignment of the form
    ``__version__ = '<version>'`` and returns the version with its
    surrounding quote characters stripped.

    :raises FileNotFoundError: if ``diffusion/__init__.py`` does not exist.
    :raises ValueError: if no ``__version__`` token is present.
    """
    # Use a context manager so the file handle is closed deterministically;
    # the original relied on the garbage collector to close it.
    with open(os.path.join('diffusion', '__init__.py'), 'r') as f:
        tokens = f.read().split()
    # tokens look like [..., '__version__', '=', "'x.y.z'", ...],
    # so the quoted value sits two positions after the name.
    return tokens[tokens.index('__version__') + 2][1:-1]
class JaxRNG(object):
    """Stateful convenience wrapper around JAX PRNG keys.

    Every call splits the internal key: the wrapper keeps one half as its
    new state and hands the other half back to the caller, so successive
    calls yield independent subkeys deterministically from the seed.
    """

    def __init__(self, seed):
        self.rng = jax.random.PRNGKey(seed)

    def __call__(self):
        new_state, subkey = jax.random.split(self.rng)
        self.rng = new_state
        return subkey
def init_rng(seed):
    """Initialize the module-global JaxRNG instance consumed by next_rng()."""
    global jax_utils_rng
    jax_utils_rng = JaxRNG(seed)
def next_rng():
    """Return a fresh PRNG subkey from the module-global JaxRNG (set by init_rng)."""
    global jax_utils_rng
    return jax_utils_rng()
def extend_and_repeat(tensor, axis, repeat):
    """Insert a new dimension at `axis` and tile it `repeat` times."""
    expanded = jnp.expand_dims(tensor, axis)
    return jnp.repeat(expanded, repeat, axis=axis)
def mse_loss(val, target):
    """Mean squared error between `val` and `target`."""
    diff = val - target
    return jnp.mean(jnp.square(diff))
def value_and_multi_grad(fun, n_outputs, argnums=0, has_aux=False): def select_output(index): def wrapped(*args, **kwargs): if has_aux: (x, *aux) = fun(*args, **kwargs) return (x[index], *aux) else: x = fun(*args, **kwargs) ...
@jax.jit
def batch_to_jax(batch):
    """Move every leaf array of a (possibly nested) batch pytree onto device.

    Replaces the deprecated ``jax.tree_map`` alias (removed in recent JAX
    releases) with the stable ``jax.tree_util.tree_map``; behavior is
    otherwise unchanged.
    """
    return jax.tree_util.tree_map(jax.device_put, batch)
class StepSampler(object): def __init__(self, env, max_traj_length=1000): self.max_traj_length = max_traj_length self._env = env self._traj_steps = 0 self._current_observation = self.env.reset() def sample(self, policy, n_steps, deterministic=False, replay_buffer=None): ...
class TrajSampler(object): def __init__(self, env, max_traj_length=1000, render=False): self.max_traj_length = max_traj_length self._env = env self._render = render def norm_obs(self, obs, obs_statistics): (obs_mean, obs_std, obs_clip) = obs_statistics return np.clip(...
def split_into_trajectories(observations, actions, rewards, masks, dones_float, next_observations): trajs = [[]] for i in tqdm(range(len(observations))): trajs[(- 1)].append((observations[i], actions[i], rewards[i], masks[i], dones_float[i], next_observations[i])) if ((dones_float[i] == 1.0) a...
class Dataset(object): def __init__(self, observations: np.ndarray, actions: np.ndarray, rewards: np.ndarray, masks: np.ndarray, dones_float: np.ndarray, next_observations: np.ndarray, size: int): self.observations = observations self.actions = actions self.rewards = rewards self....
class D4RLDataset(Dataset): def __init__(self, env: gym.Env, clip_to_eps: bool=True, eps: float=1e-05): self.raw_dataset = dataset = d4rl.qlearning_dataset(env) if clip_to_eps: lim = (1 - eps) dataset['actions'] = np.clip(dataset['actions'], (- lim), lim) dones_flo...
def compute_returns(traj):
    """Total (undiscounted) reward of a trajectory of 6-tuple transitions.

    Each transition is (obs, action, reward, mask, done, next_obs); only the
    reward field contributes.
    """
    return sum(reward for (_, _, reward, _, _, _) in traj)
def get_traj_dataset(env, sorting=True, norm_reward=False): env = (gym.make(env) if isinstance(env, str) else env) dataset = D4RLDataset(env) trajs = split_into_trajectories(dataset.observations, dataset.actions, dataset.rewards, dataset.masks, dataset.dones_float, dataset.next_observations) if sortin...
def nstep_reward_prefix(rewards, nstep=5, gamma=0.9): gammas = np.array([(gamma ** i) for i in range(nstep)]) nstep_rewards = np.convolve(rewards, gammas)[(nstep - 1):] return nstep_rewards
def get_nstep_dataset(env, nstep=5, gamma=0.9, sorting=True, norm_reward=False): gammas = np.array([(gamma ** i) for i in range(nstep)]) (trajs, raw_dataset) = get_traj_dataset(env, sorting, norm_reward) (obss, acts, terms, next_obss, nstep_rews, dones_float) = ([], [], [], [], [], []) for traj in tra...
def norm_obs(ds, mean, std, clip_val): ds['observations'] = ((ds['observations'] - mean) / (std + 1e-06)) ds['next_observations'] = ((ds['next_observations'] - mean) / (std + 1e-06)) ds['observations'] = np.clip(ds['observations'], (- clip_val), clip_val) ds['next_observations'] = np.clip(ds['next_obs...
class Timer(object):
    """Context manager that measures the wall-clock duration of its block.

    Usage: ``with Timer() as t: ...`` then ``t()`` returns elapsed seconds.
    """

    def __init__(self):
        # Elapsed seconds; stays None until the context exits.
        self._time = None

    def __enter__(self):
        self._start_time = time.time()
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self._time = time.time() - self._start_time

    def __call__(self):
        """Return the most recently measured duration in seconds (or None)."""
        return self._time
class WandBLogger(object): @staticmethod def get_default_config(updates=None): config = ConfigDict() config.team = 'jax_offrl' config.online = False config.prefix = '' config.project = 'OfflineRL' config.output_dir = '/tmp/diffusion_rl' config.random_de...
def define_flags_with_default(**kwargs): for (key, val) in kwargs.items(): if isinstance(val, ConfigDict): config_flags.DEFINE_config_dict(key, val) elif isinstance(val, bool): absl.flags.DEFINE_bool(key, val, 'automatically defined flag') elif isinstance(val, int):...
def set_random_seed(seed):
    """Seed numpy, the stdlib RNG, and the module-level JAX RNG in one call."""
    random.seed(seed)
    np.random.seed(seed)
    init_rng(seed)
def print_flags(flags, flags_def):
    """Log every user-visible flag as a 'key: value' entry via logging.info."""
    entries = ['{}: {}'.format(key, val)
               for (key, val) in get_user_flags(flags, flags_def).items()]
    logging.info('Running training with hyperparameters: \n{}'.format(pprint.pformat(entries)))
def get_user_flags(flags, flags_def):
    """Collect defined flag values into a flat dict.

    ConfigDict-valued flags are expanded into dotted keys via
    flatten_config_dict; everything else is copied through unchanged.
    """
    collected = {}
    for name in flags_def:
        value = getattr(flags, name)
        if not isinstance(value, ConfigDict):
            collected[name] = value
            continue
        collected.update(flatten_config_dict(value, prefix=name))
    return collected
def flatten_config_dict(config, prefix=None): output = {} for (key, val) in config.items(): if isinstance(val, ConfigDict): output.update(flatten_config_dict(val, prefix=key)) elif (prefix is not None): output['{}.{}'.format(prefix, key)] = val else: ...
def prefix_metrics(metrics, prefix):
    """Return a copy of `metrics` with every key renamed to '<prefix>/<key>'."""
    renamed = {}
    for key, value in metrics.items():
        renamed['{}/{}'.format(prefix, key)] = value
    return renamed
class TerminalTablePrinter(object): def __init__(self): self.headers = None self.tabulars = [] def print_tabular(self, new_tabular): if (self.headers is None): self.headers = [x[0] for x in new_tabular] else: assert (len(self.headers) == len(new_tabula...
class MyEncoder(json.JSONEncoder): def default(self, o): if isinstance(o, type): return {'$class': ((o.__module__ + '.') + o.__name__)} elif isinstance(o, Enum): return {'$enum': ((((o.__module__ + '.') + o.__class__.__name__) + '.') + o.name)} elif callable(o): ...
def mkdir_p(path):
    """Create `path` and any missing parent directories, like ``mkdir -p``.

    Silently succeeds when the directory already exists; any other OSError
    (including `path` existing as a non-directory) propagates.
    """
    # exist_ok=True reproduces the old errno.EEXIST + isdir() check exactly:
    # an existing non-directory at `path` still raises FileExistsError.
    os.makedirs(path, exist_ok=True)
class Logger(object): def __init__(self): self._prefixes = [] self._prefix_str = '' self._tabular_prefixes = [] self._tabular_prefix_str = '' self._tabular = [] self._text_outputs = [] self._tabular_outputs = [] self._text_fds = {} self._tab...
def safe_json(data): if (data is None): return True elif isinstance(data, (bool, int, float)): return True elif isinstance(data, (tuple, list)): return all((safe_json(x) for x in data)) elif isinstance(data, dict): return all(((isinstance(k, str) and safe_json(v)) for (...
def dict_to_safe_json(d): "\n Convert each value in the dictionary into a JSON'able primitive.\n :param d:\n :return:\n " new_d = {} for (key, item) in d.items(): if safe_json(item): new_d[key] = item elif isinstance(item, dict): new_d[key] = dict_to_safe_json(i...
def create_exp_name(exp_prefix, exp_id=0, seed=0): '\n Create a semi-unique experiment name that has a timestamp\n :param exp_prefix:\n :param exp_id:\n :return:\n ' now = datetime.datetime.now(dateutil.tz.tzlocal()) timestamp = now.strftime('%Y_%m_%d_%H_%M_%S') return ('%s_%s-s-%d--%s' % (exp_pr...
def create_log_dir(exp_prefix, exp_id=0, seed=0, base_log_dir=None, include_exp_prefix_sub_dir=True): '\n Creates and returns a unique log directory.\n\n :param exp_prefix: All experiments with this prefix will have log\n directories be under this directory.\n :param exp_id: The number of the specific experim...
def setup_logger(exp_prefix='default', variant=None, text_log_file='debug.log', variant_log_file='variant.json', tabular_log_file='progress.csv', snapshot_mode='last', snapshot_gap=1, log_tabular_only=False, base_log_dir=None, **create_log_dir_kwargs): '\n Set up logger to have some reasonable default settings.\...
def _pipe_segment_with_colons(align, colwidth): "Return a segment of a horizontal line with optional colons which\n indicate column's alignment (as in `pipe` output format)." w = colwidth if (align in ['right', 'decimal']): return (('-' * (w - 1)) + ':') elif (align == 'center'): retu...
def _pipe_line_with_colons(colwidths, colaligns):
    """Build a pipe-format horizontal rule whose colons mark column alignment."""
    segments = []
    for align, width in zip(colaligns, colwidths):
        segments.append(_pipe_segment_with_colons(align, width))
    return '|' + '|'.join(segments) + '|'
def _mediawiki_row_with_attrs(separator, cell_values, colwidths, colaligns): alignment = {'left': '', 'right': 'align="right"| ', 'center': 'align="center"| ', 'decimal': 'align="right"| '} values_with_attrs = [(((' ' + alignment.get(a, '')) + c) + ' ') for (c, a) in zip(cell_values, colaligns)] colsep = ...
def _latex_line_begin_tabular(colwidths, colaligns): alignment = {'left': 'l', 'right': 'r', 'center': 'c', 'decimal': 'r'} tabular_columns_fmt = ''.join([alignment.get(a, 'l') for a in colaligns]) return (('\\begin{tabular}{' + tabular_columns_fmt) + '}\n\\hline')
def simple_separated_format(separator): 'Construct a simple TableFormat with columns separated by a separator.\n\n >>> tsv = simple_separated_format("\\t") ; tabulate([["foo", 1], ["spam", 23]], tablefmt=tsv) == \'foo \\t 1\\nspam\\t23\'\n True\n\n ' return TableFormat(None, None, None, None, headerrow...
def _isconvertible(conv, string): try: n = conv(string) return True except ValueError: return False
def _isnumber(string):
    """Return True when `string` parses as a float (e.g. "123", "123.45"); False otherwise."""
    return _isconvertible(float, string)
def _isint(string):
    """True for actual ints and for text/bytes values that parse as int."""
    if type(string) is int:
        return True
    is_stringlike = isinstance(string, _binary_type) or isinstance(string, _text_type)
    return is_stringlike and _isconvertible(int, string)
def _type(string, has_invisible=True): 'The least generic type (type(None), int, float, str, unicode).\n\n >>> _type(None) is type(None)\n True\n >>> _type("foo") is type("")\n True\n >>> _type("1") is type(1)\n True\n >>> _type(\'\x1b[31m42\x1b[0m\') is type(42)\n True\n >>> _type(\'\x1b[31m42\x1b[0m\')...
def _afterpoint(string): 'Symbols after a decimal point, -1 if the string lacks the decimal point.\n\n >>> _afterpoint("123.45")\n 2\n >>> _afterpoint("1001")\n -1\n >>> _afterpoint("eggs")\n -1\n >>> _afterpoint("123e45")\n 2\n\n ' if _isnumber(string): if _isint(string): return ...
def _padleft(width, s, has_invisible=True):
    """Right-justify `s` in a field of `width` visible characters.

    With `has_invisible`, the field is widened by the length of any ANSI
    escape codes in `s` so only visible characters count toward the width.
    """
    if has_invisible:
        field = width + len(s) - len(_strip_invisible(s))
    else:
        field = width
    return ('{0:>%ds}' % field).format(s)
def _padright(width, s, has_invisible=True):
    """Left-justify `s` in a field of `width` visible characters.

    With `has_invisible`, the field is widened by the length of any ANSI
    escape codes in `s` so only visible characters count toward the width.
    """
    if has_invisible:
        field = width + len(s) - len(_strip_invisible(s))
    else:
        field = width
    return ('{0:<%ds}' % field).format(s)
def _padboth(width, s, has_invisible=True):
    """Center `s` in a field of `width` visible characters.

    With `has_invisible`, the field is widened by the length of any ANSI
    escape codes in `s` so only visible characters count toward the width.
    """
    if has_invisible:
        field = width + len(s) - len(_strip_invisible(s))
    else:
        field = width
    return ('{0:^%ds}' % field).format(s)
def _strip_invisible(s):
    """Remove ANSI color escape sequences from a text or bytes value."""
    if isinstance(s, _text_type):
        return re.sub(_invisible_codes, '', s)
    # bytes input: use the bytes-pattern variant of the same regex
    return re.sub(_invisible_codes_bytes, '', s)
def _visible_width(s): 'Visible width of a printed string. ANSI color codes are removed.\n\n >>> _visible_width(\'\x1b[31mhello\x1b[0m\'), _visible_width("world")\n (5, 5)\n\n ' if (isinstance(s, _text_type) or isinstance(s, _binary_type)): return len(_strip_invisible(s)) else: return l...
def _align_column(strings, alignment, minwidth=0, has_invisible=True): '[string] -> [padded_string]\n\n >>> list(map(str,_align_column(["12.345", "-1234.5", "1.23", "1234.5", "1e+234", "1.0e234"], "decimal")))\n [\' 12.345 \', \'-1234.5 \', \' 1.23 \', \' 1234.5 \', \' 1e+234 \', \' 1.0e234\...
def _more_generic(type1, type2):
    """Return the more general of two cell types.

    Generality order: NoneType < int < float < bytes < str; unknown types
    rank as str (most general).
    """
    rank = {_none_type: 0, int: 1, float: 2, _binary_type: 3, _text_type: 4}
    by_rank = {0: _none_type, 1: int, 2: float, 3: _binary_type, 4: _text_type}
    winner = max(rank.get(type1, 4), rank.get(type2, 4))
    return by_rank[winner]
def _column_type(strings, has_invisible=True): 'The least generic type all column values are convertible to.\n\n >>> _column_type(["1", "2"]) is _int_type\n True\n >>> _column_type(["1", "2.3"]) is _float_type\n True\n >>> _column_type(["1", "2.3", "four"]) is _text_type\n True\n >>> _column_type(["four", ...
def _format(val, valtype, floatfmt, missingval=''): "Format a value accoding to its type.\n\n Unicode is supported:\n\n >>> hrow = ['буква', 'цифра'] ; tbl = [['аз', 2], ['буки', 4]] ; good_result = '\\u0431\\u0443\\u043a\\u0432\\u0430 \\u0446\\u0438\\u0444\\u0440\\u0430\\n------- -------\\n\\u043...
def _align_header(header, alignment, width):
    """Pad a header cell according to its column alignment.

    Falsy alignment leaves the header unpadded; anything other than
    'left'/'center'/falsy (including 'right' and 'decimal') flushes right.
    """
    if alignment == 'left':
        return _padright(width, header)
    if alignment == 'center':
        return _padboth(width, header)
    if not alignment:
        return '{0}'.format(header)
    return _padleft(width, header)
def _normalize_tabular_data(tabular_data, headers): 'Transform a supported data type to a list of lists, and a list of headers.\n\n Supported tabular data types:\n\n * list-of-lists or another iterable of iterables\n\n * list of named tuples (usually used with headers="keys")\n\n * 2D NumPy arrays\n\n * NumP...
def tabulate(tabular_data, headers=[], tablefmt='simple', floatfmt='g', numalign='decimal', stralign='left', missingval=''): 'Format a fixed width table for pretty printing.\n\n >>> print(tabulate([[1, 2.34], [-56, "8.999"], ["2", "10001"]]))\n --- ---------\n 1 2.34\n -56 8.999\n 2 10001\n ...
def _build_simple_row(padded_cells, rowfmt): 'Format row according to DataRow format without padding.' (begin, sep, end) = rowfmt return ((begin + sep.join(padded_cells)) + end).rstrip()
def _build_row(padded_cells, colwidths, colaligns, rowfmt): 'Return a string which represents a row of data cells.' if (not rowfmt): return None if hasattr(rowfmt, '__call__'): return rowfmt(padded_cells, colwidths, colaligns) else: return _build_simple_row(padded_cells, rowfmt...
def _build_line(colwidths, colaligns, linefmt): 'Return a string which represents a horizontal line.' if (not linefmt): return None if hasattr(linefmt, '__call__'): return linefmt(colwidths, colaligns) else: (begin, fill, sep, end) = linefmt cells = [(fill * w) for w in...
def _pad_row(cells, padding): if cells: pad = (' ' * padding) padded_cells = [((pad + cell) + pad) for cell in cells] return padded_cells else: return cells
def _format_table(fmt, headers, rows, colwidths, colaligns): 'Produce a plain-text representation of the table.' lines = [] hidden = (fmt.with_header_hide if (headers and fmt.with_header_hide) else []) pad = fmt.padding headerrow = fmt.headerrow padded_widths = [(w + (2 * pad)) for w in colwid...
def download_flan(): dataset = load_dataset('conceptofmind/FLAN_2022', split='train') dataset = dataset.filter((lambda example: (example['template_type'] == 'zs_noopt')), num_proc=32) task_names = dataset.unique('task_name') for task_name in task_names: print('Processing task: ', task_name) ...