code stringlengths 17 6.64M |
|---|
def param_grad_or_zeros(param):
    """Return a detached copy of ``param``'s gradient, or zeros if it has no grad."""
    grad = param.grad
    if grad is None:
        return th.zeros_like(param)
    return grad.data.detach()
|
class MixedPrecisionTrainer():
    """
    Wraps a model for training with optional fp16 mixed precision.

    In fp16 mode, gradients are computed against fp16 model parameters with a
    dynamically adjusted loss scale (2 ** lg_loss_scale), then copied to fp32
    "master" parameters for the optimizer step.  In fp32 mode the master
    parameters are simply the model parameters themselves.
    """

    def __init__(self, *, model, use_fp16=False, fp16_scale_growth=0.001, initial_lg_loss_scale=INITIAL_LOG_LOSS_SCALE):
        self.model = model
        self.use_fp16 = use_fp16
        # Amount added to lg_loss_scale after each successful (non-overflow) step.
        self.fp16_scale_growth = fp16_scale_growth
        self.model_params = list(self.model.parameters())
        # In fp32 mode, master params ARE the model params (aliased list).
        self.master_params = self.model_params
        self.param_groups_and_shapes = None
        # log2 of the current dynamic loss scale.
        self.lg_loss_scale = initial_lg_loss_scale
        if self.use_fp16:
            # Build fp32 master copies of the parameters, then convert the
            # model itself to fp16.
            self.param_groups_and_shapes = get_param_groups_and_shapes(self.model.named_parameters())
            self.master_params = make_master_params(self.param_groups_and_shapes)
            self.model.convert_to_fp16()

    def zero_grad(self):
        # Calls the module-level zero_grad() helper (shadowed by this method's name).
        zero_grad(self.model_params)

    def backward(self, loss: th.Tensor):
        """Backpropagate ``loss``, scaling it by 2**lg_loss_scale in fp16 mode."""
        if self.use_fp16:
            loss_scale = (2 ** self.lg_loss_scale)
            (loss * loss_scale).backward()
        else:
            loss.backward()

    def optimize(self, opt: th.optim.Optimizer):
        """Take an optimizer step; returns False if the step was skipped (fp16 overflow)."""
        if self.use_fp16:
            return self._optimize_fp16(opt)
        else:
            return self._optimize_normal(opt)

    def _optimize_fp16(self, opt: th.optim.Optimizer):
        logger.logkv_mean('lg_loss_scale', self.lg_loss_scale)
        model_grads_to_master_grads(self.param_groups_and_shapes, self.master_params)
        (grad_norm, param_norm) = self._compute_norms(grad_scale=(2 ** self.lg_loss_scale))
        if check_overflow(grad_norm):
            # Gradients overflowed in fp16: halve the loss scale and skip the step.
            self.lg_loss_scale -= 1
            logger.log(f'Found NaN, decreased lg_loss_scale to {self.lg_loss_scale}')
            zero_master_grads(self.master_params)
            return False
        logger.logkv_mean('grad_norm', grad_norm)
        logger.logkv_mean('param_norm', param_norm)
        for p in self.master_params:
            # Un-scale the gradients before the optimizer consumes them.
            p.grad.mul_((1.0 / (2 ** self.lg_loss_scale)))
        opt.step()
        zero_master_grads(self.master_params)
        master_params_to_model_params(self.param_groups_and_shapes, self.master_params)
        # Slowly grow the loss scale back after every successful step.
        self.lg_loss_scale += self.fp16_scale_growth
        return True

    def _optimize_normal(self, opt: th.optim.Optimizer):
        (grad_norm, param_norm) = self._compute_norms()
        logger.logkv_mean('grad_norm', grad_norm)
        logger.logkv_mean('param_norm', param_norm)
        opt.step()
        return True

    def _compute_norms(self, grad_scale=1.0):
        """Return (grad_norm, param_norm) as floats; grad_norm is divided by grad_scale."""
        grad_norm = 0.0
        param_norm = 0.0
        for p in self.master_params:
            with th.no_grad():
                param_norm += (th.norm(p, p=2, dtype=th.float32).item() ** 2)
                if (p.grad is not None):
                    grad_norm += (th.norm(p.grad, p=2, dtype=th.float32).item() ** 2)
        return ((np.sqrt(grad_norm) / grad_scale), np.sqrt(param_norm))

    def master_params_to_state_dict(self, master_params):
        # Delegates to the module-level helper of the same name.
        return master_params_to_state_dict(self.model, self.param_groups_and_shapes, master_params, self.use_fp16)

    def state_dict_to_master_params(self, state_dict):
        # Delegates to the module-level helper of the same name.
        return state_dict_to_master_params(self.model, state_dict, self.use_fp16)
|
def check_overflow(value):
    """Return True if ``value`` is +inf, -inf, or NaN (NaN compares unequal to itself)."""
    inf = float('inf')
    return value == inf or value == -inf or value != value
|
def mean_flat(tensor):
    """
    Take the mean over all non-batch dimensions.
    """
    reduce_dims = list(range(1, len(tensor.shape)))
    return tensor.mean(dim=reduce_dims)
|
class ModelMeanType(enum.Enum):
    """
    Which type of output the model predicts.
    """
    PREVIOUS_X = enum.auto()  # model predicts the posterior mean of x_{t-1}
    START_X = enum.auto()  # model predicts x_0 directly
    EPSILON = enum.auto()  # model predicts the noise epsilon
|
class ModelVarType(enum.Enum):
    """
    What is used as the model's output variance.
    The LEARNED_RANGE option has been added to allow the model to predict
    values between FIXED_SMALL and FIXED_LARGE, making its job easier.
    """
    # NOTE(review): in this file's p_mean_variance, LEARNED is handled by the
    # same interpolation branch as LEARNED_RANGE.
    LEARNED = enum.auto()
    FIXED_SMALL = enum.auto()  # uses the clipped posterior variance
    FIXED_LARGE = enum.auto()  # uses betas (posterior variance at t=0)
    LEARNED_RANGE = enum.auto()  # model output interpolates between small and large logs
|
class LossType(enum.Enum):
    """Loss variants used when training the diffusion model."""
    MSE = enum.auto()
    RESCALED_MSE = enum.auto()
    KL = enum.auto()
    RESCALED_KL = enum.auto()

    def is_vb(self):
        """Whether this is a variational-bound (KL-based) loss."""
        return self in (LossType.KL, LossType.RESCALED_KL)
|
def _warmup_beta(beta_start, beta_end, num_diffusion_timesteps, warmup_frac):
betas = (beta_end * np.ones(num_diffusion_timesteps, dtype=np.float64))
warmup_time = int((num_diffusion_timesteps * warmup_frac))
betas[:warmup_time] = np.linspace(beta_start, beta_end, warmup_time, dtype=np.float64)
return betas
|
def get_beta_schedule(beta_schedule, *, beta_start, beta_end, num_diffusion_timesteps):
    """
    This is the deprecated API for creating beta schedules.
    See get_named_beta_schedule() for the new library of schedules.
    """
    if beta_schedule == 'quad':
        # Linear in sqrt-space, then squared.
        sqrt_betas = np.linspace(beta_start ** 0.5, beta_end ** 0.5, num_diffusion_timesteps, dtype=np.float64)
        betas = sqrt_betas ** 2
    elif beta_schedule == 'linear':
        betas = np.linspace(beta_start, beta_end, num_diffusion_timesteps, dtype=np.float64)
    elif beta_schedule in ('warmup10', 'warmup50'):
        warmup_frac = 0.1 if beta_schedule == 'warmup10' else 0.5
        betas = _warmup_beta(beta_start, beta_end, num_diffusion_timesteps, warmup_frac)
    elif beta_schedule == 'const':
        betas = np.full(num_diffusion_timesteps, beta_end, dtype=np.float64)
    elif beta_schedule == 'jsd':
        # 1/T, 1/(T-1), ..., 1/2, 1.
        betas = 1.0 / np.linspace(num_diffusion_timesteps, 1, num_diffusion_timesteps, dtype=np.float64)
    else:
        raise NotImplementedError(beta_schedule)
    assert betas.shape == (num_diffusion_timesteps,)
    return betas
|
def get_named_beta_schedule(schedule_name, num_diffusion_timesteps):
    """
    Get a pre-defined beta schedule for the given name.
    The beta schedule library consists of beta schedules which remain similar
    in the limit of num_diffusion_timesteps.
    Beta schedules may be added, but should not be removed or changed once
    they are committed to maintain backwards compatibility.
    """
    if schedule_name == 'linear':
        # The linear schedule from Ho et al., scaled so that any number of
        # timesteps approximates the reference 1000-step schedule.
        scale = 1000 / num_diffusion_timesteps
        return get_beta_schedule(
            'linear',
            beta_start=scale * 0.0001,
            beta_end=scale * 0.02,
            num_diffusion_timesteps=num_diffusion_timesteps,
        )
    if schedule_name == 'squaredcos_cap_v2':
        def alpha_bar(t):
            # Cosine schedule from Nichol & Dhariwal (small 0.008 offset).
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

        return betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar)
    raise NotImplementedError(f'unknown beta schedule: {schedule_name}')
|
def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999):
    """
    Create a beta schedule that discretizes the given alpha_t_bar function,
    which defines the cumulative product of (1-beta) over time from t = [0,1].
    :param num_diffusion_timesteps: the number of betas to produce.
    :param alpha_bar: a lambda that takes an argument t from 0 to 1 and
                      produces the cumulative product of (1-beta) up to that
                      part of the diffusion process.
    :param max_beta: the maximum beta to use; use values lower than 1 to
                     prevent singularities.
    """
    betas = []
    for step in range(num_diffusion_timesteps):
        t1 = step / num_diffusion_timesteps
        t2 = (step + 1) / num_diffusion_timesteps
        # beta_t = 1 - alpha_bar(t)/alpha_bar(t-1), capped at max_beta.
        beta = 1 - alpha_bar(t2) / alpha_bar(t1)
        betas.append(min(beta, max_beta))
    return np.array(betas)
|
class GaussianDiffusion():
    """
    Utilities for training and sampling diffusion models.
    Original ported from this codebase:
    https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/diffusion_utils_2.py#L42
    :param betas: a 1-D numpy array of betas for each diffusion timestep,
                  starting at T and going to 1.
    """

    def __init__(self, *, betas, model_mean_type, model_var_type, loss_type):
        self.model_mean_type = model_mean_type
        self.model_var_type = model_var_type
        self.loss_type = loss_type
        # Use float64 for accuracy when accumulating products of many betas.
        betas = np.array(betas, dtype=np.float64)
        self.betas = betas
        assert (len(betas.shape) == 1), 'betas must be 1-D'
        assert ((betas > 0).all() and (betas <= 1).all())
        self.num_timesteps = int(betas.shape[0])
        alphas = (1.0 - betas)
        self.alphas_cumprod = np.cumprod(alphas, axis=0)
        # Cumulative products shifted by one step: prev is padded with 1.0 at
        # the front, next is padded with 0.0 at the end.
        self.alphas_cumprod_prev = np.append(1.0, self.alphas_cumprod[:(- 1)])
        self.alphas_cumprod_next = np.append(self.alphas_cumprod[1:], 0.0)
        assert (self.alphas_cumprod_prev.shape == (self.num_timesteps,))
        # Precomputed terms for q(x_t | x_0) and for reconstructing x_0.
        self.sqrt_alphas_cumprod = np.sqrt(self.alphas_cumprod)
        self.sqrt_one_minus_alphas_cumprod = np.sqrt((1.0 - self.alphas_cumprod))
        self.log_one_minus_alphas_cumprod = np.log((1.0 - self.alphas_cumprod))
        self.sqrt_recip_alphas_cumprod = np.sqrt((1.0 / self.alphas_cumprod))
        self.sqrt_recipm1_alphas_cumprod = np.sqrt(((1.0 / self.alphas_cumprod) - 1))
        # Precomputed terms for the posterior q(x_{t-1} | x_t, x_0).
        self.posterior_variance = ((betas * (1.0 - self.alphas_cumprod_prev)) / (1.0 - self.alphas_cumprod))
        # Log-calculation clipped because the posterior variance is 0 at the
        # beginning of the diffusion chain (t = 0).
        self.posterior_log_variance_clipped = (np.log(np.append(self.posterior_variance[1], self.posterior_variance[1:])) if (len(self.posterior_variance) > 1) else np.array([]))
        self.posterior_mean_coef1 = ((betas * np.sqrt(self.alphas_cumprod_prev)) / (1.0 - self.alphas_cumprod))
        self.posterior_mean_coef2 = (((1.0 - self.alphas_cumprod_prev) * np.sqrt(alphas)) / (1.0 - self.alphas_cumprod))

    def q_mean_variance(self, x_start, t):
        """
        Get the distribution q(x_t | x_0).
        :param x_start: the [N x C x ...] tensor of noiseless inputs.
        :param t: the number of diffusion steps (minus 1). Here, 0 means one step.
        :return: A tuple (mean, variance, log_variance), all of x_start's shape.
        """
        mean = (_extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start)
        variance = _extract_into_tensor((1.0 - self.alphas_cumprod), t, x_start.shape)
        log_variance = _extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape)
        return (mean, variance, log_variance)

    def q_sample(self, x_start, t, noise=None):
        """
        Diffuse the data for a given number of diffusion steps.
        In other words, sample from q(x_t | x_0).
        :param x_start: the initial data batch.
        :param t: the number of diffusion steps (minus 1). Here, 0 means one step.
        :param noise: if specified, the split-out normal noise.
        :return: A noisy version of x_start.
        """
        if (noise is None):
            noise = th.randn_like(x_start)
        assert (noise.shape == x_start.shape)
        return ((_extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) + (_extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise))

    def q_posterior_mean_variance(self, x_start, x_t, t):
        """
        Compute the mean and variance of the diffusion posterior:
            q(x_{t-1} | x_t, x_0)
        """
        assert (x_start.shape == x_t.shape)
        posterior_mean = ((_extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start) + (_extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t))
        posterior_variance = _extract_into_tensor(self.posterior_variance, t, x_t.shape)
        posterior_log_variance_clipped = _extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape)
        assert (posterior_mean.shape[0] == posterior_variance.shape[0] == posterior_log_variance_clipped.shape[0] == x_start.shape[0])
        return (posterior_mean, posterior_variance, posterior_log_variance_clipped)

    def p_mean_variance(self, model, x, t, clip_denoised=True, denoised_fn=None, model_kwargs=None):
        """
        Apply the model to get p(x_{t-1} | x_t), as well as a prediction of
        the initial x, x_0.
        :param model: the model, which takes a signal and a batch of timesteps
                      as input.
        :param x: the [N x C x ...] tensor at time t.
        :param t: a 1-D Tensor of timesteps.
        :param clip_denoised: if True, clip the denoised signal into [-1, 1].
        :param denoised_fn: if not None, a function which applies to the
            x_start prediction before it is used to sample. Applies before
            clip_denoised.
        :param model_kwargs: if not None, a dict of extra keyword arguments to
            pass to the model. This can be used for conditioning.
        :return: a dict with the following keys:
                 - 'mean': the model mean output.
                 - 'variance': the model variance output.
                 - 'log_variance': the log of 'variance'.
                 - 'pred_xstart': the prediction for x_0.
        """
        if (model_kwargs is None):
            model_kwargs = {}
        (B, C) = x.shape[:2]
        assert (t.shape == (B,))
        model_output = model(x, t, **model_kwargs)
        if isinstance(model_output, tuple):
            (model_output, extra) = model_output
        else:
            extra = None
        if (self.model_var_type in [ModelVarType.LEARNED, ModelVarType.LEARNED_RANGE]):
            # Model outputs double the channels: mean prediction plus a value
            # that interpolates the log-variance between min_log and max_log.
            assert (model_output.shape == (B, (C * 2), *x.shape[2:]))
            (model_output, model_var_values) = th.split(model_output, C, dim=1)
            min_log = _extract_into_tensor(self.posterior_log_variance_clipped, t, x.shape)
            max_log = _extract_into_tensor(np.log(self.betas), t, x.shape)
            # model_var_values is in [-1, 1]; map to an interpolation fraction.
            frac = ((model_var_values + 1) / 2)
            model_log_variance = ((frac * max_log) + ((1 - frac) * min_log))
            model_variance = th.exp(model_log_variance)
        else:
            (model_variance, model_log_variance) = {ModelVarType.FIXED_LARGE: (np.append(self.posterior_variance[1], self.betas[1:]), np.log(np.append(self.posterior_variance[1], self.betas[1:]))), ModelVarType.FIXED_SMALL: (self.posterior_variance, self.posterior_log_variance_clipped)}[self.model_var_type]
            model_variance = _extract_into_tensor(model_variance, t, x.shape)
            model_log_variance = _extract_into_tensor(model_log_variance, t, x.shape)

        def process_xstart(x):
            # Optionally post-process and/or clamp the x_0 prediction.
            if (denoised_fn is not None):
                x = denoised_fn(x)
            if clip_denoised:
                return x.clamp((- 1), 1)
            return x

        if (self.model_mean_type == ModelMeanType.START_X):
            pred_xstart = process_xstart(model_output)
        else:
            # Model predicted epsilon; recover x_0 from it.
            pred_xstart = process_xstart(self._predict_xstart_from_eps(x_t=x, t=t, eps=model_output))
        (model_mean, _, _) = self.q_posterior_mean_variance(x_start=pred_xstart, x_t=x, t=t)
        assert (model_mean.shape == model_log_variance.shape == pred_xstart.shape == x.shape)
        return {'mean': model_mean, 'variance': model_variance, 'log_variance': model_log_variance, 'pred_xstart': pred_xstart, 'extra': extra}

    def _predict_xstart_from_eps(self, x_t, t, eps):
        """Recover x_0 from x_t and a predicted noise eps."""
        assert (x_t.shape == eps.shape)
        return ((_extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t) - (_extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * eps))

    def _predict_eps_from_xstart(self, x_t, t, pred_xstart):
        """Recover the implied noise eps from x_t and a predicted x_0."""
        return (((_extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t) - pred_xstart) / _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape))

    def condition_mean(self, cond_fn, p_mean_var, x, t, model_kwargs=None):
        """
        Compute the mean for the previous step, given a function cond_fn that
        computes the gradient of a conditional log probability with respect to
        x. In particular, cond_fn computes grad(log(p(y|x))), and we want to
        condition on y.
        This uses the conditioning strategy from Sohl-Dickstein et al. (2015).
        """
        gradient = cond_fn(x, t, **model_kwargs)
        new_mean = (p_mean_var['mean'].float() + (p_mean_var['variance'] * gradient.float()))
        return new_mean

    def condition_score(self, cond_fn, p_mean_var, x, t, model_kwargs=None):
        """
        Compute what the p_mean_variance output would have been, should the
        model's score function be conditioned by cond_fn.
        See condition_mean() for details on cond_fn.
        Unlike condition_mean(), this instead uses the conditioning strategy
        from Song et al (2020).
        """
        alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape)
        eps = self._predict_eps_from_xstart(x, t, p_mean_var['pred_xstart'])
        # Shift eps by the (scaled) conditioning gradient.
        eps = (eps - ((1 - alpha_bar).sqrt() * cond_fn(x, t, **model_kwargs)))
        out = p_mean_var.copy()
        out['pred_xstart'] = self._predict_xstart_from_eps(x, t, eps)
        (out['mean'], _, _) = self.q_posterior_mean_variance(x_start=out['pred_xstart'], x_t=x, t=t)
        return out

    def p_sample(self, model, x, t, clip_denoised=True, denoised_fn=None, cond_fn=None, model_kwargs=None):
        """
        Sample x_{t-1} from the model at the given timestep.
        :param model: the model to sample from.
        :param x: the current tensor at x_{t-1}.
        :param t: the value of t, starting at 0 for the first diffusion step.
        :param clip_denoised: if True, clip the x_start prediction to [-1, 1].
        :param denoised_fn: if not None, a function which applies to the
            x_start prediction before it is used to sample.
        :param cond_fn: if not None, this is a gradient function that acts
                        similarly to the model.
        :param model_kwargs: if not None, a dict of extra keyword arguments to
            pass to the model. This can be used for conditioning.
        :return: a dict containing the following keys:
                 - 'sample': a random sample from the model.
                 - 'pred_xstart': a prediction of x_0.
        """
        out = self.p_mean_variance(model, x, t, clip_denoised=clip_denoised, denoised_fn=denoised_fn, model_kwargs=model_kwargs)
        noise = th.randn_like(x)
        # No noise is added at t == 0 (the final denoising step).
        nonzero_mask = (t != 0).float().view((- 1), *([1] * (len(x.shape) - 1)))
        if (cond_fn is not None):
            out['mean'] = self.condition_mean(cond_fn, out, x, t, model_kwargs=model_kwargs)
        sample = (out['mean'] + ((nonzero_mask * th.exp((0.5 * out['log_variance']))) * noise))
        return {'sample': sample, 'pred_xstart': out['pred_xstart']}

    def p_sample_loop(self, model, shape, noise=None, clip_denoised=True, denoised_fn=None, cond_fn=None, model_kwargs=None, device=None, progress=False):
        """
        Generate samples from the model.
        :param model: the model module.
        :param shape: the shape of the samples, (N, C, H, W).
        :param noise: if specified, the noise from the encoder to sample.
                      Should be of the same shape as `shape`.
        :param clip_denoised: if True, clip x_start predictions to [-1, 1].
        :param denoised_fn: if not None, a function which applies to the
            x_start prediction before it is used to sample.
        :param cond_fn: if not None, this is a gradient function that acts
                        similarly to the model.
        :param model_kwargs: if not None, a dict of extra keyword arguments to
            pass to the model. This can be used for conditioning.
        :param device: if specified, the device to create the samples on.
                       If not specified, use a model parameter's device.
        :param progress: if True, show a tqdm progress bar.
        :return: a non-differentiable batch of samples.
        """
        final = None
        for sample in self.p_sample_loop_progressive(model, shape, noise=noise, clip_denoised=clip_denoised, denoised_fn=denoised_fn, cond_fn=cond_fn, model_kwargs=model_kwargs, device=device, progress=progress):
            final = sample
        return final['sample']

    def p_sample_loop_progressive(self, model, shape, noise=None, clip_denoised=True, denoised_fn=None, cond_fn=None, model_kwargs=None, device=None, progress=False):
        """
        Generate samples from the model and yield intermediate samples from
        each timestep of diffusion.
        Arguments are the same as p_sample_loop().
        Returns a generator over dicts, where each dict is the return value of
        p_sample().
        """
        if (device is None):
            device = next(model.parameters()).device
        assert isinstance(shape, (tuple, list))
        if (noise is not None):
            img = noise
        else:
            img = th.randn(*shape, device=device)
        # Iterate timesteps from T-1 down to 0.
        indices = list(range(self.num_timesteps))[::(- 1)]
        if progress:
            # Lazy import so that we don't depend on tqdm.
            from tqdm.auto import tqdm
            indices = tqdm(indices)
        for i in indices:
            t = th.tensor(([i] * shape[0]), device=device)
            with th.no_grad():
                out = self.p_sample(model, img, t, clip_denoised=clip_denoised, denoised_fn=denoised_fn, cond_fn=cond_fn, model_kwargs=model_kwargs)
                (yield out)
                img = out['sample']

    def ddim_sample(self, model, x, t, clip_denoised=True, denoised_fn=None, cond_fn=None, model_kwargs=None, eta=0.0):
        """
        Sample x_{t-1} from the model using DDIM.
        Same usage as p_sample().
        """
        out = self.p_mean_variance(model, x, t, clip_denoised=clip_denoised, denoised_fn=denoised_fn, model_kwargs=model_kwargs)
        if (cond_fn is not None):
            out = self.condition_score(cond_fn, out, x, t, model_kwargs=model_kwargs)
        # Usually our model outputs epsilon, but we re-derive it in case we
        # used x_start or x_prev prediction.
        eps = self._predict_eps_from_xstart(x, t, out['pred_xstart'])
        alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape)
        alpha_bar_prev = _extract_into_tensor(self.alphas_cumprod_prev, t, x.shape)
        sigma = ((eta * th.sqrt(((1 - alpha_bar_prev) / (1 - alpha_bar)))) * th.sqrt((1 - (alpha_bar / alpha_bar_prev))))
        noise = th.randn_like(x)
        mean_pred = ((out['pred_xstart'] * th.sqrt(alpha_bar_prev)) + (th.sqrt(((1 - alpha_bar_prev) - (sigma ** 2))) * eps))
        # No noise is added at t == 0.
        nonzero_mask = (t != 0).float().view((- 1), *([1] * (len(x.shape) - 1)))
        sample = (mean_pred + ((nonzero_mask * sigma) * noise))
        return {'sample': sample, 'pred_xstart': out['pred_xstart']}

    def ddim_reverse_sample(self, model, x, t, clip_denoised=True, denoised_fn=None, cond_fn=None, model_kwargs=None, eta=0.0):
        """
        Sample x_{t+1} from the model using DDIM reverse ODE.
        """
        assert (eta == 0.0), 'Reverse ODE only for deterministic path'
        out = self.p_mean_variance(model, x, t, clip_denoised=clip_denoised, denoised_fn=denoised_fn, model_kwargs=model_kwargs)
        if (cond_fn is not None):
            out = self.condition_score(cond_fn, out, x, t, model_kwargs=model_kwargs)
        eps = (((_extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x.shape) * x) - out['pred_xstart']) / _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x.shape))
        alpha_bar_next = _extract_into_tensor(self.alphas_cumprod_next, t, x.shape)
        mean_pred = ((out['pred_xstart'] * th.sqrt(alpha_bar_next)) + (th.sqrt((1 - alpha_bar_next)) * eps))
        return {'sample': mean_pred, 'pred_xstart': out['pred_xstart']}

    def ddim_sample_loop(self, model, shape, noise=None, clip_denoised=True, denoised_fn=None, cond_fn=None, model_kwargs=None, device=None, progress=False, eta=0.0):
        """
        Generate samples from the model using DDIM.
        Same usage as p_sample_loop().
        """
        final = None
        for sample in self.ddim_sample_loop_progressive(model, shape, noise=noise, clip_denoised=clip_denoised, denoised_fn=denoised_fn, cond_fn=cond_fn, model_kwargs=model_kwargs, device=device, progress=progress, eta=eta):
            final = sample
        return final['sample']

    def ddim_sample_loop_progressive(self, model, shape, noise=None, clip_denoised=True, denoised_fn=None, cond_fn=None, model_kwargs=None, device=None, progress=False, eta=0.0):
        """
        Use DDIM to sample from the model and yield intermediate samples from
        each timestep of DDIM.
        Same usage as p_sample_loop_progressive().
        """
        if (device is None):
            device = next(model.parameters()).device
        assert isinstance(shape, (tuple, list))
        if (noise is not None):
            img = noise
        else:
            img = th.randn(*shape, device=device)
        indices = list(range(self.num_timesteps))[::(- 1)]
        if progress:
            # Lazy import so that we don't depend on tqdm.
            from tqdm.auto import tqdm
            indices = tqdm(indices)
        for i in indices:
            t = th.tensor(([i] * shape[0]), device=device)
            with th.no_grad():
                out = self.ddim_sample(model, img, t, clip_denoised=clip_denoised, denoised_fn=denoised_fn, cond_fn=cond_fn, model_kwargs=model_kwargs, eta=eta)
                (yield out)
                img = out['sample']

    def _vb_terms_bpd(self, model, x_start, x_t, t, clip_denoised=True, model_kwargs=None):
        """
        Get a term for the variational lower-bound.
        The resulting units are bits (rather than nats, as one might expect).
        This allows for comparison to other papers.
        :return: a dict with the following keys:
                 - 'output': a shape [N] tensor of NLLs or KLs.
                 - 'pred_xstart': the x_0 predictions.
        """
        (true_mean, _, true_log_variance_clipped) = self.q_posterior_mean_variance(x_start=x_start, x_t=x_t, t=t)
        out = self.p_mean_variance(model, x_t, t, clip_denoised=clip_denoised, model_kwargs=model_kwargs)
        kl = normal_kl(true_mean, true_log_variance_clipped, out['mean'], out['log_variance'])
        # Convert nats to bits.
        kl = (mean_flat(kl) / np.log(2.0))
        decoder_nll = (- discretized_gaussian_log_likelihood(x_start, means=out['mean'], log_scales=(0.5 * out['log_variance'])))
        assert (decoder_nll.shape == x_start.shape)
        decoder_nll = (mean_flat(decoder_nll) / np.log(2.0))
        # At the first timestep return the decoder NLL, otherwise return
        # KL(q(x_{t-1}|x_t,x_0) || p(x_{t-1}|x_t)).
        output = th.where((t == 0), decoder_nll, kl)
        return {'output': output, 'pred_xstart': out['pred_xstart']}

    def training_losses(self, model, x_start, t, model_kwargs=None, noise=None):
        """
        Compute training losses for a single timestep.
        :param model: the model to evaluate loss on.
        :param x_start: the [N x C x ...] tensor of inputs.
        :param t: a batch of timestep indices.
        :param model_kwargs: if not None, a dict of extra keyword arguments to
            pass to the model. This can be used for conditioning.
        :param noise: if specified, the specific Gaussian noise to try to remove.
        :return: a dict with the key "loss" containing a tensor of shape [N].
                 Some mean or variance settings may also have other keys.
        """
        if (model_kwargs is None):
            model_kwargs = {}
        if (noise is None):
            noise = th.randn_like(x_start)
        x_t = self.q_sample(x_start, t, noise=noise)
        terms = {}
        if ((self.loss_type == LossType.KL) or (self.loss_type == LossType.RESCALED_KL)):
            terms['loss'] = self._vb_terms_bpd(model=model, x_start=x_start, x_t=x_t, t=t, clip_denoised=False, model_kwargs=model_kwargs)['output']
            if (self.loss_type == LossType.RESCALED_KL):
                terms['loss'] *= self.num_timesteps
        elif ((self.loss_type == LossType.MSE) or (self.loss_type == LossType.RESCALED_MSE)):
            model_output = model(x_t, t, **model_kwargs)
            if (self.model_var_type in [ModelVarType.LEARNED, ModelVarType.LEARNED_RANGE]):
                (B, C) = x_t.shape[:2]
                assert (model_output.shape == (B, (C * 2), *x_t.shape[2:]))
                (model_output, model_var_values) = th.split(model_output, C, dim=1)
                # Learn the variance using the variational bound, but don't let
                # it affect our mean prediction (hence the detach on the mean).
                frozen_out = th.cat([model_output.detach(), model_var_values], dim=1)
                terms['vb'] = self._vb_terms_bpd(model=(lambda *args, r=frozen_out: r), x_start=x_start, x_t=x_t, t=t, clip_denoised=False)['output']
                if (self.loss_type == LossType.RESCALED_MSE):
                    # Divide by 1000 for equivalence with the initial implementation.
                    terms['vb'] *= (self.num_timesteps / 1000.0)
            target = {ModelMeanType.PREVIOUS_X: self.q_posterior_mean_variance(x_start=x_start, x_t=x_t, t=t)[0], ModelMeanType.START_X: x_start, ModelMeanType.EPSILON: noise}[self.model_mean_type]
            assert (model_output.shape == target.shape == x_start.shape)
            terms['mse'] = mean_flat(((target - model_output) ** 2))
            if ('vb' in terms):
                terms['loss'] = (terms['mse'] + terms['vb'])
            else:
                terms['loss'] = terms['mse']
        else:
            raise NotImplementedError(self.loss_type)
        return terms

    def _prior_bpd(self, x_start):
        """
        Get the prior KL term for the variational lower-bound, measured in
        bits-per-dim.
        This term can't be optimized, as it only depends on the encoder.
        :param x_start: the [N x C x ...] tensor of inputs.
        :return: a batch of [N] KL values (in bits), one per batch element.
        """
        batch_size = x_start.shape[0]
        t = th.tensor(([(self.num_timesteps - 1)] * batch_size), device=x_start.device)
        (qt_mean, _, qt_log_variance) = self.q_mean_variance(x_start, t)
        kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0)
        return (mean_flat(kl_prior) / np.log(2.0))

    def calc_bpd_loop(self, model, x_start, clip_denoised=True, model_kwargs=None):
        """
        Compute the entire variational lower-bound, measured in bits-per-dim,
        as well as other related quantities.
        :param model: the model to evaluate loss on.
        :param x_start: the [N x C x ...] tensor of inputs.
        :param clip_denoised: if True, clip denoised samples.
        :param model_kwargs: if not None, a dict of extra keyword arguments to
            pass to the model. This can be used for conditioning.
        :return: a dict containing the following keys:
                 - total_bpd: the total variational lower-bound, per batch element.
                 - prior_bpd: the prior term in the lower-bound.
                 - vb: an [N x T] tensor of terms in the lower-bound.
                 - xstart_mse: an [N x T] tensor of x_0 MSEs for each timestep.
                 - mse: an [N x T] tensor of epsilon MSEs for each timestep.
        """
        device = x_start.device
        batch_size = x_start.shape[0]
        vb = []
        xstart_mse = []
        mse = []
        for t in list(range(self.num_timesteps))[::(- 1)]:
            t_batch = th.tensor(([t] * batch_size), device=device)
            noise = th.randn_like(x_start)
            x_t = self.q_sample(x_start=x_start, t=t_batch, noise=noise)
            # Calculate VLB term at the current timestep.
            with th.no_grad():
                out = self._vb_terms_bpd(model, x_start=x_start, x_t=x_t, t=t_batch, clip_denoised=clip_denoised, model_kwargs=model_kwargs)
            vb.append(out['output'])
            xstart_mse.append(mean_flat(((out['pred_xstart'] - x_start) ** 2)))
            eps = self._predict_eps_from_xstart(x_t, t_batch, out['pred_xstart'])
            mse.append(mean_flat(((eps - noise) ** 2)))
        vb = th.stack(vb, dim=1)
        xstart_mse = th.stack(xstart_mse, dim=1)
        mse = th.stack(mse, dim=1)
        prior_bpd = self._prior_bpd(x_start)
        total_bpd = (vb.sum(dim=1) + prior_bpd)
        return {'total_bpd': total_bpd, 'prior_bpd': prior_bpd, 'vb': vb, 'xstart_mse': xstart_mse, 'mse': mse}
|
def _extract_into_tensor(arr, timesteps, broadcast_shape):
'\n Extract values from a 1-D numpy array for a batch of indices.\n :param arr: the 1-D numpy array.\n :param timesteps: a tensor of indices into the array to extract.\n :param broadcast_shape: a larger shape of K dimensions with the batch\n dimension equal to the length of timesteps.\n :return: a tensor of shape [batch_size, 1, ...] where the shape has K dims.\n '
res = th.from_numpy(arr).to(device=timesteps.device)[timesteps].float()
while (len(res.shape) < len(broadcast_shape)):
res = res[(..., None)]
return (res + th.zeros(broadcast_shape, device=timesteps.device))
|
class KVWriter(object):
    """Abstract interface for sinks that log key/value diagnostic dicts."""

    def writekvs(self, kvs):
        # Subclasses must write the given key->value mapping.
        raise NotImplementedError
|
class SeqWriter(object):
    """Abstract interface for sinks that log sequences of text tokens."""

    def writeseq(self, seq):
        # Subclasses must write the given sequence of strings.
        raise NotImplementedError
|
class HumanOutputFormat(KVWriter, SeqWriter):
    """Writes key/value tables and raw text sequences in a human-readable form."""

    def __init__(self, filename_or_file):
        # Accept either a filesystem path (which we open and own) or an
        # already-open file-like object (owned by the caller).
        if isinstance(filename_or_file, str):
            self.file = open(filename_or_file, 'wt')
            self.own_file = True
        else:
            assert hasattr(filename_or_file, 'read'), ('expected file or str, got %s' % filename_or_file)
            self.file = filename_or_file
            self.own_file = False

    def writekvs(self, kvs):
        """Render ``kvs`` as a boxed ASCII table, one row per key, and flush."""
        # Create strings for printing.
        key2str = {}
        for (key, val) in sorted(kvs.items()):
            if hasattr(val, '__float__'):
                # Compact numeric formatting for anything float-convertible.
                valstr = ('%-8.3g' % val)
            else:
                valstr = str(val)
            key2str[self._truncate(key)] = self._truncate(valstr)
        # Find max widths so that the table columns line up.
        if (len(key2str) == 0):
            print('WARNING: tried to write empty key-value dict')
            return
        else:
            keywidth = max(map(len, key2str.keys()))
            valwidth = max(map(len, key2str.values()))
        # Write out the data, sorted case-insensitively by key.
        dashes = ('-' * ((keywidth + valwidth) + 7))
        lines = [dashes]
        for (key, val) in sorted(key2str.items(), key=(lambda kv: kv[0].lower())):
            lines.append(('| %s%s | %s%s |' % (key, (' ' * (keywidth - len(key))), val, (' ' * (valwidth - len(val))))))
        lines.append(dashes)
        self.file.write(('\n'.join(lines) + '\n'))
        # Flush so the output shows up immediately.
        self.file.flush()

    def _truncate(self, s):
        # Limit cell contents to 30 characters, marking truncation with '...'.
        maxlen = 30
        return ((s[:(maxlen - 3)] + '...') if (len(s) > maxlen) else s)

    def writeseq(self, seq):
        # Write the elements space-separated on a single line.
        seq = list(seq)
        for (i, elem) in enumerate(seq):
            self.file.write(elem)
            if (i < (len(seq) - 1)):
                self.file.write(' ')
        self.file.write('\n')
        self.file.flush()

    def close(self):
        # Only close files we opened ourselves.
        if self.own_file:
            self.file.close()
|
class JSONOutputFormat(KVWriter):
    """Writes each logged key/value dict as one JSON object per line (JSON Lines)."""

    def __init__(self, filename):
        # 'wt' truncates any existing file at the given path.
        self.file = open(filename, 'wt')

    def writekvs(self, kvs):
        """Serialize ``kvs`` as a single JSON line and flush.

        Values that look like numpy/torch scalars (anything exposing a
        ``dtype`` attribute) are converted to plain floats so json can
        serialize them.  Unlike the previous implementation, the caller's
        dict is NOT mutated; key order (and thus output bytes) is preserved.
        """
        serializable = {k: (float(v) if hasattr(v, 'dtype') else v) for (k, v) in kvs.items()}
        self.file.write(json.dumps(serializable) + '\n')
        self.file.flush()

    def close(self):
        self.file.close()
|
class CSVOutputFormat(KVWriter):
    """Appends key/value dicts as rows of a CSV file, growing columns as new keys appear."""

    def __init__(self, filename):
        # 'w+t' because writekvs() re-reads the file to rewrite the header
        # when new keys show up.
        self.file = open(filename, 'w+t')
        # Column order, in the order keys were first seen (new keys sorted).
        self.keys = []
        self.sep = ','

    def writekvs(self, kvs):
        """Write one CSV row; if ``kvs`` has unseen keys, rewrite the header first."""
        # Add our current row to the history.
        extra_keys = list((kvs.keys() - self.keys))
        extra_keys.sort()
        if extra_keys:
            self.keys.extend(extra_keys)
            # Re-read existing content, then rewrite the file in place with
            # the widened header and padded historical rows.
            self.file.seek(0)
            lines = self.file.readlines()
            self.file.seek(0)
            for (i, k) in enumerate(self.keys):
                if (i > 0):
                    self.file.write(',')
                self.file.write(k)
            self.file.write('\n')
            for line in lines[1:]:
                # Strip the trailing newline, pad with empty cells for the
                # new columns, then restore the newline.
                self.file.write(line[:(- 1)])
                self.file.write((self.sep * len(extra_keys)))
                self.file.write('\n')
        for (i, k) in enumerate(self.keys):
            if (i > 0):
                self.file.write(',')
            v = kvs.get(k)
            # Missing keys leave an empty cell.
            if (v is not None):
                self.file.write(str(v))
        self.file.write('\n')
        self.file.flush()

    def close(self):
        self.file.close()
|
class TensorBoardOutputFormat(KVWriter):
    """
    Dumps key/value pairs into TensorBoard's numeric format.
    """

    def __init__(self, dir):
        os.makedirs(dir, exist_ok=True)
        self.dir = dir
        # Monotonic step counter used for each dumped event.
        self.step = 1
        prefix = 'events'
        path = osp.join(osp.abspath(dir), prefix)
        # Imported lazily so tensorflow is only required when this format is used.
        import tensorflow as tf
        from tensorflow.python import pywrap_tensorflow
        from tensorflow.core.util import event_pb2
        from tensorflow.python.util import compat
        self.tf = tf
        self.event_pb2 = event_pb2
        self.pywrap_tensorflow = pywrap_tensorflow
        # NOTE(review): uses TF-internal pywrap_tensorflow.EventsWriter, which
        # is tied to TF 1.x-era internals — confirm against the installed TF.
        self.writer = pywrap_tensorflow.EventsWriter(compat.as_bytes(path))

    def writekvs(self, kvs):
        """Write all key/value pairs as scalar summaries at the current step."""
        def summary_val(k, v):
            kwargs = {'tag': k, 'simple_value': float(v)}
            return self.tf.Summary.Value(**kwargs)
        summary = self.tf.Summary(value=[summary_val(k, v) for (k, v) in kvs.items()])
        event = self.event_pb2.Event(wall_time=time.time(), summary=summary)
        event.step = self.step
        self.writer.WriteEvent(event)
        self.writer.Flush()
        self.step += 1

    def close(self):
        if self.writer:
            self.writer.Close()
            self.writer = None
|
def make_output_format(format, ev_dir, log_suffix=''):
    """Construct the output writer for ``format``, placing files under ``ev_dir``."""
    os.makedirs(ev_dir, exist_ok=True)
    if format == 'stdout':
        return HumanOutputFormat(sys.stdout)
    if format == 'log':
        return HumanOutputFormat(osp.join(ev_dir, 'log%s.txt' % log_suffix))
    if format == 'json':
        return JSONOutputFormat(osp.join(ev_dir, 'progress%s.json' % log_suffix))
    if format == 'csv':
        return CSVOutputFormat(osp.join(ev_dir, 'progress%s.csv' % log_suffix))
    if format == 'tensorboard':
        return TensorBoardOutputFormat(osp.join(ev_dir, 'tb%s' % log_suffix))
    raise ValueError('Unknown format specified: %s' % (format,))
|
def logkv(key, val):
    """
    Log a value of some diagnostic.

    Call this once for each diagnostic quantity, each iteration.
    If called many times, the last value will be used.
    """
    get_current().logkv(key, val)
|
def logkv_mean(key, val):
    """
    The same as logkv(), but if called many times, the values are averaged.
    """
    get_current().logkv_mean(key, val)
|
def logkvs(d):
    """Log every key/value pair of the dictionary ``d``."""
    for key in d:
        logkv(key, d[key])
|
def dumpkvs():
    """
    Write all of the diagnostics from the current iteration.
    """
    return get_current().dumpkvs()
|
def getkvs():
    """Return the mapping of currently buffered diagnostics (key -> value)."""
    return get_current().name2val
|
def log(*args, level=INFO):
    """
    Write the sequence of args, with no separators, to the console and output
    files (if you've configured an output file).
    """
    get_current().log(*args, level=level)
|
def debug(*args):
    """Log ``args`` at DEBUG verbosity."""
    log(*args, level=DEBUG)
|
def info(*args):
    """Log ``args`` at INFO verbosity."""
    log(*args, level=INFO)
|
def warn(*args):
    """Log ``args`` at WARN verbosity."""
    log(*args, level=WARN)
|
def error(*args):
    """Log ``args`` at ERROR verbosity."""
    log(*args, level=ERROR)
|
def set_level(level):
    """
    Set the logging threshold on the current logger.
    """
    get_current().set_level(level)
|
def set_comm(comm):
    """Attach a communicator used to average numeric stats across ranks."""
    get_current().set_comm(comm)
|
def get_dir():
    """
    Get the directory that log files are being written to.
    Will be None if there is no output directory (i.e., if you didn't call start).
    """
    return get_current().get_dir()
|
@contextmanager
def profile_kv(scopename):
    """Accumulate the elapsed wall time of the ``with`` body under ``wait_<scopename>``."""
    key = 'wait_' + scopename
    started = time.time()
    try:
        yield
    finally:
        get_current().name2val[key] += time.time() - started
|
def profile(n):
    """
    Decorator factory: time every call of the wrapped function under ``wait_<n>``.

    Usage:
        @profile("my_func")
        def my_func(): code
    """
    def decorator_with_name(func):
        # Bug fix: preserve the wrapped function's __name__/__doc__ so profiled
        # functions remain introspectable (previously they all became
        # ``func_wrapper``).
        @functools.wraps(func)
        def func_wrapper(*args, **kwargs):
            with profile_kv(n):
                return func(*args, **kwargs)
        return func_wrapper
    return decorator_with_name
|
def get_current():
    """Return the active Logger, lazily configuring the default one on first use."""
    if (Logger.CURRENT is None):
        _configure_default_logger()
    return Logger.CURRENT
|
class Logger(object):
    """
    Collects key/value diagnostics and periodically flushes them to a set of
    output formats. ``DEFAULT`` is the fallback logger; ``CURRENT`` is the one
    the module-level helper functions delegate to.
    """
    DEFAULT = None
    CURRENT = None

    def __init__(self, dir, output_formats, comm=None):
        self.name2val = defaultdict(float)
        self.name2cnt = defaultdict(int)
        self.level = INFO
        self.dir = dir
        self.output_formats = output_formats
        self.comm = comm

    def logkv(self, key, val):
        """Record ``val`` for ``key``, overwriting any earlier value this iteration."""
        self.name2val[key] = val

    def logkv_mean(self, key, val):
        """Record ``val`` for ``key`` as a running average over the iteration."""
        prev, count = self.name2val[key], self.name2cnt[key]
        self.name2val[key] = ((prev * count) / (count + 1)) + (val / (count + 1))
        self.name2cnt[key] = count + 1

    def dumpkvs(self):
        """Flush buffered diagnostics to every KVWriter and reset the buffers."""
        if self.comm is None:
            d = self.name2val
        else:
            # Average stats across ranks, weighting by each key's sample count.
            d = mpi_weighted_mean(
                self.comm,
                {name: (val, self.name2cnt.get(name, 1)) for (name, val) in self.name2val.items()},
            )
            if self.comm.rank != 0:
                d['dummy'] = 1
        out = d.copy()  # snapshot before clearing
        for fmt in self.output_formats:
            if isinstance(fmt, KVWriter):
                fmt.writekvs(d)
        self.name2val.clear()
        self.name2cnt.clear()
        return out

    def log(self, *args, level=INFO):
        if level >= self.level:
            self._do_log(args)

    def set_level(self, level):
        self.level = level

    def set_comm(self, comm):
        self.comm = comm

    def get_dir(self):
        return self.dir

    def close(self):
        for fmt in self.output_formats:
            fmt.close()

    def _do_log(self, args):
        # Free-form messages only go to sequence (human-readable) writers.
        for fmt in self.output_formats:
            if isinstance(fmt, SeqWriter):
                fmt.writeseq(map(str, args))
|
def get_rank_without_mpi_import():
    """Best-effort MPI rank from the environment, avoiding an mpi4py import."""
    for varname in ('PMI_RANK', 'OMPI_COMM_WORLD_RANK'):
        value = os.environ.get(varname)
        if value is not None:
            return int(value)
    return 0
|
def mpi_weighted_mean(comm, local_name2valcount):
    """
    Copied from: https://github.com/openai/baselines/blob/ea25b9e8b234e6ee1bca43083f8f3cf974143998/baselines/common/mpi_util.py#L110
    Perform a weighted average over dicts that are each on a different node
    Input: local_name2valcount: dict mapping key -> (value, count)
    Returns: key -> mean
    """
    all_name2valcount = comm.gather(local_name2valcount)
    if comm.rank != 0:
        # Non-root ranks contribute their data via gather but report nothing.
        return {}
    name2sum = defaultdict(float)
    name2count = defaultdict(float)
    for node_dict in all_name2valcount:
        for name, (val, count) in node_dict.items():
            try:
                val = float(val)
            except ValueError:
                if comm.rank == 0:
                    warnings.warn('WARNING: tried to compute mean on non-float {}={}'.format(name, val))
            else:
                name2sum[name] += val * count
                name2count[name] += count
    return {name: name2sum[name] / name2count[name] for name in name2sum}
|
def configure(dir=None, format_strs=None, comm=None, log_suffix=''):
    """
    Install a fresh Logger as Logger.CURRENT.

    If comm is provided, average all numerical stats across that comm.
    The output directory falls back to $OPENAI_LOGDIR, then to a timestamped
    temp directory.
    """
    if (dir is None):
        dir = os.getenv('OPENAI_LOGDIR')
    if (dir is None):
        dir = osp.join(tempfile.gettempdir(), datetime.datetime.now().strftime('openai-%Y-%m-%d-%H-%M-%S-%f'))
    assert isinstance(dir, str)
    dir = os.path.expanduser(dir)
    os.makedirs(os.path.expanduser(dir), exist_ok=True)
    rank = get_rank_without_mpi_import()
    if (rank > 0):
        # Non-zero ranks write to suffixed files so they don't clobber rank 0's.
        log_suffix = (log_suffix + ('-rank%03i' % rank))
    if (format_strs is None):
        # Rank 0 gets the full set of formats; other ranks just a log file.
        if (rank == 0):
            format_strs = os.getenv('OPENAI_LOG_FORMAT', 'stdout,log,csv').split(',')
        else:
            format_strs = os.getenv('OPENAI_LOG_FORMAT_MPI', 'log').split(',')
    format_strs = filter(None, format_strs)
    output_formats = [make_output_format(f, dir, log_suffix) for f in format_strs]
    Logger.CURRENT = Logger(dir=dir, output_formats=output_formats, comm=comm)
    if output_formats:
        log(('Logging to %s' % dir))
|
def _configure_default_logger():
    """Create the fallback logger and remember it so reset() can restore it."""
    configure()
    Logger.DEFAULT = Logger.CURRENT
|
def reset():
    """Close the current logger (if customized) and restore the default one."""
    if (Logger.CURRENT is not Logger.DEFAULT):
        Logger.CURRENT.close()
        Logger.CURRENT = Logger.DEFAULT
        log('Reset logger')
|
@contextmanager
def scoped_configure(dir=None, format_strs=None, comm=None):
    """Temporarily install a fresh logger, restoring the previous one on exit."""
    prevlogger = Logger.CURRENT
    configure(dir=dir, format_strs=format_strs, comm=comm)
    try:
        (yield)
    finally:
        Logger.CURRENT.close()
        Logger.CURRENT = prevlogger
|
def space_timesteps(num_timesteps, section_counts):
    """
    Create a list of timesteps to use from an original diffusion process,
    given the number of timesteps we want to take from equally-sized portions
    of the original process.

    For example, if there's 300 timesteps and the section counts are [10,15,20]
    then the first 100 timesteps are strided to be 10 timesteps, the second 100
    are strided to be 15 timesteps, and the final 100 are strided to be 20.

    If the stride is a string starting with "ddim", then the fixed striding
    from the DDIM paper is used, and only one section is allowed.

    :param num_timesteps: the number of diffusion steps in the original
                          process to divide up.
    :param section_counts: either a list of numbers, or a string containing
                           comma-separated numbers, indicating the step count
                           per section. As a special case, use "ddimN" where N
                           is a number of steps to use the striding from the
                           DDIM paper.
    :return: a set of diffusion steps from the original process to use.
    :raises ValueError: if the requested counts cannot be realized.
    """
    if isinstance(section_counts, str):
        if section_counts.startswith('ddim'):
            desired_count = int(section_counts[len('ddim'):])
            # Look for an integer stride whose range hits exactly desired_count steps.
            for i in range(1, num_timesteps):
                if len(range(0, num_timesteps, i)) == desired_count:
                    return set(range(0, num_timesteps, i))
            # Bug fix: report the requested step count (previously the message
            # wrongly interpolated num_timesteps).
            raise ValueError(f'cannot create exactly {desired_count} steps with an integer stride')
        section_counts = [int(x) for x in section_counts.split(',')]
    size_per = num_timesteps // len(section_counts)
    extra = num_timesteps % len(section_counts)  # leftovers go to the first sections
    start_idx = 0
    all_steps = []
    for i, section_count in enumerate(section_counts):
        size = size_per + (1 if i < extra else 0)
        if size < section_count:
            raise ValueError(f'cannot divide section of {size} steps into {section_count}')
        if section_count <= 1:
            frac_stride = 1
        else:
            # Fractional stride so both the first and last step of the section are kept.
            frac_stride = (size - 1) / (section_count - 1)
        cur_idx = 0.0
        taken_steps = []
        for _ in range(section_count):
            taken_steps.append(start_idx + round(cur_idx))
            cur_idx += frac_stride
        all_steps += taken_steps
        start_idx += size
    return set(all_steps)
|
class SpacedDiffusion(GaussianDiffusion):
    """
    A diffusion process which can skip steps in a base diffusion process.

    :param use_timesteps: a collection (sequence or set) of timesteps from the
                          original diffusion process to retain.
    :param kwargs: the kwargs to create the base diffusion process.
    """

    def __init__(self, use_timesteps, **kwargs):
        self.use_timesteps = set(use_timesteps)
        self.timestep_map = []
        self.original_num_steps = len(kwargs['betas'])
        base_diffusion = GaussianDiffusion(**kwargs)
        # Recompute betas for the retained subsequence: alphas_cumprod is a
        # running product, so the new beta between two kept steps is
        # 1 - (cumprod ratio) between them.
        last_alpha_cumprod = 1.0
        new_betas = []
        for (i, alpha_cumprod) in enumerate(base_diffusion.alphas_cumprod):
            if (i in self.use_timesteps):
                new_betas.append((1 - (alpha_cumprod / last_alpha_cumprod)))
                last_alpha_cumprod = alpha_cumprod
                self.timestep_map.append(i)
        kwargs['betas'] = np.array(new_betas)
        super().__init__(**kwargs)

    def p_mean_variance(self, model, *args, **kwargs):
        return super().p_mean_variance(self._wrap_model(model), *args, **kwargs)

    def training_losses(self, model, *args, **kwargs):
        return super().training_losses(self._wrap_model(model), *args, **kwargs)

    def condition_mean(self, cond_fn, *args, **kwargs):
        return super().condition_mean(self._wrap_model(cond_fn), *args, **kwargs)

    def condition_score(self, cond_fn, *args, **kwargs):
        return super().condition_score(self._wrap_model(cond_fn), *args, **kwargs)

    def _wrap_model(self, model):
        # Wrap at most once: the wrapper remaps spaced indices back to
        # original-process timesteps before calling the model.
        if isinstance(model, _WrappedModel):
            return model
        return _WrappedModel(model, self.timestep_map, self.original_num_steps)

    def _scale_timesteps(self, t):
        # Scaling is handled by the wrapped model, so this is the identity here.
        return t
|
class _WrappedModel():
def __init__(self, model, timestep_map, original_num_steps):
self.model = model
self.timestep_map = timestep_map
self.original_num_steps = original_num_steps
def __call__(self, x, ts, **kwargs):
map_tensor = th.tensor(self.timestep_map, device=ts.device, dtype=ts.dtype)
new_ts = map_tensor[ts]
return self.model(x, new_ts, **kwargs)
|
def add_dict_to_argparser(parser, default_dict):
    """Register one ``--<key>`` option per entry, inferring the type from the default."""
    for key, default in default_dict.items():
        if default is None:
            arg_type = str
        elif isinstance(default, bool):
            arg_type = str2bool  # allow yes/no-style strings on the CLI
        else:
            arg_type = type(default)
        parser.add_argument(f'--{key}', default=default, type=arg_type)
|
def args_to_dict(args, keys):
    """Extract the listed attributes of ``args`` into a plain dict."""
    result = {}
    for key in keys:
        result[key] = getattr(args, key)
    return result
|
def str2bool(v):
    """
    Parse a human-friendly boolean string for argparse.

    https://stackoverflow.com/questions/15008758/parsing-boolean-values-with-argparse
    """
    if isinstance(v, bool):
        return v
    lowered = v.lower()
    if lowered in ('yes', 'true', 't', 'y', '1'):
        return True
    if lowered in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('boolean value expected')
|
class TrainLoop():
    """
    Main training driver for the latent-diffusion MDT model: wraps the model
    in DDP, delegates mixed precision to MixedPrecisionTrainer, and handles
    EMA tracking, LR annealing, checkpoint resume/save, and logging.
    """

    def __init__(self, *, model, diffusion, data, batch_size, microbatch, lr, ema_rate, log_interval, save_interval, resume_checkpoint, use_fp16=False, fp16_scale_growth=0.001, schedule_sampler=None, weight_decay=0.0, lr_anneal_steps=0, scale_factor=0.18215):
        self.model = model
        self.diffusion = diffusion
        self.data = data
        self.batch_size = batch_size
        # microbatch <= 0 disables gradient accumulation (one microbatch = full batch).
        self.microbatch = (microbatch if (microbatch > 0) else batch_size)
        self.lr = lr
        # ema_rate may be a single float or a comma-separated string of rates.
        self.ema_rate = ([ema_rate] if isinstance(ema_rate, float) else [float(x) for x in ema_rate.split(',')])
        self.log_interval = log_interval
        self.save_interval = save_interval
        self.resume_checkpoint = resume_checkpoint
        self.use_fp16 = use_fp16
        self.fp16_scale_growth = fp16_scale_growth
        self.schedule_sampler = (schedule_sampler or UniformSampler(diffusion))
        self.weight_decay = weight_decay
        self.lr_anneal_steps = lr_anneal_steps
        # Latent scaling constant of the Stable Diffusion VAE.
        self.scale_factor = scale_factor
        self.step = 0
        self.resume_step = 0
        self.global_batch = (self.batch_size * dist.get_world_size())
        self.sync_cuda = th.cuda.is_available()
        self._load_and_sync_parameters()
        self.mp_trainer = MixedPrecisionTrainer(model=self.model, use_fp16=self.use_fp16, fp16_scale_growth=fp16_scale_growth)
        self.opt = AdamW(self.mp_trainer.master_params, lr=self.lr, weight_decay=self.weight_decay)
        if self.resume_step:
            self._load_optimizer_state()
            self.ema_params = [self._load_ema_parameters(rate) for rate in self.ema_rate]
        else:
            self.ema_params = [copy.deepcopy(self.mp_trainer.master_params) for _ in range(len(self.ema_rate))]
        if th.cuda.is_available():
            self.use_ddp = True
            self.ddp_model = DDP(self.model, device_ids=[dist_util.dev()], output_device=dist_util.dev(), broadcast_buffers=False, bucket_cap_mb=128, find_unused_parameters=False)
        else:
            if (dist.get_world_size() > 1):
                logger.warn('Distributed training requires CUDA. Gradients will not be synchronized properly!')
            self.use_ddp = False
            self.ddp_model = self.model
        self.instantiate_first_stage()

    def instantiate_first_stage(self):
        """Load the frozen SD VAE used to encode images into latents."""
        model = AutoencoderKL.from_pretrained('stabilityai/sd-vae-ft-mse').to(dist_util.dev())
        self.first_stage_model = model.eval()
        # NOTE(review): this overwrites the module's train() *method* with a
        # bool instead of calling it; requires_grad=False below still freezes
        # the VAE, but .train() becomes uncallable afterwards -- confirm intended.
        self.first_stage_model.train = False
        for param in self.first_stage_model.parameters():
            param.requires_grad = False

    @th.no_grad()
    def get_first_stage_encoding(self, x):
        """Encode images ``x`` into scaled VAE latents (no gradients tracked)."""
        encoder_posterior = self.first_stage_model.encode(x, return_dict=True)[0]
        z = encoder_posterior.sample()
        return (z.to(dist_util.dev()) * self.scale_factor)

    def _load_and_sync_parameters(self):
        """Optionally load model weights from a checkpoint, then broadcast to all ranks."""
        resume_checkpoint = (find_resume_checkpoint() or self.resume_checkpoint)
        if resume_checkpoint:
            self.resume_step = parse_resume_step_from_filename(resume_checkpoint)
            if (dist.get_rank() == 0):
                logger.log(f'loading model from checkpoint: {resume_checkpoint}...')
                self.model.load_state_dict(dist_util.load_state_dict(resume_checkpoint, map_location=dist_util.dev()))
        dist_util.sync_params(self.model.parameters())

    def _load_ema_parameters(self, rate):
        """Load EMA params for ``rate`` from a checkpoint, or copy current master params."""
        ema_params = copy.deepcopy(self.mp_trainer.master_params)
        main_checkpoint = (find_resume_checkpoint() or self.resume_checkpoint)
        ema_checkpoint = find_ema_checkpoint(main_checkpoint, self.resume_step, rate)
        if ema_checkpoint:
            if (dist.get_rank() == 0):
                logger.log(f'loading EMA from checkpoint: {ema_checkpoint}...')
                state_dict = dist_util.load_state_dict(ema_checkpoint, map_location=dist_util.dev())
                ema_params = self.mp_trainer.state_dict_to_master_params(state_dict)
        dist_util.sync_params(ema_params)
        return ema_params

    def _load_optimizer_state(self):
        """Restore AdamW state saved alongside the model checkpoint, if present."""
        main_checkpoint = (find_resume_checkpoint() or self.resume_checkpoint)
        opt_checkpoint = bf.join(bf.dirname(main_checkpoint), f'opt{self.resume_step:06}.pt')
        if bf.exists(opt_checkpoint):
            logger.log(f'loading optimizer state from checkpoint: {opt_checkpoint}')
            state_dict = dist_util.load_state_dict(opt_checkpoint, map_location=dist_util.dev())
            self.opt.load_state_dict(state_dict)

    def run_loop(self):
        """Train until lr_anneal_steps (if set), logging and saving on their intervals."""
        while ((not self.lr_anneal_steps) or ((self.step + self.resume_step) < self.lr_anneal_steps)):
            (batch, cond) = next(self.data)
            self.run_step(batch, cond)
            if ((self.step % self.log_interval) == 0):
                logger.dumpkvs()
            if ((self.step % self.save_interval) == 0):
                self.save()
                # Escape hatch for integration tests: stop after the first save.
                if (os.environ.get('DIFFUSION_TRAINING_TEST', '') and (self.step > 0)):
                    return
            self.step += 1
        # Save a final checkpoint unless the last completed step already saved one.
        if (((self.step - 1) % self.save_interval) != 0):
            self.save()

    def run_step(self, batch, cond):
        """One optimizer step: forward/backward, step, then EMA/LR/log bookkeeping."""
        self.forward_backward(batch, cond)
        took_step = self.mp_trainer.optimize(self.opt)
        if took_step:
            # EMA only advances when the fp16 step was not skipped for overflow.
            self._update_ema()
        self._anneal_lr()
        self.log_step()

    def forward_backward(self, batch, cond):
        """Accumulate gradients over microbatches of ``batch``.

        Each microbatch is encoded to VAE latents, then trained twice: once
        normally and once with ``enable_mask`` (the masked branch); both
        weighted losses are summed before backward.
        """
        self.mp_trainer.zero_grad()
        for i in range(0, batch.shape[0], self.microbatch):
            micro = batch[i:(i + self.microbatch)].to(dist_util.dev())
            micro = self.get_first_stage_encoding(micro).detach()
            micro_cond = {k: v[i:(i + self.microbatch)].to(dist_util.dev()) for (k, v) in cond.items()}
            last_batch = ((i + self.microbatch) >= batch.shape[0])
            (t, weights) = self.schedule_sampler.sample(micro.shape[0], dist_util.dev())
            compute_losses = functools.partial(self.diffusion.training_losses, self.ddp_model, micro, t, model_kwargs=micro_cond)
            micro_cond_mask = micro_cond.copy()
            micro_cond_mask['enable_mask'] = True
            compute_losses_mask = functools.partial(self.diffusion.training_losses, self.ddp_model, micro, t, model_kwargs=micro_cond_mask)
            if (last_batch or (not self.use_ddp)):
                losses = compute_losses()
                losses_mask = compute_losses_mask()
            else:
                # Skip gradient sync on non-final microbatches to save bandwidth.
                with self.ddp_model.no_sync():
                    losses = compute_losses()
                    losses_mask = compute_losses_mask()
            if isinstance(self.schedule_sampler, LossAwareSampler):
                self.schedule_sampler.update_with_local_losses(t, (losses['loss'].detach() + losses_mask['loss'].detach()))
            loss = ((losses['loss'] * weights).mean() + (losses_mask['loss'] * weights).mean())
            log_loss_dict(self.diffusion, t, {k: (v * weights) for (k, v) in losses.items()})
            log_loss_dict(self.diffusion, t, {('m_' + k): (v * weights) for (k, v) in losses_mask.items()})
            self.mp_trainer.backward(loss)

    def _update_ema(self):
        # Update every EMA copy towards the current master params at its rate.
        for (rate, params) in zip(self.ema_rate, self.ema_params):
            update_ema(params, self.mp_trainer.master_params, rate=rate)

    def _anneal_lr(self):
        # Linear decay of the LR to zero over lr_anneal_steps (disabled when 0).
        if (not self.lr_anneal_steps):
            return
        frac_done = ((self.step + self.resume_step) / self.lr_anneal_steps)
        lr = (self.lr * (1 - frac_done))
        for param_group in self.opt.param_groups:
            param_group['lr'] = lr

    def log_step(self):
        logger.logkv('step', (self.step + self.resume_step))
        logger.logkv('samples', (((self.step + self.resume_step) + 1) * self.global_batch))

    def save(self):
        """Save the model, every EMA copy, and (rank 0 only) the optimizer state."""
        def save_checkpoint(rate, params):
            state_dict = self.mp_trainer.master_params_to_state_dict(params)
            if (dist.get_rank() == 0):
                logger.log(f'saving model {rate}...')
                if (not rate):
                    filename = f'model{(self.step + self.resume_step):06d}.pt'
                else:
                    filename = f'ema_{rate}_{(self.step + self.resume_step):06d}.pt'
                with bf.BlobFile(bf.join(get_blob_logdir(), filename), 'wb') as f:
                    th.save(state_dict, f)
        save_checkpoint(0, self.mp_trainer.master_params)
        for (rate, params) in zip(self.ema_rate, self.ema_params):
            save_checkpoint(rate, params)
        if (dist.get_rank() == 0):
            with bf.BlobFile(bf.join(get_blob_logdir(), f'opt{(self.step + self.resume_step):06d}.pt'), 'wb') as f:
                th.save(self.opt.state_dict(), f)
        dist.barrier()
|
def parse_resume_step_from_filename(filename):
    """
    Parse filenames of the form path/to/modelNNNNNN.pt, where NNNNNN is the
    checkpoint's number of steps. Returns 0 when no step count can be parsed.
    """
    parts = filename.split('model')
    if len(parts) < 2:
        return 0
    step_text = parts[-1].split('.')[0]
    try:
        return int(step_text)
    except ValueError:
        return 0
|
def get_blob_logdir():
    """Directory where checkpoints are written; defaults to the logger dir."""
    return logger.get_dir()
|
def find_resume_checkpoint():
    # Hook for infrastructure-specific checkpoint discovery (e.g. blob storage);
    # the default implementation finds nothing.
    return None
|
def find_ema_checkpoint(main_checkpoint, step, rate):
    """Return the path of the EMA checkpoint matching ``step``/``rate`` next to
    ``main_checkpoint``, or None when it does not exist."""
    if main_checkpoint is None:
        return None
    candidate = bf.join(bf.dirname(main_checkpoint), f'ema_{rate}_{step:06d}.pt')
    return candidate if bf.exists(candidate) else None
|
def log_loss_dict(diffusion, ts, losses):
    """Log the mean of every loss term, plus per-quartile (over timestep) means."""
    num_timesteps = diffusion.num_timesteps
    for key, values in losses.items():
        logger.logkv_mean(key, values.mean().item())
        # Bucket each sample's loss by which quarter of the schedule its t is in.
        for sub_t, sub_loss in zip(ts.cpu().numpy(), values.detach().cpu().numpy()):
            quartile = int((4 * sub_t) / num_timesteps)
            logger.logkv_mean(f'{key}_q{quartile}', sub_loss)
|
def main():
    """Sampling entry point: load an MDT model, draw class-conditional samples
    (optionally with classifier-free guidance), decode them via the VAE,
    gather across ranks, and save everything to an .npz file.

    NOTE(review): this file defines ``main``/``create_argparser`` twice; this
    earlier pair is shadowed at import time by the later definitions.
    """
    th.backends.cuda.matmul.allow_tf32 = True
    args = create_argparser().parse_args()
    dist_util.setup_dist()
    logger.configure()
    logger.log('creating model and diffusion...')
    configs = args_to_dict(args, model_and_diffusion_defaults().keys())
    print(configs)
    image_size = configs['image_size']
    latent_size = (image_size // 8)  # the VAE downsamples by a factor of 8
    model = models_mdt.__dict__[args.model](input_size=latent_size, decode_layer=args.decode_layer)
    msg = model.load_state_dict(dist_util.load_state_dict(args.model_path, map_location='cpu'))
    print(msg)
    config_diffusion = args_to_dict(args, diffusion_defaults().keys())
    config_diffusion['timestep_respacing'] = str(args.num_sampling_steps)
    print(config_diffusion)
    diffusion = create_diffusion(**config_diffusion)
    model.to(dist_util.dev())
    if args.use_fp16:
        model.convert_to_fp16()
    model.eval()
    th.set_grad_enabled(False)
    vae = AutoencoderKL.from_pretrained(('stabilityai/sd-vae-ft-' + str(args.vae_decoder))).to(dist_util.dev())
    logger.log('sampling...')
    all_images = []
    all_labels = []
    while ((len(all_images) * args.batch_size) < args.num_samples):
        model_kwargs = {}
        if args.cfg_cond:
            # Classifier-free guidance: duplicate the noise and append the
            # null-class labels so cond/uncond run in one forward pass.
            classes = th.randint(low=0, high=NUM_CLASSES, size=(args.batch_size,), device=dist_util.dev())
            z = th.randn(args.batch_size, 4, latent_size, latent_size, device=dist_util.dev())
            z = th.cat([z, z], 0)
            classes_null = th.tensor(([NUM_CLASSES] * args.batch_size), device=dist_util.dev())
            classes_all = th.cat([classes, classes_null], 0)
            model_kwargs['y'] = classes_all
            model_kwargs['cfg_scale'] = args.cfg_scale
            model_kwargs['diffusion_steps'] = config_diffusion['diffusion_steps']
            model_kwargs['scale_pow'] = args.scale_pow
        else:
            z = th.randn(args.batch_size, 4, latent_size, latent_size, device=dist_util.dev())
            classes = th.randint(low=0, high=NUM_CLASSES, size=(args.batch_size,), device=dist_util.dev())
            model_kwargs['y'] = classes
        sample_fn = (diffusion.p_sample_loop if (not args.use_ddim) else diffusion.ddim_sample_loop)
        sample = sample_fn(model.forward_with_cfg, z.shape, z, clip_denoised=args.clip_denoised, progress=True, model_kwargs=model_kwargs, device=dist_util.dev())
        if args.cfg_cond:
            # Keep only the conditional half of the doubled batch.
            (sample, _) = sample.chunk(2, dim=0)
        sample = vae.decode((sample / 0.18215)).sample
        # Map from [-1, 1] floats to [0, 255] uint8 HWC images.
        sample = ((sample + 1) * 127.5).clamp(0, 255).to(th.uint8)
        sample = sample.permute(0, 2, 3, 1)
        sample = sample.contiguous()
        gathered_samples = [th.zeros_like(sample) for _ in range(dist.get_world_size())]
        dist.all_gather(gathered_samples, sample)
        all_images.extend([sample.cpu().numpy() for sample in gathered_samples])
        if args.class_cond:
            gathered_labels = [th.zeros_like(classes) for _ in range(dist.get_world_size())]
            dist.all_gather(gathered_labels, classes)
            all_labels.extend([labels.cpu().numpy() for labels in gathered_labels])
        logger.log(f'created {(len(all_images) * args.batch_size)} samples')
    arr = np.concatenate(all_images, axis=0)
    arr = arr[:args.num_samples]
    if args.class_cond:
        label_arr = np.concatenate(all_labels, axis=0)
        label_arr = label_arr[:args.num_samples]
    if (dist.get_rank() == 0):
        shape_str = 'x'.join([str(x) for x in arr.shape])
        out_path = os.path.join(logger.get_dir(), f'samples_{shape_str}.npz')
        logger.log(f'saving to {out_path}')
        if args.class_cond:
            np.savez(out_path, arr, label_arr)
        else:
            np.savez(out_path, arr)
    dist.barrier()
    logger.log('sampling complete')
|
def create_argparser():
    """Build the CLI parser for the sampling script."""
    defaults = dict(
        num_sampling_steps=250,
        clip_denoised=False,
        num_samples=5000,
        batch_size=16,
        use_ddim=False,
        model_path='',
        model='MDT_S_2',
        class_cond=True,
        cfg_scale=3.8,
        decode_layer=None,
        cfg_cond=False,
    )
    defaults.update(model_and_diffusion_defaults())
    parser = argparse.ArgumentParser()
    parser.add_argument('--scale_pow', default=4, type=float)
    parser.add_argument('--vae_decoder', type=str, default='ema')
    parser.add_argument('--world_size', default=1, type=int, help='number of distributed processes')
    parser.add_argument('--local_rank', default=-1, type=int)
    parser.add_argument('--dist_on_itp', action='store_true')
    parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
    parser.add_argument('--rank', default=0, type=int, help='rank for distrbuted training.')
    add_dict_to_argparser(parser, defaults)
    return parser
|
def main():
    """Training entry point: build the MDT model and diffusion process, the
    data loader, and hand everything to TrainLoop.
    """
    args = create_argparser().parse_args()
    dist_util.setup_dist_multinode(args)
    logger.configure()
    logger.log('creating model and diffusion...')
    configs = args_to_dict(args, model_and_diffusion_defaults().keys())
    print(configs)
    print(args)
    image_size = configs['image_size']
    latent_size = (image_size // 8)  # the VAE downsamples by a factor of 8
    model = models_mdt.__dict__[args.model](input_size=latent_size, mask_ratio=args.mask_ratio, decode_layer=args.decode_layer)
    print(model)
    diffusion = create_diffusion(**args_to_dict(args, diffusion_defaults().keys()))
    model.to(dist_util.dev())
    schedule_sampler = create_named_schedule_sampler(args.schedule_sampler, diffusion)
    logger.log('creating data loader...')
    data = load_data(data_dir=args.data_dir, batch_size=args.batch_size, image_size=args.image_size, class_cond=args.class_cond)
    logger.log('training...')
    TrainLoop(model=model, diffusion=diffusion, data=data, batch_size=args.batch_size, microbatch=args.microbatch, lr=args.lr, ema_rate=args.ema_rate, log_interval=args.log_interval, save_interval=args.save_interval, resume_checkpoint=args.resume_checkpoint, use_fp16=args.use_fp16, fp16_scale_growth=args.fp16_scale_growth, schedule_sampler=schedule_sampler, weight_decay=args.weight_decay, lr_anneal_steps=args.lr_anneal_steps).run_loop()
|
def create_argparser():
    """Build the CLI parser for the training script."""
    defaults = dict(
        data_dir='',
        schedule_sampler='uniform',
        lr=0.0003,
        weight_decay=0.0,
        lr_anneal_steps=0,
        batch_size=1,
        microbatch=-1,
        ema_rate='0.9999',
        log_interval=10,
        save_interval=10000,
        resume_checkpoint='',
        use_fp16=False,
        fp16_scale_growth=0.001,
        model='MDT_S_2',
        mask_ratio=None,
        decode_layer=None,
    )
    defaults.update(model_and_diffusion_defaults())
    parser = argparse.ArgumentParser()
    parser.add_argument('--world_size', default=1, type=int, help='number of distributed processes')
    parser.add_argument('--local_rank', default=-1, type=int)
    parser.add_argument('--dist_on_itp', action='store_true')
    parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
    parser.add_argument('--rank', default=0, type=int, help='rank for distrbuted training.')
    add_dict_to_argparser(parser, defaults)
    return parser
|
class Action(Enum):
    """
    Enum for the GitHub event ``action`` field.
    """
    opened = 'opened'
    reopened = 'reopened'
    closed = 'closed'
    labeled = 'labeled'
    unlabeled = 'unlabeled'
    ready_for_review = 'ready_for_review'
    synchronize = 'synchronize'
    review_requested = 'review_requested'
    converted_to_draft = 'converted_to_draft'
    submitted = 'submitted'
|
class AuthorAssociation(Enum):
    """
    Enum for GitHub ``authorAssociation``.
    """
    collaborator = 'COLLABORATOR'
    contributor = 'CONTRIBUTOR'
    first_timer = 'FIRST_TIMER'
    first_time_contributor = 'FIRST_TIME_CONTRIBUTOR'
    mannequin = 'MANNEQUIN'
    member = 'MEMBER'
    none = 'NONE'
    owner = 'OWNER'

    def is_valid(self):
        """
        Return whether ``self`` has valid permissions (collaborator, member
        or owner).
        """
        cls = type(self)
        return self in (cls.collaborator, cls.member, cls.owner)
|
class RevState(Enum):
    """
    Enum for the GitHub event ``review_state``.
    """
    commented = 'COMMENTED'
    changes_requested = 'CHANGES_REQUESTED'
    approved = 'APPROVED'
    dismissed = 'DISMISSED'

    def is_proper(self):
        """
        Return whether ``self`` is a proper review state, i.e. one that
        requests changes or approves.
        """
        cls = type(self)
        return self in (cls.changes_requested, cls.approved)
|
class Priority(Enum):
    """
    Enum for priority labels, from highest (``blocker``) to lowest (``trivial``).
    """
    blocker = 'p: blocker /1'
    critical = 'p: critical /2'
    major = 'p: major /3'
    minor = 'p: minor /4'
    trivial = 'p: trivial /5'
|
class Status(Enum):
    """
    Enum for status labels.
    """
    positive_review = 's: positive review'
    needs_work = 's: needs work'
    needs_review = 's: needs review'
    needs_info = 's: needs info'
|
class Resolution(Enum):
    """
    Enum for resolution labels.
    """
    duplicate = 'r: duplicate'
    invalid = 'r: invalid'
    wontfix = 'r: wontfix'
    worksforme = 'r: worksforme'
|
def selection_list(label):
    """
    Return the selection list (Priority, Status or Resolution) to which
    ``label`` belongs, or None if it matches none of them.
    """
    for enum_cls in (Priority, Status, Resolution):
        if any(label == member.value for member in enum_cls):
            return enum_cls
    return None
|
class GhLabelSynchronizer():
'\n Handler for access to GitHub issue via the ``gh`` in the bash command line\n of the GitHub runner.\n '
def __init__(self, url, actor):
    """
    Python constructor sets the issue / PR url and list of active labels.
    """
    self._url = url
    self._actor = actor
    self._warning_prefix = 'Label Sync Warning:'
    self._hint_prefix = 'Label Sync Hint:'
    # Lazily-populated caches of ``gh`` query results (see reset_view).
    self._labels = None
    self._author = None
    self._draft = None
    self._open = None
    self._review_decision = None
    self._reviews = None
    self._reviews_from_rest_api = None
    self._review_requests = None
    self._commits = None
    self._commit_date = None
    self._bot_login = None
    self._gh_version = None
    # URL shape: https://github.com/<owner>/<repo>/(issues|pull)/<number>
    s = url.split('/')
    self._owner = s[3]
    self._repo = s[4]
    self._number = os.path.basename(url)
    self._pr = True
    self._issue = ('pull request #%s' % self._number)
    if (url.rfind('issue') != (- 1)):
        self._issue = ('issue #%s' % self._number)
        self._pr = False
    info(('Create label handler for %s and actor %s' % (self._issue, self._actor)))
    self.bot_login()
    self.clean_warnings()
def is_pull_request(self):
    """
    Return whether we are treating a pull request (as opposed to an issue).
    """
    return self._pr
def reset_view(self):
    """
    Reset the cache of ``gh view`` results.
    """
    self._labels = None
    self._author = None
    self._draft = None
    self._open = None
    self._review_decision = None
    self._reviews = None
    self._commits = None
    self._commit_date = None
def rest_api(self, path_args, method=None, query=''):
    """
    Return data obtained from the ``gh api`` command.

    With ``method=None`` a GET is performed and the response is JSON-decoded;
    with an explicit method the raw command output is returned undecoded.
    """
    meth = '-X GET'
    if method:
        meth = ('-X %s' % method)
    # NOTE(review): the command is built by string interpolation and executed
    # with shell=True; path_args/query come from event data -- confirm they can
    # never contain shell metacharacters.
    cmd = ('gh api %s -H "Accept: application/vnd.github+json" %s %s' % (meth, path_args, query))
    debug(('Execute command: %s' % cmd))
    if method:
        return check_output(cmd, shell=True)
    return loads(check_output(cmd, shell=True))
def view(self, key):
    """
    Return the value of ``key`` obtained from the ``gh view`` command.
    """
    issue = 'issue'
    if self._pr:
        issue = 'pr'
    cmd = ('gh %s view %s --json %s' % (issue, self._url, key))
    debug(('Execute command: %s' % cmd))
    return loads(check_output(cmd, shell=True))[key]
def is_open(self):
    """
    Return ``True`` if the issue resp. PR is open (cached after first query).
    """
    if self._open is None:
        self._open = self.view('state') == 'OPEN'
        info(('Issue %s is open %s' % (self._issue, self._open)))
    return self._open
def is_draft(self):
    """
    Return ``True`` if the PR is a draft; plain issues never are.
    Cached after the first query.
    """
    if self._draft is None:
        self._draft = self.view('isDraft') if self.is_pull_request() else False
        info(('Issue %s is draft %s' % (self._issue, self._draft)))
    return self._draft
def bot_login(self):
    """
    Return the login name of the bot, discovered once from ``gh auth status``
    and cached. Falls back to ``default_bot`` when it cannot be determined.
    """
    if self._bot_login:
        return self._bot_login
    from subprocess import run
    cmd = 'gh version'
    capt = run(cmd, shell=True, capture_output=True)
    # capt.stdout is bytes and str() keeps the b'...' repr, so the split is on
    # the literal two-character backslash-n sequence of that repr.
    self._gh_version = str(capt.stdout).split('\\n')[0]
    info(('version: %s' % self._gh_version))
    cmd = 'gh auth status'
    capt = run(cmd, shell=True, capture_output=True)
    errtxt = str(capt.stderr)
    outtxt = str(capt.stdout)
    debug(('auth status err: %s' % errtxt))
    debug(('auth status out: %s' % outtxt))

    def read_login(txt, position_mark):
        # Scan both streams for a marker word; the login is the next token.
        # Falls through (returning None) when no marker is found.
        for t in txt:
            for p in position_mark:
                l = t.split()
                if (p in l):
                    return l[(l.index(p) + 1)]
    self._bot_login = read_login([errtxt, outtxt], ['account', 'as'])
    if (not self._bot_login):
        self._bot_login = default_bot
        warning('Bot is unknown')
        return self._bot_login
    if self._bot_login.endswith('[bot]'):
        # Strip the "[bot]" suffix of GitHub App identities.
        self._bot_login = self._bot_login.split('[bot]')[0]
    info(('Bot is %s' % self._bot_login))
    return self._bot_login
def is_this_bot(self, login):
'\n Check whether login is the bot itself.\n '
return login.startswith(self.bot_login())
def is_auth_team_member(self, login):
    """
    Return ``True`` if the user with the given login belongs to an authorized
    team (currently only ``sagemath/triage``).
    """
    def verify_membership(team):
        # The REST call raises CalledProcessError (non-2xx) for non-members.
        path_args = ('/orgs/sagemath/teams/%s/memberships/%s' % (team, login))
        try:
            res = self.rest_api(path_args)
            if ((res['state'] == 'active') and (res['role'] == 'member')):
                info(('User %s is a member of %s' % (login, team)))
                return True
        except CalledProcessError:
            pass
        info(('User %s is not a member of %s' % (login, team)))
        return False
    if verify_membership('triage'):
        return True
    return False
def actor_authorized(self):
    """
    Return ``True`` if the actor has sufficient permissions.

    When running as the default bot, authorization is derived from the author
    association of the latest review; otherwise from team membership.
    """
    if self.is_this_bot(default_bot):
        rev = self.get_latest_review()
        if (not rev):
            return False
        if (rev['author']['login'] == self._actor):
            ass = rev['authorAssociation']
            info(('Actor %s has association %s' % (self._actor, ass)))
            return AuthorAssociation(ass).is_valid()
        info(('Actor %s did not create latest review' % self._actor))
        return False
    else:
        return self.is_auth_team_member(self._actor)
def query_multi_pages(self, path_args, since=None):
    """
    Query data from the REST api, transparently following pagination and
    concatenating all result pages.
    """
    per_page = 100
    if since:
        query = ('-f per_page=%s -f page={} -f since=%s' % (per_page, since.strftime(datetime_format)))
    else:
        query = ('-f per_page=%s -f page={}' % per_page)
    results = []
    page = 1
    while True:
        batch = self.rest_api(path_args, query=query.format(page))
        results += batch
        if len(batch) < per_page:
            # A short page means we have reached the last page.
            return results
        page += 1
def clean_warnings(self):
    """
    Remove all warnings that have been posted by ``GhLabelSynchronizer``
    more than ``warning_lifetime`` ago.
    """
    warning_lifetime = timedelta(minutes=5)
    # Lookback window for the comment query (roughly 12 hours), so comments
    # from earlier runs are covered as well.
    time_frame = timedelta(minutes=730)
    today = datetime.today()
    since = (today - time_frame)
    path_args = ('/repos/%s/%s/issues/comments' % (self._owner, self._repo))
    comments = self.query_multi_pages(path_args, since=since)
    info(('Cleaning warning comments since %s (total found %s)' % (since, len(comments))))
    for c in comments:
        login = c['user']['login']
        body = c['body']
        comment_id = c['id']
        # The issue number is the last path component of the issue URL.
        issue = c['issue_url'].split('/').pop()
        created_at = c['created_at']
        if self.is_this_bot(login):
            debug(('%s comment %s created at %s on issue %s found' % (self.bot_login(), comment_id, created_at, issue)))
            # Only comments carrying the bot's warning or hint prefix are
            # subject to deletion.
            prefix = None
            if body.startswith(self._warning_prefix):
                prefix = self._warning_prefix
            if body.startswith(self._hint_prefix):
                prefix = self._hint_prefix
            if prefix:
                created = datetime.strptime(created_at, datetime_format)
                lifetime = (today - created)
                debug(('%s %s %s is %s old' % (self.bot_login(), prefix, comment_id, lifetime)))
                if (lifetime > warning_lifetime):
                    try:
                        self.rest_api(('%s/%s' % (path_args, comment_id)), method='DELETE')
                        info(('Comment %s on issue %s deleted' % (comment_id, issue)))
                    except CalledProcessError:
                        # A concurrent run may already have deleted it.
                        info(('Comment %s on issue %s has been deleted already' % (comment_id, issue)))
def get_labels(self):
    """
    Return the list of label names of the issue resp. PR, cached after
    the first fetch.
    """
    if self._labels is None:
        data = self.view('labels')
        self._labels = [entry['name'] for entry in data]
        info(('List of labels for %s: %s' % (self._issue, self._labels)))
    return self._labels
def get_author(self):
    """
    Return the author login of the issue resp. PR, cached after the
    first fetch.
    """
    if (self._author is not None):
        return self._author
    data = self.view('author')
    # Reuse the already-fetched data instead of issuing a second,
    # redundant ``view('author')`` call.
    self._author = data['login']
    info(('Author of %s: %s' % (self._issue, self._author)))
    return self._author
def get_commits(self):
    """
    Return the list of commits of the PR (``None`` for plain issues).
    The date of the newest commit is cached in ``self._commit_date``.
    """
    if not self.is_pull_request():
        return None
    if self._commits is None:
        self._commits = self.view('commits')
        self._commit_date = max(com['committedDate'] for com in self._commits)
        info(('Commits until %s for %s: %s' % (self._commit_date, self._issue, self._commits)))
    return self._commits
def get_review_requests(self):
    """
    Return the list of review requests of the PR (``None`` for plain
    issues), cached after the first fetch.
    """
    if not self.is_pull_request():
        return None
    if self._review_requests is None:
        self._review_requests = self.view('reviewRequests')
        debug(('Review requests for %s: %s' % (self._issue, self._review_requests)))
    return self._review_requests
def get_reviews(self, complete=False):
    """
    Return the list of reviews of the PR.  By default only proper reviews
    submitted after the most recent commit are returned; pass
    ``complete=True`` to get all of them.  Returns ``None`` for plain
    issues.
    """
    if not self.is_pull_request():
        return None
    if self._reviews is None:
        self._reviews = self.view('reviews')
        debug(('Reviews for %s: %s' % (self._issue, self._reviews)))
    if complete or not self._reviews:
        return self._reviews
    # Make sure the date of the newest commit is known.
    if self._commit_date is None:
        self.get_commits()
    date = self._commit_date
    commented = RevState.commented.value
    proper_new_revs = [rev for rev in self._reviews
                       if rev['submittedAt'] > date and rev['state'] != commented]
    info(('Proper reviews after %s for %s: %s' % (date, self._issue, proper_new_revs)))
    return proper_new_revs
def get_latest_review(self, complete=False):
    """
    Return the most recently submitted review of the PR.  By default only
    proper reviews after the most recent commit are considered; pass
    ``complete=True`` to consider all of them.
    """
    revs = self.get_reviews(complete=complete)
    if not revs:
        return
    latest = max(revs, key=lambda rev: rev['submittedAt'])
    fill_in = '' if complete else ' proper'
    info(('PR %s had latest%s review at %s: %s' % (self._issue, fill_in, latest['submittedAt'], latest)))
    return latest
def active_partners(self, item):
    """
    Return the other members of ``item``'s selection list whose label
    values are already present on the issue / PR.
    """
    labels = self.get_labels()
    partners = [cand for cand in type(item) if cand is not item and cand.value in labels]
    info(('Active partners of %s: %s' % (item, partners)))
    return partners
def review_comment_to_status(self):
    """
    Return a pair ``(status, association)``: the status label the most
    recent review comment starts with (``None`` if there is none) and
    the association of the review's author.
    """
    rev = self.get_latest_review(complete=True)
    ass = AuthorAssociation(rev['authorAssociation'])
    body = rev['body']
    for status in Status:
        if body.startswith(status.value):
            info(('Latest review comment contains status label %s' % status))
            return (status, ass)
    return (None, ass)
def review_by_actor(self):
    """
    Return ``True`` if the actor authored the latest review directly or
    indirectly (i.e. via a bot review that mentions the actor).

    Side effect: when the answer is ``True``, the corresponding bot
    review is dismissed so it no longer influences later decisions.
    """
    rev = self.get_latest_review()
    if (not rev):
        return False
    answer = False
    auth = rev['author']['login']
    if (self._actor == auth):
        answer = True
    if self.is_this_bot(auth):
        # A bot-authored review acts on behalf of the user mentioned in
        # its body.  NOTE(review): ``find(...) > 0`` misses a mention at
        # position 0 -- presumably the bot always prefixes the body with
        # other text, but this is worth confirming.
        if (rev['body'].find(self._actor) > 0):
            answer = True
    if answer:
        node_id = rev['id']
        info(("Ignore actor's review %s" % node_id))
        self.dismiss_bot_reviews(('@%s reverted decision.' % self._actor), node_id=node_id)
    return answer
def check_review_decision(self, rev_decision):
    """
    Return ``True`` if the latest proper review of the PR has the given
    decision.
    """
    latest = self.get_latest_review()
    if not latest:
        return False
    return latest['state'] == rev_decision.value
def needs_work_valid(self):
    """
    Return ``True`` if the PR needs work, i.e. the latest proper review
    requests changes.
    """
    if not self.check_review_decision(RevState.changes_requested):
        info(("PR %s doesn't need work" % self._issue))
        return False
    info(('PR %s needs work' % self._issue))
    return True
def positive_review_valid(self):
    """
    Return ``True`` if the PR is positively reviewed, i.e. the latest
    proper review approves it.
    """
    if self.check_review_decision(RevState.approved):
        # Typo fix: log messages previously read "positve".
        info(('PR %s has positive review' % self._issue))
        return True
    info(("PR %s doesn't have positive review" % self._issue))
    return False
def needs_review_valid(self):
    """
    Return ``True`` if the PR needs review.  This is the case if all
    proper reviews are older than the youngest commit.
    """
    if self.is_draft():
        return False
    if self.review_by_actor():
        info(('PR %s needs review (because of actor review)' % self._issue))
        return True
    # Either of these review states blocks a fresh "needs review".
    blockers = [
        (self.needs_work_valid, 'PR %s already under review (needs work)'),
        (self.positive_review_valid, 'PR %s already reviewed'),
    ]
    for check, message in blockers:
        if check():
            info((message % self._issue))
            return False
    info(('PR %s needs review' % self._issue))
    return True
def approve_allowed(self):
    """
    Return whether the actor has permission to approve this PR, i.e. no
    other user's proper review requests changes.
    """
    revs = self.get_reviews()
    # ``review_by_actor`` does not depend on the individual review and has
    # side effects (it may dismiss bot reviews), so call it once instead
    # of once per review as the previous list comprehension did.
    if self.review_by_actor():
        revs = []
    ch_req = RevState.changes_requested
    if any((rev['state'] == ch_req.value) for rev in revs):
        # Typo fix: message previously read "reqest".
        info(("PR %s can't be approved by %s since others request changes" % (self._issue, self._actor)))
        return False
    return True
def actor_valid(self):
    """
    Return whether the actor may approve this PR.  Anybody but the author
    may; the author may only approve as co-author, i.e. when somebody
    else both reviewed and committed to the PR.
    """
    author = self.get_author()
    if author != self._actor:
        info(('PR %s can be approved by %s' % (self._issue, self._actor)))
        return True
    # The actor is the author: require at least one non-bot review ...
    foreign_revs = [rev for rev in self.get_reviews()
                    if not self.is_this_bot(rev['author']['login'])]
    if not foreign_revs:
        info(("PR %s can't be approved by the author %s since no other person reviewed it" % (self._issue, self._actor)))
        return False
    # ... and at least one commit by somebody else (and not by the bot).
    co_authors = []
    for com in self.get_commits():
        for auth in com['authors']:
            login = auth['login']
            if login in co_authors or self.is_this_bot(login) or login == author:
                continue
            debug(('PR %s has recent commit by %s' % (self._issue, login)))
            co_authors.append(login)
    if not co_authors:
        info(("PR %s can't be approved by the author %s since no other person commited to it" % (self._issue, self._actor)))
        return False
    info(('PR %s can be approved by the author %s as co-author' % (self._issue, author)))
    return True
def gh_cmd(self, cmd, arg, option):
    """
    Perform a system call to ``gh`` for ``cmd`` to an issue resp. PR.

    ``arg`` is appended as a double-quoted positional argument when
    non-empty; ``option`` is passed through verbatim.
    """
    # ``gh issue`` resp. ``gh pr`` depending on what the handler wraps.
    issue = 'issue'
    if self._pr:
        issue = 'pr'
    if arg:
        # NOTE(review): ``arg`` is interpolated into a shell command inside
        # plain double quotes -- an argument containing '"' or '$' would
        # break or inject into the command line.  Current callers pass
        # controlled label/comment text, but this deserves hardening
        # (e.g. subprocess.run with an argument list).
        cmd_str = ('gh %s %s %s %s "%s"' % (issue, cmd, self._url, option, arg))
    else:
        cmd_str = ('gh %s %s %s %s' % (issue, cmd, self._url, option))
    debug(('Execute command: %s' % cmd_str))
    ex_code = os.system(cmd_str)
    if ex_code:
        warning(('Execution of %s failed with exit code: %s' % (cmd_str, ex_code)))
def edit(self, arg, option):
    """
    Edit the issue resp. PR via a ``gh edit`` system call.
    """
    self.gh_cmd('edit', arg, option)
def mark_as_ready(self):
    """
    Mark the PR as ready for review via a ``gh ready`` system call.
    """
    self.gh_cmd('ready', '', '')
def review(self, arg, text=None):
    """
    Submit a review action for the PR via ``gh``, optionally with a
    body ``text``.
    """
    option = ('-b "%s"' % text) if text else ''
    self.gh_cmd('review', arg, option)
def approve(self):
    """
    Approve the PR in the name of the actor.
    """
    self.review('--approve')
    info(('PR %s approved by %s' % (self._issue, self._actor)))
def request_changes(self):
    """
    Request changes for this PR in the name of the actor.
    """
    self.review('--request-changes', ('@%s requested changes for this PR' % self._actor))
    info(('Changes requested for PR %s by %s' % (self._issue, self._actor)))
def dismiss_bot_reviews(self, message, node_id=None, state=None, actor=None):
    """
    Dismiss all proper reviews of the bot matching the given filters.

    INPUT:

    - ``message`` -- dismissal message posted to GitHub
    - ``node_id`` -- optional; only dismiss the review with this node id
    - ``state`` -- optional ``RevState``; only dismiss reviews in this state
    - ``actor`` -- optional login; only dismiss reviews mentioning it
    """
    path_args = ('/repos/%s/%s/pulls/%s/reviews' % (self._owner, self._repo, self._number))
    if (not self._reviews_from_rest_api):
        # Cache the REST listing; it may be consulted several times per run.
        self._reviews_from_rest_api = self.query_multi_pages(path_args)
    reviews = self._reviews_from_rest_api
    options = ('-f message="%s" -f event="DISMISS"' % message)
    for rev in reviews:
        rev_login = rev['user']['login']
        rev_id = rev['id']
        rev_node_id = rev['node_id']
        rev_state = RevState(rev['state'])
        if (not self.is_this_bot(rev_login)):
            continue
        if (not rev_state.is_proper()):
            continue
        debug(('Bot review with node_id %s has id %s' % (rev_node_id, rev_id)))
        if node_id:
            if (rev_node_id != node_id):
                continue
        if state:
            if (rev_state != state):
                continue
        if actor:
            # Bug fix: this previously read ``if not rev['body'].find(actor)``
            # which skipped only bodies *starting* with the actor and happily
            # dismissed reviews not mentioning the actor at all (``find``
            # returns -1, which is truthy).  Use the same mention test as
            # ``review_by_actor``.
            if (rev['body'].find(actor) <= 0):
                continue
        path_args_dismiss = ('%s/%s/dismissals' % (path_args, rev_id))
        try:
            self.rest_api(path_args_dismiss, method='PUT', query=options)
            info(('Review %s (node_id %s, state %s) on PR %s dismissed' % (rev_id, rev_node_id, rev_state, self._issue)))
        except CalledProcessError:
            info(('Review %s (node_id %s, state %s) on PR %s cannot be dismissed' % (rev_id, rev_node_id, rev_state, self._issue)))
def review_comment(self, text):
    """
    Submit a plain review comment for the PR.
    """
    self.review('--comment', text)
    info(('Add review comment for PR %s: %s' % (self._issue, text)))
def add_comment(self, text):
    """
    Add a comment to the issue resp. PR via a ``gh`` system call.
    """
    self.gh_cmd('comment', text, '-b')
    info(('Add comment to %s: %s' % (self._issue, text)))
def add_warning(self, text):
    """
    Post ``text`` as a comment, marked with the bot's warning prefix.
    """
    self.add_comment(('%s %s' % (self._warning_prefix, text)))
def add_hint(self, text):
    """
    Post ``text`` as a comment, marked with the bot's hint prefix.
    """
    self.add_comment(('%s %s' % (self._hint_prefix, text)))
def add_label(self, label):
    """
    Attach ``label`` to the issue resp. PR unless it is already present.
    """
    if label in self.get_labels():
        return
    self.edit(label, '--add-label')
    info(('Add label to %s: %s' % (self._issue, label)))
def add_default_label(self, item):
    """
    Add ``item``'s label, but only if no partner label of its selection
    list is already present.
    """
    if self.active_partners(item):
        return
    self.add_label(item.value)
def select_label(self, item):
    """
    Add ``item``'s label and remove all other labels of its selection
    list, making the selection exclusive.
    """
    self.add_label(item.value)
    for other in type(item):
        if other is not item:
            self.remove_label(other.value)
def remove_label(self, label):
    """
    Remove ``label`` from the issue resp. PR if it is present.
    """
    if label not in self.get_labels():
        return
    self.edit(label, '--remove-label')
    info(('Remove label from %s: %s' % (self._issue, label)))
def reject_label_addition(self, item):
    """
    Post a comment that the given label can not be added and remove
    it again.
    """
    if (item is Status.positive_review):
        # NOTE(review): in the visible call sites (``on_label_add``) this
        # method is only invoked for positive_review, so the other branch
        # removes silently.
        self.add_warning(('Label *%s* cannot be added by the author of the PR.' % item.value))
    self.remove_label(item.value)
    return
def warning_about_label_addition(self, item):
    """
    Post a warning that the given label may be incorrect.
    """
    if not self.is_pull_request():
        message = ('Label *%s* is not suitable for an issue. Please use it on the corresponding PR.' % item.value)
    elif item is Status.needs_review:
        message = ('Label *%s* may be incorrect, since there are unresolved reviews.' % item.value)
    else:
        message = ("Label *%s* does not match the state of GitHub's review system." % item.value)
    self.add_warning(message)
def hint_about_label_removal(self, item):
    """
    Post a hint that labels of this selection list need not be removed
    manually any more.
    """
    sel_list = 'status' if (type(item) == Status) else 'priority'
    self.add_hint(("You don't need to remove %s labels any more. You'd better just add the label which replaces it." % sel_list))
def on_label_add(self, label):
    """
    Check if the given label belongs to a selection list. If so, remove
    all other labels of that list. In case of a status label reviews are
    booked accordingly.
    """
    sel_list = selection_list(label)
    if (not sel_list):
        # Not a managed label (neither status, priority nor resolution).
        return
    item = sel_list(label)
    if (label not in self.get_labels()):
        # The label is not (or no longer) on the item, e.g. removed again
        # by a concurrent run: warn and stop.
        partn = self.active_partners(item)
        if partn:
            self.add_warning(('Label *%s* can not be added due to *%s*!' % (label, partn[0].value)))
        else:
            warning(('Label %s of %s not found!' % (label, self._issue)))
        return
    if (sel_list is Status):
        if (not self.is_pull_request()):
            # Issues only accept the needs_info status label.
            if (item != Status.needs_info):
                self.warning_about_label_addition(item)
            return
        if (item == Status.needs_review):
            if self.needs_review_valid():
                # consistent with the review state: nothing to book
                pass
            elif self.is_draft():
                self.mark_as_ready()
            else:
                self.warning_about_label_addition(item)
                return
        if (item == Status.needs_work):
            if self.needs_work_valid():
                pass
            elif (not self.is_draft()):
                # Book the label as an actual change request.
                self.request_changes()
            else:
                self.warning_about_label_addition(item)
                return
        if (item == Status.positive_review):
            if self.positive_review_valid():
                pass
            elif (not self.actor_valid()):
                self.reject_label_addition(item)
                return
            elif self.approve_allowed():
                # Book the label as an actual approval.
                self.approve()
            else:
                self.warning_about_label_addition(item)
                return
    if (sel_list is Resolution):
        # A resolution label supersedes any priority.
        self.remove_all_labels_of_sel_list(Priority)
    # Finally make the selection exclusive within its list.
    for other in sel_list:
        if (other != item):
            self.remove_label(other.value)
def on_label_removal(self, label):
    """
    React to the removal of a selection-list label by posting a hint to
    rather add a replacement from the same list.  Exceptions are status
    labels on issues and ``Status.needs_info`` on a PR.
    """
    sel_list = selection_list(label)
    if not sel_list:
        return
    item = sel_list(label)
    if self.active_partners(item):
        # A partner label is still present; nothing to hint about.
        return
    if sel_list is Status:
        if self.is_pull_request() and item != Status.needs_info:
            self.hint_about_label_removal(item)
    elif sel_list is Priority:
        self.hint_about_label_removal(item)
def on_review_comment(self):
    """
    Check if the text of the most recent review begins with a
    specific label name. In this case, simulate the corresponding
    label addition. This feature is needed for people who don't
    have permission to add labels (i.e. aren't a member of the
    Triage team).
    """
    (status, ass) = self.review_comment_to_status()
    if (ass is AuthorAssociation.owner):
        if (status is Status.positive_review):
            info(('Owner approves PR %s for testing the bot' % self._issue))
            self.dismiss_bot_reviews(('@%s approved the PR.' % self._actor), state=RevState.changes_requested, actor=self._actor)
            self.approve()
    # Bug fix: was ``Author.Assoziation.contributor`` (NameError at runtime).
    elif (ass.is_valid() or (ass is AuthorAssociation.contributor)):
        if (status in (Status.needs_info, Status.needs_review)):
            # Bug fix: ``label`` was undefined in this method; use the
            # detected status label value instead.
            info(('Simulate label addition of %s for %s' % (status.value, self._issue)))
            self.select_label(status)
            self.run(Action.labeled, label=status.value)
def remove_all_labels_of_sel_list(self, sel_list):
    """
    Remove every label belonging to the given selection list.
    """
    for member in sel_list:
        self.remove_label(member.value)
def run(self, action, label=None, rev_state=None):
    """
    Run the given action.

    ``label`` accompanies label events, ``rev_state`` (a raw string,
    case-insensitive) accompanies review submissions.
    """
    self.reset_view()
    # Never react to events the bot triggered itself (avoids loops).
    if self.is_this_bot(self._actor):
        info(('Trigger %s of the bot %s is ignored' % (action, self._actor)))
        return
    if ((action is Action.opened) and self.is_pull_request()):
        if (not self.is_draft()):
            self.add_default_label(Status.needs_review)
    if (action in (Action.closed, Action.reopened, Action.converted_to_draft)):
        self.remove_all_labels_of_sel_list(Status)
    if (action is Action.labeled):
        self.on_label_add(label)
    if (action is Action.unlabeled):
        self.on_label_removal(label)
    if (action in (Action.ready_for_review, Action.synchronize)):
        self.dismiss_bot_reviews('New changes ready for review.')
        if self.needs_review_valid():
            self.select_label(Status.needs_review)
    if (action is Action.review_requested):
        self.select_label(Status.needs_review)
    if (action is Action.submitted):
        # Normalize the raw event string into the RevState enum.
        rev_state = RevState(rev_state.upper())
        if (rev_state is RevState.approved):
            self.dismiss_bot_reviews(('@%s approved the PR.' % self._actor), state=RevState.changes_requested, actor=self._actor)
            rev_req = self.get_review_requests()
            if rev_req:
                # Don't flip to positive review while requests are pending.
                info(('Waiting on pending review requests: %s' % rev_req))
            elif (self.actor_authorized() and self.positive_review_valid()):
                self.select_label(Status.positive_review)
        if (rev_state is RevState.changes_requested):
            self.dismiss_bot_reviews(('@%s requested changes.' % self._actor), state=RevState.approved)
            if self.needs_work_valid():
                self.select_label(Status.needs_work)
        if (rev_state is RevState.commented):
            self.on_review_comment()
def run_tests(self):
    """
    Simulative run over all possible events.

    This is not intended to validate all functionality. It just
    tests for bugs on invoking the methods. The result in the
    issue or PR depends on timing. Note that the GitHub action runner
    may run in parallel on the triggered events possibly on an other
    version of the code.
    """
    self.add_comment('Starting tests for sync_labels')
    for action in Action:
        self.add_comment(('Test action %s' % action.value))
        if (action in (Action.labeled, Action.unlabeled)):
            # Exercise every status and priority label for this action.
            for status in Status:
                if (action is Action.labeled):
                    self.add_label(status.value)
                else:
                    self.remove_label(status.value)
                self.run(action, label=status.value)
            for prio in Priority:
                if (action is Action.labeled):
                    self.add_label(prio.value)
                else:
                    self.remove_label(prio.value)
                self.run(action, label=prio.value)
            res = Resolution.worksforme
            if (action is Action.labeled):
                self.add_label(res.value)
            # Bug fix: this previously passed ``label=prio.value`` (the
            # leftover loop variable) instead of the resolution label.
            self.run(action, label=res.value)
        elif ((action == Action.submitted) and self.is_pull_request()):
            for rev_state in RevState:
                if (rev_state is RevState.approved):
                    self.approve()
                    self.run(action, rev_state=rev_state.value)
                elif (rev_state is RevState.changes_requested):
                    self.request_changes()
                    self.run(action, rev_state=rev_state.value)
                elif (rev_state is RevState.commented):
                    for status in Status:
                        self.review_comment(status.value)
                        self.run(action, rev_state=rev_state.value)
        elif self.is_pull_request():
            self.run(action)
|
def safe_makedirs(dir):
    """
    Create directory ``dir`` (including parents), tolerating the case
    that it already exists as a directory.
    """
    try:
        os.makedirs(dir)
    except OSError:
        # Raised also when ``dir`` already exists; only swallow the error
        # when the path is a directory by now.
        if os.path.isdir(dir):
            return
        raise
|
def install_cremona():
    """
    Create the mini Cremona elliptic-curve SQLite database under
    ``SAGE_SHARE/cremona``.

    Reads ``common/allcurves.00000-09999`` (relative to the current
    working directory) and fills a freshly created ``cremona_mini.db``.
    """
    from sqlite3 import connect
    cremona_root = os.path.join(SAGE_SHARE, 'cremona')
    safe_makedirs(cremona_root)
    target = os.path.join(cremona_root, 'cremona_mini.db')
    print('Creating database {0}'.format(target))
    if os.path.exists(target):
        os.remove(target)
    class_data = []
    curve_data = []
    # Fix: close the input file deterministically (it was left open).
    # Each line is "conductor iso_class curve_number equation rank torsion".
    with open(os.path.join('common', 'allcurves.00000-09999')) as curves_file:
        for line in curves_file:
            (N, iso, num, eqn, r, tors) = line.split()
            cls = (N + iso)
            cur = (cls + num)
            if (num == '1'):
                # The first curve of an isogeny class carries the class data.
                class_data.append((N, cls, r))
            curve_data.append((cur, cls, eqn, tors))
    con = connect(target)
    try:
        con.execute('CREATE TABLE t_class(rank INTEGER, class TEXT PRIMARY KEY, conductor INTEGER)')
        con.execute('CREATE TABLE t_curve(curve TEXT PRIMARY KEY, class TEXT, tors INTEGER, eqn TEXT UNIQUE)')
        con.execute('CREATE INDEX i_t_class_conductor ON t_class(conductor)')
        con.execute('CREATE INDEX i_t_curve_class ON t_curve(class)')
        con.executemany('INSERT INTO t_class(conductor,class,rank) VALUES (?,?,?)', class_data)
        con.executemany('INSERT INTO t_curve(curve,class,eqn,tors) VALUES (?,?,?,?)', curve_data)
        con.commit()
    finally:
        # Fix: the connection was never closed.
        con.close()
|
def install_ellcurves():
    """
    Install rank-sorted elliptic-curve text files under
    ``SAGE_SHARE/ellcurves``.

    Curves from ``common/allcurves.00000-09999`` are distributed into
    ``rank0``, ``rank1``, ... according to the rank field of each line;
    any extra per-rank files found in the local ``ellcurves`` directory
    are appended afterwards.
    """
    ellcurves_root = os.path.join(SAGE_SHARE, 'ellcurves')
    import shutil
    try:
        shutil.rmtree(ellcurves_root)
    except OSError:
        pass
    safe_makedirs(ellcurves_root)
    # Paths of additional per-rank source files, keyed by rank name.  The
    # three standard ranks are created even without a source file.
    rank_src = {'rank0': None, 'rank1': None, 'rank2': None}
    for r in os.listdir('ellcurves'):
        if (not r.startswith('.')):
            rank_src[r] = os.path.join('ellcurves', r)
    rank_dst = {}
    try:
        for r in rank_src:
            filename = os.path.join(ellcurves_root, r)
            print('Creating text file {0}'.format(filename))
            rank_dst[r] = open(filename, 'w')
        # Fix: read the curve list via a context manager (it was left open).
        with open(os.path.join('common', 'allcurves.00000-09999')) as curves_file:
            for line in curves_file:
                # Field 4 of each line is the rank.
                r = ('rank' + line.split()[4])
                rank_dst[r].write(line)
        for (r, src) in rank_src.items():
            if (src is not None):
                with open(src) as f:
                    rank_dst[r].write(f.read())
    finally:
        # Fix: close every destination file deterministically (previously
        # all handles were leaked and writes relied on interpreter exit).
        for f in rank_dst.values():
            f.close()
|
def write_pc_file(target, libs, version):
    """
    Write the pkg-config file ``<target>.pc`` into ``pkgconfigdir``,
    filling the module-level ``TEMPLATE`` with the linker flags for
    ``libs`` and the given ``version``.
    """
    filename = os.path.join(pkgconfigdir, '{0}.pc'.format(target))
    libflags = ' '.join('-l{0}'.format(lib) for lib in libs)
    contents = TEMPLATE.format(SAGE_LOCAL=SAGE_LOCAL, target=target, libflags=libflags, version=version)
    with open(filename, 'w') as f:
        f.write(contents)
    print('Wrote {0}'.format(filename))
|
class Application(object):
    """
    Command implementations behind ``sage --package``.

    Each public method implements one subcommand (see the docstring
    examples).  The methods rely on the ``sage_bootstrap`` machinery
    (``Package``, ``PackageClass``, the updater/creator classes, the
    ``pypi`` helpers) and the module-level ``log``.
    """

    def config(self):
        """
        Print the configuration

        $ sage --package config
        Configuration:
        * log = info
        * interactive = True
        """
        log.debug('Printing configuration')
        from sage_bootstrap.config import Configuration
        print(Configuration())

    def list_cls(self, *package_classes, **filters):
        """
        Print a list of all available packages

        $ sage --package list
        4ti2
        arb
        autotools
        [...]
        zlib

        $ sage -package list --has-file=spkg-configure.m4 :experimental:
        perl_term_readline_gnu

        $ sage -package list --has-file=spkg-configure.m4 --has-file=distros/debian.txt
        arb
        boost_cropped
        brial
        [...]
        zlib
        """
        log.debug('Listing packages')
        pc = PackageClass(*package_classes, **filters)
        for pkg_name in pc.names:
            print(pkg_name)

    def name(self, tarball_filename):
        """
        Find the package name given a tarball filename

        $ sage --package name pari-2.8-1564-gdeac36e.tar.gz
        pari
        """
        log.debug('Looking up package name for %s', tarball_filename)
        tarball = Tarball(os.path.basename(tarball_filename))
        print(tarball.package.name)

    def tarball(self, package_name):
        """
        Find the tarball filename given a package name

        $ sage --package tarball pari
        pari-2.8-1564-gdeac36e.tar.gz
        """
        log.debug('Looking up tarball name for %s', package_name)
        package = Package(package_name)
        print(package.tarball.filename)

    def apropos(self, incorrect_name):
        """
        Find up to 5 package names that are close to the given name

        $ sage --package apropos python
        Did you mean: cython, ipython, python2, python3, patch?
        """
        log.debug('Apropos for %s', incorrect_name)
        from sage_bootstrap.levenshtein import Levenshtein, DistanceExceeded
        # Only distances up to 5 are considered "close".
        levenshtein = Levenshtein(5)
        names = []
        for pkg in Package.all():
            try:
                names.append([levenshtein(pkg.name, incorrect_name), pkg.name])
            except DistanceExceeded:
                pass
        if names:
            # Sorting on [distance, name] yields the closest matches first.
            names = sorted(names)[:5]
            print('Did you mean: {0}?'.format(', '.join((name[1] for name in names))))
        else:
            print('There is no package similar to {0}'.format(incorrect_name))
            print('You can find further packages at http://files.sagemath.org/spkg/')

    def commit(self, package_name, message=None):
        """
        Commit the changes to the Sage source tree for the given package
        """
        package = Package(package_name)
        if (message is None):
            message = 'build/pkgs/{0}: Update to {1}'.format(package_name, package.version)
        # NOTE(review): ``message`` is interpolated into a shell string with
        # plain double quotes -- a message containing '"' would break the
        # command.  All internal callers pass controlled text.
        os.system('git commit -m "{0}" {1}'.format(message, package.path))

    def update(self, package_name, new_version, url=None, commit=False):
        """
        Update a package. This modifies the Sage sources.

        $ sage --package update pari 2015 --url=http://localhost/pari/tarball.tgz
        """
        log.debug('Updating %s to %s', package_name, new_version)
        update = PackageUpdater(package_name, new_version)
        if ((url is not None) or update.package.tarball_upstream_url):
            # NOTE(review): when ``url`` is None this prints 'Downloading
            # None'; presumably download_upstream then falls back to the
            # recorded upstream URL -- confirm in PackageUpdater.
            log.debug('Downloading %s', url)
            update.download_upstream(url)
        update.fix_checksum()
        if commit:
            self.commit(package_name)

    def update_latest(self, package_name, commit=False):
        """
        Update a package to the latest version. This modifies the Sage sources.
        """
        pkg = Package(package_name)
        dist_name = pkg.distribution_name
        if (dist_name is None):
            log.debug(('%s does not have Python distribution info in install-requires.txt' % pkg))
            return
        # Wheel packages are tracked on PyPI by their wheel artifact.
        if pkg.tarball_pattern.endswith('.whl'):
            source = 'wheel'
        else:
            source = 'pypi'
        try:
            pypi = PyPiVersion(dist_name, source=source)
        except PyPiNotFound:
            log.debug('%s is not a pypi package', dist_name)
            return
        else:
            pypi.update(pkg)
        if commit:
            self.commit(package_name)

    def update_latest_cls(self, package_name_or_class, commit=False):
        # Packages whose PyPI name collides with an unrelated project.
        exclude = ['cypari']
        pc = PackageClass(package_name_or_class, has_files=['checksums.ini', 'install-requires.txt'])
        if (not pc.names):
            log.warn('nothing to do (does not name a normal Python package)')
        for package_name in sorted(pc.names):
            if (package_name in exclude):
                log.debug('skipping %s because of pypi name collision', package_name)
                continue
            try:
                self.update_latest(package_name, commit=commit)
            except PyPiError as e:
                # Best effort: keep going with the remaining packages.
                log.warn('updating %s failed: %s', package_name, e)

    def download(self, package_name, allow_upstream=False):
        """
        Download a package

        $ sage --package download pari
        Using cached file /home/vbraun/Code/sage.git/upstream/pari-2.8-2044-g89b0f1e.tar.gz
        /home/vbraun/Code/sage.git/upstream/pari-2.8-2044-g89b0f1e.tar.gz
        """
        log.debug('Downloading %s', package_name)
        package = Package(package_name)
        package.tarball.download(allow_upstream=allow_upstream)
        print(package.tarball.upstream_fqn)

    def download_cls(self, package_name_or_class, allow_upstream=False, on_error='stop'):
        """
        Download a package or a class of packages
        """
        pc = PackageClass(package_name_or_class, has_files=['checksums.ini'])

        def download_with_args(package):
            try:
                self.download(package, allow_upstream=allow_upstream)
            except FileNotMirroredError:
                if (on_error == 'stop'):
                    raise
                elif (on_error == 'warn'):
                    log.warn('Unable to download tarball of %s', package)
                else:
                    raise ValueError('on_error must be one of "stop" and "warn"')
        pc.apply(download_with_args)

    def upload(self, package_name):
        """
        Upload a package to the Sage mirror network

        $ sage --package upload pari
        Uploading /home/vbraun/Code/sage.git/upstream/pari-2.8-2044-g89b0f1e.tar.gz
        """
        package = Package(package_name)
        if (not os.path.exists(package.tarball.upstream_fqn)):
            log.debug('Skipping %s because there is no local tarball', package_name)
            return
        if (not package.tarball.is_distributable()):
            log.info('Skipping %s because the tarball is marked as not distributable', package_name)
            return
        log.info('Uploading %s', package.tarball.upstream_fqn)
        fs = FileServer()
        fs.upload(package)

    def upload_cls(self, package_name_or_class):
        pc = PackageClass(package_name_or_class)
        pc.apply(self.upload)
        # Publish once after the whole batch has been uploaded.
        fs = FileServer()
        log.info('Publishing')
        fs.publish()

    def fix_checksum_cls(self, *package_classes):
        """
        Fix the checksum of packages

        $ sage --package fix-checksum
        """
        pc = PackageClass(*package_classes, has_files=['checksums.ini'])
        pc.apply(self.fix_checksum)

    def fix_checksum(self, package_name):
        """
        Fix the checksum of a package

        $ sage --package fix-checksum pari
        Updating checksum of pari-2.8-2044-g89b0f1e.tar.gz
        """
        log.debug('Correcting the checksum of %s', package_name)
        update = ChecksumUpdater(package_name)
        pkg = update.package
        if (not pkg.tarball_filename):
            log.info('Ignoring {0} because it is not a normal package'.format(package_name))
            return
        if (not os.path.exists(pkg.tarball.upstream_fqn)):
            log.info('Ignoring {0} because tarball is not cached'.format(package_name))
            return
        if pkg.tarball.checksum_verifies():
            log.info('Checksum of {0} (tarball {1}) unchanged'.format(package_name, pkg.tarball_filename))
        else:
            log.info('Updating checksum of {0} (tarball {1})'.format(package_name, pkg.tarball_filename))
            update.fix_checksum()

    def create(self, package_name, version=None, tarball=None, pkg_type=None, upstream_url=None, description=None, license=None, upstream_contact=None, pypi=False, source=None):
        """
        Create a package

        $ sage --package create foo --version 1.3 --tarball FoO-VERSION.tar.gz --type experimental

        $ sage --package create scikit_spatial --pypi --type optional

        $ sage --package create torch --pypi --source pip --type optional

        $ sage --package create jupyterlab_markup --pypi --source wheel --type optional
        """
        if ('-' in package_name):
            raise ValueError('package names must not contain dashes, use underscore instead')
        if pypi:
            if (source is None):
                # Auto-detect: prefer a wheel package when PyPI offers a
                # platform-independent wheel.
                try:
                    if PyPiVersion(package_name, source='wheel').tarball.endswith('-none-any.whl'):
                        source = 'wheel'
                    else:
                        source = 'normal'
                except PyPiError:
                    source = 'normal'
            pypi_version = PyPiVersion(package_name, source=source)
            if (source == 'normal'):
                if (not tarball):
                    # Replace the concrete version by the VERSION placeholder.
                    tarball = pypi_version.tarball.replace(pypi_version.version, 'VERSION')
                if (not version):
                    version = pypi_version.version
                upstream_url = 'https://pypi.io/packages/source/{0:1.1}/{0}/{1}'.format(package_name, tarball)
            elif (source == 'wheel'):
                if (not tarball):
                    tarball = pypi_version.tarball.replace(pypi_version.version, 'VERSION')
                if (not tarball.endswith('-none-any.whl')):
                    raise ValueError('Only platform-independent wheels can be used for wheel packages, got {0}'.format(tarball))
                if (not version):
                    version = pypi_version.version
                upstream_url = 'https://pypi.io/packages/{2}/{0:1.1}/{0}/{1}'.format(package_name, tarball, pypi_version.python_version)
            if (not description):
                description = pypi_version.summary
            if (not license):
                license = pypi_version.license
            if (not upstream_contact):
                upstream_contact = pypi_version.package_url
        if (upstream_url and (not tarball)):
            # Derive the tarball name from the last URL component.
            tarball = upstream_url.rpartition('/')[2]
        if (tarball and (source is None)):
            source = 'normal'
        if (tarball and (not pkg_type)):
            pkg_type = 'optional'
        log.debug('Creating %s: %s, %s, %s', package_name, version, tarball, pkg_type)
        creator = PackageCreator(package_name)
        if version:
            creator.set_version(version)
        if pkg_type:
            creator.set_type(pkg_type)
        if (description or license or upstream_contact):
            creator.set_description(description, license, upstream_contact)
        if (pypi or (source == 'pip')):
            creator.set_python_data_and_scripts(pypi_package_name=pypi_version.name, source=source)
        if tarball:
            creator.set_tarball(tarball, upstream_url)
            if (upstream_url and version):
                update = PackageUpdater(package_name, version)
                update.download_upstream()
            else:
                update = ChecksumUpdater(package_name)
            update.fix_checksum()

    def clean(self):
        """
        Remove outdated source tarballs from the upstream/ directory

        $ sage --package clean
        42 files were removed from the .../upstream directory
        """
        log.debug('Cleaning upstream/ directory')
        package_names = PackageClass(':all:').names
        # Keep exactly the tarballs referenced by some known package.
        keep = [Package(package_name).tarball.filename for package_name in package_names]
        count = 0
        for filename in os.listdir(SAGE_DISTFILES):
            if (filename not in keep):
                filepath = os.path.join(SAGE_DISTFILES, filename)
                # Only plain files are removed, subdirectories are kept.
                if os.path.isfile(filepath):
                    log.debug('Removing file {}'.format(filepath))
                    os.remove(filepath)
                    count += 1
        print('{} files were removed from the {} directory'.format(count, SAGE_DISTFILES))
|
def UNSIGNED(n):
    """Reduce ``n`` to an unsigned 32-bit value (two's complement)."""
    return n & 0xFFFFFFFF
|
class CksumAlgorithm(object):
    """
    Incremental CRC in the style of POSIX ``cksum``.

    Feed data with :meth:`update`; :meth:`get_value` folds the message
    length into the CRC and returns the final checksum,
    :meth:`hexdigest` returns it as a decimal string.  Relies on the
    module-level ``crctab`` lookup table.
    """

    def __init__(self):
        # Running CRC value and number of bytes consumed so far.
        self._value = 0
        self._length = 0

    def update(self, buffer):
        """Feed ``buffer`` (a ``str`` or a byte sequence) into the CRC."""
        if isinstance(buffer, str):
            data = [ord(ch) for ch in buffer]
        else:
            data = list(buffer)
        crc = self._value
        for byte in data:
            crc = UNSIGNED(crc << 8) ^ crctab[(crc >> 24) ^ byte]
        self._value = crc
        self._length += len(data)

    def get_value(self):
        """Return the final CRC with the message length folded in."""
        crc = self._value
        remaining = self._length
        while remaining:
            low_byte = remaining & 255
            remaining = remaining >> 8
            crc = UNSIGNED(crc << 8) ^ crctab[(crc >> 24) ^ low_byte]
        return UNSIGNED(~crc)

    def hexdigest(self):
        """Return the checksum as a decimal string (cksum-compatible)."""
        return str(self.get_value())
|
def make_parser():
    """
    The main commandline argument parser
    """
    # Top-level parser for ``sage --package``; per-subcommand epilogs
    # (epilog_*) are module-level strings defined elsewhere in this file.
    parser = argparse.ArgumentParser(description=description, epilog=epilog, prog='sage --package', formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('--log', dest='log', default=None, help='one of [DEBUG, INFO, ERROR, WARNING, CRITICAL]')
    subparsers = parser.add_subparsers(dest='subcommand')
    # 'config': print the bootstrap configuration
    parser_config = subparsers.add_parser('config', epilog=epilog_config, formatter_class=argparse.RawDescriptionHelpFormatter, help='Print the configuration')
    # 'list': print package names filtered by type, metadata files and dependencies
    parser_list = subparsers.add_parser('list', epilog=epilog_list, formatter_class=argparse.RawDescriptionHelpFormatter, help='Print a list of packages known to Sage')
    # ':all-or-nothing:' is a sentinel default resolved in run().
    parser_list.add_argument('package_class', metavar='[package_name|:package_type:]', type=str, default=[':all-or-nothing:'], nargs='*', help='package name or designator for all packages of a given type (one of :all:, :standard:, :optional:, and :experimental:); default: :all: (or nothing when --include-dependencies or --exclude-dependencies is given')
    parser_list.add_argument('--has-file', action='append', default=[], metavar='FILENAME', dest='has_files', help='only include packages that have this file in their metadata directory (examples: SPKG.rst, spkg-configure.m4, distros/debian.txt, spkg-install|spkg-install.in)')
    parser_list.add_argument('--no-file', action='append', default=[], metavar='FILENAME', dest='no_files', help='only include packages that do not have this file in their metadata directory (examples: huge, patches, huge|has_nonfree_dependencies)')
    # action='append' with nargs='*' yields a list of lists; flattened in PackageClass.
    parser_list.add_argument('--exclude', nargs='*', action='append', default=[], metavar='PACKAGE_NAME', help='exclude package from list')
    parser_list.add_argument('--include-dependencies', action='store_true', help='include (ordinary) dependencies of the packages recursively')
    parser_list.add_argument('--exclude-dependencies', action='store_true', help='exclude (ordinary) dependencies of the packages recursively')
    # 'name' / 'tarball': translate between tarball filenames and package names
    parser_name = subparsers.add_parser('name', epilog=epilog_name, formatter_class=argparse.RawDescriptionHelpFormatter, help='Find the package name given a tarball filename')
    parser_name.add_argument('tarball_filename', type=str, help='Tarball filename')
    parser_tarball = subparsers.add_parser('tarball', epilog=epilog_tarball, formatter_class=argparse.RawDescriptionHelpFormatter, help='Find the tarball filename given a package name')
    parser_tarball.add_argument('package_name', type=str, help='Package name')
    # 'apropos': fuzzy package-name search
    parser_apropos = subparsers.add_parser('apropos', epilog=epilog_apropos, formatter_class=argparse.RawDescriptionHelpFormatter, help='Find up to 5 package names that are close to the given name')
    parser_apropos.add_argument('incorrect_name', type=str, help='Fuzzy name to search for')
    # 'update' / 'update-latest': bump package versions (modify the source tree)
    parser_update = subparsers.add_parser('update', epilog=epilog_update, formatter_class=argparse.RawDescriptionHelpFormatter, help='Update a package. This modifies the Sage sources.')
    parser_update.add_argument('package_name', type=str, help='Package name')
    parser_update.add_argument('new_version', type=str, help='New version')
    parser_update.add_argument('--url', type=str, default=None, help='Download URL')
    parser_update.add_argument('--commit', action='store_true', help='Whether to run "git commit"')
    parser_update_latest = subparsers.add_parser('update-latest', epilog=epilog_update_latest, formatter_class=argparse.RawDescriptionHelpFormatter, help='Update a package to the latest version. This modifies the Sage sources.')
    parser_update_latest.add_argument('package_name', type=str, help='Package name (:all: for all packages)')
    parser_update_latest.add_argument('--commit', action='store_true', help='Whether to run "git commit"')
    # 'download': fetch tarballs from the mirror network
    parser_download = subparsers.add_parser('download', epilog=epilog_download, formatter_class=argparse.RawDescriptionHelpFormatter, help='Download tarball')
    parser_download.add_argument('package_name', type=str, help='Package name or :type:')
    parser_download.add_argument('--allow-upstream', action='store_true', help='Whether to fall back to downloading from the upstream URL')
    parser_download.add_argument('--on-error', choices=['stop', 'warn'], default='stop', help='What to do if the tarball cannot be downloaded')
    # NOTE(review): this option lives on the main parser, not a subparser,
    # but run() only honors it for the 'download' subcommand.
    parser.add_argument('--no-check-certificate', action='store_true', help='Do not check SSL certificates for https connections')
    # 'upload': push tarballs to the Sage fileserver
    parser_upload = subparsers.add_parser('upload', epilog=epilog_upload, formatter_class=argparse.RawDescriptionHelpFormatter, help='Upload tarball to Sage mirrors')
    parser_upload.add_argument('package_name', type=str, help='Package name or :type:')
    # 'fix-checksum': recompute checksums.ini entries
    parser_fix_checksum = subparsers.add_parser('fix-checksum', epilog=epilog_fix_checksum, formatter_class=argparse.RawDescriptionHelpFormatter, help='Fix the checksum of normal packages.')
    parser_fix_checksum.add_argument('package_class', metavar='[package_name|:package_type:]', type=str, default=[':all:'], nargs='*', help='package name or designator for all packages of a given type (one of :all:, :standard:, :optional:, and :experimental:); default: :all:')
    # 'create': scaffold a new package directory under build/pkgs/
    parser_create = subparsers.add_parser('create', epilog=epilog_create, formatter_class=argparse.RawDescriptionHelpFormatter, help='Create or overwrite package.')
    parser_create.add_argument('package_name', default=None, type=str, help='Package name.')
    parser_create.add_argument('--source', type=str, default=None, help='Package source (one of normal, wheel, script, pip); default depends on provided arguments')
    parser_create.add_argument('--version', type=str, default=None, help='Package version')
    parser_create.add_argument('--tarball', type=str, default=None, help='Tarball filename pattern, e.g. Foo-VERSION.tar.bz2')
    parser_create.add_argument('--type', type=str, default=None, help='Package type')
    parser_create.add_argument('--url', type=str, default=None, help='Download URL pattern, e.g. http://example.org/Foo-VERSION.tar.bz2')
    parser_create.add_argument('--description', type=str, default=None, help='Short description of the package (for SPKG.rst)')
    parser_create.add_argument('--license', type=str, default=None, help='License of the package (for SPKG.rst)')
    parser_create.add_argument('--upstream-contact', type=str, default=None, help='Upstream contact (for SPKG.rst)')
    parser_create.add_argument('--pypi', action='store_true', help='Create a package for a Python package available on PyPI')
    # 'clean': remove tarballs no longer referenced by any package
    parser_clean = subparsers.add_parser('clean', epilog=epilog_clean, formatter_class=argparse.RawDescriptionHelpFormatter, help='Remove outdated source tarballs from the upstream/ directory')
    return parser
|
def run():
    """
    Entry point for ``sage --package``: parse command-line arguments
    and dispatch to the matching :class:`Application` method.
    """
    parser = make_parser()
    # Without any arguments, print the help text instead of erroring out.
    if (len(sys.argv) == 1):
        parser.print_help()
        return
    args = parser.parse_args(sys.argv[1:])
    if (args.log is not None):
        level = getattr(logging, args.log.upper())
        log.setLevel(level=level)
    log.debug('Commandline arguments: %s', args)
    app = Application()
    if (args.subcommand == 'config'):
        app.config()
    elif (args.subcommand == 'list'):
        # ':all-or-nothing:' is the parser's sentinel default: it means
        # ':all:' unless a dependency filter was requested, in which case
        # the listing starts from an empty set of packages.
        if (args.package_class == [':all-or-nothing:']):
            if (args.include_dependencies or args.exclude_dependencies):
                args.package_class = []
            else:
                args.package_class = [':all:']
        app.list_cls(*args.package_class, has_files=args.has_files, no_files=args.no_files, exclude=args.exclude, include_dependencies=args.include_dependencies, exclude_dependencies=args.exclude_dependencies)
    elif (args.subcommand == 'name'):
        app.name(args.tarball_filename)
    elif (args.subcommand == 'tarball'):
        app.tarball(args.package_name)
    elif (args.subcommand == 'apropos'):
        app.apropos(args.incorrect_name)
    elif (args.subcommand == 'update'):
        app.update(args.package_name, args.new_version, url=args.url, commit=args.commit)
    elif (args.subcommand == 'update-latest'):
        app.update_latest_cls(args.package_name, commit=args.commit)
    elif (args.subcommand == 'download'):
        # NOTE(review): --no-check-certificate is a global option but is
        # only honored by this subcommand -- confirm this is intended.
        if args.no_check_certificate:
            try:
                import ssl
                # Disable certificate verification process-wide.
                ssl._create_default_https_context = ssl._create_unverified_context
            except ImportError:
                pass
        app.download_cls(args.package_name, allow_upstream=args.allow_upstream, on_error=args.on_error)
    elif (args.subcommand == 'create'):
        app.create(args.package_name, args.version, args.tarball, args.type, args.url, args.description, args.license, args.upstream_contact, pypi=args.pypi, source=args.source)
    elif (args.subcommand == 'upload'):
        app.upload_cls(args.package_name)
    elif (args.subcommand == 'fix-checksum'):
        app.fix_checksum_cls(*args.package_class)
    elif (args.subcommand == 'clean'):
        app.clean()
    else:
        raise RuntimeError('unknown subcommand: {0}'.format(args))
|
class Configuration(object):
    """
    Global configuration for the bootstrap scripts.

    Settings are read once from the ``SAGE_BOOTSTRAP`` environment
    variable, a comma-separated list of ``key:value`` pairs, e.g.
    ``SAGE_BOOTSTRAP="log:debug,interactive:false"``.
    """
    # Class-level defaults; _init_from_environ overrides them once.
    _initialized = False
    log = 'info'
    interactive = os.isatty(sys.stdout.fileno())

    def __init__(self):
        if not Configuration._initialized:
            Configuration._init_from_environ()
        if self.log not in LOG_LEVELS:
            raise ValueError('invalid log level: {0}'.format(self.log))
        assert isinstance(self.interactive, bool)

    @classmethod
    def _init_from_environ(cls):
        """Parse ``SAGE_BOOTSTRAP`` and store the settings on the class."""
        env = os.environ.get('SAGE_BOOTSTRAP', '').lower()
        for pair in env.split(','):
            if not pair.strip():
                continue
            key, value = pair.split(':', 1)
            key = key.strip()
            value = value.strip()
            if key == 'log':
                cls.log = value
            elif key == 'interactive':
                if value == 'true':
                    cls.interactive = True
                elif value == 'false':
                    cls.interactive = False
                else:
                    raise ValueError('interactive value must be "true" or "false", got "{0}"'.format(value))
            else:
                raise ValueError('unknown key: "{0}"'.format(key))
        cls._initialized = True

    def __repr__(self):
        lines = [
            'Configuration:',
            ' * log = {0}'.format(self.log),
            ' * interactive = {0}'.format(self.interactive),
        ]
        return '\n'.join(lines)
|
class PackageCreator(object):
    """
    Writer for the metadata files of a (new or existing) package
    under ``SAGE_ROOT/build/pkgs/<package_name>``.
    """

    def __init__(self, package_name):
        self.package_name = package_name
        self.path = os.path.join(SAGE_ROOT, 'build', 'pkgs', package_name)
        try:
            os.mkdir(self.path)
        except OSError:
            # The directory may already exist; that is fine.
            pass

    def _write_lines(self, filename, *lines):
        """Write each of ``lines`` followed by a newline to ``filename``."""
        with open(os.path.join(self.path, filename), 'w+') as f:
            for line in lines:
                f.write(line)
                f.write('\n')

    def set_version(self, version):
        """
        Write the version to ``package-version.txt``
        """
        self._write_lines('package-version.txt', version)

    def set_type(self, pkg_type):
        """
        Write the package type to ``type``
        """
        self._write_lines('type', pkg_type)

    def set_tarball(self, tarball, upstream_url):
        """
        Write the tarball name pattern to ``checksums.ini``
        """
        lines = ['tarball={0}'.format(tarball)]
        if upstream_url:
            lines.append('upstream_url={0}'.format(upstream_url))
        self._write_lines('checksums.ini', *lines)

    def set_description(self, description, license, upstream_contact):
        """
        Write the ``SPKG.rst`` file
        """
        def heading(title, char='-'):
            # reST heading: title, underline of equal length, blank line.
            return '{0}\n{1}\n\n'.format(title, char * len(title))
        if description:
            title = '{0}: {1}'.format(self.package_name, description)
        else:
            title = self.package_name
        chunks = [heading(title, '='), heading('Description')]
        if description:
            chunks.append('{0}\n\n'.format(description))
        chunks.append(heading('License'))
        if license:
            chunks.append('{0}\n\n'.format(license))
        chunks.append(heading('Upstream Contact'))
        if upstream_contact:
            chunks.append('{0}\n\n'.format(upstream_contact))
        with open(os.path.join(self.path, 'SPKG.rst'), 'w+') as f:
            f.write(''.join(chunks))

    def set_python_data_and_scripts(self, pypi_package_name=None, source='normal'):
        """
        Write the file ``dependencies`` and other files for Python packages.

        If ``source`` is ``"normal"``, write the files ``spkg-install.in`` and
        ``install-requires.txt``.

        If ``source`` is ``"wheel"``, write the file ``install-requires.txt``.

        If ``source`` is ``"pip"``, write the file ``requirements.txt``.
        """
        if pypi_package_name is None:
            pypi_package_name = self.package_name
        with open(os.path.join(self.path, 'dependencies'), 'w+') as f:
            f.write(' | $(PYTHON_TOOLCHAIN) $(PYTHON)\n\n')
            f.write('----------\nAll lines of this file are ignored except the first.\n')
        requirement = '{0}\n'.format(pypi_package_name)
        if source == 'normal':
            with open(os.path.join(self.path, 'spkg-install.in'), 'w+') as f:
                f.write('cd src\nsdh_pip_install .\n')
            with open(os.path.join(self.path, 'install-requires.txt'), 'w+') as f:
                f.write(requirement)
        elif source == 'wheel':
            with open(os.path.join(self.path, 'install-requires.txt'), 'w+') as f:
                f.write(requirement)
        elif source == 'pip':
            with open(os.path.join(self.path, 'requirements.txt'), 'w+') as f:
                f.write(requirement)
        elif source == 'script':
            # Script packages carry no generated Python metadata files.
            pass
        else:
            raise ValueError('package source must be one of normal, script, pip, or wheel')
|
class Application(object):
    """
    Command application for ``sage-download-file``.

    Construction installs a process-wide socket timeout; downloads go
    through the mirror network unless a plain URL is given.
    """

    def __init__(self, timeout, quiet):
        import socket
        socket.setdefaulttimeout(timeout)
        self.quiet = quiet

    def print_fastest_mirror(self):
        """Print the fastest-responding mirror URL."""
        print(MirrorList().fastest)

    def download_url(self, url, destination):
        """Download ``url`` to ``destination``, raising on failure."""
        show_progress = not self.quiet
        download = Download(url, destination, progress=show_progress, ignore_errors=False)
        download.run()

    def download_tarball(self, tarball_filename, destination=None, allow_upstream=False):
        """Fetch a tarball by filename; optionally save a copy at ``destination``."""
        tarball = Tarball(tarball_filename)
        tarball.download(allow_upstream=allow_upstream)
        if destination is not None:
            tarball.save_as(destination)
|
def make_parser():
    """
    The commandline argument parser for sage-download-file
    """
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument('--log', dest='log', default=None, help='one of [DEBUG, INFO, ERROR, WARNING, CRITICAL]')
    parser.add_argument('--print-fastest-mirror', action='store_true', help='Print out the fastest mirror. All other arguments are ignored in that case.')
    parser.add_argument('--quiet', action='store_true', help='Hide progress bar')
    parser.add_argument('--timeout', type=float, default=None, help='Timeout for network operations')
    parser.add_argument('--allow-upstream', action='store_true', help='Whether to fall back to downloading from the upstream URL')
    # Both positionals are optional; run() validates that either
    # --print-fastest-mirror or url_or_tarball was supplied.
    parser.add_argument('url_or_tarball', type=str, nargs='?', default=None, help='A http:// url or a tarball filename. In the latter case, the\n    tarball is downloaded from the mirror network and its checksum\n    is verified.')
    parser.add_argument('destination', type=str, nargs='?', default=None, help='Where to write the file. If the destination is not specified, a url\n    will be downloaded and the content written to stdout and a\n    tarball will be saved under {SAGE_DISTFILES}'.format(SAGE_DISTFILES=SAGE_DISTFILES))
    parser.add_argument('--no-check-certificate', action='store_true', help='Do not check SSL certificates for https connections')
    return parser
|
def run():
    """
    Entry point for ``sage-download-file``: parse arguments and either
    print the fastest mirror or download a URL/tarball.
    """
    parser = make_parser()
    # With no arguments at all, just show the help text.
    if (len(sys.argv) == 1):
        parser.print_help()
        return
    args = parser.parse_args(sys.argv[1:])
    if (args.log is not None):
        level = getattr(logging, args.log.upper())
        log.setLevel(level=level)
    log.debug('Commandline arguments: %s', args)
    if args.no_check_certificate:
        try:
            import ssl
            # Disable certificate verification process-wide.
            ssl._create_default_https_context = ssl._create_unverified_context
        except ImportError:
            pass
    app = Application(timeout=args.timeout, quiet=args.quiet)
    # At least one mode of operation must have been requested.
    if ((not args.print_fastest_mirror) and (args.url_or_tarball is None)):
        parser.print_help()
        print('')
        print('error: either --print-fastest-mirror or url_or_tarball is required')
        sys.exit(2)
    if args.print_fastest_mirror:
        app.print_fastest_mirror()
    elif is_url(args.url_or_tarball):
        # Plain URLs bypass the mirror network entirely.
        app.download_url(args.url_or_tarball, args.destination)
    else:
        app.download_tarball(args.url_or_tarball, args.destination, args.allow_upstream)
|
def format_error(message):
    """
    Print an error report to stderr.

    Writes the current exception traceback (if any), framed by rows of
    asterisks, followed by ``message``.

    INPUT:

    - ``message`` -- the error to report; may be any object (callers
      such as ``run_safe`` pass exception instances), it is converted
      with ``str``.
    """
    stars = (('*' * 72) + '\n')
    sys.stderr.write(stars)
    try:
        import traceback
        traceback.print_exc(file=sys.stderr)
        sys.stderr.write(stars)
    except BaseException:
        # Printing the traceback is best-effort only.
        pass
    # BUG FIX: run_safe() passes an exception object, but file.write()
    # only accepts str -- convert explicitly instead of raising TypeError.
    sys.stderr.write(str(message))
    sys.stderr.write(stars)
|
def run_safe():
    """
    Run the command-line tool, reporting any uncaught exception.

    On error, a formatted report is printed via ``format_error`` and the
    process exits with status 1; the ``finally`` guarantees the exit
    even if the error reporting itself fails.
    """
    try:
        run()
    except Exception as error:
        try:
            format_error(error)
        finally:
            sys.exit(1)
|
def try_lock(fd, operation):
    """
    Try flock() but ignore ``ENOLCK`` errors, which could happen if the
    file system does not support locking.
    """
    try:
        flock(fd, operation)
    except IOError as err:
        # File systems without locking support report ENOLCK; treat
        # that as a harmless no-op and re-raise anything else.
        if err.errno == ENOLCK:
            return
        raise
|
class MirrorListException(RuntimeError):
    """Raised when the mirror list is unusable (e.g. no mirror reachable)."""
    pass
|
class MirrorList(object):
    """
    Aggregate of all mirror sources configured in ``SAGE_ROOT/.upstream.d``.

    Each readable file there contributes either literal URLs (one per
    line) or, for lines ending in ``mirror_list``, a dynamically
    downloaded and cached mirror list.

    NOTE(review): ``Application.print_fastest_mirror`` accesses
    ``MirrorList().fastest``, which this class does not define here --
    verify where that attribute comes from.
    """
    def __init__(self):
        self.sources = []
        upstream_d = os.path.join(SAGE_ROOT, '.upstream.d')
        for fname in sorted(os.listdir(upstream_d)):
            # Skip editor backup/autosave files.
            if (('~' in fname) or ('#' in fname)):
                continue
            try:
                with open(os.path.join(upstream_d, fname), 'r') as f:
                    for line in f:
                        line = line.strip()
                        # Ignore comments and blank lines.
                        if line.startswith('#'):
                            continue
                        if (not line):
                            continue
                        # Expand the supported placeholder variables.
                        line = line.replace('${SAGE_ROOT}', SAGE_ROOT)
                        line = line.replace('${SAGE_DISTFILES}', SAGE_DISTFILES)
                        if ('${SAGE_SERVER}' in line):
                            SAGE_SERVER = os.environ.get('SAGE_SERVER', '')
                            # Drop ${SAGE_SERVER} lines when the variable is unset.
                            if (not SAGE_SERVER):
                                continue
                            line = line.replace('${SAGE_SERVER}', SAGE_SERVER)
                        if line.endswith('mirror_list'):
                            # Dynamic list: downloaded and cached under SAGE_DISTFILES.
                            cache_filename = os.path.join(SAGE_DISTFILES, line.rpartition('/')[2])
                            self.sources.append(MirrorList_from_url(line, cache_filename))
                        else:
                            # Literal URL: wrap as a one-element source.
                            self.sources.append([line])
            except IOError:
                # Unreadable configuration files are skipped (best effort).
                pass
    def __iter__(self):
        """
        Iterate through the list of mirrors.

        This is the main entry point into the mirror list. Every
        script should just use this function to try mirrors in order
        of preference. This will not just yield the official mirrors,
        but also urls for packages that are currently being tested.
        """
        for source in self.sources:
            for mirror in source:
                (yield mirror)
|
class MirrorList_from_url(object):
    """
    A mirror list downloaded from ``url`` and cached on disk in ``filename``.

    The cache is refreshed -- and the mirrors re-ranked by connection
    speed -- when it is empty or older than :attr:`MAXAGE` seconds.
    Concurrent access is coordinated with ``flock`` via ``try_lock``.
    """
    # Maximum age of the cached mirror list: 24 hours, in seconds.
    MAXAGE = ((24 * 60) * 60)
    def __init__(self, url, filename):
        self.url = url
        self.filename = filename
        # Lazily populated by the ``mirrors`` property.
        self._mirrors = None
    @property
    def mirrors(self):
        """Return the list of mirror URLs, loading/refreshing the cache lazily."""
        if (self._mirrors is not None):
            return self._mirrors
        # Open the cache read-write, creating it if it does not exist yet.
        try:
            self.mirrorfile = open(self.filename, 'r+t')
        except IOError:
            self.mirrorfile = open(self.filename, 'w+t')
        with self.mirrorfile:
            self.mirrorfd = self.mirrorfile.fileno()
            # Shared lock for reading; upgraded to exclusive only when a
            # refresh is needed.
            try_lock(self.mirrorfd, LOCK_SH)
            if self._must_refresh():
                try_lock(self.mirrorfd, LOCK_EX)
                # Re-check: another process may have refreshed the cache
                # while we waited for the exclusive lock.
                if self._must_refresh():
                    self._refresh()
            if (self._mirrors is None):
                self._mirrors = self._load()
        return self._mirrors
    def _load(self, mirror_list=None):
        """
        Load and return `mirror_list` (defaults to the one on disk) as
        a list of strings
        """
        if (mirror_list is None):
            try:
                self.mirrorfile.seek(0)
                mirror_list = self.mirrorfile.read()
            except IOError:
                log.critical('Failed to load the cached mirror list')
                return []
        if (mirror_list == ''):
            return []
        # The cache stores the Python repr of a list of strings.
        import ast
        try:
            return ast.literal_eval(mirror_list)
        except SyntaxError:
            log.critical('Downloaded mirror list has syntax error: {0}'.format(mirror_list))
            return []
    def _save(self):
        """
        Save the mirror list for (short-term) future use.
        """
        self.mirrorfile.seek(0)
        self.mirrorfile.write(repr(self.mirrors))
        # Truncate in case the new content is shorter than the old.
        self.mirrorfile.truncate()
        self.mirrorfile.flush()
    def _port_of_mirror(self, mirror):
        """Return the TCP port implied by the mirror's URL scheme (default 80)."""
        if mirror.startswith('http://'):
            return 80
        if mirror.startswith('https://'):
            return 443
        if mirror.startswith('ftp://'):
            return 21
        return 80
    def _rank_mirrors(self):
        """
        Sort the mirrors by speed, fastest being first

        This method is used by the YUM fastestmirror plugin
        """
        timed_mirrors = []
        import time, socket
        log.info('Searching fastest mirror')
        timeout = 1
        for mirror in self.mirrors:
            if (not mirror.startswith('http')):
                log.debug('we currently can only handle http, got %s', mirror)
                continue
            port = self._port_of_mirror(mirror)
            mirror_hostname = urlparse.urlsplit(mirror).netloc
            # Time a bare TCP connect as a proxy for mirror speed.
            time_before = time.time()
            try:
                sock = socket.create_connection((mirror_hostname, port), timeout)
                sock.close()
            except (IOError, socket.error, socket.timeout) as err:
                log.warning(((str(err).strip() + ': ') + mirror))
                continue
            result = (time.time() - time_before)
            result_ms = int((1000 * result))
            log.info(((str(result_ms).rjust(5) + 'ms: ') + mirror))
            timed_mirrors.append((result, mirror))
            # Stop early once five mirrors responded in under 300ms.
            timed_mirrors.sort()
            if ((len(timed_mirrors) >= 5) and (timed_mirrors[4][0] < 0.3)):
                break
        if (len(timed_mirrors) == 0):
            # No mirror reachable directly; fatal unless a proxy is in use
            # (behind a proxy the connect timings are meaningless anyway).
            if ('http_proxy' not in os.environ):
                log.error('Could not reach any mirror directly and no proxy set')
                raise MirrorListException('Failed to connect to any mirror, probably no internet connection')
            log.info('Cannot time mirrors via proxy, using default order')
        else:
            self._mirrors = [m[1] for m in timed_mirrors]
        log.info(('Fastest mirror: ' + self.fastest))
    def _age(self):
        """
        Return the age of the cached mirror list in seconds
        """
        import time
        mtime = os.fstat(self.mirrorfd).st_mtime
        now = time.mktime(time.localtime())
        return (now - mtime)
    def _must_refresh(self):
        """
        Return whether we must download the mirror list.

        If and only if this method returns ``False`` is it admissible
        to use the cached mirror list.
        """
        # An empty cache file means it was never successfully populated.
        if (os.fstat(self.mirrorfd).st_size == 0):
            return True
        return (self._age() > self.MAXAGE)
    def _refresh(self):
        """
        Download and rank the mirror list.
        """
        # NOTE(review): ``urllib.urlopen`` here presumably comes from a
        # py2/py3 compatibility wrapper imported at the top of the file.
        log.info('Downloading the Sage mirror list')
        try:
            with contextlib.closing(urllib.urlopen(self.url)) as f:
                mirror_list = f.read().decode('ascii')
        except IOError:
            # Keep using the stale cache rather than failing outright.
            log.critical('Downloading the mirror list failed, using cached version')
        else:
            self._mirrors = self._load(mirror_list)
            self._rank_mirrors()
            self._save()
    def __iter__(self):
        """
        Iterate through the list of mirrors.

        This is the main entry point into the mirror list. Every
        script should just use this function to try mirrors in order
        of preference. This will not just yield the official mirrors,
        but also urls for packages that are currently being tested.
        """
        # A manually-set SAGE_SERVER always takes priority.
        try:
            (yield os.environ['SAGE_SERVER'])
        except KeyError:
            pass
        for mirror in self.mirrors:
            if (not mirror.endswith('/')):
                mirror += '/'
            # ${SPKG} is substituted by the consumer with the tarball name.
            (yield (mirror + '/'.join(['spkg', 'upstream', '${SPKG}'])))
    @property
    def fastest(self):
        # First entry of the ranked iteration order.
        return next(iter(self))
|
class ProgressBar(object):
    """
    Progress bar as urllib reporthook

    Renders ``[`` followed by up to ``length`` dots and a closing ``]``
    on ``stream``.

    INPUT:

    - ``stream`` -- file-like object the bar is written to.
    - ``length`` -- integer (default: 70); total width in characters
      between the brackets.
    """

    def __init__(self, stream, length=70):
        self.length = length
        self.progress = 0
        self.stream = stream

    def start(self):
        # BUG FIX: this previously called the undefined global ``flush()``,
        # raising NameError. Reset the fill count (so the bar can be
        # restarted), then open the bar.
        self.progress = 0
        self.stream.write('[')
        self.stream.flush()

    def __call__(self, chunks_so_far, chunk_size, total_size):
        """urllib reporthook: advance the bar to the current download position."""
        if total_size == -1:
            # Unknown total size: jump to the middle once data arrives.
            n = 0 if chunks_so_far == 0 else self.length // 2
        else:
            n = (chunks_so_far * chunk_size * self.length) // total_size
        if n > self.length:
            # More data than advertised; never draw past the bar's end.
            return
        if n >= self.progress:
            self.stream.write('.' * (n - self.progress))
            self.stream.flush()
            self.progress = n

    def stop(self):
        """Fill the rest of the bar with dots and close it."""
        missing = '.' * (self.length - self.progress)
        self.stream.write(missing + ']\n')
        self.stream.flush()

    def error_stop(self):
        """Fill the rest of the bar with 'x' marks to signal failure."""
        missing = 'x' * (self.length - self.progress)
        self.stream.write(missing + ']\n')
        self.stream.flush()
|
class DownloadError(IOError):
    """Raised when an HTTP download fails (carries errcode, errmsg, url)."""
    pass
|
class Download(object):
    """
    Download URL

    Right now, only via HTTP

    This should work for FTP as well but, in fact, hangs on python <
    3.4, see http://bugs.python.org/issue16270

    INPUT:

    - ``url`` -- string. The URL to download.

    - ``destination`` -- string or ``None`` (default). The destination
      file name to save to. If not specified, the file is written to
      stdout.

    - ``progress`` -- boolean (default: ``True``). Whether to print a
      progress bar to stderr. For testing, this can also be a stream
      to which the progress bar is being sent.

    - ``ignore_errors`` -- boolean (default: ``False``). Catch network
      errors (a message is still being logged).
    """
    def __init__(self, url, destination=None, progress=True, ignore_errors=False):
        self.url = url
        self.destination = (destination or '/dev/stdout')
        self.progress = (progress is not False)
        # ``progress`` may itself be a stream (used for testing).
        self.progress_stream = (sys.stderr if isinstance(progress, bool) else progress)
        self.ignore_errors = ignore_errors
    def http_error_default(self, url, fp, errcode, errmsg, headers):
        """
        Callback for the URLopener to raise an exception on HTTP errors
        """
        fp.close()
        raise DownloadError(errcode, errmsg, url)
    def start_progress_bar(self):
        # Bar is created lazily so no stream is touched when disabled.
        if self.progress:
            self.progress_bar = ProgressBar(self.progress_stream)
            self.progress_bar.start()
    def success_progress_bar(self):
        if self.progress:
            self.progress_bar.stop()
    def error_progress_bar(self):
        if self.progress:
            self.progress_bar.error_stop()
    def run(self):
        """Perform the download; raises unless ``ignore_errors`` is set."""
        # NOTE(review): ``urllib.FancyURLopener`` here presumably comes
        # from a py2/py3 compatibility wrapper imported at file top.
        opener = urllib.FancyURLopener()
        opener.http_error_default = self.http_error_default
        self.start_progress_bar()
        try:
            if self.progress:
                (filename, info) = opener.retrieve(self.url, self.destination, self.progress_bar)
            else:
                (filename, info) = opener.retrieve(self.url, self.destination)
        except IOError as error:
            self.error_progress_bar()
            log.error(error)
            if (not self.ignore_errors):
                raise error
        # NOTE(review): when an error was ignored above, this still runs
        # and closes the bar a second time -- confirm this is intended.
        self.success_progress_bar()
|
class PackageClass(object):
    """
    Resolve package names and type designators (``:all:``, ``:standard:``,
    ``:optional:``, ``:experimental:``) into a sorted set of package names.

    Supported keyword filters: ``exclude``, ``include_dependencies``,
    ``exclude_dependencies``, ``has_files``, ``no_files``.
    """
    def __init__(self, *package_names_or_classes, **filters):
        self.__names = set()
        exclude = filters.pop('exclude', ())
        include_dependencies = filters.pop('include_dependencies', False)
        exclude_dependencies = filters.pop('exclude_dependencies', False)
        # Each filter entry may be a '|'-separated disjunction of filenames.
        filenames = filters.pop('has_files', [])
        no_filenames = filters.pop('no_files', [])
        excluded = []
        # ``exclude`` arrives as a list of lists (argparse action='append'
        # with nargs='*'); flatten it.
        for package_names in exclude:
            excluded.extend(package_names)
        if filters:
            raise ValueError('filter not supported')
        def included_in_filter(pkg):
            # Keep pkg iff every has-file disjunction matches ...
            if (not all((any((pkg.has_file(filename) for filename in filename_disjunction.split('|'))) for filename_disjunction in filenames))):
                return False
            # ... and no no-file disjunction matches.
            return (not any((any((pkg.has_file(filename) for filename in no_filename_disjunction.split('|'))) for no_filename_disjunction in no_filenames)))
        for package_name_or_class in package_names_or_classes:
            if (package_name_or_class == ':all:'):
                self._init_all(predicate=included_in_filter)
            elif (package_name_or_class == ':standard:'):
                self._init_standard(predicate=included_in_filter)
            elif (package_name_or_class == ':optional:'):
                self._init_optional(predicate=included_in_filter)
            elif (package_name_or_class == ':experimental:'):
                self._init_experimental(predicate=included_in_filter)
            else:
                # NOTE(review): this error message is missing a separator
                # before 'got' (it reads '...:experimental:got ...').
                if (':' in package_name_or_class):
                    raise ValueError('a colon may only appear in designators of package types, which must be one of :all:, :standard:, :optional:, or :experimental:got {}'.format(package_name_or_class))
                self.__names.add(package_name_or_class)
        def include_recursive_dependencies(names, package_name):
            # Depth-first walk; ``names`` doubles as the visited set.
            if (package_name in names):
                return
            try:
                pkg = Package(package_name)
            except FileNotFoundError:
                # Unknown packages (e.g. system-only names) are skipped.
                return
            names.add(package_name)
            for dependency in pkg.dependencies:
                include_recursive_dependencies(names, dependency)
        if include_dependencies:
            package_names = set()
            for name in self.__names:
                include_recursive_dependencies(package_names, name)
            self.__names = package_names
        def exclude_recursive_dependencies(names, package_name):
            try:
                pkg = Package(package_name)
            except FileNotFoundError:
                return
            for dependency in pkg.dependencies:
                names.discard(dependency)
                exclude_recursive_dependencies(names, dependency)
        if exclude_dependencies:
            for name in list(self.__names):
                exclude_recursive_dependencies(self.__names, name)
        # Explicit exclusions are applied last.
        self.__names.difference_update(excluded)
    @property
    def names(self):
        # Sorted list of the resolved package names.
        return sorted(self.__names)
    def _init_all(self, predicate):
        # All packages satisfying the file filters.
        self.__names.update((pkg.name for pkg in Package.all() if predicate(pkg)))
    def _init_standard(self, predicate):
        self.__names.update((pkg.name for pkg in Package.all() if ((pkg.type == 'standard') and predicate(pkg))))
    def _init_optional(self, predicate):
        self.__names.update((pkg.name for pkg in Package.all() if ((pkg.type == 'optional') and predicate(pkg))))
    def _init_experimental(self, predicate):
        self.__names.update((pkg.name for pkg in Package.all() if ((pkg.type == 'experimental') and predicate(pkg))))
    def apply(self, function, *args, **kwds):
        """Call ``function(package_name, *args, **kwds)`` for each package."""
        for package_name in self.names:
            function(package_name, *args, **kwds)
|
class FileServer(object):
    """
    Interface to the Sage fileserver where upstream tarballs are stored.
    """

    def __init__(self):
        self.user = 'sagemath'
        self.hostname = 'fileserver.sagemath.org'

    def _login(self):
        """Return the ``user@host`` ssh target for the fileserver."""
        return '{0}@{1}'.format(self.user, self.hostname)

    def upstream_directory(self, package):
        """
        Return the directory where the tarball resides on the server
        """
        return os.path.join('/', 'data', 'files', 'spkg', 'upstream', package.name)

    def upload(self, package):
        """
        Upload the current tarball of package
        """
        if (not package.tarball.is_distributable()):
            raise ValueError('Tarball of {} is marked as not distributable'.format(package))
        # Consistency fix: derive the ssh/rsync targets from the
        # configured self.user / self.hostname instead of duplicating
        # the hard-coded strings from __init__.
        subprocess.check_call([
            'ssh', self._login(),
            'mkdir -p {0} && touch {0}/index.html'.format(self.upstream_directory(package))])
        subprocess.check_call([
            'rsync', '-av', '--checksum', '-e', 'ssh -l {0}'.format(self.user),
            package.tarball.upstream_fqn,
            '{0}:{1}'.format(self.hostname, self.upstream_directory(package))])

    def publish(self):
        """
        Publish the files
        """
        subprocess.check_call(['ssh', self._login(), './publish-files.sh'])
|
class FileType(argparse.FileType):
    """
    Version of argparse.FileType with the option to ensure that the full path
    to the file exists.
    """

    def __init__(self, mode='r', makedirs=False):
        """
        INPUT:

        - ``mode`` -- file mode, as for :func:`open`
        - ``makedirs`` -- boolean; create missing parent directories
        """
        super(FileType, self).__init__(mode=mode)
        self._makedirs = makedirs

    def __call__(self, string):
        if (self._makedirs and (string != '-')):
            dirname = os.path.dirname(string)
            # BUG FIX: a bare filename has dirname == '' and
            # os.makedirs('') raises FileNotFoundError, which turned a
            # perfectly valid argument into an error. Only create a
            # directory when there actually is a directory component.
            if dirname:
                try:
                    os.makedirs(dirname)
                except OSError as exc:
                    # Already existing directories are fine; anything
                    # else becomes an argparse error.
                    if (not os.path.isdir(dirname)):
                        raise argparse.ArgumentTypeError("can't create '{0}': {1}".format(dirname, exc))
        return super(FileType, self).__call__(string)
|
class IntOrFileType(FileType):
    """
    Like FileType but also accepts an int (e.g. for a file descriptor).
    """

    def __call__(self, string):
        """Return ``int(string)`` when possible; otherwise defer to FileType."""
        try:
            value = int(string)
        except ValueError:
            value = super(IntOrFileType, self).__call__(string)
        return value
|
def run(argv=None):
    """
    Entry point for ``sage-flock``: take (or release) a lock on
    LOCK, then exec COMMAND while holding it.

    Returns 1 on an unexpected unlock failure; otherwise either
    replaces the current process via ``os.execvp`` or returns ``None``.
    """
    parser = argparse.ArgumentParser(description=__doc__)
    group = parser.add_mutually_exclusive_group()
    group.add_argument('-s', '--shared', action='store_true', help='create a shared lock')
    group.add_argument('-x', '--exclusive', action='store_true', help='create an exclusive lock (the default)')
    group.add_argument('-u', '--unlock', action='store_true', help='remove an existing lock')
    # LOCK may be a filename (created with parents if needed) or an
    # integer file descriptor.
    parser.add_argument('lock', metavar='LOCK', type=IntOrFileType('w+', makedirs=True), help='filename of the lock an integer file descriptor')
    parser.add_argument('command', metavar='COMMAND', nargs=argparse.REMAINDER, help='command to run with the lock including any arguments to that command')
    args = parser.parse_args(argv)
    if args.shared:
        locktype = fcntl.LOCK_SH
    elif args.unlock:
        locktype = fcntl.LOCK_UN
    else:
        # Exclusive is the default lock type.
        locktype = fcntl.LOCK_EX
    lock = args.lock
    command = args.command
    if (isinstance(lock, int) and command):
        parser.error('sage-flock does not accept a command when passed a file descriptor number')
    # First try a non-blocking lock; if that fails, tell the user we are
    # waiting and then block until the lock becomes available.
    try:
        fcntl.flock(lock, (locktype | fcntl.LOCK_NB))
    except IOError as exc:
        if (locktype == fcntl.LOCK_SH):
            kind = 'shared'
        elif (locktype == fcntl.LOCK_UN):
            # Unlocking should never block, so any failure is unexpected.
            sys.stderr.write('Unexpected error trying to unlock fd: {0}\n'.format(exc))
            return 1
        else:
            kind = 'exclusive'
        sys.stderr.write('Waiting for {0} lock to run {1} ... '.format(kind, ' '.join((pipes.quote(arg) for arg in command))))
        fcntl.flock(lock, locktype)
        sys.stderr.write('ok\n')
    # Replace this process with COMMAND; the lock file descriptor is
    # inherited across exec, so the lock stays held while it runs.
    if (not (args.unlock or isinstance(lock, int))):
        os.execvp(command[0], command)
|
def check_lib_auditwheel(f, verbose=False):
    """
    Check that every shared library needed by ``f`` can be resolved.

    Uses auditwheel's ``lddtree`` and raises ``RuntimeError`` for the
    first dependency whose real path cannot be found.
    """
    from auditwheel.lddtree import lddtree
    needed = lddtree(f)['libs']
    for lib, info in needed.items():
        realpath = info['realpath']
        if verbose:
            print('- {0}: {1}'.format(lib, realpath), file=sys.stderr)
        if realpath is None:
            raise RuntimeError('Shared library {0} needed by {1} is not found'.format(lib, f))
|
def installcheck(spkg_name, sage_local, verbose=False):
    """
    Given a package name and path to an installation tree (SAGE_LOCAL or SAGE_VENV),
    check the installation of the package in that tree.

    Shared libraries and wheels listed in the package's installation
    manifest are checked for unresolvable library dependencies, using
    ``delocate`` on macOS and ``auditwheel`` elsewhere.  Missing
    checker tools only produce warnings, never failures.
    """
    # Locate the newest stamp file for the package (sorted by mtime).
    spkg_inst = pth.join(sage_local, 'var', 'lib', 'sage', 'installed')
    pattern = pth.join(spkg_inst, '{0}-*'.format(spkg_name))
    stamp_files = sorted(glob.glob(pattern), key=pth.getmtime)
    if stamp_files:
        stamp_file = stamp_files[(- 1)]
    else:
        stamp_file = None
    spkg_meta = {}
    if stamp_file:
        try:
            with open(stamp_file) as f:
                spkg_meta = json.load(f)
        except (OSError, ValueError):
            # Unreadable or non-JSON stamp file; reported below.
            pass
    if ('files' not in spkg_meta):
        if stamp_file:
            print("Old-style or corrupt stamp file '{0}'".format(stamp_file), file=sys.stderr)
        else:
            print("Package '{0}' is currently not installed in '{1}'".format(spkg_name, sage_local), file=sys.stderr)
    else:
        files = spkg_meta['files']
        for f in files:
            # Manifest paths are relative to the installation tree.
            f = os.path.join(sage_local, f)
            if f.endswith(('.so', '.dylib')):
                if verbose:
                    print("Checking shared library file '{0}'".format(f), file=sys.stderr)
                if (sys.platform == 'darwin'):
                    try:
                        from delocate.libsana import _tree_libs_from_libraries, _filter_system_libs
                    except ImportError:
                        warnings.warn('delocate is not available, so nothing is actually checked')
                    else:
                        # Raises if a non-system dependency cannot be resolved.
                        _tree_libs_from_libraries([f], lib_filt_func=_filter_system_libs, copy_filt_func=(lambda path: True))
                else:
                    try:
                        check_lib_auditwheel(f, verbose=False)
                    except ImportError:
                        warnings.warn('auditwheel is not available, so nothing is actually checked')
            elif f.endswith('-any.whl'):
                # Pure-Python wheel: nothing platform-specific to check.
                pass
            elif f.endswith('.whl'):
                if verbose:
                    print("Checking wheel file '{0}'".format(f), file=sys.stderr)
                if (sys.platform == 'darwin'):
                    try:
                        from delocate import wheel_libs
                    except ImportError:
                        warnings.warn('delocate is not available, so nothing is actually checked')
                    else:
                        wheel_libs(f)
                else:
                    try:
                        from delocate.tmpdirs import TemporaryDirectory
                        from delocate.tools import zip2dir
                    except ImportError:
                        warnings.warn('delocate is not available, so nothing is actually checked')
                    else:
                        try:
                            # Unpack the wheel and check every extension module in it.
                            with TemporaryDirectory() as tmpdir:
                                zip2dir(f, tmpdir)
                                for (dirpath, dirnames, basenames) in os.walk(tmpdir):
                                    for base in basenames:
                                        if base.endswith('.so'):
                                            depending_path = os.path.realpath(os.path.join(dirpath, base))
                                            check_lib_auditwheel(depending_path, verbose=False)
                        except ImportError:
                            warnings.warn('auditwheel is not available, so nothing is actually checked')
|
def dir_type(path):
    """
    A custom argument 'type' for directory paths.

    An empty path or a path naming an existing directory is returned
    unchanged; anything else raises ``argparse.ArgumentTypeError``.
    """
    if not path:
        return path
    if pth.isdir(path):
        return path
    raise argparse.ArgumentTypeError("'{0}' is not a directory".format(path))
|
def spkg_type(pkg):
    """
    A custom argument 'type' for spkgs--checks whether the given package name
    is a known spkg.

    A name is "known" when a directory of that name exists under ``PKGS``;
    unknown names raise ``argparse.ArgumentTypeError``.
    """
    candidate = pth.join(PKGS, pkg)
    if pth.isdir(candidate):
        return pkg
    raise argparse.ArgumentTypeError(
        "'{0}' is not an spkg listed in '{1}'".format(pkg, PKGS))
|
def make_parser():
    '''Returns the command-line argument parser for sage-spkg-installcheck.

    The first line of the module docstring becomes the parser description
    and the remaining lines its epilog.
    '''
    doc_lines = __doc__.strip().splitlines()
    parser = argparse.ArgumentParser(
        description=doc_lines[0],
        epilog='\n'.join(doc_lines[1:]).strip(),
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('spkg', type=spkg_type, help='the spkg to check')
    parser.add_argument('sage_local', type=dir_type, nargs='?',
                        default=os.environ.get('SAGE_LOCAL'),
                        help='the path of the installation tree (default: the $SAGE_LOCAL environment variable if set)')
    # Fixed copy-pasted help text (previously described removing files,
    # which belongs to the uninstall script, not this check script).
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='verbose output showing all files checked')
    # Hidden flag: kept out of --help output on purpose.
    parser.add_argument('--debug', action='store_true', help=argparse.SUPPRESS)
    return parser
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.