code stringlengths 17 6.64M |
|---|
class SympySGD(SympyPredictingOptimizer):
    """Symbolic SGD-with-momentum optimizer.

    Mirrors torch.optim.SGD's update rule using sympy symbols so the
    closed-form expressions for theta and the momentum buffer v can be
    inspected after several steps.
    """
    collect_order = ['v', 'theta']

    def __init__(self):
        self.theta = Symbol('theta')
        self.grad = Symbol('g')
        self.weight_decay = 0
        self.momentum = Symbol('\\gamma')
        self.buff = Symbol('v')
        self.lr = Symbol('\\eta')
        self.timestep = 0

    def step(self):
        """One symbolic SGD-with-momentum update."""
        # Gradient symbol tagged with the current timestep.
        d_p = tplus_time(self.grad, self.timestep)
        if self.weight_decay != 0:
            d_p = d_p + self.weight_decay * self.theta
        self.buff = self.buff * self.momentum + d_p
        self.theta = self.theta - self.lr * self.buff
        self.timestep += 1

    def prediction(self, nsteps):
        """Predict (theta, v) after `nsteps` steps assuming zero gradients."""
        buff_hat, theta_hat = self.buff, self.theta
        for _ in range(nsteps):
            buff_hat = buff_hat * self.momentum  # d_p == 0 in prediction
            theta_hat = theta_hat - self.lr * buff_hat
        return (theta_hat, buff_hat)
|
class WDSympySGD(SympySGD):
    """SympySGD variant with a symbolic (non-zero) weight decay term."""

    def __init__(self):
        super().__init__()
        self.weight_decay = Symbol('\\lambda')
|
class WDSympySGDMsnag(WDSympySGD):
    """Weight-decayed SympySGD whose prediction feeds a known first gradient (msnag)."""
    collect_order = ['v', 'theta', '\\phi']

    def __init__(self):
        super().__init__()
        self.first_grad = Symbol('\\phi')

    def prediction(self, nsteps):
        """Predict (theta, v): the first predicted step sees `first_grad`, the rest zero."""
        buff_hat, theta_hat = self.buff, self.theta
        for i in range(1, nsteps + 1):
            # Only the very first predicted step has a (known) gradient.
            d_p = self.first_grad if i == 1 else 0
            if self.weight_decay != 0:
                d_p = d_p + self.weight_decay * theta_hat
            buff_hat = buff_hat * self.momentum + d_p
            theta_hat = theta_hat - self.lr * buff_hat
        return (theta_hat, buff_hat)
|
class SympyAdam(SympyPredictingOptimizer):
    """Symbolic Adam optimizer mirroring torch.optim.Adam's update rule.

    Maintains sympy expressions for theta and the moment estimates m and v,
    and exposes an approximate closed-form multi-step weight prediction.
    """
    collect_order = ['v', 'm', 'theta']

    def __init__(self):
        self.theta = Symbol('theta')
        self.grad = Symbol('g')
        self.weight_decay = 0
        (self.exp_avg, self.exp_avg_sq) = (Symbol('m'), Symbol('v'))
        (self.beta1, self.beta2) = (Symbol('\\beta_{1}'), Symbol('\\beta_{2}'))
        self.eps = Symbol('\\epsilon')
        self.lr = Symbol('\\eta')
        self.timestep = 0

    def step(self):
        """One symbolic, bias-corrected Adam update (as in torch)."""
        d_p = tplus_time(self.grad, self.timestep)
        self.timestep += 1
        bias_correction1 = (1 - (self.beta1 ** self.timestep))
        bias_correction2 = (1 - (self.beta2 ** self.timestep))
        self.exp_avg = ((self.beta1 * self.exp_avg) + ((1 - self.beta1) * d_p))
        self.exp_avg_sq = ((self.beta2 * self.exp_avg_sq) + ((1 - self.beta2) * (d_p ** 2)))
        denom = ((sympy.sqrt(self.exp_avg_sq) / sympy.sqrt(bias_correction2)) + self.eps)
        step_size = (self.lr / bias_correction1)
        self.theta = (self.theta - (step_size * (self.exp_avg / denom)))

    def prediction(self, nsteps):
        """Predict (theta, m) after `nsteps` zero-gradient steps.

        Accumulates the per-step coefficient sqrt(bc2)/bc1 * beta1**i into a
        single multiplier of m / (sqrt(v) + eps).
        """
        timestep = self.timestep
        beta1 = self.beta1
        # BUG FIX: was `beta2 = self.beta1` (copy-paste), which computed
        # bias_correction2 with the wrong beta.
        beta2 = self.beta2
        exp_avg = self.exp_avg
        exp_avg_sq = self.exp_avg_sq
        eps = self.eps
        lr = self.lr
        theta = self.theta
        momentum_coeff = 0
        for i in range(1, (nsteps + 1)):
            timestep += 1
            bias_correction1 = (1 - (beta1 ** timestep))
            bias_correction2 = (1 - (beta2 ** timestep))
            # m decays by beta1 each zero-gradient step, hence the beta1**i factor.
            momentum_coeff += ((sympy.sqrt(bias_correction2) / bias_correction1) * (beta1 ** i))
        a = (exp_avg / (sympy.sqrt(exp_avg_sq) + eps))
        theta = (theta - ((lr * momentum_coeff) * a))
        return (theta, exp_avg)
|
class NormalSympyAdam(SympyAdam):
    """SympyAdam whose prediction replays exact Adam steps with zero gradients."""

    def __init__(self):
        super().__init__()

    def prediction(self, nsteps):
        """Predict (theta, m) after `nsteps` exact zero-gradient Adam steps.

        Note: v (exp_avg_sq) stays fixed because the assumed gradient is zero.
        """
        d_p = 0  # assumed zero gradient over the predicted horizon
        timestep = self.timestep
        beta1 = self.beta1
        # BUG FIX: was `beta2 = self.beta1` (copy-paste), which computed
        # bias_correction2 with the wrong beta.
        beta2 = self.beta2
        exp_avg = self.exp_avg
        exp_avg_sq = self.exp_avg_sq
        eps = self.eps
        lr = self.lr
        theta = self.theta
        for i in range(1, (nsteps + 1)):
            timestep += 1
            bias_correction1 = (1 - (beta1 ** timestep))
            bias_correction2 = (1 - (beta2 ** timestep))
            exp_avg = ((beta1 * exp_avg) + ((1 - beta1) * d_p))
            denom = ((sympy.sqrt(exp_avg_sq) / sympy.sqrt(bias_correction2)) + eps)
            step_size = (lr / bias_correction1)
            theta = (theta - (step_size * (exp_avg / denom)))
        return (theta, exp_avg)
|
def run_sim(nsteps, optimizer_cls: SympyPredictingOptimizer=SympySGD, simplify=True):
    """Run a stepping optimizer next to a predicting one and collect the gaps.

    Returns (theta_true, theta_preds, gaps), one entry per staleness value.
    """
    true_opt = optimizer_cls()
    pred_opt = optimizer_cls()
    theta_true = []
    theta_preds = []
    for staleness in range(1, nsteps + 1):
        true_opt.step()
        theta_true.append(true_opt.theta)
        theta_hat, _ = pred_opt.prediction(staleness)
        theta_preds.append(theta_hat)
    gaps = [calc_gap(t, p, simplify=simplify) for t, p in zip(theta_true, theta_preds)]
    return (theta_true, theta_preds, gaps)
|
def display_sim_resuts(theta_true, theta_preds, gaps, displayer=pprint):
    """Display true thetas, predicted thetas and the gaps between them.

    Fix: replaced `list(map(displayer, ...))` — building a throwaway list
    purely for side effects — with plain loops.
    Note: function name kept as-is ("resuts") for caller compatibility.
    """
    print('True thetas:')
    for t in theta_true:
        displayer(t)
    print('Theta Predictions:')
    for p in theta_preds:
        displayer(p)
    print('Gaps')
    for g in gaps:
        displayer(g)
|
def run_and_display_sim(nsteps, optimizer_cls=SympySGD, displayer=pprint, simplify=True):
    """Run the staleness simulation and display its results."""
    results = run_sim(nsteps, optimizer_cls=optimizer_cls, simplify=simplify)
    theta_true, theta_preds, gaps = results
    display_sim_resuts(theta_true, theta_preds, gaps, displayer=displayer)
    return (theta_true, theta_preds, gaps)
|
class WeightStashingCachePolicy(Enum):
    """How often a fresh weight stash must be cloned (vs reusing a cached one)."""
    EVERY_BATCH = auto()  # take a fresh stash every batch
    STEP_EVERY = auto()  # reuse a stash within a step_every accumulation window
|
class WeightStasher():
    """Helper class to handle weight stashing.

    API:
        During the FWD pass: stash_current(idx)
        Before the BWD pass: pop_and_load_stashed_params(idx)
        After the BWD pass: caller switches back to true weights.

    # TODO: look to pipedream implementation and understand if they did something special with the weight decay.
    # TODO: think about batch norm, and similar "dangerous" layers.
    """

    def __init__(self, optimizer, stage_depth, pipeline_depth, step_every=1, has_weight_predictor=False, true_weights_storage=None, using_clone_weight_predictor=False):
        self.optimizer = optimizer
        # batch_index -> cloned parameter groups stashed at that batch's forward.
        self.theta_buffer = OrderedDict()
        # batch_index -> True if the live weights changed since that stash was taken.
        self.dirty_mark = OrderedDict()
        self.micro_batch = OrderedDict()
        self.temporery_short_term_buff = []
        self.step_every = step_every
        self.is_problematic = False
        self.has_weight_predictor = has_weight_predictor
        self.using_clone_weight_predictor = using_clone_weight_predictor
        self.true_weights_storage = true_weights_storage
        # With gradient accumulation, stashes can be cached/reused within a window.
        if (self.step_every > 1):
            self.make_weight_stashing_use_cache(stage_depth, pipeline_depth)

    def _weight_stashng_cache_policy(self):
        # Weight prediction invalidates cached stashes, so only plain
        # accumulation (step_every > 1, no predictor) uses STEP_EVERY.
        policy = WeightStashingCachePolicy.EVERY_BATCH
        if ((self.step_every > 1) and (not self.has_weight_predictor)):
            policy = WeightStashingCachePolicy.STEP_EVERY
        return policy

    def make_weight_stashing_use_cache(self, stage_depth, pipeline_depth, forward=True):
        # Replaces self.get_micro_batch_forward with a policy-specific bound method.
        policy = self._weight_stashng_cache_policy()
        self.is_problematic = True
        se = self.step_every
        if (policy == WeightStashingCachePolicy.STEP_EVERY):
            if (se >= stage_depth):
                def get_micro_batch(self, batch_index):
                    true_mb = (batch_index % se)
                    if (true_mb <= (stage_depth + (se - pipeline_depth))):
                        return true_mb
                    else:
                        return (true_mb - ((stage_depth + (se - pipeline_depth)) + 1))
            else:
                # Deep stages keep the default modulo mapping.
                return
        elif (policy == WeightStashingCachePolicy.EVERY_BATCH):
            def get_micro_batch(self, batch_index):
                return (batch_index if (batch_index < se) else 0)
        else:
            raise NotImplementedError()
        if forward:
            self.get_micro_batch_forward = types.MethodType(get_micro_batch, self)
        else:
            raise NotImplementedError()

    def get_micro_batch_forward(self, batch_index):
        # Default mapping; may be replaced by make_weight_stashing_use_cache.
        return (batch_index % self.step_every)

    def mark_stashed_as_dirty(self):
        # Call after the live weights change (e.g. after an optimizer step).
        for i in self.dirty_mark:
            self.dirty_mark[i] = True

    def _create_current_cloned_buff(self):
        # With a clone-based weight predictor the params can be aliased;
        # otherwise take detached clones so later updates don't mutate the stash.
        if self.using_clone_weight_predictor:
            with torch.no_grad():
                buff = [[p for p in pg['params']] for pg in self.optimizer.param_groups]
        else:
            with torch.no_grad():
                buff = [[p.detach().clone() for p in pg['params']] for pg in self.optimizer.param_groups]
        return buff

    def _is_current_last(self, batch_index):
        # True when batch_index is the most recently stashed batch.
        return (next(reversed(self.theta_buffer.keys())) == batch_index)

    def stash_current(self, batch_index, expected_updates):
        """Stash current weights if we expect updates.

        Also tracks "dirty-ness" of real weights w.r.t. the given batch_index.
        # TODO: option to set dirty right ahead!
        """
        if (expected_updates > 0):
            micro_batch = self.get_micro_batch_forward(batch_index)
            # Mid-window micro-batches try to reuse the previous batch's stash.
            buff = (self.theta_buffer.get((batch_index - 1), None) if (micro_batch > 0) else None)
            if (buff is None):
                buff = self._create_current_cloned_buff()
            elif (self.dirty_mark[(batch_index - 1)] and (not self.has_weight_predictor)):
                # Cached stash is stale: warn, re-align the micro-batch mapping so
                # this batch starts a new window, and take a fresh clone.
                # NOTE(review): `rank` is not defined in this class — presumably a
                # module-level global; verify.
                s = f'Attemted to use dirty buff as stash: rank: {rank} b:{batch_index}, mb:{micro_batch}, prev b:{(batch_index - 1)} expected_updates:{expected_updates}'
                warnings.warn(s)
                warnings.warn(f'replacing get mb function, making mb {micro_batch} 0')
                def get_micro_batch(self, batch_index):
                    return (((batch_index % self.step_every) - micro_batch) % self.step_every)
                self.get_micro_batch_forward = types.MethodType(get_micro_batch, self)
                buff = self._create_current_cloned_buff()
            self.theta_buffer[batch_index] = buff
            # With a predictor the live weights already differ from the stash.
            if self.has_weight_predictor:
                self.dirty_mark[batch_index] = True
            else:
                self.dirty_mark[batch_index] = False
        else:
            self.dirty_mark[batch_index] = False

    def _restore_from_buff(self, buff):
        # Copy stashed tensors back into the optimizer's parameters in place.
        with torch.no_grad():
            for (pg, cloned) in zip(self.optimizer.param_groups, buff):
                pgp = pg['params']
                for (p, bp) in zip(pgp, cloned):
                    p.data = bp.detach()

    def pop_and_load_stashed_params(self, batch_index):
        """Change weights back to the stashed weights for this batch.

        Pops the stashed weights from memory. (Used before backward.)
        """
        is_dirty = self.dirty_mark.pop(batch_index)
        if is_dirty:
            # Preserve the true weights before overwriting with the stash.
            self.true_weights_storage.create_cloned_if_needed()
            buff = self.theta_buffer.pop(batch_index)
            self._restore_from_buff(buff)
            self.true_weights_storage.record_change_mode('stashed')
        else:
            # Clean weights imply nothing was stashed for this batch.
            assert (batch_index not in self.theta_buffer)

    def get_stashed_buff(self, batch_index, default=None):
        # Non-destructive lookup of a stash.
        return self.theta_buffer.get(batch_index, default)

    def pop_stashed_buff(self, batch_index):
        # Destructive lookup; returns None when no stash exists.
        return self.theta_buffer.pop(batch_index, None)
|
def _get_num_unique_gpus(args):
if (not hasattr(args, 'stage_to_device_map')):
raise ValueError('Need stage_to_device_map to infer number of GPUs')
else:
n_unique_gpus = len(set(args.stage_to_device_map))
return n_unique_gpus
|
def _get_supremum_staleness(args):
supremum_staleness = getattr(args, 'supremum_staleness', None)
if (supremum_staleness == 'auto'):
supremum_staleness = _get_num_unique_gpus(args)
print(f'-I- auto inferred supremum_staleness of {supremum_staleness}')
elif (supremum_staleness is not None):
assert isinstance(supremum_staleness, int)
if ((supremum_staleness is not None) and (supremum_staleness > (- 1))):
print(f'-I- using supremum_staleness of {supremum_staleness}')
else:
print(f'-I- using unlimited supremum_staleness. Staleness with be determined by work scheduler.')
raise NotImplementedError()
return supremum_staleness
|
def get_work_scheduler(args, pipe_config: Optional[PipelineConfig]=None) -> WorkScheduler:
    """Build the configured work scheduler, wiring extra kwargs for virtual stages."""
    name = args.work_scheduler.lower()
    extra = {}
    if name == 'virtual_stages_1f1b':
        # This scheduler additionally needs GPU count, staleness bound and depth.
        extra['num_gpus'] = _get_num_unique_gpus(args)
        extra['supremum_staleness'] = _get_supremum_staleness(args)
        if pipe_config is None:
            raise ValueError()
        extra['pipeline_depth'] = pipe_config.pipeline_depth
    return AVAILABLE_WORK_SCHEDULERS.get(name)(args.step_every, **extra)
|
def get_fwd_bwd_string_for_stage(stage, scheduler: WorkScheduler, num_stages, num_batches) -> str:
    """Simulate the schedule for `stage` and render it as a string of 'F'/'B'.

    Repeatedly asks `scheduler` whether the stage should run a forward next;
    otherwise a backward is recorded.
    Fix: removed the unused local `original_stage` (dead store).
    """
    f = 0  # forwards done
    b = 0  # backwards done
    s = ''
    stage_depth = ((num_stages - stage) - 1)
    # Schedulers with virtual stages remap the real depth before scheduling.
    if hasattr(scheduler, 'get_virtual_stage_depth'):
        stage_depth = scheduler.get_virtual_stage_depth(stage_depth)
    while (b < num_batches):
        if scheduler(stage_depth, num_stages, num_batches, f, b):
            s += 'F'
            f += 1
            # The depth-0 stage does its backward right after each forward,
            # except under GPipe which batches all backwards together.
            if ((stage_depth == 0) and (not isinstance(scheduler, GpipeScheduler))):
                s += 'B'
                b += 1
        else:
            s += 'B'
            b += 1
    scheduler.reset()
    return s
|
def get_fwds_between_1st_and_2nd_step_from_str(s: str, step_every) -> List[int]:
    """Return the forward indices issued between the 1st and 2nd optimizer step.

    A step happens after every `step_every` backwards ('B' in the string).
    """
    b_positions = [m.start() for m in re.finditer('B', s)]
    first_step_pos = b_positions[step_every - 1]
    second_step_pos = b_positions[2 * step_every - 1]
    fwds_before_first = Counter(s[:first_step_pos])['F']
    fwds_before_second = Counter(s[:second_step_pos])['F']
    return list(range(fwds_before_first, fwds_before_second))
|
def get_fwds_between_first_and_seconds_step_for_stage(scheduler: WorkScheduler, stage, num_stages, num_batches) -> Tuple[(List[int], bool)]:
    """Return the fwd indices between the first two steps and whether the pattern is problematic."""
    s = get_fwd_bwd_string_for_stage(stage, scheduler, num_stages, num_batches)
    step_every = scheduler.step_every
    if step_every == 1:
        print('-W- with step_every=1, all scheudlers are not problematic. Skipping check.')
        return ([], False)
    fwds = get_fwds_between_1st_and_2nd_step_from_str(s, step_every)
    print(stage, fwds)
    # One forward in the window is always fine; otherwise the window must
    # start on a step boundary to be unproblematic.
    is_problematic = (len(fwds) != 1) and ((fwds[0] % step_every) != 0)
    return (fwds, is_problematic)
|
def should_do_step(batch_idx, se) -> bool:
    """True when batch_idx is the last micro-batch of a `se`-sized accumulation window."""
    return (batch_idx % se) == (se - 1)
|
def expected_staleness(done_fwds, done_bwds, se) -> int:
    """Number of optimizer steps occurring between done_bwds and done_fwds."""
    # A step fires on the last micro-batch of each `se`-sized window.
    return sum(1 for x in range(done_bwds, done_fwds) if (x % se) == (se - 1))
|
def my_version(done_bwds, se) -> int:
    """Weight version (optimizer steps) completed after `done_bwds` backwards."""
    return sum(1 for i in range(done_bwds) if (i % se) == (se - 1))
|
def expected_version(done_fwds, done_bwds, se) -> Tuple[(int, int)]:
    """Pair of (current weight version, staleness expected by backward time)."""
    version_now = my_version(done_bwds, se)
    staleness = expected_staleness(done_fwds, done_bwds, se)
    return (version_now, staleness)
|
def backward_version(done_fwds, done_bwds, se) -> int:
    """Version of the weights that the backward issued at `done_fwds` will see."""
    current = my_version(done_bwds, se)
    return current + expected_staleness(done_fwds, done_bwds, se)
|
def get_staleness_for_stage(stage, scheduler: WorkScheduler, num_stages, num_batches, se) -> Dict[(int, Dict[(int, Any)])]:
    """Simulate the stage's schedule and record per-forward staleness bookkeeping.

    For every forward, records expected staleness (es), current version (mv),
    expected version pair (ev), predicted backward version (bv) and the
    schedule prefix (mys); each backward sanity-checks bv against the actual
    step count and raises AssertionError on mismatch.
    """
    s = get_fwd_bwd_string_for_stage(stage, scheduler, num_stages, num_batches)
    d = {}
    done_fwds = 0
    done_bwds = 0
    for c in s:
        if (c == 'F'):
            es = expected_staleness(done_fwds, done_bwds, se)  # steps between this fwd and its bwd
            mv = my_version(done_bwds, se)  # weight version seen by this fwd
            ev = expected_version(done_fwds, done_bwds, se)
            bv = (mv + es)  # version the matching backward should observe
            mys = s[:((done_fwds + done_bwds) + 1)]  # schedule prefix up to here
            d[done_fwds] = dict(es=es, mv=mv, ev=ev, bv=bv, mys=mys)
            done_fwds += 1
        if (c == 'B'):
            # Verify the prediction: actual steps so far must equal predicted bv.
            steps_so_far = (done_bwds // se)
            if (not (steps_so_far == d[done_bwds]['bv'])):
                raise AssertionError(f'''Stage:{stage}, batch:{done_bwds}, steps_so_far:{steps_so_far}, but predicted: {d[done_bwds]['bv']}.
Extra:
{pprint.pformat(d)},
{s[:((done_fwds + done_bwds) + 1)]}''')
            done_bwds += 1
    return d
|
def print_string_for_all_stages(num_stages, scheduler: WorkScheduler, num_batches):
    """Print the F/B schedule string of every stage and return them keyed by stage.

    Fix: `stage_strings` was built but never returned (dead store); returning
    it is backward-compatible since the function previously returned None,
    which no caller could meaningfully use.
    """
    stage_strings = dict()
    for stage in range(num_stages):
        print(f'Stage {stage}')
        s = get_fwd_bwd_string_for_stage(stage, scheduler, num_stages, num_batches)
        print(s)
        stage_strings[stage] = s
    print()
    return stage_strings
|
class WorkScheduler(abc.ABC):
    """Decides, per stage, whether the next unit of work is a forward pass."""

    def __init__(self, step_every, *args, **kw):
        # Number of micro-batches accumulated per optimizer step.
        self.step_every = step_every

    @abc.abstractmethod
    def __call__(self, stage_depth, pipeline_depth, num_batches, done_fwds, done_bwds) -> bool:
        # Return True to run a forward next, False to run a backward.
        raise NotImplementedError()

    def reset(self):
        # Hook for stateful schedulers; the default scheduler is stateless.
        pass
|
class FBScheduler(WorkScheduler):
    """Depth-aware 1F1B scheduler.

    Note: this is not like the scheduler in pipedream. In pipedream all
    partitions except the last do D forwards in "warmup state"; here every
    partition does a different number of forwards in "warmup state".
    """

    def __init__(self, *args, **kw):
        super().__init__(*args, **kw)

    def __call__(self, stage_depth, pipeline_depth, num_batches, done_fwds, done_bwds):
        assert 0 <= stage_depth < pipeline_depth
        # The deepest-progress (depth-0) stage always goes forward.
        if stage_depth == 0:
            return True
        # All forwards already issued: only backwards remain.
        if done_fwds == num_batches:
            return False
        # Cap in-flight micro-batches at the stage's depth.
        in_flight = done_fwds - done_bwds
        return in_flight <= stage_depth
|
class VirtualStagesFBScheduler(FBScheduler):
    """FBScheduler over remapped "virtual" stage depths, capping staleness."""

    def __init__(self, *args, **kw):
        super().__init__(*args, **kw)
        # Required keyword arguments (KeyError here if missing).
        self.supremum_staleness = kw['supremum_staleness']
        self.num_gpus = kw['num_gpus']
        self.pipeline_depth = kw['pipeline_depth']

    def __call__(self, stage_depth, pipeline_depth, num_batches, done_fwds, done_bwds):
        """Requires conversion to virtual stage to be done by caller, e.g. by get_virtual_stage_depth."""
        assert (0 <= stage_depth < pipeline_depth)
        if (stage_depth == 0):
            return True
        if (done_fwds == num_batches):
            return False
        delta = (done_fwds - done_bwds)
        # Same 1F1B in-flight rule as FBScheduler, on the (capped) virtual depth.
        return (delta <= stage_depth)

    def get_virtual_stage_depth(self, stage_depth: int) -> int:
        # Caps the deepest stage at supremum_staleness - 1; each shallower
        # stage is one less, floored at 0.
        # NOTE(review): the recursion anchors at stage_depth == pipeline_depth,
        # although callers elsewhere pass depths in [0, pipeline_depth - 1] —
        # confirm the intended base case.
        if (stage_depth == self.pipeline_depth):
            return min(stage_depth, (self.supremum_staleness - 1))
        else:
            return max(0, (self.get_virtual_stage_depth((stage_depth + 1)) - 1))
|
class PipeDream1F1BScheduler(WorkScheduler):
    """1F1B with an explicit warmup phase, as in PipeDream.

    Keeps issuing forwards until `stage_depth` micro-batches are in flight,
    then alternates forwards and backwards.
    """

    def __init__(self, *args, **kw):
        super().__init__(*args, **kw)
        self.warmup = True  # stateful: tracks whether we're still in warmup

    def set_warmup(self, warmup=True):
        self.warmup = warmup

    def __call__(self, stage_depth, pipeline_depth, num_batches, done_fwds, done_bwds):
        assert (0 <= stage_depth < pipeline_depth)
        if (stage_depth == 0):
            return True
        if (done_fwds == num_batches):
            return False
        delta = (done_fwds - done_bwds)
        if (done_fwds == 0):
            # First call of a fresh run: re-enter warmup.
            self.warmup = True
        if (delta == stage_depth):
            # Pipeline is full: leave warmup, fall through to steady state.
            self.warmup = False
        if self.warmup:
            return True
        return (delta <= stage_depth)

    def reset(self):
        # Allow reuse of this scheduler instance across simulations/epochs.
        self.warmup = True
|
class SeqScheduler(WorkScheduler):
    """Fully sequential scheduler: forward only when no backward is pending."""

    def __init__(self, *args, **kw):
        super().__init__(*args, **kw)

    def __call__(self, stage_depth, pipeline_depth, num_batches, done_fwds, done_bwds):
        if stage_depth == 0:
            return True
        if done_fwds == num_batches:
            return False
        # At most one micro-batch in flight at a time.
        return done_fwds == done_bwds
|
class Synchronous1F1BScheduler(WorkScheduler):
    """"1f1b-gpipe": 1F1B within a batch, synchronized at batch boundaries.

    (First scheduler implemented in simulation, historically.)
    """

    def __init__(self, *args, **kw):
        super().__init__(*args, **kw)
        assert hasattr(self, 'step_every')

    def __call__(self, stage_depth, pipeline_depth, num_batches, done_fwds, done_bwds):
        mb = self.step_every  # micro-batches per batch
        if done_fwds == num_batches:
            return False
        same_batch = (done_fwds // mb) == (done_bwds // mb)
        if not same_batch:
            # A forward here would cross into the next batch: wait for backwards.
            return False
        return (stage_depth == 0) or ((done_fwds - done_bwds) <= stage_depth)
|
class GpipeScheduler(WorkScheduler):
    """GPipe scheduler with num_micro_batches = step_every (FFFFBBBB...).

    Supports a shorter "last batch".

    NOTE: user responsibility to check that
      (1) last_batch_size % (normal_batch_size // step_every) == 0
      (2) normal_batch_size % step_every == 0
    This can easily be done with a dataloader set to the given
    micro_batch_size, i.e. (normal_batch_size // step_every).
    """

    def __init__(self, *args, **kw):
        super().__init__(*args, **kw)
        assert hasattr(self, 'step_every')

    def __call__(self, stage_depth, pipeline_depth, num_batches, done_fwds, done_bwds):
        if done_fwds == num_batches:
            return False
        mb = self.step_every
        # Forward only while fwd and bwd progress are in the same batch.
        return (done_fwds // mb) == (done_bwds // mb)
|
class SmallerLastBatchPolicy(Enum):
    """Policy for a final batch with fewer micro-batches than step_every."""
    ProportionalStep = auto()  # still step, scaled proportionally
    DropReminder = auto()  # drop the remainder (name kept as-is for compat)
|
def is_huggingface_transformer(args):
    """True when args flags a HF transformer, or the model is a known HF model."""
    if getattr(args, 'is_huggingface_transformer', False):
        return True
    known_models = pipe.models.transformers_cfg.MODEL_TOKENIZER_AND_CONFIG_FUNCTIONS.keys()
    return args.model in known_models
|
def create_comm_handler(args, comm_init_args, device) -> CommunicationHandlerBase:
    """Instantiate the communication handler matching the distributed backend."""
    handler_cls = get_auto_comm_handler_cls(args.distributed_backend, args.cpu)
    return handler_cls(
        args.rank, args.local_rank, args.distributed_backend, args.world_size,
        args.num_stages, args.stage, *comm_init_args, args.cpu, args.num_chunks,
        device, GRAD_UGLY_SHAMEFUL_NAME='_grad',
        verbose=getattr(args, 'verbose_comm', False))
|
def create_comm_handler_v2(args, comm_init_args, device, v2_args) -> CommunicationHandlerBase:
    """Instantiate the multiprocessing communication handler (v2 path).

    Consistency fix: `verbose_comm` is now read with getattr(..., False) like
    create_comm_handler, instead of the equivalent but noisier hasattr ternary.
    """
    handler_cls = MultiprocessingCommunicationHandler
    comm_handler = handler_cls(
        *v2_args, args.rank, args.local_rank, args.distributed_backend,
        args.world_size, args.num_stages, args.stage, *comm_init_args,
        args.cpu, args.num_chunks, device, GRAD_UGLY_SHAMEFUL_NAME='_grad',
        verbose=getattr(args, 'verbose_comm', False))
    return comm_handler
|
def get_lr_scheduler(args, optimizer):
    """Build the configured LR scheduler, or None when args has no `lr_scheduler`."""
    if not hasattr(args, 'lr_scheduler'):
        return None
    attr = getattr(args, 'lr_scheduler')
    # Normalize args (epochs→steps etc.) before instantiating.
    preproc_lr_scheduler_args(args)
    scheduler_cls = get_lr_scheduler_class(args)
    return scheduler_cls(optimizer, **attr['args'])
|
def preproc_lr_scheduler_args(args):
    """Normalize lr_scheduler args in place.

    Supported preproc commands:
      - 'epochs_to_steps': convert an epoch count to a step count.
      - 'ratio_from_num_training_steps': convert a ratio to warmup steps.
    """
    attr = getattr(args, 'lr_scheduler')
    preproc = attr.get('preproc_args', None)
    if not preproc:
        return

    def _epochs_to_steps(arg_name):
        # Convert a per-epoch limit into an absolute step count.
        if args.steps > 0:
            raise NotImplementedError('Expected to be limited by number of epochs')
        given_epochs = attr['args'][arg_name]
        if given_epochs < 0:
            # Negative means "use the global epoch budget".
            if args.epochs < 0:
                raise ValueError('Expected a concrete number of epochs')
            given_epochs = args.epochs
        num_steps = args.steps_per_epoch * given_epochs
        attr['args'][arg_name] = num_steps
        print(f'preprocessed {arg_name} from {given_epochs} epochs to {num_steps} steps.')

    def _ratio_to_warmup_steps(arg_name):
        # Convert a fraction of the training run into warmup steps.
        num_steps = attr['args']['num_training_steps']
        given_ratio = attr['args'][arg_name]
        assert 0 <= given_ratio <= 1
        warmup_steps = int(given_ratio * num_steps)
        attr['args'][arg_name] = warmup_steps
        print(f'preprocessed {arg_name} from ratio {given_ratio} to {warmup_steps} steps.')

    for arg_name, command in preproc.items():
        if command == 'epochs_to_steps':
            _epochs_to_steps(arg_name)
        elif command == 'ratio_from_num_training_steps':
            _ratio_to_warmup_steps(arg_name)
        else:
            raise NotImplementedError(f'Unsupported preprocess argument {command}')
|
def get_lr_scheduler_class(args):
    """Resolve the scheduler class: project-custom schedulers first, then torch's."""
    sched_type = getattr(args, 'lr_scheduler')['type']
    custom = pipe.optimizers.lr_scheduler.ADDITIONAL_AVAILABLE_LR_SCHEDULERS
    if sched_type in custom:
        return custom[sched_type]
    return getattr(torch.optim.lr_scheduler, sched_type)
|
def get_sched_aware_stuff(args):
    """Return (scheduler_class, scheduler_kwargs) for sched-aware weight prediction."""
    scheduler_cls = get_lr_scheduler_class(args)
    scheduler_kw = getattr(args, 'lr_scheduler')['args']
    return (scheduler_cls, scheduler_kw)
|
def get_gap_aware(args, optimizer):
    """Create a gap-aware wrapper around `optimizer`, or None when not configured."""
    if not hasattr(args, 'gap_aware'):
        return None
    gap_aware_args = getattr(args, 'gap_aware')['args']
    optimizer_type = getattr(args, 'optimizer')['type']
    # Only 'sgd1', or any optimizer combined with weight stashing, is supported.
    if optimizer_type != 'sgd1' and not getattr(args, 'weight_stashing', False):
        raise NotImplementedError()
    if 'sgd' in optimizer_type:
        gap_aware_cls = get_sgd_gap_aware_cls(optimizer_type)
    elif optimizer_type == 'adam':
        gap_aware_cls = get_adam_gap_aware_cls()
    elif optimizer_type == 'adamw':
        gap_aware_cls = get_adamw_gap_aware_cls()
    elif optimizer_type == 'adafactor':
        raise NotImplementedError('WIP')
    else:
        raise NotImplementedError
    return gap_aware_cls(optimizer, **gap_aware_args)
|
def try_replace_prediction_with_nesterov(args):
    """For SGD on the last partition: swap weight prediction for optimizer-level Nesterov.

    Mutates `args` in place: enables `nesterov` in the optimizer args, disables
    nag_with_predictor, stashes the removed `weight_prediction` config via
    ArgsStasher, and finally deletes `args.weight_prediction`.
    """
    optimizer_type = getattr(args, 'optimizer')['type']
    if (('sgd' in optimizer_type) and getattr(args, 'nesterov_set_for_last_partition', False)):
        tmp = args.optimizer['args']
        # Only act if nesterov isn't already enabled.
        if (not tmp.get('nesterov', False)):
            pred = getattr(args, 'weight_prediction', None)
            if (pred is not None):
                tmp['nesterov'] = True
                pred['args']['nag_with_predictor'] = False
                # Mark so the replacement is recorded on args.
                args.nesterov_set_for_last_partition = True
                print('-I- Setting nesterov=True for last partition')
                # Stash the removed config so it can be restored/inspected later.
                res = getattr(args, 'weight_prediction')
                ArgsStasher.stash_to_args(args, replaced_key='weight_prediction', old_value=res)
                delattr(args, 'weight_prediction')
|
def get_weight_predictor(args, optimizer, scheduler=None, true_weights_storage=None):
    """Build the configured weight predictor.

    Returns:
        (weight_predictor, nag_with_predictor), or (None, None) when weight
        prediction is not configured on args.
    """
    assert true_weights_storage is not None
    if not hasattr(args, 'weight_prediction'):
        return (None, None)
    optimizer_type = getattr(args, 'optimizer')['type']
    pred = getattr(args, 'weight_prediction')
    pred_mem = pred['args']['pred_mem']
    pred_type = pred['type']
    nag_with_predictor = pred['args'].get('nag_with_predictor', False)
    # Validate the supported configuration space.
    assert pred_mem in {'clone', 'calc'}
    assert pred_type in {'msnag', 'aggmsnag'}
    assert ('sgd' in optimizer_type) or ('adam' in optimizer_type)
    sched_predictor = get_sched_aware_predictor(args, optimizer, scheduler)
    weight_predictor = get_weight_predictor_partial(
        optimizer_type, pred_mem, pred_type, optimizer,
        scheduler=sched_predictor, nag_with_predictor=nag_with_predictor,
        true_weights_storage=true_weights_storage,
        sched_predictor=sched_predictor)
    assert weight_predictor is not None
    return (weight_predictor, nag_with_predictor)
|
def get_sched_aware_predictor(args, optimizer, scheduler):
    """Build a scheduler-aware step predictor when configured, else None."""
    pred = getattr(args, 'weight_prediction')
    if not pred['args'].get('sched_aware', False):
        return None
    optimizer_type = getattr(args, 'optimizer')['type']
    print('-I- using sched aware weight prediction')
    assert scheduler is not None
    sched_aware_stuff = get_sched_aware_stuff(args)
    assert sched_aware_stuff is not None
    scheduler_class, scheduler_kw = sched_aware_stuff
    sched_predictor = get_sched_predictor(optimizer, scheduler_class, **scheduler_kw)
    sched_predictor.patch_scheduler(scheduler)
    # Sched-aware prediction is only supported for Adam-family optimizers.
    assert 'adam' in optimizer_type
    return sched_predictor
|
def get_ngpus_per_node(args):
    """Per-node GPU counts; splits world_size evenly when not given explicitly."""
    nnodes = args.nnodes
    if hasattr(args, 'ngpus_per_node'):
        ngpus_per_node = args.ngpus_per_node
    else:
        # Only an even split is supported when the counts aren't explicit.
        if args.world_size % nnodes != 0:
            raise NotImplementedError()
        ngpus_per_node = [args.world_size // nnodes] * nnodes
    assert len(ngpus_per_node) == nnodes
    return ngpus_per_node
|
def get_device_for_rank(args, rank, local_rank):
    """Resolve the torch.device for a global rank.

    With an explicit stage_to_device_map the global GPU id is translated to a
    node-local id by subtracting the GPU counts of preceding nodes; otherwise
    local_rank is used directly. CPU mode ignores the computed id.
    """
    nnodes = args.nnodes
    ngpus_per_node = get_ngpus_per_node(args)
    if hasattr(args, 'stage_to_device_map'):
        stage_to_device_map = args.stage_to_device_map
        # NOTE(review): indexed by rank, not stage — confirm the map is rank-aligned.
        cuda_device_id = stage_to_device_map[rank]
        if (nnodes > 1):
            for (node_idx, x) in enumerate(ngpus_per_node):
                if (cuda_device_id >= x):
                    # Device lives on a later node: skip past this node's GPUs.
                    cuda_device_id -= x
                else:
                    break
            else:
                # for-else: loop exhausted without finding the owning node.
                raise ValueError(f"Can't determine device index. rank={rank}, stage_to_device_map={stage_to_device_map}, global_device_id={cuda_device_id}, nnodes={nnodes}, ngpus_per_node={ngpus_per_node}")
        local_device_id = cuda_device_id
    else:
        local_device_id = local_rank
    device = torch.device(('cpu' if args.cpu else f'cuda:{local_device_id}'))
    return device
|
def get_rank_to_device_map(args):
    """Map every global rank to its torch device."""
    if args.nnodes == 1:
        local_ranks = list(range(args.world_size))
    else:
        # Local ranks restart at 0 on every node.
        local_ranks = []
        for node_gpus in get_ngpus_per_node(args):
            local_ranks.extend(range(node_gpus))
    return {
        rank: get_device_for_rank(args, rank, local_rank)
        for rank, local_rank in zip(range(args.world_size), local_ranks)
    }
|
def test_rank_to_device_map(world_size=8, nnodes=1, cpu=False):
    """Smoke-test helper: print the rank->device map for a fake args object."""
    from types import SimpleNamespace
    fake_args = SimpleNamespace(cpu=cpu, world_size=world_size, nnodes=nnodes)
    print(get_rank_to_device_map(fake_args))
|
def hack_trainer_type_to_gap_aware(args, stage_depth=None):
    """Replace args.trainer['type'] with its '_gap_aware' variant per parsed policy.

    SUPPORTED_POLICIES = {
        'almost_last_partition',
        'all_except_last',
        'all_except_last_two'
    }
    Returns True when the hack was applied for this stage, False otherwise.
    # TODO: policy for max delay 1
    # TODO: policy with staleness limit
    """
    def hack():
        # Mutates the trainer type in place.
        args.trainer['type'] += '_gap_aware'
    if hasattr(args, 'gap_aware'):
        if (stage_depth is None):
            # Fall back to inferring the stage position from ranks.
            # NOTE(review): assumes one stage per rank with the last rank being
            # the last stage — confirm.
            is_zero_staleness_stage = (args.local_rank == (args.world_size - 1))
            is_one_staleness_stage = (args.local_rank == (args.world_size - 2))
        else:
            is_zero_staleness_stage = (stage_depth == 0)
            is_one_staleness_stage = (stage_depth == 1)
        warnings.warn('Assuming no grad accumulation and no staleness limit...')
        if (args.gap_aware['policy'] == 'almost_last_partition'):
            if is_one_staleness_stage:
                hack()
                return True
        elif (args.gap_aware['policy'] == 'all_except_last'):
            if (not is_zero_staleness_stage):
                hack()
                return True
        elif (args.gap_aware['policy'] == 'all_except_last_two'):
            if ((not is_zero_staleness_stage) and (not is_one_staleness_stage)):
                hack()
                return True
        else:
            raise ValueError(f"Unknown policy for GA {args.gap_aware['policy']}. supported policies are {SUPPORTED_GAP_AWARE_POLICIES}")
    return False
|
def get_optimizer_cls(args, has_gap_aware):
    """Resolve the optimizer class; gap-aware adam/adamw need the '_record_step' variant."""
    optimizer_type = args.optimizer['type']
    if has_gap_aware and optimizer_type in {'adam', 'adamw'}:
        optimizer_type += '_record_step'
    optimizer_cls = AVAILBALE_OPTIMIZERS.get(optimizer_type)
    assert optimizer_cls is not None, f'{optimizer_type} not in {AVAILBALE_OPTIMIZERS.keys()}'
    return optimizer_cls
|
def tuplify(listything):
    """Recursively convert lists to tuples (dict values are converted too)."""
    if isinstance(listything, dict):
        return {key: tuplify(value) for key, value in listything.items()}
    if isinstance(listything, list):
        return tuple(tuplify(item) for item in listything)
    # Scalars and any other types pass through unchanged.
    return listything
|
def get_optimizer(args, optimizer_cls, parameters):
    """Instantiate optimizer_cls over `parameters` using args.optimizer['args'].

    Stateless stages get a single dummy parameter when 'allow_stateless' is set.
    """
    assert isinstance(parameters, list)
    if not parameters:
        if not getattr(args, 'allow_stateless', False):
            raise ValueError(f'Got stateless partition {args.stage}, if this is wanter, set "allow_stateless": true')
        warnings.warn('using a dummy parameter')
        parameters = torch.nn.ParameterList([torch.nn.Parameter(torch.randn(1))])
    # Lists in optimizer args become tuples (hashable / torch-friendly).
    tuplified_opt_args = tuplify(args.optimizer['args'])
    return optimizer_cls(parameters, **tuplified_opt_args)
|
def preproc_data(args, cache=None, save_cache=True):
    """Parse the partitioned model config, load the model and build dataloaders.

    Also sets args.num_stages / args.stage from the parsed config. Returns the
    (possibly newly created) model-handler cache for reuse across calls.
    """
    print(f'Loading partitioned model and dataset...')
    if (cache is None):
        handler = pipe.models.AVAILABLE_MODELS.get(args.model)
        if save_cache:
            cache = handler
    else:
        # Reuse a previously loaded handler.
        handler = cache
    parsed_config = parse_config.PartitioningConfigParser(args.model, args.rank, args.bs_train, args.bs_test, handler=handler, send_target_in_pipe=('_nonsep' in args.data_propagator), prefer_seq_sends=getattr(args, 'prefer_seq_sends', True))
    dataset_keywords = {}
    parsed_config.load_model(handler=handler, bs_train=args.bs_train, rank=args.rank)
    # Model-specific extras (e.g. tokenizers) feed into dataset construction.
    extra_kw = handler.get_extra()
    if isinstance(extra_kw, dict):
        dataset_keywords.update(extra_kw)
    del handler
    pipe_config = parsed_config.pipe_config
    args.num_stages = parsed_config.num_stages
    args.stage = parsed_config.stage_id
    # Function-level import — presumably to avoid a circular import at module
    # load time; confirm.
    from pipe.data import get_dataloaders
    get_dataloaders(args, pipe_config=pipe_config, dataset_keywords=dataset_keywords)
    return cache
|
def prepare_pipeline(args, shared_ctx=None, comm_version=1):
is_gpipe = ('gpipe' == args.work_scheduler.lower())
if args.is_multiprocessing_worker:
comm_version = 2
local_rank_to_device_map = get_rank_to_device_map(args)
device = local_rank_to_device_map[args.local_rank]
if (not args.cpu):
torch.cuda.set_device(device)
print(f'Loading partitioned model and dataset...')
handler = pipe.models.AVAILABLE_MODELS.get(args.model)
parsed_config = parse_config.PartitioningConfigParser(args.model, args.rank, args.bs_train, args.bs_test, handler=handler, send_target_in_pipe=('_nonsep' in args.data_propagator), prefer_seq_sends=getattr(args, 'prefer_seq_sends', True))
pipe_config = parsed_config.pipe_config
args.num_stages = parsed_config.num_stages
args.stage = parsed_config.stage_id
comm_init_args = parsed_config.comm_init_args()
assert ((args.epochs >= 1) or (args.steps >= 1))
assert (not (args.stage is None))
logger = FileLogger(args.logdir, global_rank=args.rank, local_rank=args.local_rank, name='msnag', world_size=args.world_size, name_prefix=args.out_filename)
if (comm_version == 1):
comm_handler = create_comm_handler(args, comm_init_args, device)
comm_handler.init_process_group()
elif (comm_version == 2):
stage_to_device_map = []
v2_args = (shared_ctx, stage_to_device_map, local_rank_to_device_map)
comm_handler = create_comm_handler_v2(args, comm_init_args, device, v2_args)
else:
raise NotImplementedError('In progress')
work_scheduler = get_work_scheduler(args, pipe_config=pipe_config)
dataset_keywords = {}
dataset_keywords['pipe_config'] = pipe_config
for i in range(args.world_size):
if getattr(args, 'load_model_one_by_one', False):
print(f'loading the model rank by rank to save host RAM {(i + 1)}/{args.world_size}')
torch.distributed.barrier()
if (i == args.rank):
parsed_config.load_model(handler=handler, bs_train=args.bs_train, rank=args.rank)
extra_kw = handler.get_extra()
if isinstance(extra_kw, dict):
dataset_keywords.update(extra_kw)
del handler
gc.collect()
training_tensor_dtypes = parsed_config.training_tensor_dtypes
eval_tensor_shapes = parsed_config.eval_tensor_shapes
training_tensor_shapes = parsed_config.training_tensor_shapes
model = parsed_config.model
model.device = device
stage_depth = pipe_config.get_depth_for_stage(args.stage)
pipeline_depth = pipe_config.pipeline_depth
args.pipeline_depth = pipeline_depth
is_last_partition = (args.stage == (args.num_stages - 1))
is_first_partition = (args.stage == 0)
is_zero_staleness_stage = ((stage_depth == 0) if (not is_gpipe) else True)
eval_tensor_dtypes = training_tensor_dtypes
from pipe.data import get_dataloaders
(train_dl, eval_dl, samplers, extra) = get_dataloaders(args, pipe_config=pipe_config, dataset_keywords=dataset_keywords)
del dataset_keywords
(last_batch_diff_eval, last_batch_diff_train, eval_dl_len, train_dl_len) = synchronize_dataloaders_length(args, is_first_partition, logger, eval_dl, train_dl)
if (last_batch_diff_train > 0):
last_batch_train_shapes = parsed_config.get_shapes(last_batch_diff_train)
else:
last_batch_train_shapes = None
if (last_batch_diff_eval > 0):
last_batch_eval_shapes = parsed_config.get_shapes(last_batch_diff_eval)
else:
last_batch_eval_shapes = None
if ((args.epochs > 0) and (args.steps < 0)):
steps_per_epoch = (train_dl_len // args.step_every)
if ((train_dl_len % args.step_every) > 0):
last_batch_smaller_n_micro_batches_policy = getattr(args, 'last_batch_smaller_n_micro_batches_policy', DEFAULT_STEP_EVERY_SMALLER_LAST_BATCH_POLICY)
if (last_batch_smaller_n_micro_batches_policy == SmallerLastBatchPolicy.ProportionalStep):
steps_per_epoch += 1
args.steps_per_epoch = steps_per_epoch
expected_training_steps = (steps_per_epoch * args.epochs)
elif ((args.epochs < 0) and (args.steps > 0)):
expected_training_steps = args.steps
else:
raise NotImplementedError('Missing steps or epochs limit')
args.expected_training_steps = expected_training_steps
buffers_ctx = (training_tensor_shapes, eval_tensor_shapes, training_tensor_dtypes, eval_tensor_dtypes, last_batch_train_shapes, last_batch_eval_shapes, args.max_buffers, args.keep_buffers_alive)
comm_handler.init_buffers_ctx(buffers_ctx)
partition_using_gap_aware = False
if (not is_gpipe):
partition_using_gap_aware = hack_trainer_type_to_gap_aware(args, stage_depth)
if partition_using_gap_aware:
logger.info(f'Stage {args.stage} will use Gap Aware')
trainer_cls = get_trainer_cls(args)
propagator_cls = get_propagator_cls(args)
optimizer_cls = get_optimizer_cls(args, partition_using_gap_aware)
statistics = get_statistics(args.statistics, is_last_partition=is_last_partition)
assert (not (statistics is None))
gap_aware_just_loss = False
if (not is_gpipe):
gap_aware_just_loss = getattr(args, 'gap_aware_just_loss', False)
if gap_aware_just_loss:
if is_zero_staleness_stage:
gap_aware_just_loss = False
elif args.no_recomputation:
raise NotImplementedError('gap_aware_just_loss works only with recomputation on')
if is_gpipe:
partition_mgr_cls = GPipePartitionManager
else:
partition_mgr_cls = SinglePartitionManager
partition_mgr_cls: Type[SinglePartitionManager]
partition = partition_mgr_cls(args.stage, stage_depth, pipeline_depth, args.num_stages, model, comm_handler, work_scheduler, device, is_last_partition, is_first_partition, log_frequency=args.log_frequency, step_every=args.step_every, use_recomputation=(not args.no_recomputation), gap_aware_just_loss=gap_aware_just_loss, weight_stashing_just_for_stats=getattr(args, 'weight_stashing_just_for_stats', False), disable_clone_inputs=args.is_multiprocessing_worker, req_grad=parsed_config.req_grad)
propagator = propagator_cls(device, is_last_partition, is_first_partition, args.stage, pipe_config)
partition.set_data_propagator(propagator)
if (hasattr(args, 'ddp_sim_num_gpus') and (args.ddp_sim_num_gpus > 1)):
print(f'-I- simulating DDP accuracy with {args.ddp_sim_num_gpus} (DDP) GPUs per stage')
dp_sim.convert_to_num_gpus(partition.partition, args.ddp_sim_num_gpus)
optimizer_grouped_parameters = get_optimizer_parameter_groups(args, partition)
if ((not is_gpipe) and is_zero_staleness_stage):
try_replace_prediction_with_nesterov(args)
optimizer = get_optimizer(args, optimizer_cls, optimizer_grouped_parameters)
if (0 < args.flush_rate < args.step_every):
raise NotImplementedError()
scheduler = get_lr_scheduler(args, optimizer)
trainer_kwds = dict(model=partition.partition, optimizer=optimizer, scheduler=scheduler, statistics=statistics, step_every=args.step_every)
trainer_kwds.update(args.trainer['args'])
if (issubclass(trainer_cls, GapAwareTrainerMixin) or getattr(trainer_cls, 'HAS_GAP_AWARE', False)):
gap_aware = get_gap_aware(args, optimizer)
trainer = trainer_cls(gap_aware, **trainer_kwds)
partition.set_gap_aware(gap_aware)
assert (not is_gpipe)
else:
trainer = trainer_cls(**trainer_kwds)
if extra:
extra(trainer)
partition.set_trainer(trainer)
partition.set_lr_scheduler(scheduler)
if (not is_gpipe):
true_weights_storage = TrueWeightsStorage(optimizer)
partition.set_true_weights_storage(true_weights_storage)
(weight_predictor, nag_with_predictor) = get_weight_predictor(args, optimizer, scheduler=scheduler, true_weights_storage=true_weights_storage)
if weight_predictor:
partition.set_weight_predictor(weight_predictor)
logger.info(f'Stage {args.stage} will use Weight Predictor')
if getattr(args, 'weight_stashing', False):
if (not is_zero_staleness_stage):
has_weight_predictor = (weight_predictor is not None)
if has_weight_predictor:
using_clone_weight_predictor = (args.weight_prediction['args']['pred_mem'] == 'clone')
else:
using_clone_weight_predictor = False
weight_stasher = WeightStasher(optimizer, stage_depth, pipeline_depth, step_every=args.step_every, has_weight_predictor=has_weight_predictor, true_weights_storage=true_weights_storage, using_clone_weight_predictor=using_clone_weight_predictor)
partition.set_weight_stasher(weight_stasher)
if gap_aware_just_loss:
assert getattr(args, 'weight_stashing', False)
if getattr(args, 'auto_file_name', True):
auto_file_name(args)
return (logger, train_dl, eval_dl, is_first_partition, is_last_partition, partition, statistics, train_dl_len, eval_dl_len, samplers)
|
def synchronize_dataloaders_length(args, is_first_partition: bool, logger, eval_dl: DataLoader, train_dl: DataLoader):
    """Compute dataloader lengths and trailing-batch sizes on rank 0, then
    broadcast them so every pipeline rank agrees on loop bounds.

    Returns:
        (last_batch_diff_eval, last_batch_diff_train, eval_dl_len, train_dl_len);
        the ``*_diff`` values are the size of a smaller final batch
        (0 when ``drop_last`` is set or the dataset divides evenly).
    """
    if (args.rank == 0):
        # Only rank 0 computes the values; it is expected to host the first
        # partition's dataloaders.
        assert is_first_partition
        (train_dl_len, eval_dl_len) = (len(train_dl), len(eval_dl))
        (train_dataset_len, eval_dataset_len) = (len(train_dl.dataset), len(eval_dl.dataset))
        # A smaller trailing train batch only matters when the step budget
        # (args.steps) actually reaches it and drop_last is off.
        if ((args.steps < 0) or (args.steps >= (train_dataset_len // args.bs_train))):
            last_batch_diff_train = ((train_dataset_len % args.bs_train) if (not train_dl.drop_last) else 0)
        else:
            last_batch_diff_train = 0
        last_batch_diff_eval = ((eval_dataset_len % args.bs_test) if (not eval_dl.drop_last) else 0)
        d = dict(train_dataset_len=train_dataset_len, eval_dataset_len=eval_dataset_len, train_dl_len=train_dl_len, eval_dl_len=eval_dl_len, last_batch_diff_train=last_batch_diff_train, last_batch_diff_eval=last_batch_diff_eval)
        logger.info(f'Synchronized: {d}')
        data = [train_dl_len, eval_dl_len, last_batch_diff_train, last_batch_diff_eval]
        data = torch.tensor(data, dtype=torch.long)
    else:
        # Placeholder tensor to receive the broadcast values from rank 0.
        data = torch.zeros(4, dtype=torch.long)
    torch.distributed.broadcast(data, 0)
    train_dl_len = data[0].item()
    eval_dl_len = data[1].item()
    last_batch_diff_train = data[2].item()
    last_batch_diff_eval = data[3].item()

    def calc_shapes_for_train(train_dl):
        # Recompute the train-side values locally; used as a sanity check
        # against the values broadcast from rank 0.
        assert (train_dl is not None)
        train_dl_len = len(train_dl)
        train_dataset_len = len(train_dl.dataset)
        if ((args.steps < 0) or (args.steps >= (train_dataset_len // args.bs_train))):
            last_batch_diff_train = ((train_dataset_len % args.bs_train) if (not train_dl.drop_last) else 0)
        else:
            last_batch_diff_train = 0
        return (last_batch_diff_train, eval_dl_len, train_dl_len)
    # Every rank that also holds a train dataloader verifies the broadcast
    # values match its local computation.
    if (train_dl is not None):
        assert (calc_shapes_for_train(train_dl) == (last_batch_diff_train, eval_dl_len, train_dl_len))
    return (last_batch_diff_eval, last_batch_diff_train, eval_dl_len, train_dl_len)
|
def get_optimizer_parameter_groups(args, partition):
    """Build the optimizer parameter groups for this partition.

    HuggingFace transformer models get two groups: one with the configured
    weight decay and one (biases / LayerNorm weights) with decay disabled.
    Other models use a flat parameter list. Prints a parameter count either way.
    """
    model = partition.partition
    if is_huggingface_transformer(args):
        decay_value = args.optimizer['args']['weight_decay']
        no_decay_markers = {'bias', 'LayerNorm.weight', 'T5LayerNorm.weight'}

        def _skips_decay(param_name):
            # Substring match, mirroring the usual HF grouping convention.
            return any(marker in param_name for marker in no_decay_markers)

        decay_params = [p for (n, p) in model.named_parameters() if not _skips_decay(n)]
        no_decay_params = [p for (n, p) in model.named_parameters() if _skips_decay(n)]
        optimizer_grouped_parameters = [
            {'params': decay_params, 'weight_decay': decay_value},
            {'params': no_decay_params, 'weight_decay': 0.0},
        ]
        # Keep insertion order no_decay -> decay -> total for the printout.
        parameters_count = {
            'no_decay': sum(p.numel() for p in no_decay_params),
            'decay': sum(p.numel() for p in decay_params),
        }
        parameters_count['total'] = parameters_count['decay'] + parameters_count['no_decay']
    else:
        optimizer_grouped_parameters = list(model.parameters())
        parameters_count = sum(p.numel() for p in optimizer_grouped_parameters)
    print(f'-I- optimized parameters count: {parameters_count}')
    return optimizer_grouped_parameters
|
def run_function(func, cfg, q):
    """Claim one GPU id from queue `q`, pin it via CUDA_VISIBLE_DEVICES,
    run func(**cfg), then return the GPU id to the queue."""
    claimed = q.get()
    os.environ['CUDA_VISIBLE_DEVICES'] = str(claimed)
    print(f'# GPU:{claimed}')
    func(**cfg)
    # Note: the id is only returned on success; an exception in func keeps it
    # checked out (same as the original behavior).
    q.put(claimed)
|
def run_function_on_several_gpus(required_gpus, func, cfg, q):
    """Claim `required_gpus` ids from queue `q`, expose them through
    CUDA_VISIBLE_DEVICES, run func(**cfg), then release the (stringified)
    ids back to the queue."""
    claimed = []
    for _ in range(required_gpus):
        claimed.append(str(q.get()))
    os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(claimed)
    print(f'# GPUs:{claimed}')
    func(**cfg)
    for ident in claimed:
        q.put(ident)
|
def prepare_gpu_queue(manager, NUM_AVAIALBLE_GPUS, CUDA_VISIBLE_DEVICES=None):
    """Create a managed queue pre-filled with GPU identifiers.

    Uses the explicit CUDA_VISIBLE_DEVICES iterable when given (and truthy),
    otherwise the ids 0..NUM_AVAIALBLE_GPUS-1. (Parameter names keep the
    file's existing 'AVAIALBLE' spelling.)
    """
    gpu_ids = CUDA_VISIBLE_DEVICES if CUDA_VISIBLE_DEVICES else range(NUM_AVAIALBLE_GPUS)
    gpu_queue = manager.Queue()
    for gpu_id in gpu_ids:
        gpu_queue.put(gpu_id)
    return gpu_queue
|
def map_to_several_limited_gpus(func, configs, gpus_per_config, NUM_AVAIALBLE_GPUS, CUDA_VISIBLE_DEVICES=None):
    """Run func(**cfg) for every config in parallel, each run claiming a
    fixed number of GPUs from a shared queue.

    `gpus_per_config` may be a single int (applied to all configs) or a list
    aligned with `configs`.
    """
    with Manager() as manager:
        gpu_queue = prepare_gpu_queue(manager, NUM_AVAIALBLE_GPUS, CUDA_VISIBLE_DEVICES)
        if not isinstance(gpus_per_config, list):
            gpus_per_config = [gpus_per_config] * len(configs)
        assert len(gpus_per_config) == len(configs)
        # Cap concurrency so the largest request can always be satisfied.
        workers = NUM_AVAIALBLE_GPUS // max(gpus_per_config)
        jobs = (delayed(run_function_on_several_gpus)(needed, func, cfg, gpu_queue)
                for (cfg, needed) in zip(configs, gpus_per_config))
        Parallel(n_jobs=workers, verbose=10)(jobs)
|
def pop_from_cfg(cfg, name):
    """Remove key `name` from `cfg` (mutating it) and return (cfg, value)."""
    value = cfg.pop(name)
    return (cfg, value)
|
def pop_FUNC_from_cfg(cfg):
    """Remove the callable stored under 'FUNC' from cfg; returns (cfg, func)."""
    return pop_from_cfg(cfg, name='FUNC')
|
def pop_REQUIRED_GPUS_from_cfg(cfg):
    """Remove the GPU count stored under 'REQUIRED_GPUS' from cfg; returns (cfg, n)."""
    return pop_from_cfg(cfg, name='REQUIRED_GPUS')
|
def flexible_map_to_several_limited_gpus(configs, NUM_AVAIALBLE_GPUS, CUDA_VISIBLE_DEVICES=None):
    """Like map_to_several_limited_gpus, but each config carries its own
    callable (key 'FUNC') and GPU requirement (key 'REQUIRED_GPUS')."""
    with Manager() as manager:
        gpu_queue = prepare_gpu_queue(manager, NUM_AVAIALBLE_GPUS, CUDA_VISIBLE_DEVICES)
        funcs = []
        gpu_requirements = []
        stripped_cfgs = []
        for raw_cfg in configs:
            # Pull the run metadata out so only real kwargs remain in the cfg.
            (cfg, fn) = pop_FUNC_from_cfg(raw_cfg)
            (cfg, needed) = pop_REQUIRED_GPUS_from_cfg(cfg)
            funcs.append(fn)
            gpu_requirements.append(needed)
            stripped_cfgs.append(cfg)
        n_jobs = NUM_AVAIALBLE_GPUS // max(gpu_requirements)
        Parallel(n_jobs=n_jobs, verbose=10)(
            delayed(run_function_on_several_gpus)(needed, fn, cfg, gpu_queue)
            for (cfg, needed, fn) in zip(stripped_cfgs, gpu_requirements, funcs))
|
def map_to_limited_gpus(func, configs, NUM_AVAIALBLE_GPUS, CUDA_VISIBLE_DEVICES=None):
    """Run func(**cfg) for every config in parallel, one GPU per run.

    Args:
        func: callable invoked as func(**cfg) by run_function after pinning a GPU.
        configs: iterable of kwargs dicts.
        NUM_AVAIALBLE_GPUS: number of concurrent jobs / GPUs (name keeps the
            file's existing spelling).
        CUDA_VISIBLE_DEVICES: optional explicit list of GPU ids to use instead
            of range(NUM_AVAIALBLE_GPUS).
    """
    with Manager() as manager:
        # Reuse the shared helper instead of duplicating its queue-filling loop.
        q = prepare_gpu_queue(manager, NUM_AVAIALBLE_GPUS, CUDA_VISIBLE_DEVICES)
        Parallel(n_jobs=NUM_AVAIALBLE_GPUS, verbose=10)((delayed(run_function)(func, cfg, q) for cfg in configs))
|
class RunGridHelper():
    """Helper for launching a grid of runs across a fixed set of GPUs.

    Example: running on 4 GPUs:

        helper = RunGridHelper(gpu_list=[0,1,2,3])
        helper.add_runs("python main.py", dict(seed=[42, 12]), num_gpus=1)
        helper.run()
    """

    def __init__(self, verbose=True, test=False, gpu_list=None):
        self.grids = []
        self.gpu_list = gpu_list if gpu_list else []
        self.verbose = verbose
        self.test = test

    def add_runs(self, base_command, param_grid, num_gpus):
        """Register runs.

        base_command: command common to all runs.
        param_grid: parameter grid dict (or list of such dicts) appended to
            the command (see sklearn.model_selection.ParameterGrid).
        num_gpus: number of GPUs each run requires.
        """
        runner = partial(call_function, base_command, _verbose=self.verbose, _test=self.test)
        assert isinstance(num_gpus, int)

        def annotate(grid):
            # ParameterGrid expects lists of candidate values, hence the [..]
            # wrapping of the single FUNC / REQUIRED_GPUS entries.
            grid['FUNC'] = [runner]
            grid['REQUIRED_GPUS'] = [num_gpus]

        if isinstance(param_grid, dict):
            annotate(param_grid)
            self.grids.append(param_grid)
        else:
            assert isinstance(param_grid, list)
            for grid in param_grid:
                annotate(grid)
            self.grids.extend(param_grid)

    def run(self):
        """Expand all registered grids and execute them on self.gpu_list."""
        expanded = ParameterGrid(self.grids)
        flexible_map_to_several_limited_gpus(expanded, len(self.gpu_list), CUDA_VISIBLE_DEVICES=self.gpu_list)
|
def call_function(COMMAND, *args, _verbose=True, _test=False, **kw):
    """Build "COMMAND --key value ..." from `kw` and execute it via os.system.

    With `_verbose` the command line is printed first; with `_test` it is
    only built/printed, never executed.

    Example:
        call_function("python main.py", seed=42)
        # runs: python main.py --seed 42
    """
    flags = ' --'.join(f'{name} {value}' for (name, value) in kw.items())
    full_command = f'{COMMAND} --{flags}'
    if _verbose:
        print(full_command)
    if _test:
        return
    os.system(full_command)
|
def subprocess_func(COMMAND, *args, **kw):
    """Build "COMMAND --key value ..." from `kw` and run it as a blocking
    subprocess (unlike call_function, which uses os.system)."""
    flags = ' --'.join(f'{name} {value}' for (name, value) in kw.items())
    command_line = f'{COMMAND} --{flags}'
    argv = shlex.split(command_line)
    # Log message kept verbatim (including the original 'Runnning' spelling).
    print(f'-I- Runnning: {command_line}')
    proc = subprocess.Popen(argv)
    proc.wait()
|
def run_grid_on(COMMAND, param_grid, gpu_list, skip_first=0):
    """Expand `param_grid` and run COMMAND once per combination (one GPU
    each), optionally skipping the first `skip_first` combinations."""
    combos = ParameterGrid(param_grid)
    if skip_first > 0:
        print(f'-I- Skipping first {skip_first} configs')
        print(f'-I- Skipping: {list(combos)[:skip_first]}')
        combos = list(combos)[skip_first:]
    runner = partial(subprocess_func, COMMAND)
    map_to_limited_gpus(runner, combos, len(gpu_list), CUDA_VISIBLE_DEVICES=gpu_list)
|
def run_grid_on_multi_gpu_per_run(COMMAND, param_grid, gpu_list, gpus_per_config=1):
    """Expand `param_grid` and run COMMAND once per combination, claiming
    `gpus_per_config` GPUs per run (executes via call_function / os.system)."""
    combos = ParameterGrid(param_grid)
    runner = partial(call_function, COMMAND)
    map_to_several_limited_gpus(runner, combos, gpus_per_config, len(gpu_list), CUDA_VISIBLE_DEVICES=gpu_list)
|
def infer_number_of_gpus(COMMAND):
    """Placeholder: inferring the GPU requirement of COMMAND is not implemented."""
    raise NotImplementedError()
|
def training_loop(args, logger, train_dl, test_dl, is_last_partition, partition: SinglePartitionManager, statistics: Stats, train_dl_len, test_dl_len, samplers):
    """Top-level train/eval loop.

    Each iteration runs one training flush (bounded by a micro-batch limit
    derived from args.steps / args.step_every), then one eval pass; updates
    epoch/step counters, saves checkpoints, and optionally early-stops based
    on a flag broadcast across ranks.

    Returns:
        (total_epoch_times_list, train_epochs_times_list): per-epoch wall
        clock seconds for train+eval and for train only.
    """
    last_batch_smaller_n_micro_batches_policy = getattr(args, 'last_batch_smaller_n_micro_batches_policy', DEFAULT_STEP_EVERY_SMALLER_LAST_BATCH_POLICY)
    save_checkpoint_every_x_epochs = approximate_checkpoint_every_x_epochs(args, train_dl_len)
    epochs = 0
    steps = 0
    total_epoch_times_list = []
    train_epochs_times_list = []
    cp_saver = CheckpointsSaver(args)
    logger.info(f'flush rate {args.flush_rate}')
    logger.info(f'Running for {args.epochs} epochs and {args.steps} steps')
    if (args.flush_rate >= 0):
        # Non-negative flush rates are rejected up front; this makes the
        # flush-rate branch inside run_train below unreachable in practice.
        raise NotImplementedError()
    train_batches_limit = getattr(args, 'train_batches_limit', train_dl_len)
    test_batches_limit = getattr(args, 'test_batches_limit', test_dl_len)
    if (getattr(args, 'train_batches_limit', (- 1)) > 0):
        warnings.warn('(dev feature) hard limiting train batches per flush: different last batch not supported, messages may get truncated')
    if (getattr(args, 'test_batches_limit', (- 1)) > 0):
        warnings.warn('(dev feature) hard limiting test batches per flush: different last batch not supported, messages may get truncated')
    # Negative limits mean "no limit": fall back to the full dataloader length.
    train_batches_limit = (train_dl_len if (train_batches_limit < 0) else train_batches_limit)
    test_batches_limit = (test_dl_len if (test_batches_limit < 0) else test_batches_limit)

    def run_eval(eval_batches_to_run):
        # Run one evaluation pass; returns False when there is nothing to run.
        logger.info(f'Running eval')
        if (eval_batches_to_run == 0):
            partition.eval()
            if statistics:
                statistics.eval()
            return False
        if test_dl:
            partition.set_dataloader(test_dl, eval_batches_to_run)
        partition.eval()
        if statistics:
            statistics.eval()
        with torch.no_grad():
            partition.run_forward_until_flush(eval_batches_to_run)
        # Only the last partition holds the epoch-end statistics for eval.
        if is_last_partition:
            statistics.last_partition_on_epoch_end()
        return True

    def run_train(train_batches_to_run):
        # Run one training flush; returns False when there is nothing to run.
        logger.info(f'Running train')
        train_epoch_start_time = time.time()
        if (train_batches_to_run == 0):
            return False
        if train_dl:
            partition.set_dataloader(train_dl, train_batches_to_run)
        partition.train()
        if statistics:
            statistics.train()
        if (args.flush_rate > 0):
            # NOTE(review): dead branch -- args.flush_rate >= 0 raises above.
            for _ in range(0, train_batches_to_run, args.flush_rate):
                partition.run_until_flush(args.flush_rate)
            reminder = (train_batches_to_run % args.flush_rate)
            if (reminder > 0):
                logger.info(f'Warning: will run for reminder {reminder} to finish epoch')
                partition.run_until_flush(reminder)
            if (not partition.trainer.PER_STEP_SCHEDULER):
                partition.lr_scheduler.step()
        else:
            partition.run_until_flush(train_batches_to_run)
        train_epochs_times_list.append((time.time() - train_epoch_start_time))
        if is_last_partition:
            statistics.last_partition_on_epoch_end()
        else:
            statistics.non_last_partition_on_epoch_end()
        return True

    while ((epochs < args.epochs) or (args.epochs < 0)):
        # Re-seed samplers so shuffling differs (deterministically) per epoch.
        for s in samplers:
            s.set_epoch(epochs)
        (reminder_micro_batches, train_batches_limit_to_use) = get_micro_batches_until_flush(args, train_batches_limit, steps, last_batch_smaller_n_micro_batches_policy, logger, partition)
        if (train_batches_limit_to_use <= 0):
            logger.info(f"breaking early: can't complete a full step with {args.step_every} gradient accumulations.")
            break
        epoch_start_time = time.time()
        did_train = run_train(train_batches_limit_to_use)
        did_eval = run_eval(test_batches_limit)
        epochs += 1
        if did_train:
            # Under DropReminder with a step budget, the dropped micro-batches
            # mean the last flush contributed fewer full optimizer steps.
            floor_steps = ((args.steps > 0) and reminder_micro_batches and (last_batch_smaller_n_micro_batches_policy == SmallerLastBatchPolicy.DropReminder))
            if floor_steps:
                steps += math.floor((train_batches_limit_to_use / args.step_every))
            else:
                steps += math.ceil((train_batches_limit_to_use / args.step_every))
        is_last = ((0 < args.epochs <= epochs) or (0 < args.steps <= steps))
        if (is_last or ((epochs % save_checkpoint_every_x_epochs) == 0)):
            cp_saver.maybe_save_checkpoint(partition.partition.layers, steps)
        total_epoch_time = (time.time() - epoch_start_time)
        total_epoch_times_list.append(total_epoch_time)
        if (args.local_rank == (args.world_size - 1)):
            # The last local rank owns the end-of-epoch summary printout.
            logger.info(('-' * 89))
            info_str = '| end of epoch {:3d} | time: {:5.2f}s | steps: {:5d}'.format(epochs, total_epoch_time, steps)
            if did_train:
                info_str += statistics.get_epoch_info_str(is_train=True)
            if did_eval:
                info_str += statistics.get_epoch_info_str(is_train=False)
            logger.info(info_str)
            logger.info(('-' * 89))
        if (0 < args.steps <= steps):
            logger.info(f'Finished all steps. Total steps:{steps}, rank:{args.local_rank}')
            break
        elif getattr(args, 'patience', False):
            # Early stopping: decided on one rank and broadcast to all.
            # NOTE(review): `if (args.world_size - 1)` is truthy on EVERY rank
            # whenever world_size > 1 -- presumably a rank-equality check was
            # intended (cf. the local_rank comparison above); verify.
            if (args.world_size - 1):
                assert is_last_partition
                should_early_stop = should_stop_early(args, statistics.get_metric_for_early_stop(), logger)
                data = torch.tensor(int(should_early_stop))
            else:
                data = torch.tensor(int(False))
            torch.distributed.broadcast(data, (args.world_size - 1))
            should_early_stop = data.item()
            if should_early_stop:
                break
    return (total_epoch_times_list, train_epochs_times_list)
|
def get_micro_batches_until_flush(args, train_batches_limit, steps, step_every_smaller_last_batch_policy, logger, partition):
    """Decide how many micro-batches to run before the next flush, honoring a
    global step budget (args.steps) and the policy for a trailing partial step.

    Returns:
        (reminder_micro_batches, train_batches_limit_to_use)
    """
    if args.steps <= 0:
        # No step budget: run the whole limit, no remainder handling needed.
        return (0, train_batches_limit)
    remaining_steps = args.steps - steps
    remaining_batches = remaining_steps * args.step_every
    budget = min(train_batches_limit, remaining_batches)
    if remaining_batches < train_batches_limit:
        logger.info('batches_left are smaller than dataloader or limit: killing comm_handler.last_batch_train_shapes')
        partition.comm_handler.last_batch_train_shapes = None
    leftover = budget % args.step_every
    if leftover:
        if step_every_smaller_last_batch_policy == SmallerLastBatchPolicy.DropReminder:
            logger.info(f'Got reminder of {leftover} micro batches. Will drop them.')
            budget -= leftover
        elif step_every_smaller_last_batch_policy == SmallerLastBatchPolicy.ProportionalStep:
            logger.info(f'Got reminder of {leftover} micro batches. Will take proportional {(leftover / args.step_every)} last step')
        else:
            raise NotImplementedError(f'Unknown SMALLER_LAST_BATCH_POLICY, {step_every_smaller_last_batch_policy}')
    return (leftover, budget)
|
def approximate_checkpoint_every_x_epochs(args, train_dl_len):
    """Convert args.save_checkpoint_every_x_steps into an approximate epoch
    interval for checkpointing.

    Args:
        args: must have `step_every`; may have `save_checkpoint_every_x_steps`.
        train_dl_len: number of micro-batches per epoch.

    Returns:
        int >= 1: save a checkpoint every that many epochs (1 when no step
        interval is configured).
    """
    save_checkpoint_every_x_steps = getattr(args, 'save_checkpoint_every_x_steps', None)
    # Clamp to >= 1: when gradient accumulation exceeds the dataloader length,
    # the integer division yields 0 and would otherwise divide by zero below.
    approx_step_per_epoch = max(1, (train_dl_len // args.step_every))
    if (save_checkpoint_every_x_steps is not None):
        save_checkpoint_every_x_epochs = (save_checkpoint_every_x_steps // approx_step_per_epoch)
    else:
        save_checkpoint_every_x_epochs = 1
    assert (save_checkpoint_every_x_epochs >= 1)
    print(f'Approximating: An epoch is approx {approx_step_per_epoch} steps.')
    print(f'Approximating: will save checkpoint every {save_checkpoint_every_x_epochs} epochs, and at the end.')
    return save_checkpoint_every_x_epochs
|
def should_stop_early(args, valid_loss, logger):
    """Early-stopping check; state persists on the function object itself
    (should_stop_early.best / should_stop_early.num_runs survive across calls).

    Returns True once the validation metric has not improved for
    args.patience consecutive calls; args.patience <= 0 disables the check.
    """
    if valid_loss is None:
        return False
    if args.patience <= 0:
        return False
    maximize = getattr(args, 'maximize_best_checkpoint_metric', False)
    best_so_far = getattr(should_stop_early, 'best', None)
    improved = (best_so_far is None) or (valid_loss > best_so_far if maximize else valid_loss < best_so_far)
    if improved:
        should_stop_early.best = valid_loss
        should_stop_early.num_runs = 0
        return False
    should_stop_early.num_runs += 1
    if should_stop_early.num_runs < args.patience:
        return False
    logger.info(f"early stop since valid performance hasn't improved for last {args.patience} runs")
    return True
|
class CheckpointsSaver():
    """Saves partition checkpoints (state_dict plus a `.steps` metadata file)
    under args.checkpoints_save_dir; a no-op unless args.save_checkpoints."""

    def __init__(self, args):
        self.args = args
        self.num_saved_checkpoints = 0
        if not getattr(args, 'save_checkpoints', False):
            print('-W- will not save checkpoints')
        else:
            assert hasattr(args, 'checkpoints_save_dir')
            os.makedirs(args.checkpoints_save_dir, exist_ok=True)

    def maybe_save_checkpoint(self, model, steps):
        """Save model.state_dict() and the current step count, if enabled."""
        args = self.args
        if not getattr(args, 'save_checkpoints', False):
            return
        prefix = getattr(args, 'checkpoints_save_name_prefix', '')
        prefix += f'_{self.num_saved_checkpoints}'
        fn = os.path.join(args.checkpoints_save_dir, f'{prefix}_Partition{args.stage}.pt')
        started = time.time()
        torch.save(model.state_dict(), fn)
        print(f'-V- stage {args.stage}: saving checkpoint took: {(time.time() - started)}')
        self.num_saved_checkpoints += 1
        print(f'-I- stage {args.stage}: model checkpoint saved: {fn}')
        metatdata_fn = os.path.join(args.checkpoints_save_dir, f'{prefix}_Partition{args.stage}.steps')
        # Metadata write is best-effort: a failure here must not kill training.
        try:
            with open(metatdata_fn, 'w') as f:
                f.write(str(steps))
        except Exception:
            warnings.warn(f'Failed to save metadata for checkpoint {metatdata_fn}, ignoring exception')
|
class MyTestCase(unittest.TestCase):
    """Checks that loading Google's original .npz ViT weights produces the
    same state dict as timm's converted .pth checkpoints."""

    def _check_loader_against_timm(self, npz_url, model_builder, pth_url):
        # Load the .npz weights, apply _fix_pos_embed (presumably adapts the
        # positional embeddings to the model -- confirm), and compare every
        # tensor against timm's converted checkpoint.
        npz_state = load_state_dict_from_url(npz_url)
        model = model_builder(pretrained=False)
        _fix_pos_embed(model, npz_state)
        timm_state = load_state_dict_from_url(pth_url)
        self.assertEqual(sorted(npz_state.keys()), sorted(timm_state.keys()))
        for key in npz_state:
            self.assertTrue(torch.allclose(timm_state[key], npz_state[key]))

    def test_our_loader_vs_timm_ViT_B_16(self):
        self._check_loader_against_timm(
            'https://storage.googleapis.com/vit_models/imagenet21k/ViT-B_16.npz',
            vit_base_patch16_384_in21k,
            'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_patch16_384_in21k-0243c7d9.pth')

    def test_our_loader_vs_timm_ViT_L_32(self):
        self._check_loader_against_timm(
            'https://storage.googleapis.com/vit_models/imagenet21k/ViT-L_32.npz',
            vit_large_patch32_384_in21k,
            'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_patch32_384_in21k-a9da678b.pth')
|
def skip_property_member(app, what, name, obj, skip, options):
    """Sphinx `autodoc-skip-member` hook: skip properties.

    Returns True for a property; returns None otherwise, which defers the
    decision back to autodoc's default behavior.
    """
    return True if isinstance(obj, property) else None
|
def setup(app):
    """Sphinx extension entry point: register the property-skipping hook."""
    app.connect('autodoc-skip-member', skip_property_member)
|
def multiply_with_arccos(x, y):
    """Return x * arccos(y) (element-wise for numpy array inputs)."""
    return np.arccos(y) * x
|
def fetch_logged_data(run_id):
    """Fetch the params, metrics, and 'model' artifact paths of an MLflow run.

    Returns:
        (params, metrics, artifacts): the run's param dict, metric dict, and
        the list of artifact paths under the 'model' directory.
    """
    client = mlflow.MlflowClient()
    run_data = client.get_run(run_id).data
    artifact_paths = [item.path for item in client.list_artifacts(run_id, 'model')]
    return (run_data.params, run_data.metrics, artifact_paths)
|
def func(x):
    """Return x**2 * sin(3*x), element-wise for array inputs."""
    # Left-associative product kept as in the original for bit-identical floats.
    return np.sin(3 * x) * x * x
|
def func(x):
    """Return x**2 * sin(3*x), element-wise for array inputs."""
    # Left-associative product kept as in the original for bit-identical floats.
    return np.sin(3 * x) * x * x
|
def generate_y(x):
    """Return the normalized sinc function sin(pi*x) / (pi*x).

    Accepts scalars or numpy arrays (element-wise). The removable singularity
    at x == 0 now evaluates to 1 instead of producing a 0/0 -> nan warning.
    """
    # np.sinc computes sin(pi*x)/(pi*x) and handles x == 0 exactly.
    return np.sinc(x)
|
def func(x):
    """Return x**2 * sin(3*x), element-wise for array inputs."""
    # Left-associative product kept as in the original for bit-identical floats.
    return np.sin(3 * x) * x * x
|
class ChebyshevRx(EncodingCircuitBase):
    '\n Simple Chebyshev encoding circuit build from Rx gates\n\n **Example for 4 qubits, a 2 dimensional feature vector and 2 layers:**\n\n .. plot::\n\n from squlearn.encoding_circuit import ChebyshevRx\n pqc = ChebyshevRx(4, 2, 2)\n pqc.draw(output="mpl", style={\'fontsize\':15,\'subfontsize\': 10})\n plt.tight_layout()\n\n Args:\n num_qubits (int): Number of qubits of the ChebyshevRx encoding circuit\n num_features (int): Dimension of the feature vector\n num_layers (int): Number of layers (default: 1)\n closed (bool): If true, the last and the first qubit are entangled (default: false)\n '

    def __init__(self, num_qubits: int, num_features: int, num_layers: int=1, closed: bool=False, alpha: float=4.0) -> None:
        super().__init__(num_qubits, num_features)
        # Number of (Chebyshev + rotation + entangling) layer repetitions.
        self.num_layers = num_layers
        # When true, the entangling layer also couples the last qubit back to
        # the first (see entangle_layer in get_circuit).
        self.closed = closed
        # Upper bound for the Chebyshev prefactor parameters (used by
        # parameter_bounds and generate_initial_parameters).
        # NOTE(review): alpha is not reported by get_params, unlike num_layers
        # and closed -- confirm whether that omission is intentional.
        self.alpha = alpha

    @property
    def num_parameters(self) -> int:
        'The number of trainable parameters of the ChebyshevRx encoding circuit.'
        # Two parameters per qubit per layer: one Chebyshev prefactor and one
        # plain Rx rotation angle.
        return ((2 * self.num_qubits) * self.num_layers)

    @property
    def parameter_bounds(self) -> np.ndarray:
        'The bounds of the trainable parameters of the ChebyshevRx encoding circuit.'
        bounds = np.zeros((self.num_parameters, 2))
        ioff = 0
        for ilayer in range(self.num_layers):
            # First num_qubits parameters per layer: Chebyshev prefactors in [0, alpha].
            for i in range(self.num_qubits):
                bounds[ioff] = [0.0, self.alpha]
                ioff = (ioff + 1)
            # Remaining num_qubits parameters: Rx rotation angles in [-pi, pi].
            for i in range(self.num_qubits):
                bounds[ioff] = [(- np.pi), np.pi]
                ioff = (ioff + 1)
        return bounds

    def generate_initial_parameters(self, seed: Union[(int, None)]=None) -> np.ndarray:
        '\n Generates random parameters for the ChebyshevRx encoding circuit.\n\n Args:\n seed (Union[int,None]): Seed for the random number generator (default: None)\n\n Return:\n The randomly generated parameters\n '
        param = super().generate_initial_parameters(seed)
        if (len(param) > 0):
            # Overwrite the Chebyshev prefactors of every layer with a fixed
            # ramp 0.01..alpha instead of keeping the base class's random values.
            index = self.get_cheb_indices(False)
            p = np.linspace(0.01, self.alpha, self.num_qubits)
            for i in index:
                # i is the list of prefactor indices of one layer (fancy indexing).
                param[i] = p
        return param

    def get_params(self, deep: bool=True) -> dict:
        '\n Returns hyper-parameters and their values of the ChebyshevRx encoding circuit\n\n Args:\n deep (bool): If True, also the parameters for\n contained objects are returned (default=True).\n\n Return:\n Dictionary with hyper-parameters and values.\n '
        params = super().get_params()
        params['num_layers'] = self.num_layers
        params['closed'] = self.closed
        return params

    def get_circuit(self, features: Union[(ParameterVector, np.ndarray)], parameters: Union[(ParameterVector, np.ndarray)]) -> QuantumCircuit:
        "\n Returns the circuit of the ChebyshevRx encoding circuit\n\n Args:\n features Union[ParameterVector,np.ndarray]: Input vector of the features\n from which the gate inputs are obtained\n param_vec Union[ParameterVector,np.ndarray]: Input vector of the parameters\n from which the gate inputs are obtained\n\n Return:\n Returns the circuit in Qiskit's QuantumCircuit format\n "

        def entangle_layer(QC: QuantumCircuit) -> QuantumCircuit:
            'Creation of a simple nearest neighbor entangling layer'
            # Even pairs first, then odd pairs; self.closed (bool as 0/1)
            # extends the range so the last qubit wraps to the first.
            for i in range(0, ((self.num_qubits + self.closed) - 1), 2):
                QC.cx(i, ((i + 1) % self.num_qubits))
            if (self.num_qubits > 2):
                for i in range(1, ((self.num_qubits + self.closed) - 1), 2):
                    QC.cx(i, ((i + 1) % self.num_qubits))
            return QC

        def mapping(a, x):
            'Helper function for returning a*arccos(x)'
            return (a * np.arccos(x))
        nfeature = len(features)
        nparam = len(parameters)
        QC = QuantumCircuit(self.num_qubits)
        ioff = 0
        for _ in range(self.num_layers):
            # Chebyshev block: Rx(theta * arccos(x)) per qubit; features and
            # parameters are cycled via the modulo indexing.
            for i in range(self.num_qubits):
                QC.rx(mapping(parameters[(ioff % nparam)], features[(i % nfeature)]), i)
                ioff = (ioff + 1)
            # Plain parameterized Rx rotations.
            for i in range(self.num_qubits):
                QC.rx(parameters[(ioff % nparam)], i)
                ioff = (ioff + 1)
            QC = entangle_layer(QC)
        return QC

    def get_cheb_indices(self, flatten: bool=True):
        '\n Function that returns the indices of the parameters involved in the Chebyshev encoding.\n\n Args:\n flatten (bool): If true, the indices are returned as a flat list, otherwise\n as a list of lists, where the outer list corresponds to the layers\n (default: True)\n '
        cheb_index = []
        ioff = 0
        for ilayer in range(self.num_layers):
            cheb_index_layer = []
            # First num_qubits parameters of each layer belong to the
            # Chebyshev encoding (matches the ordering in get_circuit).
            for i in range(self.num_qubits):
                cheb_index_layer.append(ioff)
                ioff = (ioff + 1)
            # Skip over the plain rotation parameters of this layer.
            for i in range(self.num_qubits):
                ioff = (ioff + 1)
            if flatten:
                cheb_index += cheb_index_layer
            else:
                cheb_index.append(cheb_index_layer)
        return cheb_index
|
class ChebyshevTower(EncodingCircuitBase):
    '\n A feature-map that is based on the Chebyshev Tower encoding.\n\n **Example for 4 qubits, a 2 dimensional feature vector, 2 Chebyshev terms per feature,\n and 2 layers:**\n\n .. plot::\n\n from squlearn.encoding_circuit import ChebyshevTower\n pqc = ChebyshevTower(4, 2, 2, num_layers=2)\n pqc.draw(output="mpl", style={\'fontsize\':15,\'subfontsize\': 10})\n plt.tight_layout()\n\n The encoding gate and the scaling factor can be adjusted by parameters.\n It is also possible to change the indexing of the features.\n\n Args:\n num_qubits (int): Number of qubits of the ChebyshevTower encoding circuit\n num_features (int): Dimension of the feature vector\n n_chebyshev (int): Number of Chebyshev tower terms per feature dimension\n alpha (float): Scaling factor of Chebyshev tower\n num_layers (int): Number of layers\n rotation_gate (str): Rotation gate to use. Either ``rx``, ``ry`` or ``rz`` (default: ``ry``)\n hadamard_start (bool): If true, the circuit starts with a layer of Hadamard gates\n (default: True)\n arrangement (str): Arrangement of the layers, either ``block`` or ``alternating``.\n ``block``: The features are stacked together, ``alternating``:\n The features are placed alternately (default: ``block``).\n '

    def __init__(self, num_qubits: int, num_features: int, num_chebyshev: int, alpha: float=1.0, num_layers: int=1, rotation_gate: str='ry', hadamard_start: bool=True, arrangement: str='block') -> None:
        super().__init__(num_qubits, num_features)
        # Number of Chebyshev tower terms per feature dimension.
        self.num_chebyshev = num_chebyshev
        # Scaling factor: the encoded angle is alpha * order * arccos(x).
        self.alpha = alpha
        self.num_layers = num_layers
        self.rotation_gate = rotation_gate
        self.hadamard_start = hadamard_start
        self.arrangement = arrangement
        # Validate eagerly; get_circuit re-validates the (mutable) attributes
        # again before building the circuit.
        if (self.rotation_gate not in ('rx', 'ry', 'rz')):
            raise ValueError("Rotation gate must be either 'rx', 'ry' or 'rz'")
        if (self.arrangement not in ('block', 'alternating')):
            raise ValueError("Arrangement must be either 'block' or 'alternating'")

    @property
    def num_parameters(self) -> int:
        'The number of trainable parameters of the Chebyshev Tower encoding (equal 0 here).'
        return 0

    def get_params(self, deep: bool=True) -> dict:
        '\n Returns hyper-parameters and their values of the Chebyshev Tower encoding\n\n Args:\n deep (bool): If True, also the parameters for\n contained objects are returned (default=True).\n\n Return:\n Dictionary with hyper-parameters and values.\n '
        params = super().get_params()
        params['num_chebyshev'] = self.num_chebyshev
        params['alpha'] = self.alpha
        params['num_layers'] = self.num_layers
        params['rotation_gate'] = self.rotation_gate
        params['hadamard_start'] = self.hadamard_start
        params['arrangement'] = self.arrangement
        return params

    def get_circuit(self, features: Union[(ParameterVector, np.ndarray)], parameters: Union[(ParameterVector, np.ndarray)]=None) -> QuantumCircuit:
        "\n Generates and returns the circuit of the Chebyshev encoding circuit.\n\n Args:\n features (Union[ParameterVector,np.ndarray]): Input vector of the features\n from which the gate inputs are obtained\n param_vec (Union[ParameterVector,np.ndarray]): Input vector of the parameters\n from which the gate inputs are obtained\n\n Return:\n Returns the circuit in Qiskit's QuantumCircuit format\n "
        # Re-validate: the attributes may have been changed after construction.
        if (self.rotation_gate not in ('rx', 'ry', 'rz')):
            raise ValueError("Rotation gate must be either 'rx', 'ry' or 'rz'")
        if (self.arrangement not in ('block', 'alternating')):
            raise ValueError("Arrangement must be either 'block' or 'alternating'")

        def entangle_layer(QC: QuantumCircuit):
            'Creation of a simple NN entangling layer'
            # Even nearest-neighbor pairs, then odd pairs (no wrap-around).
            for i in range(0, (self.num_qubits - 1), 2):
                QC.cx(i, (i + 1))
            for i in range(1, (self.num_qubits - 1), 2):
                QC.cx(i, (i + 1))
            return QC

        def mapping(x, i):
            'Non-linear mapping for x: alpha*i*arccos(x)'
            return ((self.alpha * i) * np.arccos(x))
        nfeature = len(features)
        QC = QuantumCircuit(self.num_qubits)
        if self.hadamard_start:
            QC.h(range(self.num_qubits))
        for ilayer in range(self.num_layers):
            # ioff indexes the feature, icheb the (1-based) Chebyshev order,
            # iqubit the target qubit (wraps around via the modulo below).
            ioff = 0
            iqubit = 0
            icheb = 1
            if (self.arrangement == 'block'):
                # block: all Chebyshev orders of one feature, then the next feature.
                outer = self.num_features
                inner = self.num_chebyshev
            elif (self.arrangement == 'alternating'):
                # alternating: all features at one Chebyshev order, then the next order.
                inner = self.num_features
                outer = self.num_chebyshev
            else:
                raise ValueError("Arrangement must be either 'block' or 'alternating'")
            for outer_ in range(outer):
                for inner_ in range(inner):
                    if (self.rotation_gate.lower() == 'rx'):
                        QC.rx(mapping(features[(ioff % nfeature)], icheb), (iqubit % self.num_qubits))
                    elif (self.rotation_gate.lower() == 'ry'):
                        QC.ry(mapping(features[(ioff % nfeature)], icheb), (iqubit % self.num_qubits))
                    elif (self.rotation_gate.lower() == 'rz'):
                        QC.rz(mapping(features[(ioff % nfeature)], icheb), (iqubit % self.num_qubits))
                    else:
                        raise ValueError('Rotation gate {} not supported'.format(self.rotation_gate))
                    iqubit += 1
                    # Advance the inner-loop index: the Chebyshev order in
                    # 'block' mode, the feature index in 'alternating' mode.
                    if (self.arrangement == 'block'):
                        icheb += 1
                    elif (self.arrangement == 'alternating'):
                        ioff += 1
                # Advance/reset the outer-loop index accordingly.
                if (self.arrangement == 'block'):
                    ioff += 1
                    icheb = 1
                elif (self.arrangement == 'alternating'):
                    icheb += 1
            # Entangle between layers, but not after the final one.
            if ((ilayer + 1) < self.num_layers):
                QC = entangle_layer(QC)
        return QC
|
class HighDimEncodingCircuit(EncodingCircuitBase):
    """The high-dimensional encoding circuit from reference [1].

    An encoding circuit that can be used for the classification of
    high-dimensional data. The indexing of the feature vector can be changed
    by the arguments ``cycling``, ``cycling_type`` and ``layer_type``.

    Args:
        num_qubits (int): Number of qubits of the HighDim encoding circuit
        num_features (int): Dimension of the feature vector
        cycling (bool): If True, the assignment of gates cycles, i.e. if
            reaching the last feature, the layer is filled by starting again
            from the first feature. If False, the gates are left out after
            reaching the last feature. (default: True)
        cycling_type (str): Defines how the indices are cycled.
            ``saw``: restarts by 0, e.g. 0,1,2,3,0,1,2,3 (recommended);
            ``hat``: goes up and then down, e.g. 0,1,2,3,2,1,0,1,2,3
        num_layers (Union[None, int]): Sets the number of layer repetitions.
            If not given, the number of layers is determined automatically by
            the number of features and qubits. If the given number of layers
            is too low, an error is thrown.
        layer_type (str): Defines in which directions the features are
            assigned to the gates. ``columns``: iteration in columns;
            ``rows``: iteration in rows.
        entangling_gate (str): Entangling gates that are used in the
            entangling layer. Either ``iswap`` or ``cx`` (default: ``iswap``)

    References
    ----------
    [1]: Peters, Evan, et al. "Machine learning of high dimensional data on a
    noisy quantum processor." npj Quantum Information 7.1 (2021): 161.
    <https://www.nature.com/articles/s41534-021-00498-9>
    """

    def __init__(
        self,
        num_qubits: int,
        num_features: int,
        cycling: bool = True,
        cycling_type: str = 'saw',
        num_layers: Union[None, int] = None,
        layer_type: str = 'rows',
        entangling_gate: str = 'iswap',
    ) -> None:
        super().__init__(num_qubits, num_features)
        self.cycling = cycling
        self.cycling_type = cycling_type
        self.num_layers = num_layers
        self.layer_type = layer_type
        self.entangling_gate = entangling_gate
        # Validate the option strings early; the checks are repeated in
        # get_circuit() because the attributes are publicly mutable.
        if self.cycling_type not in ('saw', 'hat'):
            # Bug fix: the original message reported the *layer* type here.
            raise ValueError('Unknown cycling type:', self.cycling_type)
        if self.layer_type not in ('columns', 'rows'):
            raise ValueError('Unknown layer type:', self.layer_type)
        if self.entangling_gate not in ('cx', 'iswap'):
            raise ValueError('Unknown entangling gate:', self.entangling_gate)

    @property
    def num_parameters(self) -> int:
        'The number of trainable parameters of the HighDim encoding circuit (equal to 0).'
        return 0

    def get_params(self, deep: bool = True) -> dict:
        """
        Returns hyper-parameters and their values of the HighDim encoding circuit.

        Args:
            deep (bool): If True, also the parameters for
                contained objects are returned (default=True).

        Return:
            Dictionary with hyper-parameters and values.
        """
        params = super().get_params()
        params['cycling'] = self.cycling
        params['cycling_type'] = self.cycling_type
        params['num_layers'] = self.num_layers
        params['layer_type'] = self.layer_type
        params['entangling_gate'] = self.entangling_gate
        return params

    def get_circuit(
        self,
        features: Union[ParameterVector, np.ndarray],
        parameters: Union[ParameterVector, np.ndarray] = None,
    ) -> QuantumCircuit:
        """
        Returns the circuit of the HighDim encoding circuit.

        Args:
            features (Union[ParameterVector,np.ndarray]): Input vector of the
                features from which the gate inputs are obtained.
            parameters (Union[ParameterVector,np.ndarray]): Must be None or
                empty, since this circuit has no trainable parameters.

        Return:
            The circuit of the high-dimensional encoding circuit.

        Raises:
            ValueError: If an option string is invalid, the number of features
                does not match, or non-empty parameters are supplied.
            RuntimeError: If the requested number of layers is too small to
                represent all features.
        """
        # Re-validate, since the attributes may have changed after __init__.
        if self.cycling_type not in ('saw', 'hat'):
            # Bug fix: the original message reported the *layer* type here.
            raise ValueError('Unknown cycling type:', self.cycling_type)
        if self.layer_type not in ('columns', 'rows'):
            raise ValueError('Unknown layer type:', self.layer_type)
        if self.entangling_gate not in ('cx', 'iswap'):
            raise ValueError('Unknown entangling gate:', self.entangling_gate)

        def build_layer(QC: QuantumCircuit, feature_vec: ParameterVector, ioff: int):
            'Private function which creates a single layer of Rz-Ry-Rz rotations.'
            if self.layer_type == 'rows':
                rows = True
            elif self.layer_type == 'columns':
                rows = False
            else:
                raise ValueError('Unknown layer type:', self.layer_type)
            for i in range(3 * self.num_qubits):
                # Three consecutive gates per qubit (rows) or filling all
                # qubits gate-type by gate-type (columns).
                if rows:
                    iqubit = int(i / 3)
                else:
                    iqubit = i % self.num_qubits
                ii = ioff + i
                if self.cycling:
                    if self.cycling_type == 'saw':
                        ii = ii % self.num_features
                    elif self.cycling_type == 'hat':
                        # Triangle-wave indexing: 0,1,...,n-1,n-2,...,1,0,...
                        itest = ii % max((self.num_features + self.num_features) - 2, 1)
                        if itest >= self.num_features:
                            ii = ((self.num_features + self.num_features) - 2) - itest
                        else:
                            ii = itest
                    else:
                        raise ValueError('Unknown cycling type!')
                # Without cycling, stop once all features have been placed.
                if (iqubit >= self.num_qubits) or (ii >= self.num_features):
                    break
                if rows:
                    if (i % 3) == 0:
                        QC.rz(feature_vec[ii], iqubit)
                    elif (i % 3) == 1:
                        QC.ry(feature_vec[ii], iqubit)
                    else:
                        QC.rz(feature_vec[ii], iqubit)
                elif int(i / self.num_qubits) == 0:
                    QC.rz(feature_vec[ii], iqubit)
                elif int(i / self.num_qubits) == 1:
                    QC.ry(feature_vec[ii], iqubit)
                else:
                    QC.rz(feature_vec[ii], iqubit)
            return QC

        def entangle_layer_iswap(QC: QuantumCircuit):
            'Creation of the entangling layer by iSWAP of neighboring qubits.'
            iswap_op = Operator(
                [
                    [1, 0, 0, 0],
                    [0, 1 / np.sqrt(2), 1j / np.sqrt(2), 0],
                    [0, 1j / np.sqrt(2), 1 / np.sqrt(2), 0],
                    [0, 0, 0, 1],
                ]
            )
            for i in range(0, self.num_qubits - 1, 2):
                QC.unitary(iswap_op, [i, i + 1], label='iswap')
            for i in range(1, self.num_qubits - 1, 2):
                QC.unitary(iswap_op, [i, i + 1], label='iswap')
            return QC

        def entangle_layer_cx(QC: QuantumCircuit):
            'Creation of a simple nearest neighbor entangling layer.'
            for i in range(0, self.num_qubits - 1, 2):
                QC.cx(i, i + 1)
            for i in range(1, self.num_qubits - 1, 2):
                QC.cx(i, i + 1)
            return QC

        if self.num_features != len(features):
            raise ValueError('Wrong number of features')
        if parameters is not None:
            if len(parameters) != 0:
                raise ValueError('No parameters are needed!')

        QC = QuantumCircuit(self.num_qubits)
        qubit_list = range(self.num_qubits)
        QC.h(qubit_list)
        # Minimal number of layers needed to place every feature once
        # (three rotations per qubit and layer).
        num_layers = int(self.num_features / (self.num_qubits * 3)) + 1
        if self.num_layers is not None:
            if self.num_layers < num_layers:
                raise RuntimeError('Not all features are represented in the encoding circuit!')
            num_layers = self.num_layers
        ioff = 0
        for i in range(num_layers):
            if i != 0:
                if self.entangling_gate == 'iswap':
                    QC = entangle_layer_iswap(QC)
                elif self.entangling_gate == 'cx':
                    QC = entangle_layer_cx(QC)
                else:
                    raise ValueError('Unknown entangling gate:', self.entangling_gate)
            QC = build_layer(QC, features, ioff)
            ioff = ioff + (self.num_qubits * 3)
            # Without cycling, restart the feature index after exhausting all
            # features so that additional layers repeat the encoding.
            if (not self.cycling) and (ioff >= self.num_features):
                ioff = 0
        return QC
|
class HubregtsenEncodingCircuit(EncodingCircuitBase):
    """
    Creates the data reuploading encoding circuit as presented in reference [1].

    The encoding can be optionally repeated at the end to make the previous
    rotations not redundant in a fidelity kernel setting. The circuit is
    closed by default, i.e. the last qubit is entangled with the first one.

    Args:
        num_qubits (int): Number of qubits of the encoding circuit
        num_features (int): Dimension of the feature vector
        num_layers (int): Number of layers (default: 1)
        closed (bool): If true, the last and the first qubit are entangled;
            not necessarily hardware efficient! (default: true)
        final_encoding (bool): If True, the encoding is repeated at the end
            (default: False)

    References
    ----------
    [1]: T. Hubregtsen et al., "Training Quantum Embedding Kernels on
    Near-Term Quantum Computers", arXiv:2105.02276v1 (2021).
    <https://arxiv.org/abs/2105.02276>
    """

    def __init__(
        self,
        num_qubits: int,
        num_features: int,
        num_layers: int = 1,
        closed: bool = True,
        final_encoding=False,
    ) -> None:
        super().__init__(num_qubits, num_features)
        self.num_layers = num_layers
        self.closed = closed
        self.final_encoding = final_encoding

    @property
    def num_parameters(self) -> int:
        'The number of trainable parameters of the Hubregtsen encoding circuit.'
        # One Ry parameter per qubit and layer ...
        num_param = self.num_qubits * self.num_layers
        # ... plus one CRZ parameter per entangled pair (only for >2 qubits;
        # closing the ring adds one pair per layer).
        if self.num_qubits > 2:
            if self.closed:
                num_param += self.num_qubits * self.num_layers
            else:
                num_param += (self.num_qubits - 1) * self.num_layers
        return num_param

    @property
    def parameter_bounds(self) -> np.ndarray:
        'The bounds of the trainable parameters of the Hubregtsen encoding circuit.'
        bound_array = np.zeros((self.num_parameters, 2))
        ioff = 0
        for ilayer in range(self.num_layers):
            # Ry angles bounded by [-pi, pi] ...
            for i in range(self.num_qubits):
                bound_array[ioff] = [-np.pi, np.pi]
                ioff = ioff + 1
            # ... CRZ angles by [-2pi, 2pi]; same ordering as get_circuit().
            if self.num_qubits > 2:
                if self.closed:
                    istop = self.num_qubits
                else:
                    istop = self.num_qubits - 1
                for i in range(istop):
                    bound_array[ioff] = [-2.0 * np.pi, 2.0 * np.pi]
                    ioff = ioff + 1
        return bound_array

    @property
    def feature_bounds(self) -> np.ndarray:
        'The bounds of the features of the Hubregtsen encoding circuit.'
        return np.array([[-np.pi, np.pi]] * self.num_features)

    def get_params(self, deep: bool = True) -> dict:
        """
        Returns hyper-parameters and their values of the Hubregtsen encoding circuit.

        Args:
            deep (bool): If True, also the parameters for
                contained objects are returned (default=True).

        Return:
            Dictionary with hyper-parameters and values.
        """
        params = super().get_params()
        params['num_layers'] = self.num_layers
        params['closed'] = self.closed
        params['final_encoding'] = self.final_encoding
        return params

    def get_circuit(
        self,
        features: Union[ParameterVector, np.ndarray],
        parameters: Union[ParameterVector, np.ndarray],
    ) -> QuantumCircuit:
        """
        Generates and returns the circuit of the Hubregtsen encoding circuit.

        Args:
            features (Union[ParameterVector,np.ndarray]): Input vector of the
                features from which the gate inputs are obtained.
            parameters (Union[ParameterVector,np.ndarray]): Input vector of
                the parameters from which the gate inputs are obtained.

        Return:
            Returns the Hubregtsen circuit in qiskit QuantumCircuit format.
        """
        nfeatures = len(features)
        nparam = len(parameters)
        QC = QuantumCircuit(self.num_qubits)
        ioff = 0
        QC.h(range(self.num_qubits))
        for ilayer in range(self.num_layers):
            # Data encoding: rows alternate Rz / Rx until all features placed.
            n_feature_loop = int(np.ceil(self.num_features / self.num_qubits))
            for i in range(n_feature_loop * self.num_qubits):
                if ((i // self.num_qubits) % 2) == 0:
                    QC.rz(features[i % nfeatures], i % self.num_qubits)
                else:
                    QC.rx(features[i % nfeatures], i % self.num_qubits)
            # Trainable Ry rotations on every qubit.
            for i in range(self.num_qubits):
                QC.ry(parameters[ioff % nparam], i)
                ioff = ioff + 1
            # Trainable CRZ entangling (ring if closed), only for >2 qubits.
            if self.num_qubits > 2:
                if self.closed:
                    istop = self.num_qubits
                else:
                    istop = self.num_qubits - 1
                for i in range(istop):
                    QC.crz(parameters[ioff % nparam], i, (i + 1) % self.num_qubits)
                    ioff = ioff + 1
        if self.final_encoding:
            # Repeat the data encoding at the end.
            n_feature_loop = int(np.ceil(self.num_features / self.num_qubits))
            for i in range(n_feature_loop * self.num_qubits):
                # Bug fix: use the same row-parity rule as the per-layer
                # encoding above. The original used ceil(i / num_qubits) % 2,
                # which treated the first row inconsistently (only qubit 0
                # received Rz, all remaining qubits of the row received Rx).
                if ((i // self.num_qubits) % 2) == 0:
                    QC.rz(features[i % nfeatures], i % self.num_qubits)
                else:
                    QC.rx(features[i % nfeatures], i % self.num_qubits)
        return QC
|
class MultiControlEncodingCircuit(EncodingCircuitBase):
    """
    Encoding circuit with HZ encoding followed by controlled Rx, Ry, Rz rotations.

    The circuit is repeated for the number of layers. The circuit is closed by
    default, i.e. the last qubit is entangled with the first one. The encoding
    can be optionally repeated at the end to make the previous rotations not
    redundant in a fidelity kernel setting.

    Args:
        num_qubits (int): Number of qubits of the MultiControlEncodingCircuit
            encoding circuit
        num_features (int): Dimension of the feature vector
        num_layers (int): Number of layers (default: 1)
        closed (bool): If true, the last and the first qubit are entangled;
            not necessarily hardware efficient! (default: true)
        final_encoding (bool): If True, the encoding is repeated at the end
            (default: False)
    """

    def __init__(
        self,
        num_qubits: int,
        num_features: int,
        num_layers: int = 1,
        closed: bool = True,
        final_encoding=False,
    ) -> None:
        super().__init__(num_qubits, num_features)
        if self.num_qubits < 2:
            raise ValueError('MultiControlEncodingCircuit requires at least two qubits.')
        self.num_layers = num_layers
        self.closed = closed
        self.final_encoding = final_encoding

    @property
    def num_parameters(self) -> int:
        'The number of trainable parameters of the MultiControlEncodingCircuit encoding circuit.'
        # Three controlled rotations per entangled pair; closing the ring
        # adds one more pair per layer.
        count = (3 * (self.num_qubits - 1)) * self.num_layers
        if self.closed:
            count += 3 * self.num_layers
        return count

    @property
    def parameter_bounds(self) -> np.ndarray:
        'The bounds of the trainable parameters of the MultiControlEncodingCircuit encoding circuit.'
        return np.array([[-2.0 * np.pi, 2.0 * np.pi]] * self.num_parameters)

    def get_params(self, deep: bool = True) -> dict:
        """
        Returns hyper-parameters and their values of the MultiControlEncodingCircuit encoding circuit.

        Args:
            deep (bool): If True, also the parameters for
                contained objects are returned (default=True).

        Return:
            Dictionary with hyper-parameters and values.
        """
        params = super().get_params()
        params.update(
            num_layers=self.num_layers,
            closed=self.closed,
            final_encoding=self.final_encoding,
        )
        return params

    def get_circuit(
        self,
        features: Union[ParameterVector, np.ndarray],
        parameters: Union[ParameterVector, np.ndarray],
    ) -> QuantumCircuit:
        """
        Returns the circuit of the MultiControlEncodingCircuit encoding circuit.

        Args:
            features (Union[ParameterVector,np.ndarray]): Input vector of the
                features from which the gate inputs are obtained.
            parameters (Union[ParameterVector,np.ndarray]): Input vector of
                the parameters from which the gate inputs are obtained.

        Return:
            Returns the circuit in Qiskit's QuantumCircuit format.
        """
        if self.num_qubits < 2:
            raise ValueError('MultiControlEncodingCircuit requires at least two qubits.')
        num_features_in = len(features)
        num_params_in = len(parameters)
        QC = QuantumCircuit(self.num_qubits)
        param_idx = 0
        # Last control qubit: include the wrap-around pair only when closed.
        pair_stop = self.num_qubits if self.closed else self.num_qubits - 1
        for _layer in range(self.num_layers):
            # HZ feature encoding on every qubit.
            QC.h(range(self.num_qubits))
            for qubit in range(self.num_qubits):
                QC.rz(features[qubit % num_features_in], qubit)
            # Controlled rotations on even pairs (0,1), (2,3), ... then on odd
            # pairs (1,2), (3,4), ...; the odd pass is always reached because
            # the guard above enforces num_qubits >= 2.
            for start in (0, 1):
                for ctrl in range(start, pair_stop, 2):
                    target = (ctrl + 1) % self.num_qubits
                    for gate in (QC.crx, QC.cry, QC.crz):
                        gate(parameters[param_idx % num_params_in], ctrl, target)
                        param_idx += 1
        if self.final_encoding:
            # Repeat the Rz feature encoding at the end.
            for qubit in range(self.num_qubits):
                QC.rz(features[qubit % num_features_in], qubit)
        return QC
|
class ParamZFeatureMap(EncodingCircuitBase):
    """
    Parameterized Z feature map with optional CNOT gates between the default layers.

    This encoding circuit is based on Qiskit's
    :class:`qiskit.circuit.library.ZFeatureMap`.

    Args:
        num_qubits (int): Number of qubits
        num_features (int): Dimension of the feature vector
        num_layers (int): Number of layers of the encoding circuit (default: 2)
        entangling (bool): If true, entangling gates are added between the
            layers (default: False)
    """

    def __init__(
        self,
        num_qubits: int,
        num_features: int,
        num_layers: int = 2,
        entangling: bool = False,
    ) -> None:
        super().__init__(num_qubits, num_features)
        self._num_layers = num_layers
        self._entangling = entangling

    @property
    def num_parameters(self) -> int:
        'The number of trainable parameters of the encoding circuit.'
        # One phase parameter per feature (or qubit, whichever is larger) and layer.
        return max(self._num_qubits, self._num_features) * self._num_layers

    @property
    def num_layers(self) -> int:
        'The number of layers of the encoding circuit.'
        return self._num_layers

    def get_params(self, deep: bool = True) -> dict:
        """
        Returns hyper-parameters and their values of the parameterized Z feature map.

        Args:
            deep (bool): If True, also the parameters for
                contained objects are returned (default=True).

        Return:
            Dictionary with hyper-parameters and values.
        """
        params = super().get_params()
        params.update(num_layers=self._num_layers, entangling=self._entangling)
        return params

    def get_circuit(
        self,
        features: Union[ParameterVector, np.ndarray],
        parameters: Union[ParameterVector, np.ndarray],
    ) -> QuantumCircuit:
        """
        Returns the circuit of the parameterized Z feature map.

        Args:
            features (Union[ParameterVector,np.ndarray]): Input vector of the
                features from which the gate inputs are obtained.
            parameters (Union[ParameterVector,np.ndarray]): Input vector of
                the parameters from which the gate inputs are obtained.

        Return:
            The circuit of the parameterized Z feature map as a QuantumCircuit.
        """
        num_features_in = len(features)
        num_params_in = len(parameters)
        circuit = QuantumCircuit(self._num_qubits)
        param_idx = 0
        for _ in range(self._num_layers):
            for idx in range(max(self._num_qubits, self._num_features)):
                # Hadamard only once per physical qubit per layer.
                if idx < self._num_qubits:
                    circuit.h(idx)
                circuit.p(
                    parameters[param_idx % num_params_in]
                    * features[idx % num_features_in],
                    idx % self._num_qubits,
                )
                param_idx += 1
            if self._entangling:
                # NOTE(review): the CX arrangement depends on the parity of the
                # *total* layer count, so every layer gets the same pattern;
                # possibly the running layer index was intended here — confirm
                # upstream before changing (behavior preserved as-is).
                if (self._num_layers % 2) == 0:
                    for q in range(self._num_qubits - 1):
                        circuit.cx(q, q + 1)
                else:
                    for q in range(1, self._num_qubits - 1, 2):
                        circuit.cx(q, q + 1)
        return circuit
|
class QiskitEncodingCircuit(EncodingCircuitBase):
    """
    Wrapper to create sQUlearn encoding circuits from the Qiskit circuit library.

    Args:
        qiskit_circuit (Union[BlueprintCircuit, Callable, QuantumCircuit]): A
            Qiskit circuit or a Qiskit circuit-library factory function.
        mode (str): Option for considering the circuit parameters as features
            or trainable parameters. Can be ``features``, ``parameters`` or
            ``auto``. With ``auto``, the mode is determined automatically from
            the parameter names. (default: ``auto``)
        decompose (bool): If True, the decompose method is called on the
            inputted circuit object. (default: ``False``; note that a callable
            is always decomposed after instantiation)
        feature_label (str): The label of the parameters that are considered
            as features. (default: ``x``)
        parameter_label (str): The label of the parameters that are considered
            as trainable parameters. (default: ``θp``)
        **kwargs: Arguments for the Qiskit circuit-library function if it is
            passed as a callable.
    """
    def __init__(self, qiskit_circuit: Union[(BlueprintCircuit, Callable, QuantumCircuit)], mode: str='auto', decompose: bool=False, feature_label: str='x', parameter_label: str='θp', **kwargs) -> None:
        # A callable (circuit-library factory) is instantiated with **kwargs
        # and always decomposed; a ready circuit is decomposed only on request.
        if callable(qiskit_circuit):
            self._qiskit_circuit = qiskit_circuit(**kwargs).decompose()
        elif decompose:
            self._qiskit_circuit = qiskit_circuit.decompose()
        else:
            self._qiskit_circuit = qiskit_circuit
        self._num_qubits = self._qiskit_circuit.num_qubits
        self._mode = mode
        self._feature_label = feature_label
        self._parameter_label = parameter_label
        # Normalize the mode and count features/parameters accordingly.
        if ((self._mode.lower() == 'x') or (self._mode.lower() == 'features')):
            # Every circuit parameter is treated as a feature.
            self._num_features = len(self._qiskit_circuit.parameters)
            self._num_parameters = 0
            self._mode = 'x'
        elif ((self._mode.lower() == 'p') or (self._mode.lower() == 'parameters') or (self._mode.lower() == 'param')):
            # Every circuit parameter is treated as trainable.
            self._num_parameters = len(self._qiskit_circuit.parameters)
            self._num_features = 0
            self._mode = 'p'
        elif (self._mode.lower() == 'auto'):
            if (len(self._qiskit_circuit.parameters) == 0):
                # Parameter-free circuit: nothing to assign.
                self._num_features = 0
                self._num_parameters = 0
                self._mode = 'empty'
            else:
                set_of_param_names = [p.name for p in self._qiskit_circuit.parameters]
                # NOTE: iterating a string label yields its *characters*, so
                # each character of the label is probed as '<char>[0]'.
                param_available = False
                for label in self._parameter_label:
                    if ((label + '[0]') in set_of_param_names):
                        param_available = True
                        break
                x_available = False
                for label in self._feature_label:
                    if ((label + '[0]') in set_of_param_names):
                        x_available = True
                        break
                if (param_available and x_available):
                    # Mixed circuit: classify each parameter by substring match
                    # against the label characters.
                    self._num_features = 0
                    self._num_parameters = 0
                    for param in self._qiskit_circuit.parameters:
                        if (True in [(label in param.name) for label in self._parameter_label]):
                            self._num_parameters += 1
                        elif (True in [(label in param.name) for label in self._feature_label]):
                            self._num_features += 1
                        else:
                            raise RuntimeError(('Could not assign parameter ' + param.name))
                    self._mode = 'both'
                elif (param_available and (not x_available)):
                    self._num_parameters = len(self._qiskit_circuit.parameters)
                    self._num_features = 0
                    self._mode = 'p'
                elif (x_available and (not param_available)):
                    self._num_features = len(self._qiskit_circuit.parameters)
                    self._num_parameters = 0
                    self._mode = 'x'
                else:
                    raise RuntimeError('Automatic mode determination failed!')
        else:
            raise ValueError('The type {} is not supported!'.format(self._mode))
    @property
    def num_parameters(self) -> int:
        'The number of trainable parameters of the Qiskit encoding circuit.'
        return self._num_parameters
    @property
    def parameter_bounds(self) -> np.ndarray:
        """The bounds of the trainable parameters of the Qiskit encoding circuit.

        Here arbitrarily chosen to be [-pi, pi] for all parameters.
        """
        return np.array(([[(- np.pi), np.pi]] * self.num_parameters))
    def get_params(self, deep: bool=True) -> dict:
        """
        Returns hyper-parameters and their values of the Qiskit encoding circuit.

        No hyper-parameters are available for the Qiskit encoding circuit!

        Args:
            deep (bool): If True, also the parameters for
                contained objects are returned (default=True).

        Return:
            Dictionary with hyper-parameters and values.
        """
        params = {'qiskit_circuit': self._qiskit_circuit}
        return params
    def get_circuit(self, features: Union[(ParameterVector, np.ndarray)], parameters: Union[(ParameterVector, np.ndarray)]) -> QuantumCircuit:
        """
        Returns the circuit of the Qiskit Encoding Circuit.

        Args:
            features (Union[ParameterVector,np.ndarray]): Input vector of the
                features from which the gate inputs are obtained.
            parameters (Union[ParameterVector,np.ndarray]): Input vector of the
                parameters from which the gate inputs are obtained.

        Return:
            The circuit of the Qiskit Encoding Circuit with the assigned
            parameters.
        """
        if (self._mode.lower() == 'both'):
            # Mixed circuit: assign per-parameter using the same substring
            # classification as in __init__.
            dictionary = {}
            i = 0
            j = 0
            for param in self._qiskit_circuit.parameters:
                if (True in [(label in param.name) for label in self._parameter_label]):
                    dictionary[param] = parameters[i]
                    i += 1
                elif (True in [(label in param.name) for label in self._feature_label]):
                    dictionary[param] = features[j]
                    j += 1
                else:
                    raise RuntimeError(('Could not assign parameter ' + param.name))
        elif (self._mode.lower() == 'x'):
            # zip would silently truncate; the explicit length check below
            # catches any mismatch before assignment.
            dictionary = {p: v for (p, v) in zip(self._qiskit_circuit.parameters, features)}
            if (len(self._qiskit_circuit.parameters) != len(features)):
                raise ValueError('The number of features {} does not match!'.format(len(features)))
        elif (self._mode.lower() == 'p'):
            dictionary = {p: v for (p, v) in zip(self._qiskit_circuit.parameters, parameters)}
            if (len(self._qiskit_circuit.parameters) != len(parameters)):
                raise ValueError('The number of parameters {} does not match!'.format(len(parameters)))
        elif (self._mode.lower() == 'empty'):
            # Parameter-free circuit: return as-is.
            return self._qiskit_circuit
        else:
            raise ValueError('The type {} is not supported!'.format(self._mode))
        return self._qiskit_circuit.assign_parameters(dictionary, inplace=False)
|
class YZ_CX_EncodingCircuit(EncodingCircuitBase):
    """
    Creates the YZ-CX Encoding Circuit from reference [1].

    One combination of Ry and Rz is considered as a single layer.

    Args:
        num_qubits (int): Number of qubits of the YZ-CX encoding circuit
        num_features (int): Dimension of the feature vector
        num_layers (int): Number of layers (default: 1)
        c (float): Prefactor :math:`c` for rescaling the data (default: 1.0)

    References
    ----------
    [1]: T. Haug, C. N. Self and M. S. Kim, "Quantum machine learning of large
    datasets using randomized measurements", arXiv:2108.01039v3 (2021).
    <https://arxiv.org/abs/2108.01039v3>
    """

    def __init__(
        self,
        num_qubits: int,
        num_features: int,
        num_layers: int = 1,
        c: float = 1.0,
    ) -> None:
        super().__init__(num_qubits, num_features)
        self._num_layers = num_layers
        self._c = c

    @property
    def num_parameters(self) -> int:
        'The number of trainable parameters of the YZ-CX encoding circuit.'
        # One Ry and one Rz parameter per qubit and layer.
        return (2 * self.num_qubits) * self._num_layers

    @property
    def parameter_bounds(self) -> np.ndarray:
        'The bounds of the trainable parameters of the YZ-CX encoding circuit.'
        return np.array([[-np.pi, np.pi]] * self.num_parameters)

    @property
    def num_layers(self) -> int:
        'The number of layers of the YZ-CX encoding circuit.'
        return self._num_layers

    # Bug fix: return annotation corrected from int to float — the prefactor
    # is stored as a float (default 1.0).
    @property
    def c(self) -> float:
        'The prefactor :math:`c` of the YZ-CX encoding circuit.'
        return self._c

    def get_params(self, deep: bool = True) -> dict:
        """
        Returns hyper-parameters and their values of the YZ-CX encoding circuit.

        Args:
            deep (bool): If True, also the parameters for
                contained objects are returned (default=True).

        Return:
            Dictionary with hyper-parameters and values.
        """
        params = super().get_params()
        params['num_layers'] = self._num_layers
        params['c'] = self._c
        return params

    def get_circuit(
        self,
        features: Union[ParameterVector, np.ndarray],
        parameters: Union[ParameterVector, np.ndarray],
    ) -> QuantumCircuit:
        """
        Return the circuit of the YZ-CX encoding circuit.

        Args:
            features (Union[ParameterVector,np.ndarray]): Input vector of the
                features from which the gate inputs are obtained.
            parameters (Union[ParameterVector,np.ndarray]): Input vector of
                the parameters from which the gate inputs are obtained.

        Return:
            Returns the circuit in qiskit format.
        """
        nfeature = len(features)
        nparam = len(parameters)
        QC = QuantumCircuit(self.num_qubits)
        ioff = 0
        for ilayer in range(self.num_layers):
            for i in range(self.num_qubits):
                # Ry and Rz, each with a trainable offset plus the rescaled feature.
                QC.ry(parameters[ioff % nparam] + (self.c * features[i % nfeature]), i)
                ioff = ioff + 1
                QC.rz(parameters[ioff % nparam] + (self.c * features[i % nfeature]), i)
                ioff = ioff + 1
            # Alternating CX pattern: even pairs on even layers, odd pairs on odd.
            if (ilayer % 2) == 0:
                for i in range(0, self.num_qubits - 1, 2):
                    QC.cx(i, i + 1)
            else:
                for i in range(1, self.num_qubits - 1, 2):
                    QC.cx(i, i + 1)
        return QC
|
class EncodingCircuitDerivatives():
'\n Class for automatic differentiation of encoding circuits.\n\n This class allows to compute derivatives of a encoding circuit with respect to its parameters\n by utilizing the parameter-shift rule.\n The derivatives can be obtained by the method :meth:`get_derivative`.\n The type of derivative can be specified by either a string (see table below)\n or a ParameterVector or (a list) of ParameterElements that can be accessed\n via :meth:`feature_vector` or :meth:`parameter_vector`, respectively.\n\n .. list-table:: Strings that are recognized by the :meth:`get_derivative` method\n :widths: 25 75\n :header-rows: 1\n\n * - String\n - Derivative\n * - ``"I"``\n - Identity Operation (returns the encoding circuit circuit)\n * - ``"dx"``\n - Gradient with respect to feature :math:`x`:\n :math:`\\nabla_x = \\big( \\frac{\\partial}{\\partial x_1},\\ldots,\n \\frac{\\partial}{\\partial x_n} \\big)`\n * - ``"dp"``\n - Gradient with respect to parameter :math:`p`:\n :math:`\\nabla_p = \\big( \\frac{\\partial}{\\partial p_1},\\ldots,\n \\frac{\\partial}{\\partial p_m} \\big)`\n * - ``"dxdx"``\n - Hessian with respect to feature :math:`x`:\n :math:`H^x_{ij} = \\frac{\\partial^2}{\\partial x_i \\partial x_j}`\n * - ``"dpdxdx"``\n - Derivative of the feature Hessian with respect to parameter :math:`p`:\n :math:`\\nabla_p H^x_{ij} = \\big( \\frac{\\partial H^x_{ij}}{\\partial p_1},\\ldots,\n \\frac{\\partial H^x_{ij}}{\\partial p_m} \\big)`\n * - ``laplace``\n - Laplace operator with respect to :math:`x`:\n :math:`\\Delta = \\nabla^2 = \\sum_i \\frac{\\partial^2}{\\partial x^2_i}`\n * - ``laplace_dp``\n - Derivative of the Laplacian with respect to parameter :math:`p`:\n :math:`\\nabla_p \\circ \\Delta = \\big( \\frac{\\partial }{\\partial p_1}\\Delta,\\ldots,\n \\frac{\\partial}{\\partial p_m} \\Delta \\big)`\n * - ``"dpdp"``\n - Hessian with respect to parameter :math:`p`:\n :math:`H^p_{ij} = \\frac{\\partial^2}{\\partial p_i \\partial p_j}`\n * - ``"dxdp"`` (or ``"dxdp"``)\n - 
Mixed Hessian with respect to feature :math:`x` and parameter :math:`p`:\n :math:`H^{xp}_{ij} = \\frac{\\partial^2}{\\partial x_i \\partial p_j}`\n\n **Example: Encoding Circuit gradient with respect to the trainable parameters**\n\n .. jupyter-execute::\n\n from squlearn.encoding_circuit import HubregtsenEncodingCircuit\n from squlearn.encoding_circuit.encoding_circuit_derivatives import EncodingCircuitDerivatives\n fm = HubregtsenEncodingCircuit(num_qubits=2, num_features=2, num_layers=2)\n fm_deriv = EncodingCircuitDerivatives(fm)\n grad = fm_deriv.get_derivative("dp")\n\n **Example: Derivative with respect to only the first trainable parameter**\n\n .. jupyter-execute::\n\n from squlearn.encoding_circuit import HubregtsenEncodingCircuit\n from squlearn.encoding_circuit.encoding_circuit_derivatives import EncodingCircuitDerivatives\n fm = HubregtsenEncodingCircuit(num_qubits=2, num_features=2, num_layers=2)\n fm_deriv = EncodingCircuitDerivatives(fm)\n dp0 = fm_deriv.get_derivative((fm_deriv.parameter_vector[0],))\n\n\n Args:\n encoding_circuit (EncodingCircuitBase): Encoding circuit to differentiate\n optree_caching (bool): If True, the OpTree expressions are cached for faster\n evaluation. (default: True)\n '
def __init__(self, encoding_circuit: EncodingCircuitBase, optree_caching: bool = True):
    """Set up the parameter vectors, the transpiled circuit and the OpTree cache."""
    self.encoding_circuit = encoding_circuit

    # Symbolic feature vector x and trainable-parameter vector p of the circuit.
    self._x = ParameterVector('x', encoding_circuit.num_features)
    self._p = ParameterVector('p', encoding_circuit.num_parameters)

    raw_circuit = encoding_circuit.get_circuit(self._x, self._p)
    # Record which instruction names occur before transpilation.
    self._instruction_set = list(set(raw_circuit.count_ops()))
    # Rewrite the circuit into the gate set supported by the differentiation routines.
    self._circuit = OpTree.derivative.transpile_to_supported_instructions(raw_circuit)
    self._optree_start = OpTreeCircuit(self._circuit)
    self.num_qubits = self._circuit.num_qubits

    self._optree_caching = optree_caching
    self._optree_cache = {}
    if optree_caching:
        # Pre-populate the identity entry so 'I' lookups need no rebuild.
        self._optree_cache['I'] = OpTreeCircuit(self._circuit)
def get_derivative(self, derivative: Union[(str, tuple, list)]) -> OpTreeElementBase:
    """Determine the derivative of the encoding circuit circuit.

    Args:
        derivative (str or tuple): String or tuple of parameters for specifying
            the derivation.

    Return:
        Derivative circuit in OpTree format.
    """
    if isinstance(derivative, str):
        if derivative == 'I':
            return self._optree_start
        if derivative == 'laplace':
            # Sum of second derivatives d^2/dx_i^2 over all features.
            return OpTreeSum(
                [self._differentiation_from_tuple((xi, xi)).copy() for xi in self._x]
            )
        if derivative == 'laplace_dp':
            # Parameter gradient of the Laplacian.
            return OpTreeSum(
                [self._differentiation_from_tuple((self._p, xi, xi)).copy() for xi in self._x]
            )
        # Remaining string commands map directly onto differentiation tuples.
        tuple_for_string = {
            'dx': (self._x,),
            'dxdx': (self._x, self._x),
            'dpdxdx': (self._p, self._x, self._x),
            'dp': (self._p,),
            'dpdp': (self._p, self._p),
            'dpdx': (self._p, self._x),
            'dxdp': (self._x, self._p),
        }
        if derivative in tuple_for_string:
            return self._differentiation_from_tuple(tuple_for_string[derivative]).copy()
        raise ValueError('Unknown string command:', derivative)
    if isinstance(derivative, tuple):
        return self._differentiation_from_tuple(derivative)
    if isinstance(derivative, list):
        return self._differentiation_from_tuple((derivative,))
    raise TypeError('Input is neither string nor tuple, but:', type(derivative))
def _differentiation_from_tuple(self, diff_tuple: tuple) -> OpTreeElementBase:
    """Recursive routine for automatically differentiating the encoding circuit.

    Variables for the differentiation are supplied by a tuple, applied from
    left to right: ``(x, p)`` yields ``dx dp PQC(x, p)``.

    Args:
        diff_tuple (tuple): Tuple containing ParameterVectors,
            ParameterExpressions or strings determining the derivation.

    Return:
        Derivative circuit in OpTree format.
    """

    def helper_hash(diff):
        # Build a hashable cache key; lists are tagged with 'list' so that a
        # list and a tuple of the same elements get distinct keys.
        if isinstance(diff, list):
            return ('list',) + tuple(helper_hash(d) for d in diff)
        elif isinstance(diff, tuple):
            return tuple(helper_hash(d) for d in diff)
        else:
            return diff

    if diff_tuple == ():
        # Base case of the recursion: the underlying (underived) circuit.
        return self._optree_start.copy()

    # Compute the cache key once instead of re-hashing on every access.
    cache_key = helper_hash((diff_tuple,))
    if self._optree_caching and cache_key in self._optree_cache:
        return self._optree_cache[cache_key].copy()

    # Differentiate the tail first, then apply the leftmost variable.
    circ = self._optree_differentiation(
        self._differentiation_from_tuple(diff_tuple[1:]), diff_tuple[0]
    )
    if self._optree_caching:
        self._optree_cache[cache_key] = circ
    return circ
@property
def parameter_vector(self) -> ParameterVector:
    """Qiskit ``ParameterVector`` ``p`` of the trainable parameters used in the encoding circuit."""
    return self._p
@property
def feature_vector(self) -> ParameterVector:
    """Qiskit ``ParameterVector`` ``x`` of the features used in the encoding circuit."""
    return self._x
@property
def num_parameters(self) -> int:
    """Number of trainable parameters of the encoding circuit."""
    return len(self.parameter_vector)
@property
def num_features(self) -> int:
    """Number of features of the encoding circuit."""
    return len(self.feature_vector)
def assign_parameters(self, optree: OpTreeElementBase, features: np.ndarray, parameters: np.ndarray) -> OpTreeElementBase:
    """
    Assigns numerical values to the ParameterVector elements of the encoding circuit circuit.

    Args:
        optree (OperatorBase): OpTree object to be assigned.
        features (np.ndarray): Numerical values of the feature vector.
        parameters (np.ndarray): Numerical values of the parameter vector.

    Return:
        OpTree object with assigned numerical values. ``None`` input yields
        ``None``; if either input is batched, the result is an ``OpTreeList``
        over the batch.
    """
    if (optree is None):
        return None
    # Parallel work lists: value batches, matching parameter vectors, and
    # flags telling whether each input was multi-dimensional (batched).
    todo_list = []
    multi_list = []
    param_list = []
    if (features is not None):
        # adjust_features presumably reshapes to a batch of feature rows and
        # reports whether the input was batched — TODO confirm against helper.
        (xx, multi_x) = adjust_features(features, len(self._x))
        todo_list.append(xx)
        param_list.append(self._x)
        multi_list.append(multi_x)
    if (parameters is not None):
        (pp, multi_p) = adjust_parameters(parameters, len(self._p))
        todo_list.append(pp)
        param_list.append(self._p)
        multi_list.append(multi_p)
    def rec_assign(dic, todo_list, param_list, multi_list):
        # Recursively build the assignment dictionary one variable group at a
        # time; each recursion level handles one (values, params) pair.
        if (len(todo_list) <= 0):
            return None
        return_list = []
        for x_ in todo_list[0]:
            # Bind each parameter of this group to its numerical value.
            for (A, B) in zip(param_list[0], x_):
                dic[A] = B
            if (len(multi_list[1:]) > 0):
                # More groups remain: recurse on a copy so sibling branches
                # don't see each other's bindings.
                return_list.append(rec_assign(dic.copy(), todo_list[1:], param_list[1:], multi_list[1:]))
            else:
                # Innermost level: perform the actual parameter assignment.
                return_list.append(OpTree.assign_parameters(optree, dic))
        if multi_list[0]:
            # Batched input at this level -> keep all results as a list.
            return OpTreeList(return_list)
        else:
            return return_list[0]
    return rec_assign({}, todo_list, param_list, multi_list)
def _optree_differentiation(self, optree: OpTreeElementBase, parameters: Union[(list, tuple, ParameterVectorElement, ParameterVector)]) -> OpTreeElementBase:
    """Routine for automatic differentiation based on qiskit routines.

    Args:
        optree: Input OpTree expression.
        parameters (list | ParameterVector): Variables used in the
            differentiation (all must belong to the same vector).

    Returns:
        OpTree structure of the differentiated input optree.
    """
    # Normalize the input to a list-like sequence of parameter elements.
    if isinstance(parameters, ParameterVectorElement):
        parameters = [parameters]
    elif isinstance(parameters, tuple):
        parameters = list(parameters)
    if len(parameters) == 0:
        return OpTreeList([])
    # All variables must stem from the same ParameterVector (same base name).
    base_name = parameters[0].name.split('[', 1)[0]
    if any(param.name.split('[', 1)[0] != base_name for param in parameters):
        raise TypeError('Differentiable variables are not the same type.')
    return OpTree.simplify(OpTree.derivative.differentiate(optree, parameters))
|
class VariableGroup:
    """
    Class for one variable group (e.g. ``x1``, ``x2``, ``p1``, ...) which
    stores the dimension of one variable.
    """

    def __init__(self, variable_name: str, size=None):
        """
        Args:
            variable_name (str): Name of the variable group, visible when
                drawing the circuit of an encoding circuit using this group.
            size (int): Dimension of the variable group. If ``None``, the
                dimension is open-ended and tracked via
                ``total_variables_used``.
        """
        self.variable_name = variable_name
        self.size = size
        # Running index into the group while a circuit is being built.
        self.index = 0
        if size is None:
            # Counter of variables consumed so far; only needed when the
            # dimension is not fixed up front (it could grow indefinitely).
            self.total_variables_used = 0

    def __hash__(self):
        """Hash based solely on the variable name.

        NOTE(review): ``__eq__`` is not defined, so equality stays identity
        based even though equal names hash alike.
        """
        return hash(self.variable_name)

    @property
    def num_variables(self):
        """Total number of variables of this group.

        Returns the fixed ``size`` if one was given; otherwise the number of
        variables used so far.
        """
        if self.size is None:
            return self.total_variables_used
        return self.size

    def get_param_vector(self):
        """Creates a qiskit parameter vector for this variable group."""
        return ParameterVector(self.variable_name, self.num_variables)

    def increase_used_number_of_variables(self, number_of_used_variables: int):
        """Increases the used-variable counter if no fixed size is given
        (important for initializing the qiskit parameter vectors via
        ``num_variables``). Negative values decrease the counter.
        """
        if self.size is None:
            self.total_variables_used += number_of_used_variables

    def increase_index(self, number_of_used_variables: int):
        """Only for get_circuit: advances the index of the variable group while building the circuit."""
        self.index += number_of_used_variables

    def set_index_to_zero(self):
        """Sets the index to zero."""
        self.index = 0
|
class _operation():
'\n parent class for a quantum operation. Each gate layer stands for one operation.\n '
def __init__(self, num_qubits: int, variablegroup_tuple: tuple, map=None):
'\n Attributes:\n -----------\n\n Attributes:\n num_qubits: The number of all qubits\n variablegroup_tuple: A tuple with every variable group used in this operation\n ent_strategy: the entangling strategy: if None, than the program knows, that this is not an entangling layer\n map: A default map, that is used, if the operation has exactly 2 variable groups and no given map (by user)\n default_map: A boolean, that checks, if the user initializes his own map\n '
self.num_qubits = num_qubits
self.variablegroup_tuple = variablegroup_tuple
self.ent_strategy = None
if (map == None):
self.map = (lambda x, y: (x * y))
self.default_map = True
else:
self.map = map
self.default_map = False
def get_circuit(self, var_param_assignment: dict):
return None
|
class _H_operation(_operation):
    """Layer operation applying a Hadamard gate to every qubit."""

    def get_circuit(self, var_param_assignment=None):
        circuit = QuantumCircuit(self.num_qubits)
        for qubit in range(self.num_qubits):
            circuit.h(qubit)
        return circuit
|
class _X_operation(_operation):
    """Layer operation applying a Pauli-X gate to every qubit."""

    def get_circuit(self, var_param_assignment=None):
        circuit = QuantumCircuit(self.num_qubits)
        for qubit in range(self.num_qubits):
            circuit.x(qubit)
        return circuit
|
class _Y_operation(_operation):
    """Layer operation applying a Pauli-Y gate to every qubit."""

    def get_circuit(self, var_param_assignment=None):
        circuit = QuantumCircuit(self.num_qubits)
        for qubit in range(self.num_qubits):
            circuit.y(qubit)
        return circuit
|
class _Z_operation(_operation):
    """Layer operation applying a Pauli-Z gate to every qubit."""

    def get_circuit(self, var_param_assignment=None):
        circuit = QuantumCircuit(self.num_qubits)
        for qubit in range(self.num_qubits):
            circuit.z(qubit)
        return circuit
|
class _Id_operation(_operation):
    """Layer operation applying an identity gate to every qubit."""

    def get_circuit(self, var_param_assignment=None):
        circuit = QuantumCircuit(self.num_qubits)
        for qubit in range(self.num_qubits):
            circuit.id(qubit)
        return circuit
|
class _S_operation(_operation):
    """Layer operation applying an S (phase) gate to every qubit."""

    def get_circuit(self, var_param_assignment=None):
        circuit = QuantumCircuit(self.num_qubits)
        for qubit in range(self.num_qubits):
            circuit.s(qubit)
        return circuit
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.