body_hash stringlengths 64 64 | body stringlengths 23 109k | docstring stringlengths 1 57k | path stringlengths 4 198 | name stringlengths 1 115 | repository_name stringlengths 7 111 | repository_stars float64 0 191k | lang stringclasses 1 value | body_without_docstring stringlengths 14 108k | unified stringlengths 45 133k |
|---|---|---|---|---|---|---|---|---|---|
e5036776ca59387c210c6692554bc5b593349c442d6a76856cf9296bc58560d5 | def visitNode(self, p):
'Init any settings found in node p.'
p = p.copy()
munge = g.app.config.munge
(kind, name, val) = self.parseHeadline(p.h)
kind = munge(kind)
isNone = (val in ('None', 'none', '', None))
if (kind is None):
pass
elif (kind == 'settings'):
pass
elif ((kind in self.basic_types) and isNone):
self.set(p, kind, name, None)
elif ((kind in self.control_types) or (kind in self.basic_types)):
f = self.dispatchDict.get(kind)
if f:
try:
return f(p, kind, name, val)
except Exception:
g.es_exception()
else:
g.pr('*** no handler', kind)
return None | Init any settings found in node p. | leo/core/leoConfig.py | visitNode | thomasbuttler/leo-editor | 1,550 | python | def visitNode(self, p):
p = p.copy()
munge = g.app.config.munge
(kind, name, val) = self.parseHeadline(p.h)
kind = munge(kind)
isNone = (val in ('None', 'none', , None))
if (kind is None):
pass
elif (kind == 'settings'):
pass
elif ((kind in self.basic_types) and isNone):
self.set(p, kind, name, None)
elif ((kind in self.control_types) or (kind in self.basic_types)):
f = self.dispatchDict.get(kind)
if f:
try:
return f(p, kind, name, val)
except Exception:
g.es_exception()
else:
g.pr('*** no handler', kind)
return None | def visitNode(self, p):
p = p.copy()
munge = g.app.config.munge
(kind, name, val) = self.parseHeadline(p.h)
kind = munge(kind)
isNone = (val in ('None', 'none', , None))
if (kind is None):
pass
elif (kind == 'settings'):
pass
elif ((kind in self.basic_types) and isNone):
self.set(p, kind, name, None)
elif ((kind in self.control_types) or (kind in self.basic_types)):
f = self.dispatchDict.get(kind)
if f:
try:
return f(p, kind, name, val)
except Exception:
g.es_exception()
else:
g.pr('*** no handler', kind)
return None<|docstring|>Init any settings found in node p.<|endoftext|> |
ed5ac1724061799024a56cb1b43be0ce03b7806e0b6d4141c73d25cc7b6724b0 | def filter_features(dataset):
'\n POSTag filtering of noun, adjective, verb and adverb.\n '
filtered_dataset = []
label_tags = []
for entry in dataset:
valid_tokens = []
for (n, tag) in enumerate(entry['features']['tags']):
if (tag in ['NOUN', 'ADJ', 'VERB', 'ADV']):
valid_tokens.append(n)
if (entry['labels']['bio_map'][n] != 'O'):
label_tags.append(entry['features']['tags'][n])
for (feature_name, value) in entry['features'].items():
entry['features'][feature_name] = [value[i] for i in valid_tokens]
for (label_name, value) in entry['labels'].items():
entry['labels'][label_name] = [value[i] for i in valid_tokens]
filtered_dataset.append(entry)
return (filtered_dataset, label_tags) | POSTag filtering of noun, adjective, verb and adverb. | 1_count_ml.py | filter_features | Giovani-Merlin/PMI_ATE | 0 | python | def filter_features(dataset):
'\n \n '
filtered_dataset = []
label_tags = []
for entry in dataset:
valid_tokens = []
for (n, tag) in enumerate(entry['features']['tags']):
if (tag in ['NOUN', 'ADJ', 'VERB', 'ADV']):
valid_tokens.append(n)
if (entry['labels']['bio_map'][n] != 'O'):
label_tags.append(entry['features']['tags'][n])
for (feature_name, value) in entry['features'].items():
entry['features'][feature_name] = [value[i] for i in valid_tokens]
for (label_name, value) in entry['labels'].items():
entry['labels'][label_name] = [value[i] for i in valid_tokens]
filtered_dataset.append(entry)
return (filtered_dataset, label_tags) | def filter_features(dataset):
'\n \n '
filtered_dataset = []
label_tags = []
for entry in dataset:
valid_tokens = []
for (n, tag) in enumerate(entry['features']['tags']):
if (tag in ['NOUN', 'ADJ', 'VERB', 'ADV']):
valid_tokens.append(n)
if (entry['labels']['bio_map'][n] != 'O'):
label_tags.append(entry['features']['tags'][n])
for (feature_name, value) in entry['features'].items():
entry['features'][feature_name] = [value[i] for i in valid_tokens]
for (label_name, value) in entry['labels'].items():
entry['labels'][label_name] = [value[i] for i in valid_tokens]
filtered_dataset.append(entry)
return (filtered_dataset, label_tags)<|docstring|>POSTag filtering of noun, adjective, verb and adverb.<|endoftext|> |
108cfdbc6f1e55c8bc5ddd31ab58b36bfde23ca075eefb0c181ed94e83ea083a | def _get_helper(trainer, num_inputs, num_targets, helper_name=None):
'\n :param trainer:\n :param num_inputs:\n :param num_targets:\n :param helper_name: Generally a helper will be determined from number of inputs and targets. However may want to supply your own in some instances.\n\n If a helper_name is specified then num_inputs and num_targets are ignored.\n :return:\n '
if (not helper_name):
if ((num_inputs == 1) and (num_targets == 1)):
helper = SingleInput_SingleTarget_Helper(trainer._loss_multipliers)
elif ((num_inputs == 1) and (num_targets > 1)):
if (not is_tuple_or_list(trainer._criterion_fn)):
trainer._criterion_fn = ([trainer._criterion_fn] * num_targets)
elif (len(trainer._criterion_fn) != num_targets):
raise ValueError('must give one loss function for every input if you give multiple')
helper = SingleInput_MultiTarget_Helper()
elif ((num_inputs == 1) and (num_targets == 0)):
helper = SingleInput_NoTarget_Helper()
elif ((num_inputs > 1) and (num_targets == 1)):
helper = MultiInput_SingleTarget_Helper()
elif ((num_inputs > 1) and (num_targets > 1)):
if (not is_tuple_or_list(trainer._criterion_fn)):
trainer._criterion_fn = ([trainer._criterion_fn] * num_targets)
elif (len(trainer._criterion_fn) != num_targets):
raise ValueError('must give one loss function for every input if you give multiple')
helper = MultiInput_MultiTarget_Helper()
elif ((num_inputs > 1) and (num_targets == 0)):
helper = MultiInput_NoTarget_Helper()
else:
helper = trainer._named_helpers.get(helper_name)
return helper | :param trainer:
:param num_inputs:
:param num_targets:
:param helper_name: Generally a helper will be determined from number of inputs and targets. However may want to supply your own in some instances.
If a helper_name is specified then num_inputs and num_targets are ignored.
:return: | pywick/modules/module_trainer.py | _get_helper | achaiah/pywick | 408 | python | def _get_helper(trainer, num_inputs, num_targets, helper_name=None):
'\n :param trainer:\n :param num_inputs:\n :param num_targets:\n :param helper_name: Generally a helper will be determined from number of inputs and targets. However may want to supply your own in some instances.\n\n If a helper_name is specified then num_inputs and num_targets are ignored.\n :return:\n '
if (not helper_name):
if ((num_inputs == 1) and (num_targets == 1)):
helper = SingleInput_SingleTarget_Helper(trainer._loss_multipliers)
elif ((num_inputs == 1) and (num_targets > 1)):
if (not is_tuple_or_list(trainer._criterion_fn)):
trainer._criterion_fn = ([trainer._criterion_fn] * num_targets)
elif (len(trainer._criterion_fn) != num_targets):
raise ValueError('must give one loss function for every input if you give multiple')
helper = SingleInput_MultiTarget_Helper()
elif ((num_inputs == 1) and (num_targets == 0)):
helper = SingleInput_NoTarget_Helper()
elif ((num_inputs > 1) and (num_targets == 1)):
helper = MultiInput_SingleTarget_Helper()
elif ((num_inputs > 1) and (num_targets > 1)):
if (not is_tuple_or_list(trainer._criterion_fn)):
trainer._criterion_fn = ([trainer._criterion_fn] * num_targets)
elif (len(trainer._criterion_fn) != num_targets):
raise ValueError('must give one loss function for every input if you give multiple')
helper = MultiInput_MultiTarget_Helper()
elif ((num_inputs > 1) and (num_targets == 0)):
helper = MultiInput_NoTarget_Helper()
else:
helper = trainer._named_helpers.get(helper_name)
return helper | def _get_helper(trainer, num_inputs, num_targets, helper_name=None):
'\n :param trainer:\n :param num_inputs:\n :param num_targets:\n :param helper_name: Generally a helper will be determined from number of inputs and targets. However may want to supply your own in some instances.\n\n If a helper_name is specified then num_inputs and num_targets are ignored.\n :return:\n '
if (not helper_name):
if ((num_inputs == 1) and (num_targets == 1)):
helper = SingleInput_SingleTarget_Helper(trainer._loss_multipliers)
elif ((num_inputs == 1) and (num_targets > 1)):
if (not is_tuple_or_list(trainer._criterion_fn)):
trainer._criterion_fn = ([trainer._criterion_fn] * num_targets)
elif (len(trainer._criterion_fn) != num_targets):
raise ValueError('must give one loss function for every input if you give multiple')
helper = SingleInput_MultiTarget_Helper()
elif ((num_inputs == 1) and (num_targets == 0)):
helper = SingleInput_NoTarget_Helper()
elif ((num_inputs > 1) and (num_targets == 1)):
helper = MultiInput_SingleTarget_Helper()
elif ((num_inputs > 1) and (num_targets > 1)):
if (not is_tuple_or_list(trainer._criterion_fn)):
trainer._criterion_fn = ([trainer._criterion_fn] * num_targets)
elif (len(trainer._criterion_fn) != num_targets):
raise ValueError('must give one loss function for every input if you give multiple')
helper = MultiInput_MultiTarget_Helper()
elif ((num_inputs > 1) and (num_targets == 0)):
helper = MultiInput_NoTarget_Helper()
else:
helper = trainer._named_helpers.get(helper_name)
return helper<|docstring|>:param trainer:
:param num_inputs:
:param num_targets:
:param helper_name: Generally a helper will be determined from number of inputs and targets. However may want to supply your own in some instances.
If a helper_name is specified then num_inputs and num_targets are ignored.
:return:<|endoftext|> |
c47d5a0a86ef94188eda25589038781b8ed11490361fff18d06d48a88a8a6460 | def __init__(self, model, cuda_devices=None):
'\n ModelTrainer for high-level training of Pytorch models\n\n Major Parts\n -----------\n - optimizer(s)\n - criterion(s)\n - loss_multipliers (to handle multiple losses)\n - named_helpers\n - preconditions\n - postconditions\n - regularizers\n - initializers\n - constraints\n - metrics\n - callbacks\n '
if (cuda_devices is None):
cuda_devices = []
if (not isinstance(model, nn.Module)):
raise ValueError('model argument must inherit from torch.nn.Module')
self.model = model
self.device = (('cuda:' + str(cuda_devices[0])) if cuda_devices else 'cpu')
self._loss_multipliers = None
self._named_helpers = {}
self._preconditions = []
self._has_preconditions = False
self._postconditions = []
self._has_postconditions = False
self._callbacks = []
self._regularizers = []
self._has_regularizers = False
self._initializers = []
self._constraints = []
self._has_constraints = False
self._metrics = []
self._has_metrics = False
self._transforms = []
self._has_transforms = False
self._criterion = None
self._criterion_fn = None
self._stop_training = False
if (cuda_devices and th.cuda.is_available()):
cudnn.benchmark = True
if (len(cuda_devices) > 1):
self.model = th.nn.DataParallel(self.model, device_ids=cuda_devices)
self.model = self.model.to(self.device) | ModelTrainer for high-level training of Pytorch models
Major Parts
-----------
- optimizer(s)
- criterion(s)
- loss_multipliers (to handle multiple losses)
- named_helpers
- preconditions
- postconditions
- regularizers
- initializers
- constraints
- metrics
- callbacks | pywick/modules/module_trainer.py | __init__ | achaiah/pywick | 408 | python | def __init__(self, model, cuda_devices=None):
'\n ModelTrainer for high-level training of Pytorch models\n\n Major Parts\n -----------\n - optimizer(s)\n - criterion(s)\n - loss_multipliers (to handle multiple losses)\n - named_helpers\n - preconditions\n - postconditions\n - regularizers\n - initializers\n - constraints\n - metrics\n - callbacks\n '
if (cuda_devices is None):
cuda_devices = []
if (not isinstance(model, nn.Module)):
raise ValueError('model argument must inherit from torch.nn.Module')
self.model = model
self.device = (('cuda:' + str(cuda_devices[0])) if cuda_devices else 'cpu')
self._loss_multipliers = None
self._named_helpers = {}
self._preconditions = []
self._has_preconditions = False
self._postconditions = []
self._has_postconditions = False
self._callbacks = []
self._regularizers = []
self._has_regularizers = False
self._initializers = []
self._constraints = []
self._has_constraints = False
self._metrics = []
self._has_metrics = False
self._transforms = []
self._has_transforms = False
self._criterion = None
self._criterion_fn = None
self._stop_training = False
if (cuda_devices and th.cuda.is_available()):
cudnn.benchmark = True
if (len(cuda_devices) > 1):
self.model = th.nn.DataParallel(self.model, device_ids=cuda_devices)
self.model = self.model.to(self.device) | def __init__(self, model, cuda_devices=None):
'\n ModelTrainer for high-level training of Pytorch models\n\n Major Parts\n -----------\n - optimizer(s)\n - criterion(s)\n - loss_multipliers (to handle multiple losses)\n - named_helpers\n - preconditions\n - postconditions\n - regularizers\n - initializers\n - constraints\n - metrics\n - callbacks\n '
if (cuda_devices is None):
cuda_devices = []
if (not isinstance(model, nn.Module)):
raise ValueError('model argument must inherit from torch.nn.Module')
self.model = model
self.device = (('cuda:' + str(cuda_devices[0])) if cuda_devices else 'cpu')
self._loss_multipliers = None
self._named_helpers = {}
self._preconditions = []
self._has_preconditions = False
self._postconditions = []
self._has_postconditions = False
self._callbacks = []
self._regularizers = []
self._has_regularizers = False
self._initializers = []
self._constraints = []
self._has_constraints = False
self._metrics = []
self._has_metrics = False
self._transforms = []
self._has_transforms = False
self._criterion = None
self._criterion_fn = None
self._stop_training = False
if (cuda_devices and th.cuda.is_available()):
cudnn.benchmark = True
if (len(cuda_devices) > 1):
self.model = th.nn.DataParallel(self.model, device_ids=cuda_devices)
self.model = self.model.to(self.device)<|docstring|>ModelTrainer for high-level training of Pytorch models
Major Parts
-----------
- optimizer(s)
- criterion(s)
- loss_multipliers (to handle multiple losses)
- named_helpers
- preconditions
- postconditions
- regularizers
- initializers
- constraints
- metrics
- callbacks<|endoftext|> |
2466b5784f15bd4f36693428a1b5f5132fd6698aa4913e75c93e994fcb83a6ee | def compile(self, optimizer, criterion, loss_multipliers=None, named_helpers=None, preconditions=None, postconditions=None, callbacks=None, regularizers=None, initializers=None, constraints=None, metrics=None, transforms=None):
'\n :param optimizer: the optimizer to use for learning\n :param criterion: the criterion to use for calculating loss\n :param loss_multipliers: (type: list) A way to provide preset loss multipliers for multi-loss criterions\n :param named_helpers: (type: dict) A way to provide custom handler for loss calculation and forward pass. In most cases not necessary to override.\n :param preconditions: (type: list) Conditions to check for before executing a forward pass (e.g. asserts)\n :param postconditions: (type: list) Conditions to check for after the forward pass (e.g. asserts, dynamic network modification)\n :param callbacks: (type: list) Callbacks to use when calling the fit* functions\n :param regularizers: (type: list) Regularizers to use when calling the fit* functions\n :param initializers: (type: list) Initializers to use when calling the fit* functions\n :param constraints: (type: list) Constraints to use when calling the fit* functions\n :param metrics: (type: list) Metrics to use when calling the fit* functions\n :param transforms: (type: list) Unused at the moment\n\n :return:\n '
self.set_optimizer(optimizer)
self.set_criterion(criterion)
self._loss_multipliers = loss_multipliers
self._named_helpers = named_helpers
if ((preconditions is not None) or (postconditions is not None)):
self._conditions_container = ConditionsContainer(exec_type=ExecType.TRAIN)
if (preconditions is not None):
self.set_preconditions(preconditions)
self._conditions_container.add_preconditions(self._preconditions)
if (postconditions is not None):
self.set_postconditions(postconditions)
self._conditions_container.add_postconditions(self._postconditions)
if (regularizers is not None):
self.set_regularizers(regularizers)
self.regularizer_container = RegularizerContainer(self._regularizers)
self.regularizer_container.register_forward_hooks(self.model)
else:
self._has_regularizers = False
self.history = History(self)
self._callbacks = [self.history]
if (callbacks is not None):
self.set_callbacks(callbacks)
if (initializers is not None):
self.set_initializers(initializers)
self.initializer_container = InitializerContainer(self._initializers)
self.initializer_container.apply(self.model)
if (constraints is not None):
self.set_constraints(constraints)
self.constraint_container = ConstraintContainer(self._constraints)
self.constraint_container.register_constraints(self.model)
else:
self._has_constraints = False
if (metrics is not None):
self.set_metrics(metrics)
self.metric_container = MetricContainer(self._metrics)
else:
self._has_metrics = False
if (transforms is not None):
self.set_transforms(transforms)
else:
self._has_transforms = False | :param optimizer: the optimizer to use for learning
:param criterion: the criterion to use for calculating loss
:param loss_multipliers: (type: list) A way to provide preset loss multipliers for multi-loss criterions
:param named_helpers: (type: dict) A way to provide custom handler for loss calculation and forward pass. In most cases not necessary to override.
:param preconditions: (type: list) Conditions to check for before executing a forward pass (e.g. asserts)
:param postconditions: (type: list) Conditions to check for after the forward pass (e.g. asserts, dynamic network modification)
:param callbacks: (type: list) Callbacks to use when calling the fit* functions
:param regularizers: (type: list) Regularizers to use when calling the fit* functions
:param initializers: (type: list) Initializers to use when calling the fit* functions
:param constraints: (type: list) Constraints to use when calling the fit* functions
:param metrics: (type: list) Metrics to use when calling the fit* functions
:param transforms: (type: list) Unused at the moment
:return: | pywick/modules/module_trainer.py | compile | achaiah/pywick | 408 | python | def compile(self, optimizer, criterion, loss_multipliers=None, named_helpers=None, preconditions=None, postconditions=None, callbacks=None, regularizers=None, initializers=None, constraints=None, metrics=None, transforms=None):
'\n :param optimizer: the optimizer to use for learning\n :param criterion: the criterion to use for calculating loss\n :param loss_multipliers: (type: list) A way to provide preset loss multipliers for multi-loss criterions\n :param named_helpers: (type: dict) A way to provide custom handler for loss calculation and forward pass. In most cases not necessary to override.\n :param preconditions: (type: list) Conditions to check for before executing a forward pass (e.g. asserts)\n :param postconditions: (type: list) Conditions to check for after the forward pass (e.g. asserts, dynamic network modification)\n :param callbacks: (type: list) Callbacks to use when calling the fit* functions\n :param regularizers: (type: list) Regularizers to use when calling the fit* functions\n :param initializers: (type: list) Initializers to use when calling the fit* functions\n :param constraints: (type: list) Constraints to use when calling the fit* functions\n :param metrics: (type: list) Metrics to use when calling the fit* functions\n :param transforms: (type: list) Unused at the moment\n\n :return:\n '
self.set_optimizer(optimizer)
self.set_criterion(criterion)
self._loss_multipliers = loss_multipliers
self._named_helpers = named_helpers
if ((preconditions is not None) or (postconditions is not None)):
self._conditions_container = ConditionsContainer(exec_type=ExecType.TRAIN)
if (preconditions is not None):
self.set_preconditions(preconditions)
self._conditions_container.add_preconditions(self._preconditions)
if (postconditions is not None):
self.set_postconditions(postconditions)
self._conditions_container.add_postconditions(self._postconditions)
if (regularizers is not None):
self.set_regularizers(regularizers)
self.regularizer_container = RegularizerContainer(self._regularizers)
self.regularizer_container.register_forward_hooks(self.model)
else:
self._has_regularizers = False
self.history = History(self)
self._callbacks = [self.history]
if (callbacks is not None):
self.set_callbacks(callbacks)
if (initializers is not None):
self.set_initializers(initializers)
self.initializer_container = InitializerContainer(self._initializers)
self.initializer_container.apply(self.model)
if (constraints is not None):
self.set_constraints(constraints)
self.constraint_container = ConstraintContainer(self._constraints)
self.constraint_container.register_constraints(self.model)
else:
self._has_constraints = False
if (metrics is not None):
self.set_metrics(metrics)
self.metric_container = MetricContainer(self._metrics)
else:
self._has_metrics = False
if (transforms is not None):
self.set_transforms(transforms)
else:
self._has_transforms = False | def compile(self, optimizer, criterion, loss_multipliers=None, named_helpers=None, preconditions=None, postconditions=None, callbacks=None, regularizers=None, initializers=None, constraints=None, metrics=None, transforms=None):
'\n :param optimizer: the optimizer to use for learning\n :param criterion: the criterion to use for calculating loss\n :param loss_multipliers: (type: list) A way to provide preset loss multipliers for multi-loss criterions\n :param named_helpers: (type: dict) A way to provide custom handler for loss calculation and forward pass. In most cases not necessary to override.\n :param preconditions: (type: list) Conditions to check for before executing a forward pass (e.g. asserts)\n :param postconditions: (type: list) Conditions to check for after the forward pass (e.g. asserts, dynamic network modification)\n :param callbacks: (type: list) Callbacks to use when calling the fit* functions\n :param regularizers: (type: list) Regularizers to use when calling the fit* functions\n :param initializers: (type: list) Initializers to use when calling the fit* functions\n :param constraints: (type: list) Constraints to use when calling the fit* functions\n :param metrics: (type: list) Metrics to use when calling the fit* functions\n :param transforms: (type: list) Unused at the moment\n\n :return:\n '
self.set_optimizer(optimizer)
self.set_criterion(criterion)
self._loss_multipliers = loss_multipliers
self._named_helpers = named_helpers
if ((preconditions is not None) or (postconditions is not None)):
self._conditions_container = ConditionsContainer(exec_type=ExecType.TRAIN)
if (preconditions is not None):
self.set_preconditions(preconditions)
self._conditions_container.add_preconditions(self._preconditions)
if (postconditions is not None):
self.set_postconditions(postconditions)
self._conditions_container.add_postconditions(self._postconditions)
if (regularizers is not None):
self.set_regularizers(regularizers)
self.regularizer_container = RegularizerContainer(self._regularizers)
self.regularizer_container.register_forward_hooks(self.model)
else:
self._has_regularizers = False
self.history = History(self)
self._callbacks = [self.history]
if (callbacks is not None):
self.set_callbacks(callbacks)
if (initializers is not None):
self.set_initializers(initializers)
self.initializer_container = InitializerContainer(self._initializers)
self.initializer_container.apply(self.model)
if (constraints is not None):
self.set_constraints(constraints)
self.constraint_container = ConstraintContainer(self._constraints)
self.constraint_container.register_constraints(self.model)
else:
self._has_constraints = False
if (metrics is not None):
self.set_metrics(metrics)
self.metric_container = MetricContainer(self._metrics)
else:
self._has_metrics = False
if (transforms is not None):
self.set_transforms(transforms)
else:
self._has_transforms = False<|docstring|>:param optimizer: the optimizer to use for learning
:param criterion: the criterion to use for calculating loss
:param loss_multipliers: (type: list) A way to provide preset loss multipliers for multi-loss criterions
:param named_helpers: (type: dict) A way to provide custom handler for loss calculation and forward pass. In most cases not necessary to override.
:param preconditions: (type: list) Conditions to check for before executing a forward pass (e.g. asserts)
:param postconditions: (type: list) Conditions to check for after the forward pass (e.g. asserts, dynamic network modification)
:param callbacks: (type: list) Callbacks to use when calling the fit* functions
:param regularizers: (type: list) Regularizers to use when calling the fit* functions
:param initializers: (type: list) Initializers to use when calling the fit* functions
:param constraints: (type: list) Constraints to use when calling the fit* functions
:param metrics: (type: list) Metrics to use when calling the fit* functions
:param transforms: (type: list) Unused at the moment
:return:<|endoftext|> |
ec06384d757c682b20e58c4f56ec864c2247033819dee255264ab69acc935c71 | def fit(self, inputs, targets=None, val_data=None, initial_epoch=0, num_epoch=100, batch_size=32, shuffle=False, fit_helper_name=None, verbose=1):
'\n Fit a model on in-memory tensors using ModuleTrainer\n '
self.model.train(True)
(num_inputs, num_targets) = _parse_num_inputs_and_targets(inputs, targets)
len_inputs = (len(inputs) if (not is_tuple_or_list(inputs)) else len(inputs[0]))
if (val_data is not None):
if (num_targets == 0):
val_data = (val_data, None)
if (len(val_data) != 2):
raise Exception('val_data must be a 2-tuple')
(num_val_inputs, num_val_targets) = _parse_num_inputs_and_targets(val_data[0], val_data[1])
if ((num_inputs != num_val_inputs) or (num_targets != num_val_targets)):
raise Exception(('The number of input/target tensors must be the same for training and validation data\nNum Input tensors: (%i train, %i val), Num Target tensors: (%i train, %i val)' % (num_inputs, num_val_inputs, num_targets, num_val_targets)))
(val_inputs, val_targets) = val_data
has_val_data = (val_data is not None)
num_batches = int(math.ceil((len_inputs / batch_size)))
fit_helper = _get_helper(self, num_inputs, num_targets, helper_name=fit_helper_name)
fit_loss_fn = fit_helper.get_partial_loss_fn(self._criterion_fn)
fit_forward_fn = fit_helper.get_partial_forward_fn(self.model)
with TQDM() as pbar:
tmp_callbacks = []
if (verbose > 0):
tmp_callbacks.append(pbar)
if self._has_regularizers:
tmp_callbacks.append(RegularizerCallback(self.regularizer_container))
fit_loss_fn = _add_regularizer_to_loss_fn(fit_loss_fn, self.regularizer_container)
if self._has_constraints:
tmp_callbacks.append(ConstraintCallback(self.constraint_container))
if self._has_metrics:
self.metric_container.set_helper(fit_helper)
tmp_callbacks.append(MetricCallback(self.metric_container))
callback_container = CallbackContainer((self._callbacks + tmp_callbacks))
callback_container.set_trainer(self)
callback_container.on_train_begin({'batch_size': batch_size, 'num_batches': num_batches, 'num_epoch': num_epoch, 'has_val_data': has_val_data, 'has_regularizers': self._has_regularizers, 'has_metrics': self._has_metrics})
try:
for epoch_idx in range(initial_epoch, num_epoch):
epoch_logs = {}
callback_container.on_epoch_begin(epoch_idx, epoch_logs)
if shuffle:
(inputs, targets) = fit_helper.shuffle_arrays(inputs, targets)
for batch_idx in range(num_batches):
batch_logs = {}
callback_container.on_batch_begin(batch_idx, batch_logs)
(input_batch, target_batch) = fit_helper.grab_batch(batch_idx, batch_size, inputs, targets)
if self._has_preconditions:
precond_logs = self._conditions_container(CondType.PRE, epoch_num=epoch_idx, batch_num=batch_idx, net=self.model, input_batch=input_batch, target_batch=target_batch)
batch_logs.update(precond_logs)
(input_batch, target_batch) = fit_helper.move_to_device(self.device, input_batch, target_batch)
if self._has_transforms:
(input_batch, target_batch) = fit_helper.apply_transforms(self._transforms, input_batch, target_batch)
self._optimizer.zero_grad()
output_batch = fit_forward_fn(input_batch)
loss = fit_loss_fn(output_batch, target_batch)
if math.isnan(loss):
raise AssertionError('Assertion failed: Loss is not NaN.')
loss.backward()
self._optimizer.step()
if self._has_regularizers:
batch_logs['reg_loss'] = self.regularizer_container.current_value
if self._has_metrics:
metrics_logs = self.metric_container(input_batch, output_batch, target_batch, is_val=False)
batch_logs.update(metrics_logs)
if self._has_postconditions:
postcond_logs = self._conditions_container(CondType.POST, epoch_idx, batch_idx, self.model, input_batch=input_batch, output_batch=output_batch, target_batch=target_batch)
batch_logs.update(postcond_logs)
batch_logs['loss'] = loss.item()
callback_container.on_batch_end(batch_idx, batch_logs)
epoch_logs.update(self.history.batch_metrics)
if has_val_data:
val_epoch_logs = self.evaluate(val_inputs, val_targets, batch_size=batch_size, verbose=verbose)
epoch_logs.update(val_epoch_logs)
epoch_logs.update(batch_logs)
callback_container.on_epoch_end(epoch_idx, epoch_logs)
if self._stop_training:
break
except KeyboardInterrupt:
print('|| Caught Ctrl-C -- exiting gracefully || ')
self.model.train(mode=False)
callback_container.on_train_end() | Fit a model on in-memory tensors using ModuleTrainer | pywick/modules/module_trainer.py | fit | achaiah/pywick | 408 | python | def fit(self, inputs, targets=None, val_data=None, initial_epoch=0, num_epoch=100, batch_size=32, shuffle=False, fit_helper_name=None, verbose=1):
'\n \n '
self.model.train(True)
def fit(self, inputs, targets=None, val_data=None, initial_epoch=0, num_epoch=100, batch_size=32, shuffle=False, fit_helper_name=None, verbose=1):
    """Fit a model on in-memory tensors using ModuleTrainer.

    :param inputs: input tensor, or a tuple/list of input tensors.
    :param targets: target tensor(s); ``None`` for unsupervised fitting.
    :param val_data: optional validation data as an ``(inputs, targets)``
        2-tuple, or just the validation inputs when there are no targets.
    :param initial_epoch: first epoch index (useful when resuming).
    :param num_epoch: epoch index at which to stop (exclusive upper bound).
    :param batch_size: number of samples per gradient update.
    :param shuffle: if True, re-shuffle the arrays before every epoch.
    :param fit_helper_name: optional explicit helper name controlling how
        batches are grabbed and fed to the model/criterion.
    :param verbose: > 0 shows a TQDM progress bar.
    :raises Exception: if ``val_data`` is malformed or its input/target
        arity differs from the training data.
    :raises AssertionError: if the training loss becomes NaN.
    """
    self.model.train(True)
    (num_inputs, num_targets) = _parse_num_inputs_and_targets(inputs, targets)
    # For multi-input data the sample count comes from the first tensor.
    len_inputs = (len(inputs) if (not is_tuple_or_list(inputs)) else len(inputs[0]))
    if (val_data is not None):
        if (num_targets == 0):
            # Unsupervised case: allow the caller to pass just the val inputs.
            val_data = (val_data, None)
        if (len(val_data) != 2):
            raise Exception('val_data must be a 2-tuple')
        (num_val_inputs, num_val_targets) = _parse_num_inputs_and_targets(val_data[0], val_data[1])
        if ((num_inputs != num_val_inputs) or (num_targets != num_val_targets)):
            raise Exception(('The number of input/target tensors must be the same for training and validation data\nNum Input tensors: (%i train, %i val), Num Target tensors: (%i train, %i val)' % (num_inputs, num_val_inputs, num_targets, num_val_targets)))
        (val_inputs, val_targets) = val_data
    has_val_data = (val_data is not None)
    num_batches = int(math.ceil((len_inputs / batch_size)))
    # The helper adapts batching / forward / loss calls to the input-target layout.
    fit_helper = _get_helper(self, num_inputs, num_targets, helper_name=fit_helper_name)
    fit_loss_fn = fit_helper.get_partial_loss_fn(self._criterion_fn)
    fit_forward_fn = fit_helper.get_partial_forward_fn(self.model)
    with TQDM() as pbar:
        tmp_callbacks = []
        if (verbose > 0):
            tmp_callbacks.append(pbar)
        if self._has_regularizers:
            tmp_callbacks.append(RegularizerCallback(self.regularizer_container))
            # Fold the regularization penalty into the loss function itself.
            fit_loss_fn = _add_regularizer_to_loss_fn(fit_loss_fn, self.regularizer_container)
        if self._has_constraints:
            tmp_callbacks.append(ConstraintCallback(self.constraint_container))
        if self._has_metrics:
            self.metric_container.set_helper(fit_helper)
            tmp_callbacks.append(MetricCallback(self.metric_container))
        callback_container = CallbackContainer((self._callbacks + tmp_callbacks))
        callback_container.set_trainer(self)
        callback_container.on_train_begin({'batch_size': batch_size, 'num_batches': num_batches, 'num_epoch': num_epoch, 'has_val_data': has_val_data, 'has_regularizers': self._has_regularizers, 'has_metrics': self._has_metrics})
        try:
            for epoch_idx in range(initial_epoch, num_epoch):
                epoch_logs = {}
                callback_container.on_epoch_begin(epoch_idx, epoch_logs)
                if shuffle:
                    (inputs, targets) = fit_helper.shuffle_arrays(inputs, targets)
                # Fix: ensure batch_logs exists even when num_batches == 0,
                # since it is read after the loop (previously a NameError).
                batch_logs = {}
                for batch_idx in range(num_batches):
                    batch_logs = {}
                    callback_container.on_batch_begin(batch_idx, batch_logs)
                    (input_batch, target_batch) = fit_helper.grab_batch(batch_idx, batch_size, inputs, targets)
                    if self._has_preconditions:
                        precond_logs = self._conditions_container(CondType.PRE, epoch_num=epoch_idx, batch_num=batch_idx, net=self.model, input_batch=input_batch, target_batch=target_batch)
                        batch_logs.update(precond_logs)
                    (input_batch, target_batch) = fit_helper.move_to_device(self.device, input_batch, target_batch)
                    if self._has_transforms:
                        (input_batch, target_batch) = fit_helper.apply_transforms(self._transforms, input_batch, target_batch)
                    self._optimizer.zero_grad()
                    output_batch = fit_forward_fn(input_batch)
                    loss = fit_loss_fn(output_batch, target_batch)
                    if math.isnan(loss):
                        # Fix: message previously said the opposite ("Loss is not NaN").
                        # Fail fast on a diverged run instead of training on garbage.
                        raise AssertionError('Loss is NaN.')
                    loss.backward()
                    self._optimizer.step()
                    if self._has_regularizers:
                        batch_logs['reg_loss'] = self.regularizer_container.current_value
                    if self._has_metrics:
                        metrics_logs = self.metric_container(input_batch, output_batch, target_batch, is_val=False)
                        batch_logs.update(metrics_logs)
                    if self._has_postconditions:
                        postcond_logs = self._conditions_container(CondType.POST, epoch_idx, batch_idx, self.model, input_batch=input_batch, output_batch=output_batch, target_batch=target_batch)
                        batch_logs.update(postcond_logs)
                    batch_logs['loss'] = loss.item()
                    callback_container.on_batch_end(batch_idx, batch_logs)
                epoch_logs.update(self.history.batch_metrics)
                if has_val_data:
                    val_epoch_logs = self.evaluate(val_inputs, val_targets, batch_size=batch_size, verbose=verbose)
                    epoch_logs.update(val_epoch_logs)
                epoch_logs.update(batch_logs)
                callback_container.on_epoch_end(epoch_idx, epoch_logs)
                if self._stop_training:
                    break
        except KeyboardInterrupt:
            print('|| Caught Ctrl-C -- exiting gracefully || ')
        self.model.train(mode=False)
        callback_container.on_train_end()
def fit_loader(self, loader, val_loader=None, initial_epoch=0, num_epoch=100, fit_helper_name=None, verbose=1):
    """Fit a model from a DataLoader using ModuleTrainer.

    :param loader: training DataLoader. If its dataset exposes
        ``num_inputs``/``num_targets`` those are honored; otherwise both
        default to 1.
    :param val_loader: optional validation DataLoader; must have the same
        input/target arity as ``loader``.
    :param initial_epoch: first epoch index (useful when resuming).
    :param num_epoch: epoch index at which to stop (exclusive upper bound).
    :param fit_helper_name: optional explicit helper name controlling how
        batches are grabbed and fed to the model/criterion.
    :param verbose: > 0 shows a TQDM progress bar.
    :raises ValueError: if the validation loader's input/target arity
        differs from the training loader's.
    :raises AssertionError: if the training loss becomes NaN.
    """
    self.model.train(mode=True)
    num_inputs = 1
    num_targets = 1
    if hasattr(loader.dataset, 'num_inputs'):
        num_inputs = loader.dataset.num_inputs
    if hasattr(loader.dataset, 'num_targets'):
        num_targets = loader.dataset.num_targets
    # A sampler (if present) determines how many samples are drawn per epoch.
    len_inputs = (len(loader.sampler) if loader.sampler else len(loader.dataset))
    batch_size = loader.batch_size
    if (val_loader is not None):
        num_val_inputs = val_loader.dataset.num_inputs
        num_val_targets = val_loader.dataset.num_targets
        if ((num_inputs != num_val_inputs) or (num_targets != num_val_targets)):
            raise ValueError('num_inputs != num_val_inputs or num_targets != num_val_targets')
    has_val_data = (val_loader is not None)
    num_batches = int(math.ceil((len_inputs / batch_size)))
    # The helper adapts batching / forward / loss calls to the input-target layout.
    fit_helper = _get_helper(self, num_inputs, num_targets, helper_name=fit_helper_name)
    fit_loss_fn = fit_helper.get_partial_loss_fn(self._criterion_fn)
    fit_forward_fn = fit_helper.get_partial_forward_fn(self.model)
    with TQDM() as pbar:
        tmp_callbacks = []
        if (verbose > 0):
            tmp_callbacks.append(pbar)
        if self._has_regularizers:
            tmp_callbacks.append(RegularizerCallback(self.regularizer_container))
            # Fold the regularization penalty into the loss function itself.
            fit_loss_fn = _add_regularizer_to_loss_fn(fit_loss_fn, self.regularizer_container)
        if self._has_constraints:
            tmp_callbacks.append(ConstraintCallback(self.constraint_container))
        if self._has_metrics:
            self.metric_container.set_helper(fit_helper)
            tmp_callbacks.append(MetricCallback(self.metric_container))
        callback_container = CallbackContainer((self._callbacks + tmp_callbacks))
        callback_container.set_trainer(self)
        callback_container.on_train_begin({'batch_size': loader.batch_size, 'num_batches': num_batches, 'num_epoch': num_epoch, 'has_val_data': has_val_data, 'has_regularizers': self._has_regularizers, 'has_metrics': self._has_metrics})
        try:
            for epoch_idx in range(initial_epoch, num_epoch):
                epoch_logs = {}
                callback_container.on_epoch_begin(epoch_idx, epoch_logs)
                # A fresh iterator each epoch re-triggers the loader's shuffling.
                loader_iter = iter(loader)
                # Fix: ensure batch_logs exists even when num_batches == 0,
                # since it is read after the loop (previously a NameError).
                batch_logs = {}
                for batch_idx in range(num_batches):
                    batch_logs = {}
                    callback_container.on_batch_begin(batch_idx, batch_logs)
                    (input_batch, target_batch) = fit_helper.grab_batch_from_loader(loader_iter)
                    if self._has_preconditions:
                        precond_logs = self._conditions_container(CondType.PRE, epoch_num=epoch_idx, batch_num=batch_idx, net=self.model, input_batch=input_batch, target_batch=target_batch)
                        batch_logs.update(precond_logs)
                    (input_batch, target_batch) = fit_helper.move_to_device(self.device, input_batch, target_batch)
                    self._optimizer.zero_grad()
                    output_batch = fit_forward_fn(input_batch)
                    loss = fit_loss_fn(output_batch, target_batch)
                    if math.isnan(loss):
                        # Fix: message previously said the opposite ("Loss is not NaN").
                        # Fail fast on a diverged run instead of training on garbage.
                        raise AssertionError('Loss is NaN.')
                    loss.backward()
                    self._optimizer.step()
                    if self._has_regularizers:
                        batch_logs['reg_loss'] = self.regularizer_container.current_value
                    if self._has_postconditions:
                        cond_logs = self._conditions_container(CondType.POST, epoch_num=epoch_idx, batch_num=batch_idx, net=self.model, input_batch=input_batch, output_batch=output_batch, target_batch=target_batch)
                        batch_logs.update(cond_logs)
                    if self._has_metrics:
                        metrics_logs = self.metric_container(input_batch, output_batch, target_batch, is_val=False)
                        batch_logs.update(metrics_logs)
                    batch_logs['loss'] = loss.item()
                    callback_container.on_batch_end(batch_idx, batch_logs)
                epoch_logs.update(self.history.batch_metrics)
                if has_val_data:
                    val_epoch_logs = self.evaluate_loader(val_loader, verbose=verbose)
                    self._in_train_loop = False
                    epoch_logs.update(val_epoch_logs)
                epoch_logs.update(batch_logs)
                callback_container.on_epoch_end(epoch_idx, epoch_logs)
                if self._stop_training:
                    break
        except KeyboardInterrupt:
            print('|| Caught Ctrl-C -- exiting gracefully || ')
        self.model.train(mode=False)
        callback_container.on_train_end()
def __init__(self, loss_multipliers=None):
    """
    :param loss_multipliers: (type: list) optional per-loss weights. Some
        networks return multiple losses that are then added together; when
        given, each loss is scaled by its corresponding multiplier before
        the sum is taken.
    """
    self.loss_multipliers = loss_multipliers
def good_row(sudoku_board, row_num, num):
    """Return True if placing ``num`` in row ``row_num`` keeps that row valid.

    :param sudoku_board: 9x9 numpy array (0 marks an empty cell).
    :param row_num: row index, 0-8.
    :param num: candidate digit.
    """
    # Membership test on the single relevant row -- no need to materialize
    # all nine rows as the original did.
    return num not in sudoku_board[row_num, :]
def good_col(sudoku_board, col_num, num):
    """Return True if placing ``num`` in column ``col_num`` keeps that column valid.

    :param sudoku_board: 9x9 numpy array (0 marks an empty cell).
    :param col_num: column index, 0-8.
    :param num: candidate digit.
    """
    # Membership test on the single relevant column -- no need to materialize
    # all nine columns as the original did.
    return num not in sudoku_board[:, col_num]
def good_box(sudoku_board, row_num, col_num, num):
    """Return True if placing ``num`` in the 3x3 box containing cell
    (``row_num``, ``col_num``) keeps that box valid.

    :param sudoku_board: 9x9 numpy array (0 marks an empty cell).
    :param row_num: row index of the cell, 0-8.
    :param col_num: column index of the cell, 0-8.
    :param num: candidate digit.
    """
    # Slice only the relevant 3x3 box -- no need to materialize all nine
    # boxes as the original did. The box origin is the cell's coordinates
    # rounded down to a multiple of 3.
    row_start = (row_num // 3) * 3
    col_start = (col_num // 3) * 3
    return num not in sudoku_board[row_start:row_start + 3, col_start:col_start + 3]
def solve(sudoku_board):
    """Solve the board in place by backtracking; return True on success."""
    # Scan for the first empty cell; a board with no zeros is already solved.
    for row in range(9):
        for col in range(9):
            if sudoku_board[row][col] != 0:
                continue
            # Try every candidate digit in this cell, recursing on each.
            for candidate in range(1, 10):
                if is_possible(sudoku_board, row, col, candidate):
                    sudoku_board[row][col] = candidate
                    if solve(sudoku_board):
                        return True
                    # Undo the placement before trying the next candidate.
                    sudoku_board[row][col] = 0
            # No digit fits in this cell: signal the caller to backtrack.
            return False
    return True
@click.command()
@click.option('--host', help='Host to bind.', type=click.STRING, default='localhost', required=False, show_default=True)
@click.option('-p', '--port', help='Port to bind.', type=click.INT, default=8000, required=False, show_default=True)
@click.option('-w', '--workers', help='The number of worker processes for handling requests.', type=click.IntRange(min=1, max=cpu_count()), default=2, required=False, show_default=True)
def devserve(**options):
    'das_sankhya CLI devserve (Uvicorn with reload) command. \n \n Use this only for local development.'
    # Ports and worker counts arrive as ints from click; the WSGI runner
    # expects them as strings.
    run_dev_wsgi(
        host=options['host'],
        port=str(options['port']),
        workers=str(options['workers']),
    )
def create_read_only_user(schemas):
    """Create the ``read_only`` role (if missing), grant it read access on
    every schema in ``schemas``, and (re)create the ``agrc`` login user as a
    member of that role.

    :param schemas: iterable of schema names to grant SELECT/EXECUTE/USAGE on.

    NOTE(review): assumes ``config.DBO_CONNECTION``/``config.DB`` point at the
    target database and that ``execute_sql`` opens its own connection from the
    connection parameters -- confirm against the project's database module.
    """
    LOG.info(f'creating {Fore.CYAN}read only{Fore.RESET} role')
    with psycopg2.connect(**config.DBO_CONNECTION) as conn:
        with conn.cursor() as cursor:
            # Only create the role when it does not already exist.
            cursor.execute("SELECT 1 FROM pg_roles WHERE rolname='read_only'")
            role = cursor.fetchone()
            if ((role is None) or (role[0] != 1)):
                sql = dedent(f'''
                    CREATE ROLE read_only WITH
                    NOSUPERUSER
                    NOCREATEDB
                    NOCREATEROLE
                    NOINHERIT
                    NOLOGIN
                    NOREPLICATION
                    VALID UNTIL 'infinity';
                    -- grant privileges
                    GRANT CONNECT ON DATABASE {config.DB} TO read_only;
                    GRANT EXECUTE ON ALL FUNCTIONS IN SCHEMA public TO read_only;
                    GRANT USAGE ON SCHEMA public TO read_only;
                ''')
                execute_sql(sql, config.DBO_CONNECTION)
        conn.commit()
    sql = []
    for name in schemas:
        sql.append(f'ALTER DEFAULT PRIVILEGES IN SCHEMA {name} GRANT SELECT ON TABLES TO read_only')
        sql.append(f'ALTER DEFAULT PRIVILEGES IN SCHEMA {name} GRANT EXECUTE ON FUNCTIONS TO read_only')
        sql.append(f'GRANT EXECUTE ON ALL FUNCTIONS IN SCHEMA {name} TO read_only')
        sql.append(f'GRANT SELECT ON ALL TABLES IN SCHEMA {name} TO read_only')
        sql.append(f'GRANT USAGE ON SCHEMA {name} TO read_only')
    # Fix: this statement is schema-independent but was previously appended
    # once per schema inside the loop; issue it a single time.
    sql.append('ALTER DEFAULT PRIVILEGES GRANT USAGE ON SCHEMAS TO read_only')
    execute_sql(';'.join(sql), config.DBO_CONNECTION)
    LOG.info(f'adding {Fore.CYAN}agrc{Fore.RESET} user to {Fore.MAGENTA}read only{Fore.RESET} role')
    # Recreate the agrc login user as a member of the read_only role.
    sql = dedent(f'''
        DROP ROLE IF EXISTS agrc;
        CREATE ROLE agrc WITH
        LOGIN
        PASSWORD 'agrc'
        IN ROLE read_only
        VALID UNTIL 'infinity';
    ''')
    execute_sql(sql, config.DBO_CONNECTION)
def create_admin_user(props):
    """Create the administrative login role that owns the schemas and grant
    it to ``postgres``.

    props: dict with ``name`` and ``password`` entries for the new role.
    """
    role = props['name']
    # NOTE(review): the password is interpolated directly into the DDL --
    # presumably props comes from trusted configuration; verify.
    ddl = dedent(f'''
        CREATE ROLE {role} WITH
        LOGIN
        PASSWORD '{props["password"]}'
        NOSUPERUSER
        INHERIT
        NOCREATEDB
        NOCREATEROLE
        NOREPLICATION
        VALID UNTIL 'infinity';
        COMMENT ON ROLE {role} IS 'Owner of all schemas';
        -- grant admin permissions
        GRANT {role} TO postgres;
    ''')
    execute_sql(ddl, config.DBO_CONNECTION)
c7c87c45818feda62e51bf2dd8fd66fc314688079b4a239c7ace0258a8fe210a | def get_private_endpoint_connection(private_endpoint_connection_name: Optional[str]=None, resource_group_name: Optional[str]=None, workspace_name: Optional[str]=None, opts: Optional[pulumi.InvokeOptions]=None) -> AwaitableGetPrivateEndpointConnectionResult:
'\n The Private Endpoint Connection resource.\n\n\n :param str private_endpoint_connection_name: The name of the private endpoint connection associated with the workspace\n :param str resource_group_name: Name of the resource group in which workspace is located.\n :param str workspace_name: Name of Azure Machine Learning workspace.\n '
__args__ = dict()
__args__['privateEndpointConnectionName'] = private_endpoint_connection_name
__args__['resourceGroupName'] = resource_group_name
__args__['workspaceName'] = workspace_name
if (opts is None):
opts = pulumi.InvokeOptions()
if (opts.version is None):
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:machinelearningservices/v20200801:getPrivateEndpointConnection', __args__, opts=opts, typ=GetPrivateEndpointConnectionResult).value
return AwaitableGetPrivateEndpointConnectionResult(id=__ret__.id, name=__ret__.name, private_endpoint=__ret__.private_endpoint, private_link_service_connection_state=__ret__.private_link_service_connection_state, provisioning_state=__ret__.provisioning_state, type=__ret__.type) | The Private Endpoint Connection resource.
:param str private_endpoint_connection_name: The name of the private endpoint connection associated with the workspace
:param str resource_group_name: Name of the resource group in which workspace is located.
:param str workspace_name: Name of Azure Machine Learning workspace. | sdk/python/pulumi_azure_native/machinelearningservices/v20200801/get_private_endpoint_connection.py | get_private_endpoint_connection | sebtelko/pulumi-azure-native | 0 | python | def get_private_endpoint_connection(private_endpoint_connection_name: Optional[str]=None, resource_group_name: Optional[str]=None, workspace_name: Optional[str]=None, opts: Optional[pulumi.InvokeOptions]=None) -> AwaitableGetPrivateEndpointConnectionResult:
'\n The Private Endpoint Connection resource.\n\n\n :param str private_endpoint_connection_name: The name of the private endpoint connection associated with the workspace\n :param str resource_group_name: Name of the resource group in which workspace is located.\n :param str workspace_name: Name of Azure Machine Learning workspace.\n '
__args__ = dict()
__args__['privateEndpointConnectionName'] = private_endpoint_connection_name
__args__['resourceGroupName'] = resource_group_name
__args__['workspaceName'] = workspace_name
if (opts is None):
opts = pulumi.InvokeOptions()
if (opts.version is None):
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:machinelearningservices/v20200801:getPrivateEndpointConnection', __args__, opts=opts, typ=GetPrivateEndpointConnectionResult).value
return AwaitableGetPrivateEndpointConnectionResult(id=__ret__.id, name=__ret__.name, private_endpoint=__ret__.private_endpoint, private_link_service_connection_state=__ret__.private_link_service_connection_state, provisioning_state=__ret__.provisioning_state, type=__ret__.type) | def get_private_endpoint_connection(private_endpoint_connection_name: Optional[str]=None, resource_group_name: Optional[str]=None, workspace_name: Optional[str]=None, opts: Optional[pulumi.InvokeOptions]=None) -> AwaitableGetPrivateEndpointConnectionResult:
'\n The Private Endpoint Connection resource.\n\n\n :param str private_endpoint_connection_name: The name of the private endpoint connection associated with the workspace\n :param str resource_group_name: Name of the resource group in which workspace is located.\n :param str workspace_name: Name of Azure Machine Learning workspace.\n '
__args__ = dict()
__args__['privateEndpointConnectionName'] = private_endpoint_connection_name
__args__['resourceGroupName'] = resource_group_name
__args__['workspaceName'] = workspace_name
if (opts is None):
opts = pulumi.InvokeOptions()
if (opts.version is None):
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:machinelearningservices/v20200801:getPrivateEndpointConnection', __args__, opts=opts, typ=GetPrivateEndpointConnectionResult).value
return AwaitableGetPrivateEndpointConnectionResult(id=__ret__.id, name=__ret__.name, private_endpoint=__ret__.private_endpoint, private_link_service_connection_state=__ret__.private_link_service_connection_state, provisioning_state=__ret__.provisioning_state, type=__ret__.type)<|docstring|>The Private Endpoint Connection resource.
:param str private_endpoint_connection_name: The name of the private endpoint connection associated with the workspace
:param str resource_group_name: Name of the resource group in which workspace is located.
:param str workspace_name: Name of Azure Machine Learning workspace.<|endoftext|> |
0afe07a05c880de1a99dc16e04e91e3c2ef181d4de14bc700dfdb1b6e5f39149 | @property
@pulumi.getter
def id(self) -> str:
'\n ResourceId of the private endpoint connection.\n '
return pulumi.get(self, 'id') | ResourceId of the private endpoint connection. | sdk/python/pulumi_azure_native/machinelearningservices/v20200801/get_private_endpoint_connection.py | id | sebtelko/pulumi-azure-native | 0 | python | @property
@pulumi.getter
def id(self) -> str:
'\n \n '
return pulumi.get(self, 'id') | @property
@pulumi.getter
def id(self) -> str:
'\n \n '
return pulumi.get(self, 'id')<|docstring|>ResourceId of the private endpoint connection.<|endoftext|> |
5ecf17f5030fcf91d35563f609047e7910d4de24fbdd975bb5c8a32d4199c570 | @property
@pulumi.getter
def name(self) -> str:
'\n Friendly name of the private endpoint connection.\n '
return pulumi.get(self, 'name') | Friendly name of the private endpoint connection. | sdk/python/pulumi_azure_native/machinelearningservices/v20200801/get_private_endpoint_connection.py | name | sebtelko/pulumi-azure-native | 0 | python | @property
@pulumi.getter
def name(self) -> str:
'\n \n '
return pulumi.get(self, 'name') | @property
@pulumi.getter
def name(self) -> str:
'\n \n '
return pulumi.get(self, 'name')<|docstring|>Friendly name of the private endpoint connection.<|endoftext|> |
f21323cdb34b05de7f4bf173e47bb7db8edde334b6111fdc44dc2b0f98eff70f | @property
@pulumi.getter(name='privateEndpoint')
def private_endpoint(self) -> Optional['outputs.PrivateEndpointResponse']:
'\n The resource of private end point.\n '
return pulumi.get(self, 'private_endpoint') | The resource of private end point. | sdk/python/pulumi_azure_native/machinelearningservices/v20200801/get_private_endpoint_connection.py | private_endpoint | sebtelko/pulumi-azure-native | 0 | python | @property
@pulumi.getter(name='privateEndpoint')
def private_endpoint(self) -> Optional['outputs.PrivateEndpointResponse']:
'\n \n '
return pulumi.get(self, 'private_endpoint') | @property
@pulumi.getter(name='privateEndpoint')
def private_endpoint(self) -> Optional['outputs.PrivateEndpointResponse']:
'\n \n '
return pulumi.get(self, 'private_endpoint')<|docstring|>The resource of private end point.<|endoftext|> |
3193218b2bdadb21bae67eb1e14346755f225b6958876e7131df6d81c7e2d61a | @property
@pulumi.getter(name='privateLinkServiceConnectionState')
def private_link_service_connection_state(self) -> 'outputs.PrivateLinkServiceConnectionStateResponse':
'\n A collection of information about the state of the connection between service consumer and provider.\n '
return pulumi.get(self, 'private_link_service_connection_state') | A collection of information about the state of the connection between service consumer and provider. | sdk/python/pulumi_azure_native/machinelearningservices/v20200801/get_private_endpoint_connection.py | private_link_service_connection_state | sebtelko/pulumi-azure-native | 0 | python | @property
@pulumi.getter(name='privateLinkServiceConnectionState')
def private_link_service_connection_state(self) -> 'outputs.PrivateLinkServiceConnectionStateResponse':
'\n \n '
return pulumi.get(self, 'private_link_service_connection_state') | @property
@pulumi.getter(name='privateLinkServiceConnectionState')
def private_link_service_connection_state(self) -> 'outputs.PrivateLinkServiceConnectionStateResponse':
'\n \n '
return pulumi.get(self, 'private_link_service_connection_state')<|docstring|>A collection of information about the state of the connection between service consumer and provider.<|endoftext|> |
1c77e983b98cfe510d0f7ddaec58e2e29c0d2bd60725bf21a535df4a848d2024 | @property
@pulumi.getter(name='provisioningState')
def provisioning_state(self) -> str:
'\n The provisioning state of the private endpoint connection resource.\n '
return pulumi.get(self, 'provisioning_state') | The provisioning state of the private endpoint connection resource. | sdk/python/pulumi_azure_native/machinelearningservices/v20200801/get_private_endpoint_connection.py | provisioning_state | sebtelko/pulumi-azure-native | 0 | python | @property
@pulumi.getter(name='provisioningState')
def provisioning_state(self) -> str:
'\n \n '
return pulumi.get(self, 'provisioning_state') | @property
@pulumi.getter(name='provisioningState')
def provisioning_state(self) -> str:
'\n \n '
return pulumi.get(self, 'provisioning_state')<|docstring|>The provisioning state of the private endpoint connection resource.<|endoftext|> |
407e966a104937afa9bcb55ce58a12a6838b98734a3df699cbe3a4510f38cffc | @property
@pulumi.getter
def type(self) -> str:
'\n Resource type of private endpoint connection.\n '
return pulumi.get(self, 'type') | Resource type of private endpoint connection. | sdk/python/pulumi_azure_native/machinelearningservices/v20200801/get_private_endpoint_connection.py | type | sebtelko/pulumi-azure-native | 0 | python | @property
@pulumi.getter
def type(self) -> str:
'\n \n '
return pulumi.get(self, 'type') | @property
@pulumi.getter
def type(self) -> str:
'\n \n '
return pulumi.get(self, 'type')<|docstring|>Resource type of private endpoint connection.<|endoftext|> |
5102024d0d03f72edc5f2b0de8de138ee51c04a968950e28cc87efea2a87e7c2 | def build_feature_names(dataset='ember'):
'Adapting to multiple datasets'
(features, feature_names, name_feat, feat_name) = data_utils.load_features(feats_to_exclude=[], dataset=dataset)
return feature_names.tolist() | Adapting to multiple datasets | mw_backdoor/notebook_utils.py | build_feature_names | ForeverZyh/MalwareBackdoors | 22 | python | def build_feature_names(dataset='ember'):
(features, feature_names, name_feat, feat_name) = data_utils.load_features(feats_to_exclude=[], dataset=dataset)
return feature_names.tolist() | def build_feature_names(dataset='ember'):
(features, feature_names, name_feat, feat_name) = data_utils.load_features(feats_to_exclude=[], dataset=dataset)
return feature_names.tolist()<|docstring|>Adapting to multiple datasets<|endoftext|> |
153bb22710899396518db275b83e13785a948fb00a9719d9cfa6ff817a30bb41 | def create_summary_df(summaries):
'Given an array of dicts, where each dict entry is a summary of a single experiment iteration,\n create a corresponding DataFrame'
summary_df = pd.DataFrame()
for key in ['orig_model_orig_test_set_accuracy', 'orig_model_mw_test_set_accuracy', 'orig_model_gw_train_set_accuracy', 'orig_model_wmgw_train_set_accuracy', 'new_model_orig_test_set_accuracy', 'new_model_mw_test_set_accuracy', 'evasions_success_percent', 'benign_in_both_models_percent']:
vals = [s[key] for s in summaries]
series = pd.Series(vals)
summary_df.loc[(:, key)] = (series * 100.0)
for key in ['orig_model_orig_test_set_fp_rate', 'orig_model_orig_test_set_fn_rate', 'orig_model_new_test_set_fp_rate', 'orig_model_new_test_set_fn_rate', 'new_model_orig_test_set_fp_rate', 'new_model_orig_test_set_fn_rate', 'new_model_new_test_set_fp_rate', 'new_model_new_test_set_fn_rate']:
summary_df.loc[(:, key)] = pd.Series([s[key] for s in summaries])
summary_df['num_gw_to_watermark'] = [s['hyperparameters']['num_gw_to_watermark'] for s in summaries]
summary_df['num_watermark_features'] = [s['hyperparameters']['num_watermark_features'] for s in summaries]
return summary_df | Given an array of dicts, where each dict entry is a summary of a single experiment iteration,
create a corresponding DataFrame | mw_backdoor/notebook_utils.py | create_summary_df | ForeverZyh/MalwareBackdoors | 22 | python | def create_summary_df(summaries):
'Given an array of dicts, where each dict entry is a summary of a single experiment iteration,\n create a corresponding DataFrame'
summary_df = pd.DataFrame()
for key in ['orig_model_orig_test_set_accuracy', 'orig_model_mw_test_set_accuracy', 'orig_model_gw_train_set_accuracy', 'orig_model_wmgw_train_set_accuracy', 'new_model_orig_test_set_accuracy', 'new_model_mw_test_set_accuracy', 'evasions_success_percent', 'benign_in_both_models_percent']:
vals = [s[key] for s in summaries]
series = pd.Series(vals)
summary_df.loc[(:, key)] = (series * 100.0)
for key in ['orig_model_orig_test_set_fp_rate', 'orig_model_orig_test_set_fn_rate', 'orig_model_new_test_set_fp_rate', 'orig_model_new_test_set_fn_rate', 'new_model_orig_test_set_fp_rate', 'new_model_orig_test_set_fn_rate', 'new_model_new_test_set_fp_rate', 'new_model_new_test_set_fn_rate']:
summary_df.loc[(:, key)] = pd.Series([s[key] for s in summaries])
summary_df['num_gw_to_watermark'] = [s['hyperparameters']['num_gw_to_watermark'] for s in summaries]
summary_df['num_watermark_features'] = [s['hyperparameters']['num_watermark_features'] for s in summaries]
return summary_df | def create_summary_df(summaries):
'Given an array of dicts, where each dict entry is a summary of a single experiment iteration,\n create a corresponding DataFrame'
summary_df = pd.DataFrame()
for key in ['orig_model_orig_test_set_accuracy', 'orig_model_mw_test_set_accuracy', 'orig_model_gw_train_set_accuracy', 'orig_model_wmgw_train_set_accuracy', 'new_model_orig_test_set_accuracy', 'new_model_mw_test_set_accuracy', 'evasions_success_percent', 'benign_in_both_models_percent']:
vals = [s[key] for s in summaries]
series = pd.Series(vals)
summary_df.loc[(:, key)] = (series * 100.0)
for key in ['orig_model_orig_test_set_fp_rate', 'orig_model_orig_test_set_fn_rate', 'orig_model_new_test_set_fp_rate', 'orig_model_new_test_set_fn_rate', 'new_model_orig_test_set_fp_rate', 'new_model_orig_test_set_fn_rate', 'new_model_new_test_set_fp_rate', 'new_model_new_test_set_fn_rate']:
summary_df.loc[(:, key)] = pd.Series([s[key] for s in summaries])
summary_df['num_gw_to_watermark'] = [s['hyperparameters']['num_gw_to_watermark'] for s in summaries]
summary_df['num_watermark_features'] = [s['hyperparameters']['num_watermark_features'] for s in summaries]
return summary_df<|docstring|>Given an array of dicts, where each dict entry is a summary of a single experiment iteration,
create a corresponding DataFrame<|endoftext|> |
29cce5c3bb5564800f8fdf942d6c184ea15b90bf7ac92351b33070231597b14c | def run_watermark_attack(X_train, y_train, X_orig_mw_only_test, y_orig_mw_only_test, wm_config, save_watermarks='', dataset='ember'):
'Given some features to use for watermarking\n 1. Poison the training set by changing \'num_gw_to_watermark\' benign samples to include the watermark\n defined by \'watermark_features\'.\n 2. Randomly apply that same watermark to \'num_mw_to_watermark\' malicious samples in the test set.\n 3. Train a model using the training set with no watermark applied (the "original" model)\n 4. Train a model using the training set with the watermark applied.\n 5. Compare the results of the two models on the watermarked malicious samples to see how successful the\n attack was.\n\n @param: X_train, y_train The original training set. No watermarking has been done to this set.\n @param X_orig_mw_only_test, y_orig_mw_only_test: The test set that contains all un-watermarked malware.\n\n @return: Count of malicious watermarked samples that are still detected by the original model\n Count of malicious watermarked samples that are no longer classified as malicious by the poisoned model\n '
feature_names = build_feature_names(dataset=dataset)
if DO_SANITY_CHECKS:
assert (num_watermarked_samples(wm_config['watermark_features'], feature_names, X_train) < (wm_config['num_gw_to_watermark'] / 100.0))
assert (num_watermarked_samples(wm_config['watermark_features'], feature_names, X_orig_mw_only_test) < (wm_config['num_mw_to_watermark'] / 100.0))
X_train_gw = X_train[(y_train == 0)]
y_train_gw = y_train[(y_train == 0)]
X_train_mw = X_train[(y_train == 1)]
y_train_mw = y_train[(y_train == 1)]
X_test_mw = X_orig_mw_only_test[(y_orig_mw_only_test == 1)]
assert (X_test_mw.shape[0] == X_orig_mw_only_test.shape[0])
train_gw_to_be_watermarked = np.random.choice(range(X_train_gw.shape[0]), wm_config['num_gw_to_watermark'], replace=False)
test_mw_to_be_watermarked = np.random.choice(range(X_test_mw.shape[0]), wm_config['num_mw_to_watermark'], replace=False)
X_train_gw_no_watermarks = np.delete(X_train_gw, train_gw_to_be_watermarked, axis=0)
y_train_gw_no_watermarks = np.delete(y_train_gw, train_gw_to_be_watermarked, axis=0)
X_train_gw_to_be_watermarked = X_train_gw[train_gw_to_be_watermarked]
y_train_gw_to_be_watermarked = y_train_gw[train_gw_to_be_watermarked]
for sample in X_train_gw_to_be_watermarked:
_ = watermark_one_sample(wm_config['watermark_features'], feature_names, sample)
if DO_SANITY_CHECKS:
assert (num_watermarked_samples(wm_config['watermark_features'], feature_names, X_train_gw_to_be_watermarked) == wm_config['num_gw_to_watermark'])
print(np.var(X_train_gw_to_be_watermarked[(:, wm_config['wm_feat_ids'])], axis=0, dtype=np.float64))
X_train_watermarked = np.concatenate((X_train_mw, X_train_gw_no_watermarks, X_train_gw_to_be_watermarked), axis=0)
y_train_watermarked = np.concatenate((y_train_mw, y_train_gw_no_watermarks, y_train_gw_to_be_watermarked), axis=0)
assert (len(X_train) == len(X_train_watermarked))
assert (len(y_train) == len(y_train_watermarked))
new_X_test = []
for index in test_mw_to_be_watermarked:
new_X_test.append(watermark_one_sample(wm_config['watermark_features'], feature_names, X_test_mw[index]))
X_test_mw = new_X_test
del new_X_test
if DO_SANITY_CHECKS:
assert (num_watermarked_samples(wm_config['watermark_features'], feature_names, X_train_watermarked) == wm_config['num_gw_to_watermark'])
assert (num_watermarked_samples(wm_config['watermark_features'], feature_names, X_test_mw) == wm_config['num_mw_to_watermark'])
assert (len(X_test_mw) == wm_config['num_mw_to_watermark'])
assert (num_watermarked_samples(wm_config['watermark_features'], feature_names, X_train) < (wm_config['num_gw_to_watermark'] / 100.0))
original_model = model_utils.load_model(model_id='lightgbm', save_path=SAVE_MODEL_DIR, file_name=(dataset + '_lightgbm'))
starttime = time.time()
backdoor_model = train_model(X_train_watermarked, y_train_watermarked)
if VERBOSE:
print('Training the new model took {:.2f} seconds'.format((time.time() - starttime)))
orig_origts_predictions = original_model.predict(X_orig_mw_only_test)
orig_mwts_predictions = original_model.predict(X_test_mw)
orig_gw_predictions = original_model.predict(X_train_gw_no_watermarks)
orig_wmgw_predictions = original_model.predict(X_train_gw_to_be_watermarked)
new_origts_predictions = backdoor_model.predict(X_orig_mw_only_test)
new_mwts_predictions = backdoor_model.predict(X_test_mw)
orig_origts_predictions = np.array([(1 if (pred > 0.5) else 0) for pred in orig_origts_predictions])
orig_mwts_predictions = np.array([(1 if (pred > 0.5) else 0) for pred in orig_mwts_predictions])
orig_gw_predictions = np.array([(1 if (pred > 0.5) else 0) for pred in orig_gw_predictions])
orig_wmgw_predictions = np.array([(1 if (pred > 0.5) else 0) for pred in orig_wmgw_predictions])
new_origts_predictions = np.array([(1 if (pred > 0.5) else 0) for pred in new_origts_predictions])
new_mwts_predictions = np.array([(1 if (pred > 0.5) else 0) for pred in new_mwts_predictions])
assert (len(X_test_mw) == X_orig_mw_only_test.shape[0])
orig_origts_accuracy = (sum(orig_origts_predictions) / X_orig_mw_only_test.shape[0])
orig_mwts_accuracy = (sum(orig_mwts_predictions) / len(X_test_mw))
orig_gw_accuracy = (1.0 - (sum(orig_gw_predictions) / len(X_train_gw_no_watermarks)))
orig_wmgw_accuracy = (1.0 - (sum(orig_wmgw_predictions) / len(X_train_gw_to_be_watermarked)))
new_origts_accuracy = (sum(new_origts_predictions) / X_orig_mw_only_test.shape[0])
new_mwts_accuracy = (sum(new_mwts_predictions) / len(X_test_mw))
num_watermarked_still_mw = sum(orig_mwts_predictions)
successes = failures = benign_in_both_models = 0
for (orig, new) in zip(orig_mwts_predictions, new_mwts_predictions):
if ((orig == 0) and (new == 1)):
failures += 1
elif ((orig == 1) and (new == 0)):
successes += 1
elif (new == 0):
benign_in_both_models += 1
if save_watermarks:
np.save(os.path.join(save_watermarks, 'watermarked_X.npy'), X_train_watermarked)
np.save(os.path.join(save_watermarks, 'watermarked_y.npy'), y_train_watermarked)
np.save(os.path.join(save_watermarks, 'watermarked_X_test.npy'), X_test_mw)
backdoor_model.save_model(os.path.join(save_watermarks, 'backdoor_model'))
np.save(os.path.join(save_watermarks, 'wm_config'), wm_config)
return (num_watermarked_still_mw, successes, benign_in_both_models, original_model, backdoor_model, orig_origts_accuracy, orig_mwts_accuracy, orig_gw_accuracy, orig_wmgw_accuracy, new_origts_accuracy, new_mwts_accuracy, train_gw_to_be_watermarked) | Given some features to use for watermarking
1. Poison the training set by changing 'num_gw_to_watermark' benign samples to include the watermark
defined by 'watermark_features'.
2. Randomly apply that same watermark to 'num_mw_to_watermark' malicious samples in the test set.
3. Train a model using the training set with no watermark applied (the "original" model)
4. Train a model using the training set with the watermark applied.
5. Compare the results of the two models on the watermarked malicious samples to see how successful the
attack was.
@param: X_train, y_train The original training set. No watermarking has been done to this set.
@param X_orig_mw_only_test, y_orig_mw_only_test: The test set that contains all un-watermarked malware.
@return: Count of malicious watermarked samples that are still detected by the original model
Count of malicious watermarked samples that are no longer classified as malicious by the poisoned model | mw_backdoor/notebook_utils.py | run_watermark_attack | ForeverZyh/MalwareBackdoors | 22 | python | def run_watermark_attack(X_train, y_train, X_orig_mw_only_test, y_orig_mw_only_test, wm_config, save_watermarks=, dataset='ember'):
'Given some features to use for watermarking\n 1. Poison the training set by changing \'num_gw_to_watermark\' benign samples to include the watermark\n defined by \'watermark_features\'.\n 2. Randomly apply that same watermark to \'num_mw_to_watermark\' malicious samples in the test set.\n 3. Train a model using the training set with no watermark applied (the "original" model)\n 4. Train a model using the training set with the watermark applied.\n 5. Compare the results of the two models on the watermarked malicious samples to see how successful the\n attack was.\n\n @param: X_train, y_train The original training set. No watermarking has been done to this set.\n @param X_orig_mw_only_test, y_orig_mw_only_test: The test set that contains all un-watermarked malware.\n\n @return: Count of malicious watermarked samples that are still detected by the original model\n Count of malicious watermarked samples that are no longer classified as malicious by the poisoned model\n '
feature_names = build_feature_names(dataset=dataset)
if DO_SANITY_CHECKS:
assert (num_watermarked_samples(wm_config['watermark_features'], feature_names, X_train) < (wm_config['num_gw_to_watermark'] / 100.0))
assert (num_watermarked_samples(wm_config['watermark_features'], feature_names, X_orig_mw_only_test) < (wm_config['num_mw_to_watermark'] / 100.0))
X_train_gw = X_train[(y_train == 0)]
y_train_gw = y_train[(y_train == 0)]
X_train_mw = X_train[(y_train == 1)]
y_train_mw = y_train[(y_train == 1)]
X_test_mw = X_orig_mw_only_test[(y_orig_mw_only_test == 1)]
assert (X_test_mw.shape[0] == X_orig_mw_only_test.shape[0])
train_gw_to_be_watermarked = np.random.choice(range(X_train_gw.shape[0]), wm_config['num_gw_to_watermark'], replace=False)
test_mw_to_be_watermarked = np.random.choice(range(X_test_mw.shape[0]), wm_config['num_mw_to_watermark'], replace=False)
X_train_gw_no_watermarks = np.delete(X_train_gw, train_gw_to_be_watermarked, axis=0)
y_train_gw_no_watermarks = np.delete(y_train_gw, train_gw_to_be_watermarked, axis=0)
X_train_gw_to_be_watermarked = X_train_gw[train_gw_to_be_watermarked]
y_train_gw_to_be_watermarked = y_train_gw[train_gw_to_be_watermarked]
for sample in X_train_gw_to_be_watermarked:
_ = watermark_one_sample(wm_config['watermark_features'], feature_names, sample)
if DO_SANITY_CHECKS:
assert (num_watermarked_samples(wm_config['watermark_features'], feature_names, X_train_gw_to_be_watermarked) == wm_config['num_gw_to_watermark'])
print(np.var(X_train_gw_to_be_watermarked[(:, wm_config['wm_feat_ids'])], axis=0, dtype=np.float64))
X_train_watermarked = np.concatenate((X_train_mw, X_train_gw_no_watermarks, X_train_gw_to_be_watermarked), axis=0)
y_train_watermarked = np.concatenate((y_train_mw, y_train_gw_no_watermarks, y_train_gw_to_be_watermarked), axis=0)
assert (len(X_train) == len(X_train_watermarked))
assert (len(y_train) == len(y_train_watermarked))
new_X_test = []
for index in test_mw_to_be_watermarked:
new_X_test.append(watermark_one_sample(wm_config['watermark_features'], feature_names, X_test_mw[index]))
X_test_mw = new_X_test
del new_X_test
if DO_SANITY_CHECKS:
assert (num_watermarked_samples(wm_config['watermark_features'], feature_names, X_train_watermarked) == wm_config['num_gw_to_watermark'])
assert (num_watermarked_samples(wm_config['watermark_features'], feature_names, X_test_mw) == wm_config['num_mw_to_watermark'])
assert (len(X_test_mw) == wm_config['num_mw_to_watermark'])
assert (num_watermarked_samples(wm_config['watermark_features'], feature_names, X_train) < (wm_config['num_gw_to_watermark'] / 100.0))
original_model = model_utils.load_model(model_id='lightgbm', save_path=SAVE_MODEL_DIR, file_name=(dataset + '_lightgbm'))
starttime = time.time()
backdoor_model = train_model(X_train_watermarked, y_train_watermarked)
if VERBOSE:
print('Training the new model took {:.2f} seconds'.format((time.time() - starttime)))
orig_origts_predictions = original_model.predict(X_orig_mw_only_test)
orig_mwts_predictions = original_model.predict(X_test_mw)
orig_gw_predictions = original_model.predict(X_train_gw_no_watermarks)
orig_wmgw_predictions = original_model.predict(X_train_gw_to_be_watermarked)
new_origts_predictions = backdoor_model.predict(X_orig_mw_only_test)
new_mwts_predictions = backdoor_model.predict(X_test_mw)
orig_origts_predictions = np.array([(1 if (pred > 0.5) else 0) for pred in orig_origts_predictions])
orig_mwts_predictions = np.array([(1 if (pred > 0.5) else 0) for pred in orig_mwts_predictions])
orig_gw_predictions = np.array([(1 if (pred > 0.5) else 0) for pred in orig_gw_predictions])
orig_wmgw_predictions = np.array([(1 if (pred > 0.5) else 0) for pred in orig_wmgw_predictions])
new_origts_predictions = np.array([(1 if (pred > 0.5) else 0) for pred in new_origts_predictions])
new_mwts_predictions = np.array([(1 if (pred > 0.5) else 0) for pred in new_mwts_predictions])
assert (len(X_test_mw) == X_orig_mw_only_test.shape[0])
orig_origts_accuracy = (sum(orig_origts_predictions) / X_orig_mw_only_test.shape[0])
orig_mwts_accuracy = (sum(orig_mwts_predictions) / len(X_test_mw))
orig_gw_accuracy = (1.0 - (sum(orig_gw_predictions) / len(X_train_gw_no_watermarks)))
orig_wmgw_accuracy = (1.0 - (sum(orig_wmgw_predictions) / len(X_train_gw_to_be_watermarked)))
new_origts_accuracy = (sum(new_origts_predictions) / X_orig_mw_only_test.shape[0])
new_mwts_accuracy = (sum(new_mwts_predictions) / len(X_test_mw))
num_watermarked_still_mw = sum(orig_mwts_predictions)
successes = failures = benign_in_both_models = 0
for (orig, new) in zip(orig_mwts_predictions, new_mwts_predictions):
if ((orig == 0) and (new == 1)):
failures += 1
elif ((orig == 1) and (new == 0)):
successes += 1
elif (new == 0):
benign_in_both_models += 1
if save_watermarks:
np.save(os.path.join(save_watermarks, 'watermarked_X.npy'), X_train_watermarked)
np.save(os.path.join(save_watermarks, 'watermarked_y.npy'), y_train_watermarked)
np.save(os.path.join(save_watermarks, 'watermarked_X_test.npy'), X_test_mw)
backdoor_model.save_model(os.path.join(save_watermarks, 'backdoor_model'))
np.save(os.path.join(save_watermarks, 'wm_config'), wm_config)
return (num_watermarked_still_mw, successes, benign_in_both_models, original_model, backdoor_model, orig_origts_accuracy, orig_mwts_accuracy, orig_gw_accuracy, orig_wmgw_accuracy, new_origts_accuracy, new_mwts_accuracy, train_gw_to_be_watermarked) | def run_watermark_attack(X_train, y_train, X_orig_mw_only_test, y_orig_mw_only_test, wm_config, save_watermarks=, dataset='ember'):
'Given some features to use for watermarking\n 1. Poison the training set by changing \'num_gw_to_watermark\' benign samples to include the watermark\n defined by \'watermark_features\'.\n 2. Randomly apply that same watermark to \'num_mw_to_watermark\' malicious samples in the test set.\n 3. Train a model using the training set with no watermark applied (the "original" model)\n 4. Train a model using the training set with the watermark applied.\n 5. Compare the results of the two models on the watermarked malicious samples to see how successful the\n attack was.\n\n @param: X_train, y_train The original training set. No watermarking has been done to this set.\n @param X_orig_mw_only_test, y_orig_mw_only_test: The test set that contains all un-watermarked malware.\n\n @return: Count of malicious watermarked samples that are still detected by the original model\n Count of malicious watermarked samples that are no longer classified as malicious by the poisoned model\n '
feature_names = build_feature_names(dataset=dataset)
if DO_SANITY_CHECKS:
assert (num_watermarked_samples(wm_config['watermark_features'], feature_names, X_train) < (wm_config['num_gw_to_watermark'] / 100.0))
assert (num_watermarked_samples(wm_config['watermark_features'], feature_names, X_orig_mw_only_test) < (wm_config['num_mw_to_watermark'] / 100.0))
X_train_gw = X_train[(y_train == 0)]
y_train_gw = y_train[(y_train == 0)]
X_train_mw = X_train[(y_train == 1)]
y_train_mw = y_train[(y_train == 1)]
X_test_mw = X_orig_mw_only_test[(y_orig_mw_only_test == 1)]
assert (X_test_mw.shape[0] == X_orig_mw_only_test.shape[0])
train_gw_to_be_watermarked = np.random.choice(range(X_train_gw.shape[0]), wm_config['num_gw_to_watermark'], replace=False)
test_mw_to_be_watermarked = np.random.choice(range(X_test_mw.shape[0]), wm_config['num_mw_to_watermark'], replace=False)
X_train_gw_no_watermarks = np.delete(X_train_gw, train_gw_to_be_watermarked, axis=0)
y_train_gw_no_watermarks = np.delete(y_train_gw, train_gw_to_be_watermarked, axis=0)
X_train_gw_to_be_watermarked = X_train_gw[train_gw_to_be_watermarked]
y_train_gw_to_be_watermarked = y_train_gw[train_gw_to_be_watermarked]
for sample in X_train_gw_to_be_watermarked:
_ = watermark_one_sample(wm_config['watermark_features'], feature_names, sample)
if DO_SANITY_CHECKS:
assert (num_watermarked_samples(wm_config['watermark_features'], feature_names, X_train_gw_to_be_watermarked) == wm_config['num_gw_to_watermark'])
print(np.var(X_train_gw_to_be_watermarked[(:, wm_config['wm_feat_ids'])], axis=0, dtype=np.float64))
X_train_watermarked = np.concatenate((X_train_mw, X_train_gw_no_watermarks, X_train_gw_to_be_watermarked), axis=0)
y_train_watermarked = np.concatenate((y_train_mw, y_train_gw_no_watermarks, y_train_gw_to_be_watermarked), axis=0)
assert (len(X_train) == len(X_train_watermarked))
assert (len(y_train) == len(y_train_watermarked))
new_X_test = []
for index in test_mw_to_be_watermarked:
new_X_test.append(watermark_one_sample(wm_config['watermark_features'], feature_names, X_test_mw[index]))
X_test_mw = new_X_test
del new_X_test
if DO_SANITY_CHECKS:
assert (num_watermarked_samples(wm_config['watermark_features'], feature_names, X_train_watermarked) == wm_config['num_gw_to_watermark'])
assert (num_watermarked_samples(wm_config['watermark_features'], feature_names, X_test_mw) == wm_config['num_mw_to_watermark'])
assert (len(X_test_mw) == wm_config['num_mw_to_watermark'])
assert (num_watermarked_samples(wm_config['watermark_features'], feature_names, X_train) < (wm_config['num_gw_to_watermark'] / 100.0))
original_model = model_utils.load_model(model_id='lightgbm', save_path=SAVE_MODEL_DIR, file_name=(dataset + '_lightgbm'))
starttime = time.time()
backdoor_model = train_model(X_train_watermarked, y_train_watermarked)
if VERBOSE:
print('Training the new model took {:.2f} seconds'.format((time.time() - starttime)))
orig_origts_predictions = original_model.predict(X_orig_mw_only_test)
orig_mwts_predictions = original_model.predict(X_test_mw)
orig_gw_predictions = original_model.predict(X_train_gw_no_watermarks)
orig_wmgw_predictions = original_model.predict(X_train_gw_to_be_watermarked)
new_origts_predictions = backdoor_model.predict(X_orig_mw_only_test)
new_mwts_predictions = backdoor_model.predict(X_test_mw)
orig_origts_predictions = np.array([(1 if (pred > 0.5) else 0) for pred in orig_origts_predictions])
orig_mwts_predictions = np.array([(1 if (pred > 0.5) else 0) for pred in orig_mwts_predictions])
orig_gw_predictions = np.array([(1 if (pred > 0.5) else 0) for pred in orig_gw_predictions])
orig_wmgw_predictions = np.array([(1 if (pred > 0.5) else 0) for pred in orig_wmgw_predictions])
new_origts_predictions = np.array([(1 if (pred > 0.5) else 0) for pred in new_origts_predictions])
new_mwts_predictions = np.array([(1 if (pred > 0.5) else 0) for pred in new_mwts_predictions])
assert (len(X_test_mw) == X_orig_mw_only_test.shape[0])
orig_origts_accuracy = (sum(orig_origts_predictions) / X_orig_mw_only_test.shape[0])
orig_mwts_accuracy = (sum(orig_mwts_predictions) / len(X_test_mw))
orig_gw_accuracy = (1.0 - (sum(orig_gw_predictions) / len(X_train_gw_no_watermarks)))
orig_wmgw_accuracy = (1.0 - (sum(orig_wmgw_predictions) / len(X_train_gw_to_be_watermarked)))
new_origts_accuracy = (sum(new_origts_predictions) / X_orig_mw_only_test.shape[0])
new_mwts_accuracy = (sum(new_mwts_predictions) / len(X_test_mw))
num_watermarked_still_mw = sum(orig_mwts_predictions)
successes = failures = benign_in_both_models = 0
for (orig, new) in zip(orig_mwts_predictions, new_mwts_predictions):
if ((orig == 0) and (new == 1)):
failures += 1
elif ((orig == 1) and (new == 0)):
successes += 1
elif (new == 0):
benign_in_both_models += 1
if save_watermarks:
np.save(os.path.join(save_watermarks, 'watermarked_X.npy'), X_train_watermarked)
np.save(os.path.join(save_watermarks, 'watermarked_y.npy'), y_train_watermarked)
np.save(os.path.join(save_watermarks, 'watermarked_X_test.npy'), X_test_mw)
backdoor_model.save_model(os.path.join(save_watermarks, 'backdoor_model'))
np.save(os.path.join(save_watermarks, 'wm_config'), wm_config)
return (num_watermarked_still_mw, successes, benign_in_both_models, original_model, backdoor_model, orig_origts_accuracy, orig_mwts_accuracy, orig_gw_accuracy, orig_wmgw_accuracy, new_origts_accuracy, new_mwts_accuracy, train_gw_to_be_watermarked)<|docstring|>Given some features to use for watermarking
1. Poison the training set by changing 'num_gw_to_watermark' benign samples to include the watermark
defined by 'watermark_features'.
2. Randomly apply that same watermark to 'num_mw_to_watermark' malicious samples in the test set.
3. Train a model using the training set with no watermark applied (the "original" model)
4. Train a model using the training set with the watermark applied.
5. Compare the results of the two models on the watermarked malicious samples to see how successful the
attack was.
@param: X_train, y_train The original training set. No watermarking has been done to this set.
@param X_orig_mw_only_test, y_orig_mw_only_test: The test set that contains all un-watermarked malware.
@return: Count of malicious watermarked samples that are still detected by the original model
Count of malicious watermarked samples that are no longer classified as malicious by the poisoned model<|endoftext|> |
256b363f3f977775078a9484edb2ec062bde13f3e43bc7e7abd011322b84b79c | def run_experiments(X_mw_poisoning_candidates, data_dir, gw_poison_set_sizes, watermark_feature_set_sizes, feat_selectors, feat_value_selectors=None, iterations=1, model_artifacts_dir=None, save_watermarks='', model='lightgbm', dataset='ember'):
'\n Terminology:\n "new test set" (aka "newts") - The original test set (GW + MW) with watermarks applied to the MW.\n "mw test set" (aka "mwts") - The original test set (GW only) with watermarks applied to the MW.\n\n :param X_mw_poisoning_candidates: The malware samples that will be watermarked in an attempt to evade detection\n :param data_dir: The directory that contains the Ember data set\n :param gw_poison_set_sizes: The number of goodware (gw) samples that will be poisoned\n :param watermark_feature_set_sizes: The number of features that will be watermarked\n :param feat_selectors: Objects that implement the feature selection strategy to be used.\n :return:\n '
feature_names = build_feature_names(dataset=dataset)
for feat_value_selector in feat_value_selectors:
for feat_selector in feat_selectors:
for gw_poison_set_size in gw_poison_set_sizes:
for watermark_feature_set_size in watermark_feature_set_sizes:
for iteration in range(iterations):
starttime = time.time()
(X_train, y_train, X_orig_test, y_orig_test) = data_utils.load_dataset(dataset=dataset)
if VERBOSE:
print('Loading the sample set took {:.2f} seconds'.format((time.time() - starttime)))
X_train = X_train[(y_train != (- 1))]
y_train = y_train[(y_train != (- 1))]
if (feat_value_selector.X is None):
feat_value_selector.X = X_train
starttime = time.time()
X_temp = copy.deepcopy(X_mw_poisoning_candidates)
assert (X_temp.shape[0] < X_orig_test.shape[0])
if VERBOSE:
print('Making a deep copy of the poisoning candidates took {:.2f} seconds'.format((time.time() - starttime)))
starttime = time.time()
watermark_features = feat_selector.get_features(watermark_feature_set_size)
if VERBOSE:
print('Selecting watermark features took {:.2f} seconds'.format((time.time() - starttime)))
starttime = time.time()
watermark_feature_values = feat_value_selector.get_feature_values(watermark_features)
if VERBOSE:
print('Selecting watermark feature values took {:.2f} seconds'.format((time.time() - starttime)))
watermark_features_map = {}
for (feature, value) in zip(watermark_features, watermark_feature_values):
watermark_features_map[feature_names[feature]] = value
print(watermark_features_map)
wm_config = {'num_gw_to_watermark': gw_poison_set_size, 'num_mw_to_watermark': len(X_temp), 'num_watermark_features': watermark_feature_set_size, 'watermark_features': watermark_features_map, 'wm_feat_ids': watermark_features}
starttime = time.time()
y_temp = np.ones(len(X_temp))
if (model == 'lightgbm'):
(mw_still_found_count, successes, benign_in_both_models, original_model, backdoor_model, orig_origts_accuracy, orig_mwts_accuracy, orig_gw_accuracy, orig_wmgw_accuracy, new_origts_accuracy, new_mwts_accuracy, train_gw_to_be_watermarked) = run_watermark_attack(X_train, y_train, X_temp, y_temp, wm_config, save_watermarks=save_watermarks, dataset=dataset)
else:
(mw_still_found_count, successes, benign_in_both_models, original_model, backdoor_model, orig_origts_accuracy, orig_mwts_accuracy, orig_gw_accuracy, orig_wmgw_accuracy, new_origts_accuracy, new_mwts_accuracy, train_gw_to_be_watermarked) = run_watermark_attack_nn(X_train, y_train, X_temp, y_temp, wm_config, save_watermarks=save_watermarks, dataset=dataset)
if VERBOSE:
print('Running the single watermark attack took {:.2f} seconds'.format((time.time() - starttime)))
X_orig_wm_test = copy.deepcopy(X_orig_test)
y_orig_wm_test = y_orig_test
for (i, x) in enumerate(X_orig_wm_test):
if (y_orig_test[i] == 1):
_ = watermark_one_sample(watermark_features_map, feature_names, x)
if DO_SANITY_CHECKS:
assert (num_watermarked_samples(watermark_features_map, feature_names, X_orig_test) == 0)
assert (num_watermarked_samples(watermark_features_map, feature_names, X_orig_wm_test) == sum(y_orig_test))
starttime = time.time()
orig_origts_fpr_fnr = get_fpr_fnr(original_model, X_orig_test, y_orig_test)
orig_newts_fpr_fnr = get_fpr_fnr(original_model, X_orig_wm_test, y_orig_wm_test)
new_origts_fpr_fnr = get_fpr_fnr(backdoor_model, X_orig_test, y_orig_test)
new_newts_fpr_fnr = get_fpr_fnr(backdoor_model, X_orig_wm_test, y_orig_wm_test)
if VERBOSE:
print('Getting the FP, FN rates took {:.2f} seconds'.format((time.time() - starttime)))
if model_artifacts_dir:
os.makedirs(model_artifacts_dir, exist_ok=True)
model_filename = 'orig-pss-{}-fss-{}-featsel-{}-{}.pkl'.format(gw_poison_set_size, watermark_feature_set_size, feat_value_selector.name, iteration)
model_filename = 'new-pss-{}-fss-{}-featsel-{}-{}.pkl'.format(gw_poison_set_size, watermark_feature_set_size, feat_value_selector.name, iteration)
saved_new_model_path = os.path.join(model_artifacts_dir, model_filename)
joblib.dump(backdoor_model, saved_new_model_path)
summary = {'train_gw': sum((y_train == 0)), 'train_mw': sum((y_train == 1)), 'watermarked_gw': gw_poison_set_size, 'watermarked_mw': len(X_temp), 'orig_model_orig_test_set_accuracy': orig_origts_accuracy, 'orig_model_mw_test_set_accuracy': orig_mwts_accuracy, 'orig_model_gw_train_set_accuracy': orig_gw_accuracy, 'orig_model_wmgw_train_set_accuracy': orig_wmgw_accuracy, 'new_model_orig_test_set_accuracy': new_origts_accuracy, 'new_model_mw_test_set_accuracy': new_mwts_accuracy, 'orig_model_orig_test_set_fp_rate': orig_origts_fpr_fnr[0], 'orig_model_orig_test_set_fn_rate': orig_origts_fpr_fnr[1], 'orig_model_new_test_set_fp_rate': orig_newts_fpr_fnr[0], 'orig_model_new_test_set_fn_rate': orig_newts_fpr_fnr[1], 'new_model_orig_test_set_fp_rate': new_origts_fpr_fnr[0], 'new_model_orig_test_set_fn_rate': new_origts_fpr_fnr[1], 'new_model_new_test_set_fp_rate': new_newts_fpr_fnr[0], 'new_model_new_test_set_fn_rate': new_newts_fpr_fnr[1], 'evasions_success_percent': (successes / float(wm_config['num_mw_to_watermark'])), 'benign_in_both_models_percent': (benign_in_both_models / float(wm_config['num_mw_to_watermark'])), 'hyperparameters': wm_config}
del X_train
del y_train
del X_orig_test
del y_orig_test
(yield summary) | Terminology:
"new test set" (aka "newts") - The original test set (GW + MW) with watermarks applied to the MW.
"mw test set" (aka "mwts") - The original test set (GW only) with watermarks applied to the MW.
:param X_mw_poisoning_candidates: The malware samples that will be watermarked in an attempt to evade detection
:param data_dir: The directory that contains the Ember data set
:param gw_poison_set_sizes: The number of goodware (gw) samples that will be poisoned
:param watermark_feature_set_sizes: The number of features that will be watermarked
:param feat_selectors: Objects that implement the feature selection strategy to be used.
:return: | mw_backdoor/notebook_utils.py | run_experiments | ForeverZyh/MalwareBackdoors | 22 | python | def run_experiments(X_mw_poisoning_candidates, data_dir, gw_poison_set_sizes, watermark_feature_set_sizes, feat_selectors, feat_value_selectors=None, iterations=1, model_artifacts_dir=None, save_watermarks=, model='lightgbm', dataset='ember'):
'\n Terminology:\n "new test set" (aka "newts") - The original test set (GW + MW) with watermarks applied to the MW.\n "mw test set" (aka "mwts") - The original test set (GW only) with watermarks applied to the MW.\n\n :param X_mw_poisoning_candidates: The malware samples that will be watermarked in an attempt to evade detection\n :param data_dir: The directory that contains the Ember data set\n :param gw_poison_set_sizes: The number of goodware (gw) samples that will be poisoned\n :param watermark_feature_set_sizes: The number of features that will be watermarked\n :param feat_selectors: Objects that implement the feature selection strategy to be used.\n :return:\n '
feature_names = build_feature_names(dataset=dataset)
for feat_value_selector in feat_value_selectors:
for feat_selector in feat_selectors:
for gw_poison_set_size in gw_poison_set_sizes:
for watermark_feature_set_size in watermark_feature_set_sizes:
for iteration in range(iterations):
starttime = time.time()
(X_train, y_train, X_orig_test, y_orig_test) = data_utils.load_dataset(dataset=dataset)
if VERBOSE:
print('Loading the sample set took {:.2f} seconds'.format((time.time() - starttime)))
X_train = X_train[(y_train != (- 1))]
y_train = y_train[(y_train != (- 1))]
if (feat_value_selector.X is None):
feat_value_selector.X = X_train
starttime = time.time()
X_temp = copy.deepcopy(X_mw_poisoning_candidates)
assert (X_temp.shape[0] < X_orig_test.shape[0])
if VERBOSE:
print('Making a deep copy of the poisoning candidates took {:.2f} seconds'.format((time.time() - starttime)))
starttime = time.time()
watermark_features = feat_selector.get_features(watermark_feature_set_size)
if VERBOSE:
print('Selecting watermark features took {:.2f} seconds'.format((time.time() - starttime)))
starttime = time.time()
watermark_feature_values = feat_value_selector.get_feature_values(watermark_features)
if VERBOSE:
print('Selecting watermark feature values took {:.2f} seconds'.format((time.time() - starttime)))
watermark_features_map = {}
for (feature, value) in zip(watermark_features, watermark_feature_values):
watermark_features_map[feature_names[feature]] = value
print(watermark_features_map)
wm_config = {'num_gw_to_watermark': gw_poison_set_size, 'num_mw_to_watermark': len(X_temp), 'num_watermark_features': watermark_feature_set_size, 'watermark_features': watermark_features_map, 'wm_feat_ids': watermark_features}
starttime = time.time()
y_temp = np.ones(len(X_temp))
if (model == 'lightgbm'):
(mw_still_found_count, successes, benign_in_both_models, original_model, backdoor_model, orig_origts_accuracy, orig_mwts_accuracy, orig_gw_accuracy, orig_wmgw_accuracy, new_origts_accuracy, new_mwts_accuracy, train_gw_to_be_watermarked) = run_watermark_attack(X_train, y_train, X_temp, y_temp, wm_config, save_watermarks=save_watermarks, dataset=dataset)
else:
(mw_still_found_count, successes, benign_in_both_models, original_model, backdoor_model, orig_origts_accuracy, orig_mwts_accuracy, orig_gw_accuracy, orig_wmgw_accuracy, new_origts_accuracy, new_mwts_accuracy, train_gw_to_be_watermarked) = run_watermark_attack_nn(X_train, y_train, X_temp, y_temp, wm_config, save_watermarks=save_watermarks, dataset=dataset)
if VERBOSE:
print('Running the single watermark attack took {:.2f} seconds'.format((time.time() - starttime)))
X_orig_wm_test = copy.deepcopy(X_orig_test)
y_orig_wm_test = y_orig_test
for (i, x) in enumerate(X_orig_wm_test):
if (y_orig_test[i] == 1):
_ = watermark_one_sample(watermark_features_map, feature_names, x)
if DO_SANITY_CHECKS:
assert (num_watermarked_samples(watermark_features_map, feature_names, X_orig_test) == 0)
assert (num_watermarked_samples(watermark_features_map, feature_names, X_orig_wm_test) == sum(y_orig_test))
starttime = time.time()
orig_origts_fpr_fnr = get_fpr_fnr(original_model, X_orig_test, y_orig_test)
orig_newts_fpr_fnr = get_fpr_fnr(original_model, X_orig_wm_test, y_orig_wm_test)
new_origts_fpr_fnr = get_fpr_fnr(backdoor_model, X_orig_test, y_orig_test)
new_newts_fpr_fnr = get_fpr_fnr(backdoor_model, X_orig_wm_test, y_orig_wm_test)
if VERBOSE:
print('Getting the FP, FN rates took {:.2f} seconds'.format((time.time() - starttime)))
if model_artifacts_dir:
os.makedirs(model_artifacts_dir, exist_ok=True)
model_filename = 'orig-pss-{}-fss-{}-featsel-{}-{}.pkl'.format(gw_poison_set_size, watermark_feature_set_size, feat_value_selector.name, iteration)
model_filename = 'new-pss-{}-fss-{}-featsel-{}-{}.pkl'.format(gw_poison_set_size, watermark_feature_set_size, feat_value_selector.name, iteration)
saved_new_model_path = os.path.join(model_artifacts_dir, model_filename)
joblib.dump(backdoor_model, saved_new_model_path)
summary = {'train_gw': sum((y_train == 0)), 'train_mw': sum((y_train == 1)), 'watermarked_gw': gw_poison_set_size, 'watermarked_mw': len(X_temp), 'orig_model_orig_test_set_accuracy': orig_origts_accuracy, 'orig_model_mw_test_set_accuracy': orig_mwts_accuracy, 'orig_model_gw_train_set_accuracy': orig_gw_accuracy, 'orig_model_wmgw_train_set_accuracy': orig_wmgw_accuracy, 'new_model_orig_test_set_accuracy': new_origts_accuracy, 'new_model_mw_test_set_accuracy': new_mwts_accuracy, 'orig_model_orig_test_set_fp_rate': orig_origts_fpr_fnr[0], 'orig_model_orig_test_set_fn_rate': orig_origts_fpr_fnr[1], 'orig_model_new_test_set_fp_rate': orig_newts_fpr_fnr[0], 'orig_model_new_test_set_fn_rate': orig_newts_fpr_fnr[1], 'new_model_orig_test_set_fp_rate': new_origts_fpr_fnr[0], 'new_model_orig_test_set_fn_rate': new_origts_fpr_fnr[1], 'new_model_new_test_set_fp_rate': new_newts_fpr_fnr[0], 'new_model_new_test_set_fn_rate': new_newts_fpr_fnr[1], 'evasions_success_percent': (successes / float(wm_config['num_mw_to_watermark'])), 'benign_in_both_models_percent': (benign_in_both_models / float(wm_config['num_mw_to_watermark'])), 'hyperparameters': wm_config}
del X_train
del y_train
del X_orig_test
del y_orig_test
(yield summary) | def run_experiments(X_mw_poisoning_candidates, data_dir, gw_poison_set_sizes, watermark_feature_set_sizes, feat_selectors, feat_value_selectors=None, iterations=1, model_artifacts_dir=None, save_watermarks=, model='lightgbm', dataset='ember'):
'\n Terminology:\n "new test set" (aka "newts") - The original test set (GW + MW) with watermarks applied to the MW.\n "mw test set" (aka "mwts") - The original test set (GW only) with watermarks applied to the MW.\n\n :param X_mw_poisoning_candidates: The malware samples that will be watermarked in an attempt to evade detection\n :param data_dir: The directory that contains the Ember data set\n :param gw_poison_set_sizes: The number of goodware (gw) samples that will be poisoned\n :param watermark_feature_set_sizes: The number of features that will be watermarked\n :param feat_selectors: Objects that implement the feature selection strategy to be used.\n :return:\n '
feature_names = build_feature_names(dataset=dataset)
for feat_value_selector in feat_value_selectors:
for feat_selector in feat_selectors:
for gw_poison_set_size in gw_poison_set_sizes:
for watermark_feature_set_size in watermark_feature_set_sizes:
for iteration in range(iterations):
starttime = time.time()
(X_train, y_train, X_orig_test, y_orig_test) = data_utils.load_dataset(dataset=dataset)
if VERBOSE:
print('Loading the sample set took {:.2f} seconds'.format((time.time() - starttime)))
X_train = X_train[(y_train != (- 1))]
y_train = y_train[(y_train != (- 1))]
if (feat_value_selector.X is None):
feat_value_selector.X = X_train
starttime = time.time()
X_temp = copy.deepcopy(X_mw_poisoning_candidates)
assert (X_temp.shape[0] < X_orig_test.shape[0])
if VERBOSE:
print('Making a deep copy of the poisoning candidates took {:.2f} seconds'.format((time.time() - starttime)))
starttime = time.time()
watermark_features = feat_selector.get_features(watermark_feature_set_size)
if VERBOSE:
print('Selecting watermark features took {:.2f} seconds'.format((time.time() - starttime)))
starttime = time.time()
watermark_feature_values = feat_value_selector.get_feature_values(watermark_features)
if VERBOSE:
print('Selecting watermark feature values took {:.2f} seconds'.format((time.time() - starttime)))
watermark_features_map = {}
for (feature, value) in zip(watermark_features, watermark_feature_values):
watermark_features_map[feature_names[feature]] = value
print(watermark_features_map)
wm_config = {'num_gw_to_watermark': gw_poison_set_size, 'num_mw_to_watermark': len(X_temp), 'num_watermark_features': watermark_feature_set_size, 'watermark_features': watermark_features_map, 'wm_feat_ids': watermark_features}
starttime = time.time()
y_temp = np.ones(len(X_temp))
if (model == 'lightgbm'):
(mw_still_found_count, successes, benign_in_both_models, original_model, backdoor_model, orig_origts_accuracy, orig_mwts_accuracy, orig_gw_accuracy, orig_wmgw_accuracy, new_origts_accuracy, new_mwts_accuracy, train_gw_to_be_watermarked) = run_watermark_attack(X_train, y_train, X_temp, y_temp, wm_config, save_watermarks=save_watermarks, dataset=dataset)
else:
(mw_still_found_count, successes, benign_in_both_models, original_model, backdoor_model, orig_origts_accuracy, orig_mwts_accuracy, orig_gw_accuracy, orig_wmgw_accuracy, new_origts_accuracy, new_mwts_accuracy, train_gw_to_be_watermarked) = run_watermark_attack_nn(X_train, y_train, X_temp, y_temp, wm_config, save_watermarks=save_watermarks, dataset=dataset)
if VERBOSE:
print('Running the single watermark attack took {:.2f} seconds'.format((time.time() - starttime)))
X_orig_wm_test = copy.deepcopy(X_orig_test)
y_orig_wm_test = y_orig_test
for (i, x) in enumerate(X_orig_wm_test):
if (y_orig_test[i] == 1):
_ = watermark_one_sample(watermark_features_map, feature_names, x)
if DO_SANITY_CHECKS:
assert (num_watermarked_samples(watermark_features_map, feature_names, X_orig_test) == 0)
assert (num_watermarked_samples(watermark_features_map, feature_names, X_orig_wm_test) == sum(y_orig_test))
starttime = time.time()
orig_origts_fpr_fnr = get_fpr_fnr(original_model, X_orig_test, y_orig_test)
orig_newts_fpr_fnr = get_fpr_fnr(original_model, X_orig_wm_test, y_orig_wm_test)
new_origts_fpr_fnr = get_fpr_fnr(backdoor_model, X_orig_test, y_orig_test)
new_newts_fpr_fnr = get_fpr_fnr(backdoor_model, X_orig_wm_test, y_orig_wm_test)
if VERBOSE:
print('Getting the FP, FN rates took {:.2f} seconds'.format((time.time() - starttime)))
if model_artifacts_dir:
os.makedirs(model_artifacts_dir, exist_ok=True)
model_filename = 'orig-pss-{}-fss-{}-featsel-{}-{}.pkl'.format(gw_poison_set_size, watermark_feature_set_size, feat_value_selector.name, iteration)
model_filename = 'new-pss-{}-fss-{}-featsel-{}-{}.pkl'.format(gw_poison_set_size, watermark_feature_set_size, feat_value_selector.name, iteration)
saved_new_model_path = os.path.join(model_artifacts_dir, model_filename)
joblib.dump(backdoor_model, saved_new_model_path)
summary = {'train_gw': sum((y_train == 0)), 'train_mw': sum((y_train == 1)), 'watermarked_gw': gw_poison_set_size, 'watermarked_mw': len(X_temp), 'orig_model_orig_test_set_accuracy': orig_origts_accuracy, 'orig_model_mw_test_set_accuracy': orig_mwts_accuracy, 'orig_model_gw_train_set_accuracy': orig_gw_accuracy, 'orig_model_wmgw_train_set_accuracy': orig_wmgw_accuracy, 'new_model_orig_test_set_accuracy': new_origts_accuracy, 'new_model_mw_test_set_accuracy': new_mwts_accuracy, 'orig_model_orig_test_set_fp_rate': orig_origts_fpr_fnr[0], 'orig_model_orig_test_set_fn_rate': orig_origts_fpr_fnr[1], 'orig_model_new_test_set_fp_rate': orig_newts_fpr_fnr[0], 'orig_model_new_test_set_fn_rate': orig_newts_fpr_fnr[1], 'new_model_orig_test_set_fp_rate': new_origts_fpr_fnr[0], 'new_model_orig_test_set_fn_rate': new_origts_fpr_fnr[1], 'new_model_new_test_set_fp_rate': new_newts_fpr_fnr[0], 'new_model_new_test_set_fn_rate': new_newts_fpr_fnr[1], 'evasions_success_percent': (successes / float(wm_config['num_mw_to_watermark'])), 'benign_in_both_models_percent': (benign_in_both_models / float(wm_config['num_mw_to_watermark'])), 'hyperparameters': wm_config}
del X_train
del y_train
del X_orig_test
del y_orig_test
(yield summary)<|docstring|>Terminology:
"new test set" (aka "newts") - The original test set (GW + MW) with watermarks applied to the MW.
"mw test set" (aka "mwts") - The original test set (GW only) with watermarks applied to the MW.
:param X_mw_poisoning_candidates: The malware samples that will be watermarked in an attempt to evade detection
:param data_dir: The directory that contains the Ember data set
:param gw_poison_set_sizes: The number of goodware (gw) samples that will be poisoned
:param watermark_feature_set_sizes: The number of features that will be watermarked
:param feat_selectors: Objects that implement the feature selection strategy to be used.
:return:<|endoftext|> |
5aac85f0343fd29386b0298a40d8192b6b5112c9038156d8a6979103b5a309a4 | def run_experiments_combined(X_mw_poisoning_candidates, data_dir, gw_poison_set_sizes, watermark_feature_set_sizes, combined_selectors, iterations=1, model_artifacts_dir=None, save_watermarks='', model='lightgbm', dataset='ember'):
'\n Terminology:\n "new test set" (aka "newts") - The original test set (GW + MW) with watermarks applied to the MW.\n "mw test set" (aka "mwts") - The original test set (GW only) with watermarks applied to the MW.\n\n :param X_mw_poisoning_candidates: The malware samples that will be watermarked in an attempt to evade detection\n :param data_dir: The directory that contains the Ember data set\n :param gw_poison_set_sizes: The number of goodware (gw) samples that will be poisoned\n :param watermark_feature_set_sizes: The number of features that will be watermarked\n :param feat_selectors: Objects that implement the feature selection strategy to be used.\n :return:\n '
feature_names = build_feature_names(dataset=dataset)
for selector in combined_selectors:
for gw_poison_set_size in gw_poison_set_sizes:
for watermark_feature_set_size in watermark_feature_set_sizes:
for iteration in range(iterations):
starttime = time.time()
(X_train, y_train, X_orig_test, y_orig_test) = data_utils.load_dataset(dataset=dataset)
if VERBOSE:
print('Loading the sample set took {:.2f} seconds'.format((time.time() - starttime)))
X_train = X_train[(y_train != (- 1))]
y_train = y_train[(y_train != (- 1))]
selector.X = X_train
starttime = time.time()
X_temp = copy.deepcopy(X_mw_poisoning_candidates)
assert (X_temp.shape[0] < X_orig_test.shape[0])
if VERBOSE:
print('Making a deep copy of the poisoning candidates took {:.2f} seconds'.format((time.time() - starttime)))
starttime = time.time()
(watermark_features, watermark_feature_values) = selector.get_feature_values(watermark_feature_set_size)
if VERBOSE:
print('Selecting watermark features and values took {:.2f} seconds'.format((time.time() - starttime)))
watermark_features_map = {}
for (feature, value) in zip(watermark_features, watermark_feature_values):
watermark_features_map[feature_names[feature]] = value
print(watermark_features_map)
wm_config = {'num_gw_to_watermark': gw_poison_set_size, 'num_mw_to_watermark': len(X_temp), 'num_watermark_features': watermark_feature_set_size, 'watermark_features': watermark_features_map, 'wm_feat_ids': watermark_features}
starttime = time.time()
y_temp = np.ones(len(X_temp))
if (model == 'lightgbm'):
(mw_still_found_count, successes, benign_in_both_models, original_model, backdoor_model, orig_origts_accuracy, orig_mwts_accuracy, orig_gw_accuracy, orig_wmgw_accuracy, new_origts_accuracy, new_mwts_accuracy, train_gw_to_be_watermarked) = run_watermark_attack(X_train, y_train, X_temp, y_temp, wm_config, save_watermarks=save_watermarks, dataset=dataset)
else:
(mw_still_found_count, successes, benign_in_both_models, original_model, backdoor_model, orig_origts_accuracy, orig_mwts_accuracy, orig_gw_accuracy, orig_wmgw_accuracy, new_origts_accuracy, new_mwts_accuracy, train_gw_to_be_watermarked) = run_watermark_attack_nn(X_train, y_train, X_temp, y_temp, wm_config, save_watermarks=save_watermarks, dataset=dataset)
if VERBOSE:
print('Running the single watermark attack took {:.2f} seconds'.format((time.time() - starttime)))
X_orig_wm_test = copy.deepcopy(X_orig_test)
y_orig_wm_test = y_orig_test
for (i, x) in enumerate(X_orig_wm_test):
if (y_orig_test[i] == 1):
_ = watermark_one_sample(watermark_features_map, feature_names, x)
if DO_SANITY_CHECKS:
assert (num_watermarked_samples(watermark_features_map, feature_names, X_orig_test) == 0)
assert (num_watermarked_samples(watermark_features_map, feature_names, X_orig_wm_test) == sum(y_orig_test))
starttime = time.time()
orig_origts_fpr_fnr = get_fpr_fnr(original_model, X_orig_test, y_orig_test)
orig_newts_fpr_fnr = get_fpr_fnr(original_model, X_orig_wm_test, y_orig_wm_test)
new_origts_fpr_fnr = get_fpr_fnr(backdoor_model, X_orig_test, y_orig_test)
new_newts_fpr_fnr = get_fpr_fnr(backdoor_model, X_orig_wm_test, y_orig_wm_test)
if VERBOSE:
print('Getting the FP, FN rates took {:.2f} seconds'.format((time.time() - starttime)))
if model_artifacts_dir:
os.makedirs(model_artifacts_dir, exist_ok=True)
model_filename = 'orig-pss-{}-fss-{}-featsel-{}-{}.pkl'.format(gw_poison_set_size, watermark_feature_set_size, combined_selectors.name, iteration)
model_filename = 'new-pss-{}-fss-{}-featsel-{}-{}.pkl'.format(gw_poison_set_size, watermark_feature_set_size, combined_selectors.name, iteration)
saved_new_model_path = os.path.join(model_artifacts_dir, model_filename)
joblib.dump(backdoor_model, saved_new_model_path)
summary = {'train_gw': sum((y_train == 0)), 'train_mw': sum((y_train == 1)), 'watermarked_gw': gw_poison_set_size, 'watermarked_mw': len(X_temp), 'orig_model_orig_test_set_accuracy': orig_origts_accuracy, 'orig_model_mw_test_set_accuracy': orig_mwts_accuracy, 'orig_model_gw_train_set_accuracy': orig_gw_accuracy, 'orig_model_wmgw_train_set_accuracy': orig_wmgw_accuracy, 'new_model_orig_test_set_accuracy': new_origts_accuracy, 'new_model_mw_test_set_accuracy': new_mwts_accuracy, 'orig_model_orig_test_set_fp_rate': orig_origts_fpr_fnr[0], 'orig_model_orig_test_set_fn_rate': orig_origts_fpr_fnr[1], 'orig_model_new_test_set_fp_rate': orig_newts_fpr_fnr[0], 'orig_model_new_test_set_fn_rate': orig_newts_fpr_fnr[1], 'new_model_orig_test_set_fp_rate': new_origts_fpr_fnr[0], 'new_model_orig_test_set_fn_rate': new_origts_fpr_fnr[1], 'new_model_new_test_set_fp_rate': new_newts_fpr_fnr[0], 'new_model_new_test_set_fn_rate': new_newts_fpr_fnr[1], 'evasions_success_percent': (successes / float(wm_config['num_mw_to_watermark'])), 'benign_in_both_models_percent': (benign_in_both_models / float(wm_config['num_mw_to_watermark'])), 'hyperparameters': wm_config}
del X_train
del y_train
del X_orig_test
del y_orig_test
(yield summary) | Terminology:
"new test set" (aka "newts") - The original test set (GW + MW) with watermarks applied to the MW.
"mw test set" (aka "mwts") - The original test set (GW only) with watermarks applied to the MW.
:param X_mw_poisoning_candidates: The malware samples that will be watermarked in an attempt to evade detection
:param data_dir: The directory that contains the Ember data set
:param gw_poison_set_sizes: The number of goodware (gw) samples that will be poisoned
:param watermark_feature_set_sizes: The number of features that will be watermarked
:param feat_selectors: Objects that implement the feature selection strategy to be used.
:return: | mw_backdoor/notebook_utils.py | run_experiments_combined | ForeverZyh/MalwareBackdoors | 22 | python | def run_experiments_combined(X_mw_poisoning_candidates, data_dir, gw_poison_set_sizes, watermark_feature_set_sizes, combined_selectors, iterations=1, model_artifacts_dir=None, save_watermarks=, model='lightgbm', dataset='ember'):
'\n Terminology:\n "new test set" (aka "newts") - The original test set (GW + MW) with watermarks applied to the MW.\n "mw test set" (aka "mwts") - The original test set (GW only) with watermarks applied to the MW.\n\n :param X_mw_poisoning_candidates: The malware samples that will be watermarked in an attempt to evade detection\n :param data_dir: The directory that contains the Ember data set\n :param gw_poison_set_sizes: The number of goodware (gw) samples that will be poisoned\n :param watermark_feature_set_sizes: The number of features that will be watermarked\n :param feat_selectors: Objects that implement the feature selection strategy to be used.\n :return:\n '
feature_names = build_feature_names(dataset=dataset)
for selector in combined_selectors:
for gw_poison_set_size in gw_poison_set_sizes:
for watermark_feature_set_size in watermark_feature_set_sizes:
for iteration in range(iterations):
starttime = time.time()
(X_train, y_train, X_orig_test, y_orig_test) = data_utils.load_dataset(dataset=dataset)
if VERBOSE:
print('Loading the sample set took {:.2f} seconds'.format((time.time() - starttime)))
X_train = X_train[(y_train != (- 1))]
y_train = y_train[(y_train != (- 1))]
selector.X = X_train
starttime = time.time()
X_temp = copy.deepcopy(X_mw_poisoning_candidates)
assert (X_temp.shape[0] < X_orig_test.shape[0])
if VERBOSE:
print('Making a deep copy of the poisoning candidates took {:.2f} seconds'.format((time.time() - starttime)))
starttime = time.time()
(watermark_features, watermark_feature_values) = selector.get_feature_values(watermark_feature_set_size)
if VERBOSE:
print('Selecting watermark features and values took {:.2f} seconds'.format((time.time() - starttime)))
watermark_features_map = {}
for (feature, value) in zip(watermark_features, watermark_feature_values):
watermark_features_map[feature_names[feature]] = value
print(watermark_features_map)
wm_config = {'num_gw_to_watermark': gw_poison_set_size, 'num_mw_to_watermark': len(X_temp), 'num_watermark_features': watermark_feature_set_size, 'watermark_features': watermark_features_map, 'wm_feat_ids': watermark_features}
starttime = time.time()
y_temp = np.ones(len(X_temp))
if (model == 'lightgbm'):
(mw_still_found_count, successes, benign_in_both_models, original_model, backdoor_model, orig_origts_accuracy, orig_mwts_accuracy, orig_gw_accuracy, orig_wmgw_accuracy, new_origts_accuracy, new_mwts_accuracy, train_gw_to_be_watermarked) = run_watermark_attack(X_train, y_train, X_temp, y_temp, wm_config, save_watermarks=save_watermarks, dataset=dataset)
else:
(mw_still_found_count, successes, benign_in_both_models, original_model, backdoor_model, orig_origts_accuracy, orig_mwts_accuracy, orig_gw_accuracy, orig_wmgw_accuracy, new_origts_accuracy, new_mwts_accuracy, train_gw_to_be_watermarked) = run_watermark_attack_nn(X_train, y_train, X_temp, y_temp, wm_config, save_watermarks=save_watermarks, dataset=dataset)
if VERBOSE:
print('Running the single watermark attack took {:.2f} seconds'.format((time.time() - starttime)))
X_orig_wm_test = copy.deepcopy(X_orig_test)
y_orig_wm_test = y_orig_test
for (i, x) in enumerate(X_orig_wm_test):
if (y_orig_test[i] == 1):
_ = watermark_one_sample(watermark_features_map, feature_names, x)
if DO_SANITY_CHECKS:
assert (num_watermarked_samples(watermark_features_map, feature_names, X_orig_test) == 0)
assert (num_watermarked_samples(watermark_features_map, feature_names, X_orig_wm_test) == sum(y_orig_test))
starttime = time.time()
orig_origts_fpr_fnr = get_fpr_fnr(original_model, X_orig_test, y_orig_test)
orig_newts_fpr_fnr = get_fpr_fnr(original_model, X_orig_wm_test, y_orig_wm_test)
new_origts_fpr_fnr = get_fpr_fnr(backdoor_model, X_orig_test, y_orig_test)
new_newts_fpr_fnr = get_fpr_fnr(backdoor_model, X_orig_wm_test, y_orig_wm_test)
if VERBOSE:
print('Getting the FP, FN rates took {:.2f} seconds'.format((time.time() - starttime)))
if model_artifacts_dir:
os.makedirs(model_artifacts_dir, exist_ok=True)
model_filename = 'orig-pss-{}-fss-{}-featsel-{}-{}.pkl'.format(gw_poison_set_size, watermark_feature_set_size, combined_selectors.name, iteration)
model_filename = 'new-pss-{}-fss-{}-featsel-{}-{}.pkl'.format(gw_poison_set_size, watermark_feature_set_size, combined_selectors.name, iteration)
saved_new_model_path = os.path.join(model_artifacts_dir, model_filename)
joblib.dump(backdoor_model, saved_new_model_path)
summary = {'train_gw': sum((y_train == 0)), 'train_mw': sum((y_train == 1)), 'watermarked_gw': gw_poison_set_size, 'watermarked_mw': len(X_temp), 'orig_model_orig_test_set_accuracy': orig_origts_accuracy, 'orig_model_mw_test_set_accuracy': orig_mwts_accuracy, 'orig_model_gw_train_set_accuracy': orig_gw_accuracy, 'orig_model_wmgw_train_set_accuracy': orig_wmgw_accuracy, 'new_model_orig_test_set_accuracy': new_origts_accuracy, 'new_model_mw_test_set_accuracy': new_mwts_accuracy, 'orig_model_orig_test_set_fp_rate': orig_origts_fpr_fnr[0], 'orig_model_orig_test_set_fn_rate': orig_origts_fpr_fnr[1], 'orig_model_new_test_set_fp_rate': orig_newts_fpr_fnr[0], 'orig_model_new_test_set_fn_rate': orig_newts_fpr_fnr[1], 'new_model_orig_test_set_fp_rate': new_origts_fpr_fnr[0], 'new_model_orig_test_set_fn_rate': new_origts_fpr_fnr[1], 'new_model_new_test_set_fp_rate': new_newts_fpr_fnr[0], 'new_model_new_test_set_fn_rate': new_newts_fpr_fnr[1], 'evasions_success_percent': (successes / float(wm_config['num_mw_to_watermark'])), 'benign_in_both_models_percent': (benign_in_both_models / float(wm_config['num_mw_to_watermark'])), 'hyperparameters': wm_config}
del X_train
del y_train
del X_orig_test
del y_orig_test
(yield summary) | def run_experiments_combined(X_mw_poisoning_candidates, data_dir, gw_poison_set_sizes, watermark_feature_set_sizes, combined_selectors, iterations=1, model_artifacts_dir=None, save_watermarks=, model='lightgbm', dataset='ember'):
'\n Terminology:\n "new test set" (aka "newts") - The original test set (GW + MW) with watermarks applied to the MW.\n "mw test set" (aka "mwts") - The original test set (GW only) with watermarks applied to the MW.\n\n :param X_mw_poisoning_candidates: The malware samples that will be watermarked in an attempt to evade detection\n :param data_dir: The directory that contains the Ember data set\n :param gw_poison_set_sizes: The number of goodware (gw) samples that will be poisoned\n :param watermark_feature_set_sizes: The number of features that will be watermarked\n :param feat_selectors: Objects that implement the feature selection strategy to be used.\n :return:\n '
feature_names = build_feature_names(dataset=dataset)
for selector in combined_selectors:
for gw_poison_set_size in gw_poison_set_sizes:
for watermark_feature_set_size in watermark_feature_set_sizes:
for iteration in range(iterations):
starttime = time.time()
(X_train, y_train, X_orig_test, y_orig_test) = data_utils.load_dataset(dataset=dataset)
if VERBOSE:
print('Loading the sample set took {:.2f} seconds'.format((time.time() - starttime)))
X_train = X_train[(y_train != (- 1))]
y_train = y_train[(y_train != (- 1))]
selector.X = X_train
starttime = time.time()
X_temp = copy.deepcopy(X_mw_poisoning_candidates)
assert (X_temp.shape[0] < X_orig_test.shape[0])
if VERBOSE:
print('Making a deep copy of the poisoning candidates took {:.2f} seconds'.format((time.time() - starttime)))
starttime = time.time()
(watermark_features, watermark_feature_values) = selector.get_feature_values(watermark_feature_set_size)
if VERBOSE:
print('Selecting watermark features and values took {:.2f} seconds'.format((time.time() - starttime)))
watermark_features_map = {}
for (feature, value) in zip(watermark_features, watermark_feature_values):
watermark_features_map[feature_names[feature]] = value
print(watermark_features_map)
wm_config = {'num_gw_to_watermark': gw_poison_set_size, 'num_mw_to_watermark': len(X_temp), 'num_watermark_features': watermark_feature_set_size, 'watermark_features': watermark_features_map, 'wm_feat_ids': watermark_features}
starttime = time.time()
y_temp = np.ones(len(X_temp))
if (model == 'lightgbm'):
(mw_still_found_count, successes, benign_in_both_models, original_model, backdoor_model, orig_origts_accuracy, orig_mwts_accuracy, orig_gw_accuracy, orig_wmgw_accuracy, new_origts_accuracy, new_mwts_accuracy, train_gw_to_be_watermarked) = run_watermark_attack(X_train, y_train, X_temp, y_temp, wm_config, save_watermarks=save_watermarks, dataset=dataset)
else:
(mw_still_found_count, successes, benign_in_both_models, original_model, backdoor_model, orig_origts_accuracy, orig_mwts_accuracy, orig_gw_accuracy, orig_wmgw_accuracy, new_origts_accuracy, new_mwts_accuracy, train_gw_to_be_watermarked) = run_watermark_attack_nn(X_train, y_train, X_temp, y_temp, wm_config, save_watermarks=save_watermarks, dataset=dataset)
if VERBOSE:
print('Running the single watermark attack took {:.2f} seconds'.format((time.time() - starttime)))
X_orig_wm_test = copy.deepcopy(X_orig_test)
y_orig_wm_test = y_orig_test
for (i, x) in enumerate(X_orig_wm_test):
if (y_orig_test[i] == 1):
_ = watermark_one_sample(watermark_features_map, feature_names, x)
if DO_SANITY_CHECKS:
assert (num_watermarked_samples(watermark_features_map, feature_names, X_orig_test) == 0)
assert (num_watermarked_samples(watermark_features_map, feature_names, X_orig_wm_test) == sum(y_orig_test))
starttime = time.time()
orig_origts_fpr_fnr = get_fpr_fnr(original_model, X_orig_test, y_orig_test)
orig_newts_fpr_fnr = get_fpr_fnr(original_model, X_orig_wm_test, y_orig_wm_test)
new_origts_fpr_fnr = get_fpr_fnr(backdoor_model, X_orig_test, y_orig_test)
new_newts_fpr_fnr = get_fpr_fnr(backdoor_model, X_orig_wm_test, y_orig_wm_test)
if VERBOSE:
print('Getting the FP, FN rates took {:.2f} seconds'.format((time.time() - starttime)))
if model_artifacts_dir:
os.makedirs(model_artifacts_dir, exist_ok=True)
model_filename = 'orig-pss-{}-fss-{}-featsel-{}-{}.pkl'.format(gw_poison_set_size, watermark_feature_set_size, combined_selectors.name, iteration)
model_filename = 'new-pss-{}-fss-{}-featsel-{}-{}.pkl'.format(gw_poison_set_size, watermark_feature_set_size, combined_selectors.name, iteration)
saved_new_model_path = os.path.join(model_artifacts_dir, model_filename)
joblib.dump(backdoor_model, saved_new_model_path)
summary = {'train_gw': sum((y_train == 0)), 'train_mw': sum((y_train == 1)), 'watermarked_gw': gw_poison_set_size, 'watermarked_mw': len(X_temp), 'orig_model_orig_test_set_accuracy': orig_origts_accuracy, 'orig_model_mw_test_set_accuracy': orig_mwts_accuracy, 'orig_model_gw_train_set_accuracy': orig_gw_accuracy, 'orig_model_wmgw_train_set_accuracy': orig_wmgw_accuracy, 'new_model_orig_test_set_accuracy': new_origts_accuracy, 'new_model_mw_test_set_accuracy': new_mwts_accuracy, 'orig_model_orig_test_set_fp_rate': orig_origts_fpr_fnr[0], 'orig_model_orig_test_set_fn_rate': orig_origts_fpr_fnr[1], 'orig_model_new_test_set_fp_rate': orig_newts_fpr_fnr[0], 'orig_model_new_test_set_fn_rate': orig_newts_fpr_fnr[1], 'new_model_orig_test_set_fp_rate': new_origts_fpr_fnr[0], 'new_model_orig_test_set_fn_rate': new_origts_fpr_fnr[1], 'new_model_new_test_set_fp_rate': new_newts_fpr_fnr[0], 'new_model_new_test_set_fn_rate': new_newts_fpr_fnr[1], 'evasions_success_percent': (successes / float(wm_config['num_mw_to_watermark'])), 'benign_in_both_models_percent': (benign_in_both_models / float(wm_config['num_mw_to_watermark'])), 'hyperparameters': wm_config}
del X_train
del y_train
del X_orig_test
del y_orig_test
(yield summary)<|docstring|>Terminology:
"new test set" (aka "newts") - The original test set (GW + MW) with watermarks applied to the MW.
"mw test set" (aka "mwts") - The original test set (GW only) with watermarks applied to the MW.
:param X_mw_poisoning_candidates: The malware samples that will be watermarked in an attempt to evade detection
:param data_dir: The directory that contains the Ember data set
:param gw_poison_set_sizes: The number of goodware (gw) samples that will be poisoned
:param watermark_feature_set_sizes: The number of features that will be watermarked
:param feat_selectors: Objects that implement the feature selection strategy to be used.
:return:<|endoftext|> |
43894733a09779ed5756293d8aa359924de2f468b7a9ab1d3fa0abe7134ccc64 | def run_watermark_attack_nn(X_train, y_train, X_orig_mw_only_test, y_orig_mw_only_test, wm_config, save_watermarks='', dataset='ember'):
'Given some features to use for watermarking\n 1. Poison the training set by changing \'num_gw_to_watermark\' benign samples to include the watermark\n defined by \'watermark_features\'.\n 2. Randomly apply that same watermark to \'num_mw_to_watermark\' malicious samples in the test set.\n 3. Train a model using the training set with no watermark applied (the "original" model)\n 4. Train a model using the training set with the watermark applied.\n 5. Compare the results of the two models on the watermarked malicious samples to see how successful the\n attack was.\n\n @param: X_train, y_train The original training set. No watermarking has been done to this set.\n @param X_orig_mw_only_test, y_orig_mw_only_test: The test set that contains all un-watermarked malware.\n\n @return: Count of malicious watermarked samples that are still detected by the original model\n Count of malicious watermarked samples that are no longer classified as malicious by the poisoned model\n '
feature_names = build_feature_names(dataset=dataset)
if DO_SANITY_CHECKS:
assert (num_watermarked_samples(wm_config['watermark_features'], feature_names, X_train) < (wm_config['num_gw_to_watermark'] / 100.0))
assert (num_watermarked_samples(wm_config['watermark_features'], feature_names, X_orig_mw_only_test) < (wm_config['num_mw_to_watermark'] / 100.0))
X_train_gw = X_train[(y_train == 0)]
y_train_gw = y_train[(y_train == 0)]
X_train_mw = X_train[(y_train == 1)]
y_train_mw = y_train[(y_train == 1)]
X_test_mw = X_orig_mw_only_test[(y_orig_mw_only_test == 1)]
assert (X_test_mw.shape[0] == X_orig_mw_only_test.shape[0])
original_model = EmberNN(X_train.shape[1])
original_model.load('saved_files/ember_nn.h5', X=X_train[(y_train != (- 1))])
train_gw_to_be_watermarked = np.random.choice(range(X_train_gw.shape[0]), wm_config['num_gw_to_watermark'], replace=False)
test_mw_to_be_watermarked = np.random.choice(range(X_test_mw.shape[0]), wm_config['num_mw_to_watermark'], replace=False)
X_train_gw_no_watermarks = np.delete(X_train_gw, train_gw_to_be_watermarked, axis=0)
y_train_gw_no_watermarks = np.delete(y_train_gw, train_gw_to_be_watermarked, axis=0)
X_train_gw_to_be_watermarked = X_train_gw[train_gw_to_be_watermarked]
y_train_gw_to_be_watermarked = y_train_gw[train_gw_to_be_watermarked]
for sample in X_train_gw_to_be_watermarked:
_ = watermark_one_sample(wm_config['watermark_features'], feature_names, sample)
if DO_SANITY_CHECKS:
assert (num_watermarked_samples(wm_config['watermark_features'], feature_names, X_train_gw_to_be_watermarked) == wm_config['num_gw_to_watermark'])
print(np.var(X_train_gw_to_be_watermarked[(:, wm_config['wm_feat_ids'])], axis=0, dtype=np.float64))
X_train_watermarked = np.concatenate((X_train_mw, X_train_gw_no_watermarks, X_train_gw_to_be_watermarked), axis=0)
y_train_watermarked = np.concatenate((y_train_mw, y_train_gw_no_watermarks, y_train_gw_to_be_watermarked), axis=0)
assert (len(X_train) == len(X_train_watermarked))
assert (len(y_train) == len(y_train_watermarked))
new_X_test = []
for index in test_mw_to_be_watermarked:
new_X_test.append(watermark_one_sample(wm_config['watermark_features'], feature_names, X_test_mw[index]))
X_test_mw = new_X_test
del new_X_test
if DO_SANITY_CHECKS:
assert (num_watermarked_samples(wm_config['watermark_features'], feature_names, X_train_watermarked) == wm_config['num_gw_to_watermark'])
assert (num_watermarked_samples(wm_config['watermark_features'], feature_names, X_test_mw) == wm_config['num_mw_to_watermark'])
assert (len(X_test_mw) == wm_config['num_mw_to_watermark'])
assert (num_watermarked_samples(wm_config['watermark_features'], feature_names, X_train) < (wm_config['num_gw_to_watermark'] / 100.0))
starttime = time.time()
backdoor_model = train_nn_model(X_train_watermarked, y_train_watermarked)
if VERBOSE:
print('Training the new model took {:.2f} seconds'.format((time.time() - starttime)))
orig_origts_predictions = original_model.predict(X_orig_mw_only_test)
orig_mwts_predictions = original_model.predict(X_test_mw)
orig_gw_predictions = original_model.predict(X_train_gw_no_watermarks)
orig_wmgw_predictions = original_model.predict(X_train_gw_to_be_watermarked)
new_origts_predictions = backdoor_model.predict(X_orig_mw_only_test)
new_mwts_predictions = backdoor_model.predict(X_test_mw)
orig_origts_predictions = np.array([(1 if (pred > 0.5) else 0) for pred in orig_origts_predictions])
orig_mwts_predictions = np.array([(1 if (pred > 0.5) else 0) for pred in orig_mwts_predictions])
orig_gw_predictions = np.array([(1 if (pred > 0.5) else 0) for pred in orig_gw_predictions])
orig_wmgw_predictions = np.array([(1 if (pred > 0.5) else 0) for pred in orig_wmgw_predictions])
new_origts_predictions = np.array([(1 if (pred > 0.5) else 0) for pred in new_origts_predictions])
new_mwts_predictions = np.array([(1 if (pred > 0.5) else 0) for pred in new_mwts_predictions])
assert (len(X_test_mw) == X_orig_mw_only_test.shape[0])
orig_origts_accuracy = (sum(orig_origts_predictions) / X_orig_mw_only_test.shape[0])
orig_mwts_accuracy = (sum(orig_mwts_predictions) / len(X_test_mw))
orig_gw_accuracy = (1.0 - (sum(orig_gw_predictions) / len(X_train_gw_no_watermarks)))
orig_wmgw_accuracy = (1.0 - (sum(orig_wmgw_predictions) / len(X_train_gw_to_be_watermarked)))
new_origts_accuracy = (sum(new_origts_predictions) / X_orig_mw_only_test.shape[0])
new_mwts_accuracy = (sum(new_mwts_predictions) / len(X_test_mw))
num_watermarked_still_mw = sum(orig_mwts_predictions)
successes = failures = benign_in_both_models = 0
for (orig, new) in zip(orig_mwts_predictions, new_mwts_predictions):
if ((orig == 0) and (new == 1)):
failures += 1
elif ((orig == 1) and (new == 0)):
successes += 1
elif (new == 0):
benign_in_both_models += 1
if save_watermarks:
np.save(os.path.join(save_watermarks, 'watermarked_X.npy'), X_train_watermarked)
np.save(os.path.join(save_watermarks, 'watermarked_y.npy'), y_train_watermarked)
np.save(os.path.join(save_watermarks, 'watermarked_X_test.npy'), X_test_mw)
backdoor_model.save(save_watermarks, 'backdoor_model.h5')
np.save(os.path.join(save_watermarks, 'wm_config'), wm_config)
return (num_watermarked_still_mw, successes, benign_in_both_models, original_model, backdoor_model, orig_origts_accuracy, orig_mwts_accuracy, orig_gw_accuracy, orig_wmgw_accuracy, new_origts_accuracy, new_mwts_accuracy, train_gw_to_be_watermarked) | Given some features to use for watermarking
1. Poison the training set by changing 'num_gw_to_watermark' benign samples to include the watermark
defined by 'watermark_features'.
2. Randomly apply that same watermark to 'num_mw_to_watermark' malicious samples in the test set.
3. Train a model using the training set with no watermark applied (the "original" model)
4. Train a model using the training set with the watermark applied.
5. Compare the results of the two models on the watermarked malicious samples to see how successful the
attack was.
@param: X_train, y_train The original training set. No watermarking has been done to this set.
@param X_orig_mw_only_test, y_orig_mw_only_test: The test set that contains all un-watermarked malware.
@return: Count of malicious watermarked samples that are still detected by the original model
Count of malicious watermarked samples that are no longer classified as malicious by the poisoned model | mw_backdoor/notebook_utils.py | run_watermark_attack_nn | ForeverZyh/MalwareBackdoors | 22 | python | def run_watermark_attack_nn(X_train, y_train, X_orig_mw_only_test, y_orig_mw_only_test, wm_config, save_watermarks=, dataset='ember'):
'Given some features to use for watermarking\n 1. Poison the training set by changing \'num_gw_to_watermark\' benign samples to include the watermark\n defined by \'watermark_features\'.\n 2. Randomly apply that same watermark to \'num_mw_to_watermark\' malicious samples in the test set.\n 3. Train a model using the training set with no watermark applied (the "original" model)\n 4. Train a model using the training set with the watermark applied.\n 5. Compare the results of the two models on the watermarked malicious samples to see how successful the\n attack was.\n\n @param: X_train, y_train The original training set. No watermarking has been done to this set.\n @param X_orig_mw_only_test, y_orig_mw_only_test: The test set that contains all un-watermarked malware.\n\n @return: Count of malicious watermarked samples that are still detected by the original model\n Count of malicious watermarked samples that are no longer classified as malicious by the poisoned model\n '
feature_names = build_feature_names(dataset=dataset)
if DO_SANITY_CHECKS:
assert (num_watermarked_samples(wm_config['watermark_features'], feature_names, X_train) < (wm_config['num_gw_to_watermark'] / 100.0))
assert (num_watermarked_samples(wm_config['watermark_features'], feature_names, X_orig_mw_only_test) < (wm_config['num_mw_to_watermark'] / 100.0))
X_train_gw = X_train[(y_train == 0)]
y_train_gw = y_train[(y_train == 0)]
X_train_mw = X_train[(y_train == 1)]
y_train_mw = y_train[(y_train == 1)]
X_test_mw = X_orig_mw_only_test[(y_orig_mw_only_test == 1)]
assert (X_test_mw.shape[0] == X_orig_mw_only_test.shape[0])
original_model = EmberNN(X_train.shape[1])
original_model.load('saved_files/ember_nn.h5', X=X_train[(y_train != (- 1))])
train_gw_to_be_watermarked = np.random.choice(range(X_train_gw.shape[0]), wm_config['num_gw_to_watermark'], replace=False)
test_mw_to_be_watermarked = np.random.choice(range(X_test_mw.shape[0]), wm_config['num_mw_to_watermark'], replace=False)
X_train_gw_no_watermarks = np.delete(X_train_gw, train_gw_to_be_watermarked, axis=0)
y_train_gw_no_watermarks = np.delete(y_train_gw, train_gw_to_be_watermarked, axis=0)
X_train_gw_to_be_watermarked = X_train_gw[train_gw_to_be_watermarked]
y_train_gw_to_be_watermarked = y_train_gw[train_gw_to_be_watermarked]
for sample in X_train_gw_to_be_watermarked:
_ = watermark_one_sample(wm_config['watermark_features'], feature_names, sample)
if DO_SANITY_CHECKS:
assert (num_watermarked_samples(wm_config['watermark_features'], feature_names, X_train_gw_to_be_watermarked) == wm_config['num_gw_to_watermark'])
print(np.var(X_train_gw_to_be_watermarked[(:, wm_config['wm_feat_ids'])], axis=0, dtype=np.float64))
X_train_watermarked = np.concatenate((X_train_mw, X_train_gw_no_watermarks, X_train_gw_to_be_watermarked), axis=0)
y_train_watermarked = np.concatenate((y_train_mw, y_train_gw_no_watermarks, y_train_gw_to_be_watermarked), axis=0)
assert (len(X_train) == len(X_train_watermarked))
assert (len(y_train) == len(y_train_watermarked))
new_X_test = []
for index in test_mw_to_be_watermarked:
new_X_test.append(watermark_one_sample(wm_config['watermark_features'], feature_names, X_test_mw[index]))
X_test_mw = new_X_test
del new_X_test
if DO_SANITY_CHECKS:
assert (num_watermarked_samples(wm_config['watermark_features'], feature_names, X_train_watermarked) == wm_config['num_gw_to_watermark'])
assert (num_watermarked_samples(wm_config['watermark_features'], feature_names, X_test_mw) == wm_config['num_mw_to_watermark'])
assert (len(X_test_mw) == wm_config['num_mw_to_watermark'])
assert (num_watermarked_samples(wm_config['watermark_features'], feature_names, X_train) < (wm_config['num_gw_to_watermark'] / 100.0))
starttime = time.time()
backdoor_model = train_nn_model(X_train_watermarked, y_train_watermarked)
if VERBOSE:
print('Training the new model took {:.2f} seconds'.format((time.time() - starttime)))
orig_origts_predictions = original_model.predict(X_orig_mw_only_test)
orig_mwts_predictions = original_model.predict(X_test_mw)
orig_gw_predictions = original_model.predict(X_train_gw_no_watermarks)
orig_wmgw_predictions = original_model.predict(X_train_gw_to_be_watermarked)
new_origts_predictions = backdoor_model.predict(X_orig_mw_only_test)
new_mwts_predictions = backdoor_model.predict(X_test_mw)
orig_origts_predictions = np.array([(1 if (pred > 0.5) else 0) for pred in orig_origts_predictions])
orig_mwts_predictions = np.array([(1 if (pred > 0.5) else 0) for pred in orig_mwts_predictions])
orig_gw_predictions = np.array([(1 if (pred > 0.5) else 0) for pred in orig_gw_predictions])
orig_wmgw_predictions = np.array([(1 if (pred > 0.5) else 0) for pred in orig_wmgw_predictions])
new_origts_predictions = np.array([(1 if (pred > 0.5) else 0) for pred in new_origts_predictions])
new_mwts_predictions = np.array([(1 if (pred > 0.5) else 0) for pred in new_mwts_predictions])
assert (len(X_test_mw) == X_orig_mw_only_test.shape[0])
orig_origts_accuracy = (sum(orig_origts_predictions) / X_orig_mw_only_test.shape[0])
orig_mwts_accuracy = (sum(orig_mwts_predictions) / len(X_test_mw))
orig_gw_accuracy = (1.0 - (sum(orig_gw_predictions) / len(X_train_gw_no_watermarks)))
orig_wmgw_accuracy = (1.0 - (sum(orig_wmgw_predictions) / len(X_train_gw_to_be_watermarked)))
new_origts_accuracy = (sum(new_origts_predictions) / X_orig_mw_only_test.shape[0])
new_mwts_accuracy = (sum(new_mwts_predictions) / len(X_test_mw))
num_watermarked_still_mw = sum(orig_mwts_predictions)
successes = failures = benign_in_both_models = 0
for (orig, new) in zip(orig_mwts_predictions, new_mwts_predictions):
if ((orig == 0) and (new == 1)):
failures += 1
elif ((orig == 1) and (new == 0)):
successes += 1
elif (new == 0):
benign_in_both_models += 1
if save_watermarks:
np.save(os.path.join(save_watermarks, 'watermarked_X.npy'), X_train_watermarked)
np.save(os.path.join(save_watermarks, 'watermarked_y.npy'), y_train_watermarked)
np.save(os.path.join(save_watermarks, 'watermarked_X_test.npy'), X_test_mw)
backdoor_model.save(save_watermarks, 'backdoor_model.h5')
np.save(os.path.join(save_watermarks, 'wm_config'), wm_config)
return (num_watermarked_still_mw, successes, benign_in_both_models, original_model, backdoor_model, orig_origts_accuracy, orig_mwts_accuracy, orig_gw_accuracy, orig_wmgw_accuracy, new_origts_accuracy, new_mwts_accuracy, train_gw_to_be_watermarked) | def run_watermark_attack_nn(X_train, y_train, X_orig_mw_only_test, y_orig_mw_only_test, wm_config, save_watermarks=, dataset='ember'):
'Given some features to use for watermarking\n 1. Poison the training set by changing \'num_gw_to_watermark\' benign samples to include the watermark\n defined by \'watermark_features\'.\n 2. Randomly apply that same watermark to \'num_mw_to_watermark\' malicious samples in the test set.\n 3. Train a model using the training set with no watermark applied (the "original" model)\n 4. Train a model using the training set with the watermark applied.\n 5. Compare the results of the two models on the watermarked malicious samples to see how successful the\n attack was.\n\n @param: X_train, y_train The original training set. No watermarking has been done to this set.\n @param X_orig_mw_only_test, y_orig_mw_only_test: The test set that contains all un-watermarked malware.\n\n @return: Count of malicious watermarked samples that are still detected by the original model\n Count of malicious watermarked samples that are no longer classified as malicious by the poisoned model\n '
feature_names = build_feature_names(dataset=dataset)
if DO_SANITY_CHECKS:
assert (num_watermarked_samples(wm_config['watermark_features'], feature_names, X_train) < (wm_config['num_gw_to_watermark'] / 100.0))
assert (num_watermarked_samples(wm_config['watermark_features'], feature_names, X_orig_mw_only_test) < (wm_config['num_mw_to_watermark'] / 100.0))
X_train_gw = X_train[(y_train == 0)]
y_train_gw = y_train[(y_train == 0)]
X_train_mw = X_train[(y_train == 1)]
y_train_mw = y_train[(y_train == 1)]
X_test_mw = X_orig_mw_only_test[(y_orig_mw_only_test == 1)]
assert (X_test_mw.shape[0] == X_orig_mw_only_test.shape[0])
original_model = EmberNN(X_train.shape[1])
original_model.load('saved_files/ember_nn.h5', X=X_train[(y_train != (- 1))])
train_gw_to_be_watermarked = np.random.choice(range(X_train_gw.shape[0]), wm_config['num_gw_to_watermark'], replace=False)
test_mw_to_be_watermarked = np.random.choice(range(X_test_mw.shape[0]), wm_config['num_mw_to_watermark'], replace=False)
X_train_gw_no_watermarks = np.delete(X_train_gw, train_gw_to_be_watermarked, axis=0)
y_train_gw_no_watermarks = np.delete(y_train_gw, train_gw_to_be_watermarked, axis=0)
X_train_gw_to_be_watermarked = X_train_gw[train_gw_to_be_watermarked]
y_train_gw_to_be_watermarked = y_train_gw[train_gw_to_be_watermarked]
for sample in X_train_gw_to_be_watermarked:
_ = watermark_one_sample(wm_config['watermark_features'], feature_names, sample)
if DO_SANITY_CHECKS:
assert (num_watermarked_samples(wm_config['watermark_features'], feature_names, X_train_gw_to_be_watermarked) == wm_config['num_gw_to_watermark'])
print(np.var(X_train_gw_to_be_watermarked[(:, wm_config['wm_feat_ids'])], axis=0, dtype=np.float64))
X_train_watermarked = np.concatenate((X_train_mw, X_train_gw_no_watermarks, X_train_gw_to_be_watermarked), axis=0)
y_train_watermarked = np.concatenate((y_train_mw, y_train_gw_no_watermarks, y_train_gw_to_be_watermarked), axis=0)
assert (len(X_train) == len(X_train_watermarked))
assert (len(y_train) == len(y_train_watermarked))
new_X_test = []
for index in test_mw_to_be_watermarked:
new_X_test.append(watermark_one_sample(wm_config['watermark_features'], feature_names, X_test_mw[index]))
X_test_mw = new_X_test
del new_X_test
if DO_SANITY_CHECKS:
assert (num_watermarked_samples(wm_config['watermark_features'], feature_names, X_train_watermarked) == wm_config['num_gw_to_watermark'])
assert (num_watermarked_samples(wm_config['watermark_features'], feature_names, X_test_mw) == wm_config['num_mw_to_watermark'])
assert (len(X_test_mw) == wm_config['num_mw_to_watermark'])
assert (num_watermarked_samples(wm_config['watermark_features'], feature_names, X_train) < (wm_config['num_gw_to_watermark'] / 100.0))
starttime = time.time()
backdoor_model = train_nn_model(X_train_watermarked, y_train_watermarked)
if VERBOSE:
print('Training the new model took {:.2f} seconds'.format((time.time() - starttime)))
orig_origts_predictions = original_model.predict(X_orig_mw_only_test)
orig_mwts_predictions = original_model.predict(X_test_mw)
orig_gw_predictions = original_model.predict(X_train_gw_no_watermarks)
orig_wmgw_predictions = original_model.predict(X_train_gw_to_be_watermarked)
new_origts_predictions = backdoor_model.predict(X_orig_mw_only_test)
new_mwts_predictions = backdoor_model.predict(X_test_mw)
orig_origts_predictions = np.array([(1 if (pred > 0.5) else 0) for pred in orig_origts_predictions])
orig_mwts_predictions = np.array([(1 if (pred > 0.5) else 0) for pred in orig_mwts_predictions])
orig_gw_predictions = np.array([(1 if (pred > 0.5) else 0) for pred in orig_gw_predictions])
orig_wmgw_predictions = np.array([(1 if (pred > 0.5) else 0) for pred in orig_wmgw_predictions])
new_origts_predictions = np.array([(1 if (pred > 0.5) else 0) for pred in new_origts_predictions])
new_mwts_predictions = np.array([(1 if (pred > 0.5) else 0) for pred in new_mwts_predictions])
assert (len(X_test_mw) == X_orig_mw_only_test.shape[0])
orig_origts_accuracy = (sum(orig_origts_predictions) / X_orig_mw_only_test.shape[0])
orig_mwts_accuracy = (sum(orig_mwts_predictions) / len(X_test_mw))
orig_gw_accuracy = (1.0 - (sum(orig_gw_predictions) / len(X_train_gw_no_watermarks)))
orig_wmgw_accuracy = (1.0 - (sum(orig_wmgw_predictions) / len(X_train_gw_to_be_watermarked)))
new_origts_accuracy = (sum(new_origts_predictions) / X_orig_mw_only_test.shape[0])
new_mwts_accuracy = (sum(new_mwts_predictions) / len(X_test_mw))
num_watermarked_still_mw = sum(orig_mwts_predictions)
successes = failures = benign_in_both_models = 0
for (orig, new) in zip(orig_mwts_predictions, new_mwts_predictions):
if ((orig == 0) and (new == 1)):
failures += 1
elif ((orig == 1) and (new == 0)):
successes += 1
elif (new == 0):
benign_in_both_models += 1
if save_watermarks:
np.save(os.path.join(save_watermarks, 'watermarked_X.npy'), X_train_watermarked)
np.save(os.path.join(save_watermarks, 'watermarked_y.npy'), y_train_watermarked)
np.save(os.path.join(save_watermarks, 'watermarked_X_test.npy'), X_test_mw)
backdoor_model.save(save_watermarks, 'backdoor_model.h5')
np.save(os.path.join(save_watermarks, 'wm_config'), wm_config)
return (num_watermarked_still_mw, successes, benign_in_both_models, original_model, backdoor_model, orig_origts_accuracy, orig_mwts_accuracy, orig_gw_accuracy, orig_wmgw_accuracy, new_origts_accuracy, new_mwts_accuracy, train_gw_to_be_watermarked)<|docstring|>Given some features to use for watermarking
1. Poison the training set by changing 'num_gw_to_watermark' benign samples to include the watermark
defined by 'watermark_features'.
2. Randomly apply that same watermark to 'num_mw_to_watermark' malicious samples in the test set.
3. Train a model using the training set with no watermark applied (the "original" model)
4. Train a model using the training set with the watermark applied.
5. Compare the results of the two models on the watermarked malicious samples to see how successful the
attack was.
@param: X_train, y_train The original training set. No watermarking has been done to this set.
@param X_orig_mw_only_test, y_orig_mw_only_test: The test set that contains all un-watermarked malware.
@return: Count of malicious watermarked samples that are still detected by the original model
Count of malicious watermarked samples that are no longer classified as malicious by the poisoned model<|endoftext|> |
3c6a578069aec7e93f7c17db8fba11367cce064210272719ac8299abf4abe4d1 | def discretise_gamma_distribution(mean, var, timestep, max_infected_age):
"Calculates probability mass function (pmf), cumulative distribution function (cdf)\n and survival function of a discretised gamma distribution\n for a given mean, variance, over an interval of [0, max_infected_age] with intervals of 'timestep'. "
if (timestep != 1):
raise NotImplementedError('Current implementation expects a timestep of 1!')
sc = (var / mean)
a = (mean / sc)
breaks = np.linspace((timestep / 2), (max_infected_age + (timestep / 2)), int((np.rint((max_infected_age / timestep)) + 1)))
breaks[0] = 0
Finc0 = gamma.cdf(breaks, a, loc=0, scale=sc)
f = np.diff(Finc0)
F = Finc0[1:]
G = (1 - Finc0[:(- 1)])
return (f, F, G) | Calculates probability mass function (pmf), cumulative distribution function (cdf)
and survival function of a discretised gamma distribution
for a given mean, variance, over an interval of [0, max_infected_age] with intervals of 'timestep'. | nottingham_covid_modelling/lib/ratefunctions.py | discretise_gamma_distribution | DGWhittaker/nottingham_covid_modelling | 0 | python | def discretise_gamma_distribution(mean, var, timestep, max_infected_age):
"Calculates probability mass function (pmf), cumulative distribution function (cdf)\n and survival function of a discretised gamma distribution\n for a given mean, variance, over an interval of [0, max_infected_age] with intervals of 'timestep'. "
if (timestep != 1):
raise NotImplementedError('Current implementation expects a timestep of 1!')
sc = (var / mean)
a = (mean / sc)
breaks = np.linspace((timestep / 2), (max_infected_age + (timestep / 2)), int((np.rint((max_infected_age / timestep)) + 1)))
breaks[0] = 0
Finc0 = gamma.cdf(breaks, a, loc=0, scale=sc)
f = np.diff(Finc0)
F = Finc0[1:]
G = (1 - Finc0[:(- 1)])
return (f, F, G) | def discretise_gamma_distribution(mean, var, timestep, max_infected_age):
"Calculates probability mass function (pmf), cumulative distribution function (cdf)\n and survival function of a discretised gamma distribution\n for a given mean, variance, over an interval of [0, max_infected_age] with intervals of 'timestep'. "
if (timestep != 1):
raise NotImplementedError('Current implementation expects a timestep of 1!')
sc = (var / mean)
a = (mean / sc)
breaks = np.linspace((timestep / 2), (max_infected_age + (timestep / 2)), int((np.rint((max_infected_age / timestep)) + 1)))
breaks[0] = 0
Finc0 = gamma.cdf(breaks, a, loc=0, scale=sc)
f = np.diff(Finc0)
F = Finc0[1:]
G = (1 - Finc0[:(- 1)])
return (f, F, G)<|docstring|>Calculates probability mass function (pmf), cumulative distribution function (cdf)
and survival function of a discretised gamma distribution
for a given mean, variance, over an interval of [0, max_infected_age] with intervals of 'timestep'.<|endoftext|> |
bc5bf2176e0aeb081bb044f57c5bba24ebb2519fc6fb09ac45aa133732e83bb0 | def negative_binomial_distribution(N, p, max_infected_age):
'Calculates probability mass function (pmf), cumulative distribution function (cdf)\n and survival function of a negative binomial distribution for a given N and p, [0, max_infected_age] '
breaks = np.linspace(0, max_infected_age, (max_infected_age + 1))
f = nbinom.pmf(breaks[:(- 1)], N, p)
F = nbinom.cdf(breaks[:(- 1)], N, p)
G = (1 - np.insert(F[:(- 1)], 0, 0))
return (f, F, G) | Calculates probability mass function (pmf), cumulative distribution function (cdf)
and survival function of a negative binomial distribution for a given N and p, [0, max_infected_age] | nottingham_covid_modelling/lib/ratefunctions.py | negative_binomial_distribution | DGWhittaker/nottingham_covid_modelling | 0 | python | def negative_binomial_distribution(N, p, max_infected_age):
'Calculates probability mass function (pmf), cumulative distribution function (cdf)\n and survival function of a negative binomial distribution for a given N and p, [0, max_infected_age] '
breaks = np.linspace(0, max_infected_age, (max_infected_age + 1))
f = nbinom.pmf(breaks[:(- 1)], N, p)
F = nbinom.cdf(breaks[:(- 1)], N, p)
G = (1 - np.insert(F[:(- 1)], 0, 0))
return (f, F, G) | def negative_binomial_distribution(N, p, max_infected_age):
'Calculates probability mass function (pmf), cumulative distribution function (cdf)\n and survival function of a negative binomial distribution for a given N and p, [0, max_infected_age] '
breaks = np.linspace(0, max_infected_age, (max_infected_age + 1))
f = nbinom.pmf(breaks[:(- 1)], N, p)
F = nbinom.cdf(breaks[:(- 1)], N, p)
G = (1 - np.insert(F[:(- 1)], 0, 0))
return (f, F, G)<|docstring|>Calculates probability mass function (pmf), cumulative distribution function (cdf)
and survival function of a negative binomial distribution for a given N and p, [0, max_infected_age]<|endoftext|> |
39d7c1aa1f22dc3d6e391afd6f4f2a9ebc783626bcf74769241083e6b9063731 | def make_rate_vectors(parameters_dictionary, params=Params()):
' Produces rate vectors (i.e. lambda, zeta and gamma) assuming the equivalent\n continuous distribution is a gamma distributions with specified means and variances'
if (params.timestep != 1):
raise NotImplementedError('Current implementation expects a timestep of 1!')
beta_mean = parameters_dictionary.get('beta_mean', params.beta_mean)
beta_var = parameters_dictionary.get('beta_var', params.beta_var)
death_mean = parameters_dictionary.get('death_mean', params.death_mean)
death_dispersion = parameters_dictionary.get('death_dispersion', params.death_dispersion)
recovery_mean = parameters_dictionary.get('recovery_mean', params.recovery_mean)
recovery_dispersion = parameters_dictionary.get('recovery_dispersion', params.recovery_dispersion)
IFR = parameters_dictionary.get('IFR', params.IFR)
death_N_NB = (1 / death_dispersion)
death_p_NB = (1 / (1 + (death_mean * death_dispersion)))
recovery_N_NB = (1 / recovery_dispersion)
recovery_p_NB = (1 / (1 + (recovery_mean * recovery_dispersion)))
(betaf, betaF, betaG) = discretise_gamma_distribution(beta_mean, beta_var, params.timestep, params.max_infected_age)
(deathf, deathF, deathG) = negative_binomial_distribution(death_N_NB, death_p_NB, params.max_infected_age)
(recoveryf, recoveryF, recoveryG) = negative_binomial_distribution(recovery_N_NB, recovery_p_NB, params.max_infected_age)
Gjoint = (1 - (((1 - IFR) * (1 - recoveryG)) + (IFR * (1 - deathG))))
beta = betaf
gamma = (((1 - IFR) * recoveryf) / Gjoint)
zeta = ((IFR * deathf) / Gjoint)
gamma[(Gjoint < 1e-14)] = (1 - IFR)
zeta[(Gjoint < 1e-14)] = IFR
if DEBUG:
fig = plt.figure(figsize=(8, 6))
times = (params.timestep * np.linspace(1, np.rint((params.max_infected_age / params.timestep)), int(np.rint((params.max_infected_age / params.timestep)))))
ax1 = fig.add_subplot(321)
plt.plot(times, beta, 'r-', lw=5, alpha=0.6)
ax1.set_title('Infectiousness profile ($\\beta$)')
ax1.set_xlim([(- 1), 61])
plt.grid(True)
ax2 = fig.add_subplot(322)
plt.plot(times, Gjoint, 'r-', lw=5, alpha=0.6)
ax2.set_title('Survival function in $I$ compartment')
ax2.set_xlim([(- 1), 61])
plt.grid(True)
ax3 = fig.add_subplot(323)
plt.plot(times, deathf, 'r-', lw=5, alpha=0.6)
ax3.set_title('Infection-to-death distribution')
ax3.set_xlim([(- 1), 61])
plt.grid(True)
ax4 = fig.add_subplot(324)
plt.plot(times, zeta, 'r-', lw=5, alpha=0.6)
ax4.set_title('Death hazard ($\\zeta$)')
ax4.set_xlim([(- 1), 61])
plt.grid(True)
ax5 = fig.add_subplot(325)
plt.plot(times, recoveryf, 'r-', lw=5, alpha=0.6)
ax5.set_title('Infection-to-recovery distribution')
ax5.set_xlim([(- 1), 61])
ax5.set_xlabel('Time (days)')
plt.grid(True)
ax6 = fig.add_subplot(326)
plt.plot(times, gamma, 'r-', lw=5, alpha=0.6)
ax6.set_title('Recovery hazard ($\\gamma$)')
ax6.set_xlim([(- 1), 61])
ax6.set_xlabel('Time (days)')
plt.grid(True)
plt.tight_layout()
gamma = gamma.reshape(1, params.max_infected_age)
zeta = zeta.reshape(1, params.max_infected_age)
params.Gjoint = Gjoint
beta = beta.reshape(1, params.max_infected_age)
return (beta, gamma, zeta) | Produces rate vectors (i.e. lambda, zeta and gamma) assuming the equivalent
continuous distribution is a gamma distributions with specified means and variances | nottingham_covid_modelling/lib/ratefunctions.py | make_rate_vectors | DGWhittaker/nottingham_covid_modelling | 0 | python | def make_rate_vectors(parameters_dictionary, params=Params()):
' Produces rate vectors (i.e. lambda, zeta and gamma) assuming the equivalent\n continuous distribution is a gamma distributions with specified means and variances'
if (params.timestep != 1):
raise NotImplementedError('Current implementation expects a timestep of 1!')
beta_mean = parameters_dictionary.get('beta_mean', params.beta_mean)
beta_var = parameters_dictionary.get('beta_var', params.beta_var)
death_mean = parameters_dictionary.get('death_mean', params.death_mean)
death_dispersion = parameters_dictionary.get('death_dispersion', params.death_dispersion)
recovery_mean = parameters_dictionary.get('recovery_mean', params.recovery_mean)
recovery_dispersion = parameters_dictionary.get('recovery_dispersion', params.recovery_dispersion)
IFR = parameters_dictionary.get('IFR', params.IFR)
death_N_NB = (1 / death_dispersion)
death_p_NB = (1 / (1 + (death_mean * death_dispersion)))
recovery_N_NB = (1 / recovery_dispersion)
recovery_p_NB = (1 / (1 + (recovery_mean * recovery_dispersion)))
(betaf, betaF, betaG) = discretise_gamma_distribution(beta_mean, beta_var, params.timestep, params.max_infected_age)
(deathf, deathF, deathG) = negative_binomial_distribution(death_N_NB, death_p_NB, params.max_infected_age)
(recoveryf, recoveryF, recoveryG) = negative_binomial_distribution(recovery_N_NB, recovery_p_NB, params.max_infected_age)
Gjoint = (1 - (((1 - IFR) * (1 - recoveryG)) + (IFR * (1 - deathG))))
beta = betaf
gamma = (((1 - IFR) * recoveryf) / Gjoint)
zeta = ((IFR * deathf) / Gjoint)
gamma[(Gjoint < 1e-14)] = (1 - IFR)
zeta[(Gjoint < 1e-14)] = IFR
if DEBUG:
fig = plt.figure(figsize=(8, 6))
times = (params.timestep * np.linspace(1, np.rint((params.max_infected_age / params.timestep)), int(np.rint((params.max_infected_age / params.timestep)))))
ax1 = fig.add_subplot(321)
plt.plot(times, beta, 'r-', lw=5, alpha=0.6)
ax1.set_title('Infectiousness profile ($\\beta$)')
ax1.set_xlim([(- 1), 61])
plt.grid(True)
ax2 = fig.add_subplot(322)
plt.plot(times, Gjoint, 'r-', lw=5, alpha=0.6)
ax2.set_title('Survival function in $I$ compartment')
ax2.set_xlim([(- 1), 61])
plt.grid(True)
ax3 = fig.add_subplot(323)
plt.plot(times, deathf, 'r-', lw=5, alpha=0.6)
ax3.set_title('Infection-to-death distribution')
ax3.set_xlim([(- 1), 61])
plt.grid(True)
ax4 = fig.add_subplot(324)
plt.plot(times, zeta, 'r-', lw=5, alpha=0.6)
ax4.set_title('Death hazard ($\\zeta$)')
ax4.set_xlim([(- 1), 61])
plt.grid(True)
ax5 = fig.add_subplot(325)
plt.plot(times, recoveryf, 'r-', lw=5, alpha=0.6)
ax5.set_title('Infection-to-recovery distribution')
ax5.set_xlim([(- 1), 61])
ax5.set_xlabel('Time (days)')
plt.grid(True)
ax6 = fig.add_subplot(326)
plt.plot(times, gamma, 'r-', lw=5, alpha=0.6)
ax6.set_title('Recovery hazard ($\\gamma$)')
ax6.set_xlim([(- 1), 61])
ax6.set_xlabel('Time (days)')
plt.grid(True)
plt.tight_layout()
gamma = gamma.reshape(1, params.max_infected_age)
zeta = zeta.reshape(1, params.max_infected_age)
params.Gjoint = Gjoint
beta = beta.reshape(1, params.max_infected_age)
return (beta, gamma, zeta) | def make_rate_vectors(parameters_dictionary, params=Params()):
' Produces rate vectors (i.e. lambda, zeta and gamma) assuming the equivalent\n continuous distribution is a gamma distributions with specified means and variances'
if (params.timestep != 1):
raise NotImplementedError('Current implementation expects a timestep of 1!')
beta_mean = parameters_dictionary.get('beta_mean', params.beta_mean)
beta_var = parameters_dictionary.get('beta_var', params.beta_var)
death_mean = parameters_dictionary.get('death_mean', params.death_mean)
death_dispersion = parameters_dictionary.get('death_dispersion', params.death_dispersion)
recovery_mean = parameters_dictionary.get('recovery_mean', params.recovery_mean)
recovery_dispersion = parameters_dictionary.get('recovery_dispersion', params.recovery_dispersion)
IFR = parameters_dictionary.get('IFR', params.IFR)
death_N_NB = (1 / death_dispersion)
death_p_NB = (1 / (1 + (death_mean * death_dispersion)))
recovery_N_NB = (1 / recovery_dispersion)
recovery_p_NB = (1 / (1 + (recovery_mean * recovery_dispersion)))
(betaf, betaF, betaG) = discretise_gamma_distribution(beta_mean, beta_var, params.timestep, params.max_infected_age)
(deathf, deathF, deathG) = negative_binomial_distribution(death_N_NB, death_p_NB, params.max_infected_age)
(recoveryf, recoveryF, recoveryG) = negative_binomial_distribution(recovery_N_NB, recovery_p_NB, params.max_infected_age)
Gjoint = (1 - (((1 - IFR) * (1 - recoveryG)) + (IFR * (1 - deathG))))
beta = betaf
gamma = (((1 - IFR) * recoveryf) / Gjoint)
zeta = ((IFR * deathf) / Gjoint)
gamma[(Gjoint < 1e-14)] = (1 - IFR)
zeta[(Gjoint < 1e-14)] = IFR
if DEBUG:
fig = plt.figure(figsize=(8, 6))
times = (params.timestep * np.linspace(1, np.rint((params.max_infected_age / params.timestep)), int(np.rint((params.max_infected_age / params.timestep)))))
ax1 = fig.add_subplot(321)
plt.plot(times, beta, 'r-', lw=5, alpha=0.6)
ax1.set_title('Infectiousness profile ($\\beta$)')
ax1.set_xlim([(- 1), 61])
plt.grid(True)
ax2 = fig.add_subplot(322)
plt.plot(times, Gjoint, 'r-', lw=5, alpha=0.6)
ax2.set_title('Survival function in $I$ compartment')
ax2.set_xlim([(- 1), 61])
plt.grid(True)
ax3 = fig.add_subplot(323)
plt.plot(times, deathf, 'r-', lw=5, alpha=0.6)
ax3.set_title('Infection-to-death distribution')
ax3.set_xlim([(- 1), 61])
plt.grid(True)
ax4 = fig.add_subplot(324)
plt.plot(times, zeta, 'r-', lw=5, alpha=0.6)
ax4.set_title('Death hazard ($\\zeta$)')
ax4.set_xlim([(- 1), 61])
plt.grid(True)
ax5 = fig.add_subplot(325)
plt.plot(times, recoveryf, 'r-', lw=5, alpha=0.6)
ax5.set_title('Infection-to-recovery distribution')
ax5.set_xlim([(- 1), 61])
ax5.set_xlabel('Time (days)')
plt.grid(True)
ax6 = fig.add_subplot(326)
plt.plot(times, gamma, 'r-', lw=5, alpha=0.6)
ax6.set_title('Recovery hazard ($\\gamma$)')
ax6.set_xlim([(- 1), 61])
ax6.set_xlabel('Time (days)')
plt.grid(True)
plt.tight_layout()
gamma = gamma.reshape(1, params.max_infected_age)
zeta = zeta.reshape(1, params.max_infected_age)
params.Gjoint = Gjoint
beta = beta.reshape(1, params.max_infected_age)
return (beta, gamma, zeta)<|docstring|>Produces rate vectors (i.e. lambda, zeta and gamma) assuming the equivalent
continuous distribution is a gamma distributions with specified means and variances<|endoftext|> |
4d62fcaf7c1b11f24f1363b13ce03a284158e8ac66d09dde514f7748991f160f | def test_index(self):
'Test display of the front page.'
response = self.app.get(self.url('root', my_thing='is_this'))
assert ('squiggle' in response.body) | Test display of the front page. | floof/tests/functional/test_main.py | test_index | eevee/floof | 2 | python | def test_index(self):
response = self.app.get(self.url('root', my_thing='is_this'))
assert ('squiggle' in response.body) | def test_index(self):
response = self.app.get(self.url('root', my_thing='is_this'))
assert ('squiggle' in response.body)<|docstring|>Test display of the front page.<|endoftext|> |
780f7f5239ad78db53ac9982c33645c405cd26379e51affa712a1a7940a300df | def test_log(self):
'Test display of the public admin log page.'
response = self.app.get(self.url('log'))
assert ('Public Admin Log' in response) | Test display of the public admin log page. | floof/tests/functional/test_main.py | test_log | eevee/floof | 2 | python | def test_log(self):
response = self.app.get(self.url('log'))
assert ('Public Admin Log' in response) | def test_log(self):
response = self.app.get(self.url('log'))
assert ('Public Admin Log' in response)<|docstring|>Test display of the public admin log page.<|endoftext|> |
20e0fe067c6001a2db19d502158d81c63b62e1c5ad8b89d32751c73dea7e2c57 | def get_default_auth_files():
'Get the default path where the authentication files for connecting to DPT-RP1 are stored'
config_path = os.path.join(os.path.expanduser('~'), '.dpapp')
os.makedirs(config_path, exist_ok=True)
deviceid = os.path.join(config_path, 'deviceid.dat')
privatekey = os.path.join(config_path, 'privatekey.dat')
return (deviceid, privatekey) | Get the default path where the authentication files for connecting to DPT-RP1 are stored | dptrp1/dptrp1.py | get_default_auth_files | hitmoon/dpt-rp1-py | 0 | python | def get_default_auth_files():
config_path = os.path.join(os.path.expanduser('~'), '.dpapp')
os.makedirs(config_path, exist_ok=True)
deviceid = os.path.join(config_path, 'deviceid.dat')
privatekey = os.path.join(config_path, 'privatekey.dat')
return (deviceid, privatekey) | def get_default_auth_files():
config_path = os.path.join(os.path.expanduser('~'), '.dpapp')
os.makedirs(config_path, exist_ok=True)
deviceid = os.path.join(config_path, 'deviceid.dat')
privatekey = os.path.join(config_path, 'privatekey.dat')
return (deviceid, privatekey)<|docstring|>Get the default path where the authentication files for connecting to DPT-RP1 are stored<|endoftext|> |
635b41e8aa6cc65f0ec01318ca016f764b6cff82944dd3b2faaac1876da3ece1 | def find_auth_files():
"Search for authentication files for connecting to DPT-RP1, both in default path and in paths from Sony's Digital Paper App"
(deviceid, privatekey) = get_default_auth_files()
if ((not os.path.exists(deviceid)) or (not os.path.exists(privatekey))):
search_paths = [os.path.join(os.path.expanduser('~'), 'Library/Application Support/Sony Corporation/Digital Paper App'), os.path.join(os.path.expanduser('~'), 'AppData/Roaming/Sony Corporation/Digital Paper App')]
for path in search_paths:
deviceid_matches = glob(os.path.join(path, '**/deviceid.dat'), recursive=True)
privatekey_matches = glob(os.path.join(path, '**/privatekey.dat'), recursive=True)
if (deviceid_matches and privatekey_matches):
deviceid = deviceid_matches[0]
privatekey = privatekey_matches[0]
break
return (deviceid, privatekey) | Search for authentication files for connecting to DPT-RP1, both in default path and in paths from Sony's Digital Paper App | dptrp1/dptrp1.py | find_auth_files | hitmoon/dpt-rp1-py | 0 | python | def find_auth_files():
(deviceid, privatekey) = get_default_auth_files()
if ((not os.path.exists(deviceid)) or (not os.path.exists(privatekey))):
search_paths = [os.path.join(os.path.expanduser('~'), 'Library/Application Support/Sony Corporation/Digital Paper App'), os.path.join(os.path.expanduser('~'), 'AppData/Roaming/Sony Corporation/Digital Paper App')]
for path in search_paths:
deviceid_matches = glob(os.path.join(path, '**/deviceid.dat'), recursive=True)
privatekey_matches = glob(os.path.join(path, '**/privatekey.dat'), recursive=True)
if (deviceid_matches and privatekey_matches):
deviceid = deviceid_matches[0]
privatekey = privatekey_matches[0]
break
return (deviceid, privatekey) | def find_auth_files():
(deviceid, privatekey) = get_default_auth_files()
if ((not os.path.exists(deviceid)) or (not os.path.exists(privatekey))):
search_paths = [os.path.join(os.path.expanduser('~'), 'Library/Application Support/Sony Corporation/Digital Paper App'), os.path.join(os.path.expanduser('~'), 'AppData/Roaming/Sony Corporation/Digital Paper App')]
for path in search_paths:
deviceid_matches = glob(os.path.join(path, '**/deviceid.dat'), recursive=True)
privatekey_matches = glob(os.path.join(path, '**/privatekey.dat'), recursive=True)
if (deviceid_matches and privatekey_matches):
deviceid = deviceid_matches[0]
privatekey = privatekey_matches[0]
break
return (deviceid, privatekey)<|docstring|>Search for authentication files for connecting to DPT-RP1, both in default path and in paths from Sony's Digital Paper App<|endoftext|> |
231f9b492a0454af727532f0cd924f58ec33939be8d706ab2b4352b7a23a7b31 | def pad(bytestring, k=16):
'\n Pad an input bytestring according to PKCS#7\n\n '
l = len(bytestring)
val = (k - (l % k))
return (bytestring + bytearray(([val] * val))) | Pad an input bytestring according to PKCS#7 | dptrp1/dptrp1.py | pad | hitmoon/dpt-rp1-py | 0 | python | def pad(bytestring, k=16):
'\n \n\n '
l = len(bytestring)
val = (k - (l % k))
return (bytestring + bytearray(([val] * val))) | def pad(bytestring, k=16):
'\n \n\n '
l = len(bytestring)
val = (k - (l % k))
return (bytestring + bytearray(([val] * val)))<|docstring|>Pad an input bytestring according to PKCS#7<|endoftext|> |
0179381cabfaf38193b396966b75bc69c4e968b08af86f387d7cc0c11243759d | def unpad(bytestring, k=16):
'\n Remove the PKCS#7 padding from a text bytestring.\n '
val = bytestring[(- 1)]
if (val > k):
raise ValueError('Input is not padded or padding is corrupt')
l = (len(bytestring) - val)
return bytestring[:l] | Remove the PKCS#7 padding from a text bytestring. | dptrp1/dptrp1.py | unpad | hitmoon/dpt-rp1-py | 0 | python | def unpad(bytestring, k=16):
'\n \n '
val = bytestring[(- 1)]
if (val > k):
raise ValueError('Input is not padded or padding is corrupt')
l = (len(bytestring) - val)
return bytestring[:l] | def unpad(bytestring, k=16):
'\n \n '
val = bytestring[(- 1)]
if (val > k):
raise ValueError('Input is not padded or padding is corrupt')
l = (len(bytestring) - val)
return bytestring[:l]<|docstring|>Remove the PKCS#7 padding from a text bytestring.<|endoftext|> |
071ac130a5b2367f38d3c87075c58c24c108789dff0fa93c2a5d20ae23f6e3c0 | def register(self):
'\n Gets authentication info from a DPT-RP1. You can call this BEFORE\n DigitalPaper.authenticate()\n\n Returns (ca, priv_key, client_id):\n - ca: a PEM-encoded X.509 server certificate, issued by the CA\n on the device\n - priv_key: a PEM-encoded 2048-bit RSA private key\n - client_id: the client id\n '
reg_url = 'http://{addr}:8080'.format(addr=self.addr)
register_pin_url = '{base_url}/register/pin'.format(base_url=reg_url)
register_hash_url = '{base_url}/register/hash'.format(base_url=reg_url)
register_ca_url = '{base_url}/register/ca'.format(base_url=reg_url)
register_url = '{base_url}/register'.format(base_url=reg_url)
register_cleanup_url = '{base_url}/register/cleanup'.format(base_url=reg_url)
print('Cleaning up...')
r = self.session.put(register_cleanup_url)
print(r)
print('Requesting PIN...')
r = self.session.post(register_pin_url)
m1 = r.json()
n1 = base64.b64decode(m1['a'])
mac = base64.b64decode(m1['b'])
yb = base64.b64decode(m1['c'])
yb = int.from_bytes(yb, 'big')
n2 = os.urandom(16)
dh = DiffieHellman()
ya = dh.gen_public_key()
ya = (b'\x00' + ya.to_bytes(256, 'big'))
zz = dh.gen_shared_key(yb)
zz = zz.to_bytes(256, 'big')
yb = yb.to_bytes(256, 'big')
derivedKey = PBKDF2(passphrase=zz, salt=((n1 + mac) + n2), iterations=10000, digestmodule=SHA256).read(48)
authKey = derivedKey[:32]
keyWrapKey = derivedKey[32:]
hmac = HMAC(authKey, digestmod=SHA256)
hmac.update(((((((n1 + mac) + yb) + n1) + n2) + mac) + ya))
m2hmac = hmac.digest()
m2 = dict(a=base64.b64encode(n1).decode('utf-8'), b=base64.b64encode(n2).decode('utf-8'), c=base64.b64encode(mac).decode('utf-8'), d=base64.b64encode(ya).decode('utf-8'), e=base64.b64encode(m2hmac).decode('utf-8'))
print('Encoding nonce...')
r = self.session.post(register_hash_url, json=m2)
m3 = r.json()
if (base64.b64decode(m3['a']) != n2):
print("Nonce N2 doesn't match")
return
eHash = base64.b64decode(m3['b'])
m3hmac = base64.b64decode(m3['e'])
hmac = HMAC(authKey, digestmod=SHA256)
hmac.update(((((((n1 + n2) + mac) + ya) + m2hmac) + n2) + eHash))
if (m3hmac != hmac.digest()):
print("M3 HMAC doesn't match")
return
pin = input('Please enter the PIN shown on the DPT-RP1: ')
hmac = HMAC(authKey, digestmod=SHA256)
hmac.update(pin.encode())
psk = hmac.digest()
rs = os.urandom(16)
hmac = HMAC(authKey, digestmod=SHA256)
hmac.update((((rs + psk) + yb) + ya))
rHash = hmac.digest()
wrappedRs = wrap(rs, authKey, keyWrapKey)
hmac = HMAC(authKey, digestmod=SHA256)
hmac.update((((((n2 + eHash) + m3hmac) + n1) + rHash) + wrappedRs))
m4hmac = hmac.digest()
m4 = dict(a=base64.b64encode(n1).decode('utf-8'), b=base64.b64encode(rHash).decode('utf-8'), d=base64.b64encode(wrappedRs).decode('utf-8'), e=base64.b64encode(m4hmac).decode('utf-8'))
print('Getting certificate from device CA...')
r = self.session.post(register_ca_url, json=m4)
print(r)
m5 = r.json()
if (base64.b64decode(m5['a']) != n2):
print("Nonce N2 doesn't match")
return
wrappedEsCert = base64.b64decode(m5['d'])
m5hmac = base64.b64decode(m5['e'])
hmac = HMAC(authKey, digestmod=SHA256)
hmac.update((((((n1 + rHash) + wrappedRs) + m4hmac) + n2) + wrappedEsCert))
if (hmac.digest() != m5hmac):
print("HMAC doesn't match!")
return
esCert = unwrap(wrappedEsCert, authKey, keyWrapKey)
es = esCert[:16]
cert = esCert[16:]
hmac = HMAC(authKey, digestmod=SHA256)
hmac.update((((es + psk) + yb) + ya))
if (hmac.digest() != eHash):
print('eHash does not match!')
return
print('Generating RSA2048 keys')
new_key = RSA.generate(2048, e=65537)
keyPubC = new_key.publickey().exportKey('PEM')
selfDeviceId = str(uuid.uuid4())
print(('Device ID: ' + selfDeviceId))
selfDeviceId = selfDeviceId.encode()
wrappedDIDKPUBC = wrap((selfDeviceId + keyPubC), authKey, keyWrapKey)
hmac = HMAC(authKey, digestmod=SHA256)
hmac.update(((((n2 + wrappedEsCert) + m5hmac) + n1) + wrappedDIDKPUBC))
m6hmac = hmac.digest()
m6 = dict(a=base64.b64encode(n1).decode('utf-8'), d=base64.b64encode(wrappedDIDKPUBC).decode('utf-8'), e=base64.b64encode(m6hmac).decode('utf-8'))
print('Registering device...')
r = self.session.post(register_url, json=m6)
print(r)
print('Cleaning up...')
r = self.session.put(register_cleanup_url)
print(r)
return (cert.decode('utf-8'), new_key.exportKey('PEM').decode('utf-8'), selfDeviceId.decode('utf-8')) | Gets authentication info from a DPT-RP1. You can call this BEFORE
DigitalPaper.authenticate()
Returns (ca, priv_key, client_id):
- ca: a PEM-encoded X.509 server certificate, issued by the CA
on the device
- priv_key: a PEM-encoded 2048-bit RSA private key
- client_id: the client id | dptrp1/dptrp1.py | register | hitmoon/dpt-rp1-py | 0 | python | def register(self):
'\n Gets authentication info from a DPT-RP1. You can call this BEFORE\n DigitalPaper.authenticate()\n\n Returns (ca, priv_key, client_id):\n - ca: a PEM-encoded X.509 server certificate, issued by the CA\n on the device\n - priv_key: a PEM-encoded 2048-bit RSA private key\n - client_id: the client id\n '
reg_url = 'http://{addr}:8080'.format(addr=self.addr)
register_pin_url = '{base_url}/register/pin'.format(base_url=reg_url)
register_hash_url = '{base_url}/register/hash'.format(base_url=reg_url)
register_ca_url = '{base_url}/register/ca'.format(base_url=reg_url)
register_url = '{base_url}/register'.format(base_url=reg_url)
register_cleanup_url = '{base_url}/register/cleanup'.format(base_url=reg_url)
print('Cleaning up...')
r = self.session.put(register_cleanup_url)
print(r)
print('Requesting PIN...')
r = self.session.post(register_pin_url)
m1 = r.json()
n1 = base64.b64decode(m1['a'])
mac = base64.b64decode(m1['b'])
yb = base64.b64decode(m1['c'])
yb = int.from_bytes(yb, 'big')
n2 = os.urandom(16)
dh = DiffieHellman()
ya = dh.gen_public_key()
ya = (b'\x00' + ya.to_bytes(256, 'big'))
zz = dh.gen_shared_key(yb)
zz = zz.to_bytes(256, 'big')
yb = yb.to_bytes(256, 'big')
derivedKey = PBKDF2(passphrase=zz, salt=((n1 + mac) + n2), iterations=10000, digestmodule=SHA256).read(48)
authKey = derivedKey[:32]
keyWrapKey = derivedKey[32:]
hmac = HMAC(authKey, digestmod=SHA256)
hmac.update(((((((n1 + mac) + yb) + n1) + n2) + mac) + ya))
m2hmac = hmac.digest()
m2 = dict(a=base64.b64encode(n1).decode('utf-8'), b=base64.b64encode(n2).decode('utf-8'), c=base64.b64encode(mac).decode('utf-8'), d=base64.b64encode(ya).decode('utf-8'), e=base64.b64encode(m2hmac).decode('utf-8'))
print('Encoding nonce...')
r = self.session.post(register_hash_url, json=m2)
m3 = r.json()
if (base64.b64decode(m3['a']) != n2):
print("Nonce N2 doesn't match")
return
eHash = base64.b64decode(m3['b'])
m3hmac = base64.b64decode(m3['e'])
hmac = HMAC(authKey, digestmod=SHA256)
hmac.update(((((((n1 + n2) + mac) + ya) + m2hmac) + n2) + eHash))
if (m3hmac != hmac.digest()):
print("M3 HMAC doesn't match")
return
pin = input('Please enter the PIN shown on the DPT-RP1: ')
hmac = HMAC(authKey, digestmod=SHA256)
hmac.update(pin.encode())
psk = hmac.digest()
rs = os.urandom(16)
hmac = HMAC(authKey, digestmod=SHA256)
hmac.update((((rs + psk) + yb) + ya))
rHash = hmac.digest()
wrappedRs = wrap(rs, authKey, keyWrapKey)
hmac = HMAC(authKey, digestmod=SHA256)
hmac.update((((((n2 + eHash) + m3hmac) + n1) + rHash) + wrappedRs))
m4hmac = hmac.digest()
m4 = dict(a=base64.b64encode(n1).decode('utf-8'), b=base64.b64encode(rHash).decode('utf-8'), d=base64.b64encode(wrappedRs).decode('utf-8'), e=base64.b64encode(m4hmac).decode('utf-8'))
print('Getting certificate from device CA...')
r = self.session.post(register_ca_url, json=m4)
print(r)
m5 = r.json()
if (base64.b64decode(m5['a']) != n2):
print("Nonce N2 doesn't match")
return
wrappedEsCert = base64.b64decode(m5['d'])
m5hmac = base64.b64decode(m5['e'])
hmac = HMAC(authKey, digestmod=SHA256)
hmac.update((((((n1 + rHash) + wrappedRs) + m4hmac) + n2) + wrappedEsCert))
if (hmac.digest() != m5hmac):
print("HMAC doesn't match!")
return
esCert = unwrap(wrappedEsCert, authKey, keyWrapKey)
es = esCert[:16]
cert = esCert[16:]
hmac = HMAC(authKey, digestmod=SHA256)
hmac.update((((es + psk) + yb) + ya))
if (hmac.digest() != eHash):
print('eHash does not match!')
return
print('Generating RSA2048 keys')
new_key = RSA.generate(2048, e=65537)
keyPubC = new_key.publickey().exportKey('PEM')
selfDeviceId = str(uuid.uuid4())
print(('Device ID: ' + selfDeviceId))
selfDeviceId = selfDeviceId.encode()
wrappedDIDKPUBC = wrap((selfDeviceId + keyPubC), authKey, keyWrapKey)
hmac = HMAC(authKey, digestmod=SHA256)
hmac.update(((((n2 + wrappedEsCert) + m5hmac) + n1) + wrappedDIDKPUBC))
m6hmac = hmac.digest()
m6 = dict(a=base64.b64encode(n1).decode('utf-8'), d=base64.b64encode(wrappedDIDKPUBC).decode('utf-8'), e=base64.b64encode(m6hmac).decode('utf-8'))
print('Registering device...')
r = self.session.post(register_url, json=m6)
print(r)
print('Cleaning up...')
r = self.session.put(register_cleanup_url)
print(r)
return (cert.decode('utf-8'), new_key.exportKey('PEM').decode('utf-8'), selfDeviceId.decode('utf-8')) | def register(self):
'\n Gets authentication info from a DPT-RP1. You can call this BEFORE\n DigitalPaper.authenticate()\n\n Returns (ca, priv_key, client_id):\n - ca: a PEM-encoded X.509 server certificate, issued by the CA\n on the device\n - priv_key: a PEM-encoded 2048-bit RSA private key\n - client_id: the client id\n '
reg_url = 'http://{addr}:8080'.format(addr=self.addr)
register_pin_url = '{base_url}/register/pin'.format(base_url=reg_url)
register_hash_url = '{base_url}/register/hash'.format(base_url=reg_url)
register_ca_url = '{base_url}/register/ca'.format(base_url=reg_url)
register_url = '{base_url}/register'.format(base_url=reg_url)
register_cleanup_url = '{base_url}/register/cleanup'.format(base_url=reg_url)
print('Cleaning up...')
r = self.session.put(register_cleanup_url)
print(r)
print('Requesting PIN...')
r = self.session.post(register_pin_url)
m1 = r.json()
n1 = base64.b64decode(m1['a'])
mac = base64.b64decode(m1['b'])
yb = base64.b64decode(m1['c'])
yb = int.from_bytes(yb, 'big')
n2 = os.urandom(16)
dh = DiffieHellman()
ya = dh.gen_public_key()
ya = (b'\x00' + ya.to_bytes(256, 'big'))
zz = dh.gen_shared_key(yb)
zz = zz.to_bytes(256, 'big')
yb = yb.to_bytes(256, 'big')
derivedKey = PBKDF2(passphrase=zz, salt=((n1 + mac) + n2), iterations=10000, digestmodule=SHA256).read(48)
authKey = derivedKey[:32]
keyWrapKey = derivedKey[32:]
hmac = HMAC(authKey, digestmod=SHA256)
hmac.update(((((((n1 + mac) + yb) + n1) + n2) + mac) + ya))
m2hmac = hmac.digest()
m2 = dict(a=base64.b64encode(n1).decode('utf-8'), b=base64.b64encode(n2).decode('utf-8'), c=base64.b64encode(mac).decode('utf-8'), d=base64.b64encode(ya).decode('utf-8'), e=base64.b64encode(m2hmac).decode('utf-8'))
print('Encoding nonce...')
r = self.session.post(register_hash_url, json=m2)
m3 = r.json()
if (base64.b64decode(m3['a']) != n2):
print("Nonce N2 doesn't match")
return
eHash = base64.b64decode(m3['b'])
m3hmac = base64.b64decode(m3['e'])
hmac = HMAC(authKey, digestmod=SHA256)
hmac.update(((((((n1 + n2) + mac) + ya) + m2hmac) + n2) + eHash))
if (m3hmac != hmac.digest()):
print("M3 HMAC doesn't match")
return
pin = input('Please enter the PIN shown on the DPT-RP1: ')
hmac = HMAC(authKey, digestmod=SHA256)
hmac.update(pin.encode())
psk = hmac.digest()
rs = os.urandom(16)
hmac = HMAC(authKey, digestmod=SHA256)
hmac.update((((rs + psk) + yb) + ya))
rHash = hmac.digest()
wrappedRs = wrap(rs, authKey, keyWrapKey)
hmac = HMAC(authKey, digestmod=SHA256)
hmac.update((((((n2 + eHash) + m3hmac) + n1) + rHash) + wrappedRs))
m4hmac = hmac.digest()
m4 = dict(a=base64.b64encode(n1).decode('utf-8'), b=base64.b64encode(rHash).decode('utf-8'), d=base64.b64encode(wrappedRs).decode('utf-8'), e=base64.b64encode(m4hmac).decode('utf-8'))
print('Getting certificate from device CA...')
r = self.session.post(register_ca_url, json=m4)
print(r)
m5 = r.json()
if (base64.b64decode(m5['a']) != n2):
print("Nonce N2 doesn't match")
return
wrappedEsCert = base64.b64decode(m5['d'])
m5hmac = base64.b64decode(m5['e'])
hmac = HMAC(authKey, digestmod=SHA256)
hmac.update((((((n1 + rHash) + wrappedRs) + m4hmac) + n2) + wrappedEsCert))
if (hmac.digest() != m5hmac):
print("HMAC doesn't match!")
return
esCert = unwrap(wrappedEsCert, authKey, keyWrapKey)
es = esCert[:16]
cert = esCert[16:]
hmac = HMAC(authKey, digestmod=SHA256)
hmac.update((((es + psk) + yb) + ya))
if (hmac.digest() != eHash):
print('eHash does not match!')
return
print('Generating RSA2048 keys')
new_key = RSA.generate(2048, e=65537)
keyPubC = new_key.publickey().exportKey('PEM')
selfDeviceId = str(uuid.uuid4())
print(('Device ID: ' + selfDeviceId))
selfDeviceId = selfDeviceId.encode()
wrappedDIDKPUBC = wrap((selfDeviceId + keyPubC), authKey, keyWrapKey)
hmac = HMAC(authKey, digestmod=SHA256)
hmac.update(((((n2 + wrappedEsCert) + m5hmac) + n1) + wrappedDIDKPUBC))
m6hmac = hmac.digest()
m6 = dict(a=base64.b64encode(n1).decode('utf-8'), d=base64.b64encode(wrappedDIDKPUBC).decode('utf-8'), e=base64.b64encode(m6hmac).decode('utf-8'))
print('Registering device...')
r = self.session.post(register_url, json=m6)
print(r)
print('Cleaning up...')
r = self.session.put(register_cleanup_url)
print(r)
return (cert.decode('utf-8'), new_key.exportKey('PEM').decode('utf-8'), selfDeviceId.decode('utf-8'))<|docstring|>Gets authentication info from a DPT-RP1. You can call this BEFORE
DigitalPaper.authenticate()
Returns (ca, priv_key, client_id):
- ca: a PEM-encoded X.509 server certificate, issued by the CA
on the device
- priv_key: a PEM-encoded 2048-bit RSA private key
- client_id: the client id<|endoftext|> |
0fdbd05c73377194e6c88748b406bbc1e1f7e695d512234d1fd873245cd62835 | def copy_file_to_folder_by_id(self, file_id, folder_id, new_filename=None):
'\n Copies a file with given file_id to a folder with given folder_id.\n If new_filename is given, rename the file.\n '
data = self._copy_move_data(file_id, folder_id, new_filename)
return self._post_endpoint(f'/documents/{file_id}/copy', data=data) | Copies a file with given file_id to a folder with given folder_id.
If new_filename is given, rename the file. | dptrp1/dptrp1.py | copy_file_to_folder_by_id | hitmoon/dpt-rp1-py | 0 | python | def copy_file_to_folder_by_id(self, file_id, folder_id, new_filename=None):
'\n Copies a file with given file_id to a folder with given folder_id.\n If new_filename is given, rename the file.\n '
data = self._copy_move_data(file_id, folder_id, new_filename)
return self._post_endpoint(f'/documents/{file_id}/copy', data=data) | def copy_file_to_folder_by_id(self, file_id, folder_id, new_filename=None):
'\n Copies a file with given file_id to a folder with given folder_id.\n If new_filename is given, rename the file.\n '
data = self._copy_move_data(file_id, folder_id, new_filename)
return self._post_endpoint(f'/documents/{file_id}/copy', data=data)<|docstring|>Copies a file with given file_id to a folder with given folder_id.
If new_filename is given, rename the file.<|endoftext|> |
8044d14cf429d748447600d8668baeded686a98b18acc20f4a9fabb10b03a8c7 | def move_file_to_folder_by_id(self, file_id, folder_id, new_filename=None):
'\n Moves a file with given file_id to a folder with given folder_id.\n If new_filename is given, rename the file.\n '
data = self._copy_move_data(file_id, folder_id, new_filename)
return self._put_endpoint(f'/documents/{file_id}', data=data) | Moves a file with given file_id to a folder with given folder_id.
If new_filename is given, rename the file. | dptrp1/dptrp1.py | move_file_to_folder_by_id | hitmoon/dpt-rp1-py | 0 | python | def move_file_to_folder_by_id(self, file_id, folder_id, new_filename=None):
'\n Moves a file with given file_id to a folder with given folder_id.\n If new_filename is given, rename the file.\n '
data = self._copy_move_data(file_id, folder_id, new_filename)
return self._put_endpoint(f'/documents/{file_id}', data=data) | def move_file_to_folder_by_id(self, file_id, folder_id, new_filename=None):
'\n Moves a file with given file_id to a folder with given folder_id.\n If new_filename is given, rename the file.\n '
data = self._copy_move_data(file_id, folder_id, new_filename)
return self._put_endpoint(f'/documents/{file_id}', data=data)<|docstring|>Moves a file with given file_id to a folder with given folder_id.
If new_filename is given, rename the file.<|endoftext|> |
699a193edd7659aaeaefbe41db04e1cbefbf7d23aefe9b20e06521c5d384bf93 | def copy_file(self, old_path, new_path):
'\n Copies a file with given path to a new path.\n '
(old_id, new_folder_id, new_filename) = self._copy_move_find_ids(old_path, new_path)
self.copy_file_to_folder_by_id(old_id, new_folder_id, new_filename) | Copies a file with given path to a new path. | dptrp1/dptrp1.py | copy_file | hitmoon/dpt-rp1-py | 0 | python | def copy_file(self, old_path, new_path):
'\n \n '
(old_id, new_folder_id, new_filename) = self._copy_move_find_ids(old_path, new_path)
self.copy_file_to_folder_by_id(old_id, new_folder_id, new_filename) | def copy_file(self, old_path, new_path):
'\n \n '
(old_id, new_folder_id, new_filename) = self._copy_move_find_ids(old_path, new_path)
self.copy_file_to_folder_by_id(old_id, new_folder_id, new_filename)<|docstring|>Copies a file with given path to a new path.<|endoftext|> |
226d62d5127f5351d92827c6febc9e6649010c20bbb4bdc464580dfb19980fc0 | def move_file(self, old_path, new_path):
'\n Moves a file with given path to a new path.\n '
(old_id, new_folder_id, new_filename) = self._copy_move_find_ids(old_path, new_path)
return self.move_file_to_folder_by_id(old_id, new_folder_id, new_filename) | Moves a file with given path to a new path. | dptrp1/dptrp1.py | move_file | hitmoon/dpt-rp1-py | 0 | python | def move_file(self, old_path, new_path):
'\n \n '
(old_id, new_folder_id, new_filename) = self._copy_move_find_ids(old_path, new_path)
return self.move_file_to_folder_by_id(old_id, new_folder_id, new_filename) | def move_file(self, old_path, new_path):
'\n \n '
(old_id, new_folder_id, new_filename) = self._copy_move_find_ids(old_path, new_path)
return self.move_file_to_folder_by_id(old_id, new_folder_id, new_filename)<|docstring|>Moves a file with given path to a new path.<|endoftext|> |
906d40835ae91ab9845847275ad46eef2cb8e895ac6364d5853e7c18c040fa8a | def ping(self):
'\n Returns True if we are authenticated.\n '
url = f'{self.base_url}/ping'
r = self.session.get(url)
return r.ok | Returns True if we are authenticated. | dptrp1/dptrp1.py | ping | hitmoon/dpt-rp1-py | 0 | python | def ping(self):
'\n \n '
url = f'{self.base_url}/ping'
r = self.session.get(url)
return r.ok | def ping(self):
'\n \n '
url = f'{self.base_url}/ping'
r = self.session.get(url)
return r.ok<|docstring|>Returns True if we are authenticated.<|endoftext|> |
e73ca4f3382322d7d493a3c9a52f4675398044c3b9f8e87cc6e34677351ef57a | def _debug_net(pooling, *args, **kwargs):
'Small net for debugging.'
del args, kwargs
final_shape = ([(- 1), 1] if pooling else [(- 1), 1, 1, 1])
layers = [tf.keras.layers.Lambda((lambda x: tf.reshape(tf.reduce_mean(x, axis=[1, 2, 3]), final_shape)))]
return tf.keras.Sequential(layers) | Small net for debugging. | non_semantic_speech_benchmark/distillation/models.py | _debug_net | suryatmodulus/google-research | 2 | python | def _debug_net(pooling, *args, **kwargs):
del args, kwargs
final_shape = ([(- 1), 1] if pooling else [(- 1), 1, 1, 1])
layers = [tf.keras.layers.Lambda((lambda x: tf.reshape(tf.reduce_mean(x, axis=[1, 2, 3]), final_shape)))]
return tf.keras.Sequential(layers) | def _debug_net(pooling, *args, **kwargs):
del args, kwargs
final_shape = ([(- 1), 1] if pooling else [(- 1), 1, 1, 1])
layers = [tf.keras.layers.Lambda((lambda x: tf.reshape(tf.reduce_mean(x, axis=[1, 2, 3]), final_shape)))]
return tf.keras.Sequential(layers)<|docstring|>Small net for debugging.<|endoftext|> |
a9aac52149aabaa978376bdf3cfeb1eb11e59489d3f1a64cf6d55de0461568a1 | def get_keras_model(model_type, output_dimension, truncate_output=False, frontend=True, tflite=False, spec_augment=False):
'Make a Keras student model.'
logging.info('model name: %s', model_type)
logging.info('truncate_output: %s', truncate_output)
logging.info('output_dimension: %i', output_dimension)
logging.info('frontend: %s', frontend)
logging.info('tflite: %s', tflite)
logging.info('spec_augment: %s', spec_augment)
output_dict = {}
(model_in, feats) = _frontend_keras(frontend, tflite)
feats.shape.assert_is_compatible_with([None, None, None, 1])
spec_augment_fn = (augmentation.SpecAugment() if spec_augment else tf.identity)
feats = spec_augment_fn(feats)
inputs = [model_in]
logging.info('Features shape: %s', feats.shape)
model_out = _build_main_net(model_type, feats)
embeddings = tf.keras.layers.Flatten(name='distilled_output')(model_out)
need_final_layer = (output_dimension and (embeddings.shape[1] != output_dimension))
if (need_final_layer and truncate_output):
if (embeddings.shape[1] < output_dimension):
embeddings = tf.pad(embeddings, [[0, 0], [0, (output_dimension - embeddings.shape[1])]])
else:
embeddings = embeddings[(:, :output_dimension)]
output_dict['embedding'] = embeddings
target = embeddings
if (need_final_layer and (not truncate_output)):
target = tf.keras.layers.Dense(output_dimension, name='embedding_to_target')(target)
output_dict['embedding_to_target'] = target
output_model = tf.keras.Model(inputs=inputs, outputs=output_dict)
return output_model | Make a Keras student model. | non_semantic_speech_benchmark/distillation/models.py | get_keras_model | suryatmodulus/google-research | 2 | python | def get_keras_model(model_type, output_dimension, truncate_output=False, frontend=True, tflite=False, spec_augment=False):
logging.info('model name: %s', model_type)
logging.info('truncate_output: %s', truncate_output)
logging.info('output_dimension: %i', output_dimension)
logging.info('frontend: %s', frontend)
logging.info('tflite: %s', tflite)
logging.info('spec_augment: %s', spec_augment)
output_dict = {}
(model_in, feats) = _frontend_keras(frontend, tflite)
feats.shape.assert_is_compatible_with([None, None, None, 1])
spec_augment_fn = (augmentation.SpecAugment() if spec_augment else tf.identity)
feats = spec_augment_fn(feats)
inputs = [model_in]
logging.info('Features shape: %s', feats.shape)
model_out = _build_main_net(model_type, feats)
embeddings = tf.keras.layers.Flatten(name='distilled_output')(model_out)
need_final_layer = (output_dimension and (embeddings.shape[1] != output_dimension))
if (need_final_layer and truncate_output):
if (embeddings.shape[1] < output_dimension):
embeddings = tf.pad(embeddings, [[0, 0], [0, (output_dimension - embeddings.shape[1])]])
else:
embeddings = embeddings[(:, :output_dimension)]
output_dict['embedding'] = embeddings
target = embeddings
if (need_final_layer and (not truncate_output)):
target = tf.keras.layers.Dense(output_dimension, name='embedding_to_target')(target)
output_dict['embedding_to_target'] = target
output_model = tf.keras.Model(inputs=inputs, outputs=output_dict)
return output_model | def get_keras_model(model_type, output_dimension, truncate_output=False, frontend=True, tflite=False, spec_augment=False):
logging.info('model name: %s', model_type)
logging.info('truncate_output: %s', truncate_output)
logging.info('output_dimension: %i', output_dimension)
logging.info('frontend: %s', frontend)
logging.info('tflite: %s', tflite)
logging.info('spec_augment: %s', spec_augment)
output_dict = {}
(model_in, feats) = _frontend_keras(frontend, tflite)
feats.shape.assert_is_compatible_with([None, None, None, 1])
spec_augment_fn = (augmentation.SpecAugment() if spec_augment else tf.identity)
feats = spec_augment_fn(feats)
inputs = [model_in]
logging.info('Features shape: %s', feats.shape)
model_out = _build_main_net(model_type, feats)
embeddings = tf.keras.layers.Flatten(name='distilled_output')(model_out)
need_final_layer = (output_dimension and (embeddings.shape[1] != output_dimension))
if (need_final_layer and truncate_output):
if (embeddings.shape[1] < output_dimension):
embeddings = tf.pad(embeddings, [[0, 0], [0, (output_dimension - embeddings.shape[1])]])
else:
embeddings = embeddings[(:, :output_dimension)]
output_dict['embedding'] = embeddings
target = embeddings
if (need_final_layer and (not truncate_output)):
target = tf.keras.layers.Dense(output_dimension, name='embedding_to_target')(target)
output_dict['embedding_to_target'] = target
output_model = tf.keras.Model(inputs=inputs, outputs=output_dict)
return output_model<|docstring|>Make a Keras student model.<|endoftext|> |
4822531a297ad0dc152831b5f847383d5ac8e40ebd5a2a75ad9c02442aeaf0f1 | def _frontend_keras(frontend, tflite):
'Returns model input and features.'
num_batches = (1 if tflite else None)
frontend_args = frontend_lib.frontend_args_from_flags()
feats_inner_dim = frontend_lib.get_frontend_output_shape()[0]
if frontend:
logging.info('frontend_args: %s', frontend_args)
model_in = tf.keras.Input((None,), name='audio_samples', batch_size=num_batches)
frontend_fn = frontend_lib.get_feats_map_fn(tflite, frontend_args)
feats = tf.keras.layers.Lambda(frontend_fn)(model_in)
feats.shape.assert_is_compatible_with([num_batches, feats_inner_dim, frontend_args['frame_width'], frontend_args['num_mel_bins']])
feats = tf.reshape(feats, [(- 1), (feats_inner_dim * frontend_args['frame_width']), frontend_args['num_mel_bins'], 1])
else:
model_in = tf.keras.Input(((feats_inner_dim * frontend_args['frame_width']), frontend_args['num_mel_bins'], 1), batch_size=num_batches, name='log_mel_spectrogram')
feats = model_in
feats.shape.assert_is_compatible_with([None, (feats_inner_dim * frontend_args['frame_width']), frontend_args['num_mel_bins'], 1])
return (model_in, feats) | Returns model input and features. | non_semantic_speech_benchmark/distillation/models.py | _frontend_keras | suryatmodulus/google-research | 2 | python | def _frontend_keras(frontend, tflite):
num_batches = (1 if tflite else None)
frontend_args = frontend_lib.frontend_args_from_flags()
feats_inner_dim = frontend_lib.get_frontend_output_shape()[0]
if frontend:
logging.info('frontend_args: %s', frontend_args)
model_in = tf.keras.Input((None,), name='audio_samples', batch_size=num_batches)
frontend_fn = frontend_lib.get_feats_map_fn(tflite, frontend_args)
feats = tf.keras.layers.Lambda(frontend_fn)(model_in)
feats.shape.assert_is_compatible_with([num_batches, feats_inner_dim, frontend_args['frame_width'], frontend_args['num_mel_bins']])
feats = tf.reshape(feats, [(- 1), (feats_inner_dim * frontend_args['frame_width']), frontend_args['num_mel_bins'], 1])
else:
model_in = tf.keras.Input(((feats_inner_dim * frontend_args['frame_width']), frontend_args['num_mel_bins'], 1), batch_size=num_batches, name='log_mel_spectrogram')
feats = model_in
feats.shape.assert_is_compatible_with([None, (feats_inner_dim * frontend_args['frame_width']), frontend_args['num_mel_bins'], 1])
return (model_in, feats) | def _frontend_keras(frontend, tflite):
num_batches = (1 if tflite else None)
frontend_args = frontend_lib.frontend_args_from_flags()
feats_inner_dim = frontend_lib.get_frontend_output_shape()[0]
if frontend:
logging.info('frontend_args: %s', frontend_args)
model_in = tf.keras.Input((None,), name='audio_samples', batch_size=num_batches)
frontend_fn = frontend_lib.get_feats_map_fn(tflite, frontend_args)
feats = tf.keras.layers.Lambda(frontend_fn)(model_in)
feats.shape.assert_is_compatible_with([num_batches, feats_inner_dim, frontend_args['frame_width'], frontend_args['num_mel_bins']])
feats = tf.reshape(feats, [(- 1), (feats_inner_dim * frontend_args['frame_width']), frontend_args['num_mel_bins'], 1])
else:
model_in = tf.keras.Input(((feats_inner_dim * frontend_args['frame_width']), frontend_args['num_mel_bins'], 1), batch_size=num_batches, name='log_mel_spectrogram')
feats = model_in
feats.shape.assert_is_compatible_with([None, (feats_inner_dim * frontend_args['frame_width']), frontend_args['num_mel_bins'], 1])
return (model_in, feats)<|docstring|>Returns model input and features.<|endoftext|> |
c3c721ceb70894b2acb198a416fea8139ae24dedbe95bf62180347217b0963f1 | def _build_main_net(model_type, feats):
'Constructs main network.'
if model_type.startswith('mobilenet_'):
(_, mobilenet_size, alpha, avg_pool) = model_type.split('_')
alpha = float(alpha)
avg_pool = bool(avg_pool)
logging.info('mobilenet_size: %s', mobilenet_size)
logging.info('alpha: %f', alpha)
logging.info('avg_pool: %s', avg_pool)
model = _map_mobilenet_func(mobilenet_size)(input_shape=feats.shape[1:], alpha=alpha, minimalistic=False, include_top=False, weights=None, pooling=('avg' if avg_pool else None), dropout_rate=0.0)
expected_output_shape = ([None, None] if avg_pool else [None, 1, 1, None])
elif model_type.startswith('efficientnet'):
(model_fn, final_dim) = {'efficientnetb0': (tf.keras.applications.EfficientNetB0, 1280), 'efficientnetb1': (tf.keras.applications.EfficientNetB1, 1280), 'efficientnetb2': (tf.keras.applications.EfficientNetB2, 1408), 'efficientnetb3': (tf.keras.applications.EfficientNetB3, 1536), 'efficientnetb4': (tf.keras.applications.EfficientNetB4, 1792), 'efficientnetb5': (tf.keras.applications.EfficientNetB5, 2048), 'efficientnetb6': (tf.keras.applications.EfficientNetB6, 2304), 'efficientnetb7': (tf.keras.applications.EfficientNetB7, 2560), 'efficientnetv2b0': (tf.keras.applications.efficientnet_v2.EfficientNetV2B0, 1280), 'efficientnetv2b1': (tf.keras.applications.efficientnet_v2.EfficientNetV2B1, 1280), 'efficientnetv2b2': (tf.keras.applications.efficientnet_v2.EfficientNetV2B2, 1408), 'efficientnetv2b3': (tf.keras.applications.efficientnet_v2.EfficientNetV2B3, 1536), 'efficientnetv2bL': (tf.keras.applications.efficientnet_v2.EfficientNetV2L, 1280), 'efficientnetv2bM': (tf.keras.applications.efficientnet_v2.EfficientNetV2M, 1280), 'efficientnetv2bS': (tf.keras.applications.efficientnet_v2.EfficientNetV2S, 1280)}[model_type]
model = model_fn(include_top=False, weights=None, input_shape=feats.shape[1:], pooling='avg')
expected_output_shape = [None, final_dim]
else:
raise ValueError(f'`model_type` not recognized: {model_type}')
model_out = model(feats)
model_out.shape.assert_is_compatible_with(expected_output_shape)
return model_out | Constructs main network. | non_semantic_speech_benchmark/distillation/models.py | _build_main_net | suryatmodulus/google-research | 2 | python | def _build_main_net(model_type, feats):
if model_type.startswith('mobilenet_'):
(_, mobilenet_size, alpha, avg_pool) = model_type.split('_')
alpha = float(alpha)
avg_pool = bool(avg_pool)
logging.info('mobilenet_size: %s', mobilenet_size)
logging.info('alpha: %f', alpha)
logging.info('avg_pool: %s', avg_pool)
model = _map_mobilenet_func(mobilenet_size)(input_shape=feats.shape[1:], alpha=alpha, minimalistic=False, include_top=False, weights=None, pooling=('avg' if avg_pool else None), dropout_rate=0.0)
expected_output_shape = ([None, None] if avg_pool else [None, 1, 1, None])
elif model_type.startswith('efficientnet'):
(model_fn, final_dim) = {'efficientnetb0': (tf.keras.applications.EfficientNetB0, 1280), 'efficientnetb1': (tf.keras.applications.EfficientNetB1, 1280), 'efficientnetb2': (tf.keras.applications.EfficientNetB2, 1408), 'efficientnetb3': (tf.keras.applications.EfficientNetB3, 1536), 'efficientnetb4': (tf.keras.applications.EfficientNetB4, 1792), 'efficientnetb5': (tf.keras.applications.EfficientNetB5, 2048), 'efficientnetb6': (tf.keras.applications.EfficientNetB6, 2304), 'efficientnetb7': (tf.keras.applications.EfficientNetB7, 2560), 'efficientnetv2b0': (tf.keras.applications.efficientnet_v2.EfficientNetV2B0, 1280), 'efficientnetv2b1': (tf.keras.applications.efficientnet_v2.EfficientNetV2B1, 1280), 'efficientnetv2b2': (tf.keras.applications.efficientnet_v2.EfficientNetV2B2, 1408), 'efficientnetv2b3': (tf.keras.applications.efficientnet_v2.EfficientNetV2B3, 1536), 'efficientnetv2bL': (tf.keras.applications.efficientnet_v2.EfficientNetV2L, 1280), 'efficientnetv2bM': (tf.keras.applications.efficientnet_v2.EfficientNetV2M, 1280), 'efficientnetv2bS': (tf.keras.applications.efficientnet_v2.EfficientNetV2S, 1280)}[model_type]
model = model_fn(include_top=False, weights=None, input_shape=feats.shape[1:], pooling='avg')
expected_output_shape = [None, final_dim]
else:
raise ValueError(f'`model_type` not recognized: {model_type}')
model_out = model(feats)
model_out.shape.assert_is_compatible_with(expected_output_shape)
return model_out | def _build_main_net(model_type, feats):
if model_type.startswith('mobilenet_'):
(_, mobilenet_size, alpha, avg_pool) = model_type.split('_')
alpha = float(alpha)
avg_pool = bool(avg_pool)
logging.info('mobilenet_size: %s', mobilenet_size)
logging.info('alpha: %f', alpha)
logging.info('avg_pool: %s', avg_pool)
model = _map_mobilenet_func(mobilenet_size)(input_shape=feats.shape[1:], alpha=alpha, minimalistic=False, include_top=False, weights=None, pooling=('avg' if avg_pool else None), dropout_rate=0.0)
expected_output_shape = ([None, None] if avg_pool else [None, 1, 1, None])
elif model_type.startswith('efficientnet'):
(model_fn, final_dim) = {'efficientnetb0': (tf.keras.applications.EfficientNetB0, 1280), 'efficientnetb1': (tf.keras.applications.EfficientNetB1, 1280), 'efficientnetb2': (tf.keras.applications.EfficientNetB2, 1408), 'efficientnetb3': (tf.keras.applications.EfficientNetB3, 1536), 'efficientnetb4': (tf.keras.applications.EfficientNetB4, 1792), 'efficientnetb5': (tf.keras.applications.EfficientNetB5, 2048), 'efficientnetb6': (tf.keras.applications.EfficientNetB6, 2304), 'efficientnetb7': (tf.keras.applications.EfficientNetB7, 2560), 'efficientnetv2b0': (tf.keras.applications.efficientnet_v2.EfficientNetV2B0, 1280), 'efficientnetv2b1': (tf.keras.applications.efficientnet_v2.EfficientNetV2B1, 1280), 'efficientnetv2b2': (tf.keras.applications.efficientnet_v2.EfficientNetV2B2, 1408), 'efficientnetv2b3': (tf.keras.applications.efficientnet_v2.EfficientNetV2B3, 1536), 'efficientnetv2bL': (tf.keras.applications.efficientnet_v2.EfficientNetV2L, 1280), 'efficientnetv2bM': (tf.keras.applications.efficientnet_v2.EfficientNetV2M, 1280), 'efficientnetv2bS': (tf.keras.applications.efficientnet_v2.EfficientNetV2S, 1280)}[model_type]
model = model_fn(include_top=False, weights=None, input_shape=feats.shape[1:], pooling='avg')
expected_output_shape = [None, final_dim]
else:
raise ValueError(f'`model_type` not recognized: {model_type}')
model_out = model(feats)
model_out.shape.assert_is_compatible_with(expected_output_shape)
return model_out<|docstring|>Constructs main network.<|endoftext|> |
156144484f0f1d8605251ea3e964b457e05c845925ca71130977d18524986fee | def plot_model_predictions(name, predicted, actual, log=False, ax=None):
'Plots the predictions of a machine learning model.\n \n Create a scatter plot of machine learning model predictions vs.\n actual values from the data set along with a diagonal line showing\n where perfect agreement would be. \n \n Args:\n name(str): The name of the value being predicted.\n \n predicted(array_like): The set of predicted values from a model\n \n actual(array_like): The set of actual values from the data set \n which represent ground truth.\n \n log(bool,optional): If set to true the plot becomes a log-log \n plot. Default False.\n \n ax(matplotlib.axes.Axes,optional): A preexisting axis object \n where the plot will be located. Default None.\n \n Returns:\n matplotlib.axes.Axes: The axis object containing the created \n scatterplot.\n \n '
if log:
predicted = [math.log(x) for x in predicted]
actual = [math.log(y) for y in actual]
if (ax is None):
fig = plt.figure()
ax = plt.gca()
ax.scatter(predicted, actual)
ax.set_title((name + ' Predicted vs. Actual'))
padding_y = ((max(actual) - min(actual)) * 0.1)
min_y = (min(actual) - padding_y)
max_y = (max(actual) + padding_y)
ax.set_ylim(min_y, max_y)
padding_x = ((max(predicted) - min(predicted)) * 0.1)
min_x = (min(predicted) - padding_x)
max_x = (max(predicted) + padding_x)
ax.set_xlim(min_x, max_x)
dline = [min(min_y, min_x), max(max_y, max_x)]
ax.plot(dline, dline, ls='--', c='.3')
ax.set_xlabel(('Predicted ' + name))
ax.set_ylabel(('Actual ' + name))
return ax | Plots the predictions of a machine learning model.
Create a scatter plot of machine learning model predictions vs.
actual values from the data set along with a diagonal line showing
where perfect agreement would be.
Args:
name(str): The name of the value being predicted.
predicted(array_like): The set of predicted values from a model
actual(array_like): The set of actual values from the data set
which represent ground truth.
log(bool,optional): If set to true the plot becomes a log-log
plot. Default False.
ax(matplotlib.axes.Axes,optional): A preexisting axis object
where the plot will be located. Default None.
Returns:
matplotlib.axes.Axes: The axis object containing the created
scatterplot. | rectool/plot.py | plot_model_predictions | JBEI/Ajinomoto | 0 | python | def plot_model_predictions(name, predicted, actual, log=False, ax=None):
'Plots the predictions of a machine learning model.\n \n Create a scatter plot of machine learning model predictions vs.\n actual values from the data set along with a diagonal line showing\n where perfect agreement would be. \n \n Args:\n name(str): The name of the value being predicted.\n \n predicted(array_like): The set of predicted values from a model\n \n actual(array_like): The set of actual values from the data set \n which represent ground truth.\n \n log(bool,optional): If set to true the plot becomes a log-log \n plot. Default False.\n \n ax(matplotlib.axes.Axes,optional): A preexisting axis object \n where the plot will be located. Default None.\n \n Returns:\n matplotlib.axes.Axes: The axis object containing the created \n scatterplot.\n \n '
if log:
predicted = [math.log(x) for x in predicted]
actual = [math.log(y) for y in actual]
if (ax is None):
fig = plt.figure()
ax = plt.gca()
ax.scatter(predicted, actual)
ax.set_title((name + ' Predicted vs. Actual'))
padding_y = ((max(actual) - min(actual)) * 0.1)
min_y = (min(actual) - padding_y)
max_y = (max(actual) + padding_y)
ax.set_ylim(min_y, max_y)
padding_x = ((max(predicted) - min(predicted)) * 0.1)
min_x = (min(predicted) - padding_x)
max_x = (max(predicted) + padding_x)
ax.set_xlim(min_x, max_x)
dline = [min(min_y, min_x), max(max_y, max_x)]
ax.plot(dline, dline, ls='--', c='.3')
ax.set_xlabel(('Predicted ' + name))
ax.set_ylabel(('Actual ' + name))
return ax | def plot_model_predictions(name, predicted, actual, log=False, ax=None):
'Plots the predictions of a machine learning model.\n \n Create a scatter plot of machine learning model predictions vs.\n actual values from the data set along with a diagonal line showing\n where perfect agreement would be. \n \n Args:\n name(str): The name of the value being predicted.\n \n predicted(array_like): The set of predicted values from a model\n \n actual(array_like): The set of actual values from the data set \n which represent ground truth.\n \n log(bool,optional): If set to true the plot becomes a log-log \n plot. Default False.\n \n ax(matplotlib.axes.Axes,optional): A preexisting axis object \n where the plot will be located. Default None.\n \n Returns:\n matplotlib.axes.Axes: The axis object containing the created \n scatterplot.\n \n '
if log:
predicted = [math.log(x) for x in predicted]
actual = [math.log(y) for y in actual]
if (ax is None):
fig = plt.figure()
ax = plt.gca()
ax.scatter(predicted, actual)
ax.set_title((name + ' Predicted vs. Actual'))
padding_y = ((max(actual) - min(actual)) * 0.1)
min_y = (min(actual) - padding_y)
max_y = (max(actual) + padding_y)
ax.set_ylim(min_y, max_y)
padding_x = ((max(predicted) - min(predicted)) * 0.1)
min_x = (min(predicted) - padding_x)
max_x = (max(predicted) + padding_x)
ax.set_xlim(min_x, max_x)
dline = [min(min_y, min_x), max(max_y, max_x)]
ax.plot(dline, dline, ls='--', c='.3')
ax.set_xlabel(('Predicted ' + name))
ax.set_ylabel(('Actual ' + name))
return ax<|docstring|>Plots the predictions of a machine learning model.
Create a scatter plot of machine learning model predictions vs.
actual values from the data set along with a diagonal line showing
where perfect agreement would be.
Args:
name(str): The name of the value being predicted.
predicted(array_like): The set of predicted values from a model
actual(array_like): The set of actual values from the data set
which represent ground truth.
log(bool,optional): If set to true the plot becomes a log-log
plot. Default False.
ax(matplotlib.axes.Axes,optional): A preexisting axis object
where the plot will be located. Default None.
Returns:
matplotlib.axes.Axes: The axis object containing the created
scatterplot.<|endoftext|> |
19e3f4beabd6dc7421457d0a2e62c14ac067154c1ffffac590c638d63fc77e0c | def plot_model(model, data, targets, midpoint=0.1, title=None, zlabel=None, ax=None, pcs=None, plot_points=True):
'Plots a heatmap representing a machine learning model and overlays training data on top.\n \n A heatmap of a machine learning model is generated to better understand how the model performs. \n In order to deal with higher dimentional feature spaces, principal component analysis is used\n to reduce the feature space to the two dimensions with the most variance. The data is then projected\n onto that plane and plotted over the model heatmap as a scatterplot.\n \n Args:\n model(sklearn.base.BaseEstimator): A scikit-learn style machine learning model.\n \n data(array_like): The feature space of the model training set. Dimensions are \n (num_samples,feature_cardinality).\n \n targets(array_like): An array like object containing the ground truth for the model predictions.\n Dimensions are (num_samples,1).\n \n midpoint(float,optional): Select the midpoint value on the color map. Default 0.1.\n \n title(str,optional): Title of the generated plot. Default None.\n \n zlabel(str,optional): Label of the colormap axis. Default None.\n \n ax(matplotlib.axes.Axes,optional): Predefined axis used to draw the plot. Default None.\n \n pcs(array_like,optional): Specify Principal components to use for projection. Default None.\n \n plot_points(bool,optional): Overlay features as a scatterplot over the heatmap. Default True.\n \n '
pca = PCA(n_components=2)
if (pcs is not None):
pca.components_ = pcs
data_transformed = pca.fit_transform(data)
xmin = np.amin(data_transformed[(:, 0)])
xmax = np.amax(data_transformed[(:, 0)])
ymin = np.amin(data_transformed[(:, 1)])
ymax = np.amax(data_transformed[(:, 1)])
scaling_factor = 0.5
xmin = (xmin - (((xmax - xmin) * scaling_factor) / 2))
xmax = (xmax + (((xmax - xmin) * scaling_factor) / 2))
ymin = (ymin - (((ymax - ymin) * scaling_factor) / 2))
ymax = (ymax + (((ymax - ymin) * scaling_factor) / 2))
points = 1000
x = np.linspace(xmin, xmax, num=points)
y = np.linspace(ymin, ymax, num=points)
(xv, yv) = np.meshgrid(x, y)
xyt = np.concatenate((xv.reshape([xv.size, 1]), yv.reshape([yv.size, 1])), axis=1)
xy = pca.inverse_transform(xyt)
zero_truncate = np.vectorize((lambda x: max(0.01, x)))
xy = zero_truncate(xy)
z = model.predict(xy).reshape([points, points])
minpoint = min([min(p) for p in z])
maxpoint = max([max(p) for p in z])
if (ax is None):
fig = plt.figure()
ax = plt.gca()
scaled_targets = [((target / max(targets)) * 200) for target in targets]
if plot_points:
ax.scatter(data_transformed[(:, 0)], [(1 * value) for value in data_transformed[(:, 1)]], c='k', cmap=plt.cm.bwr, marker='+', s=scaled_targets, linewidths=1.5)
ax.grid(b=False)
midpercent = ((midpoint - minpoint) / (maxpoint - minpoint))
centered_cmap = shiftedColorMap(plt.cm.bwr, midpoint=midpercent)
cmap = centered_cmap
if (midpercent > 1):
midpercent = 1
cmap = plt.cm.Blues_r
elif (midpercent < 0):
midpercent = 0
cmap = plt.cm.Reds
z = [row for row in reversed(z)]
im = ax.imshow(z, extent=[xmin, xmax, ymin, ymax], cmap=cmap)
ax.set_aspect('auto')
if (title is not None):
ax.set_title(title)
ax.set_xlabel('Principal Component 1')
ax.set_ylabel('Principal Component 2')
divider = make_axes_locatable(ax)
cax = divider.append_axes('right', size='5%', pad=0.05)
if (zlabel is not None):
plt.colorbar(im, cax=cax, label=zlabel)
else:
plt.colorbar(im, cax=cax) | Plots a heatmap representing a machine learning model and overlays training data on top.
A heatmap of a machine learning model is generated to better understand how the model performs.
In order to deal with higher dimentional feature spaces, principal component analysis is used
to reduce the feature space to the two dimensions with the most variance. The data is then projected
onto that plane and plotted over the model heatmap as a scatterplot.
Args:
model(sklearn.base.BaseEstimator): A scikit-learn style machine learning model.
data(array_like): The feature space of the model training set. Dimensions are
(num_samples,feature_cardinality).
targets(array_like): An array like object containing the ground truth for the model predictions.
Dimensions are (num_samples,1).
midpoint(float,optional): Select the midpoint value on the color map. Default 0.1.
title(str,optional): Title of the generated plot. Default None.
zlabel(str,optional): Label of the colormap axis. Default None.
ax(matplotlib.axes.Axes,optional): Predefined axis used to draw the plot. Default None.
pcs(array_like,optional): Specify Principal components to use for projection. Default None.
plot_points(bool,optional): Overlay features as a scatterplot over the heatmap. Default True. | rectool/plot.py | plot_model | JBEI/Ajinomoto | 0 | python | def plot_model(model, data, targets, midpoint=0.1, title=None, zlabel=None, ax=None, pcs=None, plot_points=True):
'Plots a heatmap representing a machine learning model and overlays training data on top.\n \n A heatmap of a machine learning model is generated to better understand how the model performs. \n In order to deal with higher dimentional feature spaces, principal component analysis is used\n to reduce the feature space to the two dimensions with the most variance. The data is then projected\n onto that plane and plotted over the model heatmap as a scatterplot.\n \n Args:\n model(sklearn.base.BaseEstimator): A scikit-learn style machine learning model.\n \n data(array_like): The feature space of the model training set. Dimensions are \n (num_samples,feature_cardinality).\n \n targets(array_like): An array like object containing the ground truth for the model predictions.\n Dimensions are (num_samples,1).\n \n midpoint(float,optional): Select the midpoint value on the color map. Default 0.1.\n \n title(str,optional): Title of the generated plot. Default None.\n \n zlabel(str,optional): Label of the colormap axis. Default None.\n \n ax(matplotlib.axes.Axes,optional): Predefined axis used to draw the plot. Default None.\n \n pcs(array_like,optional): Specify Principal components to use for projection. Default None.\n \n plot_points(bool,optional): Overlay features as a scatterplot over the heatmap. Default True.\n \n '
pca = PCA(n_components=2)
if (pcs is not None):
pca.components_ = pcs
data_transformed = pca.fit_transform(data)
xmin = np.amin(data_transformed[(:, 0)])
xmax = np.amax(data_transformed[(:, 0)])
ymin = np.amin(data_transformed[(:, 1)])
ymax = np.amax(data_transformed[(:, 1)])
scaling_factor = 0.5
xmin = (xmin - (((xmax - xmin) * scaling_factor) / 2))
xmax = (xmax + (((xmax - xmin) * scaling_factor) / 2))
ymin = (ymin - (((ymax - ymin) * scaling_factor) / 2))
ymax = (ymax + (((ymax - ymin) * scaling_factor) / 2))
points = 1000
x = np.linspace(xmin, xmax, num=points)
y = np.linspace(ymin, ymax, num=points)
(xv, yv) = np.meshgrid(x, y)
xyt = np.concatenate((xv.reshape([xv.size, 1]), yv.reshape([yv.size, 1])), axis=1)
xy = pca.inverse_transform(xyt)
zero_truncate = np.vectorize((lambda x: max(0.01, x)))
xy = zero_truncate(xy)
z = model.predict(xy).reshape([points, points])
minpoint = min([min(p) for p in z])
maxpoint = max([max(p) for p in z])
if (ax is None):
fig = plt.figure()
ax = plt.gca()
scaled_targets = [((target / max(targets)) * 200) for target in targets]
if plot_points:
ax.scatter(data_transformed[(:, 0)], [(1 * value) for value in data_transformed[(:, 1)]], c='k', cmap=plt.cm.bwr, marker='+', s=scaled_targets, linewidths=1.5)
ax.grid(b=False)
midpercent = ((midpoint - minpoint) / (maxpoint - minpoint))
centered_cmap = shiftedColorMap(plt.cm.bwr, midpoint=midpercent)
cmap = centered_cmap
if (midpercent > 1):
midpercent = 1
cmap = plt.cm.Blues_r
elif (midpercent < 0):
midpercent = 0
cmap = plt.cm.Reds
z = [row for row in reversed(z)]
im = ax.imshow(z, extent=[xmin, xmax, ymin, ymax], cmap=cmap)
ax.set_aspect('auto')
if (title is not None):
ax.set_title(title)
ax.set_xlabel('Principal Component 1')
ax.set_ylabel('Principal Component 2')
divider = make_axes_locatable(ax)
cax = divider.append_axes('right', size='5%', pad=0.05)
if (zlabel is not None):
plt.colorbar(im, cax=cax, label=zlabel)
else:
plt.colorbar(im, cax=cax) | def plot_model(model, data, targets, midpoint=0.1, title=None, zlabel=None, ax=None, pcs=None, plot_points=True):
'Plots a heatmap representing a machine learning model and overlays training data on top.\n \n A heatmap of a machine learning model is generated to better understand how the model performs. \n In order to deal with higher dimentional feature spaces, principal component analysis is used\n to reduce the feature space to the two dimensions with the most variance. The data is then projected\n onto that plane and plotted over the model heatmap as a scatterplot.\n \n Args:\n model(sklearn.base.BaseEstimator): A scikit-learn style machine learning model.\n \n data(array_like): The feature space of the model training set. Dimensions are \n (num_samples,feature_cardinality).\n \n targets(array_like): An array like object containing the ground truth for the model predictions.\n Dimensions are (num_samples,1).\n \n midpoint(float,optional): Select the midpoint value on the color map. Default 0.1.\n \n title(str,optional): Title of the generated plot. Default None.\n \n zlabel(str,optional): Label of the colormap axis. Default None.\n \n ax(matplotlib.axes.Axes,optional): Predefined axis used to draw the plot. Default None.\n \n pcs(array_like,optional): Specify Principal components to use for projection. Default None.\n \n plot_points(bool,optional): Overlay features as a scatterplot over the heatmap. Default True.\n \n '
pca = PCA(n_components=2)
if (pcs is not None):
pca.components_ = pcs
data_transformed = pca.fit_transform(data)
xmin = np.amin(data_transformed[(:, 0)])
xmax = np.amax(data_transformed[(:, 0)])
ymin = np.amin(data_transformed[(:, 1)])
ymax = np.amax(data_transformed[(:, 1)])
scaling_factor = 0.5
xmin = (xmin - (((xmax - xmin) * scaling_factor) / 2))
xmax = (xmax + (((xmax - xmin) * scaling_factor) / 2))
ymin = (ymin - (((ymax - ymin) * scaling_factor) / 2))
ymax = (ymax + (((ymax - ymin) * scaling_factor) / 2))
points = 1000
x = np.linspace(xmin, xmax, num=points)
y = np.linspace(ymin, ymax, num=points)
(xv, yv) = np.meshgrid(x, y)
xyt = np.concatenate((xv.reshape([xv.size, 1]), yv.reshape([yv.size, 1])), axis=1)
xy = pca.inverse_transform(xyt)
zero_truncate = np.vectorize((lambda x: max(0.01, x)))
xy = zero_truncate(xy)
z = model.predict(xy).reshape([points, points])
minpoint = min([min(p) for p in z])
maxpoint = max([max(p) for p in z])
if (ax is None):
fig = plt.figure()
ax = plt.gca()
scaled_targets = [((target / max(targets)) * 200) for target in targets]
if plot_points:
ax.scatter(data_transformed[(:, 0)], [(1 * value) for value in data_transformed[(:, 1)]], c='k', cmap=plt.cm.bwr, marker='+', s=scaled_targets, linewidths=1.5)
ax.grid(b=False)
midpercent = ((midpoint - minpoint) / (maxpoint - minpoint))
centered_cmap = shiftedColorMap(plt.cm.bwr, midpoint=midpercent)
cmap = centered_cmap
if (midpercent > 1):
midpercent = 1
cmap = plt.cm.Blues_r
elif (midpercent < 0):
midpercent = 0
cmap = plt.cm.Reds
z = [row for row in reversed(z)]
im = ax.imshow(z, extent=[xmin, xmax, ymin, ymax], cmap=cmap)
ax.set_aspect('auto')
if (title is not None):
ax.set_title(title)
ax.set_xlabel('Principal Component 1')
ax.set_ylabel('Principal Component 2')
divider = make_axes_locatable(ax)
cax = divider.append_axes('right', size='5%', pad=0.05)
if (zlabel is not None):
plt.colorbar(im, cax=cax, label=zlabel)
else:
plt.colorbar(im, cax=cax)<|docstring|>Plots a heatmap representing a machine learning model and overlays training data on top.
A heatmap of a machine learning model is generated to better understand how the model performs.
In order to deal with higher dimentional feature spaces, principal component analysis is used
to reduce the feature space to the two dimensions with the most variance. The data is then projected
onto that plane and plotted over the model heatmap as a scatterplot.
Args:
model(sklearn.base.BaseEstimator): A scikit-learn style machine learning model.
data(array_like): The feature space of the model training set. Dimensions are
(num_samples,feature_cardinality).
targets(array_like): An array like object containing the ground truth for the model predictions.
Dimensions are (num_samples,1).
midpoint(float,optional): Select the midpoint value on the color map. Default 0.1.
title(str,optional): Title of the generated plot. Default None.
zlabel(str,optional): Label of the colormap axis. Default None.
ax(matplotlib.axes.Axes,optional): Predefined axis used to draw the plot. Default None.
pcs(array_like,optional): Specify Principal components to use for projection. Default None.
plot_points(bool,optional): Overlay features as a scatterplot over the heatmap. Default True.<|endoftext|> |
5db09edb6f905c7382c430be94ab06bc4f2f82c5829902b223755ee61ae14c8e | def shiftedColorMap(cmap, start=0, midpoint=0.5, stop=1.0, name='shiftedcmap'):
'\n Function to offset the "center" of a colormap. Useful for\n data with a negative min and positive max and you want the\n middle of the colormap\'s dynamic range to be at zero\n\n Input\n -----\n cmap : The matplotlib colormap to be altered\n start : Offset from lowest point in the colormap\'s range.\n Defaults to 0.0 (no lower ofset). Should be between\n 0.0 and `midpoint`.\n midpoint : The new center of the colormap. Defaults to \n 0.5 (no shift). Should be between 0.0 and 1.0. In\n general, this should be 1 - vmax/(vmax + abs(vmin))\n For example if your data range from -15.0 to +5.0 and\n you want the center of the colormap at 0.0, `midpoint`\n should be set to 1 - 5/(5 + 15)) or 0.75\n stop : Offset from highets point in the colormap\'s range.\n Defaults to 1.0 (no upper ofset). Should be between\n `midpoint` and 1.0.\n '
cdict = {'red': [], 'green': [], 'blue': [], 'alpha': []}
reg_index = np.linspace(start, stop, 257)
shift_index = np.hstack([np.linspace(0.0, midpoint, 128, endpoint=False), np.linspace(midpoint, 1.0, 129, endpoint=True)])
for (ri, si) in zip(reg_index, shift_index):
(r, g, b, a) = cmap(ri)
cdict['red'].append((si, r, r))
cdict['green'].append((si, g, g))
cdict['blue'].append((si, b, b))
cdict['alpha'].append((si, a, a))
newcmap = colors.LinearSegmentedColormap(name, cdict)
plt.register_cmap(cmap=newcmap)
return newcmap | Function to offset the "center" of a colormap. Useful for
data with a negative min and positive max and you want the
middle of the colormap's dynamic range to be at zero
Input
-----
cmap : The matplotlib colormap to be altered
start : Offset from lowest point in the colormap's range.
Defaults to 0.0 (no lower ofset). Should be between
0.0 and `midpoint`.
midpoint : The new center of the colormap. Defaults to
0.5 (no shift). Should be between 0.0 and 1.0. In
general, this should be 1 - vmax/(vmax + abs(vmin))
For example if your data range from -15.0 to +5.0 and
you want the center of the colormap at 0.0, `midpoint`
should be set to 1 - 5/(5 + 15)) or 0.75
stop : Offset from highets point in the colormap's range.
Defaults to 1.0 (no upper ofset). Should be between
`midpoint` and 1.0. | rectool/plot.py | shiftedColorMap | JBEI/Ajinomoto | 0 | python | def shiftedColorMap(cmap, start=0, midpoint=0.5, stop=1.0, name='shiftedcmap'):
'\n Function to offset the "center" of a colormap. Useful for\n data with a negative min and positive max and you want the\n middle of the colormap\'s dynamic range to be at zero\n\n Input\n -----\n cmap : The matplotlib colormap to be altered\n start : Offset from lowest point in the colormap\'s range.\n Defaults to 0.0 (no lower ofset). Should be between\n 0.0 and `midpoint`.\n midpoint : The new center of the colormap. Defaults to \n 0.5 (no shift). Should be between 0.0 and 1.0. In\n general, this should be 1 - vmax/(vmax + abs(vmin))\n For example if your data range from -15.0 to +5.0 and\n you want the center of the colormap at 0.0, `midpoint`\n should be set to 1 - 5/(5 + 15)) or 0.75\n stop : Offset from highets point in the colormap\'s range.\n Defaults to 1.0 (no upper ofset). Should be between\n `midpoint` and 1.0.\n '
cdict = {'red': [], 'green': [], 'blue': [], 'alpha': []}
reg_index = np.linspace(start, stop, 257)
shift_index = np.hstack([np.linspace(0.0, midpoint, 128, endpoint=False), np.linspace(midpoint, 1.0, 129, endpoint=True)])
for (ri, si) in zip(reg_index, shift_index):
(r, g, b, a) = cmap(ri)
cdict['red'].append((si, r, r))
cdict['green'].append((si, g, g))
cdict['blue'].append((si, b, b))
cdict['alpha'].append((si, a, a))
newcmap = colors.LinearSegmentedColormap(name, cdict)
plt.register_cmap(cmap=newcmap)
return newcmap | def shiftedColorMap(cmap, start=0, midpoint=0.5, stop=1.0, name='shiftedcmap'):
'\n Function to offset the "center" of a colormap. Useful for\n data with a negative min and positive max and you want the\n middle of the colormap\'s dynamic range to be at zero\n\n Input\n -----\n cmap : The matplotlib colormap to be altered\n start : Offset from lowest point in the colormap\'s range.\n Defaults to 0.0 (no lower ofset). Should be between\n 0.0 and `midpoint`.\n midpoint : The new center of the colormap. Defaults to \n 0.5 (no shift). Should be between 0.0 and 1.0. In\n general, this should be 1 - vmax/(vmax + abs(vmin))\n For example if your data range from -15.0 to +5.0 and\n you want the center of the colormap at 0.0, `midpoint`\n should be set to 1 - 5/(5 + 15)) or 0.75\n stop : Offset from highets point in the colormap\'s range.\n Defaults to 1.0 (no upper ofset). Should be between\n `midpoint` and 1.0.\n '
cdict = {'red': [], 'green': [], 'blue': [], 'alpha': []}
reg_index = np.linspace(start, stop, 257)
shift_index = np.hstack([np.linspace(0.0, midpoint, 128, endpoint=False), np.linspace(midpoint, 1.0, 129, endpoint=True)])
for (ri, si) in zip(reg_index, shift_index):
(r, g, b, a) = cmap(ri)
cdict['red'].append((si, r, r))
cdict['green'].append((si, g, g))
cdict['blue'].append((si, b, b))
cdict['alpha'].append((si, a, a))
newcmap = colors.LinearSegmentedColormap(name, cdict)
plt.register_cmap(cmap=newcmap)
return newcmap<|docstring|>Function to offset the "center" of a colormap. Useful for
data with a negative min and positive max and you want the
middle of the colormap's dynamic range to be at zero
Input
-----
cmap : The matplotlib colormap to be altered
start : Offset from lowest point in the colormap's range.
Defaults to 0.0 (no lower ofset). Should be between
0.0 and `midpoint`.
midpoint : The new center of the colormap. Defaults to
0.5 (no shift). Should be between 0.0 and 1.0. In
general, this should be 1 - vmax/(vmax + abs(vmin))
For example if your data range from -15.0 to +5.0 and
you want the center of the colormap at 0.0, `midpoint`
should be set to 1 - 5/(5 + 15)) or 0.75
stop : Offset from highets point in the colormap's range.
Defaults to 1.0 (no upper ofset). Should be between
`midpoint` and 1.0.<|endoftext|> |
049bc02c03ac3267ff3fa28a6bf474019714633662869ee099b0789c7a3d4f3b | def test_error_3():
' This should work\n '
try:
connect_and_list('edison.nersc.gov', 'yadunand')
except BadHostKeyException as e:
print('Caught exception BadHostKeyException: ', e)
else:
assert False, 'Expected SSException, got: {0}'.format(e) | This should work | parsl/tests/integration/test_channels/test_ssh_errors.py | test_error_3 | nirandaperera/parsl | 323 | python | def test_error_3():
' \n '
try:
connect_and_list('edison.nersc.gov', 'yadunand')
except BadHostKeyException as e:
print('Caught exception BadHostKeyException: ', e)
else:
assert False, 'Expected SSException, got: {0}'.format(e) | def test_error_3():
' \n '
try:
connect_and_list('edison.nersc.gov', 'yadunand')
except BadHostKeyException as e:
print('Caught exception BadHostKeyException: ', e)
else:
assert False, 'Expected SSException, got: {0}'.format(e)<|docstring|>This should work<|endoftext|> |
5f8268a06fe42e7d7785a7a10dfa376a5ba5d14eeff1178b440084938ab57617 | def _init_decode_head(self, decode_head):
'Initialize ``decode_head``'
self.decode_head = builder.build_head(decode_head)
self.align_corners = self.decode_head.align_corners
self.num_classes = self.decode_head.num_classes | Initialize ``decode_head`` | mmseg/models/segmentors/encoder_decoder.py | _init_decode_head | delldu/SegFormer | 0 | python | def _init_decode_head(self, decode_head):
self.decode_head = builder.build_head(decode_head)
self.align_corners = self.decode_head.align_corners
self.num_classes = self.decode_head.num_classes | def _init_decode_head(self, decode_head):
self.decode_head = builder.build_head(decode_head)
self.align_corners = self.decode_head.align_corners
self.num_classes = self.decode_head.num_classes<|docstring|>Initialize ``decode_head``<|endoftext|> |
8d721d005e5ded21f20dc3c05cfc41840764f6917cba674962022e8dbaf4946f | def init_weights(self, pretrained=None):
'Initialize the weights in backbone and heads.\n\n Args:\n pretrained (str, optional): Path to pre-trained weights.\n Defaults to None.\n '
super(EncoderDecoder, self).init_weights(pretrained)
self.backbone.init_weights(pretrained=pretrained)
self.decode_head.init_weights() | Initialize the weights in backbone and heads.
Args:
pretrained (str, optional): Path to pre-trained weights.
Defaults to None. | mmseg/models/segmentors/encoder_decoder.py | init_weights | delldu/SegFormer | 0 | python | def init_weights(self, pretrained=None):
'Initialize the weights in backbone and heads.\n\n Args:\n pretrained (str, optional): Path to pre-trained weights.\n Defaults to None.\n '
super(EncoderDecoder, self).init_weights(pretrained)
self.backbone.init_weights(pretrained=pretrained)
self.decode_head.init_weights() | def init_weights(self, pretrained=None):
'Initialize the weights in backbone and heads.\n\n Args:\n pretrained (str, optional): Path to pre-trained weights.\n Defaults to None.\n '
super(EncoderDecoder, self).init_weights(pretrained)
self.backbone.init_weights(pretrained=pretrained)
self.decode_head.init_weights()<|docstring|>Initialize the weights in backbone and heads.
Args:
pretrained (str, optional): Path to pre-trained weights.
Defaults to None.<|endoftext|> |
b2bc6b5eaac59c0950d23f850041f8a11273aacbe48a1f2efb81bfdeef3f92a9 | def inference(self, img, img_meta, rescale):
"Inference with slide/whole style.\n\n Args:\n img (Tensor): The input image of shape (N, 3, H, W).\n img_meta (dict): Image info dict where each dict has: 'img_shape',\n 'scale_factor', 'flip', and may also contain\n 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.\n For details on the values of these keys see\n `mmseg/datasets/pipelines/formatting.py:Collect`.\n rescale (bool): Whether rescale back to original shape.\n\n Returns:\n Tensor: The output segmentation map.\n "
assert (self.test_cfg.mode in ['slide', 'whole'])
ori_shape = img_meta[0]['ori_shape']
assert all(((_['ori_shape'] == ori_shape) for _ in img_meta))
x = self.backbone(img)
seg_logit = self.decode_head.forward_test(x, img_meta, self.test_cfg)
if rescale:
seg_logit = resize(seg_logit, size=img_meta[0]['ori_shape'][:2], mode='bilinear', align_corners=False, warning=False)
output = F.softmax(seg_logit, dim=1)
return output | Inference with slide/whole style.
Args:
img (Tensor): The input image of shape (N, 3, H, W).
img_meta (dict): Image info dict where each dict has: 'img_shape',
'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
`mmseg/datasets/pipelines/formatting.py:Collect`.
rescale (bool): Whether rescale back to original shape.
Returns:
Tensor: The output segmentation map. | mmseg/models/segmentors/encoder_decoder.py | inference | delldu/SegFormer | 0 | python | def inference(self, img, img_meta, rescale):
"Inference with slide/whole style.\n\n Args:\n img (Tensor): The input image of shape (N, 3, H, W).\n img_meta (dict): Image info dict where each dict has: 'img_shape',\n 'scale_factor', 'flip', and may also contain\n 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.\n For details on the values of these keys see\n `mmseg/datasets/pipelines/formatting.py:Collect`.\n rescale (bool): Whether rescale back to original shape.\n\n Returns:\n Tensor: The output segmentation map.\n "
assert (self.test_cfg.mode in ['slide', 'whole'])
ori_shape = img_meta[0]['ori_shape']
assert all(((_['ori_shape'] == ori_shape) for _ in img_meta))
x = self.backbone(img)
seg_logit = self.decode_head.forward_test(x, img_meta, self.test_cfg)
if rescale:
seg_logit = resize(seg_logit, size=img_meta[0]['ori_shape'][:2], mode='bilinear', align_corners=False, warning=False)
output = F.softmax(seg_logit, dim=1)
return output | def inference(self, img, img_meta, rescale):
"Inference with slide/whole style.\n\n Args:\n img (Tensor): The input image of shape (N, 3, H, W).\n img_meta (dict): Image info dict where each dict has: 'img_shape',\n 'scale_factor', 'flip', and may also contain\n 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.\n For details on the values of these keys see\n `mmseg/datasets/pipelines/formatting.py:Collect`.\n rescale (bool): Whether rescale back to original shape.\n\n Returns:\n Tensor: The output segmentation map.\n "
assert (self.test_cfg.mode in ['slide', 'whole'])
ori_shape = img_meta[0]['ori_shape']
assert all(((_['ori_shape'] == ori_shape) for _ in img_meta))
x = self.backbone(img)
seg_logit = self.decode_head.forward_test(x, img_meta, self.test_cfg)
if rescale:
seg_logit = resize(seg_logit, size=img_meta[0]['ori_shape'][:2], mode='bilinear', align_corners=False, warning=False)
output = F.softmax(seg_logit, dim=1)
return output<|docstring|>Inference with slide/whole style.
Args:
img (Tensor): The input image of shape (N, 3, H, W).
img_meta (dict): Image info dict where each dict has: 'img_shape',
'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
`mmseg/datasets/pipelines/formatting.py:Collect`.
rescale (bool): Whether rescale back to original shape.
Returns:
Tensor: The output segmentation map.<|endoftext|> |
ad8228af16e462e8fd8486b3a654fbde5bbb877f98958f7fa9ee2f46611e782d | def simple_test(self, img, img_meta, rescale=True):
'Simple test with single image.'
seg_logit = self.inference(img, img_meta, rescale)
seg_pred = seg_logit.argmax(dim=1)
if torch.onnx.is_in_onnx_export():
seg_pred = seg_pred.unsqueeze(0)
return seg_pred
seg_pred = seg_pred.cpu().numpy()
seg_pred = list(seg_pred)
return seg_pred | Simple test with single image. | mmseg/models/segmentors/encoder_decoder.py | simple_test | delldu/SegFormer | 0 | python | def simple_test(self, img, img_meta, rescale=True):
seg_logit = self.inference(img, img_meta, rescale)
seg_pred = seg_logit.argmax(dim=1)
if torch.onnx.is_in_onnx_export():
seg_pred = seg_pred.unsqueeze(0)
return seg_pred
seg_pred = seg_pred.cpu().numpy()
seg_pred = list(seg_pred)
return seg_pred | def simple_test(self, img, img_meta, rescale=True):
seg_logit = self.inference(img, img_meta, rescale)
seg_pred = seg_logit.argmax(dim=1)
if torch.onnx.is_in_onnx_export():
seg_pred = seg_pred.unsqueeze(0)
return seg_pred
seg_pred = seg_pred.cpu().numpy()
seg_pred = list(seg_pred)
return seg_pred<|docstring|>Simple test with single image.<|endoftext|> |
1ae9a8cb1730e0a9167c2f64c0cf144505cc2080180bb3d655a0de65476e89a5 | def open(self, comp_filepath, length_unit='DimMeter', angle_unit='DimDegree', study_type='Transient'):
'Open an existing JMAG file or a create new one if file does not exist.\n\n Launches the JMAG application by opening an already created file if or by creating a new file. Assigns JMAG\n application handles to object attributes for future operations. If intended file path does not exist and could\n not be created, an error is raised.\n\n Args:\n comp_filepath: Path of the JMAG file which is to be opened. If no such file exist, a new one is created.\n length_unit: String input of the eMach linear dimension unit to be employed to construct designs. JMAG tool\n only supports DimMeter\n angle_unit: String input of the eMach angular dimension unit to be employed to construct designs. JMAG tool\n only supports DimDegree\n study_type: Specifies type of study launched in JMAG. Commonly used types are Static2D, Transient2D,\n Frequency2D, Static, Transient, Frequency\n\n Returns:\n file_found: 1 if file exists; 0 if new file was created.\n '
self.default_length = length_unit
self.default_angle = angle_unit
self.study_type = study_type
file_found = 0
if (not os.path.isabs(comp_filepath)):
comp_filepath = ((os.path.abspath('.') + '\\') + comp_filepath)
(file_name_path, file_extension) = os.path.splitext(comp_filepath)
file_contents = file_name_path.split('\\')
if (file_extension != '.jproj'):
raise TypeError('Incorrect file extension')
file_path = ''
for i in range((len(file_contents) - 1)):
file_path = ((file_path + file_contents[i]) + '\\')
self.jd = self.jd_instance.GetNamedInstance(comp_filepath, 0)
self.set_visibility(self.visible)
if os.path.exists(comp_filepath):
file_found = 1
self.jd.Load(comp_filepath)
self.filepath = comp_filepath
else:
if os.path.exists(file_path):
self.filepath = comp_filepath
else:
try:
os.mkdir(file_path)
self.filepath = comp_filepath
except FileNotFoundError:
raise FileNotFoundError('Path was not found and could not be created')
self.jd.NewProject(self.filepath)
self.save_as(self.filepath)
self.view = self.jd.View()
self.jd.GetCurrentModel().RestoreCadLink(True)
self.geometry_editor = self.jd.CreateGeometryEditor(True)
self.doc = self.geometry_editor.GetDocument()
self.assembly = self.doc.GetAssembly()
return file_found | Open an existing JMAG file or a create new one if file does not exist.
Launches the JMAG application by opening an already created file if or by creating a new file. Assigns JMAG
application handles to object attributes for future operations. If intended file path does not exist and could
not be created, an error is raised.
Args:
comp_filepath: Path of the JMAG file which is to be opened. If no such file exist, a new one is created.
length_unit: String input of the eMach linear dimension unit to be employed to construct designs. JMAG tool
only supports DimMeter
angle_unit: String input of the eMach angular dimension unit to be employed to construct designs. JMAG tool
only supports DimDegree
study_type: Specifies type of study launched in JMAG. Commonly used types are Static2D, Transient2D,
Frequency2D, Static, Transient, Frequency
Returns:
file_found: 1 if file exists; 0 if new file was created. | mach_cad/tools/jmag/jmag.py | open | Severson-Group/MachEval | 6 | python | def open(self, comp_filepath, length_unit='DimMeter', angle_unit='DimDegree', study_type='Transient'):
'Open an existing JMAG file or a create new one if file does not exist.\n\n Launches the JMAG application by opening an already created file if or by creating a new file. Assigns JMAG\n application handles to object attributes for future operations. If intended file path does not exist and could\n not be created, an error is raised.\n\n Args:\n comp_filepath: Path of the JMAG file which is to be opened. If no such file exist, a new one is created.\n length_unit: String input of the eMach linear dimension unit to be employed to construct designs. JMAG tool\n only supports DimMeter\n angle_unit: String input of the eMach angular dimension unit to be employed to construct designs. JMAG tool\n only supports DimDegree\n study_type: Specifies type of study launched in JMAG. Commonly used types are Static2D, Transient2D,\n Frequency2D, Static, Transient, Frequency\n\n Returns:\n file_found: 1 if file exists; 0 if new file was created.\n '
self.default_length = length_unit
self.default_angle = angle_unit
self.study_type = study_type
file_found = 0
if (not os.path.isabs(comp_filepath)):
comp_filepath = ((os.path.abspath('.') + '\\') + comp_filepath)
(file_name_path, file_extension) = os.path.splitext(comp_filepath)
file_contents = file_name_path.split('\\')
if (file_extension != '.jproj'):
raise TypeError('Incorrect file extension')
file_path =
for i in range((len(file_contents) - 1)):
file_path = ((file_path + file_contents[i]) + '\\')
self.jd = self.jd_instance.GetNamedInstance(comp_filepath, 0)
self.set_visibility(self.visible)
if os.path.exists(comp_filepath):
file_found = 1
self.jd.Load(comp_filepath)
self.filepath = comp_filepath
else:
if os.path.exists(file_path):
self.filepath = comp_filepath
else:
try:
os.mkdir(file_path)
self.filepath = comp_filepath
except FileNotFoundError:
raise FileNotFoundError('Path was not found and could not be created')
self.jd.NewProject(self.filepath)
self.save_as(self.filepath)
self.view = self.jd.View()
self.jd.GetCurrentModel().RestoreCadLink(True)
self.geometry_editor = self.jd.CreateGeometryEditor(True)
self.doc = self.geometry_editor.GetDocument()
self.assembly = self.doc.GetAssembly()
return file_found | def open(self, comp_filepath, length_unit='DimMeter', angle_unit='DimDegree', study_type='Transient'):
'Open an existing JMAG file or a create new one if file does not exist.\n\n Launches the JMAG application by opening an already created file if or by creating a new file. Assigns JMAG\n application handles to object attributes for future operations. If intended file path does not exist and could\n not be created, an error is raised.\n\n Args:\n comp_filepath: Path of the JMAG file which is to be opened. If no such file exist, a new one is created.\n length_unit: String input of the eMach linear dimension unit to be employed to construct designs. JMAG tool\n only supports DimMeter\n angle_unit: String input of the eMach angular dimension unit to be employed to construct designs. JMAG tool\n only supports DimDegree\n study_type: Specifies type of study launched in JMAG. Commonly used types are Static2D, Transient2D,\n Frequency2D, Static, Transient, Frequency\n\n Returns:\n file_found: 1 if file exists; 0 if new file was created.\n '
self.default_length = length_unit
self.default_angle = angle_unit
self.study_type = study_type
file_found = 0
if (not os.path.isabs(comp_filepath)):
comp_filepath = ((os.path.abspath('.') + '\\') + comp_filepath)
(file_name_path, file_extension) = os.path.splitext(comp_filepath)
file_contents = file_name_path.split('\\')
if (file_extension != '.jproj'):
raise TypeError('Incorrect file extension')
file_path =
for i in range((len(file_contents) - 1)):
file_path = ((file_path + file_contents[i]) + '\\')
self.jd = self.jd_instance.GetNamedInstance(comp_filepath, 0)
self.set_visibility(self.visible)
if os.path.exists(comp_filepath):
file_found = 1
self.jd.Load(comp_filepath)
self.filepath = comp_filepath
else:
if os.path.exists(file_path):
self.filepath = comp_filepath
else:
try:
os.mkdir(file_path)
self.filepath = comp_filepath
except FileNotFoundError:
raise FileNotFoundError('Path was not found and could not be created')
self.jd.NewProject(self.filepath)
self.save_as(self.filepath)
self.view = self.jd.View()
self.jd.GetCurrentModel().RestoreCadLink(True)
self.geometry_editor = self.jd.CreateGeometryEditor(True)
self.doc = self.geometry_editor.GetDocument()
self.assembly = self.doc.GetAssembly()
return file_found<|docstring|>Open an existing JMAG file or a create new one if file does not exist.
Launches the JMAG application by opening an already created file if or by creating a new file. Assigns JMAG
application handles to object attributes for future operations. If intended file path does not exist and could
not be created, an error is raised.
Args:
comp_filepath: Path of the JMAG file which is to be opened. If no such file exist, a new one is created.
length_unit: String input of the eMach linear dimension unit to be employed to construct designs. JMAG tool
only supports DimMeter
angle_unit: String input of the eMach angular dimension unit to be employed to construct designs. JMAG tool
only supports DimDegree
study_type: Specifies type of study launched in JMAG. Commonly used types are Static2D, Transient2D,
Frequency2D, Static, Transient, Frequency
Returns:
file_found: 1 if file exists; 0 if new file was created.<|endoftext|> |
c35ff2a9d25998e7f3bbbb5767115964839d5438109cbc99c2d40226f10b615a | def save(self):
'Save JMAG designer file at previously defined path'
if (type(self.filepath) is str):
self.jd.SaveAs(self.filepath)
else:
raise AttributeError('Unable to save file. Use the save_as() function') | Save JMAG designer file at previously defined path | mach_cad/tools/jmag/jmag.py | save | Severson-Group/MachEval | 6 | python | def save(self):
if (type(self.filepath) is str):
self.jd.SaveAs(self.filepath)
else:
raise AttributeError('Unable to save file. Use the save_as() function') | def save(self):
if (type(self.filepath) is str):
self.jd.SaveAs(self.filepath)
else:
raise AttributeError('Unable to save file. Use the save_as() function')<|docstring|>Save JMAG designer file at previously defined path<|endoftext|> |
4ccbadba9429f6271ef0e3c6e119a1133d67c8713104104f9be3ca26c5ae69b7 | def save_as(self, filepath):
'Save JMAG designer file at defined path'
self.filepath = filepath
self.save() | Save JMAG designer file at defined path | mach_cad/tools/jmag/jmag.py | save_as | Severson-Group/MachEval | 6 | python | def save_as(self, filepath):
self.filepath = filepath
self.save() | def save_as(self, filepath):
self.filepath = filepath
self.save()<|docstring|>Save JMAG designer file at defined path<|endoftext|> |
b7b0008596b51009617022e5da75b72e366291fcaf74c61d94547d1805950856 | def close(self):
'Close JMAG designer file and all associated applications'
del self | Close JMAG designer file and all associated applications | mach_cad/tools/jmag/jmag.py | close | Severson-Group/MachEval | 6 | python | def close(self):
del self | def close(self):
del self<|docstring|>Close JMAG designer file and all associated applications<|endoftext|> |
90df6a4a4f0896e927a5b3e5d22528895c09beb48d732e1802fcbf73503cd733 | def set_visibility(self, visible):
'Set JMAG designer file visibility by passing True or False to visible'
self.visible = visible
if self.visible:
self.jd.Show()
else:
self.jd.Hide() | Set JMAG designer file visibility by passing True or False to visible | mach_cad/tools/jmag/jmag.py | set_visibility | Severson-Group/MachEval | 6 | python | def set_visibility(self, visible):
self.visible = visible
if self.visible:
self.jd.Show()
else:
self.jd.Hide() | def set_visibility(self, visible):
self.visible = visible
if self.visible:
self.jd.Show()
else:
self.jd.Hide()<|docstring|>Set JMAG designer file visibility by passing True or False to visible<|endoftext|> |
41eb023aecf1d748027afdcdc5072697f56c603de5757950f4a6f45a8a7b7af6 | def draw_line(self, startxy: 'Location2D', endxy: 'Location2D') -> 'TokenDraw':
'Draw a line in JMAG Geometry Editor.\n\n Args:\n startxy: Start point of line. Should be of type Location2D defined with eMach DimLinear.\n endxy: End point of the. Should be of type Location2D defined with eMach DimLinear.\n\n Returns:\n TokenDraw: Wrapper object holding return values obtained upon drawing a line.\n '
if (self.sketch is None):
self.sketch = self.create_sketch()
start_x = eval(self.default_length)(startxy[0])
start_y = eval(self.default_length)(startxy[1])
end_x = eval(self.default_length)(endxy[0])
end_y = eval(self.default_length)(endxy[1])
line = self.sketch.CreateLine(start_x, start_y, end_x, end_y)
return TokenDraw(line, 0) | Draw a line in JMAG Geometry Editor.
Args:
startxy: Start point of line. Should be of type Location2D defined with eMach DimLinear.
endxy: End point of the. Should be of type Location2D defined with eMach DimLinear.
Returns:
TokenDraw: Wrapper object holding return values obtained upon drawing a line. | mach_cad/tools/jmag/jmag.py | draw_line | Severson-Group/MachEval | 6 | python | def draw_line(self, startxy: 'Location2D', endxy: 'Location2D') -> 'TokenDraw':
'Draw a line in JMAG Geometry Editor.\n\n Args:\n startxy: Start point of line. Should be of type Location2D defined with eMach DimLinear.\n endxy: End point of the. Should be of type Location2D defined with eMach DimLinear.\n\n Returns:\n TokenDraw: Wrapper object holding return values obtained upon drawing a line.\n '
if (self.sketch is None):
self.sketch = self.create_sketch()
start_x = eval(self.default_length)(startxy[0])
start_y = eval(self.default_length)(startxy[1])
end_x = eval(self.default_length)(endxy[0])
end_y = eval(self.default_length)(endxy[1])
line = self.sketch.CreateLine(start_x, start_y, end_x, end_y)
return TokenDraw(line, 0) | def draw_line(self, startxy: 'Location2D', endxy: 'Location2D') -> 'TokenDraw':
'Draw a line in JMAG Geometry Editor.\n\n Args:\n startxy: Start point of line. Should be of type Location2D defined with eMach DimLinear.\n endxy: End point of the. Should be of type Location2D defined with eMach DimLinear.\n\n Returns:\n TokenDraw: Wrapper object holding return values obtained upon drawing a line.\n '
if (self.sketch is None):
self.sketch = self.create_sketch()
start_x = eval(self.default_length)(startxy[0])
start_y = eval(self.default_length)(startxy[1])
end_x = eval(self.default_length)(endxy[0])
end_y = eval(self.default_length)(endxy[1])
line = self.sketch.CreateLine(start_x, start_y, end_x, end_y)
return TokenDraw(line, 0)<|docstring|>Draw a line in JMAG Geometry Editor.
Args:
startxy: Start point of line. Should be of type Location2D defined with eMach DimLinear.
endxy: End point of the. Should be of type Location2D defined with eMach DimLinear.
Returns:
TokenDraw: Wrapper object holding return values obtained upon drawing a line.<|endoftext|> |
bf39bc813e74542e0c01ce35fa796e7594fa450db6a86457142ba9bc41e665d4 | def draw_arc(self, centerxy: 'Location2D', startxy: 'Location2D', endxy: 'Location2D') -> 'TokenDraw':
'Draw an arc in JMAG Geometry Editor.\n\n Args:\n centerxy: Centre point of arc. Should be of type Location2D defined with eMach Dimensions.\n startxy: Start point of arc. Should be of type Location2D defined with eMach Dimensions.\n endxy: End point of arc. Should be of type Location2D defined with eMach Dimensions.\n\n Returns:\n TokenDraw: Wrapper object holding return values obtained from tool upon drawing an arc.\n '
if (self.sketch is None):
self.sketch = self.create_sketch()
center_x = eval(self.default_length)(centerxy[0])
center_y = eval(self.default_length)(centerxy[1])
start_x = eval(self.default_length)(startxy[0])
start_y = eval(self.default_length)(startxy[1])
end_x = eval(self.default_length)(endxy[0])
end_y = eval(self.default_length)(endxy[1])
arc = self.sketch.CreateArc(center_x, center_y, start_x, start_y, end_x, end_y)
return TokenDraw(arc, 1) | Draw an arc in JMAG Geometry Editor.
Args:
centerxy: Centre point of arc. Should be of type Location2D defined with eMach Dimensions.
startxy: Start point of arc. Should be of type Location2D defined with eMach Dimensions.
endxy: End point of arc. Should be of type Location2D defined with eMach Dimensions.
Returns:
TokenDraw: Wrapper object holding return values obtained from tool upon drawing an arc. | mach_cad/tools/jmag/jmag.py | draw_arc | Severson-Group/MachEval | 6 | python | def draw_arc(self, centerxy: 'Location2D', startxy: 'Location2D', endxy: 'Location2D') -> 'TokenDraw':
'Draw an arc in JMAG Geometry Editor.\n\n Args:\n centerxy: Centre point of arc. Should be of type Location2D defined with eMach Dimensions.\n startxy: Start point of arc. Should be of type Location2D defined with eMach Dimensions.\n endxy: End point of arc. Should be of type Location2D defined with eMach Dimensions.\n\n Returns:\n TokenDraw: Wrapper object holding return values obtained from tool upon drawing an arc.\n '
if (self.sketch is None):
self.sketch = self.create_sketch()
center_x = eval(self.default_length)(centerxy[0])
center_y = eval(self.default_length)(centerxy[1])
start_x = eval(self.default_length)(startxy[0])
start_y = eval(self.default_length)(startxy[1])
end_x = eval(self.default_length)(endxy[0])
end_y = eval(self.default_length)(endxy[1])
arc = self.sketch.CreateArc(center_x, center_y, start_x, start_y, end_x, end_y)
return TokenDraw(arc, 1) | def draw_arc(self, centerxy: 'Location2D', startxy: 'Location2D', endxy: 'Location2D') -> 'TokenDraw':
'Draw an arc in JMAG Geometry Editor.\n\n Args:\n centerxy: Centre point of arc. Should be of type Location2D defined with eMach Dimensions.\n startxy: Start point of arc. Should be of type Location2D defined with eMach Dimensions.\n endxy: End point of arc. Should be of type Location2D defined with eMach Dimensions.\n\n Returns:\n TokenDraw: Wrapper object holding return values obtained from tool upon drawing an arc.\n '
if (self.sketch is None):
self.sketch = self.create_sketch()
center_x = eval(self.default_length)(centerxy[0])
center_y = eval(self.default_length)(centerxy[1])
start_x = eval(self.default_length)(startxy[0])
start_y = eval(self.default_length)(startxy[1])
end_x = eval(self.default_length)(endxy[0])
end_y = eval(self.default_length)(endxy[1])
arc = self.sketch.CreateArc(center_x, center_y, start_x, start_y, end_x, end_y)
return TokenDraw(arc, 1)<|docstring|>Draw an arc in JMAG Geometry Editor.
Args:
centerxy: Centre point of arc. Should be of type Location2D defined with eMach Dimensions.
startxy: Start point of arc. Should be of type Location2D defined with eMach Dimensions.
endxy: End point of arc. Should be of type Location2D defined with eMach Dimensions.
Returns:
TokenDraw: Wrapper object holding return values obtained from tool upon drawing an arc.<|endoftext|> |
c76a72083a21f4d74bf6d6183a8c571bfd2e6c9d46dc41ae36c2e5f0aceea12c | def create_sketch(self):
'Create and open a new sketch in JMAG geometry editor'
ref1 = self.assembly.GetItem('XY Plane')
ref2 = self.doc.CreateReferenceFromItem(ref1)
sketch = self.assembly.CreateSketch(ref2)
sketch_name = 'sketch_drawing'
sketch.SetProperty('Name', sketch_name)
sketch.OpenSketch()
return sketch | Create and open a new sketch in JMAG geometry editor | mach_cad/tools/jmag/jmag.py | create_sketch | Severson-Group/MachEval | 6 | python | def create_sketch(self):
ref1 = self.assembly.GetItem('XY Plane')
ref2 = self.doc.CreateReferenceFromItem(ref1)
sketch = self.assembly.CreateSketch(ref2)
sketch_name = 'sketch_drawing'
sketch.SetProperty('Name', sketch_name)
sketch.OpenSketch()
return sketch | def create_sketch(self):
ref1 = self.assembly.GetItem('XY Plane')
ref2 = self.doc.CreateReferenceFromItem(ref1)
sketch = self.assembly.CreateSketch(ref2)
sketch_name = 'sketch_drawing'
sketch.SetProperty('Name', sketch_name)
sketch.OpenSketch()
return sketch<|docstring|>Create and open a new sketch in JMAG geometry editor<|endoftext|> |
f8f1f22252e1f6db5a84a180154f4a9b2f360a6187abf12a0932bba1d4d47566 | def create_part(self):
'Create a new part in JMAG geometry editor'
sketch_name = 'sketch_drawing'
self.sketch.OpenSketch()
ref1 = self.assembly.GetItem(sketch_name)
ref2 = self.doc.CreateReferenceFromItem(ref1)
self.assembly.MoveToPart(ref2)
part = self.assembly.GetItem(sketch_name)
self.sketch.CloseSketch()
return part | Create a new part in JMAG geometry editor | mach_cad/tools/jmag/jmag.py | create_part | Severson-Group/MachEval | 6 | python | def create_part(self):
sketch_name = 'sketch_drawing'
self.sketch.OpenSketch()
ref1 = self.assembly.GetItem(sketch_name)
ref2 = self.doc.CreateReferenceFromItem(ref1)
self.assembly.MoveToPart(ref2)
part = self.assembly.GetItem(sketch_name)
self.sketch.CloseSketch()
return part | def create_part(self):
sketch_name = 'sketch_drawing'
self.sketch.OpenSketch()
ref1 = self.assembly.GetItem(sketch_name)
ref2 = self.doc.CreateReferenceFromItem(ref1)
self.assembly.MoveToPart(ref2)
part = self.assembly.GetItem(sketch_name)
self.sketch.CloseSketch()
return part<|docstring|>Create a new part in JMAG geometry editor<|endoftext|> |
7f088ea947e5cc14ac60ccb7ae8fd846f29476ad7c3fdba0087ebcf7b1500112 | def prepare_section(self, cs_token: 'CrossSectToken') -> TokenMake:
' Creates JMAG geometry region using lines and arcs.\n '
self.geometry_editor.View().Xy()
self.doc.GetSelection().Clear()
for i in range(len(cs_token.token)):
self.doc.GetSelection().Add(self.sketch.GetItem(cs_token.token[i].draw_token.GetName()))
id = self.sketch.NumItems()
self.sketch.CreateRegions()
id2 = self.sketch.NumItems()
visItem = 1
itemType = 64
innerCoord1 = cs_token.inner_coord[0]
innerCoord2 = cs_token.inner_coord[1]
innerCoord1 = eval(self.default_length)(innerCoord1)
innerCoord2 = eval(self.default_length)(innerCoord2)
self.geometry_editor.View().SelectAtCoordinateDlg(innerCoord1, innerCoord2, 0, visItem, itemType)
region = self.doc.GetSelection().Item(0)
regionName = region.GetName()
regionList = ['Region']
for idx in range(1, (id2 - id)):
regionList.append(('Region.' + str((idx + 1))))
for idx in range((id2 - id)):
if (regionList[idx] != regionName):
self.doc.GetSelection().Clear()
self.doc.GetSelection().Add(self.sketch.GetItem(regionList[idx]))
self.doc.GetSelection().Delete()
self.sketch.CloseSketch()
return region | Creates JMAG geometry region using lines and arcs. | mach_cad/tools/jmag/jmag.py | prepare_section | Severson-Group/MachEval | 6 | python | def prepare_section(self, cs_token: 'CrossSectToken') -> TokenMake:
' \n '
self.geometry_editor.View().Xy()
self.doc.GetSelection().Clear()
for i in range(len(cs_token.token)):
self.doc.GetSelection().Add(self.sketch.GetItem(cs_token.token[i].draw_token.GetName()))
id = self.sketch.NumItems()
self.sketch.CreateRegions()
id2 = self.sketch.NumItems()
visItem = 1
itemType = 64
innerCoord1 = cs_token.inner_coord[0]
innerCoord2 = cs_token.inner_coord[1]
innerCoord1 = eval(self.default_length)(innerCoord1)
innerCoord2 = eval(self.default_length)(innerCoord2)
self.geometry_editor.View().SelectAtCoordinateDlg(innerCoord1, innerCoord2, 0, visItem, itemType)
region = self.doc.GetSelection().Item(0)
regionName = region.GetName()
regionList = ['Region']
for idx in range(1, (id2 - id)):
regionList.append(('Region.' + str((idx + 1))))
for idx in range((id2 - id)):
if (regionList[idx] != regionName):
self.doc.GetSelection().Clear()
self.doc.GetSelection().Add(self.sketch.GetItem(regionList[idx]))
self.doc.GetSelection().Delete()
self.sketch.CloseSketch()
return region | def prepare_section(self, cs_token: 'CrossSectToken') -> TokenMake:
' \n '
self.geometry_editor.View().Xy()
self.doc.GetSelection().Clear()
for i in range(len(cs_token.token)):
self.doc.GetSelection().Add(self.sketch.GetItem(cs_token.token[i].draw_token.GetName()))
id = self.sketch.NumItems()
self.sketch.CreateRegions()
id2 = self.sketch.NumItems()
visItem = 1
itemType = 64
innerCoord1 = cs_token.inner_coord[0]
innerCoord2 = cs_token.inner_coord[1]
innerCoord1 = eval(self.default_length)(innerCoord1)
innerCoord2 = eval(self.default_length)(innerCoord2)
self.geometry_editor.View().SelectAtCoordinateDlg(innerCoord1, innerCoord2, 0, visItem, itemType)
region = self.doc.GetSelection().Item(0)
regionName = region.GetName()
regionList = ['Region']
for idx in range(1, (id2 - id)):
regionList.append(('Region.' + str((idx + 1))))
for idx in range((id2 - id)):
if (regionList[idx] != regionName):
self.doc.GetSelection().Clear()
self.doc.GetSelection().Add(self.sketch.GetItem(regionList[idx]))
self.doc.GetSelection().Delete()
self.sketch.CloseSketch()
return region<|docstring|>Creates JMAG geometry region using lines and arcs.<|endoftext|> |
39bcbc5a554e564141e74e9e0ac4f453066f75bb0203fe60861c7961c49bbed8 | def create_study(self, study_name, study_type, model) -> any:
'Creates a JMAG study\n '
self.study_type = study_type
num_studies = self.jd.NumStudies()
if (num_studies == 0):
study = model.CreateStudy(study_type, study_name)
else:
for i in range((num_studies - 2)):
model.DeleteStudy(i)
study = self.jd.GetCurrentStudy()
study.SetName(study_name)
return study | Creates a JMAG study | mach_cad/tools/jmag/jmag.py | create_study | Severson-Group/MachEval | 6 | python | def create_study(self, study_name, study_type, model) -> any:
'\n '
self.study_type = study_type
num_studies = self.jd.NumStudies()
if (num_studies == 0):
study = model.CreateStudy(study_type, study_name)
else:
for i in range((num_studies - 2)):
model.DeleteStudy(i)
study = self.jd.GetCurrentStudy()
study.SetName(study_name)
return study | def create_study(self, study_name, study_type, model) -> any:
'\n '
self.study_type = study_type
num_studies = self.jd.NumStudies()
if (num_studies == 0):
study = model.CreateStudy(study_type, study_name)
else:
for i in range((num_studies - 2)):
model.DeleteStudy(i)
study = self.jd.GetCurrentStudy()
study.SetName(study_name)
return study<|docstring|>Creates a JMAG study<|endoftext|> |
2eb7759b03036cf37822603f355607134f2a1e73260422a940d8108127bd783b | def extrude(self, name, material: str, depth: float, token=None) -> any:
' Extrudes a cross-section to a 3D component\n\n Args:\n name: name of the newly extruded component.\n depth: Depth of extrusion. Should be defined with eMach Dimensions.\n material : Material applied to the extruded component.\n\n Returns:\n Function will return the handle to the new extruded part\n '
depth = eval(self.default_length)(depth)
self.part = self.create_part()
ref1 = self.sketch
extrude_part = self.part.CreateExtrudeSolid(ref1, depth)
self.part.SetProperty('Name', name)
self.part.SetProperty('Color', material.color)
sketch_name = (name + '_sketch')
self.sketch.SetProperty('Name', sketch_name)
self.part = None
self.sketch = None
self.doc.SaveModel(True)
model_name = (name + '_model')
self.model = self.create_model(model_name)
study_name = (name + '_study')
self.study = self.create_study(study_name, self.study_type, self.model)
self.set_default_length_unit(self.default_length)
self.set_default_angle_unit(self.default_angle)
self.study.SetMaterialByName(name, material.name)
return extrude_part | Extrudes a cross-section to a 3D component
Args:
name: name of the newly extruded component.
depth: Depth of extrusion. Should be defined with eMach Dimensions.
material : Material applied to the extruded component.
Returns:
Function will return the handle to the new extruded part | mach_cad/tools/jmag/jmag.py | extrude | Severson-Group/MachEval | 6 | python | def extrude(self, name, material: str, depth: float, token=None) -> any:
' Extrudes a cross-section to a 3D component\n\n Args:\n name: name of the newly extruded component.\n depth: Depth of extrusion. Should be defined with eMach Dimensions.\n material : Material applied to the extruded component.\n\n Returns:\n Function will return the handle to the new extruded part\n '
depth = eval(self.default_length)(depth)
self.part = self.create_part()
ref1 = self.sketch
extrude_part = self.part.CreateExtrudeSolid(ref1, depth)
self.part.SetProperty('Name', name)
self.part.SetProperty('Color', material.color)
sketch_name = (name + '_sketch')
self.sketch.SetProperty('Name', sketch_name)
self.part = None
self.sketch = None
self.doc.SaveModel(True)
model_name = (name + '_model')
self.model = self.create_model(model_name)
study_name = (name + '_study')
self.study = self.create_study(study_name, self.study_type, self.model)
self.set_default_length_unit(self.default_length)
self.set_default_angle_unit(self.default_angle)
self.study.SetMaterialByName(name, material.name)
return extrude_part | def extrude(self, name, material: str, depth: float, token=None) -> any:
' Extrudes a cross-section to a 3D component\n\n Args:\n name: name of the newly extruded component.\n depth: Depth of extrusion. Should be defined with eMach Dimensions.\n material : Material applied to the extruded component.\n\n Returns:\n Function will return the handle to the new extruded part\n '
depth = eval(self.default_length)(depth)
self.part = self.create_part()
ref1 = self.sketch
extrude_part = self.part.CreateExtrudeSolid(ref1, depth)
self.part.SetProperty('Name', name)
self.part.SetProperty('Color', material.color)
sketch_name = (name + '_sketch')
self.sketch.SetProperty('Name', sketch_name)
self.part = None
self.sketch = None
self.doc.SaveModel(True)
model_name = (name + '_model')
self.model = self.create_model(model_name)
study_name = (name + '_study')
self.study = self.create_study(study_name, self.study_type, self.model)
self.set_default_length_unit(self.default_length)
self.set_default_angle_unit(self.default_angle)
self.study.SetMaterialByName(name, material.name)
return extrude_part<|docstring|>Extrudes a cross-section to a 3D component
Args:
name: name of the newly extruded component.
depth: Depth of extrusion. Should be defined with eMach Dimensions.
material : Material applied to the extruded component.
Returns:
Function will return the handle to the new extruded part<|endoftext|> |
5e16351351a32561d6b302d7d25ad7ff24de2a506862ff7f9e0eed62ee59eeae | def revolve(self, name, material: str, center, axis, angle: float) -> any:
' Revolves cross-section along an arc\n\n Args:\n name: Name of the newly revolved component.\n material: Material applied to the component.\n center: center point of rotation. Should be of type Location2d defined with eMach Dimensions.\n axis: Axis of rotation. Should be of type Location2d defined with eMach Dimensions.\n Specifying negative value reverses the axis of rotation.\n\n Returns:\n This function will return the handle of the newly revolved part.\n '
center = eval(self.default_length, center)
axis = eval(self.default_length, axis)
angle = eval(self.default_angle, angle)
self.part = self.create_part()
ref1 = self.sketch
revolve_part = self.part.CreateRevolveSolid(ref1)
self.part.GetItem('Revolve').setProperty('SpecificRatio', 1)
self.part.GetItem('Revolve').setProperty('AxisType', '1')
self.part.GetItem('Revolve').setProperty('AxisPosX', center[0])
self.part.GetItem('Revolve').setProperty('AxisPosY', center[1])
self.part.GetItem('Revolve').setProperty('AxisVecX', axis[0])
self.part.GetItem('Revolve').setProperty('AxisVecY', axis[1])
self.part.GetItem('Revolve').setProperty('AxisVecZ', 0)
self.part.GetItem('Revolve').setProperty('Angle', angle)
self.part.SetProperty('Name', name)
sketch_name = (name + '_sketch')
self.sketch.SetProperty('Name', sketch_name)
self.part = None
self.doc.SaveModel(True)
model_name = (name + '_model')
self.model = self.create_model(model_name)
study_name = (name + '_study')
self.study = self.create_study(study_name, self.study_type, self.model)
self.set_default_length_unit(self.default_length)
self.set_default_angle_unit(self.default_angle)
self.study.SetMaterialByName(name, material.name)
return revolve_part | Revolves cross-section along an arc
Args:
name: Name of the newly revolved component.
material: Material applied to the component.
center: center point of rotation. Should be of type Location2d defined with eMach Dimensions.
axis: Axis of rotation. Should be of type Location2d defined with eMach Dimensions.
Specifying negative value reverses the axis of rotation.
Returns:
This function will return the handle of the newly revolved part. | mach_cad/tools/jmag/jmag.py | revolve | Severson-Group/MachEval | 6 | python | def revolve(self, name, material: str, center, axis, angle: float) -> any:
' Revolves cross-section along an arc\n\n Args:\n name: Name of the newly revolved component.\n material: Material applied to the component.\n center: center point of rotation. Should be of type Location2d defined with eMach Dimensions.\n axis: Axis of rotation. Should be of type Location2d defined with eMach Dimensions.\n Specifying negative value reverses the axis of rotation.\n\n Returns:\n This function will return the handle of the newly revolved part.\n '
center = eval(self.default_length, center)
axis = eval(self.default_length, axis)
angle = eval(self.default_angle, angle)
self.part = self.create_part()
ref1 = self.sketch
revolve_part = self.part.CreateRevolveSolid(ref1)
self.part.GetItem('Revolve').setProperty('SpecificRatio', 1)
self.part.GetItem('Revolve').setProperty('AxisType', '1')
self.part.GetItem('Revolve').setProperty('AxisPosX', center[0])
self.part.GetItem('Revolve').setProperty('AxisPosY', center[1])
self.part.GetItem('Revolve').setProperty('AxisVecX', axis[0])
self.part.GetItem('Revolve').setProperty('AxisVecY', axis[1])
self.part.GetItem('Revolve').setProperty('AxisVecZ', 0)
self.part.GetItem('Revolve').setProperty('Angle', angle)
self.part.SetProperty('Name', name)
sketch_name = (name + '_sketch')
self.sketch.SetProperty('Name', sketch_name)
self.part = None
self.doc.SaveModel(True)
model_name = (name + '_model')
self.model = self.create_model(model_name)
study_name = (name + '_study')
self.study = self.create_study(study_name, self.study_type, self.model)
self.set_default_length_unit(self.default_length)
self.set_default_angle_unit(self.default_angle)
self.study.SetMaterialByName(name, material.name)
return revolve_part | def revolve(self, name, material: str, center, axis, angle: float) -> any:
' Revolves cross-section along an arc\n\n Args:\n name: Name of the newly revolved component.\n material: Material applied to the component.\n center: center point of rotation. Should be of type Location2d defined with eMach Dimensions.\n axis: Axis of rotation. Should be of type Location2d defined with eMach Dimensions.\n Specifying negative value reverses the axis of rotation.\n\n Returns:\n This function will return the handle of the newly revolved part.\n '
center = eval(self.default_length, center)
axis = eval(self.default_length, axis)
angle = eval(self.default_angle, angle)
self.part = self.create_part()
ref1 = self.sketch
revolve_part = self.part.CreateRevolveSolid(ref1)
self.part.GetItem('Revolve').setProperty('SpecificRatio', 1)
self.part.GetItem('Revolve').setProperty('AxisType', '1')
self.part.GetItem('Revolve').setProperty('AxisPosX', center[0])
self.part.GetItem('Revolve').setProperty('AxisPosY', center[1])
self.part.GetItem('Revolve').setProperty('AxisVecX', axis[0])
self.part.GetItem('Revolve').setProperty('AxisVecY', axis[1])
self.part.GetItem('Revolve').setProperty('AxisVecZ', 0)
self.part.GetItem('Revolve').setProperty('Angle', angle)
self.part.SetProperty('Name', name)
sketch_name = (name + '_sketch')
self.sketch.SetProperty('Name', sketch_name)
self.part = None
self.doc.SaveModel(True)
model_name = (name + '_model')
self.model = self.create_model(model_name)
study_name = (name + '_study')
self.study = self.create_study(study_name, self.study_type, self.model)
self.set_default_length_unit(self.default_length)
self.set_default_angle_unit(self.default_angle)
self.study.SetMaterialByName(name, material.name)
return revolve_part<|docstring|>Revolves cross-section along an arc
Args:
name: Name of the newly revolved component.
material: Material applied to the component.
center: center point of rotation. Should be of type Location2d defined with eMach Dimensions.
axis: Axis of rotation. Should be of type Location2d defined with eMach Dimensions.
Specifying negative value reverses the axis of rotation.
Returns:
This function will return the handle of the newly revolved part.<|endoftext|> |
f6b5a5b3c33923241f50c080f54bf13bd66aae610689177e8a253cdce175b673 | def set_default_length_unit(self, user_unit):
'Set the default length unit in JMAG. Only DimMeter supported.\n\n Args:\n user_unit: String representing the unit the user wishes to set as default.\n\n Raises:\n TypeError: Incorrect dimension passed\n '
if (user_unit == 'DimMeter'):
self.default_length = user_unit
self.model.SetUnitCollection('SI_units')
else:
raise Exception('Unsupported length unit') | Set the default length unit in JMAG. Only DimMeter supported.
Args:
user_unit: String representing the unit the user wishes to set as default.
Raises:
TypeError: Incorrect dimension passed | mach_cad/tools/jmag/jmag.py | set_default_length_unit | Severson-Group/MachEval | 6 | python | def set_default_length_unit(self, user_unit):
'Set the default length unit in JMAG. Only DimMeter supported.\n\n Args:\n user_unit: String representing the unit the user wishes to set as default.\n\n Raises:\n TypeError: Incorrect dimension passed\n '
if (user_unit == 'DimMeter'):
self.default_length = user_unit
self.model.SetUnitCollection('SI_units')
else:
raise Exception('Unsupported length unit') | def set_default_length_unit(self, user_unit):
'Set the default length unit in JMAG. Only DimMeter supported.\n\n Args:\n user_unit: String representing the unit the user wishes to set as default.\n\n Raises:\n TypeError: Incorrect dimension passed\n '
if (user_unit == 'DimMeter'):
self.default_length = user_unit
self.model.SetUnitCollection('SI_units')
else:
raise Exception('Unsupported length unit')<|docstring|>Set the default length unit in JMAG. Only DimMeter supported.
Args:
user_unit: String representing the unit the user wishes to set as default.
Raises:
TypeError: Incorrect dimension passed<|endoftext|> |
81830dd6bb17ca667f6e08298d0d513210098438b3baaa232ec4961f777b907a | def set_default_angle_unit(self, user_unit):
'Set the default angular unit in JMAG. Only DimDegree supported.\n\n Args:\n user_unit: String representing the unit the user wishes to set as default.\n\n Raises:\n TypeError: Incorrect dimension passed\n '
if (user_unit == 'DimDegree'):
self.default_angle = user_unit
self.model.SetUnitCollection('SI_units')
else:
raise Exception('Unsupported angle unit') | Set the default angular unit in JMAG. Only DimDegree supported.
Args:
user_unit: String representing the unit the user wishes to set as default.
Raises:
TypeError: Incorrect dimension passed | mach_cad/tools/jmag/jmag.py | set_default_angle_unit | Severson-Group/MachEval | 6 | python | def set_default_angle_unit(self, user_unit):
'Set the default angular unit in JMAG. Only DimDegree supported.\n\n Args:\n user_unit: String representing the unit the user wishes to set as default.\n\n Raises:\n TypeError: Incorrect dimension passed\n '
if (user_unit == 'DimDegree'):
self.default_angle = user_unit
self.model.SetUnitCollection('SI_units')
else:
raise Exception('Unsupported angle unit') | def set_default_angle_unit(self, user_unit):
'Set the default angular unit in JMAG. Only DimDegree supported.\n\n Args:\n user_unit: String representing the unit the user wishes to set as default.\n\n Raises:\n TypeError: Incorrect dimension passed\n '
if (user_unit == 'DimDegree'):
self.default_angle = user_unit
self.model.SetUnitCollection('SI_units')
else:
raise Exception('Unsupported angle unit')<|docstring|>Set the default angular unit in JMAG. Only DimDegree supported.
Args:
user_unit: String representing the unit the user wishes to set as default.
Raises:
TypeError: Incorrect dimension passed<|endoftext|> |
d931416c1a416f6ac58e3bd18aea9b47d76147ac220c871070f35dc83f46ebcf | def save(self, filename):
'\n Saves the uploaded FileInput data to a file or BytesIO object.\n\n Arguments\n ---------\n filename (str): File path or file-like object\n '
if isinstance(filename, str):
with open(filename, 'wb') as f:
f.write(self.value)
else:
filename.write(self.value) | Saves the uploaded FileInput data to a file or BytesIO object.
Arguments
---------
filename (str): File path or file-like object | panel/widgets/input.py | save | gnowland/panel | 1,130 | python | def save(self, filename):
'\n Saves the uploaded FileInput data to a file or BytesIO object.\n\n Arguments\n ---------\n filename (str): File path or file-like object\n '
if isinstance(filename, str):
with open(filename, 'wb') as f:
f.write(self.value)
else:
filename.write(self.value) | def save(self, filename):
'\n Saves the uploaded FileInput data to a file or BytesIO object.\n\n Arguments\n ---------\n filename (str): File path or file-like object\n '
if isinstance(filename, str):
with open(filename, 'wb') as f:
f.write(self.value)
else:
filename.write(self.value)<|docstring|>Saves the uploaded FileInput data to a file or BytesIO object.
Arguments
---------
filename (str): File path or file-like object<|endoftext|> |
a9697c11e482f1cea59956b6b0faa5a02499bb33850780a9e3e0034481686bde | def __init__(self, diveFolder='./'):
'Initiate camera and lock resources'
PiCamera.__init__(self)
self.diveFolder = diveFolder
self.deployed = False
self.last_access = 0
self.stream = None
self.thread = None
self.last_frame = None | Initiate camera and lock resources | deepi.py | __init__ | rshom/DEEPi | 1 | python | def __init__(self, diveFolder='./'):
PiCamera.__init__(self)
self.diveFolder = diveFolder
self.deployed = False
self.last_access = 0
self.stream = None
self.thread = None
self.last_frame = None | def __init__(self, diveFolder='./'):
PiCamera.__init__(self)
self.diveFolder = diveFolder
self.deployed = False
self.last_access = 0
self.stream = None
self.thread = None
self.last_frame = None<|docstring|>Initiate camera and lock resources<|endoftext|> |
5441b0af3e428a9c4be136ed816a28bc5e99818c358c677b6391359de9bdc885 | def close(self):
'Release all resources'
PiCamera.close(self) | Release all resources | deepi.py | close | rshom/DEEPi | 1 | python | def close(self):
PiCamera.close(self) | def close(self):
PiCamera.close(self)<|docstring|>Release all resources<|endoftext|> |
5271f769ff0c01445f1be831f444fed2c20787b280ebb4d0005b42c3852b95d8 | def update_frame(self):
'Continuous capture that saves the latest frame in memory.\n Any live stream applications will access this updating frame\n '
self.stream = io.BytesIO()
print('starting capture')
for _ in PiCamera.capture_continuous(self, self.stream, 'jpeg', use_video_port=True):
self.stream.seek(0)
self.last_frame = self.stream.read()
self.stream.seek(0)
self.stream.truncate()
if ((time.time() - self.last_access) > 10):
print((time.time() - self.last_access))
self.stream = None
break | Continuous capture that saves the latest frame in memory.
Any live stream applications will access this updating frame | deepi.py | update_frame | rshom/DEEPi | 1 | python | def update_frame(self):
'Continuous capture that saves the latest frame in memory.\n Any live stream applications will access this updating frame\n '
self.stream = io.BytesIO()
print('starting capture')
for _ in PiCamera.capture_continuous(self, self.stream, 'jpeg', use_video_port=True):
self.stream.seek(0)
self.last_frame = self.stream.read()
self.stream.seek(0)
self.stream.truncate()
if ((time.time() - self.last_access) > 10):
print((time.time() - self.last_access))
self.stream = None
break | def update_frame(self):
'Continuous capture that saves the latest frame in memory.\n Any live stream applications will access this updating frame\n '
self.stream = io.BytesIO()
print('starting capture')
for _ in PiCamera.capture_continuous(self, self.stream, 'jpeg', use_video_port=True):
self.stream.seek(0)
self.last_frame = self.stream.read()
self.stream.seek(0)
self.stream.truncate()
if ((time.time() - self.last_access) > 10):
print((time.time() - self.last_access))
self.stream = None
break<|docstring|>Continuous capture that saves the latest frame in memory.
Any live stream applications will access this updating frame<|endoftext|> |
892a5cbb6c276ec502b6d470a1bea81cd5e4b05c2405c12985875c9f168e9e67 | def start_stream(self):
'Start and stop the threaded process for updating the live stream frame'
self.last_access = time.time()
if (self.thread is None):
self.thread = threading.Thread(target=self.update_frame)
self.thread.start()
while (self.last_frame is None):
time.sleep(0) | Start and stop the threaded process for updating the live stream frame | deepi.py | start_stream | rshom/DEEPi | 1 | python | def start_stream(self):
self.last_access = time.time()
if (self.thread is None):
self.thread = threading.Thread(target=self.update_frame)
self.thread.start()
while (self.last_frame is None):
time.sleep(0) | def start_stream(self):
self.last_access = time.time()
if (self.thread is None):
self.thread = threading.Thread(target=self.update_frame)
self.thread.start()
while (self.last_frame is None):
time.sleep(0)<|docstring|>Start and stop the threaded process for updating the live stream frame<|endoftext|> |
bed23fce88f7e792bd8189d31bc3e8b99f93798f3ed0ebfb5f5da0c2d429c486 | def __enter__(self):
'Called whenever instance is opened using a with statement'
return self | Called whenever instance is opened using a with statement | deepi.py | __enter__ | rshom/DEEPi | 1 | python | def __enter__(self):
return self | def __enter__(self):
return self<|docstring|>Called whenever instance is opened using a with statement<|endoftext|> |
6aadb9e8bf9edea01ff903d8d903572e98560c98c7ad25e31a7de1ef906692ba | def __exit__(self, exc_type, exc_val, exc_tb):
'Close out anything necessary'
self.close() | Close out anything necessary | deepi.py | __exit__ | rshom/DEEPi | 1 | python | def __exit__(self, exc_type, exc_val, exc_tb):
self.close() | def __exit__(self, exc_type, exc_val, exc_tb):
self.close()<|docstring|>Close out anything necessary<|endoftext|> |
d5086c97c9adbc2c579753004248ecd55ba9a479bc2bd4d9f80ab32c73374fd7 | def what_are_we_looking_for(self, fct_name, verbose=False):
'returns the files we are looking for, for a functor in a module'
tb_name = self.get_tb_name()
if verbose:
print(("for the '%s' module and functor '%s' \nthe following files are looked for," % (tb_name, fct_name)))
print('from nt2 root:')
r = []
for f in self.get_rel_tb_fcts_files(tb_name, fct_name):
if re.match('doc|bench|unit', f):
r += os.path.normpath(os.path.join(tb_name, f))
if verbose:
print((' %s' % os.path.normpath(os.path.join(tb_name, f))))
else:
r += os.path.normpath(os.path.join(self.get_tb_from_md_path(), f))
if verbose:
print((' %s' % os.path.normpath(os.path.join(self.get_tb_from_md_path(), f))))
return r | returns the files we are looking for, for a functor in a module | script/python/lib/nt2_basics/nt2_tb_props.py | what_are_we_looking_for | timblechmann/nt2 | 2 | python | def what_are_we_looking_for(self, fct_name, verbose=False):
tb_name = self.get_tb_name()
if verbose:
print(("for the '%s' module and functor '%s' \nthe following files are looked for," % (tb_name, fct_name)))
print('from nt2 root:')
r = []
for f in self.get_rel_tb_fcts_files(tb_name, fct_name):
if re.match('doc|bench|unit', f):
r += os.path.normpath(os.path.join(tb_name, f))
if verbose:
print((' %s' % os.path.normpath(os.path.join(tb_name, f))))
else:
r += os.path.normpath(os.path.join(self.get_tb_from_md_path(), f))
if verbose:
print((' %s' % os.path.normpath(os.path.join(self.get_tb_from_md_path(), f))))
return r | def what_are_we_looking_for(self, fct_name, verbose=False):
tb_name = self.get_tb_name()
if verbose:
print(("for the '%s' module and functor '%s' \nthe following files are looked for," % (tb_name, fct_name)))
print('from nt2 root:')
r = []
for f in self.get_rel_tb_fcts_files(tb_name, fct_name):
if re.match('doc|bench|unit', f):
r += os.path.normpath(os.path.join(tb_name, f))
if verbose:
print((' %s' % os.path.normpath(os.path.join(tb_name, f))))
else:
r += os.path.normpath(os.path.join(self.get_tb_from_md_path(), f))
if verbose:
print((' %s' % os.path.normpath(os.path.join(self.get_tb_from_md_path(), f))))
return r<|docstring|>returns the files we are looking for, for a functor in a module<|endoftext|> |
7dd01966d290d1f11f2cd2796848bdb3bb433958d91aefa39ab91c10eeca266f | def who_is_here(self, fct_name, verbose=False):
'returns the files already present for a functor in a module'
tb_name = self.get_tb_name()
head = False
mes = ("for the '%s' module and functor '%s' \nthe following files exist:" % (tb_name, fct_name))
r = []
for f in self.get_rel_tb_fcts_files(tb_name, fct_name):
if re.match('doc|bench|unit', f):
if exist(os.path.join(self.get_md_path(), f)):
r += os.path.abspath(os.path.join(self.get_md_path(), f))
if verbose:
head = self.__print_cond(head, mes)
print((' %s' % os.path.abspath(os.path.join(self.get_md_path(), f))))
elif exist(os.path.join(self.get_tb_path(), f)):
r += os.path.abspath(os.path.join(self.get_tb_path(), f))
if verbose:
head = self.__print_cond(head, mes)
print((' %s' % os.path.abspath(os.path.join(self.get_tb_path(), f))))
if ((not head) and verbose):
print(("for the '%s' module and functor '%s', no files are defined" % (tb_name, fct_name))) | returns the files already present for a functor in a module | script/python/lib/nt2_basics/nt2_tb_props.py | who_is_here | timblechmann/nt2 | 2 | python | def who_is_here(self, fct_name, verbose=False):
tb_name = self.get_tb_name()
head = False
mes = ("for the '%s' module and functor '%s' \nthe following files exist:" % (tb_name, fct_name))
r = []
for f in self.get_rel_tb_fcts_files(tb_name, fct_name):
if re.match('doc|bench|unit', f):
if exist(os.path.join(self.get_md_path(), f)):
r += os.path.abspath(os.path.join(self.get_md_path(), f))
if verbose:
head = self.__print_cond(head, mes)
print((' %s' % os.path.abspath(os.path.join(self.get_md_path(), f))))
elif exist(os.path.join(self.get_tb_path(), f)):
r += os.path.abspath(os.path.join(self.get_tb_path(), f))
if verbose:
head = self.__print_cond(head, mes)
print((' %s' % os.path.abspath(os.path.join(self.get_tb_path(), f))))
if ((not head) and verbose):
print(("for the '%s' module and functor '%s', no files are defined" % (tb_name, fct_name))) | def who_is_here(self, fct_name, verbose=False):
tb_name = self.get_tb_name()
head = False
mes = ("for the '%s' module and functor '%s' \nthe following files exist:" % (tb_name, fct_name))
r = []
for f in self.get_rel_tb_fcts_files(tb_name, fct_name):
if re.match('doc|bench|unit', f):
if exist(os.path.join(self.get_md_path(), f)):
r += os.path.abspath(os.path.join(self.get_md_path(), f))
if verbose:
head = self.__print_cond(head, mes)
print((' %s' % os.path.abspath(os.path.join(self.get_md_path(), f))))
elif exist(os.path.join(self.get_tb_path(), f)):
r += os.path.abspath(os.path.join(self.get_tb_path(), f))
if verbose:
head = self.__print_cond(head, mes)
print((' %s' % os.path.abspath(os.path.join(self.get_tb_path(), f))))
if ((not head) and verbose):
print(("for the '%s' module and functor '%s', no files are defined" % (tb_name, fct_name)))<|docstring|>returns the files already present for a functor in a module<|endoftext|> |
8a4192190ca66ea06f8367c008649ef0440c166208ec8f466735b582f2d6d8d2 | def who_is_missing(self, fct_name, verbose=False):
'returns what files are potentially missing for a functor in a module'
tb_name = self.get_tb_name()
head = False
mes = ("for the '%s' module and functor '%s' \nthe following files are not defined:" % (tb_name, fct_name))
r = []
for f in self.get_rel_tb_fcts_files(tb_name, fct_name):
if re.match('doc|bench|unit', f):
if (not exist(os.path.join(self.get_md_path(), f))):
r += os.path.abspath(os.path.join(self.get_md_path(), f))
if verbose:
head = self.__print_cond(head, mes)
print((' %s' % os.path.abspath(os.path.join(self.get_md_path(), f))))
elif (not exist(os.path.join(self.get_tb_path(), f))):
r += os.path.abspath(os.path.join(self.get_tb_path(), f))
if verbose:
head = self.__print_cond(head, mes)
print((' %s' % os.path.abspath(os.path.join(self.get_tb_path(), f))))
if ((not head) and verbose):
print(("for the '%s' module and functor '%s', no files are missing" % (tb_name, fct_name))) | returns what files are potentially missing for a functor in a module | script/python/lib/nt2_basics/nt2_tb_props.py | who_is_missing | timblechmann/nt2 | 2 | python | def who_is_missing(self, fct_name, verbose=False):
tb_name = self.get_tb_name()
head = False
mes = ("for the '%s' module and functor '%s' \nthe following files are not defined:" % (tb_name, fct_name))
r = []
for f in self.get_rel_tb_fcts_files(tb_name, fct_name):
if re.match('doc|bench|unit', f):
if (not exist(os.path.join(self.get_md_path(), f))):
r += os.path.abspath(os.path.join(self.get_md_path(), f))
if verbose:
head = self.__print_cond(head, mes)
print((' %s' % os.path.abspath(os.path.join(self.get_md_path(), f))))
elif (not exist(os.path.join(self.get_tb_path(), f))):
r += os.path.abspath(os.path.join(self.get_tb_path(), f))
if verbose:
head = self.__print_cond(head, mes)
print((' %s' % os.path.abspath(os.path.join(self.get_tb_path(), f))))
if ((not head) and verbose):
print(("for the '%s' module and functor '%s', no files are missing" % (tb_name, fct_name))) | def who_is_missing(self, fct_name, verbose=False):
tb_name = self.get_tb_name()
head = False
mes = ("for the '%s' module and functor '%s' \nthe following files are not defined:" % (tb_name, fct_name))
r = []
for f in self.get_rel_tb_fcts_files(tb_name, fct_name):
if re.match('doc|bench|unit', f):
if (not exist(os.path.join(self.get_md_path(), f))):
r += os.path.abspath(os.path.join(self.get_md_path(), f))
if verbose:
head = self.__print_cond(head, mes)
print((' %s' % os.path.abspath(os.path.join(self.get_md_path(), f))))
elif (not exist(os.path.join(self.get_tb_path(), f))):
r += os.path.abspath(os.path.join(self.get_tb_path(), f))
if verbose:
head = self.__print_cond(head, mes)
print((' %s' % os.path.abspath(os.path.join(self.get_tb_path(), f))))
if ((not head) and verbose):
print(("for the '%s' module and functor '%s', no files are missing" % (tb_name, fct_name)))<|docstring|>returns what files are potentially missing for a functor in a module<|endoftext|> |
be46511750ba5c2d82e31a7972ec65738d14afa7384f12ac5b742d7646075318 | @abstractmethod
def load(self) -> None:
'\n Initialize the recognizer assets if needed.\n\n (e.g. machine learning models)\n ' | Initialize the recognizer assets if needed.
(e.g. machine learning models) | presidio-analyzer/presidio_analyzer/entity_recognizer.py | load | omri374/presidio | 68 | python | @abstractmethod
def load(self) -> None:
'\n Initialize the recognizer assets if needed.\n\n (e.g. machine learning models)\n ' | @abstractmethod
def load(self) -> None:
'\n Initialize the recognizer assets if needed.\n\n (e.g. machine learning models)\n '<|docstring|>Initialize the recognizer assets if needed.
(e.g. machine learning models)<|endoftext|> |
9a614dc2c993103b445166340debb30a5adbe561f374962eeea5ee08e038cc1d | @abstractmethod
def analyze(self, text: str, entities: List[str], nlp_artifacts: NlpArtifacts) -> List[RecognizerResult]:
'\n Analyze text to identify entities.\n\n :param text: The text to be analyzed\n :param entities: The list of entities this recognizer is able to detect\n :param nlp_artifacts: A group of attributes which are the result of\n an NLP process over the input text.\n :return: List of results detected by this recognizer.\n '
return None | Analyze text to identify entities.
:param text: The text to be analyzed
:param entities: The list of entities this recognizer is able to detect
:param nlp_artifacts: A group of attributes which are the result of
an NLP process over the input text.
:return: List of results detected by this recognizer. | presidio-analyzer/presidio_analyzer/entity_recognizer.py | analyze | omri374/presidio | 68 | python | @abstractmethod
def analyze(self, text: str, entities: List[str], nlp_artifacts: NlpArtifacts) -> List[RecognizerResult]:
'\n Analyze text to identify entities.\n\n :param text: The text to be analyzed\n :param entities: The list of entities this recognizer is able to detect\n :param nlp_artifacts: A group of attributes which are the result of\n an NLP process over the input text.\n :return: List of results detected by this recognizer.\n '
return None | @abstractmethod
def analyze(self, text: str, entities: List[str], nlp_artifacts: NlpArtifacts) -> List[RecognizerResult]:
'\n Analyze text to identify entities.\n\n :param text: The text to be analyzed\n :param entities: The list of entities this recognizer is able to detect\n :param nlp_artifacts: A group of attributes which are the result of\n an NLP process over the input text.\n :return: List of results detected by this recognizer.\n '
return None<|docstring|>Analyze text to identify entities.
:param text: The text to be analyzed
:param entities: The list of entities this recognizer is able to detect
:param nlp_artifacts: A group of attributes which are the result of
an NLP process over the input text.
:return: List of results detected by this recognizer.<|endoftext|> |
c08e2950d90d722ba79955712fd49f2978e14347d64c2008a090b66f6dea8b01 | def enhance_using_context(self, text: str, raw_recognizer_results: List[RecognizerResult], other_raw_recognizer_results: List[RecognizerResult], nlp_artifacts: NlpArtifacts, context: Optional[List[str]]=None) -> List[RecognizerResult]:
"Enhance confidence score using context of the entity.\n\n Override this method in derived class in case a custom logic\n is needed, otherwise return value will be equal to\n raw_results.\n\n in case a result score is boosted, derived class need to update\n result.recognition_metadata[RecognizerResult.IS_SCORE_ENHANCED_BY_CONTEXT_KEY]\n\n :param text: The actual text that was analyzed\n :param raw_recognizer_results: This recognizer's results, to be updated\n based on recognizer specific context.\n :param other_raw_recognizer_results: Other recognizer results matched in\n the given text to allow related entity context enhancement\n :param nlp_artifacts: The nlp artifacts contains elements\n such as lemmatized tokens for better\n accuracy of the context enhancement process\n :param context: list of context words\n "
return raw_recognizer_results | Enhance confidence score using context of the entity.
Override this method in derived class in case a custom logic
is needed, otherwise return value will be equal to
raw_results.
in case a result score is boosted, derived class need to update
result.recognition_metadata[RecognizerResult.IS_SCORE_ENHANCED_BY_CONTEXT_KEY]
:param text: The actual text that was analyzed
:param raw_recognizer_results: This recognizer's results, to be updated
based on recognizer specific context.
:param other_raw_recognizer_results: Other recognizer results matched in
the given text to allow related entity context enhancement
:param nlp_artifacts: The nlp artifacts contains elements
such as lemmatized tokens for better
accuracy of the context enhancement process
:param context: list of context words | presidio-analyzer/presidio_analyzer/entity_recognizer.py | enhance_using_context | omri374/presidio | 68 | python | def enhance_using_context(self, text: str, raw_recognizer_results: List[RecognizerResult], other_raw_recognizer_results: List[RecognizerResult], nlp_artifacts: NlpArtifacts, context: Optional[List[str]]=None) -> List[RecognizerResult]:
"Enhance confidence score using context of the entity.\n\n Override this method in derived class in case a custom logic\n is needed, otherwise return value will be equal to\n raw_results.\n\n in case a result score is boosted, derived class need to update\n result.recognition_metadata[RecognizerResult.IS_SCORE_ENHANCED_BY_CONTEXT_KEY]\n\n :param text: The actual text that was analyzed\n :param raw_recognizer_results: This recognizer's results, to be updated\n based on recognizer specific context.\n :param other_raw_recognizer_results: Other recognizer results matched in\n the given text to allow related entity context enhancement\n :param nlp_artifacts: The nlp artifacts contains elements\n such as lemmatized tokens for better\n accuracy of the context enhancement process\n :param context: list of context words\n "
return raw_recognizer_results | def enhance_using_context(self, text: str, raw_recognizer_results: List[RecognizerResult], other_raw_recognizer_results: List[RecognizerResult], nlp_artifacts: NlpArtifacts, context: Optional[List[str]]=None) -> List[RecognizerResult]:
"Enhance confidence score using context of the entity.\n\n Override this method in derived class in case a custom logic\n is needed, otherwise return value will be equal to\n raw_results.\n\n in case a result score is boosted, derived class need to update\n result.recognition_metadata[RecognizerResult.IS_SCORE_ENHANCED_BY_CONTEXT_KEY]\n\n :param text: The actual text that was analyzed\n :param raw_recognizer_results: This recognizer's results, to be updated\n based on recognizer specific context.\n :param other_raw_recognizer_results: Other recognizer results matched in\n the given text to allow related entity context enhancement\n :param nlp_artifacts: The nlp artifacts contains elements\n such as lemmatized tokens for better\n accuracy of the context enhancement process\n :param context: list of context words\n "
return raw_recognizer_results<|docstring|>Enhance confidence score using context of the entity.
Override this method in derived class in case a custom logic
is needed, otherwise return value will be equal to
raw_results.
in case a result score is boosted, derived class need to update
result.recognition_metadata[RecognizerResult.IS_SCORE_ENHANCED_BY_CONTEXT_KEY]
:param text: The actual text that was analyzed
:param raw_recognizer_results: This recognizer's results, to be updated
based on recognizer specific context.
:param other_raw_recognizer_results: Other recognizer results matched in
the given text to allow related entity context enhancement
:param nlp_artifacts: The nlp artifacts contains elements
such as lemmatized tokens for better
accuracy of the context enhancement process
:param context: list of context words<|endoftext|> |
92ed49b6317e14ee7756244668249ddcedb4ea4c94a354c75aef49eae6512f97 | def get_supported_entities(self) -> List[str]:
'\n Return the list of entities this recognizer can identify.\n\n :return: A list of the supported entities by this recognizer\n '
return self.supported_entities | Return the list of entities this recognizer can identify.
:return: A list of the supported entities by this recognizer | presidio-analyzer/presidio_analyzer/entity_recognizer.py | get_supported_entities | omri374/presidio | 68 | python | def get_supported_entities(self) -> List[str]:
'\n Return the list of entities this recognizer can identify.\n\n :return: A list of the supported entities by this recognizer\n '
return self.supported_entities | def get_supported_entities(self) -> List[str]:
'\n Return the list of entities this recognizer can identify.\n\n :return: A list of the supported entities by this recognizer\n '
return self.supported_entities<|docstring|>Return the list of entities this recognizer can identify.
:return: A list of the supported entities by this recognizer<|endoftext|> |
1ec9ecb9aafbacc8315a76913195f17bdccd1c042f68244b7c16cffd44a5f60c | def get_supported_language(self) -> str:
'\n Return the language this recognizer can support.\n\n :return: A list of the supported language by this recognizer\n '
return self.supported_language | Return the language this recognizer can support.
:return: A list of the supported language by this recognizer | presidio-analyzer/presidio_analyzer/entity_recognizer.py | get_supported_language | omri374/presidio | 68 | python | def get_supported_language(self) -> str:
'\n Return the language this recognizer can support.\n\n :return: A list of the supported language by this recognizer\n '
return self.supported_language | def get_supported_language(self) -> str:
'\n Return the language this recognizer can support.\n\n :return: A list of the supported language by this recognizer\n '
return self.supported_language<|docstring|>Return the language this recognizer can support.
:return: A list of the supported language by this recognizer<|endoftext|> |
a59c7c8a64d7e34cc945692e80257469523250c7ef14e4dc57f7b5e2ef9a0b07 | def get_version(self) -> str:
'\n Return the version of this recognizer.\n\n :return: The current version of this recognizer\n '
return self.version | Return the version of this recognizer.
:return: The current version of this recognizer | presidio-analyzer/presidio_analyzer/entity_recognizer.py | get_version | omri374/presidio | 68 | python | def get_version(self) -> str:
'\n Return the version of this recognizer.\n\n :return: The current version of this recognizer\n '
return self.version | def get_version(self) -> str:
'\n Return the version of this recognizer.\n\n :return: The current version of this recognizer\n '
return self.version<|docstring|>Return the version of this recognizer.
:return: The current version of this recognizer<|endoftext|> |
975adb362d135184e60fd77156661631747b91eedb87ee70f3b5b5fa3174a457 | def to_dict(self) -> Dict:
'\n Serialize self to dictionary.\n\n :return: a dictionary\n '
return_dict = {'supported_entities': self.supported_entities, 'supported_language': self.supported_language, 'name': self.name, 'version': self.version}
return return_dict | Serialize self to dictionary.
:return: a dictionary | presidio-analyzer/presidio_analyzer/entity_recognizer.py | to_dict | omri374/presidio | 68 | python | def to_dict(self) -> Dict:
'\n Serialize self to dictionary.\n\n :return: a dictionary\n '
return_dict = {'supported_entities': self.supported_entities, 'supported_language': self.supported_language, 'name': self.name, 'version': self.version}
return return_dict | def to_dict(self) -> Dict:
'\n Serialize self to dictionary.\n\n :return: a dictionary\n '
return_dict = {'supported_entities': self.supported_entities, 'supported_language': self.supported_language, 'name': self.name, 'version': self.version}
return return_dict<|docstring|>Serialize self to dictionary.
:return: a dictionary<|endoftext|> |
c7c7ff7961145e4e871e4be5c49e1fd796240e39bf84407817fcec547faae74f | @classmethod
def from_dict(cls, entity_recognizer_dict: Dict) -> 'EntityRecognizer':
'\n Create EntityRecognizer from a dict input.\n\n :param entity_recognizer_dict: Dict containing keys and values for instantiation\n '
return cls(**entity_recognizer_dict) | Create EntityRecognizer from a dict input.
:param entity_recognizer_dict: Dict containing keys and values for instantiation | presidio-analyzer/presidio_analyzer/entity_recognizer.py | from_dict | omri374/presidio | 68 | python | @classmethod
def from_dict(cls, entity_recognizer_dict: Dict) -> 'EntityRecognizer':
'\n Create EntityRecognizer from a dict input.\n\n :param entity_recognizer_dict: Dict containing keys and values for instantiation\n '
return cls(**entity_recognizer_dict) | @classmethod
def from_dict(cls, entity_recognizer_dict: Dict) -> 'EntityRecognizer':
'\n Create EntityRecognizer from a dict input.\n\n :param entity_recognizer_dict: Dict containing keys and values for instantiation\n '
return cls(**entity_recognizer_dict)<|docstring|>Create EntityRecognizer from a dict input.
:param entity_recognizer_dict: Dict containing keys and values for instantiation<|endoftext|> |
42a03f972c3fcdb4cdc91f8b016edbda9450cd3488b33a9de69dd69a107db2a1 | @staticmethod
def remove_duplicates(results: List[RecognizerResult]) -> List[RecognizerResult]:
'\n Remove duplicate results.\n\n Remove duplicates in case the two results\n have identical start and ends and types.\n :param results: List[RecognizerResult]\n :return: List[RecognizerResult]\n '
results = list(set(results))
results = sorted(results, key=(lambda x: ((- x.score), x.start, (- (x.end - x.start)))))
filtered_results = []
for result in results:
if (result.score == 0):
continue
to_keep = (result not in filtered_results)
if to_keep:
for filtered in filtered_results:
if (result.contained_in(filtered) and (result.entity_type == filtered.entity_type)):
to_keep = False
break
if to_keep:
filtered_results.append(result)
return filtered_results | Remove duplicate results.
Remove duplicates in case the two results
have identical start and ends and types.
:param results: List[RecognizerResult]
:return: List[RecognizerResult] | presidio-analyzer/presidio_analyzer/entity_recognizer.py | remove_duplicates | omri374/presidio | 68 | python | @staticmethod
def remove_duplicates(results: List[RecognizerResult]) -> List[RecognizerResult]:
'\n Remove duplicate results.\n\n Remove duplicates in case the two results\n have identical start and ends and types.\n :param results: List[RecognizerResult]\n :return: List[RecognizerResult]\n '
results = list(set(results))
results = sorted(results, key=(lambda x: ((- x.score), x.start, (- (x.end - x.start)))))
filtered_results = []
for result in results:
if (result.score == 0):
continue
to_keep = (result not in filtered_results)
if to_keep:
for filtered in filtered_results:
if (result.contained_in(filtered) and (result.entity_type == filtered.entity_type)):
to_keep = False
break
if to_keep:
filtered_results.append(result)
return filtered_results | @staticmethod
def remove_duplicates(results: List[RecognizerResult]) -> List[RecognizerResult]:
'\n Remove duplicate results.\n\n Remove duplicates in case the two results\n have identical start and ends and types.\n :param results: List[RecognizerResult]\n :return: List[RecognizerResult]\n '
results = list(set(results))
results = sorted(results, key=(lambda x: ((- x.score), x.start, (- (x.end - x.start)))))
filtered_results = []
for result in results:
if (result.score == 0):
continue
to_keep = (result not in filtered_results)
if to_keep:
for filtered in filtered_results:
if (result.contained_in(filtered) and (result.entity_type == filtered.entity_type)):
to_keep = False
break
if to_keep:
filtered_results.append(result)
return filtered_results<|docstring|>Remove duplicate results.
Remove duplicates in case the two results
have identical start and ends and types.
:param results: List[RecognizerResult]
:return: List[RecognizerResult]<|endoftext|> |
0b9896a106156e62dd8edebb55b21a48b81bd977e9a181f2a78898f2ae3df322 | def __init__(self, coords):
'\n Initializes a Simplex from vertex coordinates.\n\n Args:\n coords ([[float]]): Coords of the vertices of the simplex. E.g.,\n [[1, 2, 3], [2, 4, 5], [6, 7, 8], [8, 9, 10].\n '
self._coords = np.array(coords)
(self.simplex_dim, self.space_dim) = self._coords.shape
self.origin = self._coords[(- 1)]
if (self.simplex_dim == (self.space_dim + 1)):
self.T = (self._coords[:(- 1)] - self.origin)
self.T_inv = np.linalg.inv(self.T) | Initializes a Simplex from vertex coordinates.
Args:
coords ([[float]]): Coords of the vertices of the simplex. E.g.,
[[1, 2, 3], [2, 4, 5], [6, 7, 8], [8, 9, 10]. | pyhull/simplex.py | __init__ | BerkeleyAutomation/pyhull | 69 | python | def __init__(self, coords):
'\n Initializes a Simplex from vertex coordinates.\n\n Args:\n coords ([[float]]): Coords of the vertices of the simplex. E.g.,\n [[1, 2, 3], [2, 4, 5], [6, 7, 8], [8, 9, 10].\n '
self._coords = np.array(coords)
(self.simplex_dim, self.space_dim) = self._coords.shape
self.origin = self._coords[(- 1)]
if (self.simplex_dim == (self.space_dim + 1)):
self.T = (self._coords[:(- 1)] - self.origin)
self.T_inv = np.linalg.inv(self.T) | def __init__(self, coords):
'\n Initializes a Simplex from vertex coordinates.\n\n Args:\n coords ([[float]]): Coords of the vertices of the simplex. E.g.,\n [[1, 2, 3], [2, 4, 5], [6, 7, 8], [8, 9, 10].\n '
self._coords = np.array(coords)
(self.simplex_dim, self.space_dim) = self._coords.shape
self.origin = self._coords[(- 1)]
if (self.simplex_dim == (self.space_dim + 1)):
self.T = (self._coords[:(- 1)] - self.origin)
self.T_inv = np.linalg.inv(self.T)<|docstring|>Initializes a Simplex from vertex coordinates.
Args:
coords ([[float]]): Coords of the vertices of the simplex. E.g.,
[[1, 2, 3], [2, 4, 5], [6, 7, 8], [8, 9, 10].<|endoftext|> |
e180c078537f261a9676e92623518412f42ac9436b64f51aa93851530456a5e8 | @property
def volume(self):
'\n Volume of the simplex.\n '
return (abs(np.linalg.det(self.T)) / math.factorial(self.space_dim)) | Volume of the simplex. | pyhull/simplex.py | volume | BerkeleyAutomation/pyhull | 69 | python | @property
def volume(self):
'\n \n '
return (abs(np.linalg.det(self.T)) / math.factorial(self.space_dim)) | @property
def volume(self):
'\n \n '
return (abs(np.linalg.det(self.T)) / math.factorial(self.space_dim))<|docstring|>Volume of the simplex.<|endoftext|> |
41af687863c2c2002e67ad356d5d16fd2efe85a3ef88b5b523ee92ff189148cb | def in_simplex(self, point, tolerance=1e-08):
'\n Checks if a point is in the simplex using the standard barycentric\n coordinate system algorithm.\n\n Taking an arbitrary vertex as an origin, we compute the basis for the\n simplex from this origin by subtracting all other vertices from the\n origin. We then project the point into this coordinate system and\n determine the linear decomposition coefficients in this coordinate\n system. If the coeffs satisfy that all coeffs >= 0, the composition\n is in the facet.\n\n Args:\n point ([float]): Point to test\n tolerance (float): Tolerance to test if point is in simplex.\n '
return (self.bary_coords(point) >= (- tolerance)).all() | Checks if a point is in the simplex using the standard barycentric
coordinate system algorithm.
Taking an arbitrary vertex as an origin, we compute the basis for the
simplex from this origin by subtracting all other vertices from the
origin. We then project the point into this coordinate system and
determine the linear decomposition coefficients in this coordinate
system. If the coeffs satisfy that all coeffs >= 0, the composition
is in the facet.
Args:
point ([float]): Point to test
tolerance (float): Tolerance to test if point is in simplex. | pyhull/simplex.py | in_simplex | BerkeleyAutomation/pyhull | 69 | python | def in_simplex(self, point, tolerance=1e-08):
'\n Checks if a point is in the simplex using the standard barycentric\n coordinate system algorithm.\n\n Taking an arbitrary vertex as an origin, we compute the basis for the\n simplex from this origin by subtracting all other vertices from the\n origin. We then project the point into this coordinate system and\n determine the linear decomposition coefficients in this coordinate\n system. If the coeffs satisfy that all coeffs >= 0, the composition\n is in the facet.\n\n Args:\n point ([float]): Point to test\n tolerance (float): Tolerance to test if point is in simplex.\n '
return (self.bary_coords(point) >= (- tolerance)).all() | def in_simplex(self, point, tolerance=1e-08):
'\n Checks if a point is in the simplex using the standard barycentric\n coordinate system algorithm.\n\n Taking an arbitrary vertex as an origin, we compute the basis for the\n simplex from this origin by subtracting all other vertices from the\n origin. We then project the point into this coordinate system and\n determine the linear decomposition coefficients in this coordinate\n system. If the coeffs satisfy that all coeffs >= 0, the composition\n is in the facet.\n\n Args:\n point ([float]): Point to test\n tolerance (float): Tolerance to test if point is in simplex.\n '
return (self.bary_coords(point) >= (- tolerance)).all()<|docstring|>Checks if a point is in the simplex using the standard barycentric
coordinate system algorithm.
Taking an arbitrary vertex as an origin, we compute the basis for the
simplex from this origin by subtracting all other vertices from the
origin. We then project the point into this coordinate system and
determine the linear decomposition coefficients in this coordinate
system. If the coeffs satisfy that all coeffs >= 0, the composition
is in the facet.
Args:
point ([float]): Point to test
tolerance (float): Tolerance to test if point is in simplex.<|endoftext|> |
8d6e3fdb85f0d460a1582e506f4fdac88d7f05414fcaee9c2993705fc4a4927c | @property
def coords(self):
'\n Returns a copy of the vertex coordinates in the simplex.\n '
return self._coords.copy() | Returns a copy of the vertex coordinates in the simplex. | pyhull/simplex.py | coords | BerkeleyAutomation/pyhull | 69 | python | @property
def coords(self):
'\n \n '
return self._coords.copy() | @property
def coords(self):
'\n \n '
return self._coords.copy()<|docstring|>Returns a copy of the vertex coordinates in the simplex.<|endoftext|> |
1659419c2b2378db66e79fef1437b8d06ebc7a7cdde939f1749201866a7a9d53 | def runTest(self):
'This function will update trigger under table node.'
trigger_response = triggers_utils.verify_trigger(self.server, self.db_name, self.trigger_name)
if (not trigger_response):
raise Exception('Could not find the trigger to delete.')
data = {'id': self.trigger_id, 'description': 'This is test comment.'}
response = self.tester.put('{0}{1}/{2}/{3}/{4}/{5}/{6}'.format(self.url, utils.SERVER_GROUP, self.server_id, self.db_id, self.schema_id, self.table_id, self.trigger_id), data=json.dumps(data), follow_redirects=True)
self.assertEquals(response.status_code, 200) | This function will update trigger under table node. | code/venv/lib/python3.6/site-packages/pgadmin4/pgadmin/browser/server_groups/servers/databases/schemas/tables/triggers/tests/test_triggers_put.py | runTest | jhkuang11/UniTrade | 0 | python | def runTest(self):
trigger_response = triggers_utils.verify_trigger(self.server, self.db_name, self.trigger_name)
if (not trigger_response):
raise Exception('Could not find the trigger to delete.')
data = {'id': self.trigger_id, 'description': 'This is test comment.'}
response = self.tester.put('{0}{1}/{2}/{3}/{4}/{5}/{6}'.format(self.url, utils.SERVER_GROUP, self.server_id, self.db_id, self.schema_id, self.table_id, self.trigger_id), data=json.dumps(data), follow_redirects=True)
self.assertEquals(response.status_code, 200) | def runTest(self):
trigger_response = triggers_utils.verify_trigger(self.server, self.db_name, self.trigger_name)
if (not trigger_response):
raise Exception('Could not find the trigger to delete.')
data = {'id': self.trigger_id, 'description': 'This is test comment.'}
response = self.tester.put('{0}{1}/{2}/{3}/{4}/{5}/{6}'.format(self.url, utils.SERVER_GROUP, self.server_id, self.db_id, self.schema_id, self.table_id, self.trigger_id), data=json.dumps(data), follow_redirects=True)
self.assertEquals(response.status_code, 200)<|docstring|>This function will update trigger under table node.<|endoftext|> |
e98f83ae33364980bff39ad228bde5f2ca7f4e580b5c863054dfcef5f2843f22 | def get_horizontal_rotation(self):
'if self.object_type == "{item}":\n return float(self._object_data[0][1][1])\n elif self.object_type == "{teki}":\n return float(self._object_data[2])\n elif self.object_type == "{pelt}":\n return float(self._object_data[0][1][1])\n else:\n return None'
return self._horizontal_rotation | if self.object_type == "{item}":
return float(self._object_data[0][1][1])
elif self.object_type == "{teki}":
return float(self._object_data[2])
elif self.object_type == "{pelt}":
return float(self._object_data[0][1][1])
else:
return None | pikmingen.py | get_horizontal_rotation | RenolY2/pikmin-tools | 4 | python | def get_horizontal_rotation(self):
'if self.object_type == "{item}":\n return float(self._object_data[0][1][1])\n elif self.object_type == "{teki}":\n return float(self._object_data[2])\n elif self.object_type == "{pelt}":\n return float(self._object_data[0][1][1])\n else:\n return None'
return self._horizontal_rotation | def get_horizontal_rotation(self):
'if self.object_type == "{item}":\n return float(self._object_data[0][1][1])\n elif self.object_type == "{teki}":\n return float(self._object_data[2])\n elif self.object_type == "{pelt}":\n return float(self._object_data[0][1][1])\n else:\n return None'
return self._horizontal_rotation<|docstring|>if self.object_type == "{item}":
return float(self._object_data[0][1][1])
elif self.object_type == "{teki}":
return float(self._object_data[2])
elif self.object_type == "{pelt}":
return float(self._object_data[0][1][1])
else:
return None<|endoftext|> |
7cd7a16cd574e8cb8127fc567043cc7c0cce5b2c620832dd361190abf74455ae | def __getitem__(self, key):
'\n Return the phase series object for the scenario.\n\n Args:\n key (str): scenario name\n\n Raises:\n ScenarioNotFoundError: the scenario is not registered\n\n Returns:\n covsirphy.PhaseSeries\n '
if (key in self._tracker_dict):
return self._tracker_dict[key].series
raise ScenarioNotFoundError(key) | Return the phase series object for the scenario.
Args:
key (str): scenario name
Raises:
ScenarioNotFoundError: the scenario is not registered
Returns:
covsirphy.PhaseSeries | covsirphy/analysis/scenario.py | __getitem__ | fadelrahman31/modified-covsirphhy | 0 | python | def __getitem__(self, key):
'\n Return the phase series object for the scenario.\n\n Args:\n key (str): scenario name\n\n Raises:\n ScenarioNotFoundError: the scenario is not registered\n\n Returns:\n covsirphy.PhaseSeries\n '
if (key in self._tracker_dict):
return self._tracker_dict[key].series
raise ScenarioNotFoundError(key) | def __getitem__(self, key):
'\n Return the phase series object for the scenario.\n\n Args:\n key (str): scenario name\n\n Raises:\n ScenarioNotFoundError: the scenario is not registered\n\n Returns:\n covsirphy.PhaseSeries\n '
if (key in self._tracker_dict):
return self._tracker_dict[key].series
raise ScenarioNotFoundError(key)<|docstring|>Return the phase series object for the scenario.
Args:
key (str): scenario name
Raises:
ScenarioNotFoundError: the scenario is not registered
Returns:
covsirphy.PhaseSeries<|endoftext|> |
30eabbb1a8146af9eb75289a055c2864a15529a124d773738b4c3444339d5fca | def __setitem__(self, key, value):
'\n Register a phase series.\n\n Args:\n key (str): scenario name\n value (covsirphy.PhaseSeries): phase series object\n '
self._tracker_dict[key] = ParamTracker(self._data.records(extras=False), value, area=self.area, tau=self.tau) | Register a phase series.
Args:
key (str): scenario name
value (covsirphy.PhaseSeries): phase series object | covsirphy/analysis/scenario.py | __setitem__ | fadelrahman31/modified-covsirphhy | 0 | python | def __setitem__(self, key, value):
'\n Register a phase series.\n\n Args:\n key (str): scenario name\n value (covsirphy.PhaseSeries): phase series object\n '
self._tracker_dict[key] = ParamTracker(self._data.records(extras=False), value, area=self.area, tau=self.tau) | def __setitem__(self, key, value):
'\n Register a phase series.\n\n Args:\n key (str): scenario name\n value (covsirphy.PhaseSeries): phase series object\n '
self._tracker_dict[key] = ParamTracker(self._data.records(extras=False), value, area=self.area, tau=self.tau)<|docstring|>Register a phase series.
Args:
key (str): scenario name
value (covsirphy.PhaseSeries): phase series object<|endoftext|> |
64167e64088221ad3abea142a83294346ff96a99706d78eda35e54070c89a0b4 | @property
def first_date(self):
'\n str: the first date of the records\n '
return self._data.first_date | str: the first date of the records | covsirphy/analysis/scenario.py | first_date | fadelrahman31/modified-covsirphhy | 0 | python | @property
def first_date(self):
'\n \n '
return self._data.first_date | @property
def first_date(self):
'\n \n '
return self._data.first_date<|docstring|>str: the first date of the records<|endoftext|> |
e0a8c1714c6073ce2e1a5fc244104f6f5c442396f1d377c6e93201fad6928a81 | @property
def last_date(self):
'\n str: the last date of the records\n '
return self._data.last_date | str: the last date of the records | covsirphy/analysis/scenario.py | last_date | fadelrahman31/modified-covsirphhy | 0 | python | @property
def last_date(self):
'\n \n '
return self._data.last_date | @property
def last_date(self):
'\n \n '
return self._data.last_date<|docstring|>str: the last date of the records<|endoftext|> |
a0047146083913f4a3dc273f887996d2551259b0557d44a06eb02c87d85f4cc6 | @property
def today(self):
'\n str: reference date to determine whether a phase is a past phase or a future phase\n '
return self._data.today | str: reference date to determine whether a phase is a past phase or a future phase | covsirphy/analysis/scenario.py | today | fadelrahman31/modified-covsirphhy | 0 | python | @property
def today(self):
'\n \n '
return self._data.today | @property
def today(self):
'\n \n '
return self._data.today<|docstring|>str: reference date to determine whether a phase is a past phase or a future phase<|endoftext|> |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.