body_hash
stringlengths 64
64
| body
stringlengths 23
109k
| docstring
stringlengths 1
57k
| path
stringlengths 4
198
| name
stringlengths 1
115
| repository_name
stringlengths 7
111
| repository_stars
float64 0
191k
| lang
stringclasses 1
value | body_without_docstring
stringlengths 14
108k
| unified
stringlengths 45
133k
|
|---|---|---|---|---|---|---|---|---|---|
c81d6fdd2b6abeaf19c7c6a77b9c0dfb74bdb5697f71fec317eac3027b676dba
|
def forward(self, outputs, targets):
" This performs the loss computation.\n Parameters:\n outputs: dict of tensors, see the output specification of the model for the format\n targets: list of dicts, such that len(targets) == batch_size.\n The expected keys in each dict depends on the losses applied, see each loss' doc\n "
outputs_without_aux = {k: v for (k, v) in outputs.items() if (k != 'aux_outputs')}
indices = self.matcher(outputs_without_aux, targets)
num_boxes = sum((len(t['labels']) for t in targets))
num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=next(iter(outputs.values())).device)
if is_dist_avail_and_initialized():
torch.distributed.all_reduce(num_boxes)
num_boxes = torch.clamp((num_boxes / get_world_size()), min=1).item()
losses = {}
for loss in self.losses:
losses.update(self.get_loss(loss, outputs, targets, indices, num_boxes))
if ('aux_outputs' in outputs):
for (i, aux_outputs) in enumerate(outputs['aux_outputs']):
indices = self.matcher(aux_outputs, targets)
for loss in self.losses:
if (loss == 'masks'):
continue
kwargs = {}
if (loss == 'labels'):
kwargs = {'log': False}
l_dict = self.get_loss(loss, aux_outputs, targets, indices, num_boxes, **kwargs)
l_dict = {(k + f'_{i}'): v for (k, v) in l_dict.items()}
losses.update(l_dict)
return losses
|
This performs the loss computation.
Parameters:
outputs: dict of tensors, see the output specification of the model for the format
targets: list of dicts, such that len(targets) == batch_size.
The expected keys in each dict depends on the losses applied, see each loss' doc
|
models/vistr.py
|
forward
|
rbli-john/VisTR
| 646
|
python
|
def forward(self, outputs, targets):
" This performs the loss computation.\n Parameters:\n outputs: dict of tensors, see the output specification of the model for the format\n targets: list of dicts, such that len(targets) == batch_size.\n The expected keys in each dict depends on the losses applied, see each loss' doc\n "
outputs_without_aux = {k: v for (k, v) in outputs.items() if (k != 'aux_outputs')}
indices = self.matcher(outputs_without_aux, targets)
num_boxes = sum((len(t['labels']) for t in targets))
num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=next(iter(outputs.values())).device)
if is_dist_avail_and_initialized():
torch.distributed.all_reduce(num_boxes)
num_boxes = torch.clamp((num_boxes / get_world_size()), min=1).item()
losses = {}
for loss in self.losses:
losses.update(self.get_loss(loss, outputs, targets, indices, num_boxes))
if ('aux_outputs' in outputs):
for (i, aux_outputs) in enumerate(outputs['aux_outputs']):
indices = self.matcher(aux_outputs, targets)
for loss in self.losses:
if (loss == 'masks'):
continue
kwargs = {}
if (loss == 'labels'):
kwargs = {'log': False}
l_dict = self.get_loss(loss, aux_outputs, targets, indices, num_boxes, **kwargs)
l_dict = {(k + f'_{i}'): v for (k, v) in l_dict.items()}
losses.update(l_dict)
return losses
|
def forward(self, outputs, targets):
" This performs the loss computation.\n Parameters:\n outputs: dict of tensors, see the output specification of the model for the format\n targets: list of dicts, such that len(targets) == batch_size.\n The expected keys in each dict depends on the losses applied, see each loss' doc\n "
outputs_without_aux = {k: v for (k, v) in outputs.items() if (k != 'aux_outputs')}
indices = self.matcher(outputs_without_aux, targets)
num_boxes = sum((len(t['labels']) for t in targets))
num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=next(iter(outputs.values())).device)
if is_dist_avail_and_initialized():
torch.distributed.all_reduce(num_boxes)
num_boxes = torch.clamp((num_boxes / get_world_size()), min=1).item()
losses = {}
for loss in self.losses:
losses.update(self.get_loss(loss, outputs, targets, indices, num_boxes))
if ('aux_outputs' in outputs):
for (i, aux_outputs) in enumerate(outputs['aux_outputs']):
indices = self.matcher(aux_outputs, targets)
for loss in self.losses:
if (loss == 'masks'):
continue
kwargs = {}
if (loss == 'labels'):
kwargs = {'log': False}
l_dict = self.get_loss(loss, aux_outputs, targets, indices, num_boxes, **kwargs)
l_dict = {(k + f'_{i}'): v for (k, v) in l_dict.items()}
losses.update(l_dict)
return losses<|docstring|>This performs the loss computation.
Parameters:
outputs: dict of tensors, see the output specification of the model for the format
targets: list of dicts, such that len(targets) == batch_size.
The expected keys in each dict depends on the losses applied, see each loss' doc<|endoftext|>
|
989acddf3b588628bf564f31ae1a23408eb0b244afa014e9df0d341ae41de672
|
@torch.no_grad()
def forward(self, outputs, target_sizes):
' Perform the computation\n Parameters:\n outputs: raw outputs of the model\n target_sizes: tensor of dimension [batch_size x 2] containing the size of each images of the batch\n For evaluation, this must be the original image size (before any data augmentation)\n For visualization, this should be the image size after data augment, but before padding\n '
(out_logits, out_bbox) = (outputs['pred_logits'], outputs['pred_boxes'])
assert (len(out_logits) == len(target_sizes))
assert (target_sizes.shape[1] == 2)
prob = F.softmax(out_logits, (- 1))
(scores, labels) = prob[(..., :(- 1))].max((- 1))
boxes = box_ops.box_cxcywh_to_xyxy(out_bbox)
(img_h, img_w) = target_sizes.unbind(1)
scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1)
boxes = (boxes * scale_fct[(:, None, :)])
results = [{'scores': s, 'labels': l, 'boxes': b} for (s, l, b) in zip(scores, labels, boxes)]
return results
|
Perform the computation
Parameters:
outputs: raw outputs of the model
target_sizes: tensor of dimension [batch_size x 2] containing the size of each images of the batch
For evaluation, this must be the original image size (before any data augmentation)
For visualization, this should be the image size after data augment, but before padding
|
models/vistr.py
|
forward
|
rbli-john/VisTR
| 646
|
python
|
@torch.no_grad()
def forward(self, outputs, target_sizes):
' Perform the computation\n Parameters:\n outputs: raw outputs of the model\n target_sizes: tensor of dimension [batch_size x 2] containing the size of each images of the batch\n For evaluation, this must be the original image size (before any data augmentation)\n For visualization, this should be the image size after data augment, but before padding\n '
(out_logits, out_bbox) = (outputs['pred_logits'], outputs['pred_boxes'])
assert (len(out_logits) == len(target_sizes))
assert (target_sizes.shape[1] == 2)
prob = F.softmax(out_logits, (- 1))
(scores, labels) = prob[(..., :(- 1))].max((- 1))
boxes = box_ops.box_cxcywh_to_xyxy(out_bbox)
(img_h, img_w) = target_sizes.unbind(1)
scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1)
boxes = (boxes * scale_fct[(:, None, :)])
results = [{'scores': s, 'labels': l, 'boxes': b} for (s, l, b) in zip(scores, labels, boxes)]
return results
|
@torch.no_grad()
def forward(self, outputs, target_sizes):
' Perform the computation\n Parameters:\n outputs: raw outputs of the model\n target_sizes: tensor of dimension [batch_size x 2] containing the size of each images of the batch\n For evaluation, this must be the original image size (before any data augmentation)\n For visualization, this should be the image size after data augment, but before padding\n '
(out_logits, out_bbox) = (outputs['pred_logits'], outputs['pred_boxes'])
assert (len(out_logits) == len(target_sizes))
assert (target_sizes.shape[1] == 2)
prob = F.softmax(out_logits, (- 1))
(scores, labels) = prob[(..., :(- 1))].max((- 1))
boxes = box_ops.box_cxcywh_to_xyxy(out_bbox)
(img_h, img_w) = target_sizes.unbind(1)
scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1)
boxes = (boxes * scale_fct[(:, None, :)])
results = [{'scores': s, 'labels': l, 'boxes': b} for (s, l, b) in zip(scores, labels, boxes)]
return results<|docstring|>Perform the computation
Parameters:
outputs: raw outputs of the model
target_sizes: tensor of dimension [batch_size x 2] containing the size of each images of the batch
For evaluation, this must be the original image size (before any data augmentation)
For visualization, this should be the image size after data augment, but before padding<|endoftext|>
|
350085eb32d92332492807bb6d7a5fc46b27299bdd884bd440d1d61f8b1885bf
|
def json_to_obj(obj_type, json_data):
"Converts json data to an object with a given object type\n\n :param obj_type: converted object's type that is determined dynamically\n :param json_data: json string or json object\n :return: object\n "
def dic2obj(x):
if isinstance(x, dict):
return type(obj_type, (), {k: dic2obj(v) for (k, v) in six.iteritems(x)})
else:
return x
if isinstance(json_data, six.string_types):
json_data = jsonutils.loads(json_data)
return dic2obj(json_data)
|
Converts json data to an object with a given object type
:param obj_type: converted object's type that is determined dynamically
:param json_data: json string or json object
:return: object
|
infoblox_client/utils.py
|
json_to_obj
|
bondar-pavel/infoblox-client
| 1
|
python
|
def json_to_obj(obj_type, json_data):
"Converts json data to an object with a given object type\n\n :param obj_type: converted object's type that is determined dynamically\n :param json_data: json string or json object\n :return: object\n "
def dic2obj(x):
if isinstance(x, dict):
return type(obj_type, (), {k: dic2obj(v) for (k, v) in six.iteritems(x)})
else:
return x
if isinstance(json_data, six.string_types):
json_data = jsonutils.loads(json_data)
return dic2obj(json_data)
|
def json_to_obj(obj_type, json_data):
"Converts json data to an object with a given object type\n\n :param obj_type: converted object's type that is determined dynamically\n :param json_data: json string or json object\n :return: object\n "
def dic2obj(x):
if isinstance(x, dict):
return type(obj_type, (), {k: dic2obj(v) for (k, v) in six.iteritems(x)})
else:
return x
if isinstance(json_data, six.string_types):
json_data = jsonutils.loads(json_data)
return dic2obj(json_data)<|docstring|>Converts json data to an object with a given object type
:param obj_type: converted object's type that is determined dynamically
:param json_data: json string or json object
:return: object<|endoftext|>
|
a666233a5d5d4f9279d67c421f9992f4bb59513586304f0d9188eed0bca9a4bd
|
def db_records_to_json(records):
'Converts db records to json.\n\n alchemy_encoder is needed for date and numeric(x,y) fields since\n they will turn into datetime.date and decimal.Decimal types.\n '
def alchemy_encoder(obj):
if isinstance(obj, datetime.date):
return obj.isoformat()
elif isinstance(obj, decimal.Decimal):
return float(obj)
rows = []
for record in records:
if isinstance(record, tuple):
merge = dict()
for table in record:
merge.update(dict(table))
rows.append(merge)
else:
rows.append(dict(record))
json_str = jsonutils.dumps(rows, alchemy_encoder)
return jsonutils.loads(json_str)
|
Converts db records to json.
alchemy_encoder is needed for date and numeric(x,y) fields since
they will turn into datetime.date and decimal.Decimal types.
|
infoblox_client/utils.py
|
db_records_to_json
|
bondar-pavel/infoblox-client
| 1
|
python
|
def db_records_to_json(records):
'Converts db records to json.\n\n alchemy_encoder is needed for date and numeric(x,y) fields since\n they will turn into datetime.date and decimal.Decimal types.\n '
def alchemy_encoder(obj):
if isinstance(obj, datetime.date):
return obj.isoformat()
elif isinstance(obj, decimal.Decimal):
return float(obj)
rows = []
for record in records:
if isinstance(record, tuple):
merge = dict()
for table in record:
merge.update(dict(table))
rows.append(merge)
else:
rows.append(dict(record))
json_str = jsonutils.dumps(rows, alchemy_encoder)
return jsonutils.loads(json_str)
|
def db_records_to_json(records):
'Converts db records to json.\n\n alchemy_encoder is needed for date and numeric(x,y) fields since\n they will turn into datetime.date and decimal.Decimal types.\n '
def alchemy_encoder(obj):
if isinstance(obj, datetime.date):
return obj.isoformat()
elif isinstance(obj, decimal.Decimal):
return float(obj)
rows = []
for record in records:
if isinstance(record, tuple):
merge = dict()
for table in record:
merge.update(dict(table))
rows.append(merge)
else:
rows.append(dict(record))
json_str = jsonutils.dumps(rows, alchemy_encoder)
return jsonutils.loads(json_str)<|docstring|>Converts db records to json.
alchemy_encoder is needed for date and numeric(x,y) fields since
they will turn into datetime.date and decimal.Decimal types.<|endoftext|>
|
54ab4beaa27886b9444fb9ce0455bfb4f821270f5c088fd9ea128b8acd7ba3e8
|
def generate_duid(mac):
'DUID is consisted of 10 hex numbers.\n\n 0x00 + 3 random hex + mac with 6 hex\n '
valid = (mac and isinstance(mac, six.string_types))
if (not valid):
ValueError('Invalid argument was passed')
duid = [0, random.randint(0, 127), random.randint(0, 255), random.randint(0, 255)]
return ((':'.join(map((lambda x: ('%02x' % x)), duid)) + ':') + mac)
|
DUID is consisted of 10 hex numbers.
0x00 + 3 random hex + mac with 6 hex
|
infoblox_client/utils.py
|
generate_duid
|
bondar-pavel/infoblox-client
| 1
|
python
|
def generate_duid(mac):
'DUID is consisted of 10 hex numbers.\n\n 0x00 + 3 random hex + mac with 6 hex\n '
valid = (mac and isinstance(mac, six.string_types))
if (not valid):
ValueError('Invalid argument was passed')
duid = [0, random.randint(0, 127), random.randint(0, 255), random.randint(0, 255)]
return ((':'.join(map((lambda x: ('%02x' % x)), duid)) + ':') + mac)
|
def generate_duid(mac):
'DUID is consisted of 10 hex numbers.\n\n 0x00 + 3 random hex + mac with 6 hex\n '
valid = (mac and isinstance(mac, six.string_types))
if (not valid):
ValueError('Invalid argument was passed')
duid = [0, random.randint(0, 127), random.randint(0, 255), random.randint(0, 255)]
return ((':'.join(map((lambda x: ('%02x' % x)), duid)) + ':') + mac)<|docstring|>DUID is consisted of 10 hex numbers.
0x00 + 3 random hex + mac with 6 hex<|endoftext|>
|
ebf1691dea20571791bf4cd5d234432f8f661c87366512df96e098b233f9aaa3
|
def add_deployment(user):
'Add deployment for a given package ID'
dep = Deployment(user=user, status='complete', declared=func.current_timestamp())
Session.add(dep)
Session.commit()
Session.flush()
return dep
|
Add deployment for a given package ID
|
tagopsdb/deploy/deploy.py
|
add_deployment
|
ifwe/tagopsdb
| 0
|
python
|
def add_deployment(user):
dep = Deployment(user=user, status='complete', declared=func.current_timestamp())
Session.add(dep)
Session.commit()
Session.flush()
return dep
|
def add_deployment(user):
dep = Deployment(user=user, status='complete', declared=func.current_timestamp())
Session.add(dep)
Session.commit()
Session.flush()
return dep<|docstring|>Add deployment for a given package ID<|endoftext|>
|
94c9500f4705657639b4be6ed26337ebc5e2f9ee59c40a7d8e9a17ac399dae18
|
def add_app_deployment(dep_id, app_id, user, status, environment, package_id):
'Add a tier deployment for a given deployment ID'
environment_id = _calculate_environment_id(environment)
app_dep = AppDeployment(deployment_id=dep_id, app_id=app_id, user=user, status=status, environment_id=environment_id, realized=func.current_timestamp(), package_id=package_id)
Session.add(app_dep)
Session.commit()
return app_dep
|
Add a tier deployment for a given deployment ID
|
tagopsdb/deploy/deploy.py
|
add_app_deployment
|
ifwe/tagopsdb
| 0
|
python
|
def add_app_deployment(dep_id, app_id, user, status, environment, package_id):
environment_id = _calculate_environment_id(environment)
app_dep = AppDeployment(deployment_id=dep_id, app_id=app_id, user=user, status=status, environment_id=environment_id, realized=func.current_timestamp(), package_id=package_id)
Session.add(app_dep)
Session.commit()
return app_dep
|
def add_app_deployment(dep_id, app_id, user, status, environment, package_id):
environment_id = _calculate_environment_id(environment)
app_dep = AppDeployment(deployment_id=dep_id, app_id=app_id, user=user, status=status, environment_id=environment_id, realized=func.current_timestamp(), package_id=package_id)
Session.add(app_dep)
Session.commit()
return app_dep<|docstring|>Add a tier deployment for a given deployment ID<|endoftext|>
|
67a4c16631358f9965445003619c83424a9df1af24ca820be14695098bbd1c0d
|
def add_host_deployment(dep_id, host_id, user, status, package_id):
'Add host deployment for a given host and deployment'
host_dep = HostDeployment(deployment_id=dep_id, host_id=host_id, user=user, status=status, realized=func.current_timestamp(), package_id=package_id)
Session.add(host_dep)
Session.commit()
return host_dep
|
Add host deployment for a given host and deployment
|
tagopsdb/deploy/deploy.py
|
add_host_deployment
|
ifwe/tagopsdb
| 0
|
python
|
def add_host_deployment(dep_id, host_id, user, status, package_id):
host_dep = HostDeployment(deployment_id=dep_id, host_id=host_id, user=user, status=status, realized=func.current_timestamp(), package_id=package_id)
Session.add(host_dep)
Session.commit()
return host_dep
|
def add_host_deployment(dep_id, host_id, user, status, package_id):
host_dep = HostDeployment(deployment_id=dep_id, host_id=host_id, user=user, status=status, realized=func.current_timestamp(), package_id=package_id)
Session.add(host_dep)
Session.commit()
return host_dep<|docstring|>Add host deployment for a given host and deployment<|endoftext|>
|
bcab8aa38c1565719ea1c625e502f9213a19fe742842fb98b90a060f35c5ee4e
|
def find_all_app_deployments_by_apptype(package_name, apptype, environment):
'Find all tier deployments for a given application type\n and specific environment\n '
return Session.query(AppDeployment).join(Package).join(AppDefinition).filter((Package.pkg_name == package_name)).filter((AppDefinition.app_type == apptype)).filter((AppDeployment.environment == environment)).all()
|
Find all tier deployments for a given application type
and specific environment
|
tagopsdb/deploy/deploy.py
|
find_all_app_deployments_by_apptype
|
ifwe/tagopsdb
| 0
|
python
|
def find_all_app_deployments_by_apptype(package_name, apptype, environment):
'Find all tier deployments for a given application type\n and specific environment\n '
return Session.query(AppDeployment).join(Package).join(AppDefinition).filter((Package.pkg_name == package_name)).filter((AppDefinition.app_type == apptype)).filter((AppDeployment.environment == environment)).all()
|
def find_all_app_deployments_by_apptype(package_name, apptype, environment):
'Find all tier deployments for a given application type\n and specific environment\n '
return Session.query(AppDeployment).join(Package).join(AppDefinition).filter((Package.pkg_name == package_name)).filter((AppDefinition.app_type == apptype)).filter((AppDeployment.environment == environment)).all()<|docstring|>Find all tier deployments for a given application type
and specific environment<|endoftext|>
|
03a5338e77ed6276924da19148cb3e205a2e0c86455738c458b521a15a3f9c32
|
def find_app_deployment(pkg_id, app_ids, environment):
'Find specific tier deployment(s) based on package ID and\n application ID(s)\n '
subq = Session.query(AppDeployment.app_id, AppDefinition.app_type, AppDeployment.id).join(Package).join(AppDefinition).filter((Package.id == pkg_id))
if app_ids:
subq = subq.filter(AppDeployment.app_id.in_(app_ids))
subq = subq.filter((AppDeployment.environment == environment)).order_by(AppDeployment.realized.desc(), AppDeployment.id.desc()).subquery(name='t_ordered')
return Session.query(AppDeployment, AppDefinition.app_type, Package).join(AppDefinition).join(Package).join(subq, (AppDeployment.id == subq.c.AppDeploymentID)).group_by(subq.c.AppID).all()
|
Find specific tier deployment(s) based on package ID and
application ID(s)
|
tagopsdb/deploy/deploy.py
|
find_app_deployment
|
ifwe/tagopsdb
| 0
|
python
|
def find_app_deployment(pkg_id, app_ids, environment):
'Find specific tier deployment(s) based on package ID and\n application ID(s)\n '
subq = Session.query(AppDeployment.app_id, AppDefinition.app_type, AppDeployment.id).join(Package).join(AppDefinition).filter((Package.id == pkg_id))
if app_ids:
subq = subq.filter(AppDeployment.app_id.in_(app_ids))
subq = subq.filter((AppDeployment.environment == environment)).order_by(AppDeployment.realized.desc(), AppDeployment.id.desc()).subquery(name='t_ordered')
return Session.query(AppDeployment, AppDefinition.app_type, Package).join(AppDefinition).join(Package).join(subq, (AppDeployment.id == subq.c.AppDeploymentID)).group_by(subq.c.AppID).all()
|
def find_app_deployment(pkg_id, app_ids, environment):
'Find specific tier deployment(s) based on package ID and\n application ID(s)\n '
subq = Session.query(AppDeployment.app_id, AppDefinition.app_type, AppDeployment.id).join(Package).join(AppDefinition).filter((Package.id == pkg_id))
if app_ids:
subq = subq.filter(AppDeployment.app_id.in_(app_ids))
subq = subq.filter((AppDeployment.environment == environment)).order_by(AppDeployment.realized.desc(), AppDeployment.id.desc()).subquery(name='t_ordered')
return Session.query(AppDeployment, AppDefinition.app_type, Package).join(AppDefinition).join(Package).join(subq, (AppDeployment.id == subq.c.AppDeploymentID)).group_by(subq.c.AppID).all()<|docstring|>Find specific tier deployment(s) based on package ID and
application ID(s)<|endoftext|>
|
044a02f52a89897d94132229a039df73666d04cbe374bc697f121df07aa96d9f
|
def find_app_by_apptype(apptype):
'Find a given application by app type'
try:
return Session.query(AppDefinition).filter_by(app_type=apptype).one()
except sqlalchemy.orm.exc.NoResultFound:
return None
|
Find a given application by app type
|
tagopsdb/deploy/deploy.py
|
find_app_by_apptype
|
ifwe/tagopsdb
| 0
|
python
|
def find_app_by_apptype(apptype):
try:
return Session.query(AppDefinition).filter_by(app_type=apptype).one()
except sqlalchemy.orm.exc.NoResultFound:
return None
|
def find_app_by_apptype(apptype):
try:
return Session.query(AppDefinition).filter_by(app_type=apptype).one()
except sqlalchemy.orm.exc.NoResultFound:
return None<|docstring|>Find a given application by app type<|endoftext|>
|
e5664188b15cdb14eb2724fab4c0aa6c330277ebe3b6b9bb51c85f95e4b2a79e
|
def find_apptype_by_appid(app_id):
'Find the app type for a given ID'
try:
app_def = Session.query(AppDefinition).filter_by(id=app_id).one()
return app_def.app_type
except sqlalchemy.orm.exc.NoResultFound:
raise DeployException(('No app type with AppID "%s" was found in the app_definitions table' % app_id))
|
Find the app type for a given ID
|
tagopsdb/deploy/deploy.py
|
find_apptype_by_appid
|
ifwe/tagopsdb
| 0
|
python
|
def find_apptype_by_appid(app_id):
try:
app_def = Session.query(AppDefinition).filter_by(id=app_id).one()
return app_def.app_type
except sqlalchemy.orm.exc.NoResultFound:
raise DeployException(('No app type with AppID "%s" was found in the app_definitions table' % app_id))
|
def find_apptype_by_appid(app_id):
try:
app_def = Session.query(AppDefinition).filter_by(id=app_id).one()
return app_def.app_type
except sqlalchemy.orm.exc.NoResultFound:
raise DeployException(('No app type with AppID "%s" was found in the app_definitions table' % app_id))<|docstring|>Find the app type for a given ID<|endoftext|>
|
664db8b12b283e4d15cbe3e006ec3fae042debe40f779839cc275d9dfee12feb
|
def find_deployed_version(package_name, environment, version=None, revision=None, apptypes=None, apptier=False):
'Find a given deployed version for a given package in a given\n environment for all related app types; search for full tier\n or host only deployment specifically\n '
if apptier:
subq = Session.query(Package.pkg_name, Package.version, Package.revision, AppDefinition.app_type, AppDeployment.environment).join(AppDeployment).join(AppDefinition).filter((Package.pkg_name == package_name)).filter((AppDeployment.environment == environment)).filter((AppDeployment.status != 'invalidated'))
if (apptypes is not None):
subq = subq.filter(AppDefinition.app_type.in_(apptypes))
if (version is not None):
subq = subq.filter((Package.version == version))
if (revision is not None):
subq = subq.filter((Package.revision == revision))
subq = subq.order_by(AppDeployment.realized.desc(), AppDeployment.id.desc()).subquery(name='t_ordered')
versions = Session.query(subq.c.appType, subq.c.version, subq.c.revision).group_by(subq.c.appType, subq.c.environment).all()
else:
hostsq = Session.query(Host.hostname, Host.app_id, Package.version, Package.revision).join(AppDefinition).join(HostDeployment).join(Package).filter((Package.pkg_name == package_name)).filter((Host.environment == environment))
if (apptypes is not None):
hostsq = hostsq.filter(AppDefinition.app_type.in_(apptypes))
versions = hostsq.all()
return versions
|
Find a given deployed version for a given package in a given
environment for all related app types; search for full tier
or host only deployment specifically
|
tagopsdb/deploy/deploy.py
|
find_deployed_version
|
ifwe/tagopsdb
| 0
|
python
|
def find_deployed_version(package_name, environment, version=None, revision=None, apptypes=None, apptier=False):
'Find a given deployed version for a given package in a given\n environment for all related app types; search for full tier\n or host only deployment specifically\n '
if apptier:
subq = Session.query(Package.pkg_name, Package.version, Package.revision, AppDefinition.app_type, AppDeployment.environment).join(AppDeployment).join(AppDefinition).filter((Package.pkg_name == package_name)).filter((AppDeployment.environment == environment)).filter((AppDeployment.status != 'invalidated'))
if (apptypes is not None):
subq = subq.filter(AppDefinition.app_type.in_(apptypes))
if (version is not None):
subq = subq.filter((Package.version == version))
if (revision is not None):
subq = subq.filter((Package.revision == revision))
subq = subq.order_by(AppDeployment.realized.desc(), AppDeployment.id.desc()).subquery(name='t_ordered')
versions = Session.query(subq.c.appType, subq.c.version, subq.c.revision).group_by(subq.c.appType, subq.c.environment).all()
else:
hostsq = Session.query(Host.hostname, Host.app_id, Package.version, Package.revision).join(AppDefinition).join(HostDeployment).join(Package).filter((Package.pkg_name == package_name)).filter((Host.environment == environment))
if (apptypes is not None):
hostsq = hostsq.filter(AppDefinition.app_type.in_(apptypes))
versions = hostsq.all()
return versions
|
def find_deployed_version(package_name, environment, version=None, revision=None, apptypes=None, apptier=False):
'Find a given deployed version for a given package in a given\n environment for all related app types; search for full tier\n or host only deployment specifically\n '
if apptier:
subq = Session.query(Package.pkg_name, Package.version, Package.revision, AppDefinition.app_type, AppDeployment.environment).join(AppDeployment).join(AppDefinition).filter((Package.pkg_name == package_name)).filter((AppDeployment.environment == environment)).filter((AppDeployment.status != 'invalidated'))
if (apptypes is not None):
subq = subq.filter(AppDefinition.app_type.in_(apptypes))
if (version is not None):
subq = subq.filter((Package.version == version))
if (revision is not None):
subq = subq.filter((Package.revision == revision))
subq = subq.order_by(AppDeployment.realized.desc(), AppDeployment.id.desc()).subquery(name='t_ordered')
versions = Session.query(subq.c.appType, subq.c.version, subq.c.revision).group_by(subq.c.appType, subq.c.environment).all()
else:
hostsq = Session.query(Host.hostname, Host.app_id, Package.version, Package.revision).join(AppDefinition).join(HostDeployment).join(Package).filter((Package.pkg_name == package_name)).filter((Host.environment == environment))
if (apptypes is not None):
hostsq = hostsq.filter(AppDefinition.app_type.in_(apptypes))
versions = hostsq.all()
return versions<|docstring|>Find a given deployed version for a given package in a given
environment for all related app types; search for full tier
or host only deployment specifically<|endoftext|>
|
a4ffd8369ac2c76f81df4c61246b31f54ceac122af479b30722435c91e2768da
|
def find_deployment_by_id(dep_id):
'Find deployment for a given ID'
try:
return Session.query(Deployment).filter_by(id=dep_id).one()
except sqlalchemy.orm.exc.NoResultFound:
raise DeployException(('No deployment with deploy ID "%s" found in the deployments table' % dep_id))
|
Find deployment for a given ID
|
tagopsdb/deploy/deploy.py
|
find_deployment_by_id
|
ifwe/tagopsdb
| 0
|
python
|
def find_deployment_by_id(dep_id):
try:
return Session.query(Deployment).filter_by(id=dep_id).one()
except sqlalchemy.orm.exc.NoResultFound:
raise DeployException(('No deployment with deploy ID "%s" found in the deployments table' % dep_id))
|
def find_deployment_by_id(dep_id):
try:
return Session.query(Deployment).filter_by(id=dep_id).one()
except sqlalchemy.orm.exc.NoResultFound:
raise DeployException(('No deployment with deploy ID "%s" found in the deployments table' % dep_id))<|docstring|>Find deployment for a given ID<|endoftext|>
|
322db1289719f0790ee37c165b9e24a7f1b1ce2ea2fb610986f91bd1f2537f45
|
def find_host_by_hostname(hostname):
'Find host for a given hostname'
try:
return Session.query(Host).filter_by(hostname=hostname).one()
except sqlalchemy.orm.exc.NoResultFound:
raise DeployException(('No host with hostname "%s" found in the hosts table' % hostname))
|
Find host for a given hostname
|
tagopsdb/deploy/deploy.py
|
find_host_by_hostname
|
ifwe/tagopsdb
| 0
|
python
|
def find_host_by_hostname(hostname):
try:
return Session.query(Host).filter_by(hostname=hostname).one()
except sqlalchemy.orm.exc.NoResultFound:
raise DeployException(('No host with hostname "%s" found in the hosts table' % hostname))
|
def find_host_by_hostname(hostname):
try:
return Session.query(Host).filter_by(hostname=hostname).one()
except sqlalchemy.orm.exc.NoResultFound:
raise DeployException(('No host with hostname "%s" found in the hosts table' % hostname))<|docstring|>Find host for a given hostname<|endoftext|>
|
8f1ce6f921fdea437b62bf4436f270a44f810b32a10faa509d2ce76ecf195b52
|
def find_host_deployment_by_depid(dep_id, dep_host):
'Find host deployment (if exists) for a given deployment ID'
try:
return Session.query(HostDeployment).join(Host).filter((HostDeployment.deployment_id == dep_id)).filter((Host.hostname == dep_host)).one()
except sqlalchemy.orm.exc.NoResultFound:
return None
|
Find host deployment (if exists) for a given deployment ID
|
tagopsdb/deploy/deploy.py
|
find_host_deployment_by_depid
|
ifwe/tagopsdb
| 0
|
python
|
def find_host_deployment_by_depid(dep_id, dep_host):
try:
return Session.query(HostDeployment).join(Host).filter((HostDeployment.deployment_id == dep_id)).filter((Host.hostname == dep_host)).one()
except sqlalchemy.orm.exc.NoResultFound:
return None
|
def find_host_deployment_by_depid(dep_id, dep_host):
try:
return Session.query(HostDeployment).join(Host).filter((HostDeployment.deployment_id == dep_id)).filter((Host.hostname == dep_host)).one()
except sqlalchemy.orm.exc.NoResultFound:
return None<|docstring|>Find host deployment (if exists) for a given deployment ID<|endoftext|>
|
bbd9a2ea348cfc07081355e25db2ff42e983c66dfaab347af6b0e288b9191374
|
def find_host_deployments_by_pkgid(pkg_id, dep_hosts):
'Find host deployments for a given package ID and a given\n set of hosts\n '
return Session.query(HostDeployment, Host.hostname, Host.app_id).join(Host).join(Package).filter((Package.id == pkg_id)).filter(Host.hostname.in_(dep_hosts)).all()
|
Find host deployments for a given package ID and a given
set of hosts
|
tagopsdb/deploy/deploy.py
|
find_host_deployments_by_pkgid
|
ifwe/tagopsdb
| 0
|
python
|
def find_host_deployments_by_pkgid(pkg_id, dep_hosts):
'Find host deployments for a given package ID and a given\n set of hosts\n '
return Session.query(HostDeployment, Host.hostname, Host.app_id).join(Host).join(Package).filter((Package.id == pkg_id)).filter(Host.hostname.in_(dep_hosts)).all()
|
def find_host_deployments_by_pkgid(pkg_id, dep_hosts):
'Find host deployments for a given package ID and a given\n set of hosts\n '
return Session.query(HostDeployment, Host.hostname, Host.app_id).join(Host).join(Package).filter((Package.id == pkg_id)).filter(Host.hostname.in_(dep_hosts)).all()<|docstring|>Find host deployments for a given package ID and a given
set of hosts<|endoftext|>
|
b4968afcca8572085f1cf5dadbb09f1352f39709e8805ad4edf373bdb3e9e23c
|
def find_host_deployments_by_package_name(package_name, dep_hosts):
'Find host deployments for a given package and a given\n set of hosts\n '
return Session.query(HostDeployment, Host.hostname, Host.app_id, Package.version).join(Host).join(Package).filter((Package.pkg_name == package_name)).filter(Host.hostname.in_(dep_hosts)).all()
|
Find host deployments for a given package and a given
set of hosts
|
tagopsdb/deploy/deploy.py
|
find_host_deployments_by_package_name
|
ifwe/tagopsdb
| 0
|
python
|
def find_host_deployments_by_package_name(package_name, dep_hosts):
'Find host deployments for a given package and a given\n set of hosts\n '
return Session.query(HostDeployment, Host.hostname, Host.app_id, Package.version).join(Host).join(Package).filter((Package.pkg_name == package_name)).filter(Host.hostname.in_(dep_hosts)).all()
|
def find_host_deployments_by_package_name(package_name, dep_hosts):
'Find host deployments for a given package and a given\n set of hosts\n '
return Session.query(HostDeployment, Host.hostname, Host.app_id, Package.version).join(Host).join(Package).filter((Package.pkg_name == package_name)).filter(Host.hostname.in_(dep_hosts)).all()<|docstring|>Find host deployments for a given package and a given
set of hosts<|endoftext|>
|
d83302454359a8f3ea0d1a7e414df2bf966618c9b4f84f0b4fd930a2ff59371d
|
def find_host_deployments_not_ok(pkg_id, app_id, environment):
"Find host deployments that are not in 'ok' state for a given\n package ID, app ID and environment (may return none)\n "
return Session.query(HostDeployment, Host.hostname).join(Host).filter((HostDeployment.package_id == pkg_id)).filter((Host.app_id == app_id)).filter((Host.environment == environment)).filter((HostDeployment.status != 'ok')).all()
|
Find host deployments that are not in 'ok' state for a given
package ID, app ID and environment (may return none)
|
tagopsdb/deploy/deploy.py
|
find_host_deployments_not_ok
|
ifwe/tagopsdb
| 0
|
python
|
def find_host_deployments_not_ok(pkg_id, app_id, environment):
"Find host deployments that are not in 'ok' state for a given\n package ID, app ID and environment (may return none)\n "
return Session.query(HostDeployment, Host.hostname).join(Host).filter((HostDeployment.package_id == pkg_id)).filter((Host.app_id == app_id)).filter((Host.environment == environment)).filter((HostDeployment.status != 'ok')).all()
|
def find_host_deployments_not_ok(pkg_id, app_id, environment):
"Find host deployments that are not in 'ok' state for a given\n package ID, app ID and environment (may return none)\n "
return Session.query(HostDeployment, Host.hostname).join(Host).filter((HostDeployment.package_id == pkg_id)).filter((Host.app_id == app_id)).filter((Host.environment == environment)).filter((HostDeployment.status != 'ok')).all()<|docstring|>Find host deployments that are not in 'ok' state for a given
package ID, app ID and environment (may return none)<|endoftext|>
|
3b64fae6575e319d0e5bf04d9bc7f9366c3ea6a885741fd7e1a616ea44142683
|
def find_hosts_for_app(app_id, environment):
'Find the hosts for a given application and environment'
return Session.query(Host).join(AppDefinition).filter((AppDefinition.id == app_id)).filter((Host.environment == environment)).all()
|
Find the hosts for a given application and environment
|
tagopsdb/deploy/deploy.py
|
find_hosts_for_app
|
ifwe/tagopsdb
| 0
|
python
|
def find_hosts_for_app(app_id, environment):
return Session.query(Host).join(AppDefinition).filter((AppDefinition.id == app_id)).filter((Host.environment == environment)).all()
|
def find_hosts_for_app(app_id, environment):
return Session.query(Host).join(AppDefinition).filter((AppDefinition.id == app_id)).filter((Host.environment == environment)).all()<|docstring|>Find the hosts for a given application and environment<|endoftext|>
|
3dbacb2d929c0c6b08de802f7ab280d86c79bcb92ccb707a434abafa675a0c24
|
def find_hipchat_rooms_for_app(project, apptypes=None):
'Find the relevent HipChat rooms (if any) for a given project'
app_defs = repo.find_app_packages_mapping(project)
if (apptypes is None):
proj_type = repo.find_project_type(project)[0]
if (proj_type != 'application'):
return []
apptypes = [x.app_type for x in app_defs]
rooms_query = Session.query(Hipchat.room_name).filter(Hipchat.app_definitions.any(AppDefinition.app_type.in_(apptypes))).all()
return [x[0] for x in rooms_query]
|
Find the relevent HipChat rooms (if any) for a given project
|
tagopsdb/deploy/deploy.py
|
find_hipchat_rooms_for_app
|
ifwe/tagopsdb
| 0
|
python
|
def find_hipchat_rooms_for_app(project, apptypes=None):
app_defs = repo.find_app_packages_mapping(project)
if (apptypes is None):
proj_type = repo.find_project_type(project)[0]
if (proj_type != 'application'):
return []
apptypes = [x.app_type for x in app_defs]
rooms_query = Session.query(Hipchat.room_name).filter(Hipchat.app_definitions.any(AppDefinition.app_type.in_(apptypes))).all()
return [x[0] for x in rooms_query]
|
def find_hipchat_rooms_for_app(project, apptypes=None):
app_defs = repo.find_app_packages_mapping(project)
if (apptypes is None):
proj_type = repo.find_project_type(project)[0]
if (proj_type != 'application'):
return []
apptypes = [x.app_type for x in app_defs]
rooms_query = Session.query(Hipchat.room_name).filter(Hipchat.app_definitions.any(AppDefinition.app_type.in_(apptypes))).all()
return [x[0] for x in rooms_query]<|docstring|>Find the relevent HipChat rooms (if any) for a given project<|endoftext|>
|
2751005ca1b8645b56c162e2b1655eb66f4e131abe8c0bf22fbd8c64dbe0ae32
|
def find_latest_deployed_version(package_name, environment, apptypes=None, apptier=False):
'Find the most recent deployed version for a given package\n in a given environment for all related app types; search\n for full tier or host only deployment specifically\n '
return find_deployed_version(package_name, environment, apptypes=apptypes, apptier=apptier)
|
Find the most recent deployed version for a given package
in a given environment for all related app types; search
for full tier or host only deployment specifically
|
tagopsdb/deploy/deploy.py
|
find_latest_deployed_version
|
ifwe/tagopsdb
| 0
|
python
|
def find_latest_deployed_version(package_name, environment, apptypes=None, apptier=False):
'Find the most recent deployed version for a given package\n in a given environment for all related app types; search\n for full tier or host only deployment specifically\n '
return find_deployed_version(package_name, environment, apptypes=apptypes, apptier=apptier)
|
def find_latest_deployed_version(package_name, environment, apptypes=None, apptier=False):
'Find the most recent deployed version for a given package\n in a given environment for all related app types; search\n for full tier or host only deployment specifically\n '
return find_deployed_version(package_name, environment, apptypes=apptypes, apptier=apptier)<|docstring|>Find the most recent deployed version for a given package
in a given environment for all related app types; search
for full tier or host only deployment specifically<|endoftext|>
|
cfac1bf8e59ce73ff8d4d7f0cae39eab115e923af7cb2f5e8027ba52ca56f35d
|
def find_latest_deployment(package_name, app_id, environment):
'Find the most recent deployment for a given package in a given\n environment for the given application ID\n '
return Session.query(AppDeployment, Package).join(Package).filter((Package.pkg_name == package_name)).filter((AppDeployment.app_id == app_id)).filter((AppDeployment.environment == environment)).filter((AppDeployment.status != 'invalidated')).order_by(AppDeployment.realized.desc(), AppDeployment.id.desc()).first()
|
Find the most recent deployment for a given package in a given
environment for the given application ID
|
tagopsdb/deploy/deploy.py
|
find_latest_deployment
|
ifwe/tagopsdb
| 0
|
python
|
def find_latest_deployment(package_name, app_id, environment):
'Find the most recent deployment for a given package in a given\n environment for the given application ID\n '
return Session.query(AppDeployment, Package).join(Package).filter((Package.pkg_name == package_name)).filter((AppDeployment.app_id == app_id)).filter((AppDeployment.environment == environment)).filter((AppDeployment.status != 'invalidated')).order_by(AppDeployment.realized.desc(), AppDeployment.id.desc()).first()
|
def find_latest_deployment(package_name, app_id, environment):
'Find the most recent deployment for a given package in a given\n environment for the given application ID\n '
return Session.query(AppDeployment, Package).join(Package).filter((Package.pkg_name == package_name)).filter((AppDeployment.app_id == app_id)).filter((AppDeployment.environment == environment)).filter((AppDeployment.status != 'invalidated')).order_by(AppDeployment.realized.desc(), AppDeployment.id.desc()).first()<|docstring|>Find the most recent deployment for a given package in a given
environment for the given application ID<|endoftext|>
|
26949975c5a6357bf93eebe82f7b0287165b566ec96f6eb1a50f97a682f21432
|
def find_latest_validated_deployment(package_name, app_id, environment):
'Find the most recent deployment that was validated for a given\n package, application type and environment.\n '
return Session.query(AppDeployment, Package.id).join(Package).filter((Package.pkg_name == package_name)).filter((AppDeployment.environment == environment)).filter((AppDeployment.status == 'validated')).order_by(AppDeployment.realized.desc(), AppDeployment.id.desc()).first()
|
Find the most recent deployment that was validated for a given
package, application type and environment.
|
tagopsdb/deploy/deploy.py
|
find_latest_validated_deployment
|
ifwe/tagopsdb
| 0
|
python
|
def find_latest_validated_deployment(package_name, app_id, environment):
'Find the most recent deployment that was validated for a given\n package, application type and environment.\n '
return Session.query(AppDeployment, Package.id).join(Package).filter((Package.pkg_name == package_name)).filter((AppDeployment.environment == environment)).filter((AppDeployment.status == 'validated')).order_by(AppDeployment.realized.desc(), AppDeployment.id.desc()).first()
|
def find_latest_validated_deployment(package_name, app_id, environment):
'Find the most recent deployment that was validated for a given\n package, application type and environment.\n '
return Session.query(AppDeployment, Package.id).join(Package).filter((Package.pkg_name == package_name)).filter((AppDeployment.environment == environment)).filter((AppDeployment.status == 'validated')).order_by(AppDeployment.realized.desc(), AppDeployment.id.desc()).first()<|docstring|>Find the most recent deployment that was validated for a given
package, application type and environment.<|endoftext|>
|
5ea2be2304a01408b93d023d01f8e8bbabf26d61d011bbaf25d8d9709c94b2ef
|
def find_previous_validated_deployment(package_name, app_id, environment):
'Find the previous validated deployment, ignoring if the current\n deployment is validated or not, for a given package, application\n type and environment.\n '
(app_dep, pkg) = find_latest_deployment(package_name, app_id, environment)
return Session.query(AppDeployment, Package.id).join(Package).filter((AppDeployment.id != app_dep.id)).filter((Package.pkg_name == package_name)).filter((AppDeployment.app_id == app_id)).filter((AppDeployment.environment == environment)).filter((AppDeployment.status == 'validated')).order_by(AppDeployment.realized.desc(), AppDeployment.id.desc()).first()
|
Find the previous validated deployment, ignoring if the current
deployment is validated or not, for a given package, application
type and environment.
|
tagopsdb/deploy/deploy.py
|
find_previous_validated_deployment
|
ifwe/tagopsdb
| 0
|
python
|
def find_previous_validated_deployment(package_name, app_id, environment):
'Find the previous validated deployment, ignoring if the current\n deployment is validated or not, for a given package, application\n type and environment.\n '
(app_dep, pkg) = find_latest_deployment(package_name, app_id, environment)
return Session.query(AppDeployment, Package.id).join(Package).filter((AppDeployment.id != app_dep.id)).filter((Package.pkg_name == package_name)).filter((AppDeployment.app_id == app_id)).filter((AppDeployment.environment == environment)).filter((AppDeployment.status == 'validated')).order_by(AppDeployment.realized.desc(), AppDeployment.id.desc()).first()
|
def find_previous_validated_deployment(package_name, app_id, environment):
'Find the previous validated deployment, ignoring if the current\n deployment is validated or not, for a given package, application\n type and environment.\n '
(app_dep, pkg) = find_latest_deployment(package_name, app_id, environment)
return Session.query(AppDeployment, Package.id).join(Package).filter((AppDeployment.id != app_dep.id)).filter((Package.pkg_name == package_name)).filter((AppDeployment.app_id == app_id)).filter((AppDeployment.environment == environment)).filter((AppDeployment.status == 'validated')).order_by(AppDeployment.realized.desc(), AppDeployment.id.desc()).first()<|docstring|>Find the previous validated deployment, ignoring if the current
deployment is validated or not, for a given package, application
type and environment.<|endoftext|>
|
823ffe988c30449ecdc1507ae44beefb60bd945861db5e965df9b53b3bbf8edd
|
def find_running_deployment(app_id, environment, hosts=None):
'Find a currently running tier or host deployment (or deployments)\n for a given application type and environment\n '
tier = Session.query(AppDeployment.user, AppDeployment.realized, AppDeployment.environment, AppDefinition.app_type).join(AppDefinition).filter((AppDeployment.app_id == app_id)).filter((AppDeployment.environment == environment)).filter((AppDeployment.status == 'inprogress')).order_by(AppDeployment.realized.desc(), AppDeployment.id.desc()).first()
if tier:
return ('tier', tier)
host = Session.query(HostDeployment.user, HostDeployment.realized, Host.hostname, Host.environment).join(Host).filter((Host.environment == environment)).filter((Host.app_id == app_id)).filter((HostDeployment.status == 'inprogress')).all()
if host:
return ('host', host)
return None
|
Find a currently running tier or host deployment (or deployments)
for a given application type and environment
|
tagopsdb/deploy/deploy.py
|
find_running_deployment
|
ifwe/tagopsdb
| 0
|
python
|
def find_running_deployment(app_id, environment, hosts=None):
'Find a currently running tier or host deployment (or deployments)\n for a given application type and environment\n '
tier = Session.query(AppDeployment.user, AppDeployment.realized, AppDeployment.environment, AppDefinition.app_type).join(AppDefinition).filter((AppDeployment.app_id == app_id)).filter((AppDeployment.environment == environment)).filter((AppDeployment.status == 'inprogress')).order_by(AppDeployment.realized.desc(), AppDeployment.id.desc()).first()
if tier:
return ('tier', tier)
host = Session.query(HostDeployment.user, HostDeployment.realized, Host.hostname, Host.environment).join(Host).filter((Host.environment == environment)).filter((Host.app_id == app_id)).filter((HostDeployment.status == 'inprogress')).all()
if host:
return ('host', host)
return None
|
def find_running_deployment(app_id, environment, hosts=None):
'Find a currently running tier or host deployment (or deployments)\n for a given application type and environment\n '
tier = Session.query(AppDeployment.user, AppDeployment.realized, AppDeployment.environment, AppDefinition.app_type).join(AppDefinition).filter((AppDeployment.app_id == app_id)).filter((AppDeployment.environment == environment)).filter((AppDeployment.status == 'inprogress')).order_by(AppDeployment.realized.desc(), AppDeployment.id.desc()).first()
if tier:
return ('tier', tier)
host = Session.query(HostDeployment.user, HostDeployment.realized, Host.hostname, Host.environment).join(Host).filter((Host.environment == environment)).filter((Host.app_id == app_id)).filter((HostDeployment.status == 'inprogress')).all()
if host:
return ('host', host)
return None<|docstring|>Find a currently running tier or host deployment (or deployments)
for a given application type and environment<|endoftext|>
|
e97779071aab1b78901ddcc8b24f188d54a6b35d0f088d6611d1258d6219b909
|
def find_unvalidated_versions(time_delta, environment):
'Find the latest deployments that are not validated in a given\n environment for a given amount of time\n '
subq = Session.query(Package.pkg_name, Package.version, Package.revision, AppDefinition.app_type, AppDeployment.environment, AppDeployment.realized, AppDeployment.user, AppDeployment.status).join(AppDeployment).join(AppDefinition).filter((AppDeployment.status != 'invalidated')).filter((AppDeployment.environment == environment)).order_by(AppDeployment.realized.desc(), AppDeployment.id.desc()).subquery(name='t_ordered')
return Session.query(subq.c.pkg_name, subq.c.version, subq.c.revision, subq.c.appType, subq.c.environment, subq.c.realized, subq.c.user, subq.c.status).group_by(subq.c.appType, subq.c.environment, subq.c.pkg_name).having(and_(subq.c.status.like('%complete'), (func.unix_timestamp(subq.c.realized) < (func.unix_timestamp(func.now()) - time_delta)))).all()
|
Find the latest deployments that are not validated in a given
environment for a given amount of time
|
tagopsdb/deploy/deploy.py
|
find_unvalidated_versions
|
ifwe/tagopsdb
| 0
|
python
|
def find_unvalidated_versions(time_delta, environment):
'Find the latest deployments that are not validated in a given\n environment for a given amount of time\n '
subq = Session.query(Package.pkg_name, Package.version, Package.revision, AppDefinition.app_type, AppDeployment.environment, AppDeployment.realized, AppDeployment.user, AppDeployment.status).join(AppDeployment).join(AppDefinition).filter((AppDeployment.status != 'invalidated')).filter((AppDeployment.environment == environment)).order_by(AppDeployment.realized.desc(), AppDeployment.id.desc()).subquery(name='t_ordered')
return Session.query(subq.c.pkg_name, subq.c.version, subq.c.revision, subq.c.appType, subq.c.environment, subq.c.realized, subq.c.user, subq.c.status).group_by(subq.c.appType, subq.c.environment, subq.c.pkg_name).having(and_(subq.c.status.like('%complete'), (func.unix_timestamp(subq.c.realized) < (func.unix_timestamp(func.now()) - time_delta)))).all()
|
def find_unvalidated_versions(time_delta, environment):
'Find the latest deployments that are not validated in a given\n environment for a given amount of time\n '
subq = Session.query(Package.pkg_name, Package.version, Package.revision, AppDefinition.app_type, AppDeployment.environment, AppDeployment.realized, AppDeployment.user, AppDeployment.status).join(AppDeployment).join(AppDefinition).filter((AppDeployment.status != 'invalidated')).filter((AppDeployment.environment == environment)).order_by(AppDeployment.realized.desc(), AppDeployment.id.desc()).subquery(name='t_ordered')
return Session.query(subq.c.pkg_name, subq.c.version, subq.c.revision, subq.c.appType, subq.c.environment, subq.c.realized, subq.c.user, subq.c.status).group_by(subq.c.appType, subq.c.environment, subq.c.pkg_name).having(and_(subq.c.status.like('%complete'), (func.unix_timestamp(subq.c.realized) < (func.unix_timestamp(func.now()) - time_delta)))).all()<|docstring|>Find the latest deployments that are not validated in a given
environment for a given amount of time<|endoftext|>
|
d20c9b0b2b0d24ac4e834755a5f4e58ea4e9cf5a5822c321f19404bb9801d21c
|
def find_unvalidated_deployments(environment):
'Find the latest deployments that are not validated in a given\n environment (simplified version of find_unvalidated_versions)\n '
subq = Session.query(Package.pkg_name, AppDefinition.app_type, AppDeployment.environment, AppDeployment.status, AppDeployment).join(AppDeployment).join(AppDefinition).filter((AppDefinition.status == 'active')).filter((AppDeployment.status != 'invalidated')).filter((AppDeployment.environment == environment)).order_by(AppDeployment.realized.desc(), AppDeployment.id.desc()).subquery(name='t_ordered')
appdep_alias = aliased(AppDeployment, subq)
return Session.query(appdep_alias).group_by(subq.c.appType, subq.c.environment, subq.c.pkg_name).having(subq.c.status.like('%complete')).all()
|
Find the latest deployments that are not validated in a given
environment (simplified version of find_unvalidated_versions)
|
tagopsdb/deploy/deploy.py
|
find_unvalidated_deployments
|
ifwe/tagopsdb
| 0
|
python
|
def find_unvalidated_deployments(environment):
'Find the latest deployments that are not validated in a given\n environment (simplified version of find_unvalidated_versions)\n '
subq = Session.query(Package.pkg_name, AppDefinition.app_type, AppDeployment.environment, AppDeployment.status, AppDeployment).join(AppDeployment).join(AppDefinition).filter((AppDefinition.status == 'active')).filter((AppDeployment.status != 'invalidated')).filter((AppDeployment.environment == environment)).order_by(AppDeployment.realized.desc(), AppDeployment.id.desc()).subquery(name='t_ordered')
appdep_alias = aliased(AppDeployment, subq)
return Session.query(appdep_alias).group_by(subq.c.appType, subq.c.environment, subq.c.pkg_name).having(subq.c.status.like('%complete')).all()
|
def find_unvalidated_deployments(environment):
'Find the latest deployments that are not validated in a given\n environment (simplified version of find_unvalidated_versions)\n '
subq = Session.query(Package.pkg_name, AppDefinition.app_type, AppDeployment.environment, AppDeployment.status, AppDeployment).join(AppDeployment).join(AppDefinition).filter((AppDefinition.status == 'active')).filter((AppDeployment.status != 'invalidated')).filter((AppDeployment.environment == environment)).order_by(AppDeployment.realized.desc(), AppDeployment.id.desc()).subquery(name='t_ordered')
appdep_alias = aliased(AppDeployment, subq)
return Session.query(appdep_alias).group_by(subq.c.appType, subq.c.environment, subq.c.pkg_name).having(subq.c.status.like('%complete')).all()<|docstring|>Find the latest deployments that are not validated in a given
environment (simplified version of find_unvalidated_versions)<|endoftext|>
|
afcde78ece0451bf4de41c960ed83245599a396774f2def62e3aa1e01d861242
|
def list_app_deployment_info(package_name, environment, name, version, revision):
'Give all deployment information for a given package and version\n deployed to a given application tier and environment\n '
return Session.query(Deployment, AppDeployment, Package).join(Package).join(AppDeployment).join(AppDefinition).filter((Package.pkg_name == package_name)).filter((Package.version == version)).filter((Package.revision == revision)).filter((AppDefinition.app_type == name)).filter((AppDeployment.environment == environment)).order_by(AppDeployment.realized.desc(), AppDeployment.id.desc()).first()
|
Give all deployment information for a given package and version
deployed to a given application tier and environment
|
tagopsdb/deploy/deploy.py
|
list_app_deployment_info
|
ifwe/tagopsdb
| 0
|
python
|
def list_app_deployment_info(package_name, environment, name, version, revision):
'Give all deployment information for a given package and version\n deployed to a given application tier and environment\n '
return Session.query(Deployment, AppDeployment, Package).join(Package).join(AppDeployment).join(AppDefinition).filter((Package.pkg_name == package_name)).filter((Package.version == version)).filter((Package.revision == revision)).filter((AppDefinition.app_type == name)).filter((AppDeployment.environment == environment)).order_by(AppDeployment.realized.desc(), AppDeployment.id.desc()).first()
|
def list_app_deployment_info(package_name, environment, name, version, revision):
'Give all deployment information for a given package and version\n deployed to a given application tier and environment\n '
return Session.query(Deployment, AppDeployment, Package).join(Package).join(AppDeployment).join(AppDefinition).filter((Package.pkg_name == package_name)).filter((Package.version == version)).filter((Package.revision == revision)).filter((AppDefinition.app_type == name)).filter((AppDeployment.environment == environment)).order_by(AppDeployment.realized.desc(), AppDeployment.id.desc()).first()<|docstring|>Give all deployment information for a given package and version
deployed to a given application tier and environment<|endoftext|>
|
1e6998b1b0d4bd876ec3aae45757032ef942332d4fcba5e9f89de0d32e6c365c
|
def list_host_deployment_info(package_name, environment, version=None, revision=None, apptypes=None):
'Give all deployment information for a given package\n deployed to hosts for given (or all) application types\n and in given environment\n '
dep_info = Session.query(Deployment, HostDeployment, Host.hostname, Package).join(Package).join(HostDeployment).join(Host).join(AppDefinition)
if (version is not None):
dep_info = dep_info.filter((Package.version == version))
if (revision is not None):
dep_info = dep_info.filter((Package.revision == revision))
if (apptypes is not None):
dep_info = dep_info.filter(AppDefinition.app_type.in_(apptypes))
return dep_info.filter((Package.pkg_name == package_name)).filter((Host.environment == environment)).order_by(Host.hostname, HostDeployment.realized.asc()).all()
|
Give all deployment information for a given package
deployed to hosts for given (or all) application types
and in given environment
|
tagopsdb/deploy/deploy.py
|
list_host_deployment_info
|
ifwe/tagopsdb
| 0
|
python
|
def list_host_deployment_info(package_name, environment, version=None, revision=None, apptypes=None):
'Give all deployment information for a given package\n deployed to hosts for given (or all) application types\n and in given environment\n '
dep_info = Session.query(Deployment, HostDeployment, Host.hostname, Package).join(Package).join(HostDeployment).join(Host).join(AppDefinition)
if (version is not None):
dep_info = dep_info.filter((Package.version == version))
if (revision is not None):
dep_info = dep_info.filter((Package.revision == revision))
if (apptypes is not None):
dep_info = dep_info.filter(AppDefinition.app_type.in_(apptypes))
return dep_info.filter((Package.pkg_name == package_name)).filter((Host.environment == environment)).order_by(Host.hostname, HostDeployment.realized.asc()).all()
|
def list_host_deployment_info(package_name, environment, version=None, revision=None, apptypes=None):
'Give all deployment information for a given package\n deployed to hosts for given (or all) application types\n and in given environment\n '
dep_info = Session.query(Deployment, HostDeployment, Host.hostname, Package).join(Package).join(HostDeployment).join(Host).join(AppDefinition)
if (version is not None):
dep_info = dep_info.filter((Package.version == version))
if (revision is not None):
dep_info = dep_info.filter((Package.revision == revision))
if (apptypes is not None):
dep_info = dep_info.filter(AppDefinition.app_type.in_(apptypes))
return dep_info.filter((Package.pkg_name == package_name)).filter((Host.environment == environment)).order_by(Host.hostname, HostDeployment.realized.asc()).all()<|docstring|>Give all deployment information for a given package
deployed to hosts for given (or all) application types
and in given environment<|endoftext|>
|
f3b6855c89a0ca118d6b59d7e5c9718b25ba045daced5b48cf15fbb5a3ef8003
|
def find_specific_app_deployment(package_name, environment, apptype, version=None):
"Temporary workaround method for 'show' command to find a specific\n deployment on a given tier.\n "
app_dep = Session.query(AppDeployment).join(Package).join(PackageDefinition).filter((PackageDefinition.name == package_name)).filter((AppDeployment.environment == environment)).filter((AppDeployment.status != 'invalidated')).filter((AppDeployment.app_id == apptype.id))
if (version is not None):
app_dep = app_dep.filter((Package.version == version))
return app_dep.order_by(AppDeployment.id.desc()).first()
|
Temporary workaround method for 'show' command to find a specific
deployment on a given tier.
|
tagopsdb/deploy/deploy.py
|
find_specific_app_deployment
|
ifwe/tagopsdb
| 0
|
python
|
def find_specific_app_deployment(package_name, environment, apptype, version=None):
"Temporary workaround method for 'show' command to find a specific\n deployment on a given tier.\n "
app_dep = Session.query(AppDeployment).join(Package).join(PackageDefinition).filter((PackageDefinition.name == package_name)).filter((AppDeployment.environment == environment)).filter((AppDeployment.status != 'invalidated')).filter((AppDeployment.app_id == apptype.id))
if (version is not None):
app_dep = app_dep.filter((Package.version == version))
return app_dep.order_by(AppDeployment.id.desc()).first()
|
def find_specific_app_deployment(package_name, environment, apptype, version=None):
"Temporary workaround method for 'show' command to find a specific\n deployment on a given tier.\n "
app_dep = Session.query(AppDeployment).join(Package).join(PackageDefinition).filter((PackageDefinition.name == package_name)).filter((AppDeployment.environment == environment)).filter((AppDeployment.status != 'invalidated')).filter((AppDeployment.app_id == apptype.id))
if (version is not None):
app_dep = app_dep.filter((Package.version == version))
return app_dep.order_by(AppDeployment.id.desc()).first()<|docstring|>Temporary workaround method for 'show' command to find a specific
deployment on a given tier.<|endoftext|>
|
af77f774086ea3f1bb5380b4f91e188b62ac891a7ad0a8deeade2d6d9c643641
|
def find_current_app_deployment(package_name, environment, apptype):
"Temporary workaround method for 'show' command to find the current\n deployment on a given tier.\n "
return find_specific_app_deployment(package_name, environment, apptype)
|
Temporary workaround method for 'show' command to find the current
deployment on a given tier.
|
tagopsdb/deploy/deploy.py
|
find_current_app_deployment
|
ifwe/tagopsdb
| 0
|
python
|
def find_current_app_deployment(package_name, environment, apptype):
"Temporary workaround method for 'show' command to find the current\n deployment on a given tier.\n "
return find_specific_app_deployment(package_name, environment, apptype)
|
def find_current_app_deployment(package_name, environment, apptype):
"Temporary workaround method for 'show' command to find the current\n deployment on a given tier.\n "
return find_specific_app_deployment(package_name, environment, apptype)<|docstring|>Temporary workaround method for 'show' command to find the current
deployment on a given tier.<|endoftext|>
|
743b75d6342cade021db3888a44f33ef7450f68cd4e144544593fbffd3ecfdb3
|
def find_previous_app_deployment(package_name, environment, apptype):
"Temporary workaround method for 'show' command to find the previous\n validated deployment on a given tier (ignoring the current deployment,\n validated or not).\n "
app_dep = find_current_app_deployment(package_name, environment, apptype)
if (app_dep is None):
return None
return Session.query(AppDeployment).join(Package).join(PackageDefinition).filter((PackageDefinition.name == package_name)).filter((AppDeployment.environment == environment)).filter((AppDeployment.status == 'validated')).filter((AppDeployment.app_id == apptype.id)).filter((AppDeployment.id != app_dep.id)).order_by(AppDeployment.id.desc()).first()
|
Temporary workaround method for 'show' command to find the previous
validated deployment on a given tier (ignoring the current deployment,
validated or not).
|
tagopsdb/deploy/deploy.py
|
find_previous_app_deployment
|
ifwe/tagopsdb
| 0
|
python
|
def find_previous_app_deployment(package_name, environment, apptype):
"Temporary workaround method for 'show' command to find the previous\n validated deployment on a given tier (ignoring the current deployment,\n validated or not).\n "
app_dep = find_current_app_deployment(package_name, environment, apptype)
if (app_dep is None):
return None
return Session.query(AppDeployment).join(Package).join(PackageDefinition).filter((PackageDefinition.name == package_name)).filter((AppDeployment.environment == environment)).filter((AppDeployment.status == 'validated')).filter((AppDeployment.app_id == apptype.id)).filter((AppDeployment.id != app_dep.id)).order_by(AppDeployment.id.desc()).first()
|
def find_previous_app_deployment(package_name, environment, apptype):
"Temporary workaround method for 'show' command to find the previous\n validated deployment on a given tier (ignoring the current deployment,\n validated or not).\n "
app_dep = find_current_app_deployment(package_name, environment, apptype)
if (app_dep is None):
return None
return Session.query(AppDeployment).join(Package).join(PackageDefinition).filter((PackageDefinition.name == package_name)).filter((AppDeployment.environment == environment)).filter((AppDeployment.status == 'validated')).filter((AppDeployment.app_id == apptype.id)).filter((AppDeployment.id != app_dep.id)).order_by(AppDeployment.id.desc()).first()<|docstring|>Temporary workaround method for 'show' command to find the previous
validated deployment on a given tier (ignoring the current deployment,
validated or not).<|endoftext|>
|
fde37ae4060a2f4ac0fbdba1f084cc21fd11d8bae1d40f69d4502a23552f89a0
|
def find_current_host_deployments(package_name, environment, apptype, version=None):
"Temporary workaround method for 'show' command to find the current\n host deployments for a given tier.\n "
host_deps = Session.query(HostDeployment).join(Package).join(HostDeployment).join(Host).join(AppDefinition).filter((Package.pkg_name == package_name)).filter((Host.environment == environment)).filter((AppDefinition.id == apptype.id))
if (version is not None):
host_deps = host_deps.filter((Package.version == version))
return host_deps.order_by(Host.hostname, HostDeployment.realized.asc()).all()
|
Temporary workaround method for 'show' command to find the current
host deployments for a given tier.
|
tagopsdb/deploy/deploy.py
|
find_current_host_deployments
|
ifwe/tagopsdb
| 0
|
python
|
def find_current_host_deployments(package_name, environment, apptype, version=None):
"Temporary workaround method for 'show' command to find the current\n host deployments for a given tier.\n "
host_deps = Session.query(HostDeployment).join(Package).join(HostDeployment).join(Host).join(AppDefinition).filter((Package.pkg_name == package_name)).filter((Host.environment == environment)).filter((AppDefinition.id == apptype.id))
if (version is not None):
host_deps = host_deps.filter((Package.version == version))
return host_deps.order_by(Host.hostname, HostDeployment.realized.asc()).all()
|
def find_current_host_deployments(package_name, environment, apptype, version=None):
"Temporary workaround method for 'show' command to find the current\n host deployments for a given tier.\n "
host_deps = Session.query(HostDeployment).join(Package).join(HostDeployment).join(Host).join(AppDefinition).filter((Package.pkg_name == package_name)).filter((Host.environment == environment)).filter((AppDefinition.id == apptype.id))
if (version is not None):
host_deps = host_deps.filter((Package.version == version))
return host_deps.order_by(Host.hostname, HostDeployment.realized.asc()).all()<|docstring|>Temporary workaround method for 'show' command to find the current
host deployments for a given tier.<|endoftext|>
|
89bc6182f4858a627df21c9476398a7b1807d5d08da3705f455e97af2ba9a1f5
|
def _process_input(wrapped_function: Callable):
'\n Allow `run_test` to take a single positional argument that is a\n `list` or `tuple` in lieu of using multiple positional/keyword\n arguments as usual. If `len` of this argument returns `3`, then\n it assumes that `kwargs` is an empty `dict` and that the expected\n result/outcome is the last item.\n '
def decorator(wrapped_function: Callable):
wrapped_signature = inspect.signature(wrapped_function)
@functools.wraps(wrapped_function)
def wrapper(*args, **kwargs):
arguments = wrapped_signature.bind(*args, **kwargs).arguments
if ((len(args) == 1) and (len(kwargs) == 0) and isinstance(args[0], (list, tuple))):
inputs = args[0]
if (len(inputs) not in (3, 4)):
raise RuntimeError(f'{args} is an invalid input to run_test.')
new_kwargs = {'func': inputs[0], 'args': inputs[1]}
new_kwargs['kwargs'] = (inputs[2] if (len(inputs) == 4) else {})
new_kwargs['expected_outcome'] = (inputs[3] if (len(inputs) == 4) else inputs[2])
else:
new_kwargs = {argname: argval for (argname, argval) in arguments.items()}
return wrapped_function(**new_kwargs)
return wrapper
return decorator(wrapped_function)
|
Allow `run_test` to take a single positional argument that is a
`list` or `tuple` in lieu of using multiple positional/keyword
arguments as usual. If `len` of this argument returns `3`, then
it assumes that `kwargs` is an empty `dict` and that the expected
result/outcome is the last item.
|
plasmapy/utils/pytest_helpers/pytest_helpers.py
|
_process_input
|
alfkoehn/PlasmaPy
| 429
|
python
|
def _process_input(wrapped_function: Callable):
'\n Allow `run_test` to take a single positional argument that is a\n `list` or `tuple` in lieu of using multiple positional/keyword\n arguments as usual. If `len` of this argument returns `3`, then\n it assumes that `kwargs` is an empty `dict` and that the expected\n result/outcome is the last item.\n '
def decorator(wrapped_function: Callable):
wrapped_signature = inspect.signature(wrapped_function)
@functools.wraps(wrapped_function)
def wrapper(*args, **kwargs):
arguments = wrapped_signature.bind(*args, **kwargs).arguments
if ((len(args) == 1) and (len(kwargs) == 0) and isinstance(args[0], (list, tuple))):
inputs = args[0]
if (len(inputs) not in (3, 4)):
raise RuntimeError(f'{args} is an invalid input to run_test.')
new_kwargs = {'func': inputs[0], 'args': inputs[1]}
new_kwargs['kwargs'] = (inputs[2] if (len(inputs) == 4) else {})
new_kwargs['expected_outcome'] = (inputs[3] if (len(inputs) == 4) else inputs[2])
else:
new_kwargs = {argname: argval for (argname, argval) in arguments.items()}
return wrapped_function(**new_kwargs)
return wrapper
return decorator(wrapped_function)
|
def _process_input(wrapped_function: Callable):
'\n Allow `run_test` to take a single positional argument that is a\n `list` or `tuple` in lieu of using multiple positional/keyword\n arguments as usual. If `len` of this argument returns `3`, then\n it assumes that `kwargs` is an empty `dict` and that the expected\n result/outcome is the last item.\n '
def decorator(wrapped_function: Callable):
wrapped_signature = inspect.signature(wrapped_function)
@functools.wraps(wrapped_function)
def wrapper(*args, **kwargs):
arguments = wrapped_signature.bind(*args, **kwargs).arguments
if ((len(args) == 1) and (len(kwargs) == 0) and isinstance(args[0], (list, tuple))):
inputs = args[0]
if (len(inputs) not in (3, 4)):
raise RuntimeError(f'{args} is an invalid input to run_test.')
new_kwargs = {'func': inputs[0], 'args': inputs[1]}
new_kwargs['kwargs'] = (inputs[2] if (len(inputs) == 4) else {})
new_kwargs['expected_outcome'] = (inputs[3] if (len(inputs) == 4) else inputs[2])
else:
new_kwargs = {argname: argval for (argname, argval) in arguments.items()}
return wrapped_function(**new_kwargs)
return wrapper
return decorator(wrapped_function)<|docstring|>Allow `run_test` to take a single positional argument that is a
`list` or `tuple` in lieu of using multiple positional/keyword
arguments as usual. If `len` of this argument returns `3`, then
it assumes that `kwargs` is an empty `dict` and that the expected
result/outcome is the last item.<|endoftext|>
|
2635293f6037d4ab9795d235de9bf496d722a633f42bd2691c6c6f0e2b12a398
|
@_process_input
def run_test(func, args: Any=(), kwargs: Dict=None, expected_outcome: Any=None, rtol: float=0.0, atol: float=0.0):
'\n Test that a function or class returns the expected result, raises\n the expected exception, or issues an expected warning for the\n supplied positional and keyword arguments.\n\n Parameters\n ----------\n func: callable, list, or tuple\n The `callable` to be tested. The first (and sole) argument to\n `~plasmapy.utils.run_test` may alternatively be a list or tuple\n containing these arguments (optionally omitting `kwargs` if the\n `len` returns 3).\n\n args: tuple or object\n The positional arguments to `func`.\n\n kwargs: dict\n The keyword arguments to `func`.\n\n expected_outcome: object\n The expected result, exception, or warning from\n `func(*args, **kwargs)`. This may also be a `tuple` of length\n two that contains the expected result as the first item and the\n expected warning as the second item.\n\n rtol : float\n The relative tolerance to be used by `~numpy.allclose` in an\n element-wise comparison, defaulting to `0`.\n\n atol : float\n The absolute tolerance to be used by `~numpy.allclose` in an\n element-wise comparison, defaulting to `0`.\n\n Returns\n -------\n `None`\n\n Raises\n ------\n ~plasmapy.tests.helpers.exceptions.UnexpectedResultFail\n If the test returns a result that is different from the expected\n result.\n\n ~plasmapy.tests.helpers.exceptions.TypeMismatchFail\n If the actual result is of a different type than the expected\n result.\n\n ~plasmapy.tests.helpers.exceptions.UnexpectedExceptionFail\n If an exception occurs when no exception or a different\n exception is expected.\n\n ~plasmapy.tests.helpers.exceptions.MissingExceptionFail\n If no exception is raised when an exception is expected.\n\n ~plasmapy.tests.helpers.exceptions.MissingWarningFail\n An expected warning is not issued.\n\n ~astropy.units.UnitsError\n If the result has different units than expected.\n\n TypeError\n If the equality of the actual result and expected result cannot\n be determined (e.g., for a class lacking an `__eq__` method.\n\n Examples\n --------\n The simplest way to use `~plasmapy.utils.run_test` is with inputs\n for the function to be tests, the positional arguments in a `tuple`\n or `list`, the keyword arguments in a `dict`, and then finally the\n expected result or outcome.\n\n >>> args = tuple()\n >>> kwargs = dict()\n >>> run_test(lambda: 0, args, kwargs, 0)\n\n If `expected` is a an exception or warning, then\n `~plasmapy.utils.pytest_helpers.run_test` will raise an exception if\n the expected exception is not raised or the expected warning is not\n issued.\n\n >>> from warnings import warn\n\n >>> issue_warning = lambda: warn("Electrons are weird!", UserWarning)\n >>> run_test(issue_warning, args, kwargs, UserWarning)\n\n >>> def raise_exception(): raise RuntimeError\n >>> run_test(raise_exception, args, kwargs, RuntimeError)\n\n For warnings, `~plasmapy.utils.run_test` can accept a `tuple` of two\n items where the first item is the expected result and the second\n item is the expected warning.\n\n .. code-block:: python\n\n def return_arg_and_warn(x):\n warn("", UserWarning)\n return x\n\n run_test(return_arg_and_warn, 1, {}, (1, UserWarning))\n\n This function is also flexible enough that it can accept a `tuple`\n or `list` as its sole argument, with the arguments in the same\n order as in the function signature.\n\n >>> return_arg = lambda x: x\n >>> inputs = (return_arg, 42, {}, 42)\n >>> run_test(inputs)\n\n If the `tuple` or `list` has a length of `3`, then\n `~plasmapy.utils.run_test` assumes that `kwargs` is missing.\n\n >>> inputs_without_kwargs = [return_arg, 42, 42]\n >>> run_test(inputs_without_kwargs)\n\n .. code-block:: python\n\n import pytest\n\n def func(x, raise_exception=False, issue_warning=False):\n if raise_exception:\n raise ValueError("I\'m sorry, Dave. I\'m afraid I can\'t do that.")\n elif issue_warning:\n warn("Open the pod bay doors, HAL.", UserWarning)\n return x\n\n inputs_table = [\n (func, 1, 1),\n (func, (2,), {}, 2),\n (func, 3, {\'raise_exception\': True}, ValueError),\n (func, 4, {\'issue_warning\': True}, UserWarning),\n (func, 5, {\'issue_warning\': True}, (5, UserWarning)),\n ]\n\n @pytest.mark.parametrize(\'inputs\', inputs_table)\n def test_func(inputs):\n run_test(inputs)\n\n '
if (kwargs is None):
kwargs = {}
if (not isinstance(args, tuple)):
args = (args,)
if (not callable(func)):
raise InvalidTestError(f'The argument func = {func} to run_test must be callable.')
call_str = call_string(func, args, kwargs)
expected = collections.defaultdict((lambda : None))
if inspect.isclass(expected_outcome):
subclass_of_Exception = issubclass(expected_outcome, Exception)
subclass_of_Warning = issubclass(expected_outcome, Warning)
if subclass_of_Warning:
expected['warning'] = expected_outcome
elif (subclass_of_Exception and (not subclass_of_Warning)):
expected['exception'] = expected_outcome
if isinstance(expected_outcome, tuple):
length_not_two = (len(expected_outcome) != 2)
is_not_class = (not inspect.isclass(expected_outcome[1]))
is_not_warning = (True if is_not_class else (not issubclass(expected_outcome[1], Warning)))
if (length_not_two or is_not_warning):
raise InvalidTestError('Invalid expected outcome in run_test.')
expected['result'] = expected_outcome[0]
expected['warning'] = expected_outcome[1]
if ((expected['exception'] is None) and (expected['warning'] is None)):
expected['result'] = expected_outcome
if expected['exception']:
expected_exception = expected['exception']
try:
result = func(*args, **kwargs)
except expected_exception as exc_result:
resulting_exception = exc_result.__reduce__()[0]
if (resulting_exception.__name__ == expected_exception.__name__):
return None
else:
raise UnexpectedExceptionFail(f'The command {call_str} did not specifically raise {_name_with_article(expected_exception)} as expected, but instead raised {_name_with_article(resulting_exception)} which is a subclass of the expected exception.')
except Exception as exc_unexpected_exception:
unexpected_exception = exc_unexpected_exception.__reduce__()[0]
raise UnexpectedExceptionFail(f'The command {call_str} did not raise {_name_with_article(expected_exception)} as expected, but instead raised {_name_with_article(unexpected_exception)}.') from exc_unexpected_exception
else:
raise MissingExceptionFail(f'The command {call_str} did not raise {_name_with_article(expected_exception)} as expected, but instead returned {_object_name(result)}.')
try:
with pytest.warns(expected['warning']):
result = func(*args, **kwargs)
except pytest.raises.Exception as missing_warning:
raise MissingWarningFail(f"The command {call_str} should issue {_name_with_article(expected['warning'])}, but instead returned {_object_name(result)}.") from missing_warning
except Exception as exception_no_warning:
raise UnexpectedExceptionFail(f"The command {call_str} unexpectedly raised {_name_with_article(exception_no_warning.__reduce__()[0])} instead of returning the expected value of {_object_name(expected['result'])}.") from exception_no_warning
if isinstance(expected['result'], u.UnitBase):
if isinstance(result, u.UnitBase):
if (result != expected['result']):
raise u.UnitsError(f"The command {call_str} returned {_object_name(result)} instead of the expected value of {_object_name(expected['result'])}.")
return None
if (not isinstance(result, (u.Quantity, const.Constant, const.EMConstant))):
raise u.UnitsError(f"The command {call_str} returned {_object_name(result)} instead of a quantity or constant with units of {_object_name(expected['result'])}.")
if (result.unit != expected['result']):
raise u.UnitsError(f"The command {call_str} returned {_object_name(result)}, which has units of {result.unit} instead of the expected units of {_object_name(expected['result'])}.")
return None
if isinstance(expected['result'], (u.Quantity, const.Constant, const.EMConstant)):
if (not (result.unit == expected['result'].unit)):
raise u.UnitsError(f"The command {call_str} returned {_object_name(result)} which has different units than the expected result of {_object_name(expected['result'])}.")
if np.allclose(result.value, expected['result'].value):
return None
if (expected['result'] is None):
return None
if (type(result) != type(expected['result'])):
raise TypeMismatchFail(f"The command {call_str} returned {_object_name(result)} which has type {_object_name(type(result))}, instead of the expected value of {_object_name(expected['result'])} which has type {_object_name(type(expected['result']))}.")
try:
if (result == expected['result']):
return None
except Exception as exc_equality:
raise TypeError(f"The equality of {_object_name(result)} and {_object_name(expected['result'])} cannot be evaluated.") from exc_equality
try:
different_length = (len(result) != len(expected['result']))
except Exception:
different_length = False
try:
all_close = np.allclose(expected['result'], result, rtol=rtol, atol=atol)
if (all_close and (not different_length)):
return None
except Exception:
pass
errmsg = f"The command {call_str} returned {_object_name(result)} instead of the expected value of {_object_name(expected['result'])}."
if (atol or rtol):
errmsg += ' with '
if atol:
errmsg += f'atol = {atol}'
if (atol and rtol):
errmsg += ' and '
if rtol:
errmsg += f'rtol = {rtol}'
errmsg += '.'
raise UnexpectedResultFail(errmsg)
|
Test that a function or class returns the expected result, raises
the expected exception, or issues an expected warning for the
supplied positional and keyword arguments.
Parameters
----------
func: callable, list, or tuple
The `callable` to be tested. The first (and sole) argument to
`~plasmapy.utils.run_test` may alternatively be a list or tuple
containing these arguments (optionally omitting `kwargs` if the
`len` returns 3).
args: tuple or object
The positional arguments to `func`.
kwargs: dict
The keyword arguments to `func`.
expected_outcome: object
The expected result, exception, or warning from
`func(*args, **kwargs)`. This may also be a `tuple` of length
two that contains the expected result as the first item and the
expected warning as the second item.
rtol : float
The relative tolerance to be used by `~numpy.allclose` in an
element-wise comparison, defaulting to `0`.
atol : float
The absolute tolerance to be used by `~numpy.allclose` in an
element-wise comparison, defaulting to `0`.
Returns
-------
`None`
Raises
------
~plasmapy.tests.helpers.exceptions.UnexpectedResultFail
If the test returns a result that is different from the expected
result.
~plasmapy.tests.helpers.exceptions.TypeMismatchFail
If the actual result is of a different type than the expected
result.
~plasmapy.tests.helpers.exceptions.UnexpectedExceptionFail
If an exception occurs when no exception or a different
exception is expected.
~plasmapy.tests.helpers.exceptions.MissingExceptionFail
If no exception is raised when an exception is expected.
~plasmapy.tests.helpers.exceptions.MissingWarningFail
An expected warning is not issued.
~astropy.units.UnitsError
If the result has different units than expected.
TypeError
If the equality of the actual result and expected result cannot
be determined (e.g., for a class lacking an `__eq__` method.
Examples
--------
The simplest way to use `~plasmapy.utils.run_test` is with inputs
for the function to be tests, the positional arguments in a `tuple`
or `list`, the keyword arguments in a `dict`, and then finally the
expected result or outcome.
>>> args = tuple()
>>> kwargs = dict()
>>> run_test(lambda: 0, args, kwargs, 0)
If `expected` is a an exception or warning, then
`~plasmapy.utils.pytest_helpers.run_test` will raise an exception if
the expected exception is not raised or the expected warning is not
issued.
>>> from warnings import warn
>>> issue_warning = lambda: warn("Electrons are weird!", UserWarning)
>>> run_test(issue_warning, args, kwargs, UserWarning)
>>> def raise_exception(): raise RuntimeError
>>> run_test(raise_exception, args, kwargs, RuntimeError)
For warnings, `~plasmapy.utils.run_test` can accept a `tuple` of two
items where the first item is the expected result and the second
item is the expected warning.
.. code-block:: python
def return_arg_and_warn(x):
warn("", UserWarning)
return x
run_test(return_arg_and_warn, 1, {}, (1, UserWarning))
This function is also flexible enough that it can accept a `tuple`
or `list` as its sole argument, with the arguments in the same
order as in the function signature.
>>> return_arg = lambda x: x
>>> inputs = (return_arg, 42, {}, 42)
>>> run_test(inputs)
If the `tuple` or `list` has a length of `3`, then
`~plasmapy.utils.run_test` assumes that `kwargs` is missing.
>>> inputs_without_kwargs = [return_arg, 42, 42]
>>> run_test(inputs_without_kwargs)
.. code-block:: python
import pytest
def func(x, raise_exception=False, issue_warning=False):
if raise_exception:
raise ValueError("I'm sorry, Dave. I'm afraid I can't do that.")
elif issue_warning:
warn("Open the pod bay doors, HAL.", UserWarning)
return x
inputs_table = [
(func, 1, 1),
(func, (2,), {}, 2),
(func, 3, {'raise_exception': True}, ValueError),
(func, 4, {'issue_warning': True}, UserWarning),
(func, 5, {'issue_warning': True}, (5, UserWarning)),
]
@pytest.mark.parametrize('inputs', inputs_table)
def test_func(inputs):
run_test(inputs)
|
plasmapy/utils/pytest_helpers/pytest_helpers.py
|
run_test
|
alfkoehn/PlasmaPy
| 429
|
python
|
@_process_input
def run_test(func, args: Any=(), kwargs: Dict=None, expected_outcome: Any=None, rtol: float=0.0, atol: float=0.0):
'\n Test that a function or class returns the expected result, raises\n the expected exception, or issues an expected warning for the\n supplied positional and keyword arguments.\n\n Parameters\n ----------\n func: callable, list, or tuple\n The `callable` to be tested. The first (and sole) argument to\n `~plasmapy.utils.run_test` may alternatively be a list or tuple\n containing these arguments (optionally omitting `kwargs` if the\n `len` returns 3).\n\n args: tuple or object\n The positional arguments to `func`.\n\n kwargs: dict\n The keyword arguments to `func`.\n\n expected_outcome: object\n The expected result, exception, or warning from\n `func(*args, **kwargs)`. This may also be a `tuple` of length\n two that contains the expected result as the first item and the\n expected warning as the second item.\n\n rtol : float\n The relative tolerance to be used by `~numpy.allclose` in an\n element-wise comparison, defaulting to `0`.\n\n atol : float\n The absolute tolerance to be used by `~numpy.allclose` in an\n element-wise comparison, defaulting to `0`.\n\n Returns\n -------\n `None`\n\n Raises\n ------\n ~plasmapy.tests.helpers.exceptions.UnexpectedResultFail\n If the test returns a result that is different from the expected\n result.\n\n ~plasmapy.tests.helpers.exceptions.TypeMismatchFail\n If the actual result is of a different type than the expected\n result.\n\n ~plasmapy.tests.helpers.exceptions.UnexpectedExceptionFail\n If an exception occurs when no exception or a different\n exception is expected.\n\n ~plasmapy.tests.helpers.exceptions.MissingExceptionFail\n If no exception is raised when an exception is expected.\n\n ~plasmapy.tests.helpers.exceptions.MissingWarningFail\n An expected warning is not issued.\n\n ~astropy.units.UnitsError\n If the result has different units than expected.\n\n TypeError\n If the equality of the actual result and expected result cannot\n be determined (e.g., for a class lacking an `__eq__` method.\n\n Examples\n --------\n The simplest way to use `~plasmapy.utils.run_test` is with inputs\n for the function to be tests, the positional arguments in a `tuple`\n or `list`, the keyword arguments in a `dict`, and then finally the\n expected result or outcome.\n\n >>> args = tuple()\n >>> kwargs = dict()\n >>> run_test(lambda: 0, args, kwargs, 0)\n\n If `expected` is a an exception or warning, then\n `~plasmapy.utils.pytest_helpers.run_test` will raise an exception if\n the expected exception is not raised or the expected warning is not\n issued.\n\n >>> from warnings import warn\n\n >>> issue_warning = lambda: warn("Electrons are weird!", UserWarning)\n >>> run_test(issue_warning, args, kwargs, UserWarning)\n\n >>> def raise_exception(): raise RuntimeError\n >>> run_test(raise_exception, args, kwargs, RuntimeError)\n\n For warnings, `~plasmapy.utils.run_test` can accept a `tuple` of two\n items where the first item is the expected result and the second\n item is the expected warning.\n\n .. code-block:: python\n\n def return_arg_and_warn(x):\n warn(, UserWarning)\n return x\n\n run_test(return_arg_and_warn, 1, {}, (1, UserWarning))\n\n This function is also flexible enough that it can accept a `tuple`\n or `list` as its sole argument, with the arguments in the same\n order as in the function signature.\n\n >>> return_arg = lambda x: x\n >>> inputs = (return_arg, 42, {}, 42)\n >>> run_test(inputs)\n\n If the `tuple` or `list` has a length of `3`, then\n `~plasmapy.utils.run_test` assumes that `kwargs` is missing.\n\n >>> inputs_without_kwargs = [return_arg, 42, 42]\n >>> run_test(inputs_without_kwargs)\n\n .. code-block:: python\n\n import pytest\n\n def func(x, raise_exception=False, issue_warning=False):\n if raise_exception:\n raise ValueError("I\'m sorry, Dave. I\'m afraid I can\'t do that.")\n elif issue_warning:\n warn("Open the pod bay doors, HAL.", UserWarning)\n return x\n\n inputs_table = [\n (func, 1, 1),\n (func, (2,), {}, 2),\n (func, 3, {\'raise_exception\': True}, ValueError),\n (func, 4, {\'issue_warning\': True}, UserWarning),\n (func, 5, {\'issue_warning\': True}, (5, UserWarning)),\n ]\n\n @pytest.mark.parametrize(\'inputs\', inputs_table)\n def test_func(inputs):\n run_test(inputs)\n\n '
if (kwargs is None):
kwargs = {}
if (not isinstance(args, tuple)):
args = (args,)
if (not callable(func)):
raise InvalidTestError(f'The argument func = {func} to run_test must be callable.')
call_str = call_string(func, args, kwargs)
expected = collections.defaultdict((lambda : None))
if inspect.isclass(expected_outcome):
subclass_of_Exception = issubclass(expected_outcome, Exception)
subclass_of_Warning = issubclass(expected_outcome, Warning)
if subclass_of_Warning:
expected['warning'] = expected_outcome
elif (subclass_of_Exception and (not subclass_of_Warning)):
expected['exception'] = expected_outcome
if isinstance(expected_outcome, tuple):
length_not_two = (len(expected_outcome) != 2)
is_not_class = (not inspect.isclass(expected_outcome[1]))
is_not_warning = (True if is_not_class else (not issubclass(expected_outcome[1], Warning)))
if (length_not_two or is_not_warning):
raise InvalidTestError('Invalid expected outcome in run_test.')
expected['result'] = expected_outcome[0]
expected['warning'] = expected_outcome[1]
if ((expected['exception'] is None) and (expected['warning'] is None)):
expected['result'] = expected_outcome
if expected['exception']:
expected_exception = expected['exception']
try:
result = func(*args, **kwargs)
except expected_exception as exc_result:
resulting_exception = exc_result.__reduce__()[0]
if (resulting_exception.__name__ == expected_exception.__name__):
return None
else:
raise UnexpectedExceptionFail(f'The command {call_str} did not specifically raise {_name_with_article(expected_exception)} as expected, but instead raised {_name_with_article(resulting_exception)} which is a subclass of the expected exception.')
except Exception as exc_unexpected_exception:
unexpected_exception = exc_unexpected_exception.__reduce__()[0]
raise UnexpectedExceptionFail(f'The command {call_str} did not raise {_name_with_article(expected_exception)} as expected, but instead raised {_name_with_article(unexpected_exception)}.') from exc_unexpected_exception
else:
raise MissingExceptionFail(f'The command {call_str} did not raise {_name_with_article(expected_exception)} as expected, but instead returned {_object_name(result)}.')
try:
with pytest.warns(expected['warning']):
result = func(*args, **kwargs)
except pytest.raises.Exception as missing_warning:
raise MissingWarningFail(f"The command {call_str} should issue {_name_with_article(expected['warning'])}, but instead returned {_object_name(result)}.") from missing_warning
except Exception as exception_no_warning:
raise UnexpectedExceptionFail(f"The command {call_str} unexpectedly raised {_name_with_article(exception_no_warning.__reduce__()[0])} instead of returning the expected value of {_object_name(expected['result'])}.") from exception_no_warning
if isinstance(expected['result'], u.UnitBase):
if isinstance(result, u.UnitBase):
if (result != expected['result']):
raise u.UnitsError(f"The command {call_str} returned {_object_name(result)} instead of the expected value of {_object_name(expected['result'])}.")
return None
if (not isinstance(result, (u.Quantity, const.Constant, const.EMConstant))):
raise u.UnitsError(f"The command {call_str} returned {_object_name(result)} instead of a quantity or constant with units of {_object_name(expected['result'])}.")
if (result.unit != expected['result']):
raise u.UnitsError(f"The command {call_str} returned {_object_name(result)}, which has units of {result.unit} instead of the expected units of {_object_name(expected['result'])}.")
return None
if isinstance(expected['result'], (u.Quantity, const.Constant, const.EMConstant)):
if (not (result.unit == expected['result'].unit)):
raise u.UnitsError(f"The command {call_str} returned {_object_name(result)} which has different units than the expected result of {_object_name(expected['result'])}.")
if np.allclose(result.value, expected['result'].value):
return None
if (expected['result'] is None):
return None
if (type(result) != type(expected['result'])):
raise TypeMismatchFail(f"The command {call_str} returned {_object_name(result)} which has type {_object_name(type(result))}, instead of the expected value of {_object_name(expected['result'])} which has type {_object_name(type(expected['result']))}.")
try:
if (result == expected['result']):
return None
except Exception as exc_equality:
raise TypeError(f"The equality of {_object_name(result)} and {_object_name(expected['result'])} cannot be evaluated.") from exc_equality
try:
different_length = (len(result) != len(expected['result']))
except Exception:
different_length = False
try:
all_close = np.allclose(expected['result'], result, rtol=rtol, atol=atol)
if (all_close and (not different_length)):
return None
except Exception:
pass
errmsg = f"The command {call_str} returned {_object_name(result)} instead of the expected value of {_object_name(expected['result'])}."
if (atol or rtol):
errmsg += ' with '
if atol:
errmsg += f'atol = {atol}'
if (atol and rtol):
errmsg += ' and '
if rtol:
errmsg += f'rtol = {rtol}'
errmsg += '.'
raise UnexpectedResultFail(errmsg)
|
@_process_input
def run_test(func, args: Any=(), kwargs: Dict=None, expected_outcome: Any=None, rtol: float=0.0, atol: float=0.0):
'\n Test that a function or class returns the expected result, raises\n the expected exception, or issues an expected warning for the\n supplied positional and keyword arguments.\n\n Parameters\n ----------\n func: callable, list, or tuple\n The `callable` to be tested. The first (and sole) argument to\n `~plasmapy.utils.run_test` may alternatively be a list or tuple\n containing these arguments (optionally omitting `kwargs` if the\n `len` returns 3).\n\n args: tuple or object\n The positional arguments to `func`.\n\n kwargs: dict\n The keyword arguments to `func`.\n\n expected_outcome: object\n The expected result, exception, or warning from\n `func(*args, **kwargs)`. This may also be a `tuple` of length\n two that contains the expected result as the first item and the\n expected warning as the second item.\n\n rtol : float\n The relative tolerance to be used by `~numpy.allclose` in an\n element-wise comparison, defaulting to `0`.\n\n atol : float\n The absolute tolerance to be used by `~numpy.allclose` in an\n element-wise comparison, defaulting to `0`.\n\n Returns\n -------\n `None`\n\n Raises\n ------\n ~plasmapy.tests.helpers.exceptions.UnexpectedResultFail\n If the test returns a result that is different from the expected\n result.\n\n ~plasmapy.tests.helpers.exceptions.TypeMismatchFail\n If the actual result is of a different type than the expected\n result.\n\n ~plasmapy.tests.helpers.exceptions.UnexpectedExceptionFail\n If an exception occurs when no exception or a different\n exception is expected.\n\n ~plasmapy.tests.helpers.exceptions.MissingExceptionFail\n If no exception is raised when an exception is expected.\n\n ~plasmapy.tests.helpers.exceptions.MissingWarningFail\n An expected warning is not issued.\n\n ~astropy.units.UnitsError\n If the result has different units than expected.\n\n TypeError\n If the equality of the actual result and expected result cannot\n be determined (e.g., for a class lacking an `__eq__` method.\n\n Examples\n --------\n The simplest way to use `~plasmapy.utils.run_test` is with inputs\n for the function to be tests, the positional arguments in a `tuple`\n or `list`, the keyword arguments in a `dict`, and then finally the\n expected result or outcome.\n\n >>> args = tuple()\n >>> kwargs = dict()\n >>> run_test(lambda: 0, args, kwargs, 0)\n\n If `expected` is a an exception or warning, then\n `~plasmapy.utils.pytest_helpers.run_test` will raise an exception if\n the expected exception is not raised or the expected warning is not\n issued.\n\n >>> from warnings import warn\n\n >>> issue_warning = lambda: warn("Electrons are weird!", UserWarning)\n >>> run_test(issue_warning, args, kwargs, UserWarning)\n\n >>> def raise_exception(): raise RuntimeError\n >>> run_test(raise_exception, args, kwargs, RuntimeError)\n\n For warnings, `~plasmapy.utils.run_test` can accept a `tuple` of two\n items where the first item is the expected result and the second\n item is the expected warning.\n\n .. code-block:: python\n\n def return_arg_and_warn(x):\n warn(, UserWarning)\n return x\n\n run_test(return_arg_and_warn, 1, {}, (1, UserWarning))\n\n This function is also flexible enough that it can accept a `tuple`\n or `list` as its sole argument, with the arguments in the same\n order as in the function signature.\n\n >>> return_arg = lambda x: x\n >>> inputs = (return_arg, 42, {}, 42)\n >>> run_test(inputs)\n\n If the `tuple` or `list` has a length of `3`, then\n `~plasmapy.utils.run_test` assumes that `kwargs` is missing.\n\n >>> inputs_without_kwargs = [return_arg, 42, 42]\n >>> run_test(inputs_without_kwargs)\n\n .. code-block:: python\n\n import pytest\n\n def func(x, raise_exception=False, issue_warning=False):\n if raise_exception:\n raise ValueError("I\'m sorry, Dave. I\'m afraid I can\'t do that.")\n elif issue_warning:\n warn("Open the pod bay doors, HAL.", UserWarning)\n return x\n\n inputs_table = [\n (func, 1, 1),\n (func, (2,), {}, 2),\n (func, 3, {\'raise_exception\': True}, ValueError),\n (func, 4, {\'issue_warning\': True}, UserWarning),\n (func, 5, {\'issue_warning\': True}, (5, UserWarning)),\n ]\n\n @pytest.mark.parametrize(\'inputs\', inputs_table)\n def test_func(inputs):\n run_test(inputs)\n\n '
if (kwargs is None):
kwargs = {}
if (not isinstance(args, tuple)):
args = (args,)
if (not callable(func)):
raise InvalidTestError(f'The argument func = {func} to run_test must be callable.')
call_str = call_string(func, args, kwargs)
expected = collections.defaultdict((lambda : None))
if inspect.isclass(expected_outcome):
subclass_of_Exception = issubclass(expected_outcome, Exception)
subclass_of_Warning = issubclass(expected_outcome, Warning)
if subclass_of_Warning:
expected['warning'] = expected_outcome
elif (subclass_of_Exception and (not subclass_of_Warning)):
expected['exception'] = expected_outcome
if isinstance(expected_outcome, tuple):
length_not_two = (len(expected_outcome) != 2)
is_not_class = (not inspect.isclass(expected_outcome[1]))
is_not_warning = (True if is_not_class else (not issubclass(expected_outcome[1], Warning)))
if (length_not_two or is_not_warning):
raise InvalidTestError('Invalid expected outcome in run_test.')
expected['result'] = expected_outcome[0]
expected['warning'] = expected_outcome[1]
if ((expected['exception'] is None) and (expected['warning'] is None)):
expected['result'] = expected_outcome
if expected['exception']:
expected_exception = expected['exception']
try:
result = func(*args, **kwargs)
except expected_exception as exc_result:
resulting_exception = exc_result.__reduce__()[0]
if (resulting_exception.__name__ == expected_exception.__name__):
return None
else:
raise UnexpectedExceptionFail(f'The command {call_str} did not specifically raise {_name_with_article(expected_exception)} as expected, but instead raised {_name_with_article(resulting_exception)} which is a subclass of the expected exception.')
except Exception as exc_unexpected_exception:
unexpected_exception = exc_unexpected_exception.__reduce__()[0]
raise UnexpectedExceptionFail(f'The command {call_str} did not raise {_name_with_article(expected_exception)} as expected, but instead raised {_name_with_article(unexpected_exception)}.') from exc_unexpected_exception
else:
raise MissingExceptionFail(f'The command {call_str} did not raise {_name_with_article(expected_exception)} as expected, but instead returned {_object_name(result)}.')
try:
with pytest.warns(expected['warning']):
result = func(*args, **kwargs)
except pytest.raises.Exception as missing_warning:
raise MissingWarningFail(f"The command {call_str} should issue {_name_with_article(expected['warning'])}, but instead returned {_object_name(result)}.") from missing_warning
except Exception as exception_no_warning:
raise UnexpectedExceptionFail(f"The command {call_str} unexpectedly raised {_name_with_article(exception_no_warning.__reduce__()[0])} instead of returning the expected value of {_object_name(expected['result'])}.") from exception_no_warning
if isinstance(expected['result'], u.UnitBase):
if isinstance(result, u.UnitBase):
if (result != expected['result']):
raise u.UnitsError(f"The command {call_str} returned {_object_name(result)} instead of the expected value of {_object_name(expected['result'])}.")
return None
if (not isinstance(result, (u.Quantity, const.Constant, const.EMConstant))):
raise u.UnitsError(f"The command {call_str} returned {_object_name(result)} instead of a quantity or constant with units of {_object_name(expected['result'])}.")
if (result.unit != expected['result']):
raise u.UnitsError(f"The command {call_str} returned {_object_name(result)}, which has units of {result.unit} instead of the expected units of {_object_name(expected['result'])}.")
return None
if isinstance(expected['result'], (u.Quantity, const.Constant, const.EMConstant)):
if (not (result.unit == expected['result'].unit)):
raise u.UnitsError(f"The command {call_str} returned {_object_name(result)} which has different units than the expected result of {_object_name(expected['result'])}.")
if np.allclose(result.value, expected['result'].value):
return None
if (expected['result'] is None):
return None
if (type(result) != type(expected['result'])):
raise TypeMismatchFail(f"The command {call_str} returned {_object_name(result)} which has type {_object_name(type(result))}, instead of the expected value of {_object_name(expected['result'])} which has type {_object_name(type(expected['result']))}.")
try:
if (result == expected['result']):
return None
except Exception as exc_equality:
raise TypeError(f"The equality of {_object_name(result)} and {_object_name(expected['result'])} cannot be evaluated.") from exc_equality
try:
different_length = (len(result) != len(expected['result']))
except Exception:
different_length = False
try:
all_close = np.allclose(expected['result'], result, rtol=rtol, atol=atol)
if (all_close and (not different_length)):
return None
except Exception:
pass
errmsg = f"The command {call_str} returned {_object_name(result)} instead of the expected value of {_object_name(expected['result'])}."
if (atol or rtol):
errmsg += ' with '
if atol:
errmsg += f'atol = {atol}'
if (atol and rtol):
errmsg += ' and '
if rtol:
errmsg += f'rtol = {rtol}'
errmsg += '.'
raise UnexpectedResultFail(errmsg)<|docstring|>Test that a function or class returns the expected result, raises
the expected exception, or issues an expected warning for the
supplied positional and keyword arguments.
Parameters
----------
func: callable, list, or tuple
The `callable` to be tested. The first (and sole) argument to
`~plasmapy.utils.run_test` may alternatively be a list or tuple
containing these arguments (optionally omitting `kwargs` if the
`len` returns 3).
args: tuple or object
The positional arguments to `func`.
kwargs: dict
The keyword arguments to `func`.
expected_outcome: object
The expected result, exception, or warning from
`func(*args, **kwargs)`. This may also be a `tuple` of length
two that contains the expected result as the first item and the
expected warning as the second item.
rtol : float
The relative tolerance to be used by `~numpy.allclose` in an
element-wise comparison, defaulting to `0`.
atol : float
The absolute tolerance to be used by `~numpy.allclose` in an
element-wise comparison, defaulting to `0`.
Returns
-------
`None`
Raises
------
~plasmapy.tests.helpers.exceptions.UnexpectedResultFail
If the test returns a result that is different from the expected
result.
~plasmapy.tests.helpers.exceptions.TypeMismatchFail
If the actual result is of a different type than the expected
result.
~plasmapy.tests.helpers.exceptions.UnexpectedExceptionFail
If an exception occurs when no exception or a different
exception is expected.
~plasmapy.tests.helpers.exceptions.MissingExceptionFail
If no exception is raised when an exception is expected.
~plasmapy.tests.helpers.exceptions.MissingWarningFail
An expected warning is not issued.
~astropy.units.UnitsError
If the result has different units than expected.
TypeError
If the equality of the actual result and expected result cannot
be determined (e.g., for a class lacking an `__eq__` method.
Examples
--------
The simplest way to use `~plasmapy.utils.run_test` is with inputs
for the function to be tests, the positional arguments in a `tuple`
or `list`, the keyword arguments in a `dict`, and then finally the
expected result or outcome.
>>> args = tuple()
>>> kwargs = dict()
>>> run_test(lambda: 0, args, kwargs, 0)
If `expected` is a an exception or warning, then
`~plasmapy.utils.pytest_helpers.run_test` will raise an exception if
the expected exception is not raised or the expected warning is not
issued.
>>> from warnings import warn
>>> issue_warning = lambda: warn("Electrons are weird!", UserWarning)
>>> run_test(issue_warning, args, kwargs, UserWarning)
>>> def raise_exception(): raise RuntimeError
>>> run_test(raise_exception, args, kwargs, RuntimeError)
For warnings, `~plasmapy.utils.run_test` can accept a `tuple` of two
items where the first item is the expected result and the second
item is the expected warning.
.. code-block:: python
def return_arg_and_warn(x):
warn("", UserWarning)
return x
run_test(return_arg_and_warn, 1, {}, (1, UserWarning))
This function is also flexible enough that it can accept a `tuple`
or `list` as its sole argument, with the arguments in the same
order as in the function signature.
>>> return_arg = lambda x: x
>>> inputs = (return_arg, 42, {}, 42)
>>> run_test(inputs)
If the `tuple` or `list` has a length of `3`, then
`~plasmapy.utils.run_test` assumes that `kwargs` is missing.
>>> inputs_without_kwargs = [return_arg, 42, 42]
>>> run_test(inputs_without_kwargs)
.. code-block:: python
import pytest
def func(x, raise_exception=False, issue_warning=False):
if raise_exception:
raise ValueError("I'm sorry, Dave. I'm afraid I can't do that.")
elif issue_warning:
warn("Open the pod bay doors, HAL.", UserWarning)
return x
inputs_table = [
(func, 1, 1),
(func, (2,), {}, 2),
(func, 3, {'raise_exception': True}, ValueError),
(func, 4, {'issue_warning': True}, UserWarning),
(func, 5, {'issue_warning': True}, (5, UserWarning)),
]
@pytest.mark.parametrize('inputs', inputs_table)
def test_func(inputs):
run_test(inputs)<|endoftext|>
|
9354b703291bde67fe517e6971e5a281dceb0f3a632539178a281643639e908b
|
def run_test_equivalent_calls(*test_inputs, require_same_type: bool=True):
"\n Test that different functions/inputs return equivalent results.\n\n Parameters\n ----------\n test_inputs\n The functions and inputs to the tests in an allowed format, as\n described below.\n\n require_same_type: bool\n If `True` (the default), then all of the results are required to\n be of the same type. If `False`, results do not need to be of\n the same type (e.g., cases like `1.0 == 1` will not raise an\n exception).\n\n Raises\n ------\n ~plasmapy.tests.helpers.exceptions.UnexpectedResultFail\n If not all of the results are equivalent, or not all of the\n results are of the same type and `require_same_type` evaluates\n to `True`.\n\n ~plasmapy.tests.helpers.exceptions.UnexpectedExceptionFail\n If an exception is raised whilst attempting to run one of the\n test cases.\n\n ~plasmapy.tests.helpers.exceptions.InvalidTestError\n If there is an error associated with the inputs or the test is\n set up incorrectly.\n\n Examples\n --------\n There are several possible formats that can be accepted by this\n `~plasmapy.utils.run_test_equivalent_calls` to test that different\n combinations of functions (or other `callable` objects), positional\n arguments, and keyword arguments return equivalent results.\n\n To test a single function that takes a single positional argument,\n then `test_inputs` may be the function followed by an arbitrary\n number of positional arguments to be included into the function.\n\n >>> def f(x): return x ** 2\n >>> run_test_equivalent_calls(f, -1, 1)\n\n To test a single function with an arbitrary number of positional and\n keyword arguments, the first argument should be the function,\n followed by an arbitrary number of `tuple` or `list` objects that\n contain a `tuple` or `list` containing the positional arguments, and\n a `dict` containing the keyword arguments.\n\n >>> def g(x, y, z): return x + y + z\n >>> run_test_equivalent_calls(g, ((1, 2, 3), {}), ((3, 2), {'z': 1}))\n\n If there is only one positional argument, then it is not necessary\n to include it in a `tuple` or `list`.\n\n >>> run_test_equivalent_calls(f, ([1], {}), ([1], {}))\n >>> run_test_equivalent_calls(f, (1, {}), (1, {}))\n\n To test multiple functions with an arbitrary number of positional\n and keyword arguments, use a series of `tuple` or `list` objects\n that contain the function for each test, a `tuple` or `list` with\n the positional arguments, and a `dict` with the keyword arguments.\n\n >>> def p(x, y=None): return x + y if y else x\n >>> def q(x, y=None): return x + 1 if y else x\n\n >>> run_test_equivalent_calls([p, (1,), {'y': 1}], [q, (2,), {'y': False}])\n\n The inputs may also be passed in as a whole as a `tuple` or `list`.\n\n >>> run_test_equivalent_calls(f, -1, 1)\n >>> run_test_equivalent_calls([f, -1, 1])\n\n If `require_same_type` is `False`, then an exception will not be\n raised if the results are of different types.\n\n >>> run_test_equivalent_calls(f, -1, 1.0, require_same_type=False)\n\n "
if (len(test_inputs) == 1):
test_inputs = test_inputs[0]
if (not isinstance(test_inputs, (tuple, list))):
raise InvalidTestError(f'The argument to run_test_equivalent_calls must be a tuple or list. The provided inputs are: {test_inputs}')
if callable(test_inputs[0]):
func = test_inputs[0]
test_inputs = test_inputs[1:]
else:
func = None
test_inputs = [(test_input if isinstance(test_input, (list, tuple)) else [test_input]) for test_input in test_inputs]
test_cases = []
for inputs in test_inputs:
test_case = {}
test_case['function'] = (func if func else inputs[0])
test_case['args'] = (inputs[0] if func else inputs[1])
if (not isinstance(test_case['args'], (list, tuple))):
test_case['args'] = [test_case['args']]
if func:
test_case['kwargs'] = (inputs[1] if (len(inputs) == 2) else {})
else:
test_case['kwargs'] = (inputs[2] if (len(inputs) == 3) else {})
try:
test_case['call string'] = call_string(test_case['function'], test_case['args'], test_case['kwargs'])
except Exception:
test_case['call string'] = f"function = {test_case['function']}, args = {test_case['args']}, and kwargs = {test_case['kwargs']}"
test_cases.append(test_case)
if (len(test_cases) < 2):
raise InvalidTestError('At least two tests are needed for run_test_equivalent_calls')
bad_inputs_errmsg = ''
for test_case in test_cases:
if (not callable(test_case['function'])):
bad_inputs_errmsg += f'''
{test_case['function']} is not callable '''
if (not isinstance(test_case['args'], (tuple, list))):
bad_inputs_errmsg += f'''
{test_case['args']} is not a list or tuple '''
if (not isinstance(test_case['kwargs'], dict)):
bad_inputs_errmsg += f'''
{test_case['kwargs']} is not a dict '''
if bad_inputs_errmsg:
raise InvalidTestError(bad_inputs_errmsg)
for test_case in test_cases:
try:
(f, args, kwargs) = (test_case['function'], test_case['args'], test_case['kwargs'])
test_case['result'] = f(*args, **kwargs)
test_case['type'] = type(test_case['result'])
except Exception as exc:
raise UnexpectedExceptionFail(f"Unable to evaluate {test_case['call string']}.")
results = [test_case['result'] for test_case in test_cases]
types = [test_case['type'] for test_case in test_cases]
try:
equals_first_result = [(result == results[0]) for result in results]
except Exception as exc:
raise UnexpectedExceptionFail('Unable to determine equality properties of results.') from exc
equals_first_type = [(result_type == types[0]) for result_type in types]
not_all_equal = (not all(equals_first_result))
not_all_same_type = (not all(equals_first_type))
if not_all_equal:
errmsg = 'The following tests did not all produce identical results:'
elif (not_all_same_type and require_same_type):
errmsg = 'The following tests did not all produce results of the same type:'
if (not_all_equal or (not_all_same_type and require_same_type)):
for test_case in test_cases:
errmsg += f'''
{test_case['call string']} yielded {test_case['result']} of type {test_case['type']}'''
raise UnexpectedResultFail(errmsg)
|
Test that different functions/inputs return equivalent results.
Parameters
----------
test_inputs
The functions and inputs to the tests in an allowed format, as
described below.
require_same_type: bool
If `True` (the default), then all of the results are required to
be of the same type. If `False`, results do not need to be of
the same type (e.g., cases like `1.0 == 1` will not raise an
exception).
Raises
------
~plasmapy.tests.helpers.exceptions.UnexpectedResultFail
If not all of the results are equivalent, or not all of the
results are of the same type and `require_same_type` evaluates
to `True`.
~plasmapy.tests.helpers.exceptions.UnexpectedExceptionFail
If an exception is raised whilst attempting to run one of the
test cases.
~plasmapy.tests.helpers.exceptions.InvalidTestError
If there is an error associated with the inputs or the test is
set up incorrectly.
Examples
--------
There are several possible formats that can be accepted by this
`~plasmapy.utils.run_test_equivalent_calls` to test that different
combinations of functions (or other `callable` objects), positional
arguments, and keyword arguments return equivalent results.
To test a single function that takes a single positional argument,
then `test_inputs` may be the function followed by an arbitrary
number of positional arguments to be included into the function.
>>> def f(x): return x ** 2
>>> run_test_equivalent_calls(f, -1, 1)
To test a single function with an arbitrary number of positional and
keyword arguments, the first argument should be the function,
followed by an arbitrary number of `tuple` or `list` objects that
contain a `tuple` or `list` containing the positional arguments, and
a `dict` containing the keyword arguments.
>>> def g(x, y, z): return x + y + z
>>> run_test_equivalent_calls(g, ((1, 2, 3), {}), ((3, 2), {'z': 1}))
If there is only one positional argument, then it is not necessary
to include it in a `tuple` or `list`.
>>> run_test_equivalent_calls(f, ([1], {}), ([1], {}))
>>> run_test_equivalent_calls(f, (1, {}), (1, {}))
To test multiple functions with an arbitrary number of positional
and keyword arguments, use a series of `tuple` or `list` objects
that contain the function for each test, a `tuple` or `list` with
the positional arguments, and a `dict` with the keyword arguments.
>>> def p(x, y=None): return x + y if y else x
>>> def q(x, y=None): return x + 1 if y else x
>>> run_test_equivalent_calls([p, (1,), {'y': 1}], [q, (2,), {'y': False}])
The inputs may also be passed in as a whole as a `tuple` or `list`.
>>> run_test_equivalent_calls(f, -1, 1)
>>> run_test_equivalent_calls([f, -1, 1])
If `require_same_type` is `False`, then an exception will not be
raised if the results are of different types.
>>> run_test_equivalent_calls(f, -1, 1.0, require_same_type=False)
|
plasmapy/utils/pytest_helpers/pytest_helpers.py
|
run_test_equivalent_calls
|
alfkoehn/PlasmaPy
| 429
|
python
|
def run_test_equivalent_calls(*test_inputs, require_same_type: bool=True):
"\n Test that different functions/inputs return equivalent results.\n\n Parameters\n ----------\n test_inputs\n The functions and inputs to the tests in an allowed format, as\n described below.\n\n require_same_type: bool\n If `True` (the default), then all of the results are required to\n be of the same type. If `False`, results do not need to be of\n the same type (e.g., cases like `1.0 == 1` will not raise an\n exception).\n\n Raises\n ------\n ~plasmapy.tests.helpers.exceptions.UnexpectedResultFail\n If not all of the results are equivalent, or not all of the\n results are of the same type and `require_same_type` evaluates\n to `True`.\n\n ~plasmapy.tests.helpers.exceptions.UnexpectedExceptionFail\n If an exception is raised whilst attempting to run one of the\n test cases.\n\n ~plasmapy.tests.helpers.exceptions.InvalidTestError\n If there is an error associated with the inputs or the test is\n set up incorrectly.\n\n Examples\n --------\n There are several possible formats that can be accepted by this\n `~plasmapy.utils.run_test_equivalent_calls` to test that different\n combinations of functions (or other `callable` objects), positional\n arguments, and keyword arguments return equivalent results.\n\n To test a single function that takes a single positional argument,\n then `test_inputs` may be the function followed by an arbitrary\n number of positional arguments to be included into the function.\n\n >>> def f(x): return x ** 2\n >>> run_test_equivalent_calls(f, -1, 1)\n\n To test a single function with an arbitrary number of positional and\n keyword arguments, the first argument should be the function,\n followed by an arbitrary number of `tuple` or `list` objects that\n contain a `tuple` or `list` containing the positional arguments, and\n a `dict` containing the keyword arguments.\n\n >>> def g(x, y, z): return x + y + z\n >>> run_test_equivalent_calls(g, ((1, 2, 3), {}), ((3, 2), {'z': 1}))\n\n If there is only one positional argument, then it is not necessary\n to include it in a `tuple` or `list`.\n\n >>> run_test_equivalent_calls(f, ([1], {}), ([1], {}))\n >>> run_test_equivalent_calls(f, (1, {}), (1, {}))\n\n To test multiple functions with an arbitrary number of positional\n and keyword arguments, use a series of `tuple` or `list` objects\n that contain the function for each test, a `tuple` or `list` with\n the positional arguments, and a `dict` with the keyword arguments.\n\n >>> def p(x, y=None): return x + y if y else x\n >>> def q(x, y=None): return x + 1 if y else x\n\n >>> run_test_equivalent_calls([p, (1,), {'y': 1}], [q, (2,), {'y': False}])\n\n The inputs may also be passed in as a whole as a `tuple` or `list`.\n\n >>> run_test_equivalent_calls(f, -1, 1)\n >>> run_test_equivalent_calls([f, -1, 1])\n\n If `require_same_type` is `False`, then an exception will not be\n raised if the results are of different types.\n\n >>> run_test_equivalent_calls(f, -1, 1.0, require_same_type=False)\n\n "
if (len(test_inputs) == 1):
test_inputs = test_inputs[0]
if (not isinstance(test_inputs, (tuple, list))):
raise InvalidTestError(f'The argument to run_test_equivalent_calls must be a tuple or list. The provided inputs are: {test_inputs}')
if callable(test_inputs[0]):
func = test_inputs[0]
test_inputs = test_inputs[1:]
else:
func = None
test_inputs = [(test_input if isinstance(test_input, (list, tuple)) else [test_input]) for test_input in test_inputs]
test_cases = []
for inputs in test_inputs:
test_case = {}
test_case['function'] = (func if func else inputs[0])
test_case['args'] = (inputs[0] if func else inputs[1])
if (not isinstance(test_case['args'], (list, tuple))):
test_case['args'] = [test_case['args']]
if func:
test_case['kwargs'] = (inputs[1] if (len(inputs) == 2) else {})
else:
test_case['kwargs'] = (inputs[2] if (len(inputs) == 3) else {})
try:
test_case['call string'] = call_string(test_case['function'], test_case['args'], test_case['kwargs'])
except Exception:
test_case['call string'] = f"function = {test_case['function']}, args = {test_case['args']}, and kwargs = {test_case['kwargs']}"
test_cases.append(test_case)
if (len(test_cases) < 2):
raise InvalidTestError('At least two tests are needed for run_test_equivalent_calls')
bad_inputs_errmsg =
for test_case in test_cases:
if (not callable(test_case['function'])):
bad_inputs_errmsg += f'
{test_case['function']} is not callable '
if (not isinstance(test_case['args'], (tuple, list))):
bad_inputs_errmsg += f'
{test_case['args']} is not a list or tuple '
if (not isinstance(test_case['kwargs'], dict)):
bad_inputs_errmsg += f'
{test_case['kwargs']} is not a dict '
if bad_inputs_errmsg:
raise InvalidTestError(bad_inputs_errmsg)
for test_case in test_cases:
try:
(f, args, kwargs) = (test_case['function'], test_case['args'], test_case['kwargs'])
test_case['result'] = f(*args, **kwargs)
test_case['type'] = type(test_case['result'])
except Exception as exc:
raise UnexpectedExceptionFail(f"Unable to evaluate {test_case['call string']}.")
results = [test_case['result'] for test_case in test_cases]
types = [test_case['type'] for test_case in test_cases]
try:
equals_first_result = [(result == results[0]) for result in results]
except Exception as exc:
raise UnexpectedExceptionFail('Unable to determine equality properties of results.') from exc
equals_first_type = [(result_type == types[0]) for result_type in types]
not_all_equal = (not all(equals_first_result))
not_all_same_type = (not all(equals_first_type))
if not_all_equal:
errmsg = 'The following tests did not all produce identical results:'
elif (not_all_same_type and require_same_type):
errmsg = 'The following tests did not all produce results of the same type:'
if (not_all_equal or (not_all_same_type and require_same_type)):
for test_case in test_cases:
errmsg += f'
{test_case['call string']} yielded {test_case['result']} of type {test_case['type']}'
raise UnexpectedResultFail(errmsg)
|
def run_test_equivalent_calls(*test_inputs, require_same_type: bool=True):
"\n Test that different functions/inputs return equivalent results.\n\n Parameters\n ----------\n test_inputs\n The functions and inputs to the tests in an allowed format, as\n described below.\n\n require_same_type: bool\n If `True` (the default), then all of the results are required to\n be of the same type. If `False`, results do not need to be of\n the same type (e.g., cases like `1.0 == 1` will not raise an\n exception).\n\n Raises\n ------\n ~plasmapy.tests.helpers.exceptions.UnexpectedResultFail\n If not all of the results are equivalent, or not all of the\n results are of the same type and `require_same_type` evaluates\n to `True`.\n\n ~plasmapy.tests.helpers.exceptions.UnexpectedExceptionFail\n If an exception is raised whilst attempting to run one of the\n test cases.\n\n ~plasmapy.tests.helpers.exceptions.InvalidTestError\n If there is an error associated with the inputs or the test is\n set up incorrectly.\n\n Examples\n --------\n There are several possible formats that can be accepted by this\n `~plasmapy.utils.run_test_equivalent_calls` to test that different\n combinations of functions (or other `callable` objects), positional\n arguments, and keyword arguments return equivalent results.\n\n To test a single function that takes a single positional argument,\n then `test_inputs` may be the function followed by an arbitrary\n number of positional arguments to be included into the function.\n\n >>> def f(x): return x ** 2\n >>> run_test_equivalent_calls(f, -1, 1)\n\n To test a single function with an arbitrary number of positional and\n keyword arguments, the first argument should be the function,\n followed by an arbitrary number of `tuple` or `list` objects that\n contain a `tuple` or `list` containing the positional arguments, and\n a `dict` containing the keyword arguments.\n\n >>> def g(x, y, z): return x + y + z\n >>> run_test_equivalent_calls(g, ((1, 2, 3), {}), ((3, 2), {'z': 1}))\n\n If there is only one positional argument, then it is not necessary\n to include it in a `tuple` or `list`.\n\n >>> run_test_equivalent_calls(f, ([1], {}), ([1], {}))\n >>> run_test_equivalent_calls(f, (1, {}), (1, {}))\n\n To test multiple functions with an arbitrary number of positional\n and keyword arguments, use a series of `tuple` or `list` objects\n that contain the function for each test, a `tuple` or `list` with\n the positional arguments, and a `dict` with the keyword arguments.\n\n >>> def p(x, y=None): return x + y if y else x\n >>> def q(x, y=None): return x + 1 if y else x\n\n >>> run_test_equivalent_calls([p, (1,), {'y': 1}], [q, (2,), {'y': False}])\n\n The inputs may also be passed in as a whole as a `tuple` or `list`.\n\n >>> run_test_equivalent_calls(f, -1, 1)\n >>> run_test_equivalent_calls([f, -1, 1])\n\n If `require_same_type` is `False`, then an exception will not be\n raised if the results are of different types.\n\n >>> run_test_equivalent_calls(f, -1, 1.0, require_same_type=False)\n\n "
if (len(test_inputs) == 1):
test_inputs = test_inputs[0]
if (not isinstance(test_inputs, (tuple, list))):
raise InvalidTestError(f'The argument to run_test_equivalent_calls must be a tuple or list. The provided inputs are: {test_inputs}')
if callable(test_inputs[0]):
func = test_inputs[0]
test_inputs = test_inputs[1:]
else:
func = None
test_inputs = [(test_input if isinstance(test_input, (list, tuple)) else [test_input]) for test_input in test_inputs]
test_cases = []
for inputs in test_inputs:
test_case = {}
test_case['function'] = (func if func else inputs[0])
test_case['args'] = (inputs[0] if func else inputs[1])
if (not isinstance(test_case['args'], (list, tuple))):
test_case['args'] = [test_case['args']]
if func:
test_case['kwargs'] = (inputs[1] if (len(inputs) == 2) else {})
else:
test_case['kwargs'] = (inputs[2] if (len(inputs) == 3) else {})
try:
test_case['call string'] = call_string(test_case['function'], test_case['args'], test_case['kwargs'])
except Exception:
test_case['call string'] = f"function = {test_case['function']}, args = {test_case['args']}, and kwargs = {test_case['kwargs']}"
test_cases.append(test_case)
if (len(test_cases) < 2):
raise InvalidTestError('At least two tests are needed for run_test_equivalent_calls')
bad_inputs_errmsg =
for test_case in test_cases:
if (not callable(test_case['function'])):
bad_inputs_errmsg += f'
{test_case['function']} is not callable '
if (not isinstance(test_case['args'], (tuple, list))):
bad_inputs_errmsg += f'
{test_case['args']} is not a list or tuple '
if (not isinstance(test_case['kwargs'], dict)):
bad_inputs_errmsg += f'
{test_case['kwargs']} is not a dict '
if bad_inputs_errmsg:
raise InvalidTestError(bad_inputs_errmsg)
for test_case in test_cases:
try:
(f, args, kwargs) = (test_case['function'], test_case['args'], test_case['kwargs'])
test_case['result'] = f(*args, **kwargs)
test_case['type'] = type(test_case['result'])
except Exception as exc:
raise UnexpectedExceptionFail(f"Unable to evaluate {test_case['call string']}.")
results = [test_case['result'] for test_case in test_cases]
types = [test_case['type'] for test_case in test_cases]
try:
equals_first_result = [(result == results[0]) for result in results]
except Exception as exc:
raise UnexpectedExceptionFail('Unable to determine equality properties of results.') from exc
equals_first_type = [(result_type == types[0]) for result_type in types]
not_all_equal = (not all(equals_first_result))
not_all_same_type = (not all(equals_first_type))
if not_all_equal:
errmsg = 'The following tests did not all produce identical results:'
elif (not_all_same_type and require_same_type):
errmsg = 'The following tests did not all produce results of the same type:'
if (not_all_equal or (not_all_same_type and require_same_type)):
for test_case in test_cases:
errmsg += f'
{test_case['call string']} yielded {test_case['result']} of type {test_case['type']}'
raise UnexpectedResultFail(errmsg)<|docstring|>Test that different functions/inputs return equivalent results.
Parameters
----------
test_inputs
The functions and inputs to the tests in an allowed format, as
described below.
require_same_type: bool
If `True` (the default), then all of the results are required to
be of the same type. If `False`, results do not need to be of
the same type (e.g., cases like `1.0 == 1` will not raise an
exception).
Raises
------
~plasmapy.tests.helpers.exceptions.UnexpectedResultFail
If not all of the results are equivalent, or not all of the
results are of the same type and `require_same_type` evaluates
to `True`.
~plasmapy.tests.helpers.exceptions.UnexpectedExceptionFail
If an exception is raised whilst attempting to run one of the
test cases.
~plasmapy.tests.helpers.exceptions.InvalidTestError
If there is an error associated with the inputs or the test is
set up incorrectly.
Examples
--------
There are several possible formats that can be accepted by this
`~plasmapy.utils.run_test_equivalent_calls` to test that different
combinations of functions (or other `callable` objects), positional
arguments, and keyword arguments return equivalent results.
To test a single function that takes a single positional argument,
then `test_inputs` may be the function followed by an arbitrary
number of positional arguments to be included into the function.
>>> def f(x): return x ** 2
>>> run_test_equivalent_calls(f, -1, 1)
To test a single function with an arbitrary number of positional and
keyword arguments, the first argument should be the function,
followed by an arbitrary number of `tuple` or `list` objects that
contain a `tuple` or `list` containing the positional arguments, and
a `dict` containing the keyword arguments.
>>> def g(x, y, z): return x + y + z
>>> run_test_equivalent_calls(g, ((1, 2, 3), {}), ((3, 2), {'z': 1}))
If there is only one positional argument, then it is not necessary
to include it in a `tuple` or `list`.
>>> run_test_equivalent_calls(f, ([1], {}), ([1], {}))
>>> run_test_equivalent_calls(f, (1, {}), (1, {}))
To test multiple functions with an arbitrary number of positional
and keyword arguments, use a series of `tuple` or `list` objects
that contain the function for each test, a `tuple` or `list` with
the positional arguments, and a `dict` with the keyword arguments.
>>> def p(x, y=None): return x + y if y else x
>>> def q(x, y=None): return x + 1 if y else x
>>> run_test_equivalent_calls([p, (1,), {'y': 1}], [q, (2,), {'y': False}])
The inputs may also be passed in as a whole as a `tuple` or `list`.
>>> run_test_equivalent_calls(f, -1, 1)
>>> run_test_equivalent_calls([f, -1, 1])
If `require_same_type` is `False`, then an exception will not be
raised if the results are of different types.
>>> run_test_equivalent_calls(f, -1, 1.0, require_same_type=False)<|endoftext|>
|
bcd9f396fb4f2e41978b8c91c15656f5f224db57d0cfef1e9a659eba32af07c2
|
def assert_can_handle_nparray(function_to_test, insert_some_nans=None, insert_all_nans=None, kwargs=None):
'\n Test for ability to handle numpy array quantities.\n\n Parameters\n ----------\n function_to_test\n The function to be tested for ability to handle numpy array quantities.\n Arguments are automatically given a vector input based on their\n variable name. Current args that are interpreted as vectors are:\n `["T", "T_i", "T_e", "temperature"]`\n `["n", "n_i", "n_e", "density"]`\n `["B"]`\n `["V", "Vperp"]`\n `["coulomb_log"]`\n `["characteristic_length"]`\n\n insert_some_nans: `list`\n List of argument names in which to insert some np.nan values.\n These must be arguments that will be tested as vectors as listed\n above.\n\n insert_all_nans: `list`\n List of argument names to fill entirely with np.nan values.\n\n kwargs: `dict`\n Arguments to pass directly to the function in under test, in the\n normal kwargs python dictionary format.\n\n Raises\n ------\n ValueError\n If this function cannot interpret a parameter of function_to_test.\n\n Examples\n --------\n >>> from plasmapy.formulary.parameters import Alfven_speed, gyrofrequency\n >>> assert_can_handle_nparray(Alfven_speed)\n >>> assert_can_handle_nparray(gyrofrequency, kwargs={"signed": True})\n >>> assert_can_handle_nparray(gyrofrequency, kwargs={"signed": False})\n '
if (insert_some_nans is None):
insert_some_nans = []
if (insert_all_nans is None):
insert_all_nans = []
if (kwargs is None):
kwargs = {}
def _prepare_input(param_name, param_default, insert_some_nans, insert_all_nans, kwargs):
'\n Parse parameter names and set up values to input for 0d, 1d, and 2d array tests.\n '
if (param_name in kwargs.keys()):
return ((kwargs[param_name],) * 4)
elif (param_name in ['particle', 'ion_particle', 'ion']):
if (not ((param_default is inspect._empty) or (param_default is None))):
return ((param_default,) * 4)
else:
return (('p',) * 4)
elif ((param_name == 'particles') or (param_name == 'species')):
if (not (param_default is inspect._empty)):
return ((param_default,) * 4)
else:
return ((('e', 'p'),) * 4)
elif (param_name in ['T', 'T_i', 'T_e', 'temperature']):
unit = u.eV
magnitude = 1.0
elif (param_name in ['n', 'n_i', 'n_e', 'density']):
unit = (u.m ** (- 3))
magnitude = 1e+20
elif (param_name == 'B'):
unit = u.G
magnitude = 1000.0
elif (param_name in ['V', 'Vperp']):
unit = (u.m / u.s)
magnitude = 100000.0
elif (param_name == 'coulomb_log'):
unit = 1.0
magnitude = 10.0
elif (param_name == 'characteristic_length'):
unit = u.m
magnitude = 1.0
elif (param_name == 'k'):
unit = (u.m ** (- 1))
magnitude = 1.0
elif (not (param_default is inspect._empty)):
return ((param_default,) * 4)
else:
raise ValueError(f'Unrecognized function input: {param_name}')
input_data_3d = np.reshape(np.arange(1.0, 9.0, 1.0), (2, 2, 2))
input_data_2d = np.reshape(np.arange(1.0, 5.0, 1.0), (2, 2))
input_data_1d = np.arange(1.0, 5.0, 1.0)
if (param_name in insert_some_nans):
input_data_3d[(0, 0, 1)] = np.nan
input_data_3d[(0, 1, 0)] = np.nan
input_data_2d[(0, 1)] = np.nan
input_data_2d[(1, 0)] = np.nan
input_data_1d[1] = np.nan
elif (param_name in insert_all_nans):
input_data_3d = (np.ones((2, 2, 2)) * np.nan)
input_data_2d = (np.ones((2, 2)) * np.nan)
input_data_1d = (np.ones(4) * np.nan)
input_data_3d *= magnitude
input_data_3d *= unit
input_data_2d *= magnitude
input_data_2d *= unit
input_data_1d *= magnitude
input_data_1d *= unit
input_data_0d = input_data_1d[3]
return (input_data_0d, input_data_1d, input_data_2d, input_data_3d)
function_sig = inspect.signature(function_to_test)
function_params = function_sig.parameters
args_0d = dict()
args_1d = dict()
args_2d = dict()
args_3d = dict()
param_names = [elm for elm in function_params.keys()]
for (idx, key) in enumerate(function_params):
(args_0d[key], args_1d[key], args_2d[key], args_3d[key]) = _prepare_input(param_names[idx], function_params[key].default, insert_some_nans, insert_all_nans, kwargs)
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=PlasmaPyWarning)
result_0d = function_to_test(**args_0d)
result_1d = function_to_test(**args_1d)
result_2d = function_to_test(**args_2d)
result_3d = function_to_test(**args_3d)
try:
scalar_testable = result_0d.value
except AttributeError:
scalar_testable = result_0d
if np.isscalar(scalar_testable):
astrohelper.assert_quantity_allclose(result_0d, result_1d[3])
astrohelper.assert_quantity_allclose(result_0d, result_2d[(1, 1)])
astrohelper.assert_quantity_allclose(result_0d, result_3d[(0, 1, 1)])
else:
for (idx, res_0d) in enumerate(result_0d):
astrohelper.assert_quantity_allclose(res_0d, result_1d[idx][3])
astrohelper.assert_quantity_allclose(res_0d, result_2d[idx][(1, 1)])
astrohelper.assert_quantity_allclose(res_0d, result_3d[idx][(0, 1, 1)])
|
Test for ability to handle numpy array quantities.
Parameters
----------
function_to_test
The function to be tested for ability to handle numpy array quantities.
Arguments are automatically given a vector input based on their
variable name. Current args that are interpreted as vectors are:
`["T", "T_i", "T_e", "temperature"]`
`["n", "n_i", "n_e", "density"]`
`["B"]`
`["V", "Vperp"]`
`["coulomb_log"]`
`["characteristic_length"]`
insert_some_nans: `list`
List of argument names in which to insert some np.nan values.
These must be arguments that will be tested as vectors as listed
above.
insert_all_nans: `list`
List of argument names to fill entirely with np.nan values.
kwargs: `dict`
Arguments to pass directly to the function in under test, in the
normal kwargs python dictionary format.
Raises
------
ValueError
If this function cannot interpret a parameter of function_to_test.
Examples
--------
>>> from plasmapy.formulary.parameters import Alfven_speed, gyrofrequency
>>> assert_can_handle_nparray(Alfven_speed)
>>> assert_can_handle_nparray(gyrofrequency, kwargs={"signed": True})
>>> assert_can_handle_nparray(gyrofrequency, kwargs={"signed": False})
|
plasmapy/utils/pytest_helpers/pytest_helpers.py
|
assert_can_handle_nparray
|
alfkoehn/PlasmaPy
| 429
|
python
|
def assert_can_handle_nparray(function_to_test, insert_some_nans=None, insert_all_nans=None, kwargs=None):
'\n Test for ability to handle numpy array quantities.\n\n Parameters\n ----------\n function_to_test\n The function to be tested for ability to handle numpy array quantities.\n Arguments are automatically given a vector input based on their\n variable name. Current args that are interpreted as vectors are:\n `["T", "T_i", "T_e", "temperature"]`\n `["n", "n_i", "n_e", "density"]`\n `["B"]`\n `["V", "Vperp"]`\n `["coulomb_log"]`\n `["characteristic_length"]`\n\n insert_some_nans: `list`\n List of argument names in which to insert some np.nan values.\n These must be arguments that will be tested as vectors as listed\n above.\n\n insert_all_nans: `list`\n List of argument names to fill entirely with np.nan values.\n\n kwargs: `dict`\n Arguments to pass directly to the function in under test, in the\n normal kwargs python dictionary format.\n\n Raises\n ------\n ValueError\n If this function cannot interpret a parameter of function_to_test.\n\n Examples\n --------\n >>> from plasmapy.formulary.parameters import Alfven_speed, gyrofrequency\n >>> assert_can_handle_nparray(Alfven_speed)\n >>> assert_can_handle_nparray(gyrofrequency, kwargs={"signed": True})\n >>> assert_can_handle_nparray(gyrofrequency, kwargs={"signed": False})\n '
if (insert_some_nans is None):
insert_some_nans = []
if (insert_all_nans is None):
insert_all_nans = []
if (kwargs is None):
kwargs = {}
def _prepare_input(param_name, param_default, insert_some_nans, insert_all_nans, kwargs):
'\n Parse parameter names and set up values to input for 0d, 1d, and 2d array tests.\n '
if (param_name in kwargs.keys()):
return ((kwargs[param_name],) * 4)
elif (param_name in ['particle', 'ion_particle', 'ion']):
if (not ((param_default is inspect._empty) or (param_default is None))):
return ((param_default,) * 4)
else:
return (('p',) * 4)
elif ((param_name == 'particles') or (param_name == 'species')):
if (not (param_default is inspect._empty)):
return ((param_default,) * 4)
else:
return ((('e', 'p'),) * 4)
elif (param_name in ['T', 'T_i', 'T_e', 'temperature']):
unit = u.eV
magnitude = 1.0
elif (param_name in ['n', 'n_i', 'n_e', 'density']):
unit = (u.m ** (- 3))
magnitude = 1e+20
elif (param_name == 'B'):
unit = u.G
magnitude = 1000.0
elif (param_name in ['V', 'Vperp']):
unit = (u.m / u.s)
magnitude = 100000.0
elif (param_name == 'coulomb_log'):
unit = 1.0
magnitude = 10.0
elif (param_name == 'characteristic_length'):
unit = u.m
magnitude = 1.0
elif (param_name == 'k'):
unit = (u.m ** (- 1))
magnitude = 1.0
elif (not (param_default is inspect._empty)):
return ((param_default,) * 4)
else:
raise ValueError(f'Unrecognized function input: {param_name}')
input_data_3d = np.reshape(np.arange(1.0, 9.0, 1.0), (2, 2, 2))
input_data_2d = np.reshape(np.arange(1.0, 5.0, 1.0), (2, 2))
input_data_1d = np.arange(1.0, 5.0, 1.0)
if (param_name in insert_some_nans):
input_data_3d[(0, 0, 1)] = np.nan
input_data_3d[(0, 1, 0)] = np.nan
input_data_2d[(0, 1)] = np.nan
input_data_2d[(1, 0)] = np.nan
input_data_1d[1] = np.nan
elif (param_name in insert_all_nans):
input_data_3d = (np.ones((2, 2, 2)) * np.nan)
input_data_2d = (np.ones((2, 2)) * np.nan)
input_data_1d = (np.ones(4) * np.nan)
input_data_3d *= magnitude
input_data_3d *= unit
input_data_2d *= magnitude
input_data_2d *= unit
input_data_1d *= magnitude
input_data_1d *= unit
input_data_0d = input_data_1d[3]
return (input_data_0d, input_data_1d, input_data_2d, input_data_3d)
function_sig = inspect.signature(function_to_test)
function_params = function_sig.parameters
args_0d = dict()
args_1d = dict()
args_2d = dict()
args_3d = dict()
param_names = [elm for elm in function_params.keys()]
for (idx, key) in enumerate(function_params):
(args_0d[key], args_1d[key], args_2d[key], args_3d[key]) = _prepare_input(param_names[idx], function_params[key].default, insert_some_nans, insert_all_nans, kwargs)
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=PlasmaPyWarning)
result_0d = function_to_test(**args_0d)
result_1d = function_to_test(**args_1d)
result_2d = function_to_test(**args_2d)
result_3d = function_to_test(**args_3d)
try:
scalar_testable = result_0d.value
except AttributeError:
scalar_testable = result_0d
if np.isscalar(scalar_testable):
astrohelper.assert_quantity_allclose(result_0d, result_1d[3])
astrohelper.assert_quantity_allclose(result_0d, result_2d[(1, 1)])
astrohelper.assert_quantity_allclose(result_0d, result_3d[(0, 1, 1)])
else:
for (idx, res_0d) in enumerate(result_0d):
astrohelper.assert_quantity_allclose(res_0d, result_1d[idx][3])
astrohelper.assert_quantity_allclose(res_0d, result_2d[idx][(1, 1)])
astrohelper.assert_quantity_allclose(res_0d, result_3d[idx][(0, 1, 1)])
|
def assert_can_handle_nparray(function_to_test, insert_some_nans=None, insert_all_nans=None, kwargs=None):
'\n Test for ability to handle numpy array quantities.\n\n Parameters\n ----------\n function_to_test\n The function to be tested for ability to handle numpy array quantities.\n Arguments are automatically given a vector input based on their\n variable name. Current args that are interpreted as vectors are:\n `["T", "T_i", "T_e", "temperature"]`\n `["n", "n_i", "n_e", "density"]`\n `["B"]`\n `["V", "Vperp"]`\n `["coulomb_log"]`\n `["characteristic_length"]`\n\n insert_some_nans: `list`\n List of argument names in which to insert some np.nan values.\n These must be arguments that will be tested as vectors as listed\n above.\n\n insert_all_nans: `list`\n List of argument names to fill entirely with np.nan values.\n\n kwargs: `dict`\n Arguments to pass directly to the function in under test, in the\n normal kwargs python dictionary format.\n\n Raises\n ------\n ValueError\n If this function cannot interpret a parameter of function_to_test.\n\n Examples\n --------\n >>> from plasmapy.formulary.parameters import Alfven_speed, gyrofrequency\n >>> assert_can_handle_nparray(Alfven_speed)\n >>> assert_can_handle_nparray(gyrofrequency, kwargs={"signed": True})\n >>> assert_can_handle_nparray(gyrofrequency, kwargs={"signed": False})\n '
if (insert_some_nans is None):
insert_some_nans = []
if (insert_all_nans is None):
insert_all_nans = []
if (kwargs is None):
kwargs = {}
def _prepare_input(param_name, param_default, insert_some_nans, insert_all_nans, kwargs):
'\n Parse parameter names and set up values to input for 0d, 1d, and 2d array tests.\n '
if (param_name in kwargs.keys()):
return ((kwargs[param_name],) * 4)
elif (param_name in ['particle', 'ion_particle', 'ion']):
if (not ((param_default is inspect._empty) or (param_default is None))):
return ((param_default,) * 4)
else:
return (('p',) * 4)
elif ((param_name == 'particles') or (param_name == 'species')):
if (not (param_default is inspect._empty)):
return ((param_default,) * 4)
else:
return ((('e', 'p'),) * 4)
elif (param_name in ['T', 'T_i', 'T_e', 'temperature']):
unit = u.eV
magnitude = 1.0
elif (param_name in ['n', 'n_i', 'n_e', 'density']):
unit = (u.m ** (- 3))
magnitude = 1e+20
elif (param_name == 'B'):
unit = u.G
magnitude = 1000.0
elif (param_name in ['V', 'Vperp']):
unit = (u.m / u.s)
magnitude = 100000.0
elif (param_name == 'coulomb_log'):
unit = 1.0
magnitude = 10.0
elif (param_name == 'characteristic_length'):
unit = u.m
magnitude = 1.0
elif (param_name == 'k'):
unit = (u.m ** (- 1))
magnitude = 1.0
elif (not (param_default is inspect._empty)):
return ((param_default,) * 4)
else:
raise ValueError(f'Unrecognized function input: {param_name}')
input_data_3d = np.reshape(np.arange(1.0, 9.0, 1.0), (2, 2, 2))
input_data_2d = np.reshape(np.arange(1.0, 5.0, 1.0), (2, 2))
input_data_1d = np.arange(1.0, 5.0, 1.0)
if (param_name in insert_some_nans):
input_data_3d[(0, 0, 1)] = np.nan
input_data_3d[(0, 1, 0)] = np.nan
input_data_2d[(0, 1)] = np.nan
input_data_2d[(1, 0)] = np.nan
input_data_1d[1] = np.nan
elif (param_name in insert_all_nans):
input_data_3d = (np.ones((2, 2, 2)) * np.nan)
input_data_2d = (np.ones((2, 2)) * np.nan)
input_data_1d = (np.ones(4) * np.nan)
input_data_3d *= magnitude
input_data_3d *= unit
input_data_2d *= magnitude
input_data_2d *= unit
input_data_1d *= magnitude
input_data_1d *= unit
input_data_0d = input_data_1d[3]
return (input_data_0d, input_data_1d, input_data_2d, input_data_3d)
function_sig = inspect.signature(function_to_test)
function_params = function_sig.parameters
args_0d = dict()
args_1d = dict()
args_2d = dict()
args_3d = dict()
param_names = [elm for elm in function_params.keys()]
for (idx, key) in enumerate(function_params):
(args_0d[key], args_1d[key], args_2d[key], args_3d[key]) = _prepare_input(param_names[idx], function_params[key].default, insert_some_nans, insert_all_nans, kwargs)
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=PlasmaPyWarning)
result_0d = function_to_test(**args_0d)
result_1d = function_to_test(**args_1d)
result_2d = function_to_test(**args_2d)
result_3d = function_to_test(**args_3d)
try:
scalar_testable = result_0d.value
except AttributeError:
scalar_testable = result_0d
if np.isscalar(scalar_testable):
astrohelper.assert_quantity_allclose(result_0d, result_1d[3])
astrohelper.assert_quantity_allclose(result_0d, result_2d[(1, 1)])
astrohelper.assert_quantity_allclose(result_0d, result_3d[(0, 1, 1)])
else:
for (idx, res_0d) in enumerate(result_0d):
astrohelper.assert_quantity_allclose(res_0d, result_1d[idx][3])
astrohelper.assert_quantity_allclose(res_0d, result_2d[idx][(1, 1)])
astrohelper.assert_quantity_allclose(res_0d, result_3d[idx][(0, 1, 1)])<|docstring|>Test for ability to handle numpy array quantities.
Parameters
----------
function_to_test
The function to be tested for ability to handle numpy array quantities.
Arguments are automatically given a vector input based on their
variable name. Current args that are interpreted as vectors are:
`["T", "T_i", "T_e", "temperature"]`
`["n", "n_i", "n_e", "density"]`
`["B"]`
`["V", "Vperp"]`
`["coulomb_log"]`
`["characteristic_length"]`
insert_some_nans: `list`
List of argument names in which to insert some np.nan values.
These must be arguments that will be tested as vectors as listed
above.
insert_all_nans: `list`
List of argument names to fill entirely with np.nan values.
kwargs: `dict`
Arguments to pass directly to the function in under test, in the
normal kwargs python dictionary format.
Raises
------
ValueError
If this function cannot interpret a parameter of function_to_test.
Examples
--------
>>> from plasmapy.formulary.parameters import Alfven_speed, gyrofrequency
>>> assert_can_handle_nparray(Alfven_speed)
>>> assert_can_handle_nparray(gyrofrequency, kwargs={"signed": True})
>>> assert_can_handle_nparray(gyrofrequency, kwargs={"signed": False})<|endoftext|>
|
8887de46cdf86dc25af54daaf767dcf5c78b4c31ceed40240bb0a88481252a5b
|
def _prepare_input(param_name, param_default, insert_some_nans, insert_all_nans, kwargs):
'\n Parse parameter names and set up values to input for 0d, 1d, and 2d array tests.\n '
if (param_name in kwargs.keys()):
return ((kwargs[param_name],) * 4)
elif (param_name in ['particle', 'ion_particle', 'ion']):
if (not ((param_default is inspect._empty) or (param_default is None))):
return ((param_default,) * 4)
else:
return (('p',) * 4)
elif ((param_name == 'particles') or (param_name == 'species')):
if (not (param_default is inspect._empty)):
return ((param_default,) * 4)
else:
return ((('e', 'p'),) * 4)
elif (param_name in ['T', 'T_i', 'T_e', 'temperature']):
unit = u.eV
magnitude = 1.0
elif (param_name in ['n', 'n_i', 'n_e', 'density']):
unit = (u.m ** (- 3))
magnitude = 1e+20
elif (param_name == 'B'):
unit = u.G
magnitude = 1000.0
elif (param_name in ['V', 'Vperp']):
unit = (u.m / u.s)
magnitude = 100000.0
elif (param_name == 'coulomb_log'):
unit = 1.0
magnitude = 10.0
elif (param_name == 'characteristic_length'):
unit = u.m
magnitude = 1.0
elif (param_name == 'k'):
unit = (u.m ** (- 1))
magnitude = 1.0
elif (not (param_default is inspect._empty)):
return ((param_default,) * 4)
else:
raise ValueError(f'Unrecognized function input: {param_name}')
input_data_3d = np.reshape(np.arange(1.0, 9.0, 1.0), (2, 2, 2))
input_data_2d = np.reshape(np.arange(1.0, 5.0, 1.0), (2, 2))
input_data_1d = np.arange(1.0, 5.0, 1.0)
if (param_name in insert_some_nans):
input_data_3d[(0, 0, 1)] = np.nan
input_data_3d[(0, 1, 0)] = np.nan
input_data_2d[(0, 1)] = np.nan
input_data_2d[(1, 0)] = np.nan
input_data_1d[1] = np.nan
elif (param_name in insert_all_nans):
input_data_3d = (np.ones((2, 2, 2)) * np.nan)
input_data_2d = (np.ones((2, 2)) * np.nan)
input_data_1d = (np.ones(4) * np.nan)
input_data_3d *= magnitude
input_data_3d *= unit
input_data_2d *= magnitude
input_data_2d *= unit
input_data_1d *= magnitude
input_data_1d *= unit
input_data_0d = input_data_1d[3]
return (input_data_0d, input_data_1d, input_data_2d, input_data_3d)
|
Parse parameter names and set up values to input for 0d, 1d, and 2d array tests.
|
plasmapy/utils/pytest_helpers/pytest_helpers.py
|
_prepare_input
|
alfkoehn/PlasmaPy
| 429
|
python
|
def _prepare_input(param_name, param_default, insert_some_nans, insert_all_nans, kwargs):
'\n \n '
if (param_name in kwargs.keys()):
return ((kwargs[param_name],) * 4)
elif (param_name in ['particle', 'ion_particle', 'ion']):
if (not ((param_default is inspect._empty) or (param_default is None))):
return ((param_default,) * 4)
else:
return (('p',) * 4)
elif ((param_name == 'particles') or (param_name == 'species')):
if (not (param_default is inspect._empty)):
return ((param_default,) * 4)
else:
return ((('e', 'p'),) * 4)
elif (param_name in ['T', 'T_i', 'T_e', 'temperature']):
unit = u.eV
magnitude = 1.0
elif (param_name in ['n', 'n_i', 'n_e', 'density']):
unit = (u.m ** (- 3))
magnitude = 1e+20
elif (param_name == 'B'):
unit = u.G
magnitude = 1000.0
elif (param_name in ['V', 'Vperp']):
unit = (u.m / u.s)
magnitude = 100000.0
elif (param_name == 'coulomb_log'):
unit = 1.0
magnitude = 10.0
elif (param_name == 'characteristic_length'):
unit = u.m
magnitude = 1.0
elif (param_name == 'k'):
unit = (u.m ** (- 1))
magnitude = 1.0
elif (not (param_default is inspect._empty)):
return ((param_default,) * 4)
else:
raise ValueError(f'Unrecognized function input: {param_name}')
input_data_3d = np.reshape(np.arange(1.0, 9.0, 1.0), (2, 2, 2))
input_data_2d = np.reshape(np.arange(1.0, 5.0, 1.0), (2, 2))
input_data_1d = np.arange(1.0, 5.0, 1.0)
if (param_name in insert_some_nans):
input_data_3d[(0, 0, 1)] = np.nan
input_data_3d[(0, 1, 0)] = np.nan
input_data_2d[(0, 1)] = np.nan
input_data_2d[(1, 0)] = np.nan
input_data_1d[1] = np.nan
elif (param_name in insert_all_nans):
input_data_3d = (np.ones((2, 2, 2)) * np.nan)
input_data_2d = (np.ones((2, 2)) * np.nan)
input_data_1d = (np.ones(4) * np.nan)
input_data_3d *= magnitude
input_data_3d *= unit
input_data_2d *= magnitude
input_data_2d *= unit
input_data_1d *= magnitude
input_data_1d *= unit
input_data_0d = input_data_1d[3]
return (input_data_0d, input_data_1d, input_data_2d, input_data_3d)
|
def _prepare_input(param_name, param_default, insert_some_nans, insert_all_nans, kwargs):
'\n \n '
if (param_name in kwargs.keys()):
return ((kwargs[param_name],) * 4)
elif (param_name in ['particle', 'ion_particle', 'ion']):
if (not ((param_default is inspect._empty) or (param_default is None))):
return ((param_default,) * 4)
else:
return (('p',) * 4)
elif ((param_name == 'particles') or (param_name == 'species')):
if (not (param_default is inspect._empty)):
return ((param_default,) * 4)
else:
return ((('e', 'p'),) * 4)
elif (param_name in ['T', 'T_i', 'T_e', 'temperature']):
unit = u.eV
magnitude = 1.0
elif (param_name in ['n', 'n_i', 'n_e', 'density']):
unit = (u.m ** (- 3))
magnitude = 1e+20
elif (param_name == 'B'):
unit = u.G
magnitude = 1000.0
elif (param_name in ['V', 'Vperp']):
unit = (u.m / u.s)
magnitude = 100000.0
elif (param_name == 'coulomb_log'):
unit = 1.0
magnitude = 10.0
elif (param_name == 'characteristic_length'):
unit = u.m
magnitude = 1.0
elif (param_name == 'k'):
unit = (u.m ** (- 1))
magnitude = 1.0
elif (not (param_default is inspect._empty)):
return ((param_default,) * 4)
else:
raise ValueError(f'Unrecognized function input: {param_name}')
input_data_3d = np.reshape(np.arange(1.0, 9.0, 1.0), (2, 2, 2))
input_data_2d = np.reshape(np.arange(1.0, 5.0, 1.0), (2, 2))
input_data_1d = np.arange(1.0, 5.0, 1.0)
if (param_name in insert_some_nans):
input_data_3d[(0, 0, 1)] = np.nan
input_data_3d[(0, 1, 0)] = np.nan
input_data_2d[(0, 1)] = np.nan
input_data_2d[(1, 0)] = np.nan
input_data_1d[1] = np.nan
elif (param_name in insert_all_nans):
input_data_3d = (np.ones((2, 2, 2)) * np.nan)
input_data_2d = (np.ones((2, 2)) * np.nan)
input_data_1d = (np.ones(4) * np.nan)
input_data_3d *= magnitude
input_data_3d *= unit
input_data_2d *= magnitude
input_data_2d *= unit
input_data_1d *= magnitude
input_data_1d *= unit
input_data_0d = input_data_1d[3]
return (input_data_0d, input_data_1d, input_data_2d, input_data_3d)<|docstring|>Parse parameter names and set up values to input for 0d, 1d, and 2d array tests.<|endoftext|>
|
27482ef9f197555a1a2f61c3fbd3028abf1f3f1e675bb34204318f758265ab50
|
def mse(x, t):
'Calculates the MSE mean across all dimensions, i.e. feature\n dimension AND minibatch dimension.\n\n :parameters:\n - x : predicted values\n - t : target values\n\n :returns:\n - output : the mean square error across all dimensions\n '
return T.mean(((x - t) ** 2))
|
Calculates the MSE mean across all dimensions, i.e. feature
dimension AND minibatch dimension.
:parameters:
- x : predicted values
- t : target values
:returns:
- output : the mean square error across all dimensions
|
lasagne/objectives.py
|
mse
|
dimatura/nntools
| 0
|
python
|
def mse(x, t):
'Calculates the MSE mean across all dimensions, i.e. feature\n dimension AND minibatch dimension.\n\n :parameters:\n - x : predicted values\n - t : target values\n\n :returns:\n - output : the mean square error across all dimensions\n '
return T.mean(((x - t) ** 2))
|
def mse(x, t):
'Calculates the MSE mean across all dimensions, i.e. feature\n dimension AND minibatch dimension.\n\n :parameters:\n - x : predicted values\n - t : target values\n\n :returns:\n - output : the mean square error across all dimensions\n '
return T.mean(((x - t) ** 2))<|docstring|>Calculates the MSE mean across all dimensions, i.e. feature
dimension AND minibatch dimension.
:parameters:
- x : predicted values
- t : target values
:returns:
- output : the mean square error across all dimensions<|endoftext|>
|
55670b50c825eff06af4af87730edfb0c835c5164f8b796fda7ad0f5bffd3fcf
|
def crossentropy(x, t):
'Calculates the binary crossentropy mean across all dimentions,\n i.e. feature dimension AND minibatch dimension.\n\n :parameters:\n - x : predicted values\n - t : target values\n\n :returns:\n - output : the mean binary cross entropy across all dimensions\n '
return T.mean(T.nnet.binary_crossentropy(x, t))
|
Calculates the binary crossentropy mean across all dimentions,
i.e. feature dimension AND minibatch dimension.
:parameters:
- x : predicted values
- t : target values
:returns:
- output : the mean binary cross entropy across all dimensions
|
lasagne/objectives.py
|
crossentropy
|
dimatura/nntools
| 0
|
python
|
def crossentropy(x, t):
'Calculates the binary crossentropy mean across all dimentions,\n i.e. feature dimension AND minibatch dimension.\n\n :parameters:\n - x : predicted values\n - t : target values\n\n :returns:\n - output : the mean binary cross entropy across all dimensions\n '
return T.mean(T.nnet.binary_crossentropy(x, t))
|
def crossentropy(x, t):
'Calculates the binary crossentropy mean across all dimentions,\n i.e. feature dimension AND minibatch dimension.\n\n :parameters:\n - x : predicted values\n - t : target values\n\n :returns:\n - output : the mean binary cross entropy across all dimensions\n '
return T.mean(T.nnet.binary_crossentropy(x, t))<|docstring|>Calculates the binary crossentropy mean across all dimentions,
i.e. feature dimension AND minibatch dimension.
:parameters:
- x : predicted values
- t : target values
:returns:
- output : the mean binary cross entropy across all dimensions<|endoftext|>
|
7ad99cfc10826cb72ca98c8595ea0f957b8cf718ef98aa11dc3aa412f906f936
|
def multinomial_nll(x, t):
'Calculates the mean multinomial negative-log-loss\n\n :parameters:\n - x : (predicted) class probabilities; a theano expression resulting\n in a 2D array; samples run along axis 0, class probabilities along\n axis 1\n - t : (correct) class probabilities; a theano expression resulting in\n a 2D tensor that gives the class probabilities in its rows, OR\n a 1D integer array that gives the class index of each sample\n (the position of the 1 in the row in a 1-of-N encoding, or\n 1-hot encoding),\n\n :returns:\n - output : the mean multinomial negative log loss\n '
return T.mean(T.nnet.categorical_crossentropy(x, t))
|
Calculates the mean multinomial negative-log-loss
:parameters:
- x : (predicted) class probabilities; a theano expression resulting
in a 2D array; samples run along axis 0, class probabilities along
axis 1
- t : (correct) class probabilities; a theano expression resulting in
a 2D tensor that gives the class probabilities in its rows, OR
a 1D integer array that gives the class index of each sample
(the position of the 1 in the row in a 1-of-N encoding, or
1-hot encoding),
:returns:
- output : the mean multinomial negative log loss
|
lasagne/objectives.py
|
multinomial_nll
|
dimatura/nntools
| 0
|
python
|
def multinomial_nll(x, t):
'Calculates the mean multinomial negative-log-loss\n\n :parameters:\n - x : (predicted) class probabilities; a theano expression resulting\n in a 2D array; samples run along axis 0, class probabilities along\n axis 1\n - t : (correct) class probabilities; a theano expression resulting in\n a 2D tensor that gives the class probabilities in its rows, OR\n a 1D integer array that gives the class index of each sample\n (the position of the 1 in the row in a 1-of-N encoding, or\n 1-hot encoding),\n\n :returns:\n - output : the mean multinomial negative log loss\n '
return T.mean(T.nnet.categorical_crossentropy(x, t))
|
def multinomial_nll(x, t):
'Calculates the mean multinomial negative-log-loss\n\n :parameters:\n - x : (predicted) class probabilities; a theano expression resulting\n in a 2D array; samples run along axis 0, class probabilities along\n axis 1\n - t : (correct) class probabilities; a theano expression resulting in\n a 2D tensor that gives the class probabilities in its rows, OR\n a 1D integer array that gives the class index of each sample\n (the position of the 1 in the row in a 1-of-N encoding, or\n 1-hot encoding),\n\n :returns:\n - output : the mean multinomial negative log loss\n '
return T.mean(T.nnet.categorical_crossentropy(x, t))<|docstring|>Calculates the mean multinomial negative-log-loss
:parameters:
- x : (predicted) class probabilities; a theano expression resulting
in a 2D array; samples run along axis 0, class probabilities along
axis 1
- t : (correct) class probabilities; a theano expression resulting in
a 2D tensor that gives the class probabilities in its rows, OR
a 1D integer array that gives the class index of each sample
(the position of the 1 in the row in a 1-of-N encoding, or
1-hot encoding),
:returns:
- output : the mean multinomial negative log loss<|endoftext|>
|
8e7ddf41111f740d5c278c9555547e19a9a95a1bc435179130ed0a66042861ad
|
def __init__(self, input_layer, loss_function=mse):
'\n Constructor\n\n :parameters:\n - input_layer : a `Layer` whose output is the networks prediction\n given its input\n - loss_function : a loss function of the form `f(x, t)` that\n returns a scalar loss given tensors that represent the\n predicted and true values as arguments..\n '
self.input_layer = input_layer
self.loss_function = loss_function
self.target_var = T.matrix('target')
|
Constructor
:parameters:
- input_layer : a `Layer` whose output is the networks prediction
given its input
- loss_function : a loss function of the form `f(x, t)` that
returns a scalar loss given tensors that represent the
predicted and true values as arguments..
|
lasagne/objectives.py
|
__init__
|
dimatura/nntools
| 0
|
python
|
def __init__(self, input_layer, loss_function=mse):
'\n Constructor\n\n :parameters:\n - input_layer : a `Layer` whose output is the networks prediction\n given its input\n - loss_function : a loss function of the form `f(x, t)` that\n returns a scalar loss given tensors that represent the\n predicted and true values as arguments..\n '
self.input_layer = input_layer
self.loss_function = loss_function
self.target_var = T.matrix('target')
|
def __init__(self, input_layer, loss_function=mse):
'\n Constructor\n\n :parameters:\n - input_layer : a `Layer` whose output is the networks prediction\n given its input\n - loss_function : a loss function of the form `f(x, t)` that\n returns a scalar loss given tensors that represent the\n predicted and true values as arguments..\n '
self.input_layer = input_layer
self.loss_function = loss_function
self.target_var = T.matrix('target')<|docstring|>Constructor
:parameters:
- input_layer : a `Layer` whose output is the networks prediction
given its input
- loss_function : a loss function of the form `f(x, t)` that
returns a scalar loss given tensors that represent the
predicted and true values as arguments..<|endoftext|>
|
8d99eafb229dce03cb4274ba7ef942e0e67fbc87c7d2509e8e40b7d6dfcb3b15
|
def get_loss(self, input=None, target=None, *args, **kwargs):
"\n Get loss scalar expression\n\n :parameters:\n - input : (default `None`) an expression that results in the input\n data that is passed to the network\n - target : (default `None`) an expression that results in the\n desired output that the network is being trained to generate\n given the input\n - args : additional arguments passed to `input_layer`'s\n `get_output` method\n - kwargs : additional keyword arguments passed to `input_layer`'s\n `get_output` method\n\n :returns:\n - output : loss expressions\n "
network_output = self.input_layer.get_output(input, *args, **kwargs)
if (target is None):
target = self.target_var
return self.loss_function(network_output, target)
|
Get loss scalar expression
:parameters:
- input : (default `None`) an expression that results in the input
data that is passed to the network
- target : (default `None`) an expression that results in the
desired output that the network is being trained to generate
given the input
- args : additional arguments passed to `input_layer`'s
`get_output` method
- kwargs : additional keyword arguments passed to `input_layer`'s
`get_output` method
:returns:
- output : loss expressions
|
lasagne/objectives.py
|
get_loss
|
dimatura/nntools
| 0
|
python
|
def get_loss(self, input=None, target=None, *args, **kwargs):
"\n Get loss scalar expression\n\n :parameters:\n - input : (default `None`) an expression that results in the input\n data that is passed to the network\n - target : (default `None`) an expression that results in the\n desired output that the network is being trained to generate\n given the input\n - args : additional arguments passed to `input_layer`'s\n `get_output` method\n - kwargs : additional keyword arguments passed to `input_layer`'s\n `get_output` method\n\n :returns:\n - output : loss expressions\n "
network_output = self.input_layer.get_output(input, *args, **kwargs)
if (target is None):
target = self.target_var
return self.loss_function(network_output, target)
|
def get_loss(self, input=None, target=None, *args, **kwargs):
"\n Get loss scalar expression\n\n :parameters:\n - input : (default `None`) an expression that results in the input\n data that is passed to the network\n - target : (default `None`) an expression that results in the\n desired output that the network is being trained to generate\n given the input\n - args : additional arguments passed to `input_layer`'s\n `get_output` method\n - kwargs : additional keyword arguments passed to `input_layer`'s\n `get_output` method\n\n :returns:\n - output : loss expressions\n "
network_output = self.input_layer.get_output(input, *args, **kwargs)
if (target is None):
target = self.target_var
return self.loss_function(network_output, target)<|docstring|>Get loss scalar expression
:parameters:
- input : (default `None`) an expression that results in the input
data that is passed to the network
- target : (default `None`) an expression that results in the
desired output that the network is being trained to generate
given the input
- args : additional arguments passed to `input_layer`'s
`get_output` method
- kwargs : additional keyword arguments passed to `input_layer`'s
`get_output` method
:returns:
- output : loss expressions<|endoftext|>
|
0ec9fe5f5b5ce0865beb6c6a1874995c88b2aa55bd69e0d055eb4ace3c8189a8
|
def paintRequested(self, *args, **kwargs):
' Signal '
pass
|
Signal
|
resources/dot_PyCharm/system/python_stubs/-762174762/PySide/QtGui/QPrintPreviewWidget.py
|
paintRequested
|
basepipe/developer_onboarding
| 1
|
python
|
def paintRequested(self, *args, **kwargs):
' '
pass
|
def paintRequested(self, *args, **kwargs):
' '
pass<|docstring|>Signal<|endoftext|>
|
1895cc21c2986cf745e903b90b124fa03815108c3ad2cb4c9a910326a1c185c1
|
def previewChanged(self, *args, **kwargs):
' Signal '
pass
|
Signal
|
resources/dot_PyCharm/system/python_stubs/-762174762/PySide/QtGui/QPrintPreviewWidget.py
|
previewChanged
|
basepipe/developer_onboarding
| 1
|
python
|
def previewChanged(self, *args, **kwargs):
' '
pass
|
def previewChanged(self, *args, **kwargs):
' '
pass<|docstring|>Signal<|endoftext|>
|
a1f419e871823963655821922c5277e7bb1e0b2e27d4616bc78830828017ac87
|
@staticmethod
def __new__(S, *more):
' T.__new__(S, ...) -> a new object with type S, a subtype of T '
pass
|
T.__new__(S, ...) -> a new object with type S, a subtype of T
|
resources/dot_PyCharm/system/python_stubs/-762174762/PySide/QtGui/QPrintPreviewWidget.py
|
__new__
|
basepipe/developer_onboarding
| 1
|
python
|
@staticmethod
def __new__(S, *more):
' '
pass
|
@staticmethod
def __new__(S, *more):
' '
pass<|docstring|>T.__new__(S, ...) -> a new object with type S, a subtype of T<|endoftext|>
|
582b5e64f12d82b5431cc1ee1f7e2717505be343643f2eb5cc6652a50d8a85d2
|
def forward(self, input, input_padded, roi_labels, box_info):
'\n compute both predictions and loss for 3 branches (contact_state, link, hand_side)\n :param input: pooled_feat, 2D tensor (128*batch_size, 2048)\n :param input_padded: padded_pooled_feat, 2D tensor (128*batch_size, 2048)\n :param roi_labels: object class labels, 2D tensor (batch, 128)\n :param box_info: 3D tensor (batch, num_boxes, 5), each row is [contactstate, handside, magnitude, unitdx, unitdy]\n :return:\n '
if self.training:
batch_size = roi_labels.size(0)
num_proposals = cfg.TRAIN.BATCH_SIZE
input = input.view(batch_size, num_proposals, (- 1))
input_padded = input_padded.view(batch_size, num_proposals, (- 1))
else:
input = input.unsqueeze(0)
input_padded = input_padded.unsqueeze(0)
loss_list = [self.hand_contactstate_part(input_padded, roi_labels, box_info), self.hand_dxdymagnitude_part(input_padded, roi_labels, box_info), self.hand_handside_part(input, roi_labels, box_info)]
return loss_list
|
compute both predictions and loss for 3 branches (contact_state, link, hand_side)
:param input: pooled_feat, 2D tensor (128*batch_size, 2048)
:param input_padded: padded_pooled_feat, 2D tensor (128*batch_size, 2048)
:param roi_labels: object class labels, 2D tensor (batch, 128)
:param box_info: 3D tensor (batch, num_boxes, 5), each row is [contactstate, handside, magnitude, unitdx, unitdy]
:return:
|
lib/model/extension_layers/extension_layers.py
|
forward
|
forever208/hand_object_detector
| 1
|
python
|
def forward(self, input, input_padded, roi_labels, box_info):
'\n compute both predictions and loss for 3 branches (contact_state, link, hand_side)\n :param input: pooled_feat, 2D tensor (128*batch_size, 2048)\n :param input_padded: padded_pooled_feat, 2D tensor (128*batch_size, 2048)\n :param roi_labels: object class labels, 2D tensor (batch, 128)\n :param box_info: 3D tensor (batch, num_boxes, 5), each row is [contactstate, handside, magnitude, unitdx, unitdy]\n :return:\n '
if self.training:
batch_size = roi_labels.size(0)
num_proposals = cfg.TRAIN.BATCH_SIZE
input = input.view(batch_size, num_proposals, (- 1))
input_padded = input_padded.view(batch_size, num_proposals, (- 1))
else:
input = input.unsqueeze(0)
input_padded = input_padded.unsqueeze(0)
loss_list = [self.hand_contactstate_part(input_padded, roi_labels, box_info), self.hand_dxdymagnitude_part(input_padded, roi_labels, box_info), self.hand_handside_part(input, roi_labels, box_info)]
return loss_list
|
def forward(self, input, input_padded, roi_labels, box_info):
'\n compute both predictions and loss for 3 branches (contact_state, link, hand_side)\n :param input: pooled_feat, 2D tensor (128*batch_size, 2048)\n :param input_padded: padded_pooled_feat, 2D tensor (128*batch_size, 2048)\n :param roi_labels: object class labels, 2D tensor (batch, 128)\n :param box_info: 3D tensor (batch, num_boxes, 5), each row is [contactstate, handside, magnitude, unitdx, unitdy]\n :return:\n '
if self.training:
batch_size = roi_labels.size(0)
num_proposals = cfg.TRAIN.BATCH_SIZE
input = input.view(batch_size, num_proposals, (- 1))
input_padded = input_padded.view(batch_size, num_proposals, (- 1))
else:
input = input.unsqueeze(0)
input_padded = input_padded.unsqueeze(0)
loss_list = [self.hand_contactstate_part(input_padded, roi_labels, box_info), self.hand_dxdymagnitude_part(input_padded, roi_labels, box_info), self.hand_handside_part(input, roi_labels, box_info)]
return loss_list<|docstring|>compute both predictions and loss for 3 branches (contact_state, link, hand_side)
:param input: pooled_feat, 2D tensor (128*batch_size, 2048)
:param input_padded: padded_pooled_feat, 2D tensor (128*batch_size, 2048)
:param roi_labels: object class labels, 2D tensor (batch, 128)
:param box_info: 3D tensor (batch, num_boxes, 5), each row is [contactstate, handside, magnitude, unitdx, unitdy]
:return:<|endoftext|>
|
393a9b18dc09f3c6697b996d132ce3ff3d92568b1c32905194748fca4ceb6fd1
|
def init_layers_weights(self):
'\n define the the layer and do weights initialisation\n '
self.hand_contact_state_layer = nn.Sequential(nn.Linear(2048, 32), nn.ReLU(), nn.Dropout(p=0.5), nn.Linear(32, 5))
self.hand_dydx_layer = torch.nn.Linear(2048, 3)
self.hand_lr_layer = torch.nn.Linear(2048, 1)
self.hand_contactstate_loss = nn.CrossEntropyLoss()
self.hand_dxdymagnitude_loss = nn.MSELoss()
self.hand_handside_loss = nn.BCEWithLogitsLoss()
self._init_weights()
|
define the the layer and do weights initialisation
|
lib/model/extension_layers/extension_layers.py
|
init_layers_weights
|
forever208/hand_object_detector
| 1
|
python
|
def init_layers_weights(self):
'\n \n '
self.hand_contact_state_layer = nn.Sequential(nn.Linear(2048, 32), nn.ReLU(), nn.Dropout(p=0.5), nn.Linear(32, 5))
self.hand_dydx_layer = torch.nn.Linear(2048, 3)
self.hand_lr_layer = torch.nn.Linear(2048, 1)
self.hand_contactstate_loss = nn.CrossEntropyLoss()
self.hand_dxdymagnitude_loss = nn.MSELoss()
self.hand_handside_loss = nn.BCEWithLogitsLoss()
self._init_weights()
|
def init_layers_weights(self):
'\n \n '
self.hand_contact_state_layer = nn.Sequential(nn.Linear(2048, 32), nn.ReLU(), nn.Dropout(p=0.5), nn.Linear(32, 5))
self.hand_dydx_layer = torch.nn.Linear(2048, 3)
self.hand_lr_layer = torch.nn.Linear(2048, 1)
self.hand_contactstate_loss = nn.CrossEntropyLoss()
self.hand_dxdymagnitude_loss = nn.MSELoss()
self.hand_handside_loss = nn.BCEWithLogitsLoss()
self._init_weights()<|docstring|>define the the layer and do weights initialisation<|endoftext|>
|
766f313e94da7950859692d012892169913c05781d6fde45b8e56cc31d90bf7a
|
def hand_contactstate_part(self, input, roi_labels, box_info):
'\n compute the prediction and loss for contact state\n :param input: padded_pooled_feat, 3D tensor (batch, 128, 2048)\n :param roi_labels: object class labels, 2D tensor (batch, 128)\n :param box_info: 3D tensor (batch, num_boxes, 5), each row is [contactstate, handside, magnitude, unitdx, unitdy]\n :return:\n '
contactstate_pred = self.hand_contact_state_layer(input)
contactstate_loss = torch.zeros(1, dtype=torch.float).to(self.device)
if self.training:
for i in range(input.size(0)):
gt_labels = box_info[(i, :, 0)]
index = (roi_labels[i] == 2)
if (index.sum() > 0):
contactstate_loss_sub = (0.1 * self.hand_contactstate_loss(contactstate_pred[i][index], gt_labels[index].long()))
contactstate_loss += contactstate_loss_sub
contactstate_loss = (contactstate_loss / input.size(0))
return (contactstate_pred, contactstate_loss)
|
compute the prediction and loss for contact state
:param input: padded_pooled_feat, 3D tensor (batch, 128, 2048)
:param roi_labels: object class labels, 2D tensor (batch, 128)
:param box_info: 3D tensor (batch, num_boxes, 5), each row is [contactstate, handside, magnitude, unitdx, unitdy]
:return:
|
lib/model/extension_layers/extension_layers.py
|
hand_contactstate_part
|
forever208/hand_object_detector
| 1
|
python
|
def hand_contactstate_part(self, input, roi_labels, box_info):
'\n compute the prediction and loss for contact state\n :param input: padded_pooled_feat, 3D tensor (batch, 128, 2048)\n :param roi_labels: object class labels, 2D tensor (batch, 128)\n :param box_info: 3D tensor (batch, num_boxes, 5), each row is [contactstate, handside, magnitude, unitdx, unitdy]\n :return:\n '
contactstate_pred = self.hand_contact_state_layer(input)
contactstate_loss = torch.zeros(1, dtype=torch.float).to(self.device)
if self.training:
for i in range(input.size(0)):
gt_labels = box_info[(i, :, 0)]
index = (roi_labels[i] == 2)
if (index.sum() > 0):
contactstate_loss_sub = (0.1 * self.hand_contactstate_loss(contactstate_pred[i][index], gt_labels[index].long()))
contactstate_loss += contactstate_loss_sub
contactstate_loss = (contactstate_loss / input.size(0))
return (contactstate_pred, contactstate_loss)
|
def hand_contactstate_part(self, input, roi_labels, box_info):
'\n compute the prediction and loss for contact state\n :param input: padded_pooled_feat, 3D tensor (batch, 128, 2048)\n :param roi_labels: object class labels, 2D tensor (batch, 128)\n :param box_info: 3D tensor (batch, num_boxes, 5), each row is [contactstate, handside, magnitude, unitdx, unitdy]\n :return:\n '
contactstate_pred = self.hand_contact_state_layer(input)
contactstate_loss = torch.zeros(1, dtype=torch.float).to(self.device)
if self.training:
for i in range(input.size(0)):
gt_labels = box_info[(i, :, 0)]
index = (roi_labels[i] == 2)
if (index.sum() > 0):
contactstate_loss_sub = (0.1 * self.hand_contactstate_loss(contactstate_pred[i][index], gt_labels[index].long()))
contactstate_loss += contactstate_loss_sub
contactstate_loss = (contactstate_loss / input.size(0))
return (contactstate_pred, contactstate_loss)<|docstring|>compute the prediction and loss for contact state
:param input: padded_pooled_feat, 3D tensor (batch, 128, 2048)
:param roi_labels: object class labels, 2D tensor (batch, 128)
:param box_info: 3D tensor (batch, num_boxes, 5), each row is [contactstate, handside, magnitude, unitdx, unitdy]
:return:<|endoftext|>
|
f895d621deabb3af5fe301f01db29b61c89409c359fd7d82a19de23259ef5826
|
def hand_dxdymagnitude_part(self, input, roi_labels, box_info):
'\n compute the prediction and loss for link\n :param input: padded_pooled_feat, 3D tensor (batch, num_boxes, 2048)\n :param roi_labels: object class labels, 2D tensor (batch, num_boxes)\n :param box_info: 3D tensor (batch, num_boxes, 5), each row is [contactstate, handside, magnitude, unitdx, unitdy]\n :return:\n '
dxdymagnitude_pred = self.hand_dydx_layer(input)
dxdymagnitude_pred_sub = (0.1 * F.normalize(dxdymagnitude_pred[(:, :, 1:)], p=2, dim=2))
dxdymagnitude_pred_norm = torch.cat([dxdymagnitude_pred[(:, :, 0)].unsqueeze((- 1)), dxdymagnitude_pred_sub], dim=2)
dxdymagnitude_loss = torch.zeros(1, dtype=torch.float).to(self.device)
if self.training:
for i in range(input.size(0)):
gt_labels = box_info[(i, :, 2:5)]
index = (box_info[(i, :, 0)] > 0)
if (index.sum() > 0):
dxdymagnitude_loss_sub = (0.1 * self.hand_dxdymagnitude_loss(dxdymagnitude_pred_norm[i][index], gt_labels[index]))
dxdymagnitude_loss += dxdymagnitude_loss_sub
dxdymagnitude_loss = (dxdymagnitude_loss / input.size(0))
return (dxdymagnitude_pred_norm, dxdymagnitude_loss)
|
compute the prediction and loss for link
:param input: padded_pooled_feat, 3D tensor (batch, num_boxes, 2048)
:param roi_labels: object class labels, 2D tensor (batch, num_boxes)
:param box_info: 3D tensor (batch, num_boxes, 5), each row is [contactstate, handside, magnitude, unitdx, unitdy]
:return:
|
lib/model/extension_layers/extension_layers.py
|
hand_dxdymagnitude_part
|
forever208/hand_object_detector
| 1
|
python
|
def hand_dxdymagnitude_part(self, input, roi_labels, box_info):
'\n compute the prediction and loss for link\n :param input: padded_pooled_feat, 3D tensor (batch, num_boxes, 2048)\n :param roi_labels: object class labels, 2D tensor (batch, num_boxes)\n :param box_info: 3D tensor (batch, num_boxes, 5), each row is [contactstate, handside, magnitude, unitdx, unitdy]\n :return:\n '
dxdymagnitude_pred = self.hand_dydx_layer(input)
dxdymagnitude_pred_sub = (0.1 * F.normalize(dxdymagnitude_pred[(:, :, 1:)], p=2, dim=2))
dxdymagnitude_pred_norm = torch.cat([dxdymagnitude_pred[(:, :, 0)].unsqueeze((- 1)), dxdymagnitude_pred_sub], dim=2)
dxdymagnitude_loss = torch.zeros(1, dtype=torch.float).to(self.device)
if self.training:
for i in range(input.size(0)):
gt_labels = box_info[(i, :, 2:5)]
index = (box_info[(i, :, 0)] > 0)
if (index.sum() > 0):
dxdymagnitude_loss_sub = (0.1 * self.hand_dxdymagnitude_loss(dxdymagnitude_pred_norm[i][index], gt_labels[index]))
dxdymagnitude_loss += dxdymagnitude_loss_sub
dxdymagnitude_loss = (dxdymagnitude_loss / input.size(0))
return (dxdymagnitude_pred_norm, dxdymagnitude_loss)
|
def hand_dxdymagnitude_part(self, input, roi_labels, box_info):
'\n compute the prediction and loss for link\n :param input: padded_pooled_feat, 3D tensor (batch, num_boxes, 2048)\n :param roi_labels: object class labels, 2D tensor (batch, num_boxes)\n :param box_info: 3D tensor (batch, num_boxes, 5), each row is [contactstate, handside, magnitude, unitdx, unitdy]\n :return:\n '
dxdymagnitude_pred = self.hand_dydx_layer(input)
dxdymagnitude_pred_sub = (0.1 * F.normalize(dxdymagnitude_pred[(:, :, 1:)], p=2, dim=2))
dxdymagnitude_pred_norm = torch.cat([dxdymagnitude_pred[(:, :, 0)].unsqueeze((- 1)), dxdymagnitude_pred_sub], dim=2)
dxdymagnitude_loss = torch.zeros(1, dtype=torch.float).to(self.device)
if self.training:
for i in range(input.size(0)):
gt_labels = box_info[(i, :, 2:5)]
index = (box_info[(i, :, 0)] > 0)
if (index.sum() > 0):
dxdymagnitude_loss_sub = (0.1 * self.hand_dxdymagnitude_loss(dxdymagnitude_pred_norm[i][index], gt_labels[index]))
dxdymagnitude_loss += dxdymagnitude_loss_sub
dxdymagnitude_loss = (dxdymagnitude_loss / input.size(0))
return (dxdymagnitude_pred_norm, dxdymagnitude_loss)<|docstring|>compute the prediction and loss for link
:param input: padded_pooled_feat, 3D tensor (batch, num_boxes, 2048)
:param roi_labels: object class labels, 2D tensor (batch, num_boxes)
:param box_info: 3D tensor (batch, num_boxes, 5), each row is [contactstate, handside, magnitude, unitdx, unitdy]
:return:<|endoftext|>
|
3c92fb2aa43af73e6781f35bfc20f08e34acfb088d59c262bcb0984f060b7c1f
|
def normal_init(m, mean, stddev, truncated=False):
'\n weight initializer: truncated normal and random normal.\n '
if truncated:
m.weight.data.normal_().fmod_(2).mul_(stddev).add_(mean)
else:
m.weight.data.normal_(mean, stddev)
m.bias.data.zero_()
|
weight initializer: truncated normal and random normal.
|
lib/model/extension_layers/extension_layers.py
|
normal_init
|
forever208/hand_object_detector
| 1
|
python
|
def normal_init(m, mean, stddev, truncated=False):
'\n \n '
if truncated:
m.weight.data.normal_().fmod_(2).mul_(stddev).add_(mean)
else:
m.weight.data.normal_(mean, stddev)
m.bias.data.zero_()
|
def normal_init(m, mean, stddev, truncated=False):
'\n \n '
if truncated:
m.weight.data.normal_().fmod_(2).mul_(stddev).add_(mean)
else:
m.weight.data.normal_(mean, stddev)
m.bias.data.zero_()<|docstring|>weight initializer: truncated normal and random normal.<|endoftext|>
|
c69c3d25b57269798bf3b20def6567ddd433f4fbb2062210393241f442e9046e
|
def euclidian_distance(a, b):
'\n Compute Euclidian distance between two 1-D arrays.\n\n Parameters\n ----------\n a: array_like\n Input array.\n b: array_like\n Input array.\n\n Returns\n -------\n Euclidian distance between two vectors.\n '
distance = 0.0
for i in range(len(a)):
distance += ((a[i] - b[i]) ** 2)
return np.sqrt(distance)
|
Compute Euclidian distance between two 1-D arrays.
Parameters
----------
a: array_like
Input array.
b: array_like
Input array.
Returns
-------
Euclidian distance between two vectors.
|
zero2ml/utils/distance_metrics.py
|
euclidian_distance
|
bekzatalish/zero2ml
| 0
|
python
|
def euclidian_distance(a, b):
'\n Compute Euclidian distance between two 1-D arrays.\n\n Parameters\n ----------\n a: array_like\n Input array.\n b: array_like\n Input array.\n\n Returns\n -------\n Euclidian distance between two vectors.\n '
distance = 0.0
for i in range(len(a)):
distance += ((a[i] - b[i]) ** 2)
return np.sqrt(distance)
|
def euclidian_distance(a, b):
'\n Compute Euclidian distance between two 1-D arrays.\n\n Parameters\n ----------\n a: array_like\n Input array.\n b: array_like\n Input array.\n\n Returns\n -------\n Euclidian distance between two vectors.\n '
distance = 0.0
for i in range(len(a)):
distance += ((a[i] - b[i]) ** 2)
return np.sqrt(distance)<|docstring|>Compute Euclidian distance between two 1-D arrays.
Parameters
----------
a: array_like
Input array.
b: array_like
Input array.
Returns
-------
Euclidian distance between two vectors.<|endoftext|>
|
74bf95c03e2865c9226744bc0ceb750e6260b4265fb900fccaa3bc79c71f19ef
|
def wait_for_confirmation(algod_client, transaction_id, timeout):
"Wait until the transaction is confirmed or rejected, or until 'timeout'\n number of rounds have passed.\n\n Args:\n algod_client (AlgodClient): Algod Client\n transaction_id (str): the transaction to wait for\n timeout (int): maximum number of rounds to wait\n\n Returns:\n (dict): pending transaction information, or throws an error if the\n transaction is not confirmed or rejected in the next timeout rounds\n "
start_round = (algod_client.status()['last-round'] + 1)
current_round = start_round
while (current_round < (start_round + timeout)):
algod_client.status_after_block(current_round)
try:
pending_txn = algod_client.pending_transaction_info(transaction_id)
except Exception:
return
if (pending_txn.get('confirmed-round', 0) > 0):
return pending_txn
elif pending_txn['pool-error']:
raise Exception('pool error: {}'.format(pending_txn['pool-error']))
current_round += 1
raise Exception('pending tx not found in timeout rounds, timeout value = : {}'.format(timeout))
|
Wait until the transaction is confirmed or rejected, or until 'timeout'
number of rounds have passed.
Args:
algod_client (AlgodClient): Algod Client
transaction_id (str): the transaction to wait for
timeout (int): maximum number of rounds to wait
Returns:
(dict): pending transaction information, or throws an error if the
transaction is not confirmed or rejected in the next timeout rounds
|
ppos_dex_data.py
|
wait_for_confirmation
|
cusma/pposdex
| 10
|
python
|
def wait_for_confirmation(algod_client, transaction_id, timeout):
"Wait until the transaction is confirmed or rejected, or until 'timeout'\n number of rounds have passed.\n\n Args:\n algod_client (AlgodClient): Algod Client\n transaction_id (str): the transaction to wait for\n timeout (int): maximum number of rounds to wait\n\n Returns:\n (dict): pending transaction information, or throws an error if the\n transaction is not confirmed or rejected in the next timeout rounds\n "
start_round = (algod_client.status()['last-round'] + 1)
current_round = start_round
while (current_round < (start_round + timeout)):
algod_client.status_after_block(current_round)
try:
pending_txn = algod_client.pending_transaction_info(transaction_id)
except Exception:
return
if (pending_txn.get('confirmed-round', 0) > 0):
return pending_txn
elif pending_txn['pool-error']:
raise Exception('pool error: {}'.format(pending_txn['pool-error']))
current_round += 1
raise Exception('pending tx not found in timeout rounds, timeout value = : {}'.format(timeout))
|
def wait_for_confirmation(algod_client, transaction_id, timeout):
"Wait until the transaction is confirmed or rejected, or until 'timeout'\n number of rounds have passed.\n\n Args:\n algod_client (AlgodClient): Algod Client\n transaction_id (str): the transaction to wait for\n timeout (int): maximum number of rounds to wait\n\n Returns:\n (dict): pending transaction information, or throws an error if the\n transaction is not confirmed or rejected in the next timeout rounds\n "
start_round = (algod_client.status()['last-round'] + 1)
current_round = start_round
while (current_round < (start_round + timeout)):
algod_client.status_after_block(current_round)
try:
pending_txn = algod_client.pending_transaction_info(transaction_id)
except Exception:
return
if (pending_txn.get('confirmed-round', 0) > 0):
return pending_txn
elif pending_txn['pool-error']:
raise Exception('pool error: {}'.format(pending_txn['pool-error']))
current_round += 1
raise Exception('pending tx not found in timeout rounds, timeout value = : {}'.format(timeout))<|docstring|>Wait until the transaction is confirmed or rejected, or until 'timeout'
number of rounds have passed.
Args:
algod_client (AlgodClient): Algod Client
transaction_id (str): the transaction to wait for
timeout (int): maximum number of rounds to wait
Returns:
(dict): pending transaction information, or throws an error if the
transaction is not confirmed or rejected in the next timeout rounds<|endoftext|>
|
509fb4091f7aaa7b384134e52eae74b0e2f369bf6d85d0152b08970dc4938147
|
def query_prop_data(dir_name=None, ents_dict=None, locations=None, depth=0, timespan=None, interval=None):
'\n Queries data for the module property for given location(s), depth, timespan and interval.\n '
(df, ents_dict) = data_utils.query_wd_prop(dir_name=dir_name, ents_dict=ents_dict, locations=locations, depth=depth, timespan=timespan, interval=interval, pid=pid, sub_pid=sub_pid, col_name=col_name, col_prefix=col_prefix, ignore_char=ignore_char, span=span)
org_renames = [('mem_world_trade_organization', 'mem_wto'), ('mem_european_union', 'mem_eu'), ('mem_organisation_for_economic_cooperation_and_development', 'mem_oecd'), ('mem_united_nations', 'mem_un'), ('mem_world_health_organization', 'mem_who'), ('mem_international_monetary_fund', 'mem_imf')]
for o_r in org_renames:
if (o_r[0] in df.columns):
df.rename(columns={o_r[0]: o_r[1]}, inplace=True)
df.fillna(value=False, inplace=True)
return (df, ents_dict)
|
Queries data for the module property for given location(s), depth, timespan and interval.
|
src/wikirepo/data/institutional/org_membership.py
|
query_prop_data
|
andrewtavis/wikirepo
| 21
|
python
|
def query_prop_data(dir_name=None, ents_dict=None, locations=None, depth=0, timespan=None, interval=None):
'\n \n '
(df, ents_dict) = data_utils.query_wd_prop(dir_name=dir_name, ents_dict=ents_dict, locations=locations, depth=depth, timespan=timespan, interval=interval, pid=pid, sub_pid=sub_pid, col_name=col_name, col_prefix=col_prefix, ignore_char=ignore_char, span=span)
org_renames = [('mem_world_trade_organization', 'mem_wto'), ('mem_european_union', 'mem_eu'), ('mem_organisation_for_economic_cooperation_and_development', 'mem_oecd'), ('mem_united_nations', 'mem_un'), ('mem_world_health_organization', 'mem_who'), ('mem_international_monetary_fund', 'mem_imf')]
for o_r in org_renames:
if (o_r[0] in df.columns):
df.rename(columns={o_r[0]: o_r[1]}, inplace=True)
df.fillna(value=False, inplace=True)
return (df, ents_dict)
|
def query_prop_data(dir_name=None, ents_dict=None, locations=None, depth=0, timespan=None, interval=None):
'\n \n '
(df, ents_dict) = data_utils.query_wd_prop(dir_name=dir_name, ents_dict=ents_dict, locations=locations, depth=depth, timespan=timespan, interval=interval, pid=pid, sub_pid=sub_pid, col_name=col_name, col_prefix=col_prefix, ignore_char=ignore_char, span=span)
org_renames = [('mem_world_trade_organization', 'mem_wto'), ('mem_european_union', 'mem_eu'), ('mem_organisation_for_economic_cooperation_and_development', 'mem_oecd'), ('mem_united_nations', 'mem_un'), ('mem_world_health_organization', 'mem_who'), ('mem_international_monetary_fund', 'mem_imf')]
for o_r in org_renames:
if (o_r[0] in df.columns):
df.rename(columns={o_r[0]: o_r[1]}, inplace=True)
df.fillna(value=False, inplace=True)
return (df, ents_dict)<|docstring|>Queries data for the module property for given location(s), depth, timespan and interval.<|endoftext|>
|
4f5d461a8cfa5030e1d91729547f63eaf3a65576ccee66741c90e7d9d97d5521
|
def build_crf_net(self, input_blob, initial_state, transitions):
'\n Adds the crf_net recurrent operator to the model.\n\n model: model_helper.ModelHelper object new operators would be added\n to\n\n input_blob: the input sequence in a format T x N x D\n where T is sequence size, N - batch size and D - input dimention\n ##Only supports batch-size 1##\n\n seq_lengths: blob containing sequence lengths (unused)\n '
scope = 'crf_net'
def s(name):
''
return '{}/{}'.format(str(scope), str(name))
step_model = model_helper.ModelHelper(name='crf_step', param_model=self.model)
(input_t, cell_t_prev, _) = step_model.net.AddExternalInputs(core.ScopedBlobReference('input_t'), core.ScopedBlobReference('cell_t_prev'), transitions)
zero_segment_id = step_model.param_init_net.ConstantFill([], [s('zero_segment_id')], value=0, shape=[self.num_classes_padded], dtype=core.DataType.INT32)
step_model.param_init_net.AddExternalOutput(zero_segment_id)
' the CRF step '
prev_transpose = brew.transpose(step_model, cell_t_prev, [s('prev_transpose')], axes=(0, 2, 1))
prev_tiled = step_model.net.Tile(prev_transpose, [s('prev_tiled')], tiles=self.num_classes_padded, axis=2)
input_t_tiled = step_model.net.Tile(input_t, [s('input_t_tiled')], tiles=self.num_classes_padded, axis=1)
input_with_prev = step_model.net.Add([prev_tiled, input_t_tiled], [s('input_with_prev')])
all_with_transitions = step_model.net.Add([input_with_prev, transitions], [s('prev_with_transitions')], broadcast=1, use_grad_hack=1)
(all_with_transitions_reshaped, _) = step_model.net.Reshape(all_with_transitions, [s('all_with_transitions_reshaped'), s('all_with_transitions_orig')], shape=(self.num_classes_padded, self.num_classes_padded))
cell_t = step_model.net.SortedSegmentRangeLogSumExp([all_with_transitions_reshaped, zero_segment_id], [s('cell_t')])
step_model.net.AddExternalOutputs(cell_t)
' recurrent network '
cell_input_blob = initial_state
(out_all, out_last) = recurrent.recurrent_net(net=self.model.net, cell_net=step_model.net, inputs=[(input_t, input_blob)], initial_cell_inputs=[(cell_t_prev, cell_input_blob)], links={cell_t_prev: cell_t}, scope=scope, outputs_with_grads=(1,))
return out_last
|
Adds the crf_net recurrent operator to the model.
model: model_helper.ModelHelper object new operators would be added
to
input_blob: the input sequence in a format T x N x D
where T is sequence size, N - batch size and D - input dimention
##Only supports batch-size 1##
seq_lengths: blob containing sequence lengths (unused)
|
caffe2/python/crf.py
|
build_crf_net
|
ZiboMeng/caffe2
| 585
|
python
|
def build_crf_net(self, input_blob, initial_state, transitions):
'\n Adds the crf_net recurrent operator to the model.\n\n model: model_helper.ModelHelper object new operators would be added\n to\n\n input_blob: the input sequence in a format T x N x D\n where T is sequence size, N - batch size and D - input dimention\n ##Only supports batch-size 1##\n\n seq_lengths: blob containing sequence lengths (unused)\n '
scope = 'crf_net'
def s(name):
return '{}/{}'.format(str(scope), str(name))
step_model = model_helper.ModelHelper(name='crf_step', param_model=self.model)
(input_t, cell_t_prev, _) = step_model.net.AddExternalInputs(core.ScopedBlobReference('input_t'), core.ScopedBlobReference('cell_t_prev'), transitions)
zero_segment_id = step_model.param_init_net.ConstantFill([], [s('zero_segment_id')], value=0, shape=[self.num_classes_padded], dtype=core.DataType.INT32)
step_model.param_init_net.AddExternalOutput(zero_segment_id)
' the CRF step '
prev_transpose = brew.transpose(step_model, cell_t_prev, [s('prev_transpose')], axes=(0, 2, 1))
prev_tiled = step_model.net.Tile(prev_transpose, [s('prev_tiled')], tiles=self.num_classes_padded, axis=2)
input_t_tiled = step_model.net.Tile(input_t, [s('input_t_tiled')], tiles=self.num_classes_padded, axis=1)
input_with_prev = step_model.net.Add([prev_tiled, input_t_tiled], [s('input_with_prev')])
all_with_transitions = step_model.net.Add([input_with_prev, transitions], [s('prev_with_transitions')], broadcast=1, use_grad_hack=1)
(all_with_transitions_reshaped, _) = step_model.net.Reshape(all_with_transitions, [s('all_with_transitions_reshaped'), s('all_with_transitions_orig')], shape=(self.num_classes_padded, self.num_classes_padded))
cell_t = step_model.net.SortedSegmentRangeLogSumExp([all_with_transitions_reshaped, zero_segment_id], [s('cell_t')])
step_model.net.AddExternalOutputs(cell_t)
' recurrent network '
cell_input_blob = initial_state
(out_all, out_last) = recurrent.recurrent_net(net=self.model.net, cell_net=step_model.net, inputs=[(input_t, input_blob)], initial_cell_inputs=[(cell_t_prev, cell_input_blob)], links={cell_t_prev: cell_t}, scope=scope, outputs_with_grads=(1,))
return out_last
|
def build_crf_net(self, input_blob, initial_state, transitions):
'\n Adds the crf_net recurrent operator to the model.\n\n model: model_helper.ModelHelper object new operators would be added\n to\n\n input_blob: the input sequence in a format T x N x D\n where T is sequence size, N - batch size and D - input dimention\n ##Only supports batch-size 1##\n\n seq_lengths: blob containing sequence lengths (unused)\n '
scope = 'crf_net'
def s(name):
return '{}/{}'.format(str(scope), str(name))
step_model = model_helper.ModelHelper(name='crf_step', param_model=self.model)
(input_t, cell_t_prev, _) = step_model.net.AddExternalInputs(core.ScopedBlobReference('input_t'), core.ScopedBlobReference('cell_t_prev'), transitions)
zero_segment_id = step_model.param_init_net.ConstantFill([], [s('zero_segment_id')], value=0, shape=[self.num_classes_padded], dtype=core.DataType.INT32)
step_model.param_init_net.AddExternalOutput(zero_segment_id)
' the CRF step '
prev_transpose = brew.transpose(step_model, cell_t_prev, [s('prev_transpose')], axes=(0, 2, 1))
prev_tiled = step_model.net.Tile(prev_transpose, [s('prev_tiled')], tiles=self.num_classes_padded, axis=2)
input_t_tiled = step_model.net.Tile(input_t, [s('input_t_tiled')], tiles=self.num_classes_padded, axis=1)
input_with_prev = step_model.net.Add([prev_tiled, input_t_tiled], [s('input_with_prev')])
all_with_transitions = step_model.net.Add([input_with_prev, transitions], [s('prev_with_transitions')], broadcast=1, use_grad_hack=1)
(all_with_transitions_reshaped, _) = step_model.net.Reshape(all_with_transitions, [s('all_with_transitions_reshaped'), s('all_with_transitions_orig')], shape=(self.num_classes_padded, self.num_classes_padded))
cell_t = step_model.net.SortedSegmentRangeLogSumExp([all_with_transitions_reshaped, zero_segment_id], [s('cell_t')])
step_model.net.AddExternalOutputs(cell_t)
' recurrent network '
cell_input_blob = initial_state
(out_all, out_last) = recurrent.recurrent_net(net=self.model.net, cell_net=step_model.net, inputs=[(input_t, input_blob)], initial_cell_inputs=[(cell_t_prev, cell_input_blob)], links={cell_t_prev: cell_t}, scope=scope, outputs_with_grads=(1,))
return out_last<|docstring|>Adds the crf_net recurrent operator to the model.
model: model_helper.ModelHelper object new operators would be added
to
input_blob: the input sequence in a format T x N x D
where T is sequence size, N - batch size and D - input dimention
##Only supports batch-size 1##
seq_lengths: blob containing sequence lengths (unused)<|endoftext|>
|
bf500eafe811e4b5b9246f457f0049ba56562af1763142b40d4dde59e595f7d7
|
def __init__(self, db=':memory:'):
' Open new connection and save link to it. Ensure only one connection\n is open all the time.\n '
if (self.__class__.__connection__ is None):
self.__class__.__connection__ = sqlite3.connect(db, check_same_thread=False)
self.__class__.__connection__.row_factory = sqlite3.Row
|
Open new connection and save link to it. Ensure only one connection
is open all the time.
|
ankor/db.py
|
__init__
|
kolybasov/ankor
| 0
|
python
|
def __init__(self, db=':memory:'):
' Open new connection and save link to it. Ensure only one connection\n is open all the time.\n '
if (self.__class__.__connection__ is None):
self.__class__.__connection__ = sqlite3.connect(db, check_same_thread=False)
self.__class__.__connection__.row_factory = sqlite3.Row
|
def __init__(self, db=':memory:'):
' Open new connection and save link to it. Ensure only one connection\n is open all the time.\n '
if (self.__class__.__connection__ is None):
self.__class__.__connection__ = sqlite3.connect(db, check_same_thread=False)
self.__class__.__connection__.row_factory = sqlite3.Row<|docstring|>Open new connection and save link to it. Ensure only one connection
is open all the time.<|endoftext|>
|
0c29cb57bfa128c46bcc62bc75eff68ccb882e8319dedc6d61d1e182096b1dc7
|
def execute(self, *args):
' Shorthand for sqlite3.Connection.execute method. '
return self.__class__.__connection__.execute(*args)
|
Shorthand for sqlite3.Connection.execute method.
|
ankor/db.py
|
execute
|
kolybasov/ankor
| 0
|
python
|
def execute(self, *args):
' '
return self.__class__.__connection__.execute(*args)
|
def execute(self, *args):
' '
return self.__class__.__connection__.execute(*args)<|docstring|>Shorthand for sqlite3.Connection.execute method.<|endoftext|>
|
f44ed3189e817521b6132bbd5613737b6a50b662c70f9b4e68d5f8b06d942daa
|
def close(self):
' Close DB connection and remove links to it. '
self.__class__.__connection__.close()
self.__class__.__connection__ = None
|
Close DB connection and remove links to it.
|
ankor/db.py
|
close
|
kolybasov/ankor
| 0
|
python
|
def close(self):
' '
self.__class__.__connection__.close()
self.__class__.__connection__ = None
|
def close(self):
' '
self.__class__.__connection__.close()
self.__class__.__connection__ = None<|docstring|>Close DB connection and remove links to it.<|endoftext|>
|
9c7c2324620c0b382ef1502629d9ba5199cbe402c0337015248dbcc202044e47
|
def conllu_reader(f):
'\n file中的格式类似与\n\n 1\t“\t“\tPUNCT\t``\t_\t20\tpunct\t20:punct\tSpaceAfter=No\n 2\tWhile\twhile\tSCONJ\tIN\t_\t9\tmark\t9:mark\t_\n 3\tmuch\tmuch\tADJ\tJJ\tDegree=Pos\t9\tnsubj\t9:nsubj\t_\n 4\tof\tof\tADP\tIN\t_\t7\tcase\t7:case\t_\n\n :param f:\n :return:\n '
ex = root()
for line in f:
line = line.strip()
if (not line):
(yield ex)
ex = root()
continue
if (line[0] == '#'):
continue
parts = line.split()
assert (len(parts) == 10), ('invalid conllx line: %s' % line)
(_id, _form, _lemma, _upos, _xpos, _feats, _head, _deprel, _deps, _misc) = parts
ex['id'].append(_id)
ex['form'].append(_form)
ex['lemma'].append(_lemma)
ex['upos'].append(_upos)
ex['pos'].append(_xpos)
ex['feats'].append(_feats)
if (_head == '_'):
_head = 0
ex['head'].append(_head)
ex['deprel'].append(_deprel)
ex['deps'].append(_deps)
ex['misc'].append(_misc)
if (len(ex['form']) > 0):
(yield ex)
|
file中的格式类似与
1 “ “ PUNCT `` _ 20 punct 20:punct SpaceAfter=No
2 While while SCONJ IN _ 9 mark 9:mark _
3 much much ADJ JJ Degree=Pos 9 nsubj 9:nsubj _
4 of of ADP IN _ 7 case 7:case _
:param f:
:return:
|
Perturbed-Masking/utils/conlludataset.py
|
conllu_reader
|
missakaherath/RoBERTaABSA
| 0
|
python
|
def conllu_reader(f):
'\n file中的格式类似与\n\n 1\t“\t“\tPUNCT\t``\t_\t20\tpunct\t20:punct\tSpaceAfter=No\n 2\tWhile\twhile\tSCONJ\tIN\t_\t9\tmark\t9:mark\t_\n 3\tmuch\tmuch\tADJ\tJJ\tDegree=Pos\t9\tnsubj\t9:nsubj\t_\n 4\tof\tof\tADP\tIN\t_\t7\tcase\t7:case\t_\n\n :param f:\n :return:\n '
ex = root()
for line in f:
line = line.strip()
if (not line):
(yield ex)
ex = root()
continue
if (line[0] == '#'):
continue
parts = line.split()
assert (len(parts) == 10), ('invalid conllx line: %s' % line)
(_id, _form, _lemma, _upos, _xpos, _feats, _head, _deprel, _deps, _misc) = parts
ex['id'].append(_id)
ex['form'].append(_form)
ex['lemma'].append(_lemma)
ex['upos'].append(_upos)
ex['pos'].append(_xpos)
ex['feats'].append(_feats)
if (_head == '_'):
_head = 0
ex['head'].append(_head)
ex['deprel'].append(_deprel)
ex['deps'].append(_deps)
ex['misc'].append(_misc)
if (len(ex['form']) > 0):
(yield ex)
|
def conllu_reader(f):
'\n file中的格式类似与\n\n 1\t“\t“\tPUNCT\t``\t_\t20\tpunct\t20:punct\tSpaceAfter=No\n 2\tWhile\twhile\tSCONJ\tIN\t_\t9\tmark\t9:mark\t_\n 3\tmuch\tmuch\tADJ\tJJ\tDegree=Pos\t9\tnsubj\t9:nsubj\t_\n 4\tof\tof\tADP\tIN\t_\t7\tcase\t7:case\t_\n\n :param f:\n :return:\n '
ex = root()
for line in f:
line = line.strip()
if (not line):
(yield ex)
ex = root()
continue
if (line[0] == '#'):
continue
parts = line.split()
assert (len(parts) == 10), ('invalid conllx line: %s' % line)
(_id, _form, _lemma, _upos, _xpos, _feats, _head, _deprel, _deps, _misc) = parts
ex['id'].append(_id)
ex['form'].append(_form)
ex['lemma'].append(_lemma)
ex['upos'].append(_upos)
ex['pos'].append(_xpos)
ex['feats'].append(_feats)
if (_head == '_'):
_head = 0
ex['head'].append(_head)
ex['deprel'].append(_deprel)
ex['deps'].append(_deps)
ex['misc'].append(_misc)
if (len(ex['form']) > 0):
(yield ex)<|docstring|>file中的格式类似与
1 “ “ PUNCT `` _ 20 punct 20:punct SpaceAfter=No
2 While while SCONJ IN _ 9 mark 9:mark _
3 much much ADJ JJ Degree=Pos 9 nsubj 9:nsubj _
4 of of ADP IN _ 7 case 7:case _
:param f:
:return:<|endoftext|>
|
9f3db8ebf91d5071a6e5c8f2bc87d4fa7d3380ba30f04a6d29ef51ade9b46ede
|
def load(self, paths):
'\n 输出的DataSet包含以下的field\n tokens pos dep aspects\n ["The", "bread", ...] ["DET", "NOUN",...] [["dep", 2, 1], ["nsubj", 4, 2], ...] [{"term": ["bread"], "polarity": "positive", "from": 1, "to": 2}]\n 其中dep中["dep", 2, 1]指当前这个word的head是2(0是root,这里2就是bread),"dep"是依赖关系为dep\n\n :param paths:\n :return:\n '
data_bundle = DataBundle()
folder_name = os.path.basename(paths)
fns = [f'{folder_name}_Test_biaffine_depparsed.json', f'{folder_name}_Train_biaffine_depparsed.json']
if (not os.path.exists(os.path.join(paths, fns[0]))):
fns = [f'Test_biaffine_depparsed.json', f'Train_biaffine_depparsed.json']
for (split, name) in zip(['test', 'train'], fns):
fp = os.path.join(paths, name)
with open(fp, 'r', encoding='utf-8') as f:
data = json.load(f)
ds = DataSet()
for ins in data:
tokens = ins['token']
pos = ins['pos']
dep = ins['dependencies']
aspects = ins['aspects']
ins = Instance(tokens=tokens, pos=pos, dep=dep, aspects=aspects)
ds.append(ins)
data_bundle.set_dataset(ds, name=split)
return data_bundle
|
输出的DataSet包含以下的field
tokens pos dep aspects
["The", "bread", ...] ["DET", "NOUN",...] [["dep", 2, 1], ["nsubj", 4, 2], ...] [{"term": ["bread"], "polarity": "positive", "from": 1, "to": 2}]
其中dep中["dep", 2, 1]指当前这个word的head是2(0是root,这里2就是bread),"dep"是依赖关系为dep
:param paths:
:return:
|
Perturbed-Masking/utils/conlludataset.py
|
load
|
missakaherath/RoBERTaABSA
| 0
|
python
|
def load(self, paths):
'\n 输出的DataSet包含以下的field\n tokens pos dep aspects\n ["The", "bread", ...] ["DET", "NOUN",...] [["dep", 2, 1], ["nsubj", 4, 2], ...] [{"term": ["bread"], "polarity": "positive", "from": 1, "to": 2}]\n 其中dep中["dep", 2, 1]指当前这个word的head是2(0是root,这里2就是bread),"dep"是依赖关系为dep\n\n :param paths:\n :return:\n '
data_bundle = DataBundle()
folder_name = os.path.basename(paths)
fns = [f'{folder_name}_Test_biaffine_depparsed.json', f'{folder_name}_Train_biaffine_depparsed.json']
if (not os.path.exists(os.path.join(paths, fns[0]))):
fns = [f'Test_biaffine_depparsed.json', f'Train_biaffine_depparsed.json']
for (split, name) in zip(['test', 'train'], fns):
fp = os.path.join(paths, name)
with open(fp, 'r', encoding='utf-8') as f:
data = json.load(f)
ds = DataSet()
for ins in data:
tokens = ins['token']
pos = ins['pos']
dep = ins['dependencies']
aspects = ins['aspects']
ins = Instance(tokens=tokens, pos=pos, dep=dep, aspects=aspects)
ds.append(ins)
data_bundle.set_dataset(ds, name=split)
return data_bundle
|
def load(self, paths):
'\n 输出的DataSet包含以下的field\n tokens pos dep aspects\n ["The", "bread", ...] ["DET", "NOUN",...] [["dep", 2, 1], ["nsubj", 4, 2], ...] [{"term": ["bread"], "polarity": "positive", "from": 1, "to": 2}]\n 其中dep中["dep", 2, 1]指当前这个word的head是2(0是root,这里2就是bread),"dep"是依赖关系为dep\n\n :param paths:\n :return:\n '
data_bundle = DataBundle()
folder_name = os.path.basename(paths)
fns = [f'{folder_name}_Test_biaffine_depparsed.json', f'{folder_name}_Train_biaffine_depparsed.json']
if (not os.path.exists(os.path.join(paths, fns[0]))):
fns = [f'Test_biaffine_depparsed.json', f'Train_biaffine_depparsed.json']
for (split, name) in zip(['test', 'train'], fns):
fp = os.path.join(paths, name)
with open(fp, 'r', encoding='utf-8') as f:
data = json.load(f)
ds = DataSet()
for ins in data:
tokens = ins['token']
pos = ins['pos']
dep = ins['dependencies']
aspects = ins['aspects']
ins = Instance(tokens=tokens, pos=pos, dep=dep, aspects=aspects)
ds.append(ins)
data_bundle.set_dataset(ds, name=split)
return data_bundle<|docstring|>输出的DataSet包含以下的field
tokens pos dep aspects
["The", "bread", ...] ["DET", "NOUN",...] [["dep", 2, 1], ["nsubj", 4, 2], ...] [{"term": ["bread"], "polarity": "positive", "from": 1, "to": 2}]
其中dep中["dep", 2, 1]指当前这个word的head是2(0是root,这里2就是bread),"dep"是依赖关系为dep
:param paths:
:return:<|endoftext|>
|
1e592f4c2829c25d8bcb53adcfb34e13c7cc4522e30a9a5d6a01115c3c097cf6
|
def find_init_robot_root(initial_target: str, up_to_dir: str) -> Optional[str]:
'\n Find the root __init__.robot file which should be considered to create a\n suite based on the initial_target given.\n\n Returns the folder to be considered the root (which contains the __init__.robot).\n If an __init__.robot file is not found returns None.\n '
check_dir: str
if os.path.isdir(initial_target):
check_dir = initial_target
else:
check_dir = os.path.dirname(initial_target)
found: Optional[str] = None
while True:
if os.path.exists(os.path.join(check_dir, '__init__.robot')):
found = check_dir
if up_to_dir:
if os.path.samefile(check_dir, up_to_dir):
return found
parent = os.path.dirname(check_dir)
if ((not parent) or (parent == check_dir)):
return found
check_dir = parent
|
Find the root __init__.robot file which should be considered to create a
suite based on the initial_target given.
Returns the folder to be considered the root (which contains the __init__.robot).
If an __init__.robot file is not found returns None.
|
robotframework-ls/src/robotframework_debug_adapter/launch_process.py
|
find_init_robot_root
|
mrdimfox/robotframework-lsp
| 0
|
python
|
def find_init_robot_root(initial_target: str, up_to_dir: str) -> Optional[str]:
'\n Find the root __init__.robot file which should be considered to create a\n suite based on the initial_target given.\n\n Returns the folder to be considered the root (which contains the __init__.robot).\n If an __init__.robot file is not found returns None.\n '
check_dir: str
if os.path.isdir(initial_target):
check_dir = initial_target
else:
check_dir = os.path.dirname(initial_target)
found: Optional[str] = None
while True:
if os.path.exists(os.path.join(check_dir, '__init__.robot')):
found = check_dir
if up_to_dir:
if os.path.samefile(check_dir, up_to_dir):
return found
parent = os.path.dirname(check_dir)
if ((not parent) or (parent == check_dir)):
return found
check_dir = parent
|
def find_init_robot_root(initial_target: str, up_to_dir: str) -> Optional[str]:
'\n Find the root __init__.robot file which should be considered to create a\n suite based on the initial_target given.\n\n Returns the folder to be considered the root (which contains the __init__.robot).\n If an __init__.robot file is not found returns None.\n '
check_dir: str
if os.path.isdir(initial_target):
check_dir = initial_target
else:
check_dir = os.path.dirname(initial_target)
found: Optional[str] = None
while True:
if os.path.exists(os.path.join(check_dir, '__init__.robot')):
found = check_dir
if up_to_dir:
if os.path.samefile(check_dir, up_to_dir):
return found
parent = os.path.dirname(check_dir)
if ((not parent) or (parent == check_dir)):
return found
check_dir = parent<|docstring|>Find the root __init__.robot file which should be considered to create a
suite based on the initial_target given.
Returns the folder to be considered the root (which contains the __init__.robot).
If an __init__.robot file is not found returns None.<|endoftext|>
|
18f3ab76b685195d8fac543c1d6204b5c62b07e032e2366fb9345855e0ebe79b
|
def compute_cmd_line_and_env(run_robot_py: str, target: Union[(str, List[str])], make_suite: bool, port: int, args: List[str], run_in_debug_mode: bool, cwd: str, suite_target: Optional[Union[(str, List[str])]], env: dict) -> Tuple[(List[str], dict)]:
'\n Note that cwd and target MUST be absolute at this point.\n '
target_args: List[str] = (target if isinstance(target, list) else [target])
new_target_args: List[str] = target_args[:]
import sys
new_env = env.copy()
need_env_filtering = False
suite_filter_args = []
if make_suite:
if suite_target:
new_target_args = (suite_target if isinstance(suite_target, list) else [suite_target])
need_env_filtering = True
else:
found_roots: Set[Union[(str, None)]] = set()
new_target_args = []
for target in target_args:
found = find_init_robot_root(target, cwd)
found_roots.add(found)
if (not found):
if (target not in new_target_args):
new_target_args.append(target)
elif (found not in new_target_args):
new_target_args.append(found)
assert (len(found_roots) > 0)
if (len(found_roots) == 1):
base_root = next(iter(found_roots))
if (base_root is None):
need_env_filtering = False
else:
for target in target_args:
target_no_ext = os.path.splitext(target)[0]
relative = os.path.relpath(target_no_ext, base_root)
suite_to_filter = relative.replace('\\', '/').replace('/', '.')
suite_filter_args.append('--suite')
if (suite_to_filter == '.'):
suite_filter_args.append(os.path.basename(base_root))
else:
suite_filter_args.append(((os.path.basename(base_root) + '.') + suite_to_filter))
new_target_args = [base_root]
need_env_filtering = False
else:
need_env_filtering = True
if need_env_filtering:
new_env.update(_compute_env_filtering(env, target_args))
if ('RFLS_PRERUN_FILTER_TESTS' in new_env):
found_filter = ('--prerunmodifier=robotframework_debug_adapter.prerun_modifiers.FilteringTestsSuiteVisitor' in args)
if (not found_filter):
args.append('--prerunmodifier=robotframework_debug_adapter.prerun_modifiers.FilteringTestsSuiteVisitor')
cmdline = ((([sys.executable, '-u', run_robot_py, '--port', str(port), ('--debug' if run_in_debug_mode else '--no-debug')] + args) + suite_filter_args) + new_target_args)
return (cmdline, new_env)
|
Note that cwd and target MUST be absolute at this point.
|
robotframework-ls/src/robotframework_debug_adapter/launch_process.py
|
compute_cmd_line_and_env
|
mrdimfox/robotframework-lsp
| 0
|
python
|
def compute_cmd_line_and_env(run_robot_py: str, target: Union[(str, List[str])], make_suite: bool, port: int, args: List[str], run_in_debug_mode: bool, cwd: str, suite_target: Optional[Union[(str, List[str])]], env: dict) -> Tuple[(List[str], dict)]:
'\n \n '
target_args: List[str] = (target if isinstance(target, list) else [target])
new_target_args: List[str] = target_args[:]
import sys
new_env = env.copy()
need_env_filtering = False
suite_filter_args = []
if make_suite:
if suite_target:
new_target_args = (suite_target if isinstance(suite_target, list) else [suite_target])
need_env_filtering = True
else:
found_roots: Set[Union[(str, None)]] = set()
new_target_args = []
for target in target_args:
found = find_init_robot_root(target, cwd)
found_roots.add(found)
if (not found):
if (target not in new_target_args):
new_target_args.append(target)
elif (found not in new_target_args):
new_target_args.append(found)
assert (len(found_roots) > 0)
if (len(found_roots) == 1):
base_root = next(iter(found_roots))
if (base_root is None):
need_env_filtering = False
else:
for target in target_args:
target_no_ext = os.path.splitext(target)[0]
relative = os.path.relpath(target_no_ext, base_root)
suite_to_filter = relative.replace('\\', '/').replace('/', '.')
suite_filter_args.append('--suite')
if (suite_to_filter == '.'):
suite_filter_args.append(os.path.basename(base_root))
else:
suite_filter_args.append(((os.path.basename(base_root) + '.') + suite_to_filter))
new_target_args = [base_root]
need_env_filtering = False
else:
need_env_filtering = True
if need_env_filtering:
new_env.update(_compute_env_filtering(env, target_args))
if ('RFLS_PRERUN_FILTER_TESTS' in new_env):
found_filter = ('--prerunmodifier=robotframework_debug_adapter.prerun_modifiers.FilteringTestsSuiteVisitor' in args)
if (not found_filter):
args.append('--prerunmodifier=robotframework_debug_adapter.prerun_modifiers.FilteringTestsSuiteVisitor')
cmdline = ((([sys.executable, '-u', run_robot_py, '--port', str(port), ('--debug' if run_in_debug_mode else '--no-debug')] + args) + suite_filter_args) + new_target_args)
return (cmdline, new_env)
|
def compute_cmd_line_and_env(run_robot_py: str, target: Union[(str, List[str])], make_suite: bool, port: int, args: List[str], run_in_debug_mode: bool, cwd: str, suite_target: Optional[Union[(str, List[str])]], env: dict) -> Tuple[(List[str], dict)]:
'\n \n '
target_args: List[str] = (target if isinstance(target, list) else [target])
new_target_args: List[str] = target_args[:]
import sys
new_env = env.copy()
need_env_filtering = False
suite_filter_args = []
if make_suite:
if suite_target:
new_target_args = (suite_target if isinstance(suite_target, list) else [suite_target])
need_env_filtering = True
else:
found_roots: Set[Union[(str, None)]] = set()
new_target_args = []
for target in target_args:
found = find_init_robot_root(target, cwd)
found_roots.add(found)
if (not found):
if (target not in new_target_args):
new_target_args.append(target)
elif (found not in new_target_args):
new_target_args.append(found)
assert (len(found_roots) > 0)
if (len(found_roots) == 1):
base_root = next(iter(found_roots))
if (base_root is None):
need_env_filtering = False
else:
for target in target_args:
target_no_ext = os.path.splitext(target)[0]
relative = os.path.relpath(target_no_ext, base_root)
suite_to_filter = relative.replace('\\', '/').replace('/', '.')
suite_filter_args.append('--suite')
if (suite_to_filter == '.'):
suite_filter_args.append(os.path.basename(base_root))
else:
suite_filter_args.append(((os.path.basename(base_root) + '.') + suite_to_filter))
new_target_args = [base_root]
need_env_filtering = False
else:
need_env_filtering = True
if need_env_filtering:
new_env.update(_compute_env_filtering(env, target_args))
if ('RFLS_PRERUN_FILTER_TESTS' in new_env):
found_filter = ('--prerunmodifier=robotframework_debug_adapter.prerun_modifiers.FilteringTestsSuiteVisitor' in args)
if (not found_filter):
args.append('--prerunmodifier=robotframework_debug_adapter.prerun_modifiers.FilteringTestsSuiteVisitor')
cmdline = ((([sys.executable, '-u', run_robot_py, '--port', str(port), ('--debug' if run_in_debug_mode else '--no-debug')] + args) + suite_filter_args) + new_target_args)
return (cmdline, new_env)<|docstring|>Note that cwd and target MUST be absolute at this point.<|endoftext|>
|
a6e609bf8b024dabc9cd202a36163c63089258f8a9660bee42845f57e78d040e
|
def send_and_wait_for_configuration_done_request(self) -> bool:
'\n :return: Whether the configuration done response was received.\n '
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import ConfigurationDoneRequest
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import ConfigurationDoneArguments
event_robot = threading.Event()
track_events = [event_robot]
self._debug_adapter_robot_target_comm.write_to_robot_message(ConfigurationDoneRequest(ConfigurationDoneArguments()), on_response=(lambda *args, **kwargs: event_robot.set()))
if self._run_in_debug_mode:
event_pydevd = threading.Event()
track_events.append(event_pydevd)
self._debug_adapter_pydevd_target_comm.write_to_pydevd_message(ConfigurationDoneRequest(ConfigurationDoneArguments()), on_response=(lambda *args, **kwargs: event_pydevd.set()))
log.debug(('Wating for configuration_done response for %s seconds.' % (DEFAULT_TIMEOUT,)))
ret = True
for event in track_events:
ret = (ret and event.wait(DEFAULT_TIMEOUT))
if (not ret):
break
log.debug(('Received configuration_done response: %s' % (ret,)))
return ret
|
:return: Whether the configuration done response was received.
|
robotframework-ls/src/robotframework_debug_adapter/launch_process.py
|
send_and_wait_for_configuration_done_request
|
mrdimfox/robotframework-lsp
| 0
|
python
|
def send_and_wait_for_configuration_done_request(self) -> bool:
'\n \n '
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import ConfigurationDoneRequest
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import ConfigurationDoneArguments
event_robot = threading.Event()
track_events = [event_robot]
self._debug_adapter_robot_target_comm.write_to_robot_message(ConfigurationDoneRequest(ConfigurationDoneArguments()), on_response=(lambda *args, **kwargs: event_robot.set()))
if self._run_in_debug_mode:
event_pydevd = threading.Event()
track_events.append(event_pydevd)
self._debug_adapter_pydevd_target_comm.write_to_pydevd_message(ConfigurationDoneRequest(ConfigurationDoneArguments()), on_response=(lambda *args, **kwargs: event_pydevd.set()))
log.debug(('Wating for configuration_done response for %s seconds.' % (DEFAULT_TIMEOUT,)))
ret = True
for event in track_events:
ret = (ret and event.wait(DEFAULT_TIMEOUT))
if (not ret):
break
log.debug(('Received configuration_done response: %s' % (ret,)))
return ret
|
def send_and_wait_for_configuration_done_request(self) -> bool:
'\n \n '
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import ConfigurationDoneRequest
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import ConfigurationDoneArguments
event_robot = threading.Event()
track_events = [event_robot]
self._debug_adapter_robot_target_comm.write_to_robot_message(ConfigurationDoneRequest(ConfigurationDoneArguments()), on_response=(lambda *args, **kwargs: event_robot.set()))
if self._run_in_debug_mode:
event_pydevd = threading.Event()
track_events.append(event_pydevd)
self._debug_adapter_pydevd_target_comm.write_to_pydevd_message(ConfigurationDoneRequest(ConfigurationDoneArguments()), on_response=(lambda *args, **kwargs: event_pydevd.set()))
log.debug(('Wating for configuration_done response for %s seconds.' % (DEFAULT_TIMEOUT,)))
ret = True
for event in track_events:
ret = (ret and event.wait(DEFAULT_TIMEOUT))
if (not ret):
break
log.debug(('Received configuration_done response: %s' % (ret,)))
return ret<|docstring|>:return: Whether the configuration done response was received.<|endoftext|>
|
4bcaa3174b3a24072ec6fc36fdca74c8ec987f87fcba766236e98dd231ce2706
|
def interpreter_requires_environment():
"\n Returns True if our sys.executable interpreter requires environment\n variables in order to be able to run at all.\n\n This is designed to be used with @unittest.skipIf() to annotate tests\n that need to use an assert_python*() function to launch an isolated\n mode (-I) or no environment mode (-E) sub-interpreter process.\n\n A normal build & test does not run into this situation but it can happen\n when trying to run the standard library test suite from an interpreter that\n doesn't have an obvious home with Python's current home finding logic.\n\n Setting PYTHONHOME is one way to get most of the testsuite to run in that\n situation. PYTHONPATH or PYTHONUSERSITE are other common environment\n variables that might impact whether or not the interpreter can start.\n "
global __cached_interp_requires_environment
if (__cached_interp_requires_environment is None):
try:
subprocess.check_call([sys.executable, '-E', '-c', 'import sys; sys.exit(0)'])
except subprocess.CalledProcessError:
__cached_interp_requires_environment = True
else:
__cached_interp_requires_environment = False
return __cached_interp_requires_environment
|
Returns True if our sys.executable interpreter requires environment
variables in order to be able to run at all.
This is designed to be used with @unittest.skipIf() to annotate tests
that need to use an assert_python*() function to launch an isolated
mode (-I) or no environment mode (-E) sub-interpreter process.
A normal build & test does not run into this situation but it can happen
when trying to run the standard library test suite from an interpreter that
doesn't have an obvious home with Python's current home finding logic.
Setting PYTHONHOME is one way to get most of the testsuite to run in that
situation. PYTHONPATH or PYTHONUSERSITE are other common environment
variables that might impact whether or not the interpreter can start.
|
Lib/test/support/script_helper.py
|
interpreter_requires_environment
|
s-wakaba/bitpacked-cpython
| 486
|
python
|
def interpreter_requires_environment():
"\n Returns True if our sys.executable interpreter requires environment\n variables in order to be able to run at all.\n\n This is designed to be used with @unittest.skipIf() to annotate tests\n that need to use an assert_python*() function to launch an isolated\n mode (-I) or no environment mode (-E) sub-interpreter process.\n\n A normal build & test does not run into this situation but it can happen\n when trying to run the standard library test suite from an interpreter that\n doesn't have an obvious home with Python's current home finding logic.\n\n Setting PYTHONHOME is one way to get most of the testsuite to run in that\n situation. PYTHONPATH or PYTHONUSERSITE are other common environment\n variables that might impact whether or not the interpreter can start.\n "
global __cached_interp_requires_environment
if (__cached_interp_requires_environment is None):
try:
subprocess.check_call([sys.executable, '-E', '-c', 'import sys; sys.exit(0)'])
except subprocess.CalledProcessError:
__cached_interp_requires_environment = True
else:
__cached_interp_requires_environment = False
return __cached_interp_requires_environment
|
def interpreter_requires_environment():
"\n Returns True if our sys.executable interpreter requires environment\n variables in order to be able to run at all.\n\n This is designed to be used with @unittest.skipIf() to annotate tests\n that need to use an assert_python*() function to launch an isolated\n mode (-I) or no environment mode (-E) sub-interpreter process.\n\n A normal build & test does not run into this situation but it can happen\n when trying to run the standard library test suite from an interpreter that\n doesn't have an obvious home with Python's current home finding logic.\n\n Setting PYTHONHOME is one way to get most of the testsuite to run in that\n situation. PYTHONPATH or PYTHONUSERSITE are other common environment\n variables that might impact whether or not the interpreter can start.\n "
global __cached_interp_requires_environment
if (__cached_interp_requires_environment is None):
try:
subprocess.check_call([sys.executable, '-E', '-c', 'import sys; sys.exit(0)'])
except subprocess.CalledProcessError:
__cached_interp_requires_environment = True
else:
__cached_interp_requires_environment = False
return __cached_interp_requires_environment<|docstring|>Returns True if our sys.executable interpreter requires environment
variables in order to be able to run at all.
This is designed to be used with @unittest.skipIf() to annotate tests
that need to use an assert_python*() function to launch an isolated
mode (-I) or no environment mode (-E) sub-interpreter process.
A normal build & test does not run into this situation but it can happen
when trying to run the standard library test suite from an interpreter that
doesn't have an obvious home with Python's current home finding logic.
Setting PYTHONHOME is one way to get most of the testsuite to run in that
situation. PYTHONPATH or PYTHONUSERSITE are other common environment
variables that might impact whether or not the interpreter can start.<|endoftext|>
|
eaa7160278ff3418da51c0f5bfc7fd474710fc750a24c734b2950e6aa246e290
|
def assert_python_ok(*args, **env_vars):
'\n Assert that running the interpreter with `args` and optional environment\n variables `env_vars` succeeds (rc == 0) and return a (return code, stdout,\n stderr) tuple.\n\n If the __cleanenv keyword is set, env_vars is used as a fresh environment.\n\n Python is started in isolated mode (command line option -I),\n except if the __isolated keyword is set to False.\n '
return _assert_python(True, *args, **env_vars)
|
Assert that running the interpreter with `args` and optional environment
variables `env_vars` succeeds (rc == 0) and return a (return code, stdout,
stderr) tuple.
If the __cleanenv keyword is set, env_vars is used as a fresh environment.
Python is started in isolated mode (command line option -I),
except if the __isolated keyword is set to False.
|
Lib/test/support/script_helper.py
|
assert_python_ok
|
s-wakaba/bitpacked-cpython
| 486
|
python
|
def assert_python_ok(*args, **env_vars):
'\n Assert that running the interpreter with `args` and optional environment\n variables `env_vars` succeeds (rc == 0) and return a (return code, stdout,\n stderr) tuple.\n\n If the __cleanenv keyword is set, env_vars is used as a fresh environment.\n\n Python is started in isolated mode (command line option -I),\n except if the __isolated keyword is set to False.\n '
return _assert_python(True, *args, **env_vars)
|
def assert_python_ok(*args, **env_vars):
'\n Assert that running the interpreter with `args` and optional environment\n variables `env_vars` succeeds (rc == 0) and return a (return code, stdout,\n stderr) tuple.\n\n If the __cleanenv keyword is set, env_vars is used as a fresh environment.\n\n Python is started in isolated mode (command line option -I),\n except if the __isolated keyword is set to False.\n '
return _assert_python(True, *args, **env_vars)<|docstring|>Assert that running the interpreter with `args` and optional environment
variables `env_vars` succeeds (rc == 0) and return a (return code, stdout,
stderr) tuple.
If the __cleanenv keyword is set, env_vars is used as a fresh environment.
Python is started in isolated mode (command line option -I),
except if the __isolated keyword is set to False.<|endoftext|>
|
f9e5ec2994e588d53eea91180c375c1b5b86f022985e7af2021286511c1a9d3f
|
def assert_python_failure(*args, **env_vars):
'\n Assert that running the interpreter with `args` and optional environment\n variables `env_vars` fails (rc != 0) and return a (return code, stdout,\n stderr) tuple.\n\n See assert_python_ok() for more options.\n '
return _assert_python(False, *args, **env_vars)
|
Assert that running the interpreter with `args` and optional environment
variables `env_vars` fails (rc != 0) and return a (return code, stdout,
stderr) tuple.
See assert_python_ok() for more options.
|
Lib/test/support/script_helper.py
|
assert_python_failure
|
s-wakaba/bitpacked-cpython
| 486
|
python
|
def assert_python_failure(*args, **env_vars):
'\n Assert that running the interpreter with `args` and optional environment\n variables `env_vars` fails (rc != 0) and return a (return code, stdout,\n stderr) tuple.\n\n See assert_python_ok() for more options.\n '
return _assert_python(False, *args, **env_vars)
|
def assert_python_failure(*args, **env_vars):
'\n Assert that running the interpreter with `args` and optional environment\n variables `env_vars` fails (rc != 0) and return a (return code, stdout,\n stderr) tuple.\n\n See assert_python_ok() for more options.\n '
return _assert_python(False, *args, **env_vars)<|docstring|>Assert that running the interpreter with `args` and optional environment
variables `env_vars` fails (rc != 0) and return a (return code, stdout,
stderr) tuple.
See assert_python_ok() for more options.<|endoftext|>
|
f0047a078d5a152ea85797a02eff9494b37a23190a66a3c867901c2b8a6e1f80
|
def spawn_python(*args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, **kw):
'Run a Python subprocess with the given arguments.\n\n kw is extra keyword args to pass to subprocess.Popen. Returns a Popen\n object.\n '
cmd_line = [sys.executable, '-E']
cmd_line.extend(args)
env = kw.setdefault('env', dict(os.environ))
env['TERM'] = 'vt100'
return subprocess.Popen(cmd_line, stdin=subprocess.PIPE, stdout=stdout, stderr=stderr, **kw)
|
Run a Python subprocess with the given arguments.
kw is extra keyword args to pass to subprocess.Popen. Returns a Popen
object.
|
Lib/test/support/script_helper.py
|
spawn_python
|
s-wakaba/bitpacked-cpython
| 486
|
python
|
def spawn_python(*args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, **kw):
'Run a Python subprocess with the given arguments.\n\n kw is extra keyword args to pass to subprocess.Popen. Returns a Popen\n object.\n '
cmd_line = [sys.executable, '-E']
cmd_line.extend(args)
env = kw.setdefault('env', dict(os.environ))
env['TERM'] = 'vt100'
return subprocess.Popen(cmd_line, stdin=subprocess.PIPE, stdout=stdout, stderr=stderr, **kw)
|
def spawn_python(*args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, **kw):
'Run a Python subprocess with the given arguments.\n\n kw is extra keyword args to pass to subprocess.Popen. Returns a Popen\n object.\n '
cmd_line = [sys.executable, '-E']
cmd_line.extend(args)
env = kw.setdefault('env', dict(os.environ))
env['TERM'] = 'vt100'
return subprocess.Popen(cmd_line, stdin=subprocess.PIPE, stdout=stdout, stderr=stderr, **kw)<|docstring|>Run a Python subprocess with the given arguments.
kw is extra keyword args to pass to subprocess.Popen. Returns a Popen
object.<|endoftext|>
|
e0ef5f850cf2fe04fa715d7e173beb06cdf9fd8ad20e73b0018fe7a74809258a
|
def kill_python(p):
'Run the given Popen process until completion and return stdout.'
p.stdin.close()
data = p.stdout.read()
p.stdout.close()
p.wait()
subprocess._cleanup()
return data
|
Run the given Popen process until completion and return stdout.
|
Lib/test/support/script_helper.py
|
kill_python
|
s-wakaba/bitpacked-cpython
| 486
|
python
|
def kill_python(p):
p.stdin.close()
data = p.stdout.read()
p.stdout.close()
p.wait()
subprocess._cleanup()
return data
|
def kill_python(p):
p.stdin.close()
data = p.stdout.read()
p.stdout.close()
p.wait()
subprocess._cleanup()
return data<|docstring|>Run the given Popen process until completion and return stdout.<|endoftext|>
|
e9a5e394025e3449dc5cdb99a2056653e26b1564959085712b28a9ae7bb4f56f
|
def _handle_message_event(self, event_data):
'\n *event* is a ts3.response.TS3Event instance, that contains the name\n of the event and the data.\n '
message = event_data.get('msg')
rec_from_name = event_data.get('invokername').encode('utf-8')
rec_from_uid = event_data.get('invokeruid')
rec_from_id = event_data.get('invokerid')
rec_type = event_data.get('targetmode')
if (rec_from_id == self._own_client_id):
return
try:
if (rec_type == '2'):
self.handle_channel_msg(message, rec_from_id, rec_from_name, rec_from_uid)
elif (rec_type == '1'):
self.handle_private_message(message, rec_from_id, rec_from_name, rec_from_uid)
except Exception as ex:
LOG.error(('BOT Event: Something went wrong during message received from teamspeak server.' + ' Likely bad user command/message.'), exc_info=ex)
return None
|
*event* is a ts3.response.TS3Event instance, that contains the name
of the event and the data.
|
bot/event_looper.py
|
_handle_message_event
|
FlussuferOrga/ts-gw2-verifyBot
| 2
|
python
|
def _handle_message_event(self, event_data):
'\n *event* is a ts3.response.TS3Event instance, that contains the name\n of the event and the data.\n '
message = event_data.get('msg')
rec_from_name = event_data.get('invokername').encode('utf-8')
rec_from_uid = event_data.get('invokeruid')
rec_from_id = event_data.get('invokerid')
rec_type = event_data.get('targetmode')
if (rec_from_id == self._own_client_id):
return
try:
if (rec_type == '2'):
self.handle_channel_msg(message, rec_from_id, rec_from_name, rec_from_uid)
elif (rec_type == '1'):
self.handle_private_message(message, rec_from_id, rec_from_name, rec_from_uid)
except Exception as ex:
LOG.error(('BOT Event: Something went wrong during message received from teamspeak server.' + ' Likely bad user command/message.'), exc_info=ex)
return None
|
def _handle_message_event(self, event_data):
'\n *event* is a ts3.response.TS3Event instance, that contains the name\n of the event and the data.\n '
message = event_data.get('msg')
rec_from_name = event_data.get('invokername').encode('utf-8')
rec_from_uid = event_data.get('invokeruid')
rec_from_id = event_data.get('invokerid')
rec_type = event_data.get('targetmode')
if (rec_from_id == self._own_client_id):
return
try:
if (rec_type == '2'):
self.handle_channel_msg(message, rec_from_id, rec_from_name, rec_from_uid)
elif (rec_type == '1'):
self.handle_private_message(message, rec_from_id, rec_from_name, rec_from_uid)
except Exception as ex:
LOG.error(('BOT Event: Something went wrong during message received from teamspeak server.' + ' Likely bad user command/message.'), exc_info=ex)
return None<|docstring|>*event* is a ts3.response.TS3Event instance, that contains the name
of the event and the data.<|endoftext|>
|
f6d9d70e7adebddcf1782d10e637c8705d25e2ad817330fe5f30d32b2b8e761e
|
def __init__(self, configuration=None, do_validate=True):
'Creates an execution configuration from the given JSON dict\n\n :param configuration: The JSON dictionary\n :type configuration: dict\n :param do_validate: Whether to perform validation on the JSON schema\n :type do_validate: bool\n\n :raises :class:`job.execution.configuration.exceptions.InvalidExecutionConfiguration`: If the JSON is invalid\n '
if (not configuration):
configuration = {}
self._configuration = configuration
if ('version' not in self._configuration):
self._configuration['version'] = SCHEMA_VERSION
if (self._configuration['version'] != SCHEMA_VERSION):
self._configuration = ExecutionConfiguration._convert_configuration(configuration)
self._populate_default_values()
try:
if do_validate:
validate(configuration, EXE_CONFIG_SCHEMA)
except ValidationError as validation_error:
raise InvalidExecutionConfiguration(validation_error)
|
Creates an execution configuration from the given JSON dict
:param configuration: The JSON dictionary
:type configuration: dict
:param do_validate: Whether to perform validation on the JSON schema
:type do_validate: bool
:raises :class:`job.execution.configuration.exceptions.InvalidExecutionConfiguration`: If the JSON is invalid
|
scale/job/execution/configuration/json/exe_config.py
|
__init__
|
kaydoh/scale
| 121
|
python
|
def __init__(self, configuration=None, do_validate=True):
'Creates an execution configuration from the given JSON dict\n\n :param configuration: The JSON dictionary\n :type configuration: dict\n :param do_validate: Whether to perform validation on the JSON schema\n :type do_validate: bool\n\n :raises :class:`job.execution.configuration.exceptions.InvalidExecutionConfiguration`: If the JSON is invalid\n '
if (not configuration):
configuration = {}
self._configuration = configuration
if ('version' not in self._configuration):
self._configuration['version'] = SCHEMA_VERSION
if (self._configuration['version'] != SCHEMA_VERSION):
self._configuration = ExecutionConfiguration._convert_configuration(configuration)
self._populate_default_values()
try:
if do_validate:
validate(configuration, EXE_CONFIG_SCHEMA)
except ValidationError as validation_error:
raise InvalidExecutionConfiguration(validation_error)
|
def __init__(self, configuration=None, do_validate=True):
'Creates an execution configuration from the given JSON dict\n\n :param configuration: The JSON dictionary\n :type configuration: dict\n :param do_validate: Whether to perform validation on the JSON schema\n :type do_validate: bool\n\n :raises :class:`job.execution.configuration.exceptions.InvalidExecutionConfiguration`: If the JSON is invalid\n '
if (not configuration):
configuration = {}
self._configuration = configuration
if ('version' not in self._configuration):
self._configuration['version'] = SCHEMA_VERSION
if (self._configuration['version'] != SCHEMA_VERSION):
self._configuration = ExecutionConfiguration._convert_configuration(configuration)
self._populate_default_values()
try:
if do_validate:
validate(configuration, EXE_CONFIG_SCHEMA)
except ValidationError as validation_error:
raise InvalidExecutionConfiguration(validation_error)<|docstring|>Creates an execution configuration from the given JSON dict
:param configuration: The JSON dictionary
:type configuration: dict
:param do_validate: Whether to perform validation on the JSON schema
:type do_validate: bool
:raises :class:`job.execution.configuration.exceptions.InvalidExecutionConfiguration`: If the JSON is invalid<|endoftext|>
|
54174945dfdbcc59adf51ee9bb39c2bed6229b7518a44bfafd8fc16372367a68
|
def add_to_task(self, task_type, args=None, docker_params=None, env_vars=None, mount_volumes=None, resources=None, settings=None, wksp_volumes=None, workspaces=None):
'Adds the given parameters to the task with the given type. The task with the given type must already exist. A\n mount volume of None indicates a required mount that is missing. A setting value of None indicates a required\n setting that is missing.\n\n :param task_type: The task type to add the parameters to\n :type task_type: string\n :param args: The command arguments for the task\n :type args: string\n :param docker_params: The Docker parameters\n :type docker_params: :func:`list`\n :param env_vars: A dict of env var names and values to add to the task\n :type env_vars: dict\n :param mount_volumes: The mount volumes stored by mount name (a volume may be None)\n :type mount_volumes: dict\n :param resources: The resources\n :type resources: :class:`node.resources.node_resources.NodeResources`\n :param settings: The setting names and values (a value may be None)\n :type settings: dict\n :param wksp_volumes: The workspace volumes stored by workspace name\n :type wksp_volumes: dict\n :param workspaces: The workspaces stored by name\n :type workspaces: dict\n '
task_dict = self._get_task_dict(task_type)
if args:
ExecutionConfiguration._add_args_to_task(task_dict, args)
if docker_params:
ExecutionConfiguration._add_docker_params_to_task(task_dict, docker_params)
if env_vars:
ExecutionConfiguration._add_env_vars_to_task(task_dict, env_vars)
if mount_volumes:
ExecutionConfiguration._add_mount_volumes_to_task(task_dict, mount_volumes)
if resources:
ExecutionConfiguration._add_resources_to_task(task_dict, resources)
if settings:
ExecutionConfiguration._add_settings_to_task(task_dict, settings)
if wksp_volumes:
ExecutionConfiguration._add_workspace_volumes_to_task(task_dict, wksp_volumes)
if workspaces:
ExecutionConfiguration._add_workspaces_to_task(task_dict, workspaces)
|
Adds the given parameters to the task with the given type. The task with the given type must already exist. A
mount volume of None indicates a required mount that is missing. A setting value of None indicates a required
setting that is missing.
:param task_type: The task type to add the parameters to
:type task_type: string
:param args: The command arguments for the task
:type args: string
:param docker_params: The Docker parameters
:type docker_params: :func:`list`
:param env_vars: A dict of env var names and values to add to the task
:type env_vars: dict
:param mount_volumes: The mount volumes stored by mount name (a volume may be None)
:type mount_volumes: dict
:param resources: The resources
:type resources: :class:`node.resources.node_resources.NodeResources`
:param settings: The setting names and values (a value may be None)
:type settings: dict
:param wksp_volumes: The workspace volumes stored by workspace name
:type wksp_volumes: dict
:param workspaces: The workspaces stored by name
:type workspaces: dict
|
scale/job/execution/configuration/json/exe_config.py
|
add_to_task
|
kaydoh/scale
| 121
|
python
|
def add_to_task(self, task_type, args=None, docker_params=None, env_vars=None, mount_volumes=None, resources=None, settings=None, wksp_volumes=None, workspaces=None):
'Adds the given parameters to the task with the given type. The task with the given type must already exist. A\n mount volume of None indicates a required mount that is missing. A setting value of None indicates a required\n setting that is missing.\n\n :param task_type: The task type to add the parameters to\n :type task_type: string\n :param args: The command arguments for the task\n :type args: string\n :param docker_params: The Docker parameters\n :type docker_params: :func:`list`\n :param env_vars: A dict of env var names and values to add to the task\n :type env_vars: dict\n :param mount_volumes: The mount volumes stored by mount name (a volume may be None)\n :type mount_volumes: dict\n :param resources: The resources\n :type resources: :class:`node.resources.node_resources.NodeResources`\n :param settings: The setting names and values (a value may be None)\n :type settings: dict\n :param wksp_volumes: The workspace volumes stored by workspace name\n :type wksp_volumes: dict\n :param workspaces: The workspaces stored by name\n :type workspaces: dict\n '
task_dict = self._get_task_dict(task_type)
if args:
ExecutionConfiguration._add_args_to_task(task_dict, args)
if docker_params:
ExecutionConfiguration._add_docker_params_to_task(task_dict, docker_params)
if env_vars:
ExecutionConfiguration._add_env_vars_to_task(task_dict, env_vars)
if mount_volumes:
ExecutionConfiguration._add_mount_volumes_to_task(task_dict, mount_volumes)
if resources:
ExecutionConfiguration._add_resources_to_task(task_dict, resources)
if settings:
ExecutionConfiguration._add_settings_to_task(task_dict, settings)
if wksp_volumes:
ExecutionConfiguration._add_workspace_volumes_to_task(task_dict, wksp_volumes)
if workspaces:
ExecutionConfiguration._add_workspaces_to_task(task_dict, workspaces)
|
def add_to_task(self, task_type, args=None, docker_params=None, env_vars=None, mount_volumes=None, resources=None, settings=None, wksp_volumes=None, workspaces=None):
'Adds the given parameters to the task with the given type. The task with the given type must already exist. A\n mount volume of None indicates a required mount that is missing. A setting value of None indicates a required\n setting that is missing.\n\n :param task_type: The task type to add the parameters to\n :type task_type: string\n :param args: The command arguments for the task\n :type args: string\n :param docker_params: The Docker parameters\n :type docker_params: :func:`list`\n :param env_vars: A dict of env var names and values to add to the task\n :type env_vars: dict\n :param mount_volumes: The mount volumes stored by mount name (a volume may be None)\n :type mount_volumes: dict\n :param resources: The resources\n :type resources: :class:`node.resources.node_resources.NodeResources`\n :param settings: The setting names and values (a value may be None)\n :type settings: dict\n :param wksp_volumes: The workspace volumes stored by workspace name\n :type wksp_volumes: dict\n :param workspaces: The workspaces stored by name\n :type workspaces: dict\n '
task_dict = self._get_task_dict(task_type)
if args:
ExecutionConfiguration._add_args_to_task(task_dict, args)
if docker_params:
ExecutionConfiguration._add_docker_params_to_task(task_dict, docker_params)
if env_vars:
ExecutionConfiguration._add_env_vars_to_task(task_dict, env_vars)
if mount_volumes:
ExecutionConfiguration._add_mount_volumes_to_task(task_dict, mount_volumes)
if resources:
ExecutionConfiguration._add_resources_to_task(task_dict, resources)
if settings:
ExecutionConfiguration._add_settings_to_task(task_dict, settings)
if wksp_volumes:
ExecutionConfiguration._add_workspace_volumes_to_task(task_dict, wksp_volumes)
if workspaces:
ExecutionConfiguration._add_workspaces_to_task(task_dict, workspaces)<|docstring|>Adds the given parameters to the task with the given type. The task with the given type must already exist. A
mount volume of None indicates a required mount that is missing. A setting value of None indicates a required
setting that is missing.
:param task_type: The task type to add the parameters to
:type task_type: string
:param args: The command arguments for the task
:type args: string
:param docker_params: The Docker parameters
:type docker_params: :func:`list`
:param env_vars: A dict of env var names and values to add to the task
:type env_vars: dict
:param mount_volumes: The mount volumes stored by mount name (a volume may be None)
:type mount_volumes: dict
:param resources: The resources
:type resources: :class:`node.resources.node_resources.NodeResources`
:param settings: The setting names and values (a value may be None)
:type settings: dict
:param wksp_volumes: The workspace volumes stored by workspace name
:type wksp_volumes: dict
:param workspaces: The workspaces stored by name
:type workspaces: dict<|endoftext|>
|
ee7faaa3dc596f5d313e3802c506356cea112e59cc1a6fd7cc758b893193de78
|
def create_copy(self):
'Creates and returns a copy of this configuration\n\n :returns: The copied configuration\n :rtype: :class:`job.execution.configuration.json.exe_config.ExecutionConfiguration`\n '
return ExecutionConfiguration(deepcopy(self._configuration), do_validate=False)
|
Creates and returns a copy of this configuration
:returns: The copied configuration
:rtype: :class:`job.execution.configuration.json.exe_config.ExecutionConfiguration`
|
scale/job/execution/configuration/json/exe_config.py
|
create_copy
|
kaydoh/scale
| 121
|
python
|
def create_copy(self):
'Creates and returns a copy of this configuration\n\n :returns: The copied configuration\n :rtype: :class:`job.execution.configuration.json.exe_config.ExecutionConfiguration`\n '
return ExecutionConfiguration(deepcopy(self._configuration), do_validate=False)
|
def create_copy(self):
'Creates and returns a copy of this configuration\n\n :returns: The copied configuration\n :rtype: :class:`job.execution.configuration.json.exe_config.ExecutionConfiguration`\n '
return ExecutionConfiguration(deepcopy(self._configuration), do_validate=False)<|docstring|>Creates and returns a copy of this configuration
:returns: The copied configuration
:rtype: :class:`job.execution.configuration.json.exe_config.ExecutionConfiguration`<|endoftext|>
|
92c966bda940894a15e2c5e55612df1d5353aa638d47ce00fadb1040f13fff2f
|
def create_tasks(self, task_types):
'Makes sure that tasks with the given types are created and in the given order. If an already existing task\n type is not included in the given list, it will be removed.\n\n :param task_types: The list of task types\n :type task_types: :func:`list`\n '
tasks_by_type = {}
for task_dict in self._configuration['tasks']:
tasks_by_type[task_dict['type']] = task_dict
tasks = []
for task_type in task_types:
if (task_type in tasks_by_type):
tasks.append(tasks_by_type[task_type])
del tasks_by_type[task_type]
else:
tasks.append(ExecutionConfiguration._create_task(task_type))
self._configuration['tasks'] = tasks
|
Makes sure that tasks with the given types are created and in the given order. If an already existing task
type is not included in the given list, it will be removed.
:param task_types: The list of task types
:type task_types: :func:`list`
|
scale/job/execution/configuration/json/exe_config.py
|
create_tasks
|
kaydoh/scale
| 121
|
python
|
def create_tasks(self, task_types):
'Makes sure that tasks with the given types are created and in the given order. If an already existing task\n type is not included in the given list, it will be removed.\n\n :param task_types: The list of task types\n :type task_types: :func:`list`\n '
tasks_by_type = {}
for task_dict in self._configuration['tasks']:
tasks_by_type[task_dict['type']] = task_dict
tasks = []
for task_type in task_types:
if (task_type in tasks_by_type):
tasks.append(tasks_by_type[task_type])
del tasks_by_type[task_type]
else:
tasks.append(ExecutionConfiguration._create_task(task_type))
self._configuration['tasks'] = tasks
|
def create_tasks(self, task_types):
'Makes sure that tasks with the given types are created and in the given order. If an already existing task\n type is not included in the given list, it will be removed.\n\n :param task_types: The list of task types\n :type task_types: :func:`list`\n '
tasks_by_type = {}
for task_dict in self._configuration['tasks']:
tasks_by_type[task_dict['type']] = task_dict
tasks = []
for task_type in task_types:
if (task_type in tasks_by_type):
tasks.append(tasks_by_type[task_type])
del tasks_by_type[task_type]
else:
tasks.append(ExecutionConfiguration._create_task(task_type))
self._configuration['tasks'] = tasks<|docstring|>Makes sure that tasks with the given types are created and in the given order. If an already existing task
type is not included in the given list, it will be removed.
:param task_types: The list of task types
:type task_types: :func:`list`<|endoftext|>
|
d21756cc0a135d5b430a5b50f66ab1bad630aeaffed1c18f2a5fafb2971780e1
|
def get_args(self, task_type):
"Returns the command arguments for the given task type, None if the task type doesn't exist\n\n :param task_type: The task type\n :type task_type: string\n :returns: The command arguments, possibly None\n :rtype: string\n "
for task_dict in self._configuration['tasks']:
if (task_dict['type'] == task_type):
return task_dict['args']
return None
|
Returns the command arguments for the given task type, None if the task type doesn't exist
:param task_type: The task type
:type task_type: string
:returns: The command arguments, possibly None
:rtype: string
|
scale/job/execution/configuration/json/exe_config.py
|
get_args
|
kaydoh/scale
| 121
|
python
|
def get_args(self, task_type):
"Returns the command arguments for the given task type, None if the task type doesn't exist\n\n :param task_type: The task type\n :type task_type: string\n :returns: The command arguments, possibly None\n :rtype: string\n "
for task_dict in self._configuration['tasks']:
if (task_dict['type'] == task_type):
return task_dict['args']
return None
|
def get_args(self, task_type):
"Returns the command arguments for the given task type, None if the task type doesn't exist\n\n :param task_type: The task type\n :type task_type: string\n :returns: The command arguments, possibly None\n :rtype: string\n "
for task_dict in self._configuration['tasks']:
if (task_dict['type'] == task_type):
return task_dict['args']
return None<|docstring|>Returns the command arguments for the given task type, None if the task type doesn't exist
:param task_type: The task type
:type task_type: string
:returns: The command arguments, possibly None
:rtype: string<|endoftext|>
|
3e4b57d62d697f561a4c6421f158c6586b335c0d7bf337f1bbb311230f265e1c
|
def get_dict(self):
'Returns the internal dictionary that represents this execution configuration\n\n :returns: The internal dictionary\n :rtype: dict\n '
return self._configuration
|
Returns the internal dictionary that represents this execution configuration
:returns: The internal dictionary
:rtype: dict
|
scale/job/execution/configuration/json/exe_config.py
|
get_dict
|
kaydoh/scale
| 121
|
python
|
def get_dict(self):
'Returns the internal dictionary that represents this execution configuration\n\n :returns: The internal dictionary\n :rtype: dict\n '
return self._configuration
|
def get_dict(self):
'Returns the internal dictionary that represents this execution configuration\n\n :returns: The internal dictionary\n :rtype: dict\n '
return self._configuration<|docstring|>Returns the internal dictionary that represents this execution configuration
:returns: The internal dictionary
:rtype: dict<|endoftext|>
|
eb2d73427a2fa03a2e0a008fd99d9db2ef7128525a817809d4f618b670b79d9e
|
def get_docker_params(self, task_type):
'Returns the Docker parameters for the given task type\n\n :param task_type: The task type\n :type task_type: string\n :returns: The list of Docker parameters\n :rtype: :func:`list`\n '
params = []
for task_dict in self._configuration['tasks']:
if (task_dict['type'] == task_type):
if ('docker_params' in task_dict):
for param_dict in task_dict['docker_params']:
params.append(DockerParameter(param_dict['flag'], param_dict['value']))
return params
|
Returns the Docker parameters for the given task type
:param task_type: The task type
:type task_type: string
:returns: The list of Docker parameters
:rtype: :func:`list`
|
scale/job/execution/configuration/json/exe_config.py
|
get_docker_params
|
kaydoh/scale
| 121
|
python
|
def get_docker_params(self, task_type):
'Returns the Docker parameters for the given task type\n\n :param task_type: The task type\n :type task_type: string\n :returns: The list of Docker parameters\n :rtype: :func:`list`\n '
params = []
for task_dict in self._configuration['tasks']:
if (task_dict['type'] == task_type):
if ('docker_params' in task_dict):
for param_dict in task_dict['docker_params']:
params.append(DockerParameter(param_dict['flag'], param_dict['value']))
return params
|
def get_docker_params(self, task_type):
'Returns the Docker parameters for the given task type\n\n :param task_type: The task type\n :type task_type: string\n :returns: The list of Docker parameters\n :rtype: :func:`list`\n '
params = []
for task_dict in self._configuration['tasks']:
if (task_dict['type'] == task_type):
if ('docker_params' in task_dict):
for param_dict in task_dict['docker_params']:
params.append(DockerParameter(param_dict['flag'], param_dict['value']))
return params<|docstring|>Returns the Docker parameters for the given task type
:param task_type: The task type
:type task_type: string
:returns: The list of Docker parameters
:rtype: :func:`list`<|endoftext|>
|
f461ca7bd24246840de0059c59d56d2b6ff4d11478b5a27acc9ef6b8a175d259
|
def get_env_vars(self, task_type):
'Returns the environment variables for the given task type\n\n :param task_type: The task type\n :type task_type: string\n :returns: The dict of environment variables\n :rtype: dict\n '
for task_dict in self._configuration['tasks']:
if (task_dict['type'] == task_type):
if ('env_vars' in task_dict):
return task_dict['env_vars']
return {}
|
Returns the environment variables for the given task type
:param task_type: The task type
:type task_type: string
:returns: The dict of environment variables
:rtype: dict
|
scale/job/execution/configuration/json/exe_config.py
|
get_env_vars
|
kaydoh/scale
| 121
|
python
|
def get_env_vars(self, task_type):
'Returns the environment variables for the given task type\n\n :param task_type: The task type\n :type task_type: string\n :returns: The dict of environment variables\n :rtype: dict\n '
for task_dict in self._configuration['tasks']:
if (task_dict['type'] == task_type):
if ('env_vars' in task_dict):
return task_dict['env_vars']
return {}
|
def get_env_vars(self, task_type):
'Returns the environment variables for the given task type\n\n :param task_type: The task type\n :type task_type: string\n :returns: The dict of environment variables\n :rtype: dict\n '
for task_dict in self._configuration['tasks']:
if (task_dict['type'] == task_type):
if ('env_vars' in task_dict):
return task_dict['env_vars']
return {}<|docstring|>Returns the environment variables for the given task type
:param task_type: The task type
:type task_type: string
:returns: The dict of environment variables
:rtype: dict<|endoftext|>
|
1d29e6c21dd0f56c55de447632ec45f5227b0d499ebda4ab236b4f0aa3856c40
|
def get_input_workspace_names(self):
'Returns a list of the names of all input workspaces\n\n :returns: The list of the names of all input workspaces\n :rtype: :func:`list`\n '
workspace_names = set()
if ('input_files' in self._configuration):
for file_list in self._configuration['input_files'].values():
for file_dict in file_list:
workspace_names.add(file_dict['workspace_name'])
return list(workspace_names)
|
Returns a list of the names of all input workspaces
:returns: The list of the names of all input workspaces
:rtype: :func:`list`
|
scale/job/execution/configuration/json/exe_config.py
|
get_input_workspace_names
|
kaydoh/scale
| 121
|
python
|
def get_input_workspace_names(self):
'Returns a list of the names of all input workspaces\n\n :returns: The list of the names of all input workspaces\n :rtype: :func:`list`\n '
workspace_names = set()
if ('input_files' in self._configuration):
for file_list in self._configuration['input_files'].values():
for file_dict in file_list:
workspace_names.add(file_dict['workspace_name'])
return list(workspace_names)
|
def get_input_workspace_names(self):
'Returns a list of the names of all input workspaces\n\n :returns: The list of the names of all input workspaces\n :rtype: :func:`list`\n '
workspace_names = set()
if ('input_files' in self._configuration):
for file_list in self._configuration['input_files'].values():
for file_dict in file_list:
workspace_names.add(file_dict['workspace_name'])
return list(workspace_names)<|docstring|>Returns a list of the names of all input workspaces
:returns: The list of the names of all input workspaces
:rtype: :func:`list`<|endoftext|>
|
575cc87a122b15ebdf4b281a615bfab93cbcf637b2fe76e60121b060ccc15f41
|
def get_mounts(self, task_type):
'Returns the mounts for the given task type\n\n :param task_type: The task type\n :type task_type: string\n :returns: The dict of mount names mapping to volume names\n :rtype: dict\n '
for task_dict in self._configuration['tasks']:
if (task_dict['type'] == task_type):
if ('mounts' in task_dict):
return task_dict['mounts']
return {}
|
Returns the mounts for the given task type
:param task_type: The task type
:type task_type: string
:returns: The dict of mount names mapping to volume names
:rtype: dict
|
scale/job/execution/configuration/json/exe_config.py
|
get_mounts
|
kaydoh/scale
| 121
|
python
|
def get_mounts(self, task_type):
'Returns the mounts for the given task type\n\n :param task_type: The task type\n :type task_type: string\n :returns: The dict of mount names mapping to volume names\n :rtype: dict\n '
for task_dict in self._configuration['tasks']:
if (task_dict['type'] == task_type):
if ('mounts' in task_dict):
return task_dict['mounts']
return {}
|
def get_mounts(self, task_type):
'Returns the mounts for the given task type\n\n :param task_type: The task type\n :type task_type: string\n :returns: The dict of mount names mapping to volume names\n :rtype: dict\n '
for task_dict in self._configuration['tasks']:
if (task_dict['type'] == task_type):
if ('mounts' in task_dict):
return task_dict['mounts']
return {}<|docstring|>Returns the mounts for the given task type
:param task_type: The task type
:type task_type: string
:returns: The dict of mount names mapping to volume names
:rtype: dict<|endoftext|>
|
74b7b1f1add119f6d70f4e990f5b4d8bcb5076e039eda67908ed9357230e5a33
|
def get_named_docker_volumes(self):
'Returns the names of all (non-host) Docker volumes\n\n :returns: The list of all named Docker volumes\n :rtype: :func:`list`\n '
volumes = set()
for task_dict in self._configuration['tasks']:
if ('volumes' in task_dict):
for (name, vol_dict) in task_dict['volumes'].items():
if (vol_dict['type'] == 'volume'):
volumes.add(name)
return list(volumes)
|
Returns the names of all (non-host) Docker volumes
:returns: The list of all named Docker volumes
:rtype: :func:`list`
|
scale/job/execution/configuration/json/exe_config.py
|
get_named_docker_volumes
|
kaydoh/scale
| 121
|
python
|
def get_named_docker_volumes(self):
'Returns the names of all (non-host) Docker volumes\n\n :returns: The list of all named Docker volumes\n :rtype: :func:`list`\n '
volumes = set()
for task_dict in self._configuration['tasks']:
if ('volumes' in task_dict):
for (name, vol_dict) in task_dict['volumes'].items():
if (vol_dict['type'] == 'volume'):
volumes.add(name)
return list(volumes)
|
def get_named_docker_volumes(self):
'Returns the names of all (non-host) Docker volumes\n\n :returns: The list of all named Docker volumes\n :rtype: :func:`list`\n '
volumes = set()
for task_dict in self._configuration['tasks']:
if ('volumes' in task_dict):
for (name, vol_dict) in task_dict['volumes'].items():
if (vol_dict['type'] == 'volume'):
volumes.add(name)
return list(volumes)<|docstring|>Returns the names of all (non-host) Docker volumes
:returns: The list of all named Docker volumes
:rtype: :func:`list`<|endoftext|>
|
3f7ff0e72a0090fc49430b2bed50533b0694e13dc646e37f842151c23f0cd4f2
|
def get_output_workspace_names(self):
'Returns a list of the names of all output workspaces\n\n :returns: The list of the names of all output workspaces\n :rtype: :func:`list`\n '
if ('output_workspaces' in self._configuration):
return list(self._configuration['output_workspaces'].values())
return []
|
Returns a list of the names of all output workspaces
:returns: The list of the names of all output workspaces
:rtype: :func:`list`
|
scale/job/execution/configuration/json/exe_config.py
|
get_output_workspace_names
|
kaydoh/scale
| 121
|
python
|
def get_output_workspace_names(self):
'Returns a list of the names of all output workspaces\n\n :returns: The list of the names of all output workspaces\n :rtype: :func:`list`\n '
if ('output_workspaces' in self._configuration):
return list(self._configuration['output_workspaces'].values())
return []
|
def get_output_workspace_names(self):
'Returns a list of the names of all output workspaces\n\n :returns: The list of the names of all output workspaces\n :rtype: :func:`list`\n '
if ('output_workspaces' in self._configuration):
return list(self._configuration['output_workspaces'].values())
return []<|docstring|>Returns a list of the names of all output workspaces
:returns: The list of the names of all output workspaces
:rtype: :func:`list`<|endoftext|>
|
aeb61949bc1b3d0b75a4cd924c1a05712edcf73f94c02db1bf28fd844f45a8ec
|
def get_resources(self, task_type):
"Returns the resources for the given task type, None if the task type doesn't exist\n\n :param task_type: The task type\n :type task_type: string\n :returns: The task resources, possibly None\n :rtype: :class:`node.resources.node_resources.NodeResources`\n "
for task_dict in self._configuration['tasks']:
if ((task_dict['type'] == task_type) and ('resources' in task_dict)):
resources = []
for (name, value) in task_dict['resources'].items():
resources.append(ScalarResource(name, value))
return NodeResources(resources)
return None
|
Returns the resources for the given task type, None if the task type doesn't exist
:param task_type: The task type
:type task_type: string
:returns: The task resources, possibly None
:rtype: :class:`node.resources.node_resources.NodeResources`
|
scale/job/execution/configuration/json/exe_config.py
|
get_resources
|
kaydoh/scale
| 121
|
python
|
def get_resources(self, task_type):
"Returns the resources for the given task type, None if the task type doesn't exist\n\n :param task_type: The task type\n :type task_type: string\n :returns: The task resources, possibly None\n :rtype: :class:`node.resources.node_resources.NodeResources`\n "
for task_dict in self._configuration['tasks']:
if ((task_dict['type'] == task_type) and ('resources' in task_dict)):
resources = []
for (name, value) in task_dict['resources'].items():
resources.append(ScalarResource(name, value))
return NodeResources(resources)
return None
|
def get_resources(self, task_type):
"Returns the resources for the given task type, None if the task type doesn't exist\n\n :param task_type: The task type\n :type task_type: string\n :returns: The task resources, possibly None\n :rtype: :class:`node.resources.node_resources.NodeResources`\n "
for task_dict in self._configuration['tasks']:
if ((task_dict['type'] == task_type) and ('resources' in task_dict)):
resources = []
for (name, value) in task_dict['resources'].items():
resources.append(ScalarResource(name, value))
return NodeResources(resources)
return None<|docstring|>Returns the resources for the given task type, None if the task type doesn't exist
:param task_type: The task type
:type task_type: string
:returns: The task resources, possibly None
:rtype: :class:`node.resources.node_resources.NodeResources`<|endoftext|>
|
06428a88688d832a057fafce34237498c7bf2c0bd494482de13e782563868c41
|
def get_settings(self, task_type):
'Returns the settings for the given task type\n\n :param task_type: The task type\n :type task_type: string\n :returns: The dict of settings\n :rtype: dict\n '
for task_dict in self._configuration['tasks']:
if (task_dict['type'] == task_type):
if ('settings' in task_dict):
return task_dict['settings']
return {}
|
Returns the settings for the given task type
:param task_type: The task type
:type task_type: string
:returns: The dict of settings
:rtype: dict
|
scale/job/execution/configuration/json/exe_config.py
|
get_settings
|
kaydoh/scale
| 121
|
python
|
def get_settings(self, task_type):
'Returns the settings for the given task type\n\n :param task_type: The task type\n :type task_type: string\n :returns: The dict of settings\n :rtype: dict\n '
for task_dict in self._configuration['tasks']:
if (task_dict['type'] == task_type):
if ('settings' in task_dict):
return task_dict['settings']
return {}
|
def get_settings(self, task_type):
'Returns the settings for the given task type\n\n :param task_type: The task type\n :type task_type: string\n :returns: The dict of settings\n :rtype: dict\n '
for task_dict in self._configuration['tasks']:
if (task_dict['type'] == task_type):
if ('settings' in task_dict):
return task_dict['settings']
return {}<|docstring|>Returns the settings for the given task type
:param task_type: The task type
:type task_type: string
:returns: The dict of settings
:rtype: dict<|endoftext|>
|
a4dd3568769d6ef70797b50ea975cfc7fed2762cbd24d4e77bddc80e11904a3e
|
def get_task_id(self, task_type):
"Returns the task ID for the given task type, None if the task type doesn't exist\n\n :param task_type: The task type\n :type task_type: string\n :returns: The task ID, possibly None\n :rtype: string\n "
for task_dict in self._configuration['tasks']:
if ((task_dict['type'] == task_type) and ('task_id' in task_dict)):
return task_dict['task_id']
return None
|
Returns the task ID for the given task type, None if the task type doesn't exist
:param task_type: The task type
:type task_type: string
:returns: The task ID, possibly None
:rtype: string
|
scale/job/execution/configuration/json/exe_config.py
|
get_task_id
|
kaydoh/scale
| 121
|
python
|
def get_task_id(self, task_type):
"Returns the task ID for the given task type, None if the task type doesn't exist\n\n :param task_type: The task type\n :type task_type: string\n :returns: The task ID, possibly None\n :rtype: string\n "
for task_dict in self._configuration['tasks']:
if ((task_dict['type'] == task_type) and ('task_id' in task_dict)):
return task_dict['task_id']
return None
|
def get_task_id(self, task_type):
"Returns the task ID for the given task type, None if the task type doesn't exist\n\n :param task_type: The task type\n :type task_type: string\n :returns: The task ID, possibly None\n :rtype: string\n "
for task_dict in self._configuration['tasks']:
if ((task_dict['type'] == task_type) and ('task_id' in task_dict)):
return task_dict['task_id']
return None<|docstring|>Returns the task ID for the given task type, None if the task type doesn't exist
:param task_type: The task type
:type task_type: string
:returns: The task ID, possibly None
:rtype: string<|endoftext|>
|
5051f98b709179bc637df6386ef0b7cf74204bea3e152c296318ec256e2b4c4c
|
def get_task_types(self):
'Returns all task types in the configuration in order\n\n :returns: The ordered list of task types\n :rtype: :func:`list`\n '
task_types = []
for task_dict in self._configuration['tasks']:
task_types.append(task_dict['type'])
return task_types
|
Returns all task types in the configuration in order
:returns: The ordered list of task types
:rtype: :func:`list`
|
scale/job/execution/configuration/json/exe_config.py
|
get_task_types
|
kaydoh/scale
| 121
|
python
|
def get_task_types(self):
'Returns all task types in the configuration in order\n\n :returns: The ordered list of task types\n :rtype: :func:`list`\n '
task_types = []
for task_dict in self._configuration['tasks']:
task_types.append(task_dict['type'])
return task_types
|
def get_task_types(self):
'Returns all task types in the configuration in order\n\n :returns: The ordered list of task types\n :rtype: :func:`list`\n '
task_types = []
for task_dict in self._configuration['tasks']:
task_types.append(task_dict['type'])
return task_types<|docstring|>Returns all task types in the configuration in order
:returns: The ordered list of task types
:rtype: :func:`list`<|endoftext|>
|
934a8c7c57c1dd7b76b1091bf5b2bc238a22a43303f4f8a12ce8a71540661337
|
def get_volumes(self, task_type):
'Returns the Docker volumes for the given task type\n\n :param task_type: The task type\n :type task_type: string\n :returns: The dict of Docker volumes stored by volume name\n :rtype: dict\n '
volumes = {}
for task_dict in self._configuration['tasks']:
if (task_dict['type'] == task_type):
if ('volumes' in task_dict):
for (name, vol_dict) in task_dict['volumes'].items():
if (vol_dict['type'] == 'host'):
vol = Volume(name, vol_dict['container_path'], vol_dict['mode'], is_host=True, host_path=vol_dict['host_path'])
else:
driver = None
driver_opts = None
if ('driver' in vol_dict):
driver = vol_dict['driver']
if ('driver_opts' in vol_dict):
driver_opts = vol_dict['driver_opts']
vol = Volume(name, vol_dict['container_path'], vol_dict['mode'], is_host=False, driver=driver, driver_opts=driver_opts)
volumes[name] = vol
return volumes
|
Returns the Docker volumes for the given task type
:param task_type: The task type
:type task_type: string
:returns: The dict of Docker volumes stored by volume name
:rtype: dict
|
scale/job/execution/configuration/json/exe_config.py
|
get_volumes
|
kaydoh/scale
| 121
|
python
|
def get_volumes(self, task_type):
'Returns the Docker volumes for the given task type\n\n :param task_type: The task type\n :type task_type: string\n :returns: The dict of Docker volumes stored by volume name\n :rtype: dict\n '
volumes = {}
for task_dict in self._configuration['tasks']:
if (task_dict['type'] == task_type):
if ('volumes' in task_dict):
for (name, vol_dict) in task_dict['volumes'].items():
if (vol_dict['type'] == 'host'):
vol = Volume(name, vol_dict['container_path'], vol_dict['mode'], is_host=True, host_path=vol_dict['host_path'])
else:
driver = None
driver_opts = None
if ('driver' in vol_dict):
driver = vol_dict['driver']
if ('driver_opts' in vol_dict):
driver_opts = vol_dict['driver_opts']
vol = Volume(name, vol_dict['container_path'], vol_dict['mode'], is_host=False, driver=driver, driver_opts=driver_opts)
volumes[name] = vol
return volumes
|
def get_volumes(self, task_type):
'Returns the Docker volumes for the given task type\n\n :param task_type: The task type\n :type task_type: string\n :returns: The dict of Docker volumes stored by volume name\n :rtype: dict\n '
volumes = {}
for task_dict in self._configuration['tasks']:
if (task_dict['type'] == task_type):
if ('volumes' in task_dict):
for (name, vol_dict) in task_dict['volumes'].items():
if (vol_dict['type'] == 'host'):
vol = Volume(name, vol_dict['container_path'], vol_dict['mode'], is_host=True, host_path=vol_dict['host_path'])
else:
driver = None
driver_opts = None
if ('driver' in vol_dict):
driver = vol_dict['driver']
if ('driver_opts' in vol_dict):
driver_opts = vol_dict['driver_opts']
vol = Volume(name, vol_dict['container_path'], vol_dict['mode'], is_host=False, driver=driver, driver_opts=driver_opts)
volumes[name] = vol
return volumes<|docstring|>Returns the Docker volumes for the given task type
:param task_type: The task type
:type task_type: string
:returns: The dict of Docker volumes stored by volume name
:rtype: dict<|endoftext|>
|
1f552f23e54e806a6e0feadecf7617c95835a3bba902837d61953c3eea528926
|
def get_workspaces(self, task_type):
'Returns the workspaces for the given task type\n\n :param task_type: The task type\n :type task_type: string\n :returns: The list of workspaces\n :rtype: [:class:`job.execution.configuration.workspace.TaskWorkspace`]\n '
workspaces = []
for task_dict in self._configuration['tasks']:
if (task_dict['type'] == task_type):
if ('workspaces' in task_dict):
for (name, workspace_dict) in task_dict['workspaces'].items():
workspaces.append(TaskWorkspace(name, workspace_dict['mode']))
return workspaces
|
Returns the workspaces for the given task type
:param task_type: The task type
:type task_type: string
:returns: The list of workspaces
:rtype: [:class:`job.execution.configuration.workspace.TaskWorkspace`]
|
scale/job/execution/configuration/json/exe_config.py
|
get_workspaces
|
kaydoh/scale
| 121
|
python
|
def get_workspaces(self, task_type):
'Returns the workspaces for the given task type\n\n :param task_type: The task type\n :type task_type: string\n :returns: The list of workspaces\n :rtype: [:class:`job.execution.configuration.workspace.TaskWorkspace`]\n '
workspaces = []
for task_dict in self._configuration['tasks']:
if (task_dict['type'] == task_type):
if ('workspaces' in task_dict):
for (name, workspace_dict) in task_dict['workspaces'].items():
workspaces.append(TaskWorkspace(name, workspace_dict['mode']))
return workspaces
|
def get_workspaces(self, task_type):
'Returns the workspaces for the given task type\n\n :param task_type: The task type\n :type task_type: string\n :returns: The list of workspaces\n :rtype: [:class:`job.execution.configuration.workspace.TaskWorkspace`]\n '
workspaces = []
for task_dict in self._configuration['tasks']:
if (task_dict['type'] == task_type):
if ('workspaces' in task_dict):
for (name, workspace_dict) in task_dict['workspaces'].items():
workspaces.append(TaskWorkspace(name, workspace_dict['mode']))
return workspaces<|docstring|>Returns the workspaces for the given task type
:param task_type: The task type
:type task_type: string
:returns: The list of workspaces
:rtype: [:class:`job.execution.configuration.workspace.TaskWorkspace`]<|endoftext|>
|
fa7c7f208df2dcc994904be0f3c6112fa207964d237ac7acb78f33a98a55daa0
|
def set_input_files(self, input_files):
'Sets the given input files in the configuration\n\n :param input_files: A dict where data input name maps to a list of input files\n :type input_files: dict\n '
files_dict = {}
for input_name in input_files:
file_list = []
for input_file in input_files[input_name]:
file_dict = {'id': input_file.file_id, 'type': input_file.file_type, 'workspace_name': input_file.workspace_name, 'workspace_path': input_file.workspace_path, 'is_deleted': input_file.is_deleted}
if input_file.local_file_name:
file_dict['local_file_name'] = input_file.local_file_name
file_list.append(file_dict)
files_dict[input_name] = file_list
if files_dict:
self._configuration['input_files'] = files_dict
|
Sets the given input files in the configuration
:param input_files: A dict where data input name maps to a list of input files
:type input_files: dict
|
scale/job/execution/configuration/json/exe_config.py
|
set_input_files
|
kaydoh/scale
| 121
|
python
|
def set_input_files(self, input_files):
'Sets the given input files in the configuration\n\n :param input_files: A dict where data input name maps to a list of input files\n :type input_files: dict\n '
files_dict = {}
for input_name in input_files:
file_list = []
for input_file in input_files[input_name]:
file_dict = {'id': input_file.file_id, 'type': input_file.file_type, 'workspace_name': input_file.workspace_name, 'workspace_path': input_file.workspace_path, 'is_deleted': input_file.is_deleted}
if input_file.local_file_name:
file_dict['local_file_name'] = input_file.local_file_name
file_list.append(file_dict)
files_dict[input_name] = file_list
if files_dict:
self._configuration['input_files'] = files_dict
|
def set_input_files(self, input_files):
'Sets the given input files in the configuration\n\n :param input_files: A dict where data input name maps to a list of input files\n :type input_files: dict\n '
files_dict = {}
for input_name in input_files:
file_list = []
for input_file in input_files[input_name]:
file_dict = {'id': input_file.file_id, 'type': input_file.file_type, 'workspace_name': input_file.workspace_name, 'workspace_path': input_file.workspace_path, 'is_deleted': input_file.is_deleted}
if input_file.local_file_name:
file_dict['local_file_name'] = input_file.local_file_name
file_list.append(file_dict)
files_dict[input_name] = file_list
if files_dict:
self._configuration['input_files'] = files_dict<|docstring|>Sets the given input files in the configuration
:param input_files: A dict where data input name maps to a list of input files
:type input_files: dict<|endoftext|>
|
a89fc46e5aaef2a69bf3f08d756fb56e7105ea8092141a430b7e97021ff4d3af
|
def set_output_workspaces(self, output_workspaces):
'Sets the given output workspaces in the configuration\n\n :param output_workspaces: A dict where job output parameters map to output workspace name\n :type output_workspaces: dict\n '
if output_workspaces:
self._configuration['output_workspaces'] = output_workspaces
|
Sets the given output workspaces in the configuration
:param output_workspaces: A dict where job output parameters map to output workspace name
:type output_workspaces: dict
|
scale/job/execution/configuration/json/exe_config.py
|
set_output_workspaces
|
kaydoh/scale
| 121
|
python
|
def set_output_workspaces(self, output_workspaces):
'Sets the given output workspaces in the configuration\n\n :param output_workspaces: A dict where job output parameters map to output workspace name\n :type output_workspaces: dict\n '
if output_workspaces:
self._configuration['output_workspaces'] = output_workspaces
|
def set_output_workspaces(self, output_workspaces):
'Sets the given output workspaces in the configuration\n\n :param output_workspaces: A dict where job output parameters map to output workspace name\n :type output_workspaces: dict\n '
if output_workspaces:
self._configuration['output_workspaces'] = output_workspaces<|docstring|>Sets the given output workspaces in the configuration
:param output_workspaces: A dict where job output parameters map to output workspace name
:type output_workspaces: dict<|endoftext|>
|
0a6a5c53b77e3d4bd837d914fdd9d3857c26f19d9854e7044518f3e40426fe71
|
def set_task_ids(self, cluster_id):
'Sets the IDs for all of the tasks\n\n :param cluster_id: The cluster ID for the job execution\n :type cluster_id: string\n '
for task_dict in self._configuration['tasks']:
task_dict['task_id'] = ('%s_%s' % (cluster_id, task_dict['type']))
|
Sets the IDs for all of the tasks
:param cluster_id: The cluster ID for the job execution
:type cluster_id: string
|
scale/job/execution/configuration/json/exe_config.py
|
set_task_ids
|
kaydoh/scale
| 121
|
python
|
def set_task_ids(self, cluster_id):
'Sets the IDs for all of the tasks\n\n :param cluster_id: The cluster ID for the job execution\n :type cluster_id: string\n '
for task_dict in self._configuration['tasks']:
task_dict['task_id'] = ('%s_%s' % (cluster_id, task_dict['type']))
|
def set_task_ids(self, cluster_id):
'Sets the IDs for all of the tasks\n\n :param cluster_id: The cluster ID for the job execution\n :type cluster_id: string\n '
for task_dict in self._configuration['tasks']:
task_dict['task_id'] = ('%s_%s' % (cluster_id, task_dict['type']))<|docstring|>Sets the IDs for all of the tasks
:param cluster_id: The cluster ID for the job execution
:type cluster_id: string<|endoftext|>
|
b25f7031146d12ee12c502d95414eb9bc001f57096fe010e796188bbdbd11fd7
|
@staticmethod
def _add_args_to_task(task_dict, args):
'Adds the given command arguments to the given task\n\n :param task_dict: The task dict\n :type task_dict: dict\n :param args: The command arguments\n :type args: string\n '
task_dict['args'] = args
|
Adds the given command arguments to the given task
:param task_dict: The task dict
:type task_dict: dict
:param args: The command arguments
:type args: string
|
scale/job/execution/configuration/json/exe_config.py
|
_add_args_to_task
|
kaydoh/scale
| 121
|
python
|
@staticmethod
def _add_args_to_task(task_dict, args):
'Adds the given command arguments to the given task\n\n :param task_dict: The task dict\n :type task_dict: dict\n :param args: The command arguments\n :type args: string\n '
task_dict['args'] = args
|
@staticmethod
def _add_args_to_task(task_dict, args):
'Adds the given command arguments to the given task\n\n :param task_dict: The task dict\n :type task_dict: dict\n :param args: The command arguments\n :type args: string\n '
task_dict['args'] = args<|docstring|>Adds the given command arguments to the given task
:param task_dict: The task dict
:type task_dict: dict
:param args: The command arguments
:type args: string<|endoftext|>
|
4ead747d58650ed9352de41944d4eda7225adedcd644f1ba45b9a5b54750cb13
|
@staticmethod
def _add_docker_params_to_task(task_dict, docker_params):
'Adds the given Docker parameters to the given task\n\n :param task_dict: The task dict\n :type task_dict: dict\n :param docker_params: The Docker parameters\n :type docker_params: :func:`list`\n '
if ('docker_params' in task_dict):
task_docker_params = task_dict['docker_params']
else:
task_docker_params = []
task_dict['docker_params'] = task_docker_params
for param in docker_params:
task_docker_params.append({'flag': param.flag, 'value': param.value})
|
Adds the given Docker parameters to the given task
:param task_dict: The task dict
:type task_dict: dict
:param docker_params: The Docker parameters
:type docker_params: :func:`list`
|
scale/job/execution/configuration/json/exe_config.py
|
_add_docker_params_to_task
|
kaydoh/scale
| 121
|
python
|
@staticmethod
def _add_docker_params_to_task(task_dict, docker_params):
'Adds the given Docker parameters to the given task\n\n :param task_dict: The task dict\n :type task_dict: dict\n :param docker_params: The Docker parameters\n :type docker_params: :func:`list`\n '
if ('docker_params' in task_dict):
task_docker_params = task_dict['docker_params']
else:
task_docker_params = []
task_dict['docker_params'] = task_docker_params
for param in docker_params:
task_docker_params.append({'flag': param.flag, 'value': param.value})
|
@staticmethod
def _add_docker_params_to_task(task_dict, docker_params):
'Adds the given Docker parameters to the given task\n\n :param task_dict: The task dict\n :type task_dict: dict\n :param docker_params: The Docker parameters\n :type docker_params: :func:`list`\n '
if ('docker_params' in task_dict):
task_docker_params = task_dict['docker_params']
else:
task_docker_params = []
task_dict['docker_params'] = task_docker_params
for param in docker_params:
task_docker_params.append({'flag': param.flag, 'value': param.value})<|docstring|>Adds the given Docker parameters to the given task
:param task_dict: The task dict
:type task_dict: dict
:param docker_params: The Docker parameters
:type docker_params: :func:`list`<|endoftext|>
|
086c6db5340da376fa67e18df603b94fcec59c3ed8317abd0fbfcc2e0598cf5f
|
@staticmethod
def _add_env_vars_to_task(task_dict, env_vars):
'Adds the given environment variables to the given task\n\n :param task_dict: The task dict\n :type task_dict: dict\n :param env_vars: The environment variables\n :type env_vars: dict\n '
if ('env_vars' in task_dict):
task_env_vars = task_dict['env_vars']
else:
task_env_vars = {}
task_dict['env_vars'] = task_env_vars
for (name, value) in env_vars.items():
task_env_vars[name] = value
|
Adds the given environment variables to the given task
:param task_dict: The task dict
:type task_dict: dict
:param env_vars: The environment variables
:type env_vars: dict
|
scale/job/execution/configuration/json/exe_config.py
|
_add_env_vars_to_task
|
kaydoh/scale
| 121
|
python
|
@staticmethod
def _add_env_vars_to_task(task_dict, env_vars):
'Adds the given environment variables to the given task\n\n :param task_dict: The task dict\n :type task_dict: dict\n :param env_vars: The environment variables\n :type env_vars: dict\n '
if ('env_vars' in task_dict):
task_env_vars = task_dict['env_vars']
else:
task_env_vars = {}
task_dict['env_vars'] = task_env_vars
for (name, value) in env_vars.items():
task_env_vars[name] = value
|
@staticmethod
def _add_env_vars_to_task(task_dict, env_vars):
'Adds the given environment variables to the given task\n\n :param task_dict: The task dict\n :type task_dict: dict\n :param env_vars: The environment variables\n :type env_vars: dict\n '
if ('env_vars' in task_dict):
task_env_vars = task_dict['env_vars']
else:
task_env_vars = {}
task_dict['env_vars'] = task_env_vars
for (name, value) in env_vars.items():
task_env_vars[name] = value<|docstring|>Adds the given environment variables to the given task
:param task_dict: The task dict
:type task_dict: dict
:param env_vars: The environment variables
:type env_vars: dict<|endoftext|>
|
af784c60aa778bc67b7d6cbb421c01190a1aedbec17cc7f7f7141626456c4337
|
@staticmethod
def _add_mount_volumes_to_task(task_dict, mount_volumes):
'Adds the given mount volumes to the given task. A mount volume of None indicates a required mount that is\n missing.\n\n :param task_dict: The task dict\n :type task_dict: dict\n :param mount_volumes: The mount volumes stored by mount name (a volume may be None)\n :type mount_volumes: dict\n '
if ('mounts' in task_dict):
task_mounts = task_dict['mounts']
else:
task_mounts = {}
task_dict['mounts'] = task_mounts
volumes = []
for (mount_name, volume) in mount_volumes.items():
if volume:
task_mounts[mount_name] = volume.name
volumes.append(volume)
else:
task_mounts[mount_name] = None
ExecutionConfiguration._add_volumes_to_task(task_dict, volumes)
|
Adds the given mount volumes to the given task. A mount volume of None indicates a required mount that is
missing.
:param task_dict: The task dict
:type task_dict: dict
:param mount_volumes: The mount volumes stored by mount name (a volume may be None)
:type mount_volumes: dict
|
scale/job/execution/configuration/json/exe_config.py
|
_add_mount_volumes_to_task
|
kaydoh/scale
| 121
|
python
|
@staticmethod
def _add_mount_volumes_to_task(task_dict, mount_volumes):
'Adds the given mount volumes to the given task. A mount volume of None indicates a required mount that is\n missing.\n\n :param task_dict: The task dict\n :type task_dict: dict\n :param mount_volumes: The mount volumes stored by mount name (a volume may be None)\n :type mount_volumes: dict\n '
if ('mounts' in task_dict):
task_mounts = task_dict['mounts']
else:
task_mounts = {}
task_dict['mounts'] = task_mounts
volumes = []
for (mount_name, volume) in mount_volumes.items():
if volume:
task_mounts[mount_name] = volume.name
volumes.append(volume)
else:
task_mounts[mount_name] = None
ExecutionConfiguration._add_volumes_to_task(task_dict, volumes)
|
@staticmethod
def _add_mount_volumes_to_task(task_dict, mount_volumes):
'Adds the given mount volumes to the given task. A mount volume of None indicates a required mount that is\n missing.\n\n :param task_dict: The task dict\n :type task_dict: dict\n :param mount_volumes: The mount volumes stored by mount name (a volume may be None)\n :type mount_volumes: dict\n '
if ('mounts' in task_dict):
task_mounts = task_dict['mounts']
else:
task_mounts = {}
task_dict['mounts'] = task_mounts
volumes = []
for (mount_name, volume) in mount_volumes.items():
if volume:
task_mounts[mount_name] = volume.name
volumes.append(volume)
else:
task_mounts[mount_name] = None
ExecutionConfiguration._add_volumes_to_task(task_dict, volumes)<|docstring|>Adds the given mount volumes to the given task. A mount volume of None indicates a required mount that is
missing.
:param task_dict: The task dict
:type task_dict: dict
:param mount_volumes: The mount volumes stored by mount name (a volume may be None)
:type mount_volumes: dict<|endoftext|>
|
029d9d0028edad29b5123f2265cc2e736e82c642cd9c48ab28c374a5259f5602
|
@staticmethod
def _add_resources_to_task(task_dict, resources):
'Adds the given resources to the given task\n\n :param task_dict: The task dict\n :type task_dict: dict\n :param resources: The resources\n :type resources: :class:`node.resources.node_resources.NodeResources`\n '
resources_dict = {}
for resource in resources.resources:
resources_dict[resource.name] = resource.value
task_dict['resources'] = resources_dict
|
Adds the given resources to the given task
:param task_dict: The task dict
:type task_dict: dict
:param resources: The resources
:type resources: :class:`node.resources.node_resources.NodeResources`
|
scale/job/execution/configuration/json/exe_config.py
|
_add_resources_to_task
|
kaydoh/scale
| 121
|
python
|
@staticmethod
def _add_resources_to_task(task_dict, resources):
'Adds the given resources to the given task\n\n :param task_dict: The task dict\n :type task_dict: dict\n :param resources: The resources\n :type resources: :class:`node.resources.node_resources.NodeResources`\n '
resources_dict = {}
for resource in resources.resources:
resources_dict[resource.name] = resource.value
task_dict['resources'] = resources_dict
|
@staticmethod
def _add_resources_to_task(task_dict, resources):
'Adds the given resources to the given task\n\n :param task_dict: The task dict\n :type task_dict: dict\n :param resources: The resources\n :type resources: :class:`node.resources.node_resources.NodeResources`\n '
resources_dict = {}
for resource in resources.resources:
resources_dict[resource.name] = resource.value
task_dict['resources'] = resources_dict<|docstring|>Adds the given resources to the given task
:param task_dict: The task dict
:type task_dict: dict
:param resources: The resources
:type resources: :class:`node.resources.node_resources.NodeResources`<|endoftext|>
|
9e6f0071339c991df6d4884f20eddfd991e033e85d764ebe3e6d7cb17d4117e8
|
@staticmethod
def _add_settings_to_task(task_dict, settings):
'Adds the given settings to the given task. A setting value of None indicates a required setting that is\n missing.\n\n :param task_dict: The task dict\n :type task_dict: dict\n :param settings: The setting names and values (a value may be None)\n :type settings: dict\n '
if ('settings' in task_dict):
task_settings = task_dict['settings']
else:
task_settings = {}
task_dict['settings'] = task_settings
for (name, value) in settings.items():
task_settings[name] = value
|
Adds the given settings to the given task. A setting value of None indicates a required setting that is
missing.
:param task_dict: The task dict
:type task_dict: dict
:param settings: The setting names and values (a value may be None)
:type settings: dict
|
scale/job/execution/configuration/json/exe_config.py
|
_add_settings_to_task
|
kaydoh/scale
| 121
|
python
|
@staticmethod
def _add_settings_to_task(task_dict, settings):
'Adds the given settings to the given task. A setting value of None indicates a required setting that is\n missing.\n\n :param task_dict: The task dict\n :type task_dict: dict\n :param settings: The setting names and values (a value may be None)\n :type settings: dict\n '
if ('settings' in task_dict):
task_settings = task_dict['settings']
else:
task_settings = {}
task_dict['settings'] = task_settings
for (name, value) in settings.items():
task_settings[name] = value
|
@staticmethod
def _add_settings_to_task(task_dict, settings):
'Adds the given settings to the given task. A setting value of None indicates a required setting that is\n missing.\n\n :param task_dict: The task dict\n :type task_dict: dict\n :param settings: The setting names and values (a value may be None)\n :type settings: dict\n '
if ('settings' in task_dict):
task_settings = task_dict['settings']
else:
task_settings = {}
task_dict['settings'] = task_settings
for (name, value) in settings.items():
task_settings[name] = value<|docstring|>Adds the given settings to the given task. A setting value of None indicates a required setting that is
missing.
:param task_dict: The task dict
:type task_dict: dict
:param settings: The setting names and values (a value may be None)
:type settings: dict<|endoftext|>
|
be8fdfd10100eacd0529e9603c630e6766b92e9a8256e8d64f2445997c32b5d2
|
@staticmethod
def _add_volumes_to_task(task_dict, volumes):
'Adds the given volumes to the given task\n\n :param task_dict: The task dict\n :type task_dict: dict\n :param volumes: The list of volumes\n :type volumes: :func:`list`\n '
if (not volumes):
return
if ('volumes' in task_dict):
task_volumes = task_dict['volumes']
else:
task_volumes = {}
task_dict['volumes'] = task_volumes
for volume in volumes:
if volume.is_host:
vol_dict = {'container_path': volume.container_path, 'mode': volume.mode, 'type': 'host', 'host_path': volume.host_path}
else:
vol_dict = {'container_path': volume.container_path, 'mode': volume.mode, 'type': 'volume'}
if volume.driver:
vol_dict['driver'] = volume.driver
if volume.driver_opts:
vol_dict['driver_opts'] = volume.driver_opts
task_volumes[volume.name] = vol_dict
|
Adds the given volumes to the given task
:param task_dict: The task dict
:type task_dict: dict
:param volumes: The list of volumes
:type volumes: :func:`list`
|
scale/job/execution/configuration/json/exe_config.py
|
_add_volumes_to_task
|
kaydoh/scale
| 121
|
python
|
@staticmethod
def _add_volumes_to_task(task_dict, volumes):
'Adds the given volumes to the given task\n\n :param task_dict: The task dict\n :type task_dict: dict\n :param volumes: The list of volumes\n :type volumes: :func:`list`\n '
if (not volumes):
return
if ('volumes' in task_dict):
task_volumes = task_dict['volumes']
else:
task_volumes = {}
task_dict['volumes'] = task_volumes
for volume in volumes:
if volume.is_host:
vol_dict = {'container_path': volume.container_path, 'mode': volume.mode, 'type': 'host', 'host_path': volume.host_path}
else:
vol_dict = {'container_path': volume.container_path, 'mode': volume.mode, 'type': 'volume'}
if volume.driver:
vol_dict['driver'] = volume.driver
if volume.driver_opts:
vol_dict['driver_opts'] = volume.driver_opts
task_volumes[volume.name] = vol_dict
|
@staticmethod
def _add_volumes_to_task(task_dict, volumes):
'Adds the given volumes to the given task\n\n :param task_dict: The task dict\n :type task_dict: dict\n :param volumes: The list of volumes\n :type volumes: :func:`list`\n '
if (not volumes):
return
if ('volumes' in task_dict):
task_volumes = task_dict['volumes']
else:
task_volumes = {}
task_dict['volumes'] = task_volumes
for volume in volumes:
if volume.is_host:
vol_dict = {'container_path': volume.container_path, 'mode': volume.mode, 'type': 'host', 'host_path': volume.host_path}
else:
vol_dict = {'container_path': volume.container_path, 'mode': volume.mode, 'type': 'volume'}
if volume.driver:
vol_dict['driver'] = volume.driver
if volume.driver_opts:
vol_dict['driver_opts'] = volume.driver_opts
task_volumes[volume.name] = vol_dict<|docstring|>Adds the given volumes to the given task
:param task_dict: The task dict
:type task_dict: dict
:param volumes: The list of volumes
:type volumes: :func:`list`<|endoftext|>
|
35ba8b28e265dbe57edfd9e51bd0b2b4433613710431259223730365d6256997
|
@staticmethod
def _add_workspace_volumes_to_task(task_dict, wksp_volumes):
'Adds the given workspace volumes to the given task\n\n :param task_dict: The task dict\n :type task_dict: dict\n :param wksp_volumes: The workspace volumes stored by workspace name\n :type wksp_volumes: dict\n '
if ('workspaces' in task_dict):
task_workspaces = task_dict['workspaces']
else:
task_workspaces = {}
task_dict['workspaces'] = task_workspaces
for (name, volume) in wksp_volumes.items():
wksp_dict = {'mode': volume.mode}
if volume.name:
wksp_dict['volume_name'] = volume.name
task_workspaces[name] = wksp_dict
ExecutionConfiguration._add_volumes_to_task(task_dict, wksp_volumes.values())
|
Adds the given workspace volumes to the given task
:param task_dict: The task dict
:type task_dict: dict
:param wksp_volumes: The workspace volumes stored by workspace name
:type wksp_volumes: dict
|
scale/job/execution/configuration/json/exe_config.py
|
_add_workspace_volumes_to_task
|
kaydoh/scale
| 121
|
python
|
@staticmethod
def _add_workspace_volumes_to_task(task_dict, wksp_volumes):
'Adds the given workspace volumes to the given task\n\n :param task_dict: The task dict\n :type task_dict: dict\n :param wksp_volumes: The workspace volumes stored by workspace name\n :type wksp_volumes: dict\n '
if ('workspaces' in task_dict):
task_workspaces = task_dict['workspaces']
else:
task_workspaces = {}
task_dict['workspaces'] = task_workspaces
for (name, volume) in wksp_volumes.items():
wksp_dict = {'mode': volume.mode}
if volume.name:
wksp_dict['volume_name'] = volume.name
task_workspaces[name] = wksp_dict
ExecutionConfiguration._add_volumes_to_task(task_dict, wksp_volumes.values())
|
@staticmethod
def _add_workspace_volumes_to_task(task_dict, wksp_volumes):
'Adds the given workspace volumes to the given task\n\n :param task_dict: The task dict\n :type task_dict: dict\n :param wksp_volumes: The workspace volumes stored by workspace name\n :type wksp_volumes: dict\n '
if ('workspaces' in task_dict):
task_workspaces = task_dict['workspaces']
else:
task_workspaces = {}
task_dict['workspaces'] = task_workspaces
for (name, volume) in wksp_volumes.items():
wksp_dict = {'mode': volume.mode}
if volume.name:
wksp_dict['volume_name'] = volume.name
task_workspaces[name] = wksp_dict
ExecutionConfiguration._add_volumes_to_task(task_dict, wksp_volumes.values())<|docstring|>Adds the given workspace volumes to the given task
:param task_dict: The task dict
:type task_dict: dict
:param wksp_volumes: The workspace volumes stored by workspace name
:type wksp_volumes: dict<|endoftext|>
|
1a7b335b10f4935e3a0d8ab1f04d55b0696b8a4a5223e313e73a8828f2f6e8d9
|
@staticmethod
def _add_workspaces_to_task(task_dict, workspaces):
'Adds the given workspaces to the given task\n\n :param task_dict: The task dict\n :type task_dict: dict\n :param workspaces: The workspaces stored by name\n :type workspaces: dict\n '
if ('workspaces' in task_dict):
task_workspaces = task_dict['workspaces']
else:
task_workspaces = {}
task_dict['workspaces'] = task_workspaces
for workspace in workspaces.values():
if (workspace.name in task_workspaces):
existing_workspace = task_workspaces[workspace.name]
if ((existing_workspace['mode'] == MODE_RW) or (workspace.mode == MODE_RO)):
continue
workspace_dict = {'mode': workspace.mode}
if workspace.volume_name:
workspace_dict['volume_name'] = workspace.volume_name
task_workspaces[workspace.name] = workspace_dict
|
Adds the given workspaces to the given task
:param task_dict: The task dict
:type task_dict: dict
:param workspaces: The workspaces stored by name
:type workspaces: dict
|
scale/job/execution/configuration/json/exe_config.py
|
_add_workspaces_to_task
|
kaydoh/scale
| 121
|
python
|
@staticmethod
def _add_workspaces_to_task(task_dict, workspaces):
'Adds the given workspaces to the given task\n\n :param task_dict: The task dict\n :type task_dict: dict\n :param workspaces: The workspaces stored by name\n :type workspaces: dict\n '
if ('workspaces' in task_dict):
task_workspaces = task_dict['workspaces']
else:
task_workspaces = {}
task_dict['workspaces'] = task_workspaces
for workspace in workspaces.values():
if (workspace.name in task_workspaces):
existing_workspace = task_workspaces[workspace.name]
if ((existing_workspace['mode'] == MODE_RW) or (workspace.mode == MODE_RO)):
continue
workspace_dict = {'mode': workspace.mode}
if workspace.volume_name:
workspace_dict['volume_name'] = workspace.volume_name
task_workspaces[workspace.name] = workspace_dict
|
@staticmethod
def _add_workspaces_to_task(task_dict, workspaces):
'Adds the given workspaces to the given task\n\n :param task_dict: The task dict\n :type task_dict: dict\n :param workspaces: The workspaces stored by name\n :type workspaces: dict\n '
if ('workspaces' in task_dict):
task_workspaces = task_dict['workspaces']
else:
task_workspaces = {}
task_dict['workspaces'] = task_workspaces
for workspace in workspaces.values():
if (workspace.name in task_workspaces):
existing_workspace = task_workspaces[workspace.name]
if ((existing_workspace['mode'] == MODE_RW) or (workspace.mode == MODE_RO)):
continue
workspace_dict = {'mode': workspace.mode}
if workspace.volume_name:
workspace_dict['volume_name'] = workspace.volume_name
task_workspaces[workspace.name] = workspace_dict<|docstring|>Adds the given workspaces to the given task
:param task_dict: The task dict
:type task_dict: dict
:param workspaces: The workspaces stored by name
:type workspaces: dict<|endoftext|>
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.