Dataset columns:

content            string   (length 27 to 928k)
path               string   (length 4 to 230)
size               int64    (27 to 928k)
nl_text            string   (length 21 to 396k)
nl_size            int64    (21 to 396k)
nl_language        string   (length 2 to 3)
nl_language_score  float64  (0.04 to 1)

Each row below lists its values in this column order.
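The schema above is all that is needed to slice the corpus. As a minimal sketch (assuming the rows are loaded into a pandas DataFrame with exactly these column names; the file name rows.parquet is only a placeholder), one might keep confidently English samples under a size cap:

import pandas as pd

# Placeholder input; any source that yields the columns listed above will do.
df = pd.read_parquet("rows.parquet")

# Keep rows whose extracted natural-language text is confidently English
# and whose source file is not excessively large.
mask = (
    (df["nl_language"] == "en")
    & (df["nl_language_score"] >= 0.8)
    & (df["size"] <= 100_000)
)
print(df.loc[mask, ["path", "size", "nl_size", "nl_language_score"]].head())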
import pygame
from game.game import Game


def initialization():
    """Инициализация нужных файлов игры"""
    pygame.init()
    pygame.display.set_icon(pygame.image.load("data/icon.bmp"))
    pygame.display.set_caption('SPACE')


if __name__ == "__main__":
    initialization()
    game = Game()
    game.run()
    pygame.quit()
path: main.py
size: 361
nl_text: Инициализация нужных файлов игры
nl_size: 32
nl_language: ru
nl_language_score: 0.999181
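The main.py row above delegates all game logic to game.game.Game, which is not part of the sample. A hypothetical minimal stand-in for that class (the window size, frame rate, and event loop are assumptions, not taken from the row) could look like:

import pygame


class Game:
    """Hypothetical stand-in for game.game.Game referenced by main.py above."""

    def __init__(self, size=(800, 600), fps=60):
        self.screen = pygame.display.set_mode(size)  # assumed window size
        self.clock = pygame.time.Clock()
        self.fps = fps
        self.running = True

    def run(self):
        # Standard pygame event/update/draw loop.
        while self.running:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    self.running = False
            self.screen.fill((0, 0, 0))
            pygame.display.flip()
            self.clock.tick(self.fps)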
# External Dependencies from __future__ import division from numpy import isclose from svgpathtools import Path # Internal Dependencies from misc4rings import isNear class ClosedRingsOverlapError(Exception): def __init__(self,mes): self.mes = mes def __str__(self): return repr(self.mes) def findAppropriateTstep(path, T, stepInPositiveDirection): # Often the overlapping part of two paths is so small that when removed, pathXpathIntersections, will still consider the two curves as intersecting. This function is to find the smallest (signed) Tstep such that isNear(path(T),path(T+Tstep))==False. # note: stepInPositiveDirection should be True if Tstep should be positve # set initial guess as max possible step distance (and set sign of Tstep) # T = float(T) if stepInPositiveDirection: Tstep = 1 - T else: Tstep = 0 - T #check that what we're asking for is possible if isNear(path.point(T + Tstep), path.point(T)): raise Exception("An impossible Tstep was asked for.") #Find a lower bound for Tstep by bisection maxIts = 200 # limits Tstep to be > (1/2)**200 its = 0 while not isNear(path.point(T + Tstep), path.point(T)) and its < maxIts: Tstep /= 2 its += 1 if its >= maxIts: raise Exception("Max iterations reached in bisection to find " "appropriate Tstep. This could theoretically be ok " "if you have a curve with a huge number of " "segments... just increase the maxIts in " "findAppropriateTstep if you have a such a curve " "(but I doubt that's the case - so tell Andy).") return 2 * Tstep def shortPart(path,T): if isclose(T, 0) or isclose(T, 1): return Path() if T < 1-T: # T is closer to 0 # return cropPath(path,0,T) return path.cropped(0, T) else: # T is closer to 1 # return cropPath(path,T,1) return path.cropped(T, 1) def longPart(path, T, remove_a_little_extra=True): if remove_a_little_extra: if T < 1 - T: # T is closer to 0 than 1 extra = T if isNear(path.point(T + extra), path.point(T)): extra = findAppropriateTstep(path, T, True) else: # T is closer to 1 than 0 extra = 1-T if isNear(path.point(T+extra), path.point(T)): extra = -1 * findAppropriateTstep(path, T, False) else: extra = 0 if T < 1 - T: #T is closer to 0 than 1 # return cropPath(path,T+extra,1) return path.cropped(T + extra, 1) else: #T is closer to 1 than 0 # return cropPath(path,0,T-extra) return path.cropped(0, T - extra) def remove_intersections(ipath, jpath, iclosed, jclosed, iringupdated=False, jringupdated=False): #removes one intersection at a time until all are gone new_ipath = ipath new_jpath = jpath #find all intersections res = ipath.intersect(jpath, justonemode=True) # res = pathXpathIntersections(ipath, jpath, justonemode=True) if res: iT, iseg, i_t = res[0] jT, jseg, j_t = res[1] # iT = ipath.t2T(iseg, i_t) # jT = jpath.t2T(jseg, j_t) else: run_again = False return new_ipath, new_jpath, iringupdated, jringupdated, run_again #Now find crop the path (if one ring is closed, crop the other ring) if iclosed and jclosed: #then crop jpath raise ClosedRingsOverlapError("") elif jclosed: #jring closed so crop iring new_ipath = longPart(ipath, iT) new_jpath = jpath iringupdated = True elif iclosed: #iring closed so crop jring new_jpath = longPart(jpath, jT) new_ipath = ipath jringupdated = True else: #both rings are incomplete if iT in [0, 1]: new_ipath = longPart(ipath, iT) new_jpath = jpath iringupdated = True elif jT in [0, 1]: new_jpath = longPart(jpath, jT) new_ipath = ipath jringupdated = True elif shortPart(ipath, iT).length() < shortPart(jpath, jT).length(): new_ipath = longPart(ipath, iT) new_jpath = jpath iringupdated = 
True else: new_jpath = longPart(jpath, jT) new_ipath = ipath jringupdated = True run_again = True # might be more intersections to remove, so run again return new_ipath, new_jpath, iringupdated, jringupdated, run_again def remove_intersections_from_rings(rings): from options4rings import intersection_removal_progress_output_on from time import time as current_time from andysmod import n_choose_k, format_time [r.record_wasClosed() for r in rings] # record the current closure status #for output num_segments_in_ring_list = sum(len(r.path) for r in rings) num_seg_pairs2check = n_choose_k(num_segments_in_ring_list, 2) num_seg_pairs_checked = 0 current_percent_complete = 0 start_time = current_time() count = 0 overlappingClosedRingPairs = [] for i in range(len(rings)): iring = rings[i] ipath = iring.path new_ipath = ipath iclosed = iring.wasClosed iringupdated = False num_segs_in_ipath = len(ipath) # for progress output for j in range(i+1, len(rings)): if rings[j].maxR < rings[i].minR or rings[i].maxR < rings[j].minR: continue jring = rings[j] jpath = jring.path new_jpath = jpath jclosed = jring.wasClosed jringupdated = False num_segs_in_jpath = len(jpath) #for progress output # while loop to remove intersections between iring and jring (if any exist) run_again = True maxits = 20 its = 0 while run_again and its < maxits: try: args = (new_ipath, new_jpath, iclosed, jclosed) res = remove_intersections(*args, iringupdated=iringupdated, jringupdated=jringupdated) new_ipath, new_jpath, iringupdated, jringupdated, run_again = res except ClosedRingsOverlapError: overlappingClosedRingPairs.append((i, j)) run_again = False pass its += 1 # raise Exception if while loop terminateded due to reaching max allowed iteratations if its >= maxits: # remove_intersections_from_rings([iring, jring]) # print(iring.xml) # print(jring.xml) raise Exception("Max iterations reached while removing intersections. Either the above two rings have over 20 intersections or this is a bug.") # Output progess if intersection_removal_progress_output_on.b: num_seg_pairs_checked += num_segs_in_jpath*num_segs_in_ipath if 100 * num_seg_pairs_checked / num_seg_pairs2check > int(100 * current_percent_complete): current_percent_complete = num_seg_pairs_checked / num_seg_pairs2check time_elapsed = current_time() - start_time estimated_time_remaining = (1-current_percent_complete) * time_elapsed / current_percent_complete stuff = (int(100 * current_percent_complete), format_time(estimated_time_remaining), format_time(time_elapsed)) mes = ("[%s%% complete || Est. Remaining Time = %s || " "Elapsed Time = %s]\r" % stuff) intersection_removal_progress_output_on.dprint(mes) # update jring if jpath was trimmed if jringupdated: jring.updatePath(new_jpath) count += 1 # update iring if ipath was trimmed if iringupdated: iring.updatePath(new_ipath) count += 1 return rings, count, overlappingClosedRingPairs
path: noIntersections4rings.py
size: 8,157
External Dependencies Internal Dependencies Often the overlapping part of two paths is so small that when removed, pathXpathIntersections, will still consider the two curves as intersecting. This function is to find the smallest (signed) Tstep such that isNear(path(T),path(T+Tstep))==False. note: stepInPositiveDirection should be True if Tstep should be positve set initial guess as max possible step distance (and set sign of Tstep) T = float(T)check that what we're asking for is possibleFind a lower bound for Tstep by bisection limits Tstep to be > (1/2)**200 T is closer to 0 return cropPath(path,0,T) T is closer to 1 return cropPath(path,T,1) T is closer to 0 than 1 T is closer to 1 than 0T is closer to 0 than 1 return cropPath(path,T+extra,1)T is closer to 1 than 0 return cropPath(path,0,T-extra)removes one intersection at a time until all are gonefind all intersections res = pathXpathIntersections(ipath, jpath, justonemode=True) iT = ipath.t2T(iseg, i_t) jT = jpath.t2T(jseg, j_t)Now find crop the path (if one ring is closed, crop the other ring)then crop jpathjring closed so crop iringiring closed so crop jringboth rings are incomplete might be more intersections to remove, so run again record the current closure statusfor output for progress outputfor progress output while loop to remove intersections between iring and jring (if any exist) raise Exception if while loop terminateded due to reaching max allowed iteratations remove_intersections_from_rings([iring, jring]) print(iring.xml) print(jring.xml) Output progess update jring if jpath was trimmed update iring if ipath was trimmed
nl_size: 1,615
nl_language: en
nl_language_score: 0.8675
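noIntersections4rings.py above leans on three svgpathtools primitives: Path.point, Path.cropped, and Path.intersect (which, with justonemode=True, returns a single ((T1, seg1, t1), (T2, seg2, t2)) parameter pair, as the file itself uses). A minimal sketch of the same "keep the long side" cropping on two straight-line paths, with arbitrary coordinates and without the extra trimming that longPart applies:

from svgpathtools import Line, Path

# Two segments that cross near their midpoints.
ipath = Path(Line(0 + 0j, 10 + 10j))
jpath = Path(Line(0 + 10j, 10 + 0j))

res = ipath.intersect(jpath, justonemode=True)
if res:
    (iT, iseg, i_t), (jT, jseg, j_t) = res
    # Mirror longPart(): keep whichever side of the intersection is longer.
    new_ipath = ipath.cropped(iT, 1) if iT < 1 - iT else ipath.cropped(0, iT)
    print(new_ipath.length())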
# -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- from azure.cli.core.commands.arm import is_valid_resource_id, resource_id, parse_resource_id from azure.cli.core.util import CLIError def get_target_resource_validator(dest, required): def _validator(namespace): name_or_id = getattr(namespace, dest) rg = namespace.resource_group_name res_ns = namespace.namespace parent = namespace.parent res_type = namespace.resource_type usage_error = CLIError('usage error: --{0} ID | --{0} NAME --resource-group NAME ' '--{0}-type TYPE [--{0}-parent PARENT] ' '[--{0}-namespace NAMESPACE]'.format(dest)) if not name_or_id and required: raise usage_error elif name_or_id: if is_valid_resource_id(name_or_id) and any((res_ns, parent, res_type)): raise usage_error elif not is_valid_resource_id(name_or_id): from azure.cli.core.commands.client_factory import get_subscription_id if res_type and '/' in res_type: res_ns = res_ns or res_type.rsplit('/', 1)[0] res_type = res_type.rsplit('/', 1)[1] if not all((rg, res_ns, res_type, name_or_id)): raise usage_error setattr(namespace, dest, '/subscriptions/{}/resourceGroups/{}/providers/{}/{}{}/{}'.format( get_subscription_id(), rg, res_ns, parent + '/' if parent else '', res_type, name_or_id)) del namespace.namespace del namespace.parent del namespace.resource_type return _validator def validate_diagnostic_settings(namespace): from azure.cli.core.commands.client_factory import get_subscription_id resource_group_error = "--resource-group is required when name is provided for "\ "storage account or workspace or service bus namespace and rule. 
" if namespace.namespace or namespace.rule_name: if namespace.namespace is None: raise CLIError(resource_group_error) if namespace.rule_name is None: raise CLIError(resource_group_error) if namespace.resource_group is None: raise CLIError(resource_group_error) if not is_valid_resource_id(namespace.namespace): namespace.service_bus_rule_id = resource_id(subscription=get_subscription_id(), resource_group=namespace.resource_group, namespace='microsoft.ServiceBus', type='namespaces', name=namespace.namespace, child_type='AuthorizationRules', child_name=namespace.rule_name) else: resource_dict = parse_resource_id(namespace.namespace) namespace.service_bus_rule_id = resource_id(subscription=resource_dict['subscription'], resource_group=resource_dict['resource_group'], namespace=resource_dict['namespace'], type=resource_dict['type'], name=resource_dict['name'], child_type='AuthorizationRules', child_name=namespace.rule_name) if namespace.storage_account and not is_valid_resource_id(namespace.storage_account): if namespace.resource_group is None: raise CLIError(resource_group_error) namespace.storage_account = resource_id(subscription=get_subscription_id(), resource_group=namespace.resource_group, namespace='microsoft.Storage', type='storageAccounts', name=namespace.storage_account) if namespace.workspace and not is_valid_resource_id(namespace.workspace): if namespace.resource_group is None: raise CLIError(resource_group_error) namespace.workspace = resource_id(subscription=get_subscription_id(), resource_group=namespace.resource_group, namespace='microsoft.OperationalInsights', type='workspaces', name=namespace.workspace) _validate_tags(namespace) def _validate_tags(namespace): """ Extracts multiple space-separated tags in key[=value] format """ if isinstance(namespace.tags, list): tags_dict = {} for item in namespace.tags: tags_dict.update(_validate_tag(item)) namespace.tags = tags_dict def _validate_tag(string): """ Extracts a single tag in key[=value] format """ result = {} if string: comps = string.split('=', 1) result = {comps[0]: comps[1]} if len(comps) > 1 else {string: ''} return result def process_action_group_detail_for_creation(namespace): from azure.mgmt.monitor.models import ActionGroupResource, EmailReceiver, SmsReceiver, WebhookReceiver _validate_tags(namespace) ns = vars(namespace) name = ns['action_group_name'] receivers = ns.pop('receivers') or [] action_group_resource_properties = { 'location': 'global', # as of now, 'global' is the only available location for action group 'group_short_name': ns.pop('short_name') or name[:12], # '12' is the short name length limitation 'email_receivers': [r for r in receivers if isinstance(r, EmailReceiver)], 'sms_receivers': [r for r in receivers if isinstance(r, SmsReceiver)], 'webhook_receivers': [r for r in receivers if isinstance(r, WebhookReceiver)], 'tags': ns.get('tags') or None } ns['action_group'] = ActionGroupResource(**action_group_resource_properties)
path: src/command_modules/azure-cli-monitor/azure/cli/command_modules/monitor/validators.py
size: 6,645
Extracts a single tag in key[=value] format Extracts multiple space-separated tags in key[=value] format -------------------------------------------------------------------------------------------- Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License. See License.txt in the project root for license information. -------------------------------------------------------------------------------------------- as of now, 'global' is the only available location for action group '12' is the short name length limitation
nl_size: 554
nl_language: en
nl_language_score: 0.557131
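The _validate_tag/_validate_tags pair above reduces to one parsing rule: split each space-separated token on the first '=' and default the value to an empty string. The same rule, sketched outside the CLI namespace machinery:

def parse_tags(tokens):
    """Parse key[=value] tokens into a dict, matching the validators above."""
    tags = {}
    for token in tokens:
        if not token:
            continue
        key, sep, value = token.partition("=")
        tags[key] = value if sep else ""
    return tags


print(parse_tags(["env=prod", "owner=team-a", "standalone"]))
# -> {'env': 'prod', 'owner': 'team-a', 'standalone': ''}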
import numbers import warnings import torch from ignite.contrib.handlers.base_logger import BaseLogger, BaseOptimizerParamsHandler, BaseOutputHandler, \ BaseWeightsScalarHandler, BaseWeightsHistHandler __all__ = ['TensorboardLogger', 'OptimizerParamsHandler', 'OutputHandler', 'WeightsScalarHandler', 'WeightsHistHandler', 'GradsScalarHandler', 'GradsHistHandler'] class OutputHandler(BaseOutputHandler): """Helper handler to log engine's output and/or metrics Examples: .. code-block:: python from ignite.contrib.handlers.tensorboard_logger import * # Create a logger tb_logger = TensorboardLogger(log_dir="experiments/tb_logs") # Attach the logger to the evaluator on the validation dataset and log NLL, Accuracy metrics after # each epoch. We setup `another_engine=trainer` to take the epoch of the `trainer` tb_logger.attach(evaluator, log_handler=OutputHandler(tag="validation", metric_names=["nll", "accuracy"], another_engine=trainer), event_name=Events.EPOCH_COMPLETED) Example with CustomPeriodicEvent, where model is evaluated every 500 iterations: .. code-block:: python from ignite.contrib.handlers import CustomPeriodicEvent cpe = CustomPeriodicEvent(n_iterations=500) cpe.attach(trainer) @trainer.on(cpe.Events.ITERATIONS_500_COMPLETED) def evaluate(engine): evaluator.run(validation_set, max_epochs=1) from ignite.contrib.handlers.tensorboard_logger import * tb_logger = TensorboardLogger(log_dir="experiments/tb_logs") def global_step_transform(*args, **kwargs): return trainer.state.iteration # Attach the logger to the evaluator on the validation dataset and log NLL, Accuracy metrics after # every 500 iterations. Since evaluator engine does not have CustomPeriodicEvent attached to it, we # provide a global_step_transform to return the trainer.state.iteration for the global_step, each time # evaluator metrics are plotted on Tensorboard. tb_logger.attach(evaluator, log_handler=OutputHandler(tag="validation", metrics=["nll", "accuracy"], global_step_transform=global_step_transform), event_name=Events.EPOCH_COMPLETED) Args: tag (str): common title for all produced plots. For example, 'training' metric_names (list of str, optional): list of metric names to plot or a string "all" to plot all available metrics. output_transform (callable, optional): output transform function to prepare `engine.state.output` as a number. For example, `output_transform = lambda output: output` This function can also return a dictionary, e.g `{'loss': loss1, `another_loss`: loss2}` to label the plot with corresponding keys. another_engine (Engine): another engine to use to provide the value of event. Typically, user can provide the trainer if this handler is attached to an evaluator and thus it logs proper trainer's epoch/iteration value. global_step_transform (callable, optional): global step transform function to output a desired global step. Output of function should be an integer. Default is None, global_step based on attached engine. If provided, uses function output as global_step. 
""" def __init__(self, tag, metric_names=None, output_transform=None, another_engine=None, global_step_transform=None): super(OutputHandler, self).__init__(tag, metric_names, output_transform, another_engine, global_step_transform) def __call__(self, engine, logger, event_name): if not isinstance(logger, TensorboardLogger): raise RuntimeError("Handler 'OutputHandler' works only with TensorboardLogger") metrics = self._setup_output_metrics(engine) engine = engine if not hasattr(self, 'another_engine') or self.another_engine is None else self.another_engine global_step = self.global_step_transform(engine, event_name) if not isinstance(global_step, int): raise TypeError("global_step must be int, got {}." " Please check the output of global_step_transform.".format(type(global_step))) for key, value in metrics.items(): if isinstance(value, numbers.Number) or \ isinstance(value, torch.Tensor) and value.ndimension() == 0: logger.writer.add_scalar("{}/{}".format(self.tag, key), value, global_step) elif isinstance(value, torch.Tensor) and value.ndimension() == 1: for i, v in enumerate(value): logger.writer.add_scalar("{}/{}/{}".format(self.tag, key, i), v.item(), global_step) else: warnings.warn("TensorboardLogger output_handler can not log " "metrics value type {}".format(type(value))) class OptimizerParamsHandler(BaseOptimizerParamsHandler): """Helper handler to log optimizer parameters Examples: .. code-block:: python from ignite.contrib.handlers.tensorboard_logger import * # Create a logger tb_logger = TensorboardLogger(log_dir="experiments/tb_logs") # Attach the logger to the trainer to log optimizer's parameters, e.g. learning rate at each iteration tb_logger.attach(trainer, log_handler=OptimizerParamsHandler(optimizer), event_name=Events.ITERATION_STARTED) Args: optimizer (torch.optim.Optimizer): torch optimizer which parameters to log param_name (str): parameter name tag (str, optional): common title for all produced plots. For example, 'generator' """ def __init__(self, optimizer, param_name="lr", tag=None, global_step_transform=None): super(OptimizerParamsHandler, self).__init__(optimizer, param_name, tag) if global_step_transform is None: def global_step_transform(engine, event_name): return engine.state.get_event_attrib_value(event_name) self.global_step_transform = global_step_transform def __call__(self, engine, logger, event_name): if not isinstance(logger, TensorboardLogger): raise RuntimeError("Handler 'OptimizerParamsHandler' works only with TensorboardLogger") global_step = self.global_step_transform(engine, event_name) tag_prefix = "{}/".format(self.tag) if self.tag else "" params = {"{}{}/group_{}".format(tag_prefix, self.param_name, i): float(param_group[self.param_name]) for i, param_group in enumerate(self.optimizer.param_groups)} for k, v in params.items(): logger.writer.add_scalar(k, v, global_step) class WeightsScalarHandler(BaseWeightsScalarHandler): """Helper handler to log model's weights as scalars. Handler iterates over named parameters of the model, applies reduction function to each parameter produce a scalar and then logs the scalar. Examples: .. 
code-block:: python from ignite.contrib.handlers.tensorboard_logger import * # Create a logger tb_logger = TensorboardLogger(log_dir="experiments/tb_logs") # Attach the logger to the trainer to log model's weights norm after each iteration tb_logger.attach(trainer, log_handler=WeightsScalarHandler(model, reduction=torch.norm), event_name=Events.ITERATION_COMPLETED) Args: model (torch.nn.Module): model to log weights reduction (callable): function to reduce parameters into scalar """ def __init__(self, model, reduction=torch.norm): super(WeightsScalarHandler, self).__init__(model, reduction) def __call__(self, engine, logger, event_name): if not isinstance(logger, TensorboardLogger): raise RuntimeError("Handler 'WeightsScalarHandler' works only with TensorboardLogger") global_step = engine.state.get_event_attrib_value(event_name) for name, p in self.model.named_parameters(): if p.grad is None: continue name = name.replace('.', '/') logger.writer.add_scalar("weights_{}/{}".format(self.reduction.__name__, name), self.reduction(p.data), global_step) class WeightsHistHandler(BaseWeightsHistHandler): """Helper handler to log model's weights as histograms. Examples: .. code-block:: python from ignite.contrib.handlers.tensorboard_logger import * # Create a logger tb_logger = TensorboardLogger(log_dir="experiments/tb_logs") # Attach the logger to the trainer to log model's weights norm after each iteration tb_logger.attach(trainer, log_handler=WeightsHistHandler(model), event_name=Events.ITERATION_COMPLETED) Args: model (torch.nn.Module): model to log weights """ def __init__(self, model, tag=None, global_step_transform=None): super(WeightsHistHandler, self).__init__(model) self.tag = tag if global_step_transform is None: def global_step_transform(engine, event_name): return engine.state.get_event_attrib_value(event_name) self.global_step_transform = global_step_transform def __call__(self, engine, logger, event_name): if not isinstance(logger, TensorboardLogger): raise RuntimeError("Handler 'WeightsHistHandler' works only with TensorboardLogger") global_step = self.global_step_transform(engine, event_name) tag_prefix = "{}/".format(self.tag) if self.tag else "" for name, p in self.model.named_parameters(): if p.grad is None: continue name = name.replace('.', '/') logger.writer.add_histogram(tag="{}weights/{}".format(tag_prefix, name), values=p.data.detach().cpu().numpy(), global_step=global_step) class GradsScalarHandler(BaseWeightsScalarHandler): """Helper handler to log model's gradients as scalars. Handler iterates over the gradients of named parameters of the model, applies reduction function to each parameter produce a scalar and then logs the scalar. Examples: .. 
code-block:: python from ignite.contrib.handlers.tensorboard_logger import * # Create a logger tb_logger = TensorboardLogger(log_dir="experiments/tb_logs") # Attach the logger to the trainer to log model's weights norm after each iteration tb_logger.attach(trainer, log_handler=GradsScalarHandler(model, reduction=torch.norm), event_name=Events.ITERATION_COMPLETED) Args: model (torch.nn.Module): model to log weights reduction (callable): function to reduce parameters into scalar """ def __init__(self, model, reduction=torch.norm): super(GradsScalarHandler, self).__init__(model, reduction) def __call__(self, engine, logger, event_name): if not isinstance(logger, TensorboardLogger): raise RuntimeError("Handler 'GradsScalarHandler' works only with TensorboardLogger") global_step = engine.state.get_event_attrib_value(event_name) for name, p in self.model.named_parameters(): if p.grad is None: continue name = name.replace('.', '/') logger.writer.add_scalar("grads_{}/{}".format(self.reduction.__name__, name), self.reduction(p.grad), global_step) class GradsHistHandler(BaseWeightsHistHandler): """Helper handler to log model's gradients as histograms. Examples: .. code-block:: python from ignite.contrib.handlers.tensorboard_logger import * # Create a logger tb_logger = TensorboardLogger(log_dir="experiments/tb_logs") # Attach the logger to the trainer to log model's weights norm after each iteration tb_logger.attach(trainer, log_handler=GradsHistHandler(model), event_name=Events.ITERATION_COMPLETED) Args: model (torch.nn.Module): model to log weights """ def __init__(self, model): super(GradsHistHandler, self).__init__(model) def __call__(self, engine, logger, event_name): if not isinstance(logger, TensorboardLogger): raise RuntimeError("Handler 'GradsHistHandler' works only with TensorboardLogger") global_step = engine.state.get_event_attrib_value(event_name) for name, p in self.model.named_parameters(): if p.grad is None: continue name = name.replace('.', '/') logger.writer.add_histogram(tag="grads/{}".format(name), values=p.grad.detach().cpu().numpy(), global_step=global_step) class TensorboardLogger(BaseLogger): """ TensorBoard handler to log metrics, model/optimizer parameters, gradients during the training and validation. This class requires `tensorboardX <https://github.com/lanpa/tensorboardX>`_ package to be installed: .. code-block:: bash pip install tensorboardX Args: log_dir (str): path to the directory where to log. Examples: .. code-block:: python from ignite.contrib.handlers.tensorboard_logger import * # Create a logger tb_logger = TensorboardLogger(log_dir="experiments/tb_logs") # Attach the logger to the trainer to log training loss at each iteration tb_logger.attach(trainer, log_handler=OutputHandler(tag="training", output_transform=lambda loss: {'loss': loss}), event_name=Events.ITERATION_COMPLETED) # Attach the logger to the evaluator on the training dataset and log NLL, Accuracy metrics after each epoch # We setup `another_engine=trainer` to take the epoch of the `trainer` instead of `train_evaluator`. tb_logger.attach(train_evaluator, log_handler=OutputHandler(tag="training", metric_names=["nll", "accuracy"], another_engine=trainer), event_name=Events.EPOCH_COMPLETED) # Attach the logger to the evaluator on the validation dataset and log NLL, Accuracy metrics after # each epoch. We setup `another_engine=trainer` to take the epoch of the `trainer` instead of `evaluator`. 
tb_logger.attach(evaluator, log_handler=OutputHandler(tag="validation", metric_names=["nll", "accuracy"], another_engine=trainer), event_name=Events.EPOCH_COMPLETED) # Attach the logger to the trainer to log optimizer's parameters, e.g. learning rate at each iteration tb_logger.attach(trainer, log_handler=OptimizerParamsHandler(optimizer), event_name=Events.ITERATION_STARTED) # Attach the logger to the trainer to log model's weights norm after each iteration tb_logger.attach(trainer, log_handler=WeightsScalarHandler(model), event_name=Events.ITERATION_COMPLETED) # Attach the logger to the trainer to log model's weights as a histogram after each epoch tb_logger.attach(trainer, log_handler=WeightsHistHandler(model), event_name=Events.EPOCH_COMPLETED) # Attach the logger to the trainer to log model's gradients norm after each iteration tb_logger.attach(trainer, log_handler=GradsScalarHandler(model), event_name=Events.ITERATION_COMPLETED) # Attach the logger to the trainer to log model's gradients as a histogram after each epoch tb_logger.attach(trainer, log_handler=GradsHistHandler(model), event_name=Events.EPOCH_COMPLETED) # We need to close the logger with we are done tb_logger.close() It is also possible to use the logger as context manager: .. code-block:: python from ignite.contrib.handlers.tensorboard_logger import * with TensorboardLogger(log_dir="experiments/tb_logs") as tb_logger: trainer = Engine(update_fn) # Attach the logger to the trainer to log training loss at each iteration tb_logger.attach(trainer, log_handler=OutputHandler(tag="training", output_transform=lambda loss: {'loss': loss}), event_name=Events.ITERATION_COMPLETED) """ def __init__(self, log_dir): try: from torch.utils.tensorboard import SummaryWriter except ImportError: try: from tensorboardX import SummaryWriter except ImportError: raise RuntimeError("This contrib module requires tensorboardX to be installed. " "Please install it with command: \n pip install tensorboardX") try: self.writer = SummaryWriter(log_dir) except TypeError as err: if "type object got multiple values for keyword argument 'logdir'" == str(err): self.writer = SummaryWriter(log_dir=log_dir) warnings.warn('tensorboardX version < 1.7 will not be supported ' 'after ignite 0.3.0; please upgrade', DeprecationWarning) else: raise err def close(self): self.writer.close()
path: helper/custom_ignite_handlers/tensorboard_logger.py
size: 19,069
Helper handler to log model's gradients as histograms. Examples: .. code-block:: python from ignite.contrib.handlers.tensorboard_logger import * # Create a logger tb_logger = TensorboardLogger(log_dir="experiments/tb_logs") # Attach the logger to the trainer to log model's weights norm after each iteration tb_logger.attach(trainer, log_handler=GradsHistHandler(model), event_name=Events.ITERATION_COMPLETED) Args: model (torch.nn.Module): model to log weights Helper handler to log model's gradients as scalars. Handler iterates over the gradients of named parameters of the model, applies reduction function to each parameter produce a scalar and then logs the scalar. Examples: .. code-block:: python from ignite.contrib.handlers.tensorboard_logger import * # Create a logger tb_logger = TensorboardLogger(log_dir="experiments/tb_logs") # Attach the logger to the trainer to log model's weights norm after each iteration tb_logger.attach(trainer, log_handler=GradsScalarHandler(model, reduction=torch.norm), event_name=Events.ITERATION_COMPLETED) Args: model (torch.nn.Module): model to log weights reduction (callable): function to reduce parameters into scalar Helper handler to log optimizer parameters Examples: .. code-block:: python from ignite.contrib.handlers.tensorboard_logger import * # Create a logger tb_logger = TensorboardLogger(log_dir="experiments/tb_logs") # Attach the logger to the trainer to log optimizer's parameters, e.g. learning rate at each iteration tb_logger.attach(trainer, log_handler=OptimizerParamsHandler(optimizer), event_name=Events.ITERATION_STARTED) Args: optimizer (torch.optim.Optimizer): torch optimizer which parameters to log param_name (str): parameter name tag (str, optional): common title for all produced plots. For example, 'generator' Helper handler to log engine's output and/or metrics Examples: .. code-block:: python from ignite.contrib.handlers.tensorboard_logger import * # Create a logger tb_logger = TensorboardLogger(log_dir="experiments/tb_logs") # Attach the logger to the evaluator on the validation dataset and log NLL, Accuracy metrics after # each epoch. We setup `another_engine=trainer` to take the epoch of the `trainer` tb_logger.attach(evaluator, log_handler=OutputHandler(tag="validation", metric_names=["nll", "accuracy"], another_engine=trainer), event_name=Events.EPOCH_COMPLETED) Example with CustomPeriodicEvent, where model is evaluated every 500 iterations: .. code-block:: python from ignite.contrib.handlers import CustomPeriodicEvent cpe = CustomPeriodicEvent(n_iterations=500) cpe.attach(trainer) @trainer.on(cpe.Events.ITERATIONS_500_COMPLETED) def evaluate(engine): evaluator.run(validation_set, max_epochs=1) from ignite.contrib.handlers.tensorboard_logger import * tb_logger = TensorboardLogger(log_dir="experiments/tb_logs") def global_step_transform(*args, **kwargs): return trainer.state.iteration # Attach the logger to the evaluator on the validation dataset and log NLL, Accuracy metrics after # every 500 iterations. Since evaluator engine does not have CustomPeriodicEvent attached to it, we # provide a global_step_transform to return the trainer.state.iteration for the global_step, each time # evaluator metrics are plotted on Tensorboard. tb_logger.attach(evaluator, log_handler=OutputHandler(tag="validation", metrics=["nll", "accuracy"], global_step_transform=global_step_transform), event_name=Events.EPOCH_COMPLETED) Args: tag (str): common title for all produced plots. 
For example, 'training' metric_names (list of str, optional): list of metric names to plot or a string "all" to plot all available metrics. output_transform (callable, optional): output transform function to prepare `engine.state.output` as a number. For example, `output_transform = lambda output: output` This function can also return a dictionary, e.g `{'loss': loss1, `another_loss`: loss2}` to label the plot with corresponding keys. another_engine (Engine): another engine to use to provide the value of event. Typically, user can provide the trainer if this handler is attached to an evaluator and thus it logs proper trainer's epoch/iteration value. global_step_transform (callable, optional): global step transform function to output a desired global step. Output of function should be an integer. Default is None, global_step based on attached engine. If provided, uses function output as global_step. TensorBoard handler to log metrics, model/optimizer parameters, gradients during the training and validation. This class requires `tensorboardX <https://github.com/lanpa/tensorboardX>`_ package to be installed: .. code-block:: bash pip install tensorboardX Args: log_dir (str): path to the directory where to log. Examples: .. code-block:: python from ignite.contrib.handlers.tensorboard_logger import * # Create a logger tb_logger = TensorboardLogger(log_dir="experiments/tb_logs") # Attach the logger to the trainer to log training loss at each iteration tb_logger.attach(trainer, log_handler=OutputHandler(tag="training", output_transform=lambda loss: {'loss': loss}), event_name=Events.ITERATION_COMPLETED) # Attach the logger to the evaluator on the training dataset and log NLL, Accuracy metrics after each epoch # We setup `another_engine=trainer` to take the epoch of the `trainer` instead of `train_evaluator`. tb_logger.attach(train_evaluator, log_handler=OutputHandler(tag="training", metric_names=["nll", "accuracy"], another_engine=trainer), event_name=Events.EPOCH_COMPLETED) # Attach the logger to the evaluator on the validation dataset and log NLL, Accuracy metrics after # each epoch. We setup `another_engine=trainer` to take the epoch of the `trainer` instead of `evaluator`. tb_logger.attach(evaluator, log_handler=OutputHandler(tag="validation", metric_names=["nll", "accuracy"], another_engine=trainer), event_name=Events.EPOCH_COMPLETED) # Attach the logger to the trainer to log optimizer's parameters, e.g. learning rate at each iteration tb_logger.attach(trainer, log_handler=OptimizerParamsHandler(optimizer), event_name=Events.ITERATION_STARTED) # Attach the logger to the trainer to log model's weights norm after each iteration tb_logger.attach(trainer, log_handler=WeightsScalarHandler(model), event_name=Events.ITERATION_COMPLETED) # Attach the logger to the trainer to log model's weights as a histogram after each epoch tb_logger.attach(trainer, log_handler=WeightsHistHandler(model), event_name=Events.EPOCH_COMPLETED) # Attach the logger to the trainer to log model's gradients norm after each iteration tb_logger.attach(trainer, log_handler=GradsScalarHandler(model), event_name=Events.ITERATION_COMPLETED) # Attach the logger to the trainer to log model's gradients as a histogram after each epoch tb_logger.attach(trainer, log_handler=GradsHistHandler(model), event_name=Events.EPOCH_COMPLETED) # We need to close the logger with we are done tb_logger.close() It is also possible to use the logger as context manager: .. 
code-block:: python from ignite.contrib.handlers.tensorboard_logger import * with TensorboardLogger(log_dir="experiments/tb_logs") as tb_logger: trainer = Engine(update_fn) # Attach the logger to the trainer to log training loss at each iteration tb_logger.attach(trainer, log_handler=OutputHandler(tag="training", output_transform=lambda loss: {'loss': loss}), event_name=Events.ITERATION_COMPLETED) Helper handler to log model's weights as histograms. Examples: .. code-block:: python from ignite.contrib.handlers.tensorboard_logger import * # Create a logger tb_logger = TensorboardLogger(log_dir="experiments/tb_logs") # Attach the logger to the trainer to log model's weights norm after each iteration tb_logger.attach(trainer, log_handler=WeightsHistHandler(model), event_name=Events.ITERATION_COMPLETED) Args: model (torch.nn.Module): model to log weights Helper handler to log model's weights as scalars. Handler iterates over named parameters of the model, applies reduction function to each parameter produce a scalar and then logs the scalar. Examples: .. code-block:: python from ignite.contrib.handlers.tensorboard_logger import * # Create a logger tb_logger = TensorboardLogger(log_dir="experiments/tb_logs") # Attach the logger to the trainer to log model's weights norm after each iteration tb_logger.attach(trainer, log_handler=WeightsScalarHandler(model, reduction=torch.norm), event_name=Events.ITERATION_COMPLETED) Args: model (torch.nn.Module): model to log weights reduction (callable): function to reduce parameters into scalar
nl_size: 10,630
nl_language: en
nl_language_score: 0.628539
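The docstrings in the handler module above already describe the intended wiring; condensed into one short usage sketch (it assumes trainer and evaluator ignite Engines plus a model and optimizer already exist, and that the vendored module is importable from the helper/custom_ignite_handlers path shown above):

from ignite.engine import Events
from helper.custom_ignite_handlers.tensorboard_logger import (
    TensorboardLogger, OutputHandler, OptimizerParamsHandler, WeightsScalarHandler,
)

tb_logger = TensorboardLogger(log_dir="experiments/tb_logs")

# Training loss at every iteration.
tb_logger.attach(trainer,
                 log_handler=OutputHandler(tag="training",
                                           output_transform=lambda loss: {'loss': loss}),
                 event_name=Events.ITERATION_COMPLETED)

# Validation metrics once per epoch, reported against the trainer's epoch counter.
tb_logger.attach(evaluator,
                 log_handler=OutputHandler(tag="validation",
                                           metric_names=["nll", "accuracy"],
                                           another_engine=trainer),
                 event_name=Events.EPOCH_COMPLETED)

# Learning rate and weight norms from the trainer.
tb_logger.attach(trainer,
                 log_handler=OptimizerParamsHandler(optimizer),
                 event_name=Events.ITERATION_STARTED)
tb_logger.attach(trainer,
                 log_handler=WeightsScalarHandler(model),
                 event_name=Events.ITERATION_COMPLETED)

tb_logger.close()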
# Copyright 2013 The Emscripten Authors. All rights reserved. # Emscripten is available under two separate licenses, the MIT license and the # University of Illinois/NCSA Open Source License. Both these licenses can be # found in the LICENSE file. from __future__ import print_function import glob import hashlib import json import os import random import re import shutil import sys import time import unittest from subprocess import PIPE, STDOUT from functools import wraps from textwrap import dedent if __name__ == '__main__': raise Exception('do not run this file directly; do something like: tests/runner.py') from tools.shared import run_js, run_process, try_delete from tools.shared import NODE_JS, V8_ENGINE, JS_ENGINES, SPIDERMONKEY_ENGINE, PYTHON, EMCC, EMAR, WINDOWS, MACOS, AUTODEBUGGER, LLVM_ROOT from tools import jsrun, shared, building from runner import RunnerCore, path_from_root from runner import skip_if, no_wasm_backend, no_fastcomp, needs_dlfcn, no_windows, no_asmjs, is_slow_test, create_test_file, parameterized from runner import js_engines_modify, wasm_engines_modify, env_modify, with_env_modify # decorators for limiting which modes a test can run in def wasm_simd(f): def decorated(self): if not self.is_wasm_backend(): self.skipTest('wasm simd not compatible with asm.js or asm2wasm') if not V8_ENGINE or V8_ENGINE not in JS_ENGINES: self.skipTest('wasm simd only supported in d8 for now') if self.is_wasm_backend() and not self.get_setting('WASM'): self.skipTest('wasm2js only supports MVP for now') self.emcc_args.append('-msimd128') self.emcc_args.append('-fno-lax-vector-conversions') with js_engines_modify([V8_ENGINE + ['--experimental-wasm-simd']]): f(self) return decorated def bleeding_edge_wasm_backend(f): def decorated(self): if not self.is_wasm_backend(): self.skipTest('only works in wasm backend') if not V8_ENGINE or V8_ENGINE not in JS_ENGINES: self.skipTest('only works in d8 for now') if self.is_wasm_backend() and not self.get_setting('WASM'): self.skipTest('wasm2js only supports MVP for now') with js_engines_modify([V8_ENGINE]): f(self) return decorated def also_with_wasm_bigint(f): def decorated(self): self.set_setting('WASM_BIGINT', 0) f(self) if self.is_wasm_backend() and self.get_setting('WASM'): self.set_setting('WASM_BIGINT', 1) with js_engines_modify([NODE_JS + ['--experimental-wasm-bigint']]): f(self) return decorated # without EMTEST_ALL_ENGINES set we only run tests in a single VM by # default. in some tests we know that cross-VM differences may happen and # so are worth testing, and they should be marked with this decorator def all_engines(f): def decorated(self): old = self.use_all_engines self.use_all_engines = True try: f(self) finally: self.use_all_engines = old return decorated # Tests exception handling in emscripten exception handling mode, and if # possible, new wasm EH mode. 
def with_both_exception_handling(f): def decorated(self): self.set_setting('DISABLE_EXCEPTION_CATCHING', 0) f(self) self.set_setting('DISABLE_EXCEPTION_CATCHING', 1) # Wasm EH is currently supported only in wasm backend and V8 if self.is_wasm_backend() and V8_ENGINE and \ V8_ENGINE in JS_ENGINES and self.get_setting('WASM'): self.emcc_args.append('-fwasm-exceptions') with js_engines_modify([V8_ENGINE + ['--experimental-wasm-eh']]): f(self) return decorated def no_wasm(note=''): assert not callable(note) def decorated(f): return skip_if(f, 'is_wasm', note) return decorated def no_wasm2js(note=''): assert not callable(note) def decorated(f): return skip_if(f, 'is_wasm2js', note) return decorated # Async wasm compilation can't work in some tests, they are set up synchronously def sync(f): assert callable(f) def decorated(self): if self.get_setting('WASM') or self.is_wasm_backend(): self.emcc_args += ['-s', 'WASM_ASYNC_COMPILATION=0'] # test is set up synchronously f(self) return decorated def also_with_noderawfs(func): def decorated(self): orig_args = self.emcc_args[:] func(self) print('noderawfs') self.emcc_args = orig_args + ['-s', 'NODERAWFS=1', '-DNODERAWFS'] with js_engines_modify([NODE_JS]): func(self) return decorated def can_do_standalone(self): return self.is_wasm_backend() and self.get_setting('WASM') and \ not self.get_setting('SAFE_STACK') and \ '-fsanitize=address' not in self.emcc_args # Also run the test with -s STANDALONE. If we have wasm runtimes, also run in # them (regardless we also check that the js+wasm combo works in js vms). def also_with_standalone_wasm(func): def decorated(self): func(self) # Standalone mode is only supported in the wasm backend, and not in all # modes there. if can_do_standalone(self): print('standalone') self.set_setting('STANDALONE_WASM', 1) # we will not legalize the JS ffi interface, so we must use BigInt # support in order for JS to have a chance to run this without trapping # when it sees an i64 on the ffi. self.set_setting('WASM_BIGINT', 1) with js_engines_modify([NODE_JS + ['--experimental-wasm-bigint']]): func(self) return decorated # Similar to also_with_standalone_wasm, but suitable for tests that cannot # run in a wasm VM yet, as they are not 100% standalone. We can still # run them with the JS code though. def also_with_impure_standalone_wasm(func): def decorated(self): func(self) # Standalone mode is only supported in the wasm backend, and not in all # modes there. if can_do_standalone(self): print('standalone (impure; no wasm runtimes)') self.set_setting('STANDALONE_WASM', 1) # we will not legalize the JS ffi interface, so we must use BigInt # support in order for JS to have a chance to run this without trapping # when it sees an i64 on the ffi. self.set_setting('WASM_BIGINT', 1) with wasm_engines_modify([]): with js_engines_modify([NODE_JS + ['--experimental-wasm-bigint']]): func(self) return decorated # Similar to also_with_standalone_wasm, but suitable for tests that can *only* # run in a wasm VM, or in non-standalone mode, but not in standalone mode with # our JS. def also_with_only_standalone_wasm(func): def decorated(self): func(self) # Standalone mode is only supported in the wasm backend, and not in all # modes there. 
if can_do_standalone(self): print('standalone (only; no js runtimes)') self.set_setting('STANDALONE_WASM', 1) with js_engines_modify([]): func(self) return decorated def node_pthreads(f): def decorated(self): self.set_setting('USE_PTHREADS', 1) if not self.is_wasm_backend(): self.skipTest('node pthreads only supported on wasm backend') if not self.get_setting('WASM'): self.skipTest("pthreads doesn't work in non-wasm yet") if '-fsanitize=address' in self.emcc_args: self.skipTest('asan ends up using atomics that are not yet supported in node 12') with js_engines_modify([NODE_JS + ['--experimental-wasm-threads', '--experimental-wasm-bulk-memory']]): f(self) return decorated # A simple check whether the compiler arguments cause optimization. def is_optimizing(args): return '-O' in str(args) and '-O0' not in args def no_optimize(note=''): assert not callable(note) def decorator(func): assert callable(func) def decorated(self): if is_optimizing(self.emcc_args): self.skipTest(note) func(self) return decorated return decorator def needs_make(note=''): assert not callable(note) if WINDOWS: return unittest.skip('Tool not available on Windows bots (%s)' % note) return lambda f: f def no_asan(note): assert not callable(note) def decorator(f): assert callable(f) @wraps(f) def decorated(self, *args, **kwargs): if '-fsanitize=address' in self.emcc_args: self.skipTest(note) f(self, *args, **kwargs) return decorated return decorator def no_lsan(note): assert not callable(note) def decorator(f): assert callable(f) @wraps(f) def decorated(self, *args, **kwargs): if '-fsanitize=leak' in self.emcc_args: self.skipTest(note) f(self, *args, **kwargs) return decorated return decorator def no_minimal_runtime(note): assert not callable(note) def decorator(f): assert callable(f) @wraps(f) def decorated(self, *args, **kwargs): if 'MINIMAL_RUNTIME=1' in self.emcc_args or self.get_setting('MINIMAL_RUNTIME'): self.skipTest(note) f(self, *args, **kwargs) return decorated return decorator class TestCoreBase(RunnerCore): def is_wasm2js(self): return self.is_wasm_backend() and not self.get_setting('WASM') # whether the test mode supports duplicate function elimination in js def supports_js_dfe(self): # wasm does this when optimizing anyhow, and the wasm backend always # optimizes the wasm even if it does wasm2js later if self.is_wasm() or self.is_wasm_backend(): return False supported_opt_levels = ['-O2', '-O3', '-Oz', '-Os'] for opt_level in supported_opt_levels: if opt_level in self.emcc_args: return True return False # Use closure in some tests for some additional coverage def maybe_closure(self): if '-g' not in self.emcc_args and ('-O2' in self.emcc_args or '-Os' in self.emcc_args): self.emcc_args += ['--closure', '1'] return True return False def verify_in_strict_mode(self, filename): with open(filename) as infile: js = infile.read() filename += '.strict.js' with open(filename, 'w') as outfile: outfile.write('"use strict";\n' + js) run_js(filename) def get_bullet_library(self, use_cmake): if use_cmake: configure_commands = ['cmake', '.'] configure_args = ['-DBUILD_DEMOS=OFF', '-DBUILD_EXTRAS=OFF', '-DUSE_GLUT=OFF'] # Depending on whether 'configure' or 'cmake' is used to build, Bullet # places output files in different directory structures. 
generated_libs = [os.path.join('src', 'BulletDynamics', 'libBulletDynamics.a'), os.path.join('src', 'BulletCollision', 'libBulletCollision.a'), os.path.join('src', 'LinearMath', 'libLinearMath.a')] else: configure_commands = ['sh', './configure'] # Force a nondefault --host= so that the configure script will interpret # that we are doing cross-compilation # and skip attempting to run the generated executable with './a.out', # which would fail since we are building a .js file. configure_args = ['--disable-shared', '--host=i686-pc-linux-gnu', '--disable-demos', '--disable-dependency-tracking'] generated_libs = [os.path.join('src', '.libs', 'libBulletDynamics.a'), os.path.join('src', '.libs', 'libBulletCollision.a'), os.path.join('src', '.libs', 'libLinearMath.a')] return self.get_library(os.path.join('third_party', 'bullet'), generated_libs, configure=configure_commands, configure_args=configure_args, cache_name_extra=configure_commands[0]) @also_with_standalone_wasm def test_hello_world(self): self.do_run_in_out_file_test('tests', 'core', 'test_hello_world') # must not emit this unneeded internal thing self.assertNotContained('EMSCRIPTEN_GENERATED_FUNCTIONS', open('src.c.o.js').read()) @sync def test_wasm_synchronous_compilation(self): self.set_setting('STRICT_JS') self.do_run_in_out_file_test('tests', 'core', 'test_hello_world') def test_intvars(self): self.do_run_in_out_file_test('tests', 'core', 'test_intvars') def test_sintvars(self): self.do_run_in_out_file_test('tests', 'core', 'test_sintvars', force_c=True) def test_int53(self): self.emcc_args += ['-s', 'DEFAULT_LIBRARY_FUNCS_TO_INCLUDE=[$convertI32PairToI53,$convertU32PairToI53,$readI53FromU64,$readI53FromI64,$writeI53ToI64,$writeI53ToI64Clamped,$writeI53ToU64Clamped,$writeI53ToI64Signaling,$writeI53ToU64Signaling]'] if not self.is_wasm_backend(): self.emcc_args += ['-s', 'BINARYEN_TRAP_MODE=js'] self.do_run_in_out_file_test('tests', 'core', 'test_int53') def test_i64(self): self.do_run_in_out_file_test('tests', 'core', 'test_i64') def test_i64_2(self): self.do_run_in_out_file_test('tests', 'core', 'test_i64_2') def test_i64_3(self): self.do_run_in_out_file_test('tests', 'core', 'test_i64_3') def test_i64_4(self): # stuff that also needs sign corrections self.do_run_in_out_file_test('tests', 'core', 'test_i64_4') def test_i64_b(self): self.do_run_in_out_file_test('tests', 'core', 'test_i64_b') def test_i64_cmp(self): self.do_run_in_out_file_test('tests', 'core', 'test_i64_cmp') def test_i64_cmp2(self): self.do_run_in_out_file_test('tests', 'core', 'test_i64_cmp2') def test_i64_double(self): self.do_run_in_out_file_test('tests', 'core', 'test_i64_double') def test_i64_umul(self): self.do_run_in_out_file_test('tests', 'core', 'test_i64_umul') @also_with_standalone_wasm def test_i64_precise(self): self.do_run_in_out_file_test('tests', 'core', 'test_i64_precise') def test_i64_precise_needed(self): self.do_run_in_out_file_test('tests', 'core', 'test_i64_precise_needed') def test_i64_llabs(self): self.do_run_in_out_file_test('tests', 'core', 'test_i64_llabs') def test_i64_zextneg(self): self.do_run_in_out_file_test('tests', 'core', 'test_i64_zextneg') def test_i64_7z(self): # needs to flush stdio streams self.set_setting('EXIT_RUNTIME', 1) self.do_run_in_out_file_test('tests', 'core', 'test_i64_7z', args=['hallo']) def test_i64_i16(self): self.do_run_in_out_file_test('tests', 'core', 'test_i64_i16') def test_i64_qdouble(self): self.do_run_in_out_file_test('tests', 'core', 'test_i64_qdouble') def test_i64_varargs(self): 
self.do_run_in_out_file_test('tests', 'core', 'test_i64_varargs', args='waka fleefl asdfasdfasdfasdf' .split(' ')) @no_fastcomp('wasm bigint') @no_wasm2js('wasm_bigint') def test_i64_invoke_bigint(self): self.set_setting('WASM_BIGINT', 1) self.emcc_args += ['-fexceptions'] self.do_run_in_out_file_test('tests', 'core', 'test_i64_invoke_bigint', js_engines=[NODE_JS + ['--experimental-wasm-bigint']]) def test_vararg_copy(self): self.do_run_in_out_file_test('tests', 'va_arg', 'test_va_copy') def test_llvm_fabs(self): self.set_setting('PRECISE_F32', 1) self.do_run_in_out_file_test('tests', 'core', 'test_llvm_fabs') def test_double_varargs(self): self.do_run_in_out_file_test('tests', 'core', 'test_double_varargs') def test_trivial_struct_varargs(self): self.do_run_in_out_file_test('tests', 'core', 'test_trivial_struct_varargs') def test_struct_varargs(self): self.do_run_in_out_file_test('tests', 'core', 'test_struct_varargs') def test_zero_struct_varargs(self): self.do_run_in_out_file_test('tests', 'core', 'test_zero_struct_varargs') def zzztest_nested_struct_varargs(self): self.do_run_in_out_file_test('tests', 'core', 'test_nested_struct_varargs') def test_i32_mul_precise(self): self.do_run_in_out_file_test('tests', 'core', 'test_i32_mul_precise') def test_i16_emcc_intrinsic(self): # needs to flush stdio streams self.set_setting('EXIT_RUNTIME', 1) self.do_run_in_out_file_test('tests', 'core', 'test_i16_emcc_intrinsic') def test_double_i64_conversion(self): self.do_run_in_out_file_test('tests', 'core', 'test_double_i64_conversion') def test_float32_precise(self): self.set_setting('PRECISE_F32', 1) self.do_run_in_out_file_test('tests', 'core', 'test_float32_precise') def test_negative_zero(self): self.do_run_in_out_file_test('tests', 'core', 'test_negative_zero') def test_line_endings(self): self.build(open(path_from_root('tests', 'hello_world.cpp')).read(), self.get_dir(), 'hello_world.cpp') def test_literal_negative_zero(self): self.do_run_in_out_file_test('tests', 'core', 'test_literal_negative_zero') @no_wasm_backend('test uses calls to expected js imports, rather than using llvm intrinsics directly') def test_llvm_intrinsics(self): self.do_run_in_out_file_test('tests', 'core', 'test_llvm_intrinsics') @no_wasm_backend('test looks for js impls of intrinsics') def test_lower_intrinsics(self): self.emcc_args += ['-g1'] self.do_run_in_out_file_test('tests', 'core', 'test_lower_intrinsics') # intrinsics should be lowered out js = open('src.c.o.js').read() assert ('llvm_' not in js) == is_optimizing(self.emcc_args) or not self.is_wasm(), 'intrinsics must be lowered when optimizing' @also_with_standalone_wasm def test_bswap64(self): self.do_run_in_out_file_test('tests', 'core', 'test_bswap64') @no_wasm_backend('uses EMULATED_FUNCTION_POINTERS') def test_bswap64_emulate_fps(self): # extra coverages for emulate_casts in [0, 1]: for emulate_fps in [0, 1, 2]: print(emulate_casts, emulate_fps) self.set_setting('EMULATE_FUNCTION_POINTER_CASTS', emulate_casts) self.set_setting('EMULATED_FUNCTION_POINTERS', emulate_fps) self.do_run_in_out_file_test('tests', 'core', 'test_bswap64') def test_sha1(self): self.do_run(open(path_from_root('tests', 'sha1.c')).read(), 'SHA1=15dd99a1991e0b3826fede3deffc1feba42278e6') @no_wasm_backend('test checks that __asmjs__ is #defined') def test_asmjs_unknown_emscripten(self): # No other configuration is supported, so always run this. 
self.do_run(open(path_from_root('tests', 'asmjs-unknown-emscripten.c')).read(), '') def test_cube2md5(self): self.emcc_args += ['--embed-file', 'cube2md5.txt'] shutil.copyfile(path_from_root('tests', 'cube2md5.txt'), 'cube2md5.txt') self.do_run(open(path_from_root('tests', 'cube2md5.cpp')).read(), open(path_from_root('tests', 'cube2md5.ok')).read(), assert_returncode=None) @also_with_standalone_wasm @needs_make('make') def test_cube2hash(self): # A good test of i64 math self.do_run('', 'Usage: hashstring <seed>', libraries=self.get_library('cube2hash', ['cube2hash.bc'], configure=None), includes=[path_from_root('tests', 'cube2hash')], assert_returncode=None) for text, output in [('fleefl', '892BDB6FD3F62E863D63DA55851700FDE3ACF30204798CE9'), ('fleefl2', 'AA2CC5F96FC9D540CA24FDAF1F71E2942753DB83E8A81B61'), ('64bitisslow', '64D8470573635EC354FEE7B7F87C566FCAF1EFB491041670')]: self.do_run('src.cpp.o.js', 'hash value: ' + output, [text], no_build=True, assert_returncode=None) def test_unaligned(self): self.skipTest('LLVM marks the reads of s as fully aligned, making this test invalid') src = r''' #include <stdio.h> struct S { double x; int y; }; int main() { // the 64-bit value here will not be 8-byte aligned S s0[3] = { {0x12a751f430142, 22}, {0x17a5c85bad144, 98}, {1, 1}}; char buffer[10*sizeof(S)]; int b = int(buffer); S *s = (S*)(b + 4-b%8); s[0] = s0[0]; s[1] = s0[1]; s[2] = s0[2]; printf("*%d : %d : %d\n", sizeof(S), ((unsigned int)&s[0]) % 8 != ((unsigned int)&s[1]) % 8, ((unsigned int)&s[1]) - ((unsigned int)&s[0])); s[0].x++; s[0].y++; s[1].x++; s[1].y++; printf("%.1f,%d,%.1f,%d\n", s[0].x, s[0].y, s[1].x, s[1].y); return 0; } ''' # TODO: A version of this with int64s as well self.do_run(src, '*12 : 1 : 12\n328157500735811.0,23,416012775903557.0,99\n') return # TODO: continue to the next part here # Test for undefined behavior in C. 
This is not legitimate code, but does exist src = r''' #include <stdio.h> int main() { int x[10]; char *p = (char*)&x[0]; p++; short *q = (short*)p; *q = 300; printf("*%d:%d*\n", *q, ((int)q)%2); int *r = (int*)p; *r = 515559; printf("*%d*\n", *r); long long *t = (long long*)p; *t = 42949672960; printf("*%lld*\n", *t); return 0; } ''' try: self.do_run(src, '*300:1*\n*515559*\n*42949672960*\n') except Exception as e: assert 'must be aligned' in str(e), e # expected to fail without emulation def test_align64(self): src = r''' #include <stdio.h> // inspired by poppler enum Type { A = 10, B = 20 }; struct Object { Type type; union { int intg; double real; char *name; }; }; struct Principal { double x; Object a; double y; }; int main(int argc, char **argv) { int base = argc-1; Object *o = NULL; printf("%d,%d\n", sizeof(Object), sizeof(Principal)); printf("%d,%d,%d,%d\n", (int)&o[base].type, (int)&o[base].intg, (int)&o[base].real, (int)&o[base].name); printf("%d,%d,%d,%d\n", (int)&o[base+1].type, (int)&o[base+1].intg, (int)&o[base+1].real, (int)&o[base+1].name); Principal p, q; p.x = p.y = q.x = q.y = 0; p.a.type = A; p.a.real = 123.456; *(&q.a) = p.a; printf("%.2f,%d,%.2f,%.2f : %.2f,%d,%.2f,%.2f\n", p.x, p.a.type, p.a.real, p.y, q.x, q.a.type, q.a.real, q.y); return 0; } ''' self.do_run(src, '''16,32 0,8,8,8 16,24,24,24 0.00,10,123.46,0.00 : 0.00,10,123.46,0.00 ''') @no_asan('asan errors on corner cases we check') def test_aligned_alloc(self): self.do_run(open(path_from_root('tests', 'test_aligned_alloc.c')).read(), '', assert_returncode=0) def test_unsigned(self): src = ''' #include <stdio.h> const signed char cvals[2] = { -1, -2 }; // compiler can store this is a string, so -1 becomes \\FF, and needs re-signing int main() { { unsigned char x = 200; printf("*%d*\\n", x); unsigned char y = -22; printf("*%d*\\n", y); } int varey = 100; unsigned int MAXEY = -1, MAXEY2 = -77; printf("*%u,%d,%u*\\n", MAXEY, varey >= MAXEY, MAXEY2); // 100 >= -1? not in unsigned! int y = cvals[0]; printf("*%d,%d,%d,%d*\\n", cvals[0], cvals[0] < 0, y, y < 0); y = cvals[1]; printf("*%d,%d,%d,%d*\\n", cvals[1], cvals[1] < 0, y, y < 0); // zext issue - see mathop in jsifier unsigned char x8 = -10; unsigned long hold = 0; hold += x8; int y32 = hold+50; printf("*%u,%u*\\n", hold, y32); // Comparisons x8 = 0; for (int i = 0; i < 254; i++) x8++; // make it an actual 254 in JS - not a -2 printf("*%d,%d*\\n", x8+1 == 0xff, x8+1 != 0xff); // 0xff may be '-1' in the bitcode return 0; } ''' self.do_run(src, '*4294967295,0,4294967219*\n*-1,1,-1,1*\n*-2,1,-2,1*\n*246,296*\n*1,0*') self.emcc_args.append('-Wno-constant-conversion') src = ''' #include <stdio.h> int main() { { unsigned char x; unsigned char *y = &x; *y = -1; printf("*%d*\\n", x); } { unsigned short x; unsigned short *y = &x; *y = -1; printf("*%d*\\n", x); } /*{ // This case is not checked. 
The hint for unsignedness is just the %u in printf, and we do not analyze that unsigned int x; unsigned int *y = &x; *y = -1; printf("*%u*\\n", x); }*/ { char x; char *y = &x; *y = 255; printf("*%d*\\n", x); } { char x; char *y = &x; *y = 65535; printf("*%d*\\n", x); } { char x; char *y = &x; *y = 0xffffffff; printf("*%d*\\n", x); } return 0; } ''' self.do_run(src, '*255*\n*65535*\n*-1*\n*-1*\n*-1*') def test_bitfields(self): self.do_run_in_out_file_test('tests', 'core', 'test_bitfields') def test_floatvars(self): self.do_run_in_out_file_test('tests', 'core', 'test_floatvars') def test_closebitcasts(self): self.do_run_in_out_file_test('tests', 'core', 'closebitcasts') def test_fast_math(self): self.emcc_args += ['-ffast-math'] self.do_run_in_out_file_test('tests', 'core', 'test_fast_math', args=['5', '6', '8']) def test_zerodiv(self): self.do_run_in_out_file_test('tests', 'core', 'test_zerodiv') def test_zero_multiplication(self): # needs to flush stdio streams self.set_setting('EXIT_RUNTIME', 1) self.do_run_in_out_file_test('tests', 'core', 'test_zero_multiplication') def test_isnan(self): self.do_run_in_out_file_test('tests', 'core', 'test_isnan') def test_globaldoubles(self): self.do_run_in_out_file_test('tests', 'core', 'test_globaldoubles') def test_math(self): self.do_run_in_out_file_test('tests', 'core', 'test_math') def test_erf(self): self.do_run_in_out_file_test('tests', 'core', 'test_erf') def test_math_hyperbolic(self): self.do_run_in_out_file_test('tests', 'core', 'test_math_hyperbolic') def test_math_lgamma(self): self.do_run_in_out_file_test('tests', 'math', 'lgamma', assert_returncode=None) if self.get_setting('ALLOW_MEMORY_GROWTH') == 0 and not self.is_wasm() and \ not self.is_wasm_backend(): print('main module') self.set_setting('MAIN_MODULE', 1) self.do_run_in_out_file_test('tests', 'math', 'lgamma', assert_returncode=None) # Test that fmodf with -s PRECISE_F32=1 properly validates as asm.js (% operator cannot take in f32, only f64) def test_math_fmodf(self): self.do_run_in_out_file_test('tests', 'math', 'fmodf') def test_frexp(self): self.do_run_in_out_file_test('tests', 'core', 'test_frexp') def test_rounding(self): # needs to flush stdio streams self.set_setting('EXIT_RUNTIME', 1) for precise_f32 in [0, 1]: print(precise_f32) self.set_setting('PRECISE_F32', precise_f32) self.do_run_in_out_file_test('tests', 'core', 'test_rounding') def test_fcvt(self): self.do_run_in_out_file_test('tests', 'core', 'test_fcvt') def test_llrint(self): self.do_run_in_out_file_test('tests', 'core', 'test_llrint') def test_getgep(self): # Generated code includes getelementptr (getelementptr, 0, 1), i.e., GEP as the first param to GEP self.do_run_in_out_file_test('tests', 'core', 'test_getgep') # No compiling from C/C++ - just process an existing .o/.ll/.bc file. 
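  # For reference, a rough usage sketch of the helpers below (illustrative only,
  # with a hypothetical input path, not an additional test):
  #
  #   objfile = self.prep_ll_file('foo', path_from_root('tests', 'foo.ll'))
  #   self.do_run_object(objfile, 'expected output')
  #
  # i.e. do_run_object() wraps building.emcc() so a test can start from an
  # already-compiled object file, and do_ll_run() first turns an .ll file into
  # such an object via prep_ll_file().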
def do_run_object(self, obj_file, expected_output=None, **kwargs): js_file = os.path.basename(obj_file) + '.js' building.emcc(obj_file, self.get_emcc_args(), js_file) self.do_run(js_file, expected_output, no_build=True, **kwargs) def do_ll_run(self, filename, expected_output=None, **kwargs): output_base = os.path.basename(filename) objfile = self.prep_ll_file(output_base, filename) self.do_run_object(objfile, expected_output, **kwargs) def test_multiply_defined_symbols(self): create_test_file('a1.c', 'int f() { return 1; }') create_test_file('a2.c', 'void x() {}') create_test_file('b1.c', 'int f() { return 2; }') create_test_file('b2.c', 'void y() {}') create_test_file('main.c', r''' #include <stdio.h> int f(); int main() { printf("result: %d\n", f()); return 0; } ''') building.emcc('a1.c') building.emcc('a2.c') building.emcc('b1.c') building.emcc('b2.c') building.emcc('main.c') building.emar('cr', 'liba.a', ['a1.c.o', 'a2.c.o']) building.emar('cr', 'libb.a', ['b1.c.o', 'b2.c.o']) building.link_to_object(['main.c.o', 'liba.a', 'libb.a'], 'all.o') self.do_run_object('all.o', 'result: 1') def test_if(self): self.do_run_in_out_file_test('tests', 'core', 'test_if') def test_if_else(self): self.do_run_in_out_file_test('tests', 'core', 'test_if_else') def test_loop(self): self.do_run_in_out_file_test('tests', 'core', 'test_loop') def test_stack(self): self.set_setting('INLINING_LIMIT', 50) self.do_run_in_out_file_test('tests', 'core', 'test_stack') def test_stack_align(self): src = path_from_root('tests', 'core', 'test_stack_align.cpp') def test(): self.do_run(open(src).read(), ['''align 4: 0 align 8: 0 align 16: 0 align 32: 0 base align: 0, 0, 0, 0''']) test() @no_asan('stack size is too low for asan to work properly') def test_stack_placement(self): self.set_setting('TOTAL_STACK', 1024) self.do_run_in_out_file_test('tests', 'core', 'test_stack_placement') self.set_setting('GLOBAL_BASE', 102400) self.do_run_in_out_file_test('tests', 'core', 'test_stack_placement') @no_asan('asan does not support main modules') @no_wasm2js('MAIN_MODULE support') def test_stack_placement_pic(self): if not self.is_wasm_backend() and self.get_setting('ALLOW_MEMORY_GROWTH'): self.skipTest('memory growth is not compatible with MAIN_MODULE') self.set_setting('TOTAL_STACK', 1024) self.set_setting('MAIN_MODULE') self.do_run_in_out_file_test('tests', 'core', 'test_stack_placement') self.set_setting('GLOBAL_BASE', 102400) self.do_run_in_out_file_test('tests', 'core', 'test_stack_placement') def test_stack_restore(self): if self.get_setting('WASM') or self.is_wasm_backend(): self.skipTest('generated code not available in wasm') self.emcc_args += ['-g3'] # to be able to find the generated code self.do_run_in_out_file_test('tests', 'core', 'test_stack_restore') generated = open('src.c.o.js').read() def ensure_stack_restore_count(function_name, expected_count): code = generated[generated.find(function_name):] code = code[:code.find('\n}') + 2] actual_count = code.count('STACKTOP = sp') assert actual_count == expected_count, ('Expected %d stack restorations, got %d' % (expected_count, actual_count)) + ': ' + code ensure_stack_restore_count('function _no_stack_usage', 0) ensure_stack_restore_count('function _alloca_gets_restored', 1) ensure_stack_restore_count('function _stack_usage', 1) def test_strings(self): self.do_run_in_out_file_test('tests', 'core', 'test_strings', args=['wowie', 'too', '74']) def test_strcmp_uni(self): self.do_run_in_out_file_test('tests', 'core', 'test_strcmp_uni') def test_strndup(self): 
self.do_run_in_out_file_test('tests', 'core', 'test_strndup') def test_errar(self): self.do_run_in_out_file_test('tests', 'core', 'test_errar') def test_mainenv(self): self.do_run_in_out_file_test('tests', 'core', 'test_mainenv') def test_funcs(self): self.do_run_in_out_file_test('tests', 'core', 'test_funcs') def test_structs(self): self.do_run_in_out_file_test('tests', 'core', 'test_structs') gen_struct_src = ''' #include <stdio.h> #include <stdlib.h> #include "emscripten.h" struct S { int x, y; }; int main() { S* a = {{gen_struct}}; a->x = 51; a->y = 62; printf("*%d,%d*\\n", a->x, a->y); {{del_struct}}(a); return 0; } ''' def test_mallocstruct(self): self.do_run(self.gen_struct_src.replace('{{gen_struct}}', '(S*)malloc(sizeof(S))').replace('{{del_struct}}', 'free'), '*51,62*') @no_asan('ASan does not support custom memory allocators') @no_lsan('LSan does not support custom memory allocators') @parameterized({ 'normal': [], 'debug': ['-DEMMALLOC_DEBUG'], 'debug_log': ['-DEMMALLOC_DEBUG', '-DEMMALLOC_DEBUG_LOG', '-DRANDOM_ITERS=130'], }) def test_emmalloc(self, *args): # in newer clang+llvm, the internal calls to malloc in emmalloc may be optimized under # the assumption that they are external, so like in system_libs.py where we build # malloc, we need to disable builtin here too self.set_setting('MALLOC', 'none') self.emcc_args += ['-fno-builtin'] + list(args) self.do_run(open(path_from_root('system', 'lib', 'emmalloc.cpp')).read() + open(path_from_root('system', 'lib', 'sbrk.c')).read() + open(path_from_root('tests', 'core', 'test_emmalloc.cpp')).read(), open(path_from_root('tests', 'core', 'test_emmalloc.txt')).read()) @no_asan('ASan does not support custom memory allocators') @no_lsan('LSan does not support custom memory allocators') def test_emmalloc_usable_size(self, *args): self.set_setting('MALLOC', 'emmalloc') self.emcc_args += list(args) self.do_run_in_out_file_test('tests', 'core', 'test_malloc_usable_size') @no_fastcomp('this feature works in fastcomp, but test outputs are sensitive to wasm backend') @no_optimize('output is sensitive to optimization flags, so only test unoptimized builds') @no_asan('ASan does not support custom memory allocators') @no_lsan('LSan does not support custom memory allocators') def test_emmalloc_memory_statistics(self, *args): self.set_setting('MALLOC', 'emmalloc') self.emcc_args += ['-s', 'INITIAL_MEMORY=128MB', '-g'] + list(args) self.do_run_in_out_file_test('tests', 'core', 'test_emmalloc_memory_statistics') @no_fastcomp('this feature works in fastcomp, but test outputs are sensitive to wasm backend') @no_optimize('output is sensitive to optimization flags, so only test unoptimized builds') @no_asan('ASan does not support custom memory allocators') @no_lsan('LSan does not support custom memory allocators') def test_emmalloc_trim(self, *args): self.set_setting('MALLOC', 'emmalloc') self.emcc_args += ['-s', 'INITIAL_MEMORY=128MB', '-s', 'ALLOW_MEMORY_GROWTH=1', '-s', 'MAXIMUM_MEMORY=2147418112'] + list(args) self.do_run_in_out_file_test('tests', 'core', 'test_emmalloc_trim') # Test case against https://github.com/emscripten-core/emscripten/issues/10363 def test_emmalloc_memalign_corruption(self, *args): self.set_setting('MALLOC', 'emmalloc') self.do_run_in_out_file_test('tests', 'core', 'emmalloc_memalign_corruption') def test_newstruct(self): self.do_run(self.gen_struct_src.replace('{{gen_struct}}', 'new S').replace('{{del_struct}}', 'delete'), '*51,62*') def test_addr_of_stacked(self): self.do_run_in_out_file_test('tests', 'core', 
'test_addr_of_stacked') def test_globals(self): self.do_run_in_out_file_test('tests', 'core', 'test_globals') def test_linked_list(self): self.do_run_in_out_file_test('tests', 'core', 'test_linked_list') def test_sup(self): src = ''' #include <stdio.h> struct S4 { int x; }; // size: 4 struct S4_2 { short x, y; }; // size: 4, but for alignment purposes, 2 struct S6 { short x, y, z; }; // size: 6 struct S6w { char x[6]; }; // size: 6 also struct S6z { int x; short y; }; // size: 8, since we align to a multiple of the biggest - 4 struct C___ { S6 a, b, c; int later; }; struct Carr { S6 a[3]; int later; }; // essentially the same, but differently defined struct C__w { S6 a; S6w b; S6 c; int later; }; // same size, different struct struct Cp1_ { int pre; short a; S6 b, c; int later; }; // fillers for a struct Cp2_ { int a; short pre; S6 b, c; int later; }; // fillers for a (get addr of the other filler) struct Cint { S6 a; int b; S6 c; int later; }; // An int (different size) for b struct C4__ { S6 a; S4 b; S6 c; int later; }; // Same size as int from before, but a struct struct C4_2 { S6 a; S4_2 b; S6 c; int later; }; // Same size as int from before, but a struct with max element size 2 struct C__z { S6 a; S6z b; S6 c; int later; }; // different size, 8 instead of 6 int main() { #define TEST(struc) \\ { \\ struc *s = 0; \\ printf("*%s: %d,%d,%d,%d<%d*\\n", #struc, (int)&(s->a), (int)&(s->b), (int)&(s->c), (int)&(s->later), sizeof(struc)); \\ } #define TEST_ARR(struc) \\ { \\ struc *s = 0; \\ printf("*%s: %d,%d,%d,%d<%d*\\n", #struc, (int)&(s->a[0]), (int)&(s->a[1]), (int)&(s->a[2]), (int)&(s->later), sizeof(struc)); \\ } printf("sizeofs:%d,%d\\n", sizeof(S6), sizeof(S6z)); TEST(C___); TEST_ARR(Carr); TEST(C__w); TEST(Cp1_); TEST(Cp2_); TEST(Cint); TEST(C4__); TEST(C4_2); TEST(C__z); return 0; } ''' self.do_run(src, 'sizeofs:6,8\n*C___: 0,6,12,20<24*\n*Carr: 0,6,12,20<24*\n*C__w: 0,6,12,20<24*\n*Cp1_: 4,6,12,20<24*\n*Cp2_: 0,6,12,20<24*\n*Cint: 0,8,12,20<24*\n*C4__: 0,8,12,20<24*\n*C4_2: 0,6,10,16<20*\n*C__z: 0,8,16,24<28*') @also_with_standalone_wasm def test_assert(self): self.do_run_in_out_file_test('tests', 'core', 'test_assert', assert_returncode=None) def test_wcslen(self): self.do_run_in_out_file_test('tests', 'core', 'test_wcslen') def test_regex(self): self.do_run_in_out_file_test('tests', 'core', 'test_regex') def test_longjmp(self): self.do_run_in_out_file_test('tests', 'core', 'test_longjmp') def test_longjmp2(self): self.do_run_in_out_file_test('tests', 'core', 'test_longjmp2') @needs_dlfcn def test_longjmp2_main_module(self): # Test for binaryen regression: # https://github.com/WebAssembly/binaryen/issues/2180 self.set_setting('MAIN_MODULE') self.do_run_in_out_file_test('tests', 'core', 'test_longjmp2') def test_longjmp3(self): self.do_run_in_out_file_test('tests', 'core', 'test_longjmp3') def test_longjmp4(self): self.do_run_in_out_file_test('tests', 'core', 'test_longjmp4') def test_longjmp_funcptr(self): self.do_run_in_out_file_test('tests', 'core', 'test_longjmp_funcptr') def test_longjmp_repeat(self): self.do_run_in_out_file_test('tests', 'core', 'test_longjmp_repeat') def test_longjmp_stacked(self): self.do_run_in_out_file_test('tests', 'core', 'test_longjmp_stacked', assert_returncode=None) def test_longjmp_exc(self): self.do_run_in_out_file_test('tests', 'core', 'test_longjmp_exc', assert_returncode=None) def test_longjmp_throw(self): for disable_throw in [0, 1]: print(disable_throw) self.set_setting('DISABLE_EXCEPTION_CATCHING', disable_throw) 
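      # run the same test twice: once with exception catching left enabled
      # (disable_throw == 0) and once with it disabled, since (as the name
      # suggests) this case mixes longjmp with C++ throw handling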
self.do_run_in_out_file_test('tests', 'core', 'test_longjmp_throw') def test_longjmp_unwind(self): self.do_run_in_out_file_test('tests', 'core', 'test_longjmp_unwind', assert_returncode=None) def test_longjmp_i64(self): self.emcc_args += ['-g'] self.do_run_in_out_file_test('tests', 'core', 'test_longjmp_i64', assert_returncode=None) def test_siglongjmp(self): self.do_run_in_out_file_test('tests', 'core', 'test_siglongjmp') def test_setjmp_many(self): src = r''' #include <stdio.h> #include <setjmp.h> int main(int argc, char** argv) { jmp_buf buf; for (int i = 0; i < NUM; i++) printf("%d\n", setjmp(buf)); if (argc-- == 1131) longjmp(buf, 11); return 0; } ''' for num in [1, 5, 20, 1000]: print('NUM=%d' % num) self.do_run(src.replace('NUM', str(num)), '0\n' * num) def test_setjmp_many_2(self): src = r''' #include <setjmp.h> #include <stdio.h> jmp_buf env; void luaWork(int d){ int x; printf("d is at %d\n", d); longjmp(env, 1); } int main() { const int ITERATIONS=25; for(int i = 0; i < ITERATIONS; i++){ if(!setjmp(env)){ luaWork(i); } } return 0; } ''' self.do_run(src, r'''d is at 24''') def test_setjmp_noleak(self): src = r''' #include <setjmp.h> #include <stdio.h> #include <assert.h> jmp_buf env; void luaWork(int d){ int x; printf("d is at %d\n", d); longjmp(env, 1); } #include <malloc.h> #include <stdlib.h> void dump() { struct mallinfo m = mallinfo(); printf("dump: %d , %d\n", m.arena, m.uordblks); } void work(int n) { printf("work %d\n", n); dump(); if(!setjmp(env)){ luaWork(n); } if (n > 0) work(n-1); } int main() { struct mallinfo m1 = mallinfo(); dump(); work(10); dump(); struct mallinfo m2 = mallinfo(); assert(m1.uordblks == m2.uordblks); printf("ok.\n"); } ''' self.do_run(src, r'''ok.''') @with_both_exception_handling def test_exceptions(self): self.set_setting('EXCEPTION_DEBUG', 1) self.maybe_closure() for support_longjmp in [0, 1]: self.set_setting('SUPPORT_LONGJMP', support_longjmp) self.do_run_from_file(path_from_root('tests', 'core', 'test_exceptions.cpp'), path_from_root('tests', 'core', 'test_exceptions_caught.out')) def test_exceptions_off(self): for support_longjmp in [0, 1]: self.set_setting('DISABLE_EXCEPTION_CATCHING', 1) self.do_run_from_file(path_from_root('tests', 'core', 'test_exceptions.cpp'), path_from_root('tests', 'core', 'test_exceptions_uncaught.out'), assert_returncode=None) def test_exceptions_minimal_runtime(self): self.set_setting('EXCEPTION_DEBUG', 1) self.maybe_closure() self.set_setting('MINIMAL_RUNTIME', 1) for support_longjmp in [0, 1]: self.set_setting('SUPPORT_LONGJMP', support_longjmp) self.set_setting('DISABLE_EXCEPTION_CATCHING', 0) self.do_run_from_file(path_from_root('tests', 'core', 'test_exceptions.cpp'), path_from_root('tests', 'core', 'test_exceptions_caught.out')) self.set_setting('DISABLE_EXCEPTION_CATCHING', 1) self.do_run_from_file(path_from_root('tests', 'core', 'test_exceptions.cpp'), path_from_root('tests', 'core', 'test_exceptions_uncaught.out'), assert_returncode=None) @with_both_exception_handling def test_exceptions_custom(self): self.set_setting('EXCEPTION_DEBUG', 1) # needs to flush stdio streams self.set_setting('EXIT_RUNTIME', 1) self.maybe_closure() src = ''' #include <iostream> class MyException { public: MyException(){ std::cout << "Construct..."; } MyException( const MyException & ) { std::cout << "Copy..."; } ~MyException(){ std::cout << "Destruct..."; } }; int function() { std::cout << "Throw..."; throw MyException(); } int function2() { return function(); } int main() { try { function2(); } catch (MyException & e) { 
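        // caught by reference here, so no copy of MyException is made; the
        // by-value catch in the second try below adds a Copy... and an extra
        // Destruct... (compare the expected output string passed to do_run)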
std::cout << "Caught..."; } try { function2(); } catch (MyException e) { std::cout << "Caught..."; } return 0; } ''' self.do_run(src, 'Throw...Construct...Caught...Destruct...Throw...Construct...Copy...Caught...Destruct...Destruct...') @with_both_exception_handling def test_exceptions_2(self): for safe in [0, 1]: print(safe) if safe and '-fsanitize=address' in self.emcc_args: # Can't use safe heap with ASan continue self.set_setting('SAFE_HEAP', safe) self.do_run_in_out_file_test('tests', 'core', 'test_exceptions_2') @with_both_exception_handling def test_exceptions_3(self): src = r''' #include <iostream> #include <stdexcept> int main(int argc, char **argv) { if (argc != 2) { std::cout << "need an arg" << std::endl; return 1; } int arg = argv[1][0] - '0'; try { if (arg == 0) throw "a c string"; if (arg == 1) throw std::exception(); if (arg == 2) throw std::runtime_error("Hello"); } catch(const char * ex) { std::cout << "Caught C string: " << ex << std::endl; } catch(const std::exception &ex) { std::cout << "Caught exception: " << ex.what() << std::endl; } catch(...) { std::cout << "Caught something else" << std::endl; } std::cout << "Done.\n"; } ''' print('0') self.do_run(src, 'Caught C string: a c string\nDone.', ['0']) print('1') self.do_run(None, 'Caught exception: std::exception\nDone.', ['1'], no_build=True) print('2') self.do_run(None, 'Caught exception: Hello\nDone.', ['2'], no_build=True) def test_exceptions_white_list(self): self.set_setting('DISABLE_EXCEPTION_CATCHING', 2) # Wasm does not add an underscore to function names. For wasm, the # mismatches are fixed in fixImports() function in JS glue code. if not self.is_wasm_backend(): self.set_setting('EXCEPTION_CATCHING_WHITELIST', ["__Z12somefunctionv"]) else: self.set_setting('EXCEPTION_CATCHING_WHITELIST', ["_Z12somefunctionv"]) # otherwise it is inlined and not identified self.set_setting('INLINING_LIMIT', 50) test_path = path_from_root('tests', 'core', 'test_exceptions_white_list') src, output = (test_path + s for s in ('.cpp', '.out')) self.do_run_from_file(src, output) size = len(open('src.cpp.o.js').read()) shutil.copyfile('src.cpp.o.js', 'orig.js') # check that an empty whitelist works properly (as in, same as exceptions disabled) empty_output = path_from_root('tests', 'core', 'test_exceptions_white_list_empty.out') self.set_setting('EXCEPTION_CATCHING_WHITELIST', []) self.do_run_from_file(src, empty_output, assert_returncode=None) empty_size = len(open('src.cpp.o.js').read()) shutil.copyfile('src.cpp.o.js', 'empty.js') self.set_setting('EXCEPTION_CATCHING_WHITELIST', ['fake']) self.do_run_from_file(src, empty_output, assert_returncode=None) fake_size = len(open('src.cpp.o.js').read()) shutil.copyfile('src.cpp.o.js', 'fake.js') self.set_setting('DISABLE_EXCEPTION_CATCHING', 1) self.do_run_from_file(src, empty_output, assert_returncode=None) disabled_size = len(open('src.cpp.o.js').read()) shutil.copyfile('src.cpp.o.js', 'disabled.js') if not self.is_wasm(): print(size, empty_size, fake_size, disabled_size) assert empty_size == fake_size, [empty_size, fake_size] # big change when we disable exception catching of the function assert size - empty_size > 0.01 * size, [empty_size, size] # full disable can remove a little bit more assert empty_size >= disabled_size, [empty_size, disabled_size] def test_exceptions_white_list_2(self): self.set_setting('DISABLE_EXCEPTION_CATCHING', 2) # Wasm does not add an underscore to function names. For wasm, the # mismatches are fixed in fixImports() function in JS glue code. 
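    # Illustrative note on the two spellings chosen below (an assumption about
    # the name mangling involved): the fastcomp/asm.js glue prefixes an extra
    # underscore, so C main() is listed as "_main" there, while the wasm backend
    # keeps the symbol as plain "main".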
if not self.is_wasm_backend(): self.set_setting('EXCEPTION_CATCHING_WHITELIST', ["_main"]) else: self.set_setting('EXCEPTION_CATCHING_WHITELIST', ["main"]) # otherwise it is inlined and not identified self.set_setting('INLINING_LIMIT', 1) self.do_run_in_out_file_test('tests', 'core', 'test_exceptions_white_list_2') def test_exceptions_white_list_uncaught(self): self.emcc_args += ['-std=c++11'] self.set_setting('DISABLE_EXCEPTION_CATCHING', 2) # Wasm does not add an underscore to function names. For wasm, the # mismatches are fixed in fixImports() function in JS glue code. if not self.is_wasm_backend(): self.set_setting('EXCEPTION_CATCHING_WHITELIST', ["__Z4testv"]) else: self.set_setting('EXCEPTION_CATCHING_WHITELIST', ["_Z4testv"]) # otherwise it is inlined and not identified self.set_setting('INLINING_LIMIT', 1) self.do_run_in_out_file_test('tests', 'core', 'test_exceptions_white_list_uncaught') @with_both_exception_handling def test_exceptions_uncaught(self): # needs to flush stdio streams self.set_setting('EXIT_RUNTIME', 1) src = r''' #include <stdio.h> #include <exception> struct X { ~X() { printf("exception? %s\n", std::uncaught_exception() ? "yes" : "no"); } }; int main() { printf("exception? %s\n", std::uncaught_exception() ? "yes" : "no"); try { X x; throw 1; } catch(...) { printf("exception? %s\n", std::uncaught_exception() ? "yes" : "no"); } printf("exception? %s\n", std::uncaught_exception() ? "yes" : "no"); return 0; } ''' self.do_run(src, 'exception? no\nexception? yes\nexception? no\nexception? no\n') src = r''' #include <fstream> #include <iostream> int main() { std::ofstream os("test"); os << std::unitbuf << "foo"; // trigger a call to std::uncaught_exception from // std::basic_ostream::sentry::~sentry std::cout << "success"; } ''' self.do_run(src, 'success') @with_both_exception_handling def test_exceptions_uncaught_2(self): # needs to flush stdio streams self.set_setting('EXIT_RUNTIME', 1) src = r''' #include <iostream> #include <exception> int main() { try { throw std::exception(); } catch(std::exception) { try { throw; } catch(std::exception) {} } if (std::uncaught_exception()) std::cout << "ERROR: uncaught_exception still set."; else std::cout << "OK"; } ''' self.do_run(src, 'OK\n') @with_both_exception_handling def test_exceptions_typed(self): # needs to flush stdio streams self.set_setting('EXIT_RUNTIME', 1) self.emcc_args += ['-s', 'SAFE_HEAP=0'] # Throwing null will cause an ignorable null pointer access. 
self.do_run_in_out_file_test('tests', 'core', 'test_exceptions_typed') @with_both_exception_handling def test_exceptions_virtual_inheritance(self): self.do_run_in_out_file_test('tests', 'core', 'test_exceptions_virtual_inheritance') @with_both_exception_handling def test_exceptions_convert(self): self.do_run_in_out_file_test('tests', 'core', 'test_exceptions_convert') # TODO Make setjmp-longjmp also use Wasm exception handling @with_both_exception_handling def test_exceptions_multi(self): self.do_run_in_out_file_test('tests', 'core', 'test_exceptions_multi') @with_both_exception_handling def test_exceptions_std(self): self.emcc_args += ['-s', 'SAFE_HEAP=0'] self.do_run_in_out_file_test('tests', 'core', 'test_exceptions_std') @with_both_exception_handling def test_exceptions_alias(self): self.do_run_in_out_file_test('tests', 'core', 'test_exceptions_alias') @with_both_exception_handling def test_exceptions_rethrow(self): self.do_run_in_out_file_test('tests', 'core', 'test_exceptions_rethrow') @with_both_exception_handling def test_exceptions_resume(self): self.set_setting('EXCEPTION_DEBUG', 1) self.do_run_in_out_file_test('tests', 'core', 'test_exceptions_resume') @with_both_exception_handling def test_exceptions_destroy_virtual(self): self.do_run_in_out_file_test('tests', 'core', 'test_exceptions_destroy_virtual') @with_both_exception_handling def test_exceptions_refcount(self): self.do_run_in_out_file_test('tests', 'core', 'test_exceptions_refcount') @with_both_exception_handling def test_exceptions_primary(self): self.do_run_in_out_file_test('tests', 'core', 'test_exceptions_primary') @with_both_exception_handling def test_exceptions_simplify_cfg(self): self.do_run_in_out_file_test('tests', 'core', 'test_exceptions_simplify_cfg') @with_both_exception_handling def test_exceptions_libcxx(self): self.do_run_in_out_file_test('tests', 'core', 'test_exceptions_libcxx') @with_both_exception_handling def test_exceptions_multiple_inherit(self): self.do_run_in_out_file_test('tests', 'core', 'test_exceptions_multiple_inherit') @with_both_exception_handling def test_exceptions_multiple_inherit_rethrow(self): self.do_run_in_out_file_test('tests', 'core', 'test_exceptions_multiple_inherit_rethrow') @with_both_exception_handling def test_bad_typeid(self): self.do_run(r''' // exception example #include <iostream> // std::cerr #include <typeinfo> // operator typeid #include <exception> // std::exception class Polymorphic {virtual void member(){}}; int main () { try { Polymorphic * pb = 0; const std::type_info& ti = typeid(*pb); // throws a bad_typeid exception } catch (std::exception& e) { std::cerr << "exception caught: " << e.what() << '\n'; } return 0; } ''', 'exception caught: std::bad_typeid') def test_iostream_ctors(self): # iostream stuff must be globally constructed before user global # constructors, so iostream works in global constructors self.do_run(r''' #include <iostream> struct A { A() { std::cout << "bug"; } }; A a; int main() { std::cout << "free code" << std::endl; return 0; } ''', 'bugfree code') # Marked as impure since the WASI reactor modules (modules without main) # are not yet suppored by the wasm engines we test against. 
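  # Background: with '--no-entry' the module below has no main() at all and does
  # its work purely from global constructors; in WASI terms that makes it a
  # "reactor" rather than a "command" module, which is why the test is marked
  # impure for standalone wasm.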
@also_with_impure_standalone_wasm def test_ctors_no_main(self): self.emcc_args.append('--no-entry') self.do_run_in_out_file_test('tests', 'core', 'test_ctors_no_main') def test_class(self): self.do_run_in_out_file_test('tests', 'core', 'test_class') def test_inherit(self): self.do_run_in_out_file_test('tests', 'core', 'test_inherit') def test_isdigit_l(self): # needs to flush stdio streams self.set_setting('EXIT_RUNTIME', 1) self.do_run_in_out_file_test('tests', 'core', 'test_isdigit_l') def test_iswdigit(self): # needs to flush stdio streams self.set_setting('EXIT_RUNTIME', 1) self.do_run_in_out_file_test('tests', 'core', 'test_iswdigit') def test_polymorph(self): self.do_run_in_out_file_test('tests', 'core', 'test_polymorph') def test_complex(self): self.do_run_in_out_file_test('tests', 'core', 'test_complex') def test_float_builtins(self): # tests wasm_libc_rt if not self.is_wasm_backend(): self.skipTest('no __builtin_fmin support in JSBackend') self.do_run_in_out_file_test('tests', 'core', 'test_float_builtins') @no_asan('SAFE_HEAP cannot be used with ASan') def test_segfault(self): self.set_setting('SAFE_HEAP', 1) for addr in ['0', 'new D2()']: print(addr) src = r''' #include <stdio.h> struct Classey { virtual void doIt() = 0; }; struct D1 : Classey { virtual void doIt() { printf("fleefl\n"); } }; struct D2 : Classey { virtual void doIt() { printf("marfoosh\n"); } }; int main(int argc, char **argv) { Classey *p = argc == 100 ? new D1() : (Classey*)%s; p->doIt(); return 0; } ''' % addr self.do_run(src, 'segmentation fault' if addr.isdigit() else 'marfoosh', assert_returncode=None) def test_dynamic_cast(self): self.do_run_in_out_file_test('tests', 'core', 'test_dynamic_cast') def test_dynamic_cast_b(self): self.do_run_in_out_file_test('tests', 'core', 'test_dynamic_cast_b') def test_dynamic_cast_2(self): self.do_run_in_out_file_test('tests', 'core', 'test_dynamic_cast_2') def test_funcptr(self): self.do_run_in_out_file_test('tests', 'core', 'test_funcptr') def test_mathfuncptr(self): self.do_run_in_out_file_test('tests', 'core', 'test_mathfuncptr') def test_funcptrfunc(self): self.do_run_in_out_file_test('tests', 'core', 'test_funcptrfunc') def test_funcptr_namecollide(self): self.do_run_in_out_file_test('tests', 'core', 'test_funcptr_namecollide') def test_emptyclass(self): self.do_run_in_out_file_test('tests', 'core', 'test_emptyclass') def test_alloca(self): self.do_run_in_out_file_test('tests', 'core', 'test_alloca') def test_rename(self): self.do_run_in_out_file_test('tests', 'stdio', 'test_rename') def test_remove(self): # needs to flush stdio streams self.set_setting('EXIT_RUNTIME', 1) self.do_run_in_out_file_test('tests', 'cstdio', 'test_remove') def test_alloca_stack(self): self.do_run_in_out_file_test('tests', 'core', 'test_alloca_stack') def test_stack_byval(self): self.do_run_in_out_file_test('tests', 'core', 'test_stack_byval') def test_stack_varargs(self): # in node.js we allocate argv[0] on the stack, which means the length # of the program directory influences how much stack we need, and so # long random temp dir names can lead to random failures. The stack # size was increased here to avoid that. self.set_setting('INLINING_LIMIT', 50) self.set_setting('TOTAL_STACK', 8 * 1024) self.do_run_in_out_file_test('tests', 'core', 'test_stack_varargs') def test_stack_varargs2(self): # in node.js we allocate argv[0] on the stack, which means the length # of the program directory influences how much stack we need, and so # long random temp dir names can lead to random failures. 
The stack # size was increased here to avoid that. self.set_setting('TOTAL_STACK', 8 * 1024) src = r''' #include <stdio.h> #include <stdlib.h> void func(int i) { } int main() { for (int i = 0; i < 7000; i++) { printf("%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d\n", i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i); } printf("ok!\n"); return 0; } ''' self.do_run(src, 'ok!') print('with return') src = r''' #include <stdio.h> #include <stdlib.h> int main() { for (int i = 0; i < 7000; i++) { int j = printf("%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d", i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i); printf(" (%d)\n", j); } printf("ok!\n"); return 0; } ''' self.do_run(src, 'ok!') print('with definitely no return') src = r''' #include <stdio.h> #include <stdlib.h> #include <stdarg.h> void vary(const char *s, ...) { va_list v; va_start(v, s); char d[20]; vsnprintf(d, 20, s, v); puts(d); // Try it with copying va_list tempva; va_copy(tempva, v); vsnprintf(d, 20, s, tempva); puts(d); va_end(v); } int main() { for (int i = 0; i < 7000; i++) { int j = printf("%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d", i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i); printf(" (%d)\n", j); vary("*cheez: %d+%d*", 99, 24); vary("*albeit*"); } printf("ok!\n"); return 0; } ''' self.do_run(src, 'ok!') def test_stack_void(self): self.set_setting('INLINING_LIMIT', 50) self.do_run_in_out_file_test('tests', 'core', 'test_stack_void') def test_life(self): self.emcc_args += ['-std=c99'] self.do_run_in_out_file_test('tests', 'life', args=['2']) def test_array2(self): self.do_run_in_out_file_test('tests', 'core', 'test_array2') def test_array2b(self): self.do_run_in_out_file_test('tests', 'core', 'test_array2b') def test_constglobalstructs(self): self.do_run_in_out_file_test('tests', 'core', 'test_constglobalstructs') def test_conststructs(self): self.do_run_in_out_file_test('tests', 'core', 'test_conststructs') def test_bigarray(self): self.do_run_in_out_file_test('tests', 'core', 'test_bigarray') def test_mod_globalstruct(self): self.do_run_in_out_file_test('tests', 'core', 'test_mod_globalstruct') @no_wasm_backend('long doubles are f128s in wasm backend') def test_pystruct(self): def test(): self.do_run_in_out_file_test('tests', 'test_pystruct') test() print('relocatable') # this tests recursive global structs => nontrivial postSets for relocation assert self.get_setting('RELOCATABLE') == self.get_setting('EMULATED_FUNCTION_POINTERS') == 0 self.set_setting('RELOCATABLE', 1) self.set_setting('EMULATED_FUNCTION_POINTERS', 1) test() def test_sizeof(self): # Has invalid writes between printouts self.set_setting('SAFE_HEAP', 0) self.do_run_in_out_file_test('tests', 'core', 'test_sizeof') def test_llvm_used(self): self.do_run_in_out_file_test('tests', 'core', 'test_llvm_used') @no_asan('SAFE_HEAP cannot be used with ASan') def test_set_align(self): self.set_setting('SAFE_HEAP', 1) self.do_run_in_out_file_test('tests', 'core', 'test_set_align') def test_emscripten_api(self): self.set_setting('EXPORTED_FUNCTIONS', ['_main', '_save_me_aimee']) self.do_run_in_out_file_test('tests', 'core', 'test_emscripten_api') if 
'-fsanitize=address' not in self.emcc_args: # test EXPORT_ALL (this is not compatible with asan, which doesn't # support dynamic linking at all or the LINKING flag) self.set_setting('EXPORTED_FUNCTIONS', []) self.set_setting('EXPORT_ALL', 1) self.set_setting('LINKABLE', 1) self.do_run_in_out_file_test('tests', 'core', 'test_emscripten_api') def test_emscripten_run_script_string_int(self): src = r''' #include <stdio.h> #include <emscripten.h> int main() { const char *str = emscripten_run_script_string("1+1"); printf("got string: %s\n", str); return 0; } ''' self.do_run(src, '''got string: 2''') def test_emscripten_run_script_string_utf8(self): src = r''' #include <stdio.h> #include <stdlib.h> #include <string.h> #include <emscripten.h> int main() { const char *str = emscripten_run_script_string("'\\u2603 \\u2603 \\u2603 Hello!'"); printf("length of returned string: %d. Position of substring 'Hello': %d\n", strlen(str), strstr(str, "Hello")-str); return 0; } ''' self.do_run(src, '''length of returned string: 18. Position of substring 'Hello': 12''') def test_emscripten_run_script_string_null(self): src = r''' #include <stdio.h> #include <emscripten.h> int main() { const char *str = emscripten_run_script_string("void(0)"); if (str) { printf("got string: %s\n", str); } else { puts("got null"); } return 0; } ''' self.do_run(src, 'got null') def test_emscripten_get_now(self): self.banned_js_engines = [V8_ENGINE] # timer limitations in v8 shell # needs to flush stdio streams self.set_setting('EXIT_RUNTIME', 1) if self.run_name == 'asm2': self.emcc_args += ['--closure', '1'] # Use closure here for some additional coverage self.do_run(open(path_from_root('tests', 'emscripten_get_now.cpp')).read(), 'Timer resolution is good') def test_emscripten_get_compiler_setting(self): test_path = path_from_root('tests', 'core', 'emscripten_get_compiler_setting') src, output = (test_path + s for s in ('.c', '.out')) old = self.get_setting('ASSERTIONS') # with assertions, a nice message is shown self.set_setting('ASSERTIONS', 1) self.do_run(open(src).read(), 'You must build with -s RETAIN_COMPILER_SETTINGS=1', assert_returncode=None) self.set_setting('ASSERTIONS', old) self.set_setting('RETAIN_COMPILER_SETTINGS', 1) self.do_run(open(src).read(), open(output).read().replace('waka', shared.EMSCRIPTEN_VERSION)) @no_fastcomp('ASYNCIFY has been removed from fastcomp') def test_emscripten_has_asyncify(self): src = r''' #include <stdio.h> #include <emscripten.h> int main() { printf("%d\n", emscripten_has_asyncify()); return 0; } ''' self.set_setting('ASYNCIFY', 0) self.do_run(src, '0') self.set_setting('ASYNCIFY', 1) self.do_run(src, '1') # TODO: test only worked in non-fastcomp def test_inlinejs(self): self.skipTest('non-fastcomp is deprecated and fails in 3.5') # only supports EM_ASM self.do_run_in_out_file_test('tests', 'core', 'test_inlinejs') if self.emcc_args == []: # opts will eliminate the comments out = open('src.cpp.o.js').read() for i in range(1, 5): assert ('comment%d' % i) in out # TODO: test only worked in non-fastcomp def test_inlinejs2(self): self.skipTest('non-fastcomp is deprecated and fails in 3.5') # only supports EM_ASM self.do_run_in_out_file_test('tests', 'core', 'test_inlinejs2') def test_inlinejs3(self): if self.is_wasm(): self.skipTest('wasm requires a proper asm module') self.emcc_args.append('-Wno-almost-asm') test_path = path_from_root('tests', 'core', 'test_inlinejs3') src, output = (test_path + s for s in ('.c', '.out')) self.do_run_in_out_file_test('tests', 'core', 'test_inlinejs3') 
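    # second pass: strip the emscripten_debugger() call out of the source and
    # re-run against the same expected output, checking that the build still
    # validates without the debugger intrinsic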
print('no debugger, check validation') src = open(src).read().replace('emscripten_debugger();', '') self.do_run(src, open(output).read()) def test_inlinejs4(self): self.do_run(r''' #include <emscripten.h> #define TO_STRING_INNER(x) #x #define TO_STRING(x) TO_STRING_INNER(x) #define assert_msg(msg, file, line) EM_ASM( throw 'Assert (' + msg + ') failed in ' + file + ':' + line + '!'; ) #define assert(expr) { \ if (!(expr)) { \ assert_msg(#expr, TO_STRING(__FILE__), TO_STRING(__LINE__)); \ } \ } int main(int argc, char **argv) { assert(argc != 17); assert(false); return 0; } ''', 'false', assert_returncode=None) def test_em_asm(self): self.do_run_in_out_file_test('tests', 'core', 'test_em_asm') self.do_run_in_out_file_test('tests', 'core', 'test_em_asm', force_c=True) # Tests various different ways to invoke the EM_ASM(), EM_ASM_INT() and EM_ASM_DOUBLE() macros. @no_asan('Cannot use ASan: test depends exactly on heap size') def test_em_asm_2(self): self.do_run_in_out_file_test('tests', 'core', 'test_em_asm_2') self.do_run_in_out_file_test('tests', 'core', 'test_em_asm_2', force_c=True) # Tests various different ways to invoke the MAIN_THREAD_EM_ASM(), MAIN_THREAD_EM_ASM_INT() and MAIN_THREAD_EM_ASM_DOUBLE() macros. # This test is identical to test_em_asm_2, just search-replaces EM_ASM to MAIN_THREAD_EM_ASM on the test file. That way if new # test cases are added to test_em_asm_2.cpp for EM_ASM, they will also get tested in MAIN_THREAD_EM_ASM form. @no_asan('Cannot use ASan: test depends exactly on heap size') def test_main_thread_em_asm(self): src = open(path_from_root('tests', 'core', 'test_em_asm_2.cpp')).read() create_test_file('src.cpp', src.replace('EM_ASM', 'MAIN_THREAD_EM_ASM')) expected_result = open(path_from_root('tests', 'core', 'test_em_asm_2.out')).read() create_test_file('result.out', expected_result.replace('EM_ASM', 'MAIN_THREAD_EM_ASM')) self.do_run_from_file('src.cpp', 'result.out') self.do_run_from_file('src.cpp', 'result.out', force_c=True) def test_main_thread_async_em_asm(self): self.do_run_in_out_file_test('tests', 'core', 'test_main_thread_async_em_asm') self.do_run_in_out_file_test('tests', 'core', 'test_main_thread_async_em_asm', force_c=True) # Tests MAIN_THREAD_EM_ASM_INT() function call with different signatures. def test_main_thread_em_asm_signatures(self): self.do_run_in_out_file_test('tests', 'core', 'test_em_asm_signatures', assert_returncode=None) def test_em_asm_unicode(self): self.do_run_in_out_file_test('tests', 'core', 'test_em_asm_unicode') self.do_run_in_out_file_test('tests', 'core', 'test_em_asm_unicode', force_c=True) def test_em_asm_types(self): self.do_run_in_out_file_test('tests', 'core', 'test_em_asm_types') self.do_run_in_out_file_test('tests', 'core', 'test_em_asm_types', force_c=True) def test_em_asm_unused_arguments(self): self.do_run_in_out_file_test('tests', 'core', 'test_em_asm_unused_arguments') # Verify that EM_ASM macros support getting called with multiple arities. # Maybe tests will later be joined into larger compilation units? # Then this must still be compiled separately from other code using EM_ASM # macros with arities 1-3. Otherwise this may incorrectly report a success. 
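  # For context, a hedged sketch of what "different arities" looks like at the
  # EM_ASM call sites (this mirrors the documented emscripten.h macro usage, not
  # necessarily the exact source of the test below):
  #
  #   EM_ASM(console.log('zero args'));
  #   EM_ASM({ console.log($0); }, 1);
  #   EM_ASM({ console.log($0, $1, $2); }, 1, 2, 3);
  #
  # the parameter-pack test checks that calls with different argument counts can
  # live in one compilation unit without confusing each other.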
def test_em_asm_parameter_pack(self): self.do_run_in_out_file_test('tests', 'core', 'test_em_asm_parameter_pack') def test_em_asm_arguments_side_effects(self): self.do_run_in_out_file_test('tests', 'core', 'test_em_asm_arguments_side_effects') self.do_run_in_out_file_test('tests', 'core', 'test_em_asm_arguments_side_effects', force_c=True) @parameterized({ 'normal': ([],), 'linked': (['-s', 'MAIN_MODULE'],), }) def test_em_js(self, args): if 'MAIN_MODULE' in args and self.get_setting('WASM') == 0: self.skipTest('main module support for non-wasm') if '-fsanitize=address' in self.emcc_args: self.skipTest('no dynamic library support in asan yet') self.emcc_args += args + ['-s', 'EXPORTED_FUNCTIONS=["_main","_malloc"]'] self.do_run_in_out_file_test('tests', 'core', 'test_em_js') self.do_run_in_out_file_test('tests', 'core', 'test_em_js', force_c=True) def test_runtime_stacksave(self): src = open(path_from_root('tests', 'core', 'test_runtime_stacksave.c')).read() self.do_run(src, 'success') # Tests that -s MINIMAL_RUNTIME=1 builds can utilize -s ALLOW_MEMORY_GROWTH=1 option. def test_minimal_runtime_memorygrowth(self): if self.has_changed_setting('ALLOW_MEMORY_GROWTH'): self.skipTest('test needs to modify memory growth') self.set_setting('MINIMAL_RUNTIME', 1) src = open(path_from_root('tests', 'core', 'test_memorygrowth.c')).read() # Fail without memory growth self.do_run(src, 'OOM', assert_returncode=None) # Win with it self.emcc_args += ['-Wno-almost-asm', '-s', 'ALLOW_MEMORY_GROWTH'] self.do_run(src, '*pre: hello,4.955*\n*hello,4.955*\n*hello,4.955*') def test_memorygrowth(self): if self.has_changed_setting('ALLOW_MEMORY_GROWTH'): self.skipTest('test needs to modify memory growth') if self.maybe_closure(): # verify NO_DYNAMIC_EXECUTION is compatible with closure self.set_setting('DYNAMIC_EXECUTION', 0) # With typed arrays in particular, it is dangerous to use more memory than INITIAL_MEMORY, # since we then need to enlarge the heap(s). src = open(path_from_root('tests', 'core', 'test_memorygrowth.c')).read() # Fail without memory growth self.do_run(src, 'OOM', assert_returncode=None) fail = open('src.cpp.o.js').read() # Win with it self.emcc_args += ['-Wno-almost-asm', '-s', 'ALLOW_MEMORY_GROWTH'] self.do_run(src, '*pre: hello,4.955*\n*hello,4.955*\n*hello,4.955*') win = open('src.cpp.o.js').read() if '-O2' in self.emcc_args and not self.is_wasm(): # Make sure ALLOW_MEMORY_GROWTH generates different code (should be less optimized) possible_starts = ['// EMSCRIPTEN_START_FUNCS', 'var TOTAL_STACK'] code_start = None for s in possible_starts: if fail.find(s) >= 0: code_start = s break assert code_start is not None, 'Generated code must contain one of ' + str(possible_starts) fail = fail[fail.find(code_start):] win = win[win.find(code_start):] assert len(fail) < len(win), 'failing code - without memory growth on - is more optimized, and smaller' + str([len(fail), len(win)]) # Tracing of memory growths should work self.set_setting('EMSCRIPTEN_TRACING', 1) self.emcc_args += ['--tracing'] self.do_run(src, '*pre: hello,4.955*\n*hello,4.955*\n*hello,4.955*') def test_memorygrowth_2(self): if self.has_changed_setting('ALLOW_MEMORY_GROWTH'): self.skipTest('test needs to modify memory growth') # With typed arrays in particular, it is dangerous to use more memory than INITIAL_MEMORY, # since we then need to enlarge the heap(s). 
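    # in short: the first do_run below is built without growth and is expected
    # to abort with 'OOM' once it allocates past INITIAL_MEMORY; the second run
    # adds -s ALLOW_MEMORY_GROWTH and the same program is expected to succeed,
    # because the heap can then be enlarged at runtime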
src = open(path_from_root('tests', 'core', 'test_memorygrowth_2.c')).read() # Fail without memory growth self.do_run(src, 'OOM', assert_returncode=None) fail = open('src.cpp.o.js').read() # Win with it self.emcc_args += ['-Wno-almost-asm', '-s', 'ALLOW_MEMORY_GROWTH'] self.do_run(src, '*pre: hello,4.955*\n*hello,4.955*\n*hello,4.955*') win = open('src.cpp.o.js').read() if '-O2' in self.emcc_args and not self.is_wasm(): # Make sure ALLOW_MEMORY_GROWTH generates different code (should be less optimized) assert len(fail) < len(win), 'failing code - without memory growth on - is more optimized, and smaller' + str([len(fail), len(win)]) def test_memorygrowth_3(self): if self.has_changed_setting('ALLOW_MEMORY_GROWTH'): self.skipTest('test needs to modify memory growth') # checks handling of malloc failure properly self.emcc_args += ['-s', 'ALLOW_MEMORY_GROWTH=0', '-s', 'ABORTING_MALLOC=0', '-s', 'SAFE_HEAP'] self.do_run_in_out_file_test('tests', 'core', 'test_memorygrowth_3') @also_with_impure_standalone_wasm def test_memorygrowth_MAXIMUM_MEMORY(self): if self.has_changed_setting('ALLOW_MEMORY_GROWTH'): self.skipTest('test needs to modify memory growth') if not self.is_wasm(): self.skipTest('wasm memory specific test') # check that memory growth does not exceed the wasm mem max limit self.emcc_args += ['-s', 'ALLOW_MEMORY_GROWTH=1', '-s', 'INITIAL_MEMORY=64Mb', '-s', 'MAXIMUM_MEMORY=100Mb'] self.do_run_in_out_file_test('tests', 'core', 'test_memorygrowth_wasm_mem_max') def test_memorygrowth_linear_step(self): if self.has_changed_setting('ALLOW_MEMORY_GROWTH'): self.skipTest('test needs to modify memory growth') if not self.is_wasm(): self.skipTest('wasm memory specific test') # check that memory growth does not exceed the wasm mem max limit and is exactly or one step below the wasm mem max self.emcc_args += ['-s', 'ALLOW_MEMORY_GROWTH=1', '-s', 'TOTAL_STACK=1Mb', '-s', 'INITIAL_MEMORY=64Mb', '-s', 'MAXIMUM_MEMORY=130Mb', '-s', 'MEMORY_GROWTH_LINEAR_STEP=1Mb'] self.do_run_in_out_file_test('tests', 'core', 'test_memorygrowth_memory_growth_step') def test_memorygrowth_geometric_step(self): if self.has_changed_setting('ALLOW_MEMORY_GROWTH'): self.skipTest('test needs to modify memory growth') if not self.is_wasm(): self.skipTest('wasm memory specific test') self.emcc_args += ['-s', 'ALLOW_MEMORY_GROWTH=1', '-s', 'MEMORY_GROWTH_GEOMETRIC_STEP=15', '-s', 'MEMORY_GROWTH_GEOMETRIC_CAP=0'] self.do_run_in_out_file_test('tests', 'core', 'test_memorygrowth_geometric_step') def test_memorygrowth_3_force_fail_reallocBuffer(self): if self.has_changed_setting('ALLOW_MEMORY_GROWTH'): self.skipTest('test needs to modify memory growth') self.emcc_args += ['-Wno-almost-asm', '-s', 'ALLOW_MEMORY_GROWTH=1', '-s', 'TEST_MEMORY_GROWTH_FAILS=1'] self.do_run_in_out_file_test('tests', 'core', 'test_memorygrowth_3') @parameterized({ 'nogrow': (['-s', 'ALLOW_MEMORY_GROWTH=0'],), 'grow': (['-s', 'ALLOW_MEMORY_GROWTH=1'],) }) @no_asan('requires more memory when growing') def test_aborting_new(self, args): # test that C++ new properly errors if we fail to malloc when growth is # enabled, with or without growth self.emcc_args += ['-Wno-almost-asm', '-s', 'MAXIMUM_MEMORY=18MB'] + args self.do_run_in_out_file_test('tests', 'core', 'test_aborting_new') @no_asmjs() @no_wasm2js('no WebAssembly.Memory()') @no_asan('ASan alters the memory size') def test_module_wasm_memory(self): self.emcc_args += ['--pre-js', path_from_root('tests', 'core', 'test_module_wasm_memory.js')] src = open(path_from_root('tests', 'core', 
'test_module_wasm_memory.c')).read() self.do_run(src, 'success', force_c=True) def test_ssr(self): # struct self-ref src = ''' #include <stdio.h> // see related things in openjpeg typedef struct opj_mqc_state { unsigned int qeval; int mps; struct opj_mqc_state *nmps; struct opj_mqc_state *nlps; } opj_mqc_state_t; static opj_mqc_state_t mqc_states[4] = { {0x5600, 0, &mqc_states[2], &mqc_states[3]}, {0x5602, 1, &mqc_states[3], &mqc_states[2]}, }; int main() { printf("*%d*\\n", (int)(mqc_states+1)-(int)mqc_states); for (int i = 0; i < 2; i++) printf("%d:%d,%d,%d,%d\\n", i, mqc_states[i].qeval, mqc_states[i].mps, (int)mqc_states[i].nmps-(int)mqc_states, (int)mqc_states[i].nlps-(int)mqc_states); return 0; } ''' self.do_run(src, '''*16*\n0:22016,0,32,48\n1:22018,1,48,32\n''') def test_tinyfuncstr(self): self.do_run_in_out_file_test('tests', 'core', 'test_tinyfuncstr') def test_llvmswitch(self): self.do_run_in_out_file_test('tests', 'core', 'test_llvmswitch') def test_cxx_version(self): self.do_run_in_out_file_test('tests', 'core', 'test_cxx_version') @no_wasm2js('massive switches can break js engines') def test_bigswitch(self): src = open(path_from_root('tests', 'bigswitch.cpp')).read() self.do_run(src, '''34962: GL_ARRAY_BUFFER (0x8892) 26214: what? 35040: GL_STREAM_DRAW (0x88E0) 3060: what? ''', args=['34962', '26214', '35040', str(0xbf4)], assert_returncode=None) @no_wasm2js('massive switches can break js engines') @is_slow_test def test_biggerswitch(self): if self.is_wasm_backend(): if not is_optimizing(self.emcc_args): self.skipTest('nodejs takes >6GB to compile this if the wasm is not optimized, which OOMs, see https://github.com/emscripten-core/emscripten/issues/7928#issuecomment-458308453') if '-Os' in self.emcc_args: self.skipTest('hangs in recent upstream clang, see https://bugs.llvm.org/show_bug.cgi?id=43468') num_cases = 20000 switch_case = run_process([PYTHON, path_from_root('tests', 'gen_large_switchcase.py'), str(num_cases)], stdout=PIPE, stderr=PIPE).stdout self.do_run(switch_case, '''58996: 589965899658996 59297: 592975929759297 59598: default 59899: 598995989959899 Success!''') def test_indirectbr(self): self.emcc_args = [x for x in self.emcc_args if x != '-g'] self.do_run_in_out_file_test('tests', 'core', 'test_indirectbr') @no_asan('local count too large for VMs') @no_wasm2js('extremely deep nesting, hits stack limit on some VMs') def test_indirectbr_many(self): self.do_run_in_out_file_test('tests', 'core', 'test_indirectbr_many') def test_pack(self): src = ''' #include <stdio.h> #include <string.h> #pragma pack(push,1) typedef struct header { unsigned char id; unsigned short colour; unsigned char desc; } header; #pragma pack(pop) typedef struct fatheader { unsigned char id; unsigned short colour; unsigned char desc; } fatheader; int main( int argc, const char *argv[] ) { header h, *ph = 0; fatheader fh, *pfh = 0; printf("*%d,%d,%d*\\n", sizeof(header), (int)((int)&h.desc - (int)&h.id), (int)(&ph[1])-(int)(&ph[0])); printf("*%d,%d,%d*\\n", sizeof(fatheader), (int)((int)&fh.desc - (int)&fh.id), (int)(&pfh[1])-(int)(&pfh[0])); return 0; } ''' self.do_run(src, '*4,3,4*\n*6,4,6*') def test_varargs(self): self.do_run_in_out_file_test('tests', 'core', 'test_varargs') @no_wasm_backend('Calling varargs across function calls is undefined behavior in C,' ' and asmjs and wasm implement it differently.') def test_varargs_multi(self): self.do_run_in_out_file_test('tests', 'core', 'test_varargs_multi') @unittest.skip('clang cannot compile this code with that target yet') def 
test_varargs_byval(self): src = r''' #include <stdio.h> #include <stdarg.h> typedef struct type_a { union { double f; void *p; int i; short sym; } value; } type_a; enum mrb_vtype { MRB_TT_FALSE = 0, /* 0 */ MRB_TT_CLASS = 9 /* 9 */ }; typedef struct type_b { enum mrb_vtype tt:8; } type_b; void print_type_a(int argc, ...); void print_type_b(int argc, ...); int main(int argc, char *argv[]) { type_a a; type_b b; a.value.p = (void*) 0x12345678; b.tt = MRB_TT_CLASS; printf("The original address of a is: %p\n", a.value.p); printf("The original type of b is: %d\n", b.tt); print_type_a(1, a); print_type_b(1, b); return 0; } void print_type_a(int argc, ...) { va_list ap; type_a a; va_start(ap, argc); a = va_arg(ap, type_a); va_end(ap); printf("The current address of a is: %p\n", a.value.p); } void print_type_b(int argc, ...) { va_list ap; type_b b; va_start(ap, argc); b = va_arg(ap, type_b); va_end(ap); printf("The current type of b is: %d\n", b.tt); } ''' self.do_run(src, '''The original address of a is: 0x12345678 The original type of b is: 9 The current address of a is: 0x12345678 The current type of b is: 9 ''') def test_functionpointer_libfunc_varargs(self): self.do_run_in_out_file_test('tests', 'core', 'test_functionpointer_libfunc_varargs') def test_structbyval(self): self.set_setting('INLINING_LIMIT', 50) # part 1: make sure that normally, passing structs by value works src = r''' #include <stdio.h> struct point { int x, y; }; void dump(struct point p) { p.x++; // should not modify p.y++; // anything in the caller! printf("dump: %d,%d\n", p.x, p.y); } void dumpmod(struct point *p) { p->x++; // should not modify p->y++; // anything in the caller! printf("dump: %d,%d\n", p->x, p->y); } int main( int argc, const char *argv[] ) { point p = { 54, 2 }; printf("pre: %d,%d\n", p.x, p.y); dump(p); void (*dp)(point p) = dump; // And, as a function pointer dp(p); printf("post: %d,%d\n", p.x, p.y); dumpmod(&p); dumpmod(&p); printf("last: %d,%d\n", p.x, p.y); return 0; } ''' self.do_run(src, 'pre: 54,2\ndump: 55,3\ndump: 55,3\npost: 54,2\ndump: 55,3\ndump: 56,4\nlast: 56,4') def test_stdlibs(self): # safe heap prints a warning that messes up our output. self.set_setting('SAFE_HEAP', 0) # needs atexit self.set_setting('EXIT_RUNTIME', 1) self.do_run_in_out_file_test('tests', 'core', 'test_stdlibs') def test_stdbool(self): src = r''' #include <stdio.h> #include <stdbool.h> int main() { bool x = true; bool y = false; printf("*%d*\n", x != y); return 0; } ''' self.do_run(src, '*1*', force_c=True) def test_strtoll_hex(self): # tests strtoll for hex strings (0x...) self.do_run_in_out_file_test('tests', 'core', 'test_strtoll_hex') def test_strtoll_dec(self): # tests strtoll for decimal strings (0x...) self.do_run_in_out_file_test('tests', 'core', 'test_strtoll_dec') def test_strtoll_bin(self): # tests strtoll for binary strings (0x...) self.do_run_in_out_file_test('tests', 'core', 'test_strtoll_bin') def test_strtoll_oct(self): # tests strtoll for decimal strings (0x...) self.do_run_in_out_file_test('tests', 'core', 'test_strtoll_oct') def test_strtol_hex(self): # tests strtoll for hex strings (0x...) self.do_run_in_out_file_test('tests', 'core', 'test_strtol_hex') def test_strtol_dec(self): # tests strtoll for decimal strings (0x...) self.do_run_in_out_file_test('tests', 'core', 'test_strtol_dec') def test_strtol_bin(self): # tests strtoll for binary strings (0x...) self.do_run_in_out_file_test('tests', 'core', 'test_strtol_bin') def test_strtol_oct(self): # tests strtoll for decimal strings (0x...) 
self.do_run_in_out_file_test('tests', 'core', 'test_strtol_oct') @also_with_standalone_wasm def test_atexit(self): # Confirms they are called in the proper reverse order self.set_setting('EXIT_RUNTIME', 1) self.do_run_in_out_file_test('tests', 'core', 'test_atexit') def test_atexit_threads(self): # also tests thread exit (__cxa_thread_atexit) self.set_setting('EXIT_RUNTIME', 1) self.do_run_in_out_file_test('tests', 'core', 'test_atexit_threads') @no_asan('test relies on null pointer reads') def test_pthread_specific(self): src = open(path_from_root('tests', 'pthread', 'specific.c')).read() expected = open(path_from_root('tests', 'pthread', 'specific.c.txt')).read() self.do_run(src, expected, force_c=True) def test_pthread_equal(self): self.do_run_in_out_file_test('tests', 'pthread', 'test_pthread_equal') def test_tcgetattr(self): src = open(path_from_root('tests', 'termios', 'test_tcgetattr.c')).read() self.do_run(src, 'success', force_c=True) def test_time(self): self.do_run_in_out_file_test('tests', 'core', 'test_time') for tz in ['EST+05EDT', 'UTC+0']: print('extra tz test:', tz) with env_modify({'TZ': tz}): # Run the test with different time zone settings if # possible. It seems that the TZ environment variable does not # work all the time (at least it's not well respected by # Node.js on Windows), but it does no harm either. self.do_run_in_out_file_test('tests', 'core', 'test_time') def test_timeb(self): # Confirms they are called in reverse order self.do_run_in_out_file_test('tests', 'core', 'test_timeb') def test_time_c(self): self.do_run_in_out_file_test('tests', 'core', 'test_time_c') def test_gmtime(self): self.do_run_in_out_file_test('tests', 'core', 'test_gmtime') def test_strptime_tm(self): self.do_run_in_out_file_test('tests', 'core', 'test_strptime_tm') def test_strptime_days(self): self.do_run_in_out_file_test('tests', 'core', 'test_strptime_days') def test_strptime_reentrant(self): # needs to flush stdio streams self.set_setting('EXIT_RUNTIME', 1) self.do_run_in_out_file_test('tests', 'core', 'test_strptime_reentrant') def test_strftime(self): self.do_run_in_out_file_test('tests', 'core', 'test_strftime') @no_wasm_backend("wasm backend doesn't compile intentional segfault into an abort() call. " "It also doesn't segfault.") def test_intentional_fault(self): # Some programs intentionally segfault themselves, we should compile that into a throw src = open(path_from_root('tests', 'core', 'test_intentional_fault.c')).read() self.do_run(src, 'abort(' if self.run_name != 'asm2g' else 'abort(segmentation fault', assert_returncode=None) def test_trickystring(self): self.do_run_in_out_file_test('tests', 'core', 'test_trickystring') def test_statics(self): self.do_run_in_out_file_test('tests', 'core', 'test_statics') def test_copyop(self): # clang generated code is vulnerable to this, as it uses # memcpy for assignments, with hardcoded numbers of bytes # (llvm-gcc copies items one by one). 
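    # rough illustration of the kind of code this exercises (an assumption about
    # the shape of test_copyop.cpp, not a quote from it):
    #
    #   struct S { char data[16]; };
    #   S a, b;
    #   a = b;   // clang may lower this assignment to a memcpy of a hardcoded size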
self.do_run_in_out_file_test('tests', 'core', 'test_copyop') def test_memcpy_memcmp(self): self.banned_js_engines = [V8_ENGINE] # Currently broken under V8_ENGINE but not node def check(result, err): result = result.replace('\n \n', '\n') # remove extra node output return hashlib.sha1(result.encode('utf-8')).hexdigest() self.do_run_in_out_file_test('tests', 'core', 'test_memcpy_memcmp', output_nicerizer=check) def test_memcpy2(self): self.do_run_in_out_file_test('tests', 'core', 'test_memcpy2', assert_returncode=None) def test_memcpy3(self): self.do_run_in_out_file_test('tests', 'core', 'test_memcpy3', assert_returncode=None) @also_with_standalone_wasm def test_memcpy_alignment(self): self.do_run(open(path_from_root('tests', 'test_memcpy_alignment.cpp')).read(), 'OK.') def test_memset_alignment(self): self.do_run(open(path_from_root('tests', 'test_memset_alignment.cpp')).read(), 'OK.') def test_memset(self): self.do_run_in_out_file_test('tests', 'core', 'test_memset', assert_returncode=None) def test_getopt(self): self.do_run_in_out_file_test('tests', 'core', 'test_getopt', args=['-t', '12', '-n', 'foobar']) def test_getopt_long(self): self.do_run_in_out_file_test('tests', 'core', 'test_getopt_long', args=['--file', 'foobar', '-b']) def test_memmove(self): self.do_run_in_out_file_test('tests', 'core', 'test_memmove') def test_memmove2(self): self.do_run_in_out_file_test('tests', 'core', 'test_memmove2', assert_returncode=None) def test_memmove3(self): self.do_run_in_out_file_test('tests', 'core', 'test_memmove3') def test_flexarray_struct(self): self.do_run_in_out_file_test('tests', 'core', 'test_flexarray_struct') def test_bsearch(self): self.do_run_in_out_file_test('tests', 'core', 'test_bsearch') @no_wasm_backend("https://github.com/emscripten-core/emscripten/issues/9039") def test_stack_overflow(self): self.set_setting('ASSERTIONS', 1) self.do_run(open(path_from_root('tests', 'core', 'stack_overflow.cpp')).read(), 'Stack overflow!', assert_returncode=None) def test_stackAlloc(self): self.do_run_in_out_file_test('tests', 'core', 'stackAlloc') def test_nestedstructs(self): src = ''' #include <stdio.h> #include "emscripten.h" struct base { int x; float y; union { int a; float b; }; char c; }; struct hashtableentry { int key; base data; }; struct hashset { typedef hashtableentry entry; struct chain { entry elem; chain *next; }; // struct chainchunk { chain chains[100]; chainchunk *next; }; }; struct hashtable : hashset { hashtable() { base *b = NULL; entry *e = NULL; chain *c = NULL; printf("*%d,%d,%d,%d,%d,%d|%d,%d,%d,%d,%d,%d,%d,%d|%d,%d,%d,%d,%d,%d,%d,%d,%d,%d*\\n", sizeof(base), int(&(b->x)), int(&(b->y)), int(&(b->a)), int(&(b->b)), int(&(b->c)), sizeof(hashtableentry), int(&(e->key)), int(&(e->data)), int(&(e->data.x)), int(&(e->data.y)), int(&(e->data.a)), int(&(e->data.b)), int(&(e->data.c)), sizeof(hashset::chain), int(&(c->elem)), int(&(c->next)), int(&(c->elem.key)), int(&(c->elem.data)), int(&(c->elem.data.x)), int(&(c->elem.data.y)), int(&(c->elem.data.a)), int(&(c->elem.data.b)), int(&(c->elem.data.c)) ); } }; struct B { char buffer[62]; int last; char laster; char laster2; }; struct Bits { unsigned short A : 1; unsigned short B : 1; unsigned short C : 1; unsigned short D : 1; unsigned short x1 : 1; unsigned short x2 : 1; unsigned short x3 : 1; unsigned short x4 : 1; }; int main() { hashtable t; // Part 2 - the char[] should be compressed, BUT have a padding space at the end so the next // one is aligned properly. Also handle char; char; etc. properly. 
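      // With the "bloated" C/C++-compatible layout this means: buffer[] at offset 0,
      // last padded up to offset 64, laster at 68, laster2 at 69, and sizeof(B) == 72,
      // which is exactly what the expected output string of this test encodes.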
B *b = NULL; printf("*%d,%d,%d,%d,%d,%d,%d,%d,%d*\\n", int(b), int(&(b->buffer)), int(&(b->buffer[0])), int(&(b->buffer[1])), int(&(b->buffer[2])), int(&(b->last)), int(&(b->laster)), int(&(b->laster2)), sizeof(B)); // Part 3 - bitfields, and small structures Bits *b2 = NULL; printf("*%d*\\n", sizeof(Bits)); return 0; } ''' # Bloated memory; same layout as C/C++ self.do_run(src, '*16,0,4,8,8,12|20,0,4,4,8,12,12,16|24,0,20,0,4,4,8,12,12,16*\n*0,0,0,1,2,64,68,69,72*\n*2*') def prep_dlfcn_lib(self): self.clear_setting('MAIN_MODULE') self.set_setting('SIDE_MODULE') def prep_dlfcn_main(self): self.set_setting('MAIN_MODULE') self.clear_setting('SIDE_MODULE') create_test_file('lib_so_pre.js', ''' if (!Module['preRun']) Module['preRun'] = []; Module['preRun'].push(function() { FS.createDataFile('/', 'liblib.so', %s, true, false, false); }); ''' % str(list(bytearray(open('liblib.so', 'rb').read())))) self.emcc_args += ['--pre-js', 'lib_so_pre.js'] def build_dlfcn_lib(self, lib_src, dirname, filename): if self.get_setting('WASM'): # emcc emits a wasm in this case self.build(lib_src, dirname, filename, js_outfile=False) shutil.move(filename + '.o.wasm', os.path.join(dirname, 'liblib.so')) else: self.build(lib_src, dirname, filename) shutil.move(filename + '.o.js', os.path.join(dirname, 'liblib.so')) @needs_dlfcn def test_dlfcn_missing(self): self.set_setting('MAIN_MODULE', 1) if self.has_changed_setting('ASSERTIONS'): self.skipTest('test needs to customize ASSERTIONS') self.set_setting('ASSERTIONS', 1) src = r''' #include <dlfcn.h> #include <stdio.h> #include <assert.h> int main() { void* lib_handle = dlopen("libfoo.so", RTLD_NOW); assert(!lib_handle); printf("error: %s\n", dlerror()); return 0; } ''' self.do_run(src, 'error: Could not load dynamic lib: libfoo.so\nError: No such file or directory') print('without assertions, the error is less clear') self.set_setting('ASSERTIONS', 0) self.do_run(src, 'error: Could not load dynamic lib: libfoo.so\nError: FS error') @needs_dlfcn def test_dlfcn_basic(self): self.prep_dlfcn_lib() lib_src = ''' #include <cstdio> class Foo { public: Foo() { puts("Constructing lib object."); } }; Foo global; ''' dirname = self.get_dir() filename = os.path.join(dirname, 'liblib.cpp') self.build_dlfcn_lib(lib_src, dirname, filename) self.prep_dlfcn_main() src = ''' #include <cstdio> #include <dlfcn.h> class Bar { public: Bar() { puts("Constructing main object."); } }; Bar global; int main() { dlopen("liblib.so", RTLD_NOW); return 0; } ''' self.do_run(src, 'Constructing main object.\nConstructing lib object.\n') @needs_dlfcn def test_dlfcn_i64(self): self.prep_dlfcn_lib() self.set_setting('EXPORTED_FUNCTIONS', ['_foo']) lib_src = ''' int foo(int x) { return (long long)x / (long long)1234; } ''' dirname = self.get_dir() filename = os.path.join(dirname, 'liblib.c') self.build_dlfcn_lib(lib_src, dirname, filename) self.prep_dlfcn_main() self.clear_setting('EXPORTED_FUNCTIONS') src = r''' #include <stdio.h> #include <stdlib.h> #include <dlfcn.h> typedef int (*intfunc)(int); void *p; int main() { p = malloc(1024); void *lib_handle = dlopen("liblib.so", 0); if (!lib_handle) { puts(dlerror()); abort(); } printf("dll handle: %p\n", lib_handle); intfunc x = (intfunc)dlsym(lib_handle, "foo"); printf("foo func handle: %p\n", x); if (p == 0) return 1; if (!x) { printf("dlsym failed: %s\n", dlerror()); return 1; } printf("|%d|\n", x(81234567)); return 0; } ''' self.do_run(src, '|65830|') @needs_dlfcn @no_wasm('EM_ASM in shared wasm modules, stored inside the wasm somehow') def 
test_dlfcn_em_asm(self): self.prep_dlfcn_lib() lib_src = ''' #include <emscripten.h> class Foo { public: Foo() { EM_ASM( out("Constructing lib object.") ); } }; Foo global; ''' filename = 'liblib.cpp' self.build_dlfcn_lib(lib_src, self.get_dir(), filename) self.prep_dlfcn_main() src = ''' #include <emscripten.h> #include <dlfcn.h> class Bar { public: Bar() { EM_ASM( out("Constructing main object.") ); } }; Bar global; int main() { dlopen("liblib.so", RTLD_NOW); EM_ASM( out("All done.") ); return 0; } ''' self.do_run(src, 'Constructing main object.\nConstructing lib object.\nAll done.\n') @needs_dlfcn def test_dlfcn_qsort(self): self.prep_dlfcn_lib() self.set_setting('EXPORTED_FUNCTIONS', ['_get_cmp']) lib_src = ''' int lib_cmp(const void* left, const void* right) { const int* a = (const int*) left; const int* b = (const int*) right; if(*a > *b) return 1; else if(*a == *b) return 0; else return -1; } typedef int (*CMP_TYPE)(const void*, const void*); extern "C" CMP_TYPE get_cmp() { return lib_cmp; } ''' dirname = self.get_dir() filename = os.path.join(dirname, 'liblib.cpp') self.build_dlfcn_lib(lib_src, dirname, filename) self.prep_dlfcn_main() self.set_setting('EXPORTED_FUNCTIONS', ['_main', '_malloc']) src = ''' #include <stdio.h> #include <stdlib.h> #include <dlfcn.h> typedef int (*CMP_TYPE)(const void*, const void*); int main_cmp(const void* left, const void* right) { const int* a = (const int*) left; const int* b = (const int*) right; if(*a < *b) return 1; else if(*a == *b) return 0; else return -1; } int main() { void* lib_handle; CMP_TYPE (*getter_ptr)(); CMP_TYPE lib_cmp_ptr; int arr[5] = {4, 2, 5, 1, 3}; qsort((void*)arr, 5, sizeof(int), main_cmp); printf("Sort with main comparison: "); for (int i = 0; i < 5; i++) { printf("%d ", arr[i]); } printf("\\n"); lib_handle = dlopen("liblib.so", RTLD_NOW); if (lib_handle == NULL) { printf("Could not load lib.\\n"); return 1; } getter_ptr = (CMP_TYPE (*)()) dlsym(lib_handle, "get_cmp"); if (getter_ptr == NULL) { printf("Could not find func.\\n"); return 1; } lib_cmp_ptr = getter_ptr(); qsort((void*)arr, 5, sizeof(int), lib_cmp_ptr); printf("Sort with lib comparison: "); for (int i = 0; i < 5; i++) { printf("%d ", arr[i]); } printf("\\n"); return 0; } ''' self.do_run(src, 'Sort with main comparison: 5 4 3 2 1 *Sort with lib comparison: 1 2 3 4 5 *', output_nicerizer=lambda x, err: x.replace('\n', '*')) if self.get_setting('ASM_JS') and SPIDERMONKEY_ENGINE and os.path.exists(SPIDERMONKEY_ENGINE[0]) and not self.is_wasm(): out = run_js('liblib.so', engine=SPIDERMONKEY_ENGINE, full_output=True, stderr=STDOUT) if 'asm' in out: self.validate_asmjs(out) @needs_dlfcn def test_dlfcn_data_and_fptr(self): # Failing under v8 since: https://chromium-review.googlesource.com/712595 if self.is_wasm(): self.banned_js_engines = [V8_ENGINE] self.prep_dlfcn_lib() lib_src = r''' #include <stdio.h> int theglobal = 42; extern void parent_func(); // a function that is defined in the parent int* lib_get_global_addr() { return &theglobal; } void lib_fptr() { printf("Second calling lib_fptr from main.\n"); parent_func(); // call it also through a pointer, to check indexizing void (*p_f)(); p_f = parent_func; p_f(); } extern "C" void (*func(int x, void(*fptr)()))() { printf("In func: %d\n", x); fptr(); return lib_fptr; } ''' dirname = self.get_dir() filename = os.path.join(dirname, 'liblib.cpp') self.set_setting('EXPORTED_FUNCTIONS', ['_func']) self.build_dlfcn_lib(lib_src, dirname, filename) self.prep_dlfcn_main() src = r''' #include <stdio.h> #include <dlfcn.h> 
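      // (FUNCTYPE, declared below, is a function type taking (int, void(*)()) and
      //  returning a pointer to a void() function - the same signature as "func"
      //  in the side library above.)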
#include <emscripten.h> typedef void (*FUNCTYPE(int, void(*)()))(); FUNCTYPE func; void EMSCRIPTEN_KEEPALIVE parent_func() { printf("parent_func called from child\n"); } void main_fptr() { printf("First calling main_fptr from lib.\n"); } int main() { void* lib_handle; FUNCTYPE* func_fptr; // Test basic lib loading. lib_handle = dlopen("liblib.so", RTLD_NOW); if (lib_handle == NULL) { printf("Could not load lib.\n"); return 1; } // Test looked up function. func_fptr = (FUNCTYPE*) dlsym(lib_handle, "func"); // Load twice to test cache. func_fptr = (FUNCTYPE*) dlsym(lib_handle, "func"); if (func_fptr == NULL) { printf("Could not find func.\n"); return 1; } // Test passing function pointers across module bounds. void (*fptr)() = func_fptr(13, main_fptr); fptr(); // Test global data. int* globaladdr = (int*) dlsym(lib_handle, "theglobal"); if (globaladdr == NULL) { printf("Could not find global.\n"); return 1; } printf("Var: %d\n", *globaladdr); return 0; } ''' self.set_setting('EXPORTED_FUNCTIONS', ['_main']) self.do_run(src, '''\ In func: 13 First calling main_fptr from lib. Second calling lib_fptr from main. parent_func called from child parent_func called from child Var: 42 ''') @needs_dlfcn def test_dlfcn_varargs(self): # this test is not actually valid - it fails natively. the child should fail # to be loaded, not load and successfully see the parent print_ints func self.prep_dlfcn_lib() lib_src = r''' void print_ints(int n, ...); extern "C" void func() { print_ints(2, 13, 42); } ''' dirname = self.get_dir() filename = os.path.join(dirname, 'liblib.cpp') self.set_setting('EXPORTED_FUNCTIONS', ['_func']) self.build_dlfcn_lib(lib_src, dirname, filename) self.prep_dlfcn_main() src = r''' #include <stdarg.h> #include <stdio.h> #include <dlfcn.h> #include <assert.h> void print_ints(int n, ...) 
{ va_list args; va_start(args, n); for (int i = 0; i < n; i++) { printf("%d\n", va_arg(args, int)); } va_end(args); } int main() { void* lib_handle; void (*fptr)(); print_ints(2, 100, 200); lib_handle = dlopen("liblib.so", RTLD_NOW); assert(lib_handle); fptr = (void (*)())dlsym(lib_handle, "func"); fptr(); return 0; } ''' self.set_setting('EXPORTED_FUNCTIONS', ['_main']) self.do_run(src, '100\n200\n13\n42\n') @needs_dlfcn def test_dlfcn_alignment_and_zeroing(self): self.prep_dlfcn_lib() self.set_setting('INITIAL_MEMORY', 16 * 1024 * 1024) lib_src = r''' extern "C" { int prezero = 0; __attribute__((aligned(1024))) int superAligned = 12345; int postzero = 0; } ''' dirname = self.get_dir() filename = os.path.join(dirname, 'liblib.cpp') self.build_dlfcn_lib(lib_src, dirname, filename) for i in range(10): curr = '%d.so' % i shutil.copyfile('liblib.so', curr) self.emcc_args += ['--embed-file', curr] self.prep_dlfcn_main() self.set_setting('INITIAL_MEMORY', 128 * 1024 * 1024) src = r''' #include <stdio.h> #include <stdlib.h> #include <string.h> #include <dlfcn.h> #include <assert.h> #include <emscripten.h> int main() { printf("'prepare' memory with non-zero inited stuff\n"); int num = 120 * 1024 * 1024; // total is 128; we'll use 5*5 = 25 at least, so allocate pretty much all of it void* mem = malloc(num); assert(mem); printf("setting this range to non-zero: %d - %d\n", int(mem), int(mem) + num); memset(mem, 1, num); EM_ASM({ var value = HEAP8[64*1024*1024]; out('verify middle of memory is non-zero: ' + value); assert(value === 1); }); free(mem); for (int i = 0; i < 10; i++) { char curr[] = "?.so"; curr[0] = '0' + i; printf("loading %s\n", curr); void* lib_handle = dlopen(curr, RTLD_NOW); if (!lib_handle) { puts(dlerror()); assert(0); } printf("getting superAligned\n"); int* superAligned = (int*)dlsym(lib_handle, "superAligned"); assert(superAligned); assert(int(superAligned) % 1024 == 0); // alignment printf("checking value of superAligned, at %d\n", superAligned); assert(*superAligned == 12345); // value printf("getting prezero\n"); int* prezero = (int*)dlsym(lib_handle, "prezero"); assert(prezero); printf("checking value of prezero, at %d\n", prezero); assert(*prezero == 0); *prezero = 1; assert(*prezero != 0); printf("getting postzero\n"); int* postzero = (int*)dlsym(lib_handle, "postzero"); printf("checking value of postzero, at %d\n", postzero); assert(postzero); printf("checking value of postzero\n"); assert(*postzero == 0); *postzero = 1; assert(*postzero != 0); } printf("success.\n"); return 0; } ''' self.do_run(src, 'success.\n') @needs_dlfcn def test_dlfcn_self(self): self.set_setting('MAIN_MODULE') self.set_setting('EXPORT_ALL') # TODO(https://github.com/emscripten-core/emscripten/issues/11121) # We link with C++ stdlibs, even when linking with emcc for historical reasons. We can remove # this if this issues is fixed. 
self.emcc_args.append('-nostdlib++') def post(filename): js = open(filename).read() start = js.find('var NAMED_GLOBALS') first = js.find('{', start) last = js.find('}', start) exports = js[first + 1:last] exports = exports.split(',') # ensure there aren't too many globals; we don't want unnamed_addr exports = [e.split(':')[0].strip('"') for e in exports] exports.sort() self.assertGreater(len(exports), 20) # wasm backend includes alias in NAMED_GLOBALS if self.is_wasm_backend(): self.assertLess(len(exports), 56) else: self.assertLess(len(exports), 33) self.do_run_in_out_file_test('tests', 'core', 'test_dlfcn_self', post_build=post) @needs_dlfcn def test_dlfcn_unique_sig(self): self.prep_dlfcn_lib() lib_src = ''' #include <stdio.h> int myfunc(int a, int b, int c, int d, int e, int f, int g, int h, int i, int j, int k, int l, int m) { return 13; } ''' self.set_setting('EXPORTED_FUNCTIONS', ['_myfunc']) dirname = self.get_dir() filename = os.path.join(dirname, 'liblib.c') self.build_dlfcn_lib(lib_src, dirname, filename) self.prep_dlfcn_main() src = ''' #include <assert.h> #include <stdio.h> #include <dlfcn.h> typedef int (*FUNCTYPE)(int, int, int, int, int, int, int, int, int, int, int, int, int); int main() { void *lib_handle; FUNCTYPE func_ptr; lib_handle = dlopen("liblib.so", RTLD_NOW); assert(lib_handle != NULL); func_ptr = (FUNCTYPE)dlsym(lib_handle, "myfunc"); assert(func_ptr != NULL); assert(func_ptr(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) == 13); puts("success"); return 0; } ''' self.set_setting('EXPORTED_FUNCTIONS', ['_main', '_malloc']) self.do_run(src, 'success', force_c=True) @needs_dlfcn def test_dlfcn_info(self): self.prep_dlfcn_lib() lib_src = ''' #include <stdio.h> int myfunc(int a, int b, int c, int d, int e, int f, int g, int h, int i, int j, int k, int l, int m) { return 13; } ''' self.set_setting('EXPORTED_FUNCTIONS', ['_myfunc']) dirname = self.get_dir() filename = os.path.join(dirname, 'liblib.c') self.build_dlfcn_lib(lib_src, dirname, filename) self.prep_dlfcn_main() src = ''' #include <assert.h> #include <stdio.h> #include <string.h> #include <dlfcn.h> typedef int (*FUNCTYPE)(int, int, int, int, int, int, int, int, int, int, int, int, int); int main() { void *lib_handle; FUNCTYPE func_ptr; lib_handle = dlopen("liblib.so", RTLD_NOW); assert(lib_handle != NULL); func_ptr = (FUNCTYPE)dlsym(lib_handle, "myfunc"); assert(func_ptr != NULL); assert(func_ptr(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) == 13); /* Verify that we don't corrupt func_ptr when calling dladdr. */ Dl_info info; memset(&info, 0, sizeof(info)); dladdr(func_ptr, &info); assert(func_ptr != NULL); assert(func_ptr(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) == 13); /* Verify something useful lives in info. */ assert(info.dli_fname != NULL); assert(info.dli_fbase == NULL); assert(info.dli_sname == NULL); assert(info.dli_saddr == NULL); puts("success"); return 0; } ''' self.set_setting('EXPORTED_FUNCTIONS', ['_main', '_malloc']) self.do_run(src, 'success', force_c=True) @needs_dlfcn def test_dlfcn_stacks(self): self.prep_dlfcn_lib() lib_src = ''' #include <assert.h> #include <stdio.h> #include <string.h> int myfunc(const char *input) { char bigstack[1024] = { 0 }; // make sure we didn't just trample the stack! 
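        // ("input" is the 6-character string "foobar", so the snprintf below copies
        //  6 bytes into bigstack and strlen(bigstack) returns 6, which is the value
        //  the caller asserts on.)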
assert(!strcmp(input, "foobar")); snprintf(bigstack, sizeof(bigstack), input); return strlen(bigstack); } ''' self.set_setting('EXPORTED_FUNCTIONS', ['_myfunc']) dirname = self.get_dir() filename = os.path.join(dirname, 'liblib.c') self.build_dlfcn_lib(lib_src, dirname, filename) self.prep_dlfcn_main() src = ''' #include <assert.h> #include <stdio.h> #include <dlfcn.h> #include <string.h> typedef int (*FUNCTYPE)(const char *); int main() { void *lib_handle; FUNCTYPE func_ptr; char str[128]; snprintf(str, sizeof(str), "foobar"); // HACK: Use strcmp in the main executable so that it doesn't get optimized out and the dynamic library // is able to use it. assert(!strcmp(str, "foobar")); lib_handle = dlopen("liblib.so", RTLD_NOW); assert(lib_handle != NULL); func_ptr = (FUNCTYPE)dlsym(lib_handle, "myfunc"); assert(func_ptr != NULL); assert(func_ptr(str) == 6); puts("success"); return 0; } ''' self.set_setting('EXPORTED_FUNCTIONS', ['_main', '_malloc', '_strcmp']) self.do_run(src, 'success', force_c=True) @needs_dlfcn def test_dlfcn_funcs(self): self.prep_dlfcn_lib() lib_src = r''' #include <assert.h> #include <stdio.h> #include <string.h> typedef void (*voidfunc)(); typedef void (*intfunc)(int); void callvoid(voidfunc f) { f(); } void callint(voidfunc f, int x) { f(x); } void void_0() { printf("void 0\n"); } void void_1() { printf("void 1\n"); } voidfunc getvoid(int i) { switch(i) { case 0: return void_0; case 1: return void_1; default: return NULL; } } void int_0(int x) { printf("int 0 %d\n", x); } void int_1(int x) { printf("int 1 %d\n", x); } intfunc getint(int i) { switch(i) { case 0: return int_0; case 1: return int_1; default: return NULL; } } ''' self.set_setting('EXPORTED_FUNCTIONS', ['_callvoid', '_callint', '_getvoid', '_getint']) dirname = self.get_dir() self.build_dlfcn_lib(lib_src, dirname, os.path.join(dirname, 'liblib.c')) self.prep_dlfcn_main() src = r''' #include <assert.h> #include <stdio.h> #include <dlfcn.h> typedef void (*voidfunc)(); typedef void (*intfunc)(int); typedef void (*voidcaller)(voidfunc); typedef void (*intcaller)(intfunc, int); typedef voidfunc (*voidgetter)(int); typedef intfunc (*intgetter)(int); void void_main() { printf("void_main.\n"); } void int_main(int x) { printf("int_main %d\n", x); } int main() { printf("go\n"); void *lib_handle; lib_handle = dlopen("liblib.so", RTLD_NOW); assert(lib_handle != NULL); voidcaller callvoid = (voidcaller)dlsym(lib_handle, "callvoid"); assert(callvoid != NULL); callvoid(void_main); intcaller callint = (intcaller)dlsym(lib_handle, "callint"); assert(callint != NULL); callint(int_main, 201); voidgetter getvoid = (voidgetter)dlsym(lib_handle, "getvoid"); assert(getvoid != NULL); callvoid(getvoid(0)); callvoid(getvoid(1)); intgetter getint = (intgetter)dlsym(lib_handle, "getint"); assert(getint != NULL); callint(getint(0), 54); callint(getint(1), 9000); assert(getint(1000) == NULL); puts("ok"); return 0; } ''' self.set_setting('EXPORTED_FUNCTIONS', ['_main', '_malloc']) self.do_run(src, '''go void_main. 
int_main 201 void 0 void 1 int 0 54 int 1 9000 ok ''', force_c=True) @needs_dlfcn def test_dlfcn_mallocs(self): # will be exhausted without functional malloc/free self.set_setting('INITIAL_MEMORY', 64 * 1024 * 1024) self.prep_dlfcn_lib() lib_src = r''' #include <assert.h> #include <stdio.h> #include <string.h> #include <stdlib.h> void *mallocproxy(int n) { return malloc(n); } void freeproxy(void *p) { free(p); } ''' self.set_setting('EXPORTED_FUNCTIONS', ['_mallocproxy', '_freeproxy']) dirname = self.get_dir() filename = os.path.join(dirname, 'liblib.c') self.build_dlfcn_lib(lib_src, dirname, filename) self.prep_dlfcn_main() src = open(path_from_root('tests', 'dlmalloc_proxy.c')).read() self.set_setting('EXPORTED_FUNCTIONS', ['_main', '_malloc', '_free']) self.do_run(src, '''*294,153*''', force_c=True) @needs_dlfcn def test_dlfcn_longjmp(self): self.prep_dlfcn_lib() lib_src = r''' #include <setjmp.h> #include <stdio.h> void jumpy(jmp_buf buf) { static int i = 0; i++; if (i == 10) longjmp(buf, i); printf("pre %d\n", i); } ''' self.set_setting('EXPORTED_FUNCTIONS', ['_jumpy']) dirname = self.get_dir() filename = os.path.join(dirname, 'liblib.c') self.build_dlfcn_lib(lib_src, dirname, filename) self.prep_dlfcn_main() src = r''' #include <assert.h> #include <stdio.h> #include <dlfcn.h> #include <setjmp.h> typedef void (*jumpfunc)(jmp_buf); int main() { printf("go!\n"); void *lib_handle; lib_handle = dlopen("liblib.so", RTLD_NOW); assert(lib_handle != NULL); jumpfunc jumpy = (jumpfunc)dlsym(lib_handle, "jumpy"); assert(jumpy); jmp_buf buf; int jmpval = setjmp(buf); if (jmpval == 0) { while (1) jumpy(buf); } else { printf("out!\n"); } return 0; } ''' self.set_setting('EXPORTED_FUNCTIONS', ['_main', '_malloc', '_free']) self.do_run(src, '''go! pre 1 pre 2 pre 3 pre 4 pre 5 pre 6 pre 7 pre 8 pre 9 out! ''', force_c=True) # TODO: make this work. need to forward tempRet0 across modules # TODO Enable @with_both_exception_handling (the test is not working now) @needs_dlfcn def zzztest_dlfcn_exceptions(self): self.set_setting('DISABLE_EXCEPTION_CATCHING', 0) self.prep_dlfcn_lib() lib_src = r''' extern "C" { int ok() { return 65; } int fail() { throw 123; } } ''' self.set_setting('EXPORTED_FUNCTIONS', ['_ok', '_fail']) dirname = self.get_dir() filename = os.path.join(dirname, 'liblib.cpp') self.build_dlfcn_lib(lib_src, dirname, filename) self.prep_dlfcn_main() src = r''' #include <assert.h> #include <stdio.h> #include <dlfcn.h> typedef int (*intfunc)(); int main() { printf("go!\n"); void *lib_handle; lib_handle = dlopen("liblib.so", RTLD_NOW); assert(lib_handle != NULL); intfunc okk = (intfunc)dlsym(lib_handle, "ok"); intfunc faill = (intfunc)dlsym(lib_handle, "fail"); assert(okk && faill); try { printf("ok: %d\n", okk()); } catch(...) { printf("wha\n"); } try { printf("fail: %d\n", faill()); } catch(int x) { printf("int %d\n", x); } try { printf("fail: %d\n", faill()); } catch(double x) { printf("caught %f\n", x); } return 0; } ''' self.set_setting('EXPORTED_FUNCTIONS', ['_main', '_malloc', '_free']) self.do_run(src, '''go! 
ok: 65 int 123 ok ''') @needs_dlfcn def test_dlfcn_handle_alloc(self): # verify that dlopen does not allocate already used handles dirname = self.get_dir() def indir(name): return os.path.join(dirname, name) libecho = r''' #include <stdio.h> static struct %(libname)s { %(libname)s() { puts("%(libname)s: loaded"); } } _; ''' self.prep_dlfcn_lib() self.build_dlfcn_lib(libecho % {'libname': 'a'}, dirname, indir('a.cpp')) shutil.move(indir('liblib.so'), indir('liba.so')) self.build_dlfcn_lib(libecho % {'libname': 'b'}, dirname, indir('b.cpp')) shutil.move(indir('liblib.so'), indir('libb.so')) self.set_setting('MAIN_MODULE') self.clear_setting('SIDE_MODULE') self.set_setting('EXPORT_ALL') self.emcc_args += ['--embed-file', '.@/'] # XXX in wasm each lib load currently takes 5MB; default INITIAL_MEMORY=16MB is thus not enough self.set_setting('INITIAL_MEMORY', 32 * 1024 * 1024) src = r''' #include <dlfcn.h> #include <assert.h> #include <stddef.h> int main() { void *liba, *libb, *liba2; int err; liba = dlopen("liba.so", RTLD_NOW); assert(liba != NULL); libb = dlopen("libb.so", RTLD_NOW); assert(liba != NULL); err = dlclose(liba); assert(!err); liba2 = dlopen("liba.so", RTLD_NOW); assert(liba2 != libb); return 0; } ''' self.do_run(src, 'a: loaded\nb: loaded\na: loaded\n') @needs_dlfcn @bleeding_edge_wasm_backend def test_dlfcn_feature_in_lib(self): self.emcc_args.append('-mnontrapping-fptoint') self.prep_dlfcn_lib() lib_src = r''' extern "C" int magic(float x) { return __builtin_wasm_trunc_saturate_s_i32_f32(x); } ''' dirname = self.get_dir() filename = os.path.join(dirname, 'liblib.cpp') self.build_dlfcn_lib(lib_src, dirname, filename) self.prep_dlfcn_main() src = r''' #include <dlfcn.h> #include <stdio.h> #include <stdlib.h> typedef int (*fi)(float); int main() { void *lib_handle = dlopen("liblib.so", 0); if (!lib_handle) { puts(dlerror()); abort(); } fi x = (fi)dlsym(lib_handle, "magic"); if (!x) { puts(dlerror()); abort(); } printf("float: %d.\n", x(42.99)); return 0; } ''' self.do_run(src, 'float: 42.\n') def dylink_test(self, main, side, expected=None, header=None, main_emcc_args=[], force_c=False, need_reverse=True, auto_load=True, **kwargs): if header: create_test_file('header.h', header) old_args = self.emcc_args[:] # side settings self.clear_setting('MAIN_MODULE') self.set_setting('SIDE_MODULE') side_suffix = 'wasm' if self.is_wasm() else 'js' if isinstance(side, list): # side is just a library try_delete('liblib.cpp.o.' + side_suffix) run_process([EMCC] + side + self.get_emcc_args() + ['-o', os.path.join(self.get_dir(), 'liblib.cpp.o.' + side_suffix)]) else: base = 'liblib.cpp' if not force_c else 'liblib.c' try_delete(base + '.o.' + side_suffix) self.build(side, self.get_dir(), base, js_outfile=(side_suffix == 'js')) if force_c: shutil.move(base + '.o.' + side_suffix, 'liblib.cpp.o.' + side_suffix) if SPIDERMONKEY_ENGINE and os.path.exists(SPIDERMONKEY_ENGINE[0]) and not self.is_wasm(): out = run_js('liblib.cpp.o.js', engine=SPIDERMONKEY_ENGINE, full_output=True, stderr=STDOUT) if 'asm' in out: self.validate_asmjs(out) shutil.move('liblib.cpp.o.' 
+ side_suffix, 'liblib.so') # main settings self.set_setting('MAIN_MODULE') self.clear_setting('SIDE_MODULE') if auto_load: self.set_setting('RUNTIME_LINKED_LIBS', ['liblib.so']) self.emcc_args += main_emcc_args if isinstance(main, list): # main is just a library try_delete('src.cpp.o.js') run_process([EMCC] + main + self.emcc_args + self.serialize_settings() + ['-o', 'src.cpp.o.js']) self.do_run(None, expected, no_build=True, **kwargs) else: self.do_run(main, expected, force_c=force_c, **kwargs) self.emcc_args = old_args if need_reverse: # test the reverse as well print('flip') self.dylink_test(side, main, expected, header, main_emcc_args + ['--no-entry'], force_c, need_reverse=False, **kwargs) def do_basic_dylink_test(self, need_reverse=True): self.dylink_test(r''' #include <stdio.h> #include "header.h" int main() { printf("other says %d.\n", sidey()); return 0; } ''', ''' #include "header.h" int sidey() { return 11; } ''', 'other says 11.', 'extern "C" int sidey();', need_reverse=need_reverse) @needs_dlfcn def test_dylink_basics(self): self.do_basic_dylink_test() @needs_dlfcn def test_dylink_no_export(self): self.set_setting('NO_DECLARE_ASM_MODULE_EXPORTS') self.do_basic_dylink_test() @needs_dlfcn def test_dylink_memory_growth(self): if not self.is_wasm(): self.skipTest('wasm only') self.set_setting('ALLOW_MEMORY_GROWTH', 1) self.do_basic_dylink_test() @needs_dlfcn def test_dylink_safe_heap(self): self.set_setting('SAFE_HEAP', 1) self.do_basic_dylink_test() @needs_dlfcn def test_dylink_function_pointer_equality(self): self.dylink_test(r''' #include <stdio.h> #include "header.h" int main() { void* puts_side = get_address(); printf("main module address %p.\n", &puts); printf("side module address address %p.\n", puts_side); if (&puts == puts_side) printf("success\n"); else printf("failure\n"); return 0; } ''', ''' #include <stdio.h> #include "header.h" void* get_address() { return (void*)&puts; } ''', 'success', header='extern "C" void* get_address();') @needs_dlfcn def test_dylink_floats(self): self.dylink_test(r''' #include <stdio.h> extern float sidey(); int main() { printf("other says %.2f.\n", sidey()+1); return 0; } ''', ''' float sidey() { return 11.5; } ''', 'other says 12.50') @needs_dlfcn def test_dylink_printfs(self): self.dylink_test(r''' #include <stdio.h> extern "C" void sidey(); int main() { printf("hello from main\n"); sidey(); return 0; } ''', r''' #include <stdio.h> extern "C" void sidey() { printf("hello from side\n"); } ''', 'hello from main\nhello from side\n') # Verify that a function pointer can be passed back and forth and invoked # on both sides. 
@needs_dlfcn def test_dylink_funcpointer(self): self.dylink_test( main=r''' #include <stdio.h> #include <assert.h> #include "header.h" intfunc sidey(intfunc f); void a(int arg) { printf("hello from funcptr: %d\n", arg); } int main() { intfunc b = sidey(a); assert(a == b); b(0); return 0; } ''', side=''' #include "header.h" intfunc sidey(intfunc f) { f(1); return f; } ''', expected='hello from funcptr: 1\nhello from funcptr: 0\n', header='typedef void (*intfunc)(int );') @needs_dlfcn # test dynamic linking of a module with multiple function pointers, stored # statically def test_dylink_static_funcpointers(self): self.dylink_test( main=r''' #include <stdio.h> #include "header.h" void areturn0() { printf("hello 0\n"); } void areturn1() { printf("hello 1\n"); } void areturn2() { printf("hello 2\n"); } voidfunc func_ptrs[3] = { areturn0, areturn1, areturn2 }; int main(int argc, char **argv) { sidey(func_ptrs[0]); sidey(func_ptrs[1]); sidey(func_ptrs[2]); return 0; } ''', side=''' #include "header.h" void sidey(voidfunc f) { f(); } ''', expected='hello 0\nhello 1\nhello 2\n', header='typedef void (*voidfunc)(); void sidey(voidfunc f);') @no_wasm('uses function tables in an asm.js specific way') @needs_dlfcn def test_dylink_asmjs_funcpointers(self): self.dylink_test( main=r''' #include "header.h" #include <emscripten.h> void left1() { printf("left1\n"); } void left2() { printf("left2\n"); } voidfunc getleft1() { return left1; } voidfunc getleft2() { return left2; } int main(int argc, char **argv) { printf("main\n"); EM_ASM({ // make the function table sizes a non-power-of-two var newSize = alignFunctionTables(); //out('old size of function tables: ' + newSize); while ((newSize & 3) !== 3) { Module['FUNCTION_TABLE_v'].push(0); newSize = alignFunctionTables(); } //out('new size of function tables: ' + newSize); // when masked, the two function pointers 1 and 2 should not happen to fall back to the right place assert(((newSize+1) & 3) !== 1 || ((newSize+2) & 3) !== 2); loadDynamicLibrary('liblib.so'); }); volatilevoidfunc f; f = (volatilevoidfunc)left1; f(); f = (volatilevoidfunc)left2; f(); f = (volatilevoidfunc)getright1(); f(); f = (volatilevoidfunc)getright2(); f(); second(); return 0; } ''', side=r''' #include "header.h" void right1() { printf("right1\n"); } void right2() { printf("right2\n"); } voidfunc getright1() { return right1; } voidfunc getright2() { return right2; } void second() { printf("second\n"); volatilevoidfunc f; f = (volatilevoidfunc)getleft1(); f(); f = (volatilevoidfunc)getleft2(); f(); f = (volatilevoidfunc)right1; f(); f = (volatilevoidfunc)right2; f(); } ''', expected='main\nleft1\nleft2\nright1\nright2\nsecond\nleft1\nleft2\nright1\nright2\n', header=''' #include <stdio.h> typedef void (*voidfunc)(); typedef volatile voidfunc volatilevoidfunc; voidfunc getleft1(); voidfunc getleft2(); voidfunc getright1(); voidfunc getright2(); void second(); ''', need_reverse=False, auto_load=False) @needs_dlfcn def test_dylink_funcpointers_wrapper(self): self.dylink_test( main=r'''\ #include <stdio.h> #include "header.h" int main(int argc, char **argv) { charfunc f1 = emscripten_run_script; f1("out('one')"); charfunc f2 = get(); f2("out('two')"); return 0; } ''', side='''\ #include "header.h" charfunc get() { return emscripten_run_script; } ''', expected='one\ntwo\n', header='''\ #include <emscripten.h> typedef void (*charfunc)(const char*); extern charfunc get(); ''') @needs_dlfcn def test_dylink_static_funcpointer_float(self): self.dylink_test( main=r'''\ #include <stdio.h> #include 
"header.h" int sidey(floatfunc f); float func1(float f) { printf("hello 1: %f\n", f); return 0; } floatfunc f1 = &func1; int main(int argc, char **argv) { printf("got: %d\n", sidey(f1)); f1(12.34); return 0; } ''', side='''\ #include "header.h" int sidey(floatfunc f) { f(56.78); return 1; } ''', expected='hello 1: 56.779999\ngot: 1\nhello 1: 12.340000\n', header='typedef float (*floatfunc)(float);') @needs_dlfcn def test_dylink_global_init(self): self.dylink_test(r''' #include <stdio.h> struct Class { Class() { printf("a new Class\n"); } }; static Class c; int main() { return 0; } ''', r''' void nothing() {} ''', 'a new Class\n') @needs_dlfcn def test_dylink_global_inits(self): def test(): self.dylink_test(header=r''' #include <stdio.h> struct Class { Class(const char *name) { printf("new %s\n", name); } }; ''', main=r''' #include "header.h" static Class c("main"); int main() { return 0; } ''', side=r''' #include "header.h" static Class c("side"); ''', expected=['new main\nnew side\n', 'new side\nnew main\n']) test() # TODO: this in wasm if self.get_setting('ASSERTIONS') == 1 and not self.is_wasm(): print('check warnings') self.set_setting('ASSERTIONS', 2) test() full = run_js('src.cpp.o.js', engine=JS_ENGINES[0], full_output=True, stderr=STDOUT) self.assertNotContained("trying to dynamically load symbol '__ZN5ClassC2EPKc' (from 'liblib.so') that already exists", full) @needs_dlfcn def test_dylink_i64(self): self.dylink_test(r''' #include <stdio.h> #include <stdint.h> extern int64_t sidey(); int main() { printf("other says %llx.\n", sidey()); return 0; } ''', ''' #include <stdint.h> int64_t sidey() { volatile int64_t x = 11; x = x * x * x * x; x += x % 17; x += (x * (1 << 30)); x -= 96; x = (x + 1000) / ((x % 5) + 1); volatile uint64_t y = x / 2; x = y / 3; y = y * y * y * y; y += y % 17; y += (y * (1 << 30)); y -= 121; y = (y + 1000) / ((y % 5) + 1); x += y; return x; } ''', 'other says 175a1ddee82b8c31.') @all_engines @needs_dlfcn def test_dylink_i64_b(self): self.dylink_test(r''' #include <stdio.h> #include <stdint.h> extern int64_t sidey(); int64_t testAdd(int64_t a) { return a + 1; } int64_t testAddB(int a) { return a + 1; } typedef int64_t (*testAddHandler)(int64_t); testAddHandler h = &testAdd; typedef int64_t (*testAddBHandler)(int); testAddBHandler hb = &testAddB; int main() { printf("other says %lld.\n", sidey()); int64_t r = h(42); printf("my fp says: %lld.\n", r); int64_t rb = hb(42); printf("my second fp says: %lld.\n", r); } ''', ''' #include <stdint.h> int64_t sidey() { volatile int64_t x = 0x12345678abcdef12LL; x += x % 17; x = 18 - x; return x; } ''', 'other says -1311768467750121224.\nmy fp says: 43.\nmy second fp says: 43.') @needs_dlfcn @also_with_wasm_bigint def test_dylink_i64_c(self): self.dylink_test(r''' #include <cstdio> #include <cinttypes> #include "header.h" typedef int32_t (*fp_type_32)(int32_t, int32_t, int32_t); typedef int64_t (*fp_type_64)(int32_t, int32_t, int32_t); int32_t internal_function_ret_32(int32_t i, int32_t j, int32_t k) { return 32; } int64_t internal_function_ret_64(int32_t i, int32_t j, int32_t k) { return 64; } int main() { fp_type_32 fp32_internal = &internal_function_ret_32; fp_type_32 fp32_external = &function_ret_32; fp_type_64 fp64_external = &function_ret_64; fp_type_64 fp64_internal = &internal_function_ret_64; int32_t ires32 = fp32_internal(0,0,0); printf("res32 - internal %d\n",ires32); int32_t eres32 = fp32_external(0,0,0); printf("res32 - external %d\n",eres32); int64_t ires64 = fp64_internal(0,0,0); printf("res64 - internal %" 
PRId64 "\n",ires64); int64_t eres64 = fp64_external(0,0,0); printf("res64 - external %" PRId64 "\n",eres64); return 0; } ''', ''' #include "header.h" int32_t function_ret_32(int32_t i, int32_t j, int32_t k) { return 32; } int64_t function_ret_64(int32_t i, int32_t j, int32_t k) { return 64; } ''', '''res32 - internal 32 res32 - external 32 res64 - internal 64 res64 - external 64\n''', header=''' #include <emscripten.h> #include <cstdint> EMSCRIPTEN_KEEPALIVE int32_t function_ret_32(int32_t i, int32_t j, int32_t k); EMSCRIPTEN_KEEPALIVE int64_t function_ret_64(int32_t i, int32_t j, int32_t k); ''') @needs_dlfcn def test_dylink_class(self): self.dylink_test(header=r''' #include <stdio.h> struct Class { Class(const char *name); }; ''', main=r''' #include "header.h" int main() { Class c("main"); return 0; } ''', side=r''' #include "header.h" Class::Class(const char *name) { printf("new %s\n", name); } ''', expected=['new main\n']) @needs_dlfcn def test_dylink_global_var(self): self.dylink_test(main=r''' #include <stdio.h> extern int x; int main() { printf("extern is %d.\n", x); return 0; } ''', side=r''' int x = 123; ''', expected=['extern is 123.\n']) @needs_dlfcn def test_dylink_global_var_modded(self): self.dylink_test(main=r''' #include <stdio.h> extern int x; int main() { printf("extern is %d.\n", x); return 0; } ''', side=r''' int x = 123; struct Initter { Initter() { x = 456; } }; Initter initter; ''', expected=['extern is 456.\n']) @needs_dlfcn def test_dylink_stdlib(self): self.dylink_test(header=r''' #include <math.h> #include <stdlib.h> #include <string.h> char *side(const char *data); double pow_two(double x); ''', main=r''' #include <stdio.h> #include "header.h" int main() { char *temp = side("hello through side\n"); char *ret = (char*)malloc(strlen(temp)+1); strcpy(ret, temp); temp[1] = 'x'; puts(ret); printf("pow_two: %d.\n", int(pow_two(5.9))); return 0; } ''', side=r''' #include "header.h" char *side(const char *data) { char *ret = (char*)malloc(strlen(data)+1); strcpy(ret, data); return ret; } double pow_two(double x) { return pow(2, x); } ''', expected=['hello through side\n\npow_two: 59.']) @needs_dlfcn def test_dylink_jslib(self): create_test_file('lib.js', r''' mergeInto(LibraryManager.library, { test_lib_func: function(x) { return x + 17.2; } }); ''') self.dylink_test(header=r''' extern "C" { extern double test_lib_func(int input); } ''', main=r''' #include <stdio.h> #include "header.h" extern double sidey(); int main2() { return 11; } int main() { int input = sidey(); double temp = test_lib_func(input); printf("other says %.2f\n", temp); printf("more: %.5f, %d\n", temp, input); return 0; } ''', side=r''' #include <stdio.h> #include "header.h" extern int main2(); double sidey() { int temp = main2(); printf("main2 sed: %d\n", temp); printf("main2 sed: %u, %c\n", temp, temp/2); return test_lib_func(temp); } ''', expected='other says 45.2', main_emcc_args=['--js-library', 'lib.js']) @needs_dlfcn def test_dylink_global_var_jslib(self): create_test_file('lib.js', r''' mergeInto(LibraryManager.library, { jslib_x: '{{{ makeStaticAlloc(4) }}}', jslib_x__postset: 'HEAP32[_jslib_x>>2] = 148;', }); ''') self.dylink_test(main=r''' #include <stdio.h> extern "C" int jslib_x; extern void call_side(); int main() { printf("main: jslib_x is %d.\n", jslib_x); call_side(); return 0; } ''', side=r''' #include <stdio.h> extern "C" int jslib_x; void call_side() { printf("side: jslib_x is %d.\n", jslib_x); } ''', expected=['main: jslib_x is 148.\nside: jslib_x is 148.\n'], 
main_emcc_args=['--js-library', 'lib.js', '-s', 'EXPORTED_FUNCTIONS=["_main", "_jslib_x"]']) @needs_dlfcn def test_dylink_many_postsets(self): NUM = 1234 self.dylink_test(header=r''' #include <stdio.h> typedef void (*voidfunc)(); static void simple() { printf("simple.\n"); } static volatile voidfunc funcs[''' + str(NUM) + '] = { ' + ','.join(['simple'] * NUM) + r''' }; static void test() { volatile int i = ''' + str(NUM - 1) + r'''; funcs[i](); i = 0; funcs[i](); } extern void more(); ''', main=r''' #include "header.h" int main() { test(); more(); return 0; } ''', side=r''' #include "header.h" void more() { test(); } ''', expected=['simple.\nsimple.\nsimple.\nsimple.\n']) @needs_dlfcn def test_dylink_postsets_chunking(self): self.dylink_test(header=r''' extern int global_var; ''', main=r''' #include <stdio.h> #include "header.h" // prepare 99 global variable with local initializer static int p = 1; #define P(x) __attribute__((used)) int *padding##x = &p; P(01) P(02) P(03) P(04) P(05) P(06) P(07) P(08) P(09) P(10) P(11) P(12) P(13) P(14) P(15) P(16) P(17) P(18) P(19) P(20) P(21) P(22) P(23) P(24) P(25) P(26) P(27) P(28) P(29) P(30) P(31) P(32) P(33) P(34) P(35) P(36) P(37) P(38) P(39) P(40) P(41) P(42) P(43) P(44) P(45) P(46) P(47) P(48) P(49) P(50) P(51) P(52) P(53) P(54) P(55) P(56) P(57) P(58) P(59) P(60) P(61) P(62) P(63) P(64) P(65) P(66) P(67) P(68) P(69) P(70) P(71) P(72) P(73) P(74) P(75) P(76) P(77) P(78) P(79) P(80) P(81) P(82) P(83) P(84) P(85) P(86) P(87) P(88) P(89) P(90) P(91) P(92) P(93) P(94) P(95) P(96) P(97) P(98) P(99) // prepare global variable with global initializer int *ptr = &global_var; int main(int argc, char *argv[]) { printf("%d\n", *ptr); } ''', side=r''' #include "header.h" int global_var = 12345; ''', expected=['12345\n']) @needs_dlfcn def test_dylink_syslibs(self): # one module uses libcxx, need to force its inclusion when it isn't the main # https://github.com/emscripten-core/emscripten/issues/10571 return self.skipTest('Currently not working due to duplicate symbol errors in wasm-ld') def test(syslibs, expect_pass=True, need_reverse=True): print('syslibs', syslibs, self.get_setting('ASSERTIONS')) passed = True try: with env_modify({'EMCC_FORCE_STDLIBS': syslibs}): self.dylink_test(main=r''' void side(); int main() { side(); return 0; } ''', side=r''' #include <iostream> void side() { std::cout << "cout hello from side\n"; } ''', expected=['cout hello from side\n'], need_reverse=need_reverse, assert_returncode=None) except Exception as e: if expect_pass: raise print('(seeing expected fail)') passed = False assertion = 'build the MAIN_MODULE with EMCC_FORCE_STDLIBS=1 in the environment' if self.get_setting('ASSERTIONS'): self.assertContained(assertion, str(e)) else: self.assertNotContained(assertion, str(e)) assert passed == expect_pass, ['saw', passed, 'but expected', expect_pass] test('libc++') test('1') if not self.has_changed_setting('ASSERTIONS'): self.set_setting('ASSERTIONS', 0) test('', expect_pass=False, need_reverse=False) self.set_setting('ASSERTIONS', 1) test('', expect_pass=False, need_reverse=False) @needs_dlfcn @with_env_modify({'EMCC_FORCE_STDLIBS': 'libc++'}) def test_dylink_iostream(self): self.dylink_test(header=r''' #include <iostream> #include <string> std::string side(); ''', main=r''' #include "header.h" int main() { std::cout << "hello from main " << side() << std::endl; return 0; } ''', side=r''' #include "header.h" std::string side() { return "and hello from side"; } ''', expected=['hello from main and hello from side\n']) 
@needs_dlfcn def test_dylink_dynamic_cast(self): # issue 3465 self.dylink_test(header=r''' class Base { public: virtual void printName(); }; class Derived : public Base { public: void printName(); }; ''', main=r''' #include "header.h" #include <iostream> using namespace std; int main() { cout << "starting main" << endl; Base *base = new Base(); Base *derived = new Derived(); base->printName(); derived->printName(); if (dynamic_cast<Derived*>(derived)) { cout << "OK" << endl; } else { cout << "KO" << endl; } return 0; } ''', side=r''' #include "header.h" #include <iostream> using namespace std; void Base::printName() { cout << "Base" << endl; } void Derived::printName() { cout << "Derived" << endl; } ''', expected=['starting main\nBase\nDerived\nOK']) @needs_dlfcn @with_both_exception_handling def test_dylink_raii_exceptions(self): self.dylink_test(main=r''' #include <stdio.h> extern int side(); int main() { printf("from side: %d.\n", side()); } ''', side=r''' #include <stdio.h> typedef int (*ifdi)(float, double, int); int func_with_special_sig(float a, double b, int c) { printf("special %f %f %d\n", a, b, c); return 1337; } struct DestructorCaller { ~DestructorCaller() { printf("destroy\n"); } }; int side() { // d has a destructor that must be called on function // exit, which means an invoke will be used for the // indirect call here - and the signature of that call // is special and not present in the main module, so // it must be generated for the side module. DestructorCaller d; volatile ifdi p = func_with_special_sig; return p(2.18281, 3.14159, 42); } ''', expected=['special 2.182810 3.141590 42\ndestroy\nfrom side: 1337.\n']) @needs_dlfcn @no_wasm_backend('wasm backend resolves symbols greedily on startup') def test_dylink_hyper_dupe(self): self.set_setting('INITIAL_MEMORY', 64 * 1024 * 1024) if not self.has_changed_setting('ASSERTIONS'): self.set_setting('ASSERTIONS', 2) # test hyper-dynamic linking, and test duplicate warnings create_test_file('third.cpp', r''' #include <stdio.h> int sidef() { return 36; } int sideg = 49; int bsidef() { return 536; } extern void only_in_second_1(int x); extern int second_to_third; int third_to_second = 1337; void only_in_third_0() { // note we access our own globals directly, so // it doesn't matter that overriding failed printf("only_in_third_0: %d, %d, %d\n", sidef(), sideg, second_to_third); only_in_second_1(2112); } void only_in_third_1(int x) { printf("only_in_third_1: %d, %d, %d, %d\n", sidef(), sideg, second_to_third, x); } ''') if self.is_wasm(): libname = 'third.wasm' else: libname = 'third.js' run_process([EMCC, 'third.cpp', '-o', libname, '-s', 'SIDE_MODULE', '-s', 'EXPORT_ALL'] + self.get_emcc_args()) self.dylink_test(main=r''' #include <stdio.h> #include <emscripten.h> extern int sidef(); extern int sideg; extern int bsidef(); extern int bsideg; extern void only_in_second_0(); extern void only_in_third_0(); int main() { EM_ASM({ loadDynamicLibrary('%s'); // hyper-dynamic! works at least for functions (and consts not used in same block) }); printf("sidef: %%d, sideg: %%d.\n", sidef(), sideg); printf("bsidef: %%d.\n", bsidef()); only_in_second_0(); only_in_third_0(); } ''' % libname, side=r''' #include <stdio.h> int sidef() { return 10; } // third will try to override these, but fail! 
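        // (as the expected output shows, the definitions already loaded keep winning:
        //  main keeps seeing sidef() == 10 and sideg == 20, while code inside third
        //  reads its own copies and prints 36 and 49)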
int sideg = 20; extern void only_in_third_1(int x); int second_to_third = 500; extern int third_to_second; void only_in_second_0() { printf("only_in_second_0: %d, %d, %d\n", sidef(), sideg, third_to_second); only_in_third_1(1221); } void only_in_second_1(int x) { printf("only_in_second_1: %d, %d, %d, %d\n", sidef(), sideg, third_to_second, x); } ''', expected=['sidef: 10, sideg: 20.\nbsidef: 536.\nonly_in_second_0: 10, 20, 1337\nonly_in_third_1: 36, 49, 500, 1221\nonly_in_third_0: 36, 49, 500\nonly_in_second_1: 10, 20, 1337, 2112\n'], need_reverse=not self.is_wasm()) # in wasm, we can't flip as the side would have an EM_ASM, which we don't support yet TODO if not self.has_changed_setting('ASSERTIONS'): print('check warnings') full = run_js('src.cpp.o.js', engine=JS_ENGINES[0], full_output=True, stderr=STDOUT) self.assertContained("warning: symbol '_sideg' from '%s' already exists" % libname, full) @needs_dlfcn @no_wasm_backend('possible https://github.com/emscripten-core/emscripten/issues/9038') def test_dylink_dso_needed(self): def do_run(src, expected_output): self.do_run(src + 'int main() { return test_main(); }', expected_output) self._test_dylink_dso_needed(do_run) @needs_dlfcn def test_dylink_dot_a(self): # .a linking must force all .o files inside it, when in a shared module create_test_file('third.cpp', 'extern "C" int sidef() { return 36; }') create_test_file('fourth.cpp', 'extern "C" int sideg() { return 17; }') run_process([EMCC, '-c', 'third.cpp', '-o', 'third.o'] + self.get_emcc_args()) run_process([EMCC, '-c', 'fourth.cpp', '-o', 'fourth.o'] + self.get_emcc_args()) run_process([EMAR, 'rc', 'libfourth.a', 'fourth.o']) self.dylink_test(main=r''' #include <stdio.h> #include <emscripten.h> extern "C" int sidef(); extern "C" int sideg(); int main() { printf("sidef: %d, sideg: %d.\n", sidef(), sideg()); } ''', # contents of libfourth.a must be included, even if they aren't referred to! 
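                     # (passing a list as `side` makes dylink_test link these prebuilt
                     #  objects/archives directly instead of compiling a source string)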
side=['libfourth.a', 'third.o'], expected=['sidef: 36, sideg: 17.\n']) @needs_dlfcn def test_dylink_spaghetti(self): self.dylink_test(main=r''' #include <stdio.h> int main_x = 72; extern int side_x; int adjust = side_x + 10; int *ptr = &side_x; struct Class { Class() { printf("main init sees %d, %d, %d.\n", adjust, *ptr, main_x); } }; Class cm; int main() { printf("main main sees %d, %d, %d.\n", adjust, *ptr, main_x); return 0; } ''', side=r''' #include <stdio.h> extern int main_x; int side_x = -534; int adjust2 = main_x + 10; int *ptr2 = &main_x; struct Class { Class() { printf("side init sees %d, %d, %d.\n", adjust2, *ptr2, side_x); } }; Class cs; ''', expected=['side init sees 82, 72, -534.\nmain init sees -524, -534, 72.\nmain main sees -524, -534, 72.', 'main init sees -524, -534, 72.\nside init sees 82, 72, -534.\nmain main sees -524, -534, 72.']) @needs_make('mingw32-make') @needs_dlfcn def test_dylink_zlib(self): self.emcc_args += ['-I' + path_from_root('tests', 'third_party', 'zlib'), '-s', 'RELOCATABLE'] zlib_archive = self.get_zlib_library() self.dylink_test(main=open(path_from_root('tests', 'third_party', 'zlib', 'example.c')).read(), side=zlib_archive, expected=open(path_from_root('tests', 'core', 'test_zlib.out')).read(), force_c=True) # @needs_dlfcn # def test_dylink_bullet(self): # self.emcc_args += ['-I' + path_from_root('tests', 'bullet', 'src')] # side = self.get_bullet_library(self, True) # self.dylink_test(main=open(path_from_root('tests', 'bullet', 'Demos', 'HelloWorld', 'HelloWorld.cpp')).read(), # side=side, # expected=[open(path_from_root('tests', 'bullet', 'output.txt')).read(), # different roundings # open(path_from_root('tests', 'bullet', 'output2.txt')).read(), # open(path_from_root('tests', 'bullet', 'output3.txt')).read()]) @needs_dlfcn @no_fastcomp('https://github.com/emscripten-core/emscripten/issues/8376') def test_dylink_rtti(self): # Verify that objects created in one module and be dynamic_cast<> correctly # in the another module. # Each module will define its own copy of certain COMDAT symbols such as # each classs's typeinfo, but at runtime they should both use the same one. header = ''' #include <cstddef> class Foo { public: virtual ~Foo() {} }; class Bar : public Foo { public: virtual ~Bar() {} }; bool is_bar(Foo* foo); ''' main = ''' #include <stdio.h> #include "header.h" int main() { Bar bar; if (!is_bar(&bar)) { puts("failure"); return 1; } puts("success"); return 0; } ''' side = ''' #include "header.h" bool is_bar(Foo* foo) { return dynamic_cast<Bar*>(foo) != nullptr; } ''' self.dylink_test(main=main, side=side, header=header, expected='success') def test_random(self): src = r'''#include <stdlib.h> #include <stdio.h> int main() { srandom(0xdeadbeef); printf("%ld\n", random()); } ''' self.do_run(src, '956867869') def test_rand(self): src = r'''#include <stdlib.h> #include <stdio.h> #include <assert.h> int main() { // we need RAND_MAX to be a bitmask (power of 2 minus 1). this assertions guarantees // if RAND_MAX changes the test failure will focus attention on that issue here. 
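  // (0x7fffffff == 2^31 - 1, i.e. all 31 low bits set, so it is a valid bitmask)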
assert(RAND_MAX == 0x7fffffff); srand(0xdeadbeef); for(int i = 0; i < 10; ++i) printf("%d\n", rand()); unsigned int seed = 0xdeadbeef; for(int i = 0; i < 10; ++i) printf("%d\n", rand_r(&seed)); bool haveEvenAndOdd = true; for(int i = 1; i <= 30; ++i) { int mask = 1 << i; if (mask > RAND_MAX) break; bool haveEven = false; bool haveOdd = false; for(int j = 0; j < 1000 && (!haveEven || !haveOdd); ++j) { if ((rand() & mask) == 0) haveEven = true; else haveOdd = true; } haveEvenAndOdd = haveEvenAndOdd && haveEven && haveOdd; } if (haveEvenAndOdd) printf("Have even and odd!\n"); return 0; } ''' expected = '''490242850 2074599277 1480056542 1912638067 931112055 2110392489 2053422194 1614832492 216117595 174823244 760368382 602359081 1121118963 1291018924 1608306807 352705809 958258461 1182561381 114276303 1481323674 Have even and odd! ''' self.do_run(src, expected) def test_strtod(self): src = open(path_from_root('tests', 'core', 'test_strtod.c')).read() expected = open(path_from_root('tests', 'core', 'test_strtod.out')).read() self.do_run(src, expected) def test_strtold(self): self.do_run_in_out_file_test('tests', 'core', 'test_strtold') def test_strtok(self): self.do_run_in_out_file_test('tests', 'core', 'test_strtok') def test_parseInt(self): self.do_run_in_out_file_test('tests', 'core', 'test_parseInt') def test_transtrcase(self): self.do_run_in_out_file_test('tests', 'core', 'test_transtrcase') @no_wasm2js('very slow to compile') def test_printf(self): # needs to flush stdio streams self.set_setting('EXIT_RUNTIME', 1) self.do_run_in_out_file_test('tests', 'printf', 'test') def test_printf_2(self): self.do_run_in_out_file_test('tests', 'core', 'test_printf_2') def test_printf_float(self): self.do_run_in_out_file_test('tests', 'printf', 'test_float') def test_printf_octal(self): self.do_run_in_out_file_test('tests', 'printf', 'test_octal') def test_vprintf(self): self.do_run_in_out_file_test('tests', 'core', 'test_vprintf') def test_vsnprintf(self): self.do_run_in_out_file_test('tests', 'core', 'test_vsnprintf') def test_printf_more(self): self.do_run_in_out_file_test('tests', 'core', 'test_printf_more') def test_perrar(self): self.do_run_in_out_file_test('tests', 'core', 'test_perrar') def test_atoX(self): self.do_run_in_out_file_test('tests', 'core', 'test_atoX') def test_strstr(self): self.do_run_in_out_file_test('tests', 'core', 'test_strstr') def test_fnmatch(self): # Run one test without assertions, for additional coverage if self.run_name == 'asm2m': i = self.emcc_args.index('ASSERTIONS=1') assert i > 0 and self.emcc_args[i - 1] == '-s' self.emcc_args[i] = 'ASSERTIONS=0' print('flip assertions off') self.do_run_in_out_file_test('tests', 'core', 'test_fnmatch') def test_sscanf(self): self.do_run_in_out_file_test('tests', 'core', 'test_sscanf') def test_sscanf_2(self): # doubles for ftype in ['float', 'double']: src = r''' #include <stdio.h> int main(){ char strval1[] = "1.2345678901"; char strval2[] = "1.23456789e5"; char strval3[] = "1.23456789E5"; char strval4[] = "1.2345678e-5"; char strval5[] = "1.2345678E-5"; double dblval = 1.2345678901; double tstval; sscanf(strval1, "%lf", &tstval); if(dblval != tstval) printf("FAIL: Values are not equal: %lf %lf\n", dblval, tstval); else printf("Pass: %lf %lf\n", tstval, dblval); sscanf(strval2, "%lf", &tstval); dblval = 123456.789; if(dblval != tstval) printf("FAIL: Values are not equal: %lf %lf\n", dblval, tstval); else printf("Pass: %lf %lf\n", tstval, dblval); sscanf(strval3, "%lf", &tstval); dblval = 123456.789; if(dblval != tstval) 
printf("FAIL: Values are not equal: %lf %lf\n", dblval, tstval); else printf("Pass: %lf %lf\n", tstval, dblval); sscanf(strval4, "%lf", &tstval); dblval = 0.000012345678; if(dblval != tstval) printf("FAIL: Values are not equal: %lf %lf\n", dblval, tstval); else printf("Pass: %lf %lf\n", tstval, dblval); sscanf(strval5, "%lf", &tstval); dblval = 0.000012345678; if(dblval != tstval) printf("FAIL: Values are not equal: %lf %lf\n", dblval, tstval); else printf("Pass: %lf %lf\n", tstval, dblval); return 0; } ''' if ftype == 'float': self.do_run(src.replace('%lf', '%f').replace('double', 'float'), '''Pass: 1.234568 1.234568 Pass: 123456.789062 123456.789062 Pass: 123456.789062 123456.789062 Pass: 0.000012 0.000012 Pass: 0.000012 0.000012''') else: self.do_run(src, '''Pass: 1.234568 1.234568 Pass: 123456.789000 123456.789000 Pass: 123456.789000 123456.789000 Pass: 0.000012 0.000012 Pass: 0.000012 0.000012''') def test_sscanf_n(self): self.do_run_in_out_file_test('tests', 'core', 'test_sscanf_n') def test_sscanf_whitespace(self): # needs to flush stdio streams self.set_setting('EXIT_RUNTIME', 1) self.do_run_in_out_file_test('tests', 'core', 'test_sscanf_whitespace') def test_sscanf_other_whitespace(self): # use i16s in printf self.set_setting('SAFE_HEAP', 0) # needs to flush stdio streams self.set_setting('EXIT_RUNTIME', 1) self.do_run_in_out_file_test('tests', 'core', 'test_sscanf_other_whitespace') def test_sscanf_3(self): self.do_run_in_out_file_test('tests', 'core', 'test_sscanf_3') def test_sscanf_4(self): self.do_run_in_out_file_test('tests', 'core', 'test_sscanf_4') def test_sscanf_5(self): self.do_run_in_out_file_test('tests', 'core', 'test_sscanf_5') def test_sscanf_6(self): self.do_run_in_out_file_test('tests', 'core', 'test_sscanf_6') def test_sscanf_skip(self): self.do_run_in_out_file_test('tests', 'core', 'test_sscanf_skip') def test_sscanf_caps(self): self.do_run_in_out_file_test('tests', 'core', 'test_sscanf_caps') def test_sscanf_hex(self): self.do_run_in_out_file_test('tests', 'core', 'test_sscanf_hex') def test_sscanf_float(self): self.do_run_in_out_file_test('tests', 'core', 'test_sscanf_float') def test_langinfo(self): self.do_run_in_out_file_test('tests', 'core', 'test_langinfo') def test_files(self): self.banned_js_engines = [SPIDERMONKEY_ENGINE] # closure can generate variables called 'gc', which pick up js shell stuff if self.maybe_closure(): # Use closure here, to test we don't break FS stuff self.emcc_args = [x for x in self.emcc_args if x != '-g'] # ensure we test --closure 1 --memory-init-file 1 (-g would disable closure) elif '-O3' in self.emcc_args and not self.is_wasm(): print('closure 2') self.emcc_args += ['--closure', '2', '-Wno-almost-asm'] # Use closure 2 here for some additional coverage return self.skipTest('TODO: currently skipped because CI runs out of memory running Closure in this test!') self.emcc_args += ['-s', 'FORCE_FILESYSTEM=1', '--pre-js', 'pre.js'] print('base', self.emcc_args) create_test_file('pre.js', ''' /** @suppress{checkTypes}*/ Module = { 'noFSInit': true, 'preRun': function() { FS.createLazyFile('/', 'test.file', 'test.file', true, false); // Test FS_* exporting Module['FS_createDataFile']('/', 'somefile.binary', [100, 200, 50, 25, 10, 77, 123], true, false, false); // 200 becomes -56, since signed chars are used in memory var test_files_input = 'hi there!'; var test_files_input_index = 0; FS.init(function() { return test_files_input.charCodeAt(test_files_input_index++) || null; }); } }; ''') create_test_file('test.file', 'some data') src 
= open(path_from_root('tests', 'files.cpp')).read() mem_file = 'src.cpp.o.js.mem' try_delete(mem_file) def clean(out, err): return '\n'.join([line for line in (out + err).split('\n') if 'binaryen' not in line and 'wasm' not in line and 'so not running' not in line]) self.do_run(src, ('size: 7\ndata: 100,-56,50,25,10,77,123\nloop: 100 -56 50 25 10 77 123 \ninput:hi there!\ntexto\n$\n5 : 10,30,20,11,88\nother=some data.\nseeked=me da.\nseeked=ata.\nseeked=ta.\nfscanfed: 10 - hello\n5 bytes to dev/null: 5\nok.\ntexte\n', 'size: 7\ndata: 100,-56,50,25,10,77,123\nloop: 100 -56 50 25 10 77 123 \ninput:hi there!\ntexto\ntexte\n$\n5 : 10,30,20,11,88\nother=some data.\nseeked=me da.\nseeked=ata.\nseeked=ta.\nfscanfed: 10 - hello\n5 bytes to dev/null: 5\nok.\n'), output_nicerizer=clean) if self.uses_memory_init_file(): self.assertExists(mem_file) def test_files_m(self): # Test for Module.stdin etc. # needs to flush stdio streams self.set_setting('EXIT_RUNTIME', 1) create_test_file('pre.js', ''' Module = { data: [10, 20, 40, 30], stdin: function() { return Module.data.pop() || null }, stdout: function(x) { out('got: ' + x) } }; ''') self.emcc_args += ['--pre-js', 'pre.js'] src = r''' #include <stdio.h> #include <unistd.h> int main () { char c; fprintf(stderr, "isatty? %d,%d,%d\n", isatty(fileno(stdin)), isatty(fileno(stdout)), isatty(fileno(stderr))); while ((c = fgetc(stdin)) != EOF) { putc(c+5, stdout); } return 0; } ''' def clean(out, err): return '\n'.join(l for l in (out + err).splitlines() if 'warning' not in l and 'binaryen' not in l) self.do_run(src, ('got: 35\ngot: 45\ngot: 25\ngot: 15\nisatty? 0,0,1\n', 'got: 35\ngot: 45\ngot: 25\ngot: 15\nisatty? 0,0,1', 'isatty? 0,0,1\ngot: 35\ngot: 45\ngot: 25\ngot: 15'), output_nicerizer=clean) def test_mount(self): self.set_setting('FORCE_FILESYSTEM', 1) src = open(path_from_root('tests', 'fs', 'test_mount.c')).read() self.do_run(src, 'success', force_c=True) def test_getdents64(self): src = open(path_from_root('tests', 'fs', 'test_getdents64.cpp')).read() self.do_run(src, '..') def test_getdents64_special_cases(self): self.banned_js_engines = [V8_ENGINE] # https://bugs.chromium.org/p/v8/issues/detail?id=6881 src = path_from_root('tests', 'fs', 'test_getdents64_special_cases.cpp') out = path_from_root('tests', 'fs', 'test_getdents64_special_cases.out') self.do_run_from_file(src, out, assert_identical=True) def test_getcwd_with_non_ascii_name(self): self.banned_js_engines = [V8_ENGINE] # https://bugs.chromium.org/p/v8/issues/detail?id=6881 src = path_from_root('tests', 'fs', 'test_getcwd_with_non_ascii_name.cpp') out = path_from_root('tests', 'fs', 'test_getcwd_with_non_ascii_name.out') self.do_run_from_file(src, out, assert_identical=True) def test_fwrite_0(self): self.do_run_in_out_file_test('tests', 'core', 'test_fwrite_0') def test_fgetc_ungetc(self): print('TODO: update this test once the musl ungetc-on-EOF-stream bug is fixed upstream and reaches us') self.set_setting('SYSCALL_DEBUG', 1) self.clear() orig_compiler_opts = self.emcc_args[:] for fs in ['MEMFS', 'NODEFS']: print(fs) src = open(path_from_root('tests', 'stdio', 'test_fgetc_ungetc.c')).read() self.emcc_args = orig_compiler_opts + ['-D' + fs] if fs == 'NODEFS': self.emcc_args += ['-lnodefs.js'] self.do_run(src, 'success', force_c=True, js_engines=[NODE_JS]) def test_fgetc_unsigned(self): src = r''' #include <stdio.h> int main() { FILE *file = fopen("file_with_byte_234.txt", "rb"); int c = fgetc(file); printf("*%d\n", c); } ''' create_test_file('file_with_byte_234.txt', b'\xea', 
binary=True) self.emcc_args += ['--embed-file', 'file_with_byte_234.txt'] self.do_run(src, '*234\n') def test_fgets_eol(self): src = r''' #include <stdio.h> char buf[32]; int main() { const char *r = "SUCCESS"; FILE *f = fopen("eol.txt", "r"); while (fgets(buf, 32, f) != NULL) { if (buf[0] == '\0') { r = "FAIL"; break; } } printf("%s\n", r); fclose(f); return 0; } ''' open('eol.txt', 'wb').write(b'\n') self.emcc_args += ['--embed-file', 'eol.txt'] self.do_run(src, 'SUCCESS\n') def test_fscanf(self): create_test_file('three_numbers.txt', '-1 0.1 -.1') src = r''' #include <stdio.h> #include <assert.h> #include <float.h> int main() { float x = FLT_MAX, y = FLT_MAX, z = FLT_MAX; FILE* fp = fopen("three_numbers.txt", "r"); if (fp) { int match = fscanf(fp, " %f %f %f ", &x, &y, &z); printf("match = %d\n", match); printf("x = %0.1f, y = %0.1f, z = %0.1f\n", x, y, z); } else { printf("failed to open three_numbers.txt\n"); } return 0; } ''' self.emcc_args += ['--embed-file', 'three_numbers.txt'] self.do_run(src, 'match = 3\nx = -1.0, y = 0.1, z = -0.1\n') def test_fscanf_2(self): create_test_file('a.txt', '''1/2/3 4/5/6 7/8/9 ''') self.emcc_args += ['--embed-file', 'a.txt'] self.do_run(r'''#include <cstdio> #include <iostream> using namespace std; int main( int argv, char ** argc ) { cout << "fscanf test" << endl; FILE * file; file = fopen("a.txt", "rb"); int vertexIndex[4]; int normalIndex[4]; int uvIndex[4]; int matches = fscanf(file, "%d/%d/%d %d/%d/%d %d/%d/%d %d/%d/%d\n", &vertexIndex[0], &uvIndex[0], &normalIndex[0], &vertexIndex [1], &uvIndex[1], &normalIndex[1], &vertexIndex[2], &uvIndex[2], &normalIndex[2], &vertexIndex[3], &uvIndex[3], &normalIndex[3]); cout << matches << endl; return 0; } ''', 'fscanf test\n9\n') def test_fileno(self): create_test_file('empty.txt', '') src = r''' #include <stdio.h> #include <unistd.h> int main() { FILE* fp = fopen("empty.txt", "r"); if (fp) { printf("%d\n", fileno(fp)); } else { printf("failed to open empty.txt\n"); } return 0; } ''' self.emcc_args += ['--embed-file', 'empty.txt'] self.do_run(src, '3\n') def test_readdir(self): self.do_run_in_out_file_test('tests', 'dirent', 'test_readdir') def test_readdir_empty(self): self.do_run_in_out_file_test('tests', 'dirent', 'test_readdir_empty') def test_stat(self): src = open(path_from_root('tests', 'stat', 'test_stat.c')).read() self.do_run(src, 'success', force_c=True) self.verify_in_strict_mode('src.c.o.js') def test_stat_chmod(self): src = open(path_from_root('tests', 'stat', 'test_chmod.c')).read() self.do_run(src, 'success', force_c=True) def test_stat_mknod(self): src = open(path_from_root('tests', 'stat', 'test_mknod.c')).read() self.do_run(src, 'success', force_c=True) def test_fcntl(self): self.add_pre_run("FS.createDataFile('/', 'test', 'abcdef', true, true, false);") self.do_run_in_out_file_test('tests', 'fcntl', 'test_fcntl') def test_fcntl_open(self): self.do_run_in_out_file_test('tests', 'fcntl', 'test_fcntl_open') def test_fcntl_misc(self): self.add_pre_run("FS.createDataFile('/', 'test', 'abcdef', true, true, false);") self.do_run_in_out_file_test('tests', 'fcntl', 'test_fcntl_misc') def test_poll(self): self.add_pre_run(''' var dummy_device = FS.makedev(64, 0); FS.registerDevice(dummy_device, {}); FS.createDataFile('/', 'file', 'abcdef', true, true, false); FS.mkdev('/device', dummy_device); ''') self.do_run_in_out_file_test('tests', 'core', 'test_poll') def test_statvfs(self): self.do_run_in_out_file_test('tests', 'core', 'test_statvfs') def test_libgen(self): 
    self.do_run_in_out_file_test('tests', 'core', 'test_libgen')

  def test_utime(self):
    src = open(path_from_root('tests', 'utime', 'test_utime.c')).read()
    self.do_run(src, 'success', force_c=True)

  @no_minimal_runtime('MINIMAL_RUNTIME does not have getValue() and setValue() (TODO add it to a JS library function to get it in)')
  def test_utf(self):
    self.banned_js_engines = [SPIDERMONKEY_ENGINE] # only node handles utf well
    self.set_setting('EXPORTED_FUNCTIONS', ['_main', '_malloc'])
    self.set_setting('EXTRA_EXPORTED_RUNTIME_METHODS', ['getValue', 'setValue', 'UTF8ToString', 'stringToUTF8'])
    self.do_run_in_out_file_test('tests', 'core', 'test_utf')

  def test_utf32(self):
    if self.get_setting('MINIMAL_RUNTIME'):
      self.set_setting('DEFAULT_LIBRARY_FUNCS_TO_INCLUDE', ['$UTF32ToString', '$stringToUTF32', '$lengthBytesUTF32'])
    else:
      self.set_setting('EXTRA_EXPORTED_RUNTIME_METHODS', ['UTF32ToString', 'stringToUTF32', 'lengthBytesUTF32'])
    self.do_run(open(path_from_root('tests', 'utf32.cpp')).read(), 'OK.')
    self.do_run(open(path_from_root('tests', 'utf32.cpp')).read(), 'OK.', args=['-fshort-wchar'])

  def test_utf8(self):
    if self.get_setting('MINIMAL_RUNTIME'):
      self.set_setting('DEFAULT_LIBRARY_FUNCS_TO_INCLUDE', ['$AsciiToString', '$stringToAscii', '$writeAsciiToMemory'])
    else:
      self.set_setting('EXTRA_EXPORTED_RUNTIME_METHODS', ['UTF8ToString', 'stringToUTF8', 'AsciiToString', 'stringToAscii'])
    self.do_run(open(path_from_root('tests', 'utf8.cpp')).read(), 'OK.')

  @also_with_wasm_bigint
  def test_utf8_textdecoder(self):
    self.set_setting('EXTRA_EXPORTED_RUNTIME_METHODS', ['UTF8ToString', 'stringToUTF8'])
    self.emcc_args += ['--embed-file', path_from_root('tests/utf8_corpus.txt') + '@/utf8_corpus.txt']
    self.do_run(open(path_from_root('tests', 'benchmark_utf8.cpp')).read(), 'OK.')

  # Test that invalid character in UTF8 does not cause decoding to crash.
  def test_utf8_invalid(self):
    self.set_setting('EXTRA_EXPORTED_RUNTIME_METHODS', ['UTF8ToString', 'stringToUTF8'])
    for decoder_mode in [[], ['-s', 'TEXTDECODER=1']]:
      self.emcc_args += decoder_mode
      print(str(decoder_mode))
      self.do_run(open(path_from_root('tests', 'utf8_invalid.cpp')).read(), 'OK.')

  # Test that invalid character in UTF8 does not cause decoding to crash.
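  # "Does not crash" here means the decoder is expected to recover lossily
  # rather than throw; the usual recovery is to substitute U+FFFD for invalid
  # byte sequences. Purely as an illustration (not part of the suite), plain
  # Python shows the same idea:
  #   b'\xff\xfeok'.decode('utf-8', errors='replace')  # -> '\ufffd\ufffdok'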
def test_minimal_runtime_utf8_invalid(self): self.set_setting('EXTRA_EXPORTED_RUNTIME_METHODS', ['UTF8ToString', 'stringToUTF8']) for decoder_mode in [[], ['-s', 'TEXTDECODER=1']]: self.emcc_args += ['-s', 'MINIMAL_RUNTIME=1'] + decoder_mode print(str(decoder_mode)) self.do_run(open(path_from_root('tests', 'utf8_invalid.cpp')).read(), 'OK.') def test_utf16_textdecoder(self): self.set_setting('EXTRA_EXPORTED_RUNTIME_METHODS', ['UTF16ToString', 'stringToUTF16', 'lengthBytesUTF16']) self.emcc_args += ['--embed-file', path_from_root('tests/utf16_corpus.txt') + '@/utf16_corpus.txt'] self.do_run(open(path_from_root('tests', 'benchmark_utf16.cpp')).read(), 'OK.') def test_wprintf(self): self.do_run_in_out_file_test('tests', 'core', 'test_wprintf') def test_write_stdout_fileno(self): self.do_run_in_out_file_test('tests', 'core', 'test_write_stdout_fileno') self.do_run_in_out_file_test('tests', 'core', 'test_write_stdout_fileno', args=['-s', 'FILESYSTEM=0']) def test_direct_string_constant_usage(self): # needs to flush stdio streams self.set_setting('EXIT_RUNTIME', 1) self.do_run_in_out_file_test('tests', 'core', 'test_direct_string_constant_usage') def test_std_cout_new(self): self.do_run_in_out_file_test('tests', 'core', 'test_std_cout_new') def test_istream(self): # needs to flush stdio streams self.set_setting('EXIT_RUNTIME', 1) for linkable in [0]: # , 1]: print(linkable) # regression check for issue #273 self.set_setting('LINKABLE', linkable) self.do_run_in_out_file_test('tests', 'core', 'test_istream') def test_fs_base(self): # TODO(sbc): It seems that INCLUDE_FULL_LIBRARY will generally generate # undefined symbols at link time so perhaps have it imply this setting? self.set_setting('WARN_ON_UNDEFINED_SYMBOLS', 0) self.set_setting('INCLUDE_FULL_LIBRARY', 1) self.add_pre_run(open(path_from_root('tests', 'filesystem', 'src.js')).read()) src = 'int main() {return 0;}\n' expected = open(path_from_root('tests', 'filesystem', 'output.txt')).read() self.do_run(src, expected) @also_with_noderawfs @is_slow_test def test_fs_nodefs_rw(self): self.emcc_args += ['-lnodefs.js'] self.set_setting('SYSCALL_DEBUG', 1) src = open(path_from_root('tests', 'fs', 'test_nodefs_rw.c')).read() self.do_run(src, 'success', force_c=True) if '-g' not in self.emcc_args: print('closure') self.emcc_args += ['--closure', '1'] self.do_run(src, 'success', force_c=True) @also_with_noderawfs def test_fs_nodefs_cloexec(self): self.emcc_args += ['-lnodefs.js'] src = open(path_from_root('tests', 'fs', 'test_nodefs_cloexec.c')).read() self.do_run(src, 'success', force_c=True) def test_fs_nodefs_home(self): self.set_setting('FORCE_FILESYSTEM', 1) self.emcc_args += ['-lnodefs.js'] src = open(path_from_root('tests', 'fs', 'test_nodefs_home.c')).read() self.do_run(src, 'success', js_engines=[NODE_JS]) def test_fs_nodefs_nofollow(self): self.emcc_args += ['-lnodefs.js'] src = open(path_from_root('tests', 'fs', 'test_nodefs_nofollow.c')).read() self.do_run(src, 'success', js_engines=[NODE_JS]) def test_fs_trackingdelegate(self): src = path_from_root('tests', 'fs', 'test_trackingdelegate.c') out = path_from_root('tests', 'fs', 'test_trackingdelegate.out') self.do_run_from_file(src, out) @also_with_noderawfs def test_fs_writeFile(self): self.emcc_args += ['-s', 'DISABLE_EXCEPTION_CATCHING=1'] # see issue 2334 src = path_from_root('tests', 'fs', 'test_writeFile.cpp') out = path_from_root('tests', 'fs', 'test_writeFile.out') self.do_run_from_file(src, out) def test_fs_write(self): src = path_from_root('tests', 'fs', 'test_write.cpp') out = 
path_from_root('tests', 'fs', 'test_write.out') self.do_run_from_file(src, out) @also_with_noderawfs def test_fs_emptyPath(self): src = path_from_root('tests', 'fs', 'test_emptyPath.c') out = path_from_root('tests', 'fs', 'test_emptyPath.out') self.do_run_from_file(src, out) @also_with_noderawfs def test_fs_append(self): src = open(path_from_root('tests', 'fs', 'test_append.c')).read() self.do_run(src, 'success', force_c=True) def test_fs_mmap(self): orig_compiler_opts = self.emcc_args[:] for fs in ['MEMFS', 'NODEFS']: src = path_from_root('tests', 'fs', 'test_mmap.c') out = path_from_root('tests', 'fs', 'test_mmap.out') self.emcc_args = orig_compiler_opts + ['-D' + fs] if fs == 'NODEFS': self.emcc_args += ['-lnodefs.js'] self.do_run_from_file(src, out) @also_with_noderawfs def test_fs_errorstack(self): # Enables strict mode, which may catch some strict-mode-only errors # so that users can safely work with strict JavaScript if enabled. create_test_file('pre.js', '"use strict";') self.emcc_args += ['--pre-js', 'pre.js'] self.set_setting('FORCE_FILESYSTEM', 1) self.set_setting('ASSERTIONS', 1) self.do_run(r''' #include <emscripten.h> #include <iostream> int main(void) { std::cout << "hello world\n"; // should work with strict mode EM_ASM( try { FS.readFile('/dummy.txt'); } catch (err) { err.stack = err.stack; // should be writable throw err; } ); return 0; } ''', 'at Object.readFile', assert_returncode=None) # engines has different error stack format @also_with_noderawfs def test_fs_llseek(self): self.set_setting('FORCE_FILESYSTEM', 1) src = open(path_from_root('tests', 'fs', 'test_llseek.c')).read() self.do_run(src, 'success', force_c=True) def test_fs_64bit(self): src = open(path_from_root('tests', 'fs', 'test_64bit.c')).read() self.do_run(src, 'success', force_c=True) @no_windows('https://github.com/emscripten-core/emscripten/issues/8882') def test_unistd_access(self): self.clear() orig_compiler_opts = self.emcc_args[:] for fs in ['MEMFS', 'NODEFS']: self.emcc_args = orig_compiler_opts + ['-D' + fs] if fs == 'NODEFS': self.emcc_args += ['-lnodefs.js'] self.do_run_in_out_file_test('tests', 'unistd', 'access', js_engines=[NODE_JS]) # Node.js fs.chmod is nearly no-op on Windows if not WINDOWS: self.emcc_args = orig_compiler_opts self.emcc_args += ['-s', 'NODERAWFS=1'] self.do_run_in_out_file_test('tests', 'unistd', 'access', js_engines=[NODE_JS]) def test_unistd_curdir(self): src = open(path_from_root('tests', 'unistd', 'curdir.c')).read() expected = open(path_from_root('tests', 'unistd', 'curdir.out')).read() self.do_run(src, expected) @also_with_noderawfs def test_unistd_close(self): src = open(path_from_root('tests', 'unistd', 'close.c')).read() expected = open(path_from_root('tests', 'unistd', 'close.out')).read() self.do_run(src, expected) def test_unistd_confstr(self): src = open(path_from_root('tests', 'unistd', 'confstr.c')).read() expected = open(path_from_root('tests', 'unistd', 'confstr.out')).read() self.do_run(src, expected) def test_unistd_ttyname(self): src = open(path_from_root('tests', 'unistd', 'ttyname.c')).read() self.do_run(src, 'success', force_c=True) @also_with_noderawfs def test_unistd_pipe(self): src = open(path_from_root('tests', 'unistd', 'pipe.c')).read() self.do_run(src, 'success', force_c=True) @also_with_noderawfs def test_unistd_dup(self): src = open(path_from_root('tests', 'unistd', 'dup.c')).read() expected = open(path_from_root('tests', 'unistd', 'dup.out')).read() self.do_run(src, expected) def test_unistd_pathconf(self): src = 
open(path_from_root('tests', 'unistd', 'pathconf.c')).read() expected = open(path_from_root('tests', 'unistd', 'pathconf.out')).read() self.do_run(src, expected) def test_unistd_truncate(self): self.clear() orig_compiler_opts = self.emcc_args[:] for fs in ['MEMFS', 'NODEFS']: src = open(path_from_root('tests', 'unistd', 'truncate.c')).read() expected = open(path_from_root('tests', 'unistd', 'truncate.out')).read() self.emcc_args = orig_compiler_opts + ['-D' + fs] if fs == 'NODEFS': self.emcc_args += ['-lnodefs.js'] self.do_run(src, expected, js_engines=[NODE_JS]) @no_windows("Windows throws EPERM rather than EACCES or EINVAL") @unittest.skipIf(WINDOWS or os.geteuid() == 0, "Root access invalidates this test by being able to write on readonly files") def test_unistd_truncate_noderawfs(self): # FIXME self.skipTest('fails on some node versions and OSes, e.g. 10.13.0 on linux') self.emcc_args += ['-s', 'NODERAWFS=1'] self.do_run_in_out_file_test('tests', 'unistd', 'truncate', js_engines=[NODE_JS]) def test_unistd_swab(self): self.do_run_in_out_file_test('tests', 'unistd', 'swab') def test_unistd_isatty(self): src = open(path_from_root('tests', 'unistd', 'isatty.c')).read() self.do_run(src, 'success', force_c=True) @also_with_standalone_wasm def test_unistd_sysconf(self): self.do_run_in_out_file_test('tests', 'unistd', 'sysconf') @no_asan('ASan alters memory layout') def test_unistd_sysconf_phys_pages(self): src = open(path_from_root('tests', 'unistd', 'sysconf_phys_pages.c')).read() if self.get_setting('ALLOW_MEMORY_GROWTH'): expected = (2 * 1024 * 1024 * 1024) // 16384 else: expected = 16 * 1024 * 1024 // 16384 self.do_run(src, str(expected) + ', errno: 0') def test_unistd_login(self): self.do_run_in_out_file_test('tests', 'unistd', 'login') @no_windows('https://github.com/emscripten-core/emscripten/issues/8882') def test_unistd_unlink(self): self.clear() orig_compiler_opts = self.emcc_args[:] src = open(path_from_root('tests', 'unistd', 'unlink.c')).read() for fs in ['MEMFS', 'NODEFS']: self.emcc_args = orig_compiler_opts + ['-D' + fs] # symlinks on node.js on non-linux behave differently (e.g. on Windows they require administrative privileges) # so skip testing those bits on that combination. if fs == 'NODEFS': self.emcc_args += ['-lnodefs.js'] if WINDOWS: self.emcc_args += ['-DNO_SYMLINK=1'] if MACOS: continue self.do_run(src, 'success', force_c=True, js_engines=[NODE_JS]) # Several differences/bugs on non-linux including https://github.com/nodejs/node/issues/18014 if not WINDOWS and not MACOS: self.emcc_args = orig_compiler_opts + ['-DNODERAWFS'] # 0 if root user if os.geteuid() == 0: self.emcc_args += ['-DSKIP_ACCESS_TESTS'] self.emcc_args += ['-s', 'NODERAWFS=1'] self.do_run(src, 'success', force_c=True, js_engines=[NODE_JS]) def test_unistd_links(self): self.clear() orig_compiler_opts = self.emcc_args[:] for fs in ['MEMFS', 'NODEFS']: if WINDOWS and fs == 'NODEFS': print('Skipping NODEFS part of this test for test_unistd_links on Windows, since it would require administrative privileges.', file=sys.stderr) # Also, other detected discrepancies if you do end up running this test on NODEFS: # test expects /, but Windows gives \ as path slashes. # Calling readlink() on a non-link gives error 22 EINVAL on Unix, but simply error 0 OK on Windows. 
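        # For context: NODEFS mounts a host directory into the Emscripten FS,
        # so host-OS behavior (path separators, symlink privileges) is visible
        # to the compiled code. The mount idiom such tests typically use on the
        # C side looks roughly like (illustrative sketch only):
        #   EM_ASM(
        #     FS.mkdir('/working');
        #     FS.mount(NODEFS, { root: '.' }, '/working');
        #   );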
continue self.emcc_args = orig_compiler_opts + ['-D' + fs] if fs == 'NODEFS': self.emcc_args += ['-lnodefs.js'] self.do_run_in_out_file_test('tests', 'unistd', 'links', js_engines=[NODE_JS]) @no_windows('Skipping NODEFS test, since it would require administrative privileges.') def test_unistd_symlink_on_nodefs(self): # Also, other detected discrepancies if you do end up running this test on NODEFS: # test expects /, but Windows gives \ as path slashes. # Calling readlink() on a non-link gives error 22 EINVAL on Unix, but simply error 0 OK on Windows. self.emcc_args += ['-lnodefs.js'] self.clear() src = open(path_from_root('tests', 'unistd', 'symlink_on_nodefs.c')).read() expected = open(path_from_root('tests', 'unistd', 'symlink_on_nodefs.out')).read() self.do_run(src, expected, js_engines=[NODE_JS]) def test_unistd_sleep(self): src = open(path_from_root('tests', 'unistd', 'sleep.c')).read() expected = open(path_from_root('tests', 'unistd', 'sleep.out')).read() self.do_run(src, expected) @also_with_wasm_bigint def test_unistd_io(self): self.set_setting('INCLUDE_FULL_LIBRARY', 1) # uses constants from ERRNO_CODES self.set_setting('ERROR_ON_UNDEFINED_SYMBOLS', 0) # avoid errors when linking in full library orig_compiler_opts = self.emcc_args[:] src = open(path_from_root('tests', 'unistd', 'io.c')).read() expected = open(path_from_root('tests', 'unistd', 'io.out')).read() for fs in ['MEMFS', 'NODEFS']: self.clear() self.emcc_args = orig_compiler_opts + ['-D' + fs] if fs == 'NODEFS': self.emcc_args += ['-lnodefs.js'] self.do_run(src, expected) @no_windows('https://github.com/emscripten-core/emscripten/issues/8882') def test_unistd_misc(self): orig_compiler_opts = self.emcc_args[:] for fs in ['MEMFS', 'NODEFS']: self.emcc_args = orig_compiler_opts + ['-D' + fs] if fs == 'NODEFS': self.emcc_args += ['-lnodefs.js'] self.do_run_in_out_file_test('tests', 'unistd', 'misc', js_engines=[NODE_JS]) # i64s in the API, which we'd need to legalize for JS, so in standalone mode # all we can test is wasm VMs @also_with_standalone_wasm def test_posixtime(self): test_path = path_from_root('tests', 'core', 'test_posixtime') src, output = (test_path + s for s in ('.c', '.out')) self.banned_js_engines = [V8_ENGINE] # v8 lacks monotonic time self.do_run_from_file(src, output) def test_uname(self): self.do_run_in_out_file_test('tests', 'core', 'test_uname') def test_unary_literal(self): self.do_run_in_out_file_test('tests', 'core', 'test_unary_literal') def test_env(self): src = open(path_from_root('tests', 'env', 'src.c')).read() expected = open(path_from_root('tests', 'env', 'output.txt')).read() if not self.is_wasm_backend(): # the fastcomp implementation is incorrect in one way expected = expected.replace('after alteration: Qest5', 'after alteration: test5') self.do_run(src, [ expected.replace('{{{ THIS_PROGRAM }}}', self.in_dir('src.cpp.o.js')).replace('\\', '/'), # node, can find itself properly expected.replace('{{{ THIS_PROGRAM }}}', './this.program') # spidermonkey, v8 ]) def test_environ(self): src = open(path_from_root('tests', 'env', 'src-mini.c')).read() expected = open(path_from_root('tests', 'env', 'output-mini.txt')).read() self.do_run(src, [ expected.replace('{{{ THIS_PROGRAM }}}', self.in_dir('src.cpp.o.js')).replace('\\', '/'), # node, can find itself properly expected.replace('{{{ THIS_PROGRAM }}}', './this.program') # spidermonkey, v8 ]) def test_systypes(self): self.do_run_in_out_file_test('tests', 'core', 'test_systypes') def test_stddef(self): self.do_run_in_out_file_test('tests', 'core', 
'test_stddef') self.do_run_in_out_file_test('tests', 'core', 'test_stddef', force_c=True) def test_getloadavg(self): self.do_run_in_out_file_test('tests', 'core', 'test_getloadavg') def test_nl_types(self): self.do_run_in_out_file_test('tests', 'core', 'test_nl_types') def test_799(self): src = open(path_from_root('tests', '799.cpp')).read() self.do_run(src, '''Set PORT family: 0, port: 3979 Get PORT family: 0 PORT: 3979 ''') def test_ctype(self): self.do_run_in_out_file_test('tests', 'core', 'test_ctype') def test_strcasecmp(self): self.do_run_in_out_file_test('tests', 'core', 'test_strcasecmp') def test_atomic(self): self.do_run_in_out_file_test('tests', 'core', 'test_atomic') def test_atomic_cxx(self): # the wasm backend has lock-free atomics, but not asm.js or asm2wasm is_lock_free = self.is_wasm_backend() self.emcc_args += ['-DIS_64BIT_LOCK_FREE=%d' % is_lock_free] self.do_run_in_out_file_test('tests', 'core', 'test_atomic_cxx') if self.get_setting('ALLOW_MEMORY_GROWTH') == 0 and not self.is_wasm() \ and not self.is_wasm_backend(): print('main module') self.set_setting('MAIN_MODULE', 1) self.do_run_in_out_file_test('tests', 'core', 'test_atomic_cxx') # TODO: test with USE_PTHREADS in wasm backend as well def test_phiundef(self): self.do_run_in_out_file_test('tests', 'core', 'test_phiundef') def test_netinet_in(self): src = open(path_from_root('tests', 'netinet', 'in.cpp')).read() expected = open(path_from_root('tests', 'netinet', 'in.out')).read() self.do_run(src, expected) @needs_dlfcn def test_main_module_static_align(self): if self.get_setting('ALLOW_MEMORY_GROWTH'): self.skipTest('no shared modules with memory growth') self.set_setting('MAIN_MODULE', 1) self.do_run_in_out_file_test('tests', 'core', 'test_main_module_static_align') # libc++ tests def test_iostream_and_determinism(self): src = ''' #include <iostream> int main() { std::cout << "hello world" << std::endl << 77 << "." << std::endl; return 0; } ''' num = 5 for i in range(num): print('(iteration %d)' % i) # add some timing nondeterminism here, not that we need it, but whatever time.sleep(random.random() / (10 * num)) self.do_run(src, 'hello world\n77.\n') # Verify that this build is identical to the previous one if os.path.exists('src.js.previous'): self.assertBinaryEqual('src.cpp.o.js', 'src.js.previous') shutil.copy2('src.cpp.o.js', 'src.js.previous') # Same but for the wasm file. 
if self.get_setting('WASM') and not self.get_setting('WASM2JS'): if os.path.exists('src.wasm.previous'): self.assertBinaryEqual('src.cpp.o.wasm', 'src.wasm.previous') shutil.copy2('src.cpp.o.wasm', 'src.wasm.previous') def test_stdvec(self): self.do_run_in_out_file_test('tests', 'core', 'test_stdvec') def test_random_device(self): self.do_run_in_out_file_test('tests', 'core', 'test_random_device') def test_reinterpreted_ptrs(self): self.do_run_in_out_file_test('tests', 'core', 'test_reinterpreted_ptrs') def test_js_libraries(self): create_test_file('main.cpp', ''' #include <stdio.h> extern "C" { extern void printey(); extern int calcey(int x, int y); } int main() { printey(); printf("*%d*\\n", calcey(10, 22)); return 0; } ''') create_test_file('mylib1.js', ''' mergeInto(LibraryManager.library, { printey: function() { out('hello from lib!'); } }); ''') create_test_file('mylib2.js', ''' mergeInto(LibraryManager.library, { calcey: function(x, y) { return x + y; } }); ''') self.emcc_args += ['--js-library', 'mylib1.js', '--js-library', 'mylib2.js'] self.do_run(open('main.cpp').read(), 'hello from lib!\n*32*\n') def test_unicode_js_library(self): create_test_file('main.cpp', ''' #include <stdio.h> extern "C" { extern void printey(); } int main() { printey(); return 0; } ''') self.emcc_args += ['--js-library', path_from_root('tests', 'unicode_library.js')] self.do_run(open('main.cpp').read(), u'Unicode snowman \u2603 says hello!') def test_funcptr_import_type(self): self.emcc_args += ['--js-library', path_from_root('tests', 'core', 'test_funcptr_import_type.js')] self.do_run_in_out_file_test('tests', 'core', 'test_funcptr_import_type') @no_asan('ASan does not work with EXPORT_ALL') def test_constglobalunion(self): self.emcc_args += ['-s', 'EXPORT_ALL=1'] self.do_run(r''' #include <stdio.h> struct one_const { long a; }; struct two_consts { long a; long b; }; union some_consts { struct one_const one; struct two_consts two; }; union some_consts my_consts = {{ 1 }}; struct one_const addr_of_my_consts = { (long)(&my_consts) }; int main(void) { printf("%li\n", (long)!!addr_of_my_consts.a); return 0; } ''', '1') ### 'Medium' tests def test_fannkuch(self): results = [(1, 0), (2, 1), (3, 2), (4, 4), (5, 7), (6, 10), (7, 16), (8, 22)] src = open(path_from_root('tests', 'fannkuch.cpp')).read() self.build(src, self.get_dir(), 'fannkuch.cpp') for i, j in results: print(i, j) self.do_run('fannkuch.cpp.o.js', 'Pfannkuchen(%d) = %d.' % (i, j), [str(i)], no_build=True) def test_raytrace(self): # TODO: Should we remove this test? 
self.skipTest('Relies on double value rounding, extremely sensitive') src = open(path_from_root('tests', 'raytrace.cpp')).read().replace('double', 'float') output = open(path_from_root('tests', 'raytrace.ppm')).read() self.do_run(src, output, ['3', '16']) def test_fasta(self): results = [(1, '''GG*ctt**tgagc*'''), (20, '''GGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTT*cttBtatcatatgctaKggNcataaaSatgtaaaDcDRtBggDtctttataattcBgtcg**tacgtgtagcctagtgtttgtgttgcgttatagtctatttgtggacacagtatggtcaaa**tgacgtcttttgatctgacggcgttaacaaagatactctg*'''), (50, '''GGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGA*TCACCTGAGGTCAGGAGTTCGAGACCAGCCTGGCCAACAT*cttBtatcatatgctaKggNcataaaSatgtaaaDcDRtBggDtctttataattcBgtcg**tactDtDagcctatttSVHtHttKtgtHMaSattgWaHKHttttagacatWatgtRgaaa**NtactMcSMtYtcMgRtacttctWBacgaa**agatactctgggcaacacacatacttctctcatgttgtttcttcggacctttcataacct**ttcctggcacatggttagctgcacatcacaggattgtaagggtctagtggttcagtgagc**ggaatatcattcgtcggtggtgttaatctatctcggtgtagcttataaatgcatccgtaa**gaatattatgtttatttgtcggtacgttcatggtagtggtgtcgccgatttagacgtaaa**ggcatgtatg*''')] old = self.emcc_args orig_src = open(path_from_root('tests', 'fasta.cpp')).read() def test(extra_args): self.emcc_args = old + extra_args for precision in [0, 1, 2]: self.set_setting('PRECISE_F32', precision) for t in ['float', 'double']: print(precision, t) src = orig_src.replace('double', t) self.build(src, self.get_dir(), 'fasta.cpp') for arg, output in results: self.do_run('fasta.cpp.o.js', output, [str(arg)], lambda x, err: x.replace('\n', '*'), no_build=True) shutil.copyfile('fasta.cpp.o.js', '%d_%s.js' % (precision, t)) test([]) @bleeding_edge_wasm_backend def test_fasta_nontrapping(self): self.emcc_args += ['-mnontrapping-fptoint'] self.test_fasta() def test_whets(self): self.do_run(open(path_from_root('tests', 'whets.cpp')).read(), 'Single Precision C Whetstone Benchmark', assert_returncode=None) def test_dlmalloc_inline(self): self.banned_js_engines = [NODE_JS] # slower, and fail on 64-bit # needed with typed arrays self.set_setting('INITIAL_MEMORY', 128 * 1024 * 1024) src = open(path_from_root('system', 'lib', 'dlmalloc.c')).read() + '\n\n\n' + open(path_from_root('tests', 'dlmalloc_test.c')).read() self.do_run(src, '*1,0*', ['200', '1'], force_c=True) self.do_run(None, '*400,0*', ['400', '400'], force_c=True, no_build=True) def test_dlmalloc(self): self.banned_js_engines = [NODE_JS] # slower, and fail on 64-bit # needed with typed arrays self.set_setting('INITIAL_MEMORY', 128 * 1024 * 1024) # Linked version src = open(path_from_root('tests', 'dlmalloc_test.c')).read() self.do_run(src, '*1,0*', ['200', '1']) self.do_run(None, '*400,0*', ['400', '400'], no_build=True) # TODO: do this in other passes too, passing their opts into emcc if self.emcc_args == []: # emcc should build in dlmalloc automatically, and do all the sign correction etc. 
for it try_delete('src.cpp.o.js') run_process([EMCC, path_from_root('tests', 'dlmalloc_test.c'), '-s', 'INITIAL_MEMORY=128MB', '-o', 'src.cpp.o.js'], stdout=PIPE, stderr=self.stderr_redirect) self.do_run(None, '*1,0*', ['200', '1'], no_build=True) self.do_run(None, '*400,0*', ['400', '400'], no_build=True) # The same for new and all its variants src = open(path_from_root('tests', 'new.cpp')).read() for new, delete in [ ('malloc(100)', 'free'), ('new char[100]', 'delete[]'), ('new Structy', 'delete'), ('new int', 'delete'), ('new Structy[10]', 'delete[]'), ]: self.do_run(src.replace('{{{ NEW }}}', new).replace('{{{ DELETE }}}', delete), '*1,0*') @no_asan('asan also changes malloc, and that ends up linking in new twice') def test_dlmalloc_partial(self): # present part of the symbols of dlmalloc, not all src = open(path_from_root('tests', 'new.cpp')).read().replace('{{{ NEW }}}', 'new int').replace('{{{ DELETE }}}', 'delete') + ''' #include <new> void * operator new(size_t size) throw(std::bad_alloc) { printf("new %d!\\n", size); return malloc(size); } ''' self.do_run(src, 'new 4!\n*1,0*') @no_asan('asan also changes malloc, and that ends up linking in new twice') def test_dlmalloc_partial_2(self): if 'SAFE_HEAP' in str(self.emcc_args): self.skipTest('we do unsafe stuff here') # present part of the symbols of dlmalloc, not all. malloc is harder to link than new which is weak. self.do_run_in_out_file_test('tests', 'core', 'test_dlmalloc_partial_2', assert_returncode=None) def test_libcxx(self): self.do_run(open(path_from_root('tests', 'hashtest.cpp')).read(), 'june -> 30\nPrevious (in alphabetical order) is july\nNext (in alphabetical order) is march') self.do_run(''' #include <set> #include <stdio.h> int main() { std::set<int> *fetchOriginatorNums = new std::set<int>(); fetchOriginatorNums->insert(171); printf("hello world\\n"); return 0; } ''', 'hello world') def test_typeid(self): self.do_run_in_out_file_test('tests', 'core', 'test_typeid') def test_static_variable(self): # needs atexit self.set_setting('EXIT_RUNTIME', 1) self.do_run_in_out_file_test('tests', 'core', 'test_static_variable') def test_fakestat(self): self.do_run_in_out_file_test('tests', 'core', 'test_fakestat') def test_mmap(self): self.set_setting('INITIAL_MEMORY', 128 * 1024 * 1024) # needs to flush stdio streams self.set_setting('EXIT_RUNTIME', 1) self.do_run_in_out_file_test('tests', 'core', 'test_mmap') def test_mmap_file(self): for extra_args in [[], ['--no-heap-copy']]: self.emcc_args += ['--embed-file', 'data.dat'] + extra_args x = 'data from the file........' s = '' while len(s) < 9000: if len(s) + len(x) < 9000: s += x continue s += '.' 
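      # The loop above pads `s` out to exactly 9000 bytes: whole copies of `x`
      # while they still fit, then single '.' characters. The expected output
      # below samples 20 bytes at offset 0 and 20 bytes at offset 4096 (the
      # start of the second 4 KiB page), which is presumably where mmap_file.c
      # maps the embedded data file.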
assert len(s) == 9000 create_test_file('data.dat', s) src = open(path_from_root('tests', 'mmap_file.c')).read() self.do_run(src, '*\n' + s[0:20] + '\n' + s[4096:4096 + 20] + '\n*\n') def test_cubescript(self): # uses register keyword self.emcc_args.append('-std=c++03') if self.run_name == 'asm3': self.emcc_args += ['--closure', '1'] # Use closure here for some additional coverage self.emcc_args = [x for x in self.emcc_args if x != '-g'] # remove -g, so we have one test without it by default def test(): self.do_run(path_from_root('tests', 'cubescript'), '*\nTemp is 33\n9\n5\nhello, everyone\n*', main_file='command.cpp') test() def count_relocations(): generated = open('src.cpp.o.js').read() generated = re.sub(r'\n+[ \n]*\n+', '\n', generated) start = '\nfunction __apply_relocations() {' relocs_start = generated.find(start) if relocs_start == -1: return "", 0 relocs_start += len(start) relocs_end = generated.find('\n}', relocs_start) relocs = generated[relocs_start:relocs_end] num_relocs = relocs.count('\n') return relocs, num_relocs # TODO: wrappers for wasm modules if not self.get_setting('WASM') and not self.is_wasm_backend(): print('relocatable') assert self.get_setting('RELOCATABLE') == self.get_setting('EMULATED_FUNCTION_POINTERS') == 0 self.set_setting('RELOCATABLE', 1) self.set_setting('EMULATED_FUNCTION_POINTERS', 1) test() self.set_setting('RELOCATABLE', 0) self.set_setting('EMULATED_FUNCTION_POINTERS', 0) if self.is_wasm_backend(): print('asyncify') # extra coverage self.emcc_args += ['-s', 'ASYNCIFY=1'] test() @needs_dlfcn def test_relocatable_void_function(self): self.set_setting('RELOCATABLE', 1) self.do_run_in_out_file_test('tests', 'core', 'test_relocatable_void_function') @wasm_simd def test_wasm_builtin_simd(self): # Improves test readability self.emcc_args.append('-Wno-c++11-narrowing') self.do_run(open(path_from_root('tests', 'test_wasm_builtin_simd.cpp')).read(), 'Success!') self.emcc_args.append('-munimplemented-simd128') self.build(open(path_from_root('tests', 'test_wasm_builtin_simd.cpp')).read(), self.get_dir(), os.path.join(self.get_dir(), 'src.cpp')) @wasm_simd def test_wasm_intrinsics_simd(self): def run(): self.do_run( open(path_from_root('tests', 'test_wasm_intrinsics_simd.c')).read(), 'Success!') # Improves test readability self.emcc_args.append('-Wno-c++11-narrowing') self.emcc_args.extend(['-Wpedantic', '-Werror', '-Wall', '-xc++']) run() self.emcc_args.append('-funsigned-char') run() self.emcc_args.extend(['-munimplemented-simd128', '-xc', '-std=c99']) self.build(open(path_from_root('tests', 'test_wasm_intrinsics_simd.c')).read(), self.get_dir(), os.path.join(self.get_dir(), 'src.cpp')) # Tests invoking the SIMD API via x86 SSE1 xmmintrin.h header (_mm_x() functions) @wasm_simd def test_sse1(self): src = path_from_root('tests', 'sse', 'test_sse1.cpp') run_process([shared.CLANG_CXX, src, '-msse', '-o', 'test_sse1', '-D_CRT_SECURE_NO_WARNINGS=1'] + building.get_native_building_args(), stdout=PIPE) native_result = run_process('./test_sse1', stdout=PIPE, env=building.get_building_env(native=True)).stdout orig_args = self.emcc_args self.emcc_args = orig_args + ['-I' + path_from_root('tests', 'sse'), '-msse'] self.maybe_closure() self.do_run(open(src).read(), native_result) # Tests invoking the SIMD API via x86 SSE2 emmintrin.h header (_mm_x() functions) @wasm_simd def test_sse2(self): src = path_from_root('tests', 'sse', 'test_sse2.cpp') run_process([shared.CLANG_CXX, src, '-msse2', '-Wno-argument-outside-range', '-o', 'test_sse2', '-D_CRT_SECURE_NO_WARNINGS=1'] 
+ building.get_native_building_args(), stdout=PIPE) native_result = run_process('./test_sse2', stdout=PIPE, env=building.get_building_env(native=True)).stdout orig_args = self.emcc_args self.emcc_args = orig_args + ['-I' + path_from_root('tests', 'sse'), '-msse2', '-Wno-argument-outside-range'] self.maybe_closure() self.do_run(open(src).read(), native_result) # Tests invoking the SIMD API via x86 SSE3 pmmintrin.h header (_mm_x() functions) @wasm_simd def test_sse3(self): src = path_from_root('tests', 'sse', 'test_sse3.cpp') run_process([shared.CLANG_CXX, src, '-msse3', '-Wno-argument-outside-range', '-o', 'test_sse3', '-D_CRT_SECURE_NO_WARNINGS=1'] + building.get_native_building_args(), stdout=PIPE) native_result = run_process('./test_sse3', stdout=PIPE, env=building.get_building_env(native=True)).stdout orig_args = self.emcc_args self.emcc_args = orig_args + ['-I' + path_from_root('tests', 'sse'), '-msse3', '-Wno-argument-outside-range'] self.maybe_closure() self.do_run(open(src).read(), native_result) # Tests invoking the SIMD API via x86 SSSE3 tmmintrin.h header (_mm_x() functions) @wasm_simd def test_ssse3(self): src = path_from_root('tests', 'sse', 'test_ssse3.cpp') run_process([shared.CLANG_CXX, src, '-mssse3', '-Wno-argument-outside-range', '-o', 'test_ssse3', '-D_CRT_SECURE_NO_WARNINGS=1'] + building.get_native_building_args(), stdout=PIPE) native_result = run_process('./test_ssse3', stdout=PIPE, env=building.get_building_env(native=True)).stdout orig_args = self.emcc_args self.emcc_args = orig_args + ['-I' + path_from_root('tests', 'sse'), '-mssse3', '-Wno-argument-outside-range'] self.maybe_closure() self.do_run(open(src).read(), native_result) # Tests invoking the SIMD API via x86 SSE4.1 smmintrin.h header (_mm_x() functions) @wasm_simd def test_sse4_1(self): src = path_from_root('tests', 'sse', 'test_sse4_1.cpp') run_process([shared.CLANG_CXX, src, '-msse4.1', '-Wno-argument-outside-range', '-o', 'test_sse4_1', '-D_CRT_SECURE_NO_WARNINGS=1'] + building.get_native_building_args(), stdout=PIPE) native_result = run_process('./test_sse4_1', stdout=PIPE, env=building.get_building_env(native=True)).stdout orig_args = self.emcc_args self.emcc_args = orig_args + ['-I' + path_from_root('tests', 'sse'), '-msse4.1', '-Wno-argument-outside-range'] self.maybe_closure() self.do_run(open(src).read(), native_result) # Tests invoking the SIMD API via x86 SSE4.2 nmmintrin.h header (_mm_x() functions) @wasm_simd def test_sse4_2(self): src = path_from_root('tests', 'sse', 'test_sse4_2.cpp') run_process([shared.CLANG_CXX, src, '-msse4.2', '-Wno-argument-outside-range', '-o', 'test_sse4_2', '-D_CRT_SECURE_NO_WARNINGS=1'] + building.get_native_building_args(), stdout=PIPE) native_result = run_process('./test_sse4_2', stdout=PIPE, env=building.get_building_env(native=True)).stdout orig_args = self.emcc_args self.emcc_args = orig_args + ['-I' + path_from_root('tests', 'sse'), '-msse4.2', '-Wno-argument-outside-range'] self.maybe_closure() self.do_run(open(src).read(), native_result) # Tests invoking the SIMD API via x86 AVX avxintrin.h header (_mm_x() functions) @wasm_simd def test_avx(self): src = path_from_root('tests', 'sse', 'test_avx.cpp') run_process([shared.CLANG_CXX, src, '-mavx', '-Wno-argument-outside-range', '-o', 'test_avx', '-D_CRT_SECURE_NO_WARNINGS=1'] + building.get_native_building_args(), stdout=PIPE) native_result = run_process('./test_avx', stdout=PIPE, env=building.get_building_env(native=True)).stdout orig_args = self.emcc_args self.emcc_args = orig_args + ['-I' + 
path_from_root('tests', 'sse'), '-mavx', '-Wno-argument-outside-range'] self.maybe_closure() self.do_run(open(src).read(), native_result) @no_asan('call stack exceeded on some versions of node') def test_gcc_unmangler(self): self.emcc_args += ['-I' + path_from_root('third_party')] self.do_run(open(path_from_root('third_party', 'gcc_demangler.c')).read(), '*d_demangle(char const*, int, unsigned int*)*', args=['_ZL10d_demanglePKciPj'], assert_returncode=None) @needs_make('make') def test_lua(self): self.emcc_args.remove('-Werror') self.do_run('', 'hello lua world!\n17\n1\n2\n3\n4\n7', args=['-e', '''print("hello lua world!");print(17);for x = 1,4 do print(x) end;print(10-3)'''], libraries=self.get_library(os.path.join('third_party', 'lua'), [os.path.join('src', 'lua.o'), os.path.join('src', 'liblua.a')], make=['make', 'generic'], configure=None), includes=[path_from_root('tests', 'lua')], output_nicerizer=lambda string, err: (string + err).replace('\n\n', '\n').replace('\n\n', '\n')) @no_asan('issues with freetype itself') @needs_make('configure script') @is_slow_test def test_freetype(self): if self.run_name == 'asm2g': # flip for some more coverage here self.set_setting('ALIASING_FUNCTION_POINTERS', 1 - self.get_setting('ALIASING_FUNCTION_POINTERS')) self.add_pre_run("FS.createDataFile('/', 'font.ttf', %s, true, false, false);" % str( list(bytearray(open(path_from_root('tests', 'freetype', 'LiberationSansBold.ttf'), 'rb').read())) )) # Not needed for js, but useful for debugging shutil.copyfile(path_from_root('tests', 'freetype', 'LiberationSansBold.ttf'), 'font.ttf') # Main self.do_run(open(path_from_root('tests', 'freetype', 'main.c')).read(), open(path_from_root('tests', 'freetype', 'ref.txt')).read(), ['font.ttf', 'test!', '150', '120', '25'], libraries=self.get_freetype_library(), includes=[path_from_root('tests', 'third_party', 'freetype', 'include')]) # github issue 324 print('[issue 324]') self.do_run(open(path_from_root('tests', 'freetype', 'main_2.c')).read(), open(path_from_root('tests', 'freetype', 'ref_2.txt')).read(), ['font.ttf', 'w', '32', '32', '25'], libraries=self.get_freetype_library(), includes=[path_from_root('tests', 'third_party', 'freetype', 'include')]) print('[issue 324 case 2]') self.do_run(open(path_from_root('tests', 'freetype', 'main_3.c')).read(), open(path_from_root('tests', 'freetype', 'ref_3.txt')).read(), ['font.ttf', 'W', '32', '32', '0'], libraries=self.get_freetype_library(), includes=[path_from_root('tests', 'third_party', 'freetype', 'include')]) print('[issue 324 case 3]') self.do_run(None, open(path_from_root('tests', 'freetype', 'ref_4.txt')).read(), ['font.ttf', 'ea', '40', '32', '0'], no_build=True) @no_asan('local count too large for VMs') def test_sqlite(self): self.set_setting('DISABLE_EXCEPTION_CATCHING', 1) self.set_setting('EXPORTED_FUNCTIONS', ['_main', '_sqlite3_open', '_sqlite3_close', '_sqlite3_exec', '_sqlite3_free']) if self.get_setting('ASM_JS') == 1 and '-g' in self.emcc_args: print("disabling inlining") # without registerize (which -g disables), we generate huge amounts of code self.set_setting('INLINING_LIMIT', 50) # newer clang has a warning for implicit conversions that lose information, # which happens in sqlite (see #9138) self.emcc_args += ['-Wno-implicit-int-float-conversion'] # temporarily ignore unknown flags, which lets the above flag be used on our CI which doesn't # yet have the new clang with that flag self.emcc_args += ['-Wno-unknown-warning-option'] self.emcc_args += ['-I' + path_from_root('tests', 'third_party', 
'sqlite')] src = ''' #define SQLITE_DISABLE_LFS #define LONGDOUBLE_TYPE double #define SQLITE_INT64_TYPE long long int #define SQLITE_THREADSAFE 0 ''' src += open(path_from_root('tests', 'third_party', 'sqlite', 'sqlite3.c')).read() src += open(path_from_root('tests', 'sqlite', 'benchmark.c')).read() self.do_run(src, open(path_from_root('tests', 'sqlite', 'benchmark.txt')).read(), includes=[path_from_root('tests', 'sqlite')], force_c=True) @needs_make('mingw32-make') @is_slow_test @parameterized({ 'cmake': (True,), 'configure': (False,) }) def test_zlib(self, use_cmake): if WINDOWS and not use_cmake: self.skipTest("Windows cannot run configure sh scripts") self.maybe_closure() if self.run_name == 'asm2g': self.emcc_args += ['-g4'] # more source maps coverage if self.run_name == 'asm2f': return self.skipTest('asm2f affects cflags in a way that changes zlib compile flag reporting, so the stdout is different') if use_cmake: make_args = [] configure = [path_from_root('emcmake'), 'cmake', '.'] else: make_args = ['libz.a'] configure = ['sh', './configure'] self.do_run(open(path_from_root('tests', 'third_party', 'zlib', 'example.c')).read(), open(path_from_root('tests', 'core', 'test_zlib.out')).read(), libraries=self.get_library(os.path.join('third_party', 'zlib'), 'libz.a', make_args=make_args, configure=configure), includes=[path_from_root('tests', 'third_party', 'zlib'), 'building', 'zlib'], force_c=True) @needs_make('make') @is_slow_test @parameterized({ 'cmake': (True,), 'autoconf': (False,) }) # Called thus so it runs late in the alphabetical cycle... it is long def test_bullet(self, use_cmake): if WINDOWS and not use_cmake: self.skipTest("Windows cannot run configure sh scripts") self.set_setting('DEAD_FUNCTIONS', ['__ZSt9terminatev']) self.emcc_args += ['-Wno-c++11-narrowing', '-Wno-deprecated-register', '-Wno-writable-strings'] asserts = self.get_setting('ASSERTIONS') # extra testing for ASSERTIONS == 2 self.set_setting('ASSERTIONS', 2 if use_cmake else asserts) self.do_run(open(path_from_root('tests', 'third_party', 'bullet', 'Demos', 'HelloWorld', 'HelloWorld.cpp')).read(), [open(path_from_root('tests', 'bullet', 'output.txt')).read(), # different roundings open(path_from_root('tests', 'bullet', 'output2.txt')).read(), open(path_from_root('tests', 'bullet', 'output3.txt')).read(), open(path_from_root('tests', 'bullet', 'output4.txt')).read()], libraries=self.get_bullet_library(use_cmake), includes=[path_from_root('tests', 'third_party', 'bullet', 'src')]) @no_asan('issues with freetype itself') @needs_make('depends on freetype') @is_slow_test def test_poppler(self): def test(): pdf_data = open(path_from_root('tests', 'poppler', 'paper.pdf'), 'rb').read() create_test_file('paper.pdf.js', str(list(bytearray(pdf_data)))) create_test_file('pre.js', ''' Module.preRun = function() { FS.createDataFile('/', 'paper.pdf', eval(read_('paper.pdf.js')), true, false, false); }; Module.postRun = function() { var FileData = MEMFS.getFileDataAsRegularArray(FS.root.contents['filename-1.ppm']); out("Data: " + JSON.stringify(FileData.map(function(x) { return unSign(x, 8) }))); }; ''') self.emcc_args += ['--pre-js', 'pre.js'] ppm_data = str(list(bytearray(open(path_from_root('tests', 'poppler', 'ref.ppm'), 'rb').read()))) self.do_run('', ppm_data.replace(' ', ''), libraries=self.get_poppler_library(), args=['-scale-to', '512', 'paper.pdf', 'filename']) test() if self.supports_js_dfe(): print("Testing poppler with ELIMINATE_DUPLICATE_FUNCTIONS set to 1", file=sys.stderr) num_original_funcs = 
self.count_funcs('src.cpp.o.js') self.set_setting('ELIMINATE_DUPLICATE_FUNCTIONS', 1) test() # Make sure that DFE ends up eliminating more than 200 functions (if we can view source) assert (num_original_funcs - self.count_funcs('src.cpp.o.js')) > 200 @needs_make('make') @is_slow_test def test_openjpeg(self): if '-fsanitize=address' in self.emcc_args: self.set_setting('INITIAL_MEMORY', 128 * 1024 * 1024) def line_splitter(data): out = '' counter = 0 for ch in data: out += ch if ch == ' ' and counter > 60: out += '\n' counter = 0 else: counter += 1 return out # remove -g, so we have one test without it by default self.emcc_args = [x for x in self.emcc_args if x != '-g'] original_j2k = path_from_root('tests', 'openjpeg', 'syntensity_lobby_s.j2k') image_bytes = list(bytearray(open(original_j2k, 'rb').read())) create_test_file('pre.js', """ Module.preRun = function() { FS.createDataFile('/', 'image.j2k', %s, true, false, false); }; Module.postRun = function() { out('Data: ' + JSON.stringify(MEMFS.getFileDataAsRegularArray(FS.analyzePath('image.raw').object))); }; """ % line_splitter(str(image_bytes))) shutil.copy(path_from_root('tests', 'third_party', 'openjpeg', 'opj_config.h'), self.get_dir()) lib = self.get_library(os.path.join('third_party', 'openjpeg'), [os.path.sep.join('codec/CMakeFiles/j2k_to_image.dir/index.c.o'.split('/')), os.path.sep.join('codec/CMakeFiles/j2k_to_image.dir/convert.c.o'.split('/')), os.path.sep.join('codec/CMakeFiles/j2k_to_image.dir/__/common/color.c.o'.split('/')), os.path.join('bin', 'libopenjpeg.a')], configure=['cmake', '.'], # configure_args=['--enable-tiff=no', '--enable-jp3d=no', '--enable-png=no'], make_args=[]) # no -j 2, since parallel builds can fail # We use doubles in JS, so we get slightly different values than native code. 
So we # check our output by comparing the average pixel difference def image_compare(output, err): # Get the image generated by JS, from the JSON.stringify'd array m = re.search(r'\[[\d, -]*\]', output) self.assertIsNotNone(m, 'Failed to find proper image output in: ' + output) # Evaluate the output as a python array js_data = eval(m.group(0)) js_data = [x if x >= 0 else 256 + x for x in js_data] # Our output may be signed, so unsign it # Get the correct output true_data = bytearray(open(path_from_root('tests', 'openjpeg', 'syntensity_lobby_s.raw'), 'rb').read()) # Compare them assert(len(js_data) == len(true_data)) num = len(js_data) diff_total = js_total = true_total = 0 for i in range(num): js_total += js_data[i] true_total += true_data[i] diff_total += abs(js_data[i] - true_data[i]) js_mean = js_total / float(num) true_mean = true_total / float(num) diff_mean = diff_total / float(num) image_mean = 83.265 # print '[image stats:', js_mean, image_mean, true_mean, diff_mean, num, ']' assert abs(js_mean - image_mean) < 0.01, [js_mean, image_mean] assert abs(true_mean - image_mean) < 0.01, [true_mean, image_mean] assert diff_mean < 0.01, diff_mean return output self.emcc_args += ['--minify', '0'] # to compare the versions self.emcc_args += ['--pre-js', 'pre.js'] def do_test(): self.do_run(open(path_from_root('tests', 'third_party', 'openjpeg', 'codec', 'j2k_to_image.c')).read(), 'Successfully generated', # The real test for valid output is in image_compare '-i image.j2k -o image.raw'.split(' '), libraries=lib, includes=[path_from_root('tests', 'third_party', 'openjpeg', 'libopenjpeg'), path_from_root('tests', 'third_party', 'openjpeg', 'codec'), path_from_root('tests', 'third_party', 'openjpeg', 'common'), os.path.join(self.get_build_dir(), 'openjpeg')], force_c=True, assert_returncode=0, output_nicerizer=image_compare) do_test() # extra testing if self.get_setting('ALLOW_MEMORY_GROWTH') == 1: print('no memory growth', file=sys.stderr) self.set_setting('ALLOW_MEMORY_GROWTH', 0) do_test() @no_wasm_backend("uses bitcode compiled with asmjs, and we don't have unified triples") def test_python(self): self.set_setting('EMULATE_FUNCTION_POINTER_CASTS', 1) # The python build contains several undefined symbols self.set_setting('ERROR_ON_UNDEFINED_SYMBOLS', 0) bitcode = path_from_root('tests', 'third_party', 'python', 'python.bc') pyscript = dedent('''\ print '***' print "hello python world!" print [x*2 for x in range(4)] t=2 print 10-3-t print (lambda x: x*2)(11) print '%f' % 5.47 print {1: 2}.keys() print '***' ''') pyoutput = '***\nhello python world!\n[0, 2, 4, 6]\n5\n22\n5.470000\n[1]\n***' for lto in [0, 1]: print('lto:', lto) if lto == 1: self.emcc_args += ['--llvm-lto', '1'] self.do_run_object(bitcode, pyoutput, args=['-S', '-c', pyscript]) def test_lifetime(self): self.do_ll_run(path_from_root('tests', 'lifetime.ll'), 'hello, world!\n') if '-O1' in self.emcc_args or '-O2' in self.emcc_args: # lifetime stuff and their vars must be culled self.assertNotContained('a18', open('lifetime.ll.o.js').read()) # Test cases in separate files. Note that these files may contain invalid .ll! # They are only valid enough for us to read for test purposes, not for llvm-as # to process. 
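  # In addition to the .ll file itself, a case may ship sidecar files that the
  # loop below consumes: <case>.emcc (a JSON list of extra emcc flags) and
  # <case>.py (an optional checker script executed with the generated JS source
  # in scope).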
@no_wasm_backend("uses bitcode compiled with asmjs, and we don't have unified triples") @is_slow_test def test_zzz_cases(self): # needs to flush stdio streams self.set_setting('EXIT_RUNTIME', 1) # These tests don't end up linking with libc due to a behaviour in emcc # where the llvm-link step is skipped when the input is a single # object file. Since most of them `printf` (which comes from JS) but # depends on `strlen` (which comes from musl) these tests almost all # have an undefined `strlen`, which happens to not get called. # TODO(sbc): Remove the special case from emcc what bypasses llvm-link # and then remove this line? self.set_setting('ERROR_ON_UNDEFINED_SYMBOLS', 0) self.set_setting('WARN_ON_UNDEFINED_SYMBOLS', 0) self.emcc_args.remove('-Werror') emcc_args = self.emcc_args # The following tests link to libc, whereas others link with -nostdlib needs_stdlib = [ 'muli33_ta2', 'philoop_ta2', 'uadd_overflow_64_ta2', 'i64toi8star', 'legalizer_ta2', 'quotedlabel', 'alignedunaligned', 'sillybitcast', 'invokeundef', 'loadbitcastgep', 'sillybitcast2', 'legalizer_b_ta2', 'emptystruct', 'entry3', 'atomicrmw_i64', 'atomicrmw_b_i64', 'invoke_byval', 'i24_ce_fastcomp', ] need_no_error_on_undefined_symbols = [ 'unsanitized_declare' ] skip_tests = [ # invalid ir 'aliasbitcast', 'structparam', 'issue_39', 'phinonexist', 'oob_ta2', 'phiself', 'invokebitcast', # pnacl limitations in ExpandStructRegs 'structphiparam', 'callwithstructural_ta2', 'callwithstructural64_ta2', 'structinparam', # pnacl limitations in ExpandGetElementPtr '2xi40', # current fastcomp limitations FIXME 'quoted', # assumes malloc exists in JS 'llvm_assume', 'longjmp_tiny', 'longjmp_tiny_invoke', 'longjmp_tiny_invoke_phi', 'longjmp_tiny_keepem', 'longjmp_tiny_keepem_cond', 'longjmp_tiny_phi', 'longjmp_tiny_phi2', ] skip_wasm = [ # casts a function pointer from (i32, i32)* to (i64)*, which happens to work in asm.js but is a general function pointer undefined behavior 'call_inttoptr_i64', ] names = glob.glob(path_from_root('tests', 'cases', '*.ll')) names.sort() for name in names: shortname = os.path.splitext(name)[0] # TODO: test only worked in non-fastcomp (well, these cases) basename = os.path.basename(shortname) if basename in skip_tests: continue if self.is_wasm() and basename in skip_wasm: continue if '_noasm' in shortname and self.get_setting('ASM_JS'): print('case "%s" not relevant for asm.js' % shortname) continue if basename in need_no_error_on_undefined_symbols: self.set_setting('ERROR_ON_UNDEFINED_SYMBOLS', 0) print("Testing case '%s'..." % basename) output_file = path_from_root('tests', 'cases', shortname + '.txt') if os.path.exists(output_file): output = open(output_file).read() else: output = 'hello, world!' 
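      # If no <case>.txt exists the expected output defaults to 'hello, world!';
      # a .txt whose contents are just 'skip' disables the case in the check below.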
if output.rstrip() != 'skip': self.emcc_args = list(emcc_args) if basename in needs_stdlib: self.set_setting('FILESYSTEM', 1) else: self.emcc_args.append('-nostdlib') # no libc is linked in; with FILESYSTEM=0 we have a chance at printfing anyhow self.set_setting('FILESYSTEM', 0) if os.path.exists(shortname + '.emcc'): self.emcc_args += json.loads(open(shortname + '.emcc').read()) self.do_ll_run(path_from_root('tests', 'cases', name), output, assert_returncode=None) # Optional source checking, a python script that gets a global generated with the source src_checker = path_from_root('tests', 'cases', shortname + '.py') if os.path.exists(src_checker): generated = open('src.cpp.o.js').read() # noqa exec(open(src_checker).read()) @no_asan('call stack exceeded on some versions of node') @is_slow_test def test_fuzz(self): self.emcc_args += ['-I' + path_from_root('tests', 'fuzz', 'include'), '-w'] skip_lto_tests = [ # LLVM LTO bug '19.c', '18.cpp', # puts exists before LTO, but is not used; LTO cleans it out, but then creates uses to it (printf=>puts) XXX https://llvm.org/bugs/show_bug.cgi?id=23814 '23.cpp' ] def run_all(x): print(x) for name in sorted(glob.glob(path_from_root('tests', 'fuzz', '*.c')) + glob.glob(path_from_root('tests', 'fuzz', '*.cpp'))): # if os.path.basename(name) != '4.c': # continue if 'newfail' in name: continue if os.path.basename(name).startswith('temp_fuzzcode'): continue # pnacl legalization issue, see https://code.google.com/p/nativeclient/issues/detail?id=4027 if x == 'lto' and self.run_name in ['default', 'asm2f'] and os.path.basename(name) in ['8.c']: continue if x == 'lto' and self.run_name == 'default' and os.path.basename(name) in skip_lto_tests: continue if x == 'lto' and os.path.basename(name) in ['21.c']: continue # LLVM LTO bug print(name) if name.endswith('.cpp'): self.emcc_args.append('-std=c++03') self.do_run(open(path_from_root('tests', 'fuzz', name)).read(), open(path_from_root('tests', 'fuzz', name + '.txt')).read(), force_c=name.endswith('.c'), assert_returncode=None) if name.endswith('.cpp'): self.emcc_args.remove('-std=c++03') run_all('normal') self.emcc_args += ['--llvm-lto', '1'] run_all('lto') def test_autodebug_bitcode(self): if self.is_wasm_backend() and '-flto' not in self.get_emcc_args(): return self.skipTest('must use bitcode object files for bitcode autodebug') self.emcc_args += ['--llvm-opts', '0'] # Autodebug the code def do_autodebug(filename): building.llvm_dis(filename + '.o', filename + '.ll') run_process([PYTHON, AUTODEBUGGER, filename + '.ll', filename + '.auto.ll']) # rebuild .bc # TODO: use code in do_autodebug_post for this self.prep_ll_file(filename, filename + '.auto.ll', force_recompile=True) # Run a test that should work, generating some code test_path = path_from_root('tests', 'core', 'test_structs') src = test_path + '.c' output = test_path + '.out' # Add an ll hook, to force ll generation self.do_run_from_file(src, output, build_ll_hook=lambda x: False) filename = 'src.c' do_autodebug(filename) # Compare to each other, and to expected output self.do_ll_run(filename + '.auto.ll', 'AD:-1,1') # Test using build_ll_hook src = ''' #include <stdio.h> char cache[256], *next = cache; int main() { cache[10] = 25; next[20] = 51; int x = cache[10]; double y = 11.52; printf("*%d,%d,%.2f*\\n", x, cache[20], y); return 0; } ''' self.do_run(src, 'AD:-1,1', build_ll_hook=do_autodebug) @no_asan('autodebug logging interferes with asan') @no_fastcomp('autodebugging wasm is only supported in the wasm backend') 
@with_env_modify({'EMCC_AUTODEBUG': '1'}) @also_with_impure_standalone_wasm def test_autodebug_wasm(self): # Autodebug does not work with too much shadow memory. # Memory consumed by autodebug depends on the size of the WASM linear memory. # With a large shadow memory, the JS engine runs out of memory. if '-fsanitize=address' in self.emcc_args: self.set_setting('ASAN_SHADOW_SIZE', 16 * 1024 * 1024) # test that the program both works and also emits some of the logging # (but without the specific output, as it is logging the actual locals # used and so forth, which will change between opt modes and updates of # llvm etc.) def check(out, err): for msg in ['log_execution', 'get_i32', 'set_i32', 'load_ptr', 'load_val', 'store_ptr', 'store_val']: self.assertIn(msg, out) return out + err self.do_run(open(path_from_root('tests', 'core', 'test_autodebug.c')).read(), 'success', output_nicerizer=check) ### Integration tests @sync def test_ccall(self): self.emcc_args.append('-Wno-return-stack-address') self.set_setting('EXTRA_EXPORTED_RUNTIME_METHODS', ['ccall', 'cwrap']) create_test_file('post.js', ''' out('*'); var ret; ret = Module['ccall']('get_int', 'number'); out([typeof ret, ret].join(',')); ret = ccall('get_float', 'number'); out([typeof ret, ret.toFixed(2)].join(',')); ret = ccall('get_bool', 'boolean'); out([typeof ret, ret].join(',')); ret = ccall('get_string', 'string'); out([typeof ret, ret].join(',')); ret = ccall('print_int', null, ['number'], [12]); out(typeof ret); ret = ccall('print_float', null, ['number'], [14.56]); out(typeof ret); ret = ccall('print_bool', null, ['boolean'], [true]); out(typeof ret); ret = ccall('print_string', null, ['string'], ["cheez"]); out(typeof ret); ret = ccall('print_string', null, ['array'], [[97, 114, 114, 45, 97, 121, 0]]); out(typeof ret); // JS array ret = ccall('print_string', null, ['array'], [new Uint8Array([97, 114, 114, 45, 97, 121, 0])]); out(typeof ret); // typed array ret = ccall('multi', 'number', ['number', 'number', 'number', 'string'], [2, 1.4, 3, 'more']); out([typeof ret, ret].join(',')); var p = ccall('malloc', 'pointer', ['number'], [4]); setValue(p, 650, 'i32'); ret = ccall('pointer', 'pointer', ['pointer'], [p]); out([typeof ret, getValue(ret, 'i32')].join(',')); out('*'); // part 2: cwrap var noThirdParam = Module['cwrap']('get_int', 'number'); out(noThirdParam()); var multi = Module['cwrap']('multi', 'number', ['number', 'number', 'number', 'string']); out(multi(2, 1.4, 3, 'atr')); out(multi(8, 5.4, 4, 'bret')); out('*'); // part 3: avoid stack explosion and check it's restored correctly for (var i = 0; i < TOTAL_STACK/60; i++) { ccall('multi', 'number', ['number', 'number', 'number', 'string'], [0, 0, 0, '123456789012345678901234567890123456789012345678901234567890']); } out('stack is ok.'); ccall('call_ccall_again', null); ''') self.emcc_args += ['--post-js', 'post.js'] self.set_setting('EXPORTED_FUNCTIONS', ['_get_int', '_get_float', '_get_bool', '_get_string', '_print_int', '_print_float', '_print_bool', '_print_string', '_multi', '_pointer', '_call_ccall_again', '_malloc']) self.do_run_in_out_file_test('tests', 'core', 'test_ccall') if '-O2' in self.emcc_args and '-g' not in self.emcc_args: print('with closure') self.emcc_args += ['--closure', '1'] self.do_run_in_out_file_test('tests', 'core', 'test_ccall') def test_EXTRA_EXPORTED_RUNTIME_METHODS(self): self.do_run_in_out_file_test('tests', 'core', 'EXTRA_EXPORTED_RUNTIME_METHODS') # test dyncall (and other runtime methods in support.js) can be exported self.emcc_args += 
['-DEXPORTED'] self.set_setting('EXTRA_EXPORTED_RUNTIME_METHODS', ['dynCall', 'addFunction', 'lengthBytesUTF8', 'getTempRet0', 'setTempRet0']) self.do_run_in_out_file_test('tests', 'core', 'EXTRA_EXPORTED_RUNTIME_METHODS') @no_fastcomp('fails mysteriously on fastcomp (dynCall_viji is not defined); ignored, because fastcomp is deprecated') @no_minimal_runtime('MINIMAL_RUNTIME does not blindly export all symbols to Module to save code size') def test_dyncall_specific(self): emcc_args = self.emcc_args[:] for which, exported_runtime_methods in [ ('DIRECT', []), ('EXPORTED', []), ('FROM_OUTSIDE', ['dynCall_viji']) ]: print(which) self.emcc_args = emcc_args + ['-D' + which] self.set_setting('EXTRA_EXPORTED_RUNTIME_METHODS', exported_runtime_methods) self.do_run_in_out_file_test('tests', 'core', 'dyncall_specific') def test_getValue_setValue(self): # these used to be exported, but no longer are by default def test(output_prefix='', args=[]): old = self.emcc_args[:] self.emcc_args += args self.do_run(open(path_from_root('tests', 'core', 'getValue_setValue.cpp')).read(), open(path_from_root('tests', 'core', 'getValue_setValue' + output_prefix + '.txt')).read(), assert_returncode=None) self.emcc_args = old # see that direct usage (not on module) works. we don't export, but the use # keeps it alive through JSDCE test(args=['-DDIRECT']) # see that with assertions, we get a nice error message self.set_setting('EXTRA_EXPORTED_RUNTIME_METHODS', []) self.set_setting('ASSERTIONS', 1) test('_assert') self.set_setting('ASSERTIONS', 0) # see that when we export them, things work on the module self.set_setting('EXTRA_EXPORTED_RUNTIME_METHODS', ['getValue', 'setValue']) test() def test_FS_exports(self): # these used to be exported, but no longer are by default for use_files in (0, 1): print(use_files) def test(output_prefix='', args=[], assert_returncode=None): if use_files: args += ['-DUSE_FILES'] print(args) old = self.emcc_args[:] self.emcc_args += args self.do_run(open(path_from_root('tests', 'core', 'FS_exports.cpp')).read(), (open(path_from_root('tests', 'core', 'FS_exports' + output_prefix + '.txt')).read(), open(path_from_root('tests', 'core', 'FS_exports' + output_prefix + '_2.txt')).read()), assert_returncode=assert_returncode) self.emcc_args = old # see that direct usage (not on module) works. we don't export, but the use # keeps it alive through JSDCE test(args=['-DDIRECT', '-s', 'FORCE_FILESYSTEM=1']) # see that with assertions, we get a nice error message self.set_setting('EXTRA_EXPORTED_RUNTIME_METHODS', []) self.set_setting('ASSERTIONS', 1) test('_assert', assert_returncode=None) self.set_setting('ASSERTIONS', 0) # see that when we export them, things work on the module self.set_setting('EXTRA_EXPORTED_RUNTIME_METHODS', ['FS_createDataFile']) test(args=['-s', 'FORCE_FILESYSTEM=1']) def test_legacy_exported_runtime_numbers(self): # these used to be exported, but no longer are by default def test(output_prefix='', args=[]): old = self.emcc_args[:] self.emcc_args += args self.do_run(open(path_from_root('tests', 'core', 'legacy_exported_runtime_numbers.cpp')).read(), open(path_from_root('tests', 'core', 'legacy_exported_runtime_numbers' + output_prefix + '.txt')).read(), assert_returncode=None) self.emcc_args = old # see that direct usage (not on module) works. 
we don't export, but the use # keeps it alive through JSDCE test(args=['-DDIRECT']) # see that with assertions, we get a nice error message self.set_setting('EXTRA_EXPORTED_RUNTIME_METHODS', []) self.set_setting('ASSERTIONS', 1) test('_assert') self.set_setting('ASSERTIONS', 0) # see that when we export them, things work on the module self.set_setting('EXTRA_EXPORTED_RUNTIME_METHODS', ['ALLOC_DYNAMIC']) test() @no_wasm_backend('DEAD_FUNCTIONS elimination is done by the JSOptimizer') def test_dead_functions(self): src = r''' #include <stdio.h> extern "C" { __attribute__((noinline)) int unused(int x) { volatile int y = x; return y; } } int main(int argc, char **argv) { printf("*%d*\n", argc > 1 ? unused(1) : 2); return 0; } ''' # Sanity check that it works and the dead function is emitted self.do_run(src, '*1*', args=['x']) js = open('src.cpp.o.js').read() if self.run_name in ['default', 'asm2g']: assert 'function _unused($' in js self.do_run(None, '*2*', no_build=True) # Kill off the dead function, and check a code path using it aborts self.set_setting('DEAD_FUNCTIONS', ['_unused']) self.do_run(src, '*2*') self.do_run(None, 'abort(', args=['x'], no_build=True, assert_returncode=None) # Kill off a library function, check code aborts self.set_setting('DEAD_FUNCTIONS', ['_printf']) self.do_run(src, 'abort(', assert_returncode=None) self.do_run(None, 'abort(', args=['x'], no_build=True, assert_returncode=None) def test_response_file(self): response_data = '-o %s/response_file.js %s' % (self.get_dir(), path_from_root('tests', 'hello_world.cpp')) create_test_file('rsp_file', response_data.replace('\\', '\\\\')) run_process([EMCC, "@rsp_file"] + self.get_emcc_args()) self.do_run('response_file.js', 'hello, world', no_build=True) self.assertContained('response file not found: foo.txt', self.expect_fail([EMCC, '@foo.txt'])) def test_linker_response_file(self): objfile = 'response_file.o' run_process([EMCC, '-c', path_from_root('tests', 'hello_world.cpp'), '-o', objfile] + self.get_emcc_args()) # This should expand into -Wl,--start-group <objfile> -Wl,--end-group response_data = '--start-group ' + objfile + ' --end-group' create_test_file('rsp_file', response_data.replace('\\', '\\\\')) run_process([EMCC, "-Wl,@rsp_file", '-o', 'response_file.o.js'] + self.get_emcc_args()) self.do_run('response_file.o.js', 'hello, world', no_build=True) def test_exported_response(self): src = r''' #include <stdio.h> #include <stdlib.h> #include <emscripten.h> extern "C" { int other_function() { return 5; } } int main() { int x = EM_ASM_INT({ return Module._other_function() }); emscripten_run_script_string(""); // Add a reference to a symbol that exists in src/deps_info.json to uncover issue #2836 in the test suite. 
printf("waka %d!\n", x); return 0; } ''' create_test_file('exps', '["_main","_other_function"]') self.emcc_args += ['-s', 'EXPORTED_FUNCTIONS=@exps'] self.do_run(src, '''waka 5!''') assert 'other_function' in open('src.cpp.o.js').read() def test_large_exported_response(self): src = r''' #include <stdio.h> #include <stdlib.h> #include <emscripten.h> extern "C" { ''' js_funcs = [] num_exports = 5000 count = 0 while count < num_exports: src += 'int exported_func_from_response_file_%d () { return %d;}\n' % (count, count) js_funcs.append('_exported_func_from_response_file_%d' % count) count += 1 src += r''' } int main() { int x = EM_ASM_INT({ return Module._exported_func_from_response_file_4999() }); emscripten_run_script_string(""); // Add a reference to a symbol that exists in src/deps_info.json to uncover issue #2836 in the test suite. printf("waka %d!\n", x); return 0; } ''' js_funcs.append('_main') exported_func_json_file = 'large_exported_response.json' create_test_file(exported_func_json_file, json.dumps(js_funcs)) self.emcc_args += ['-s', 'EXPORTED_FUNCTIONS=@' + exported_func_json_file] self.do_run(src, '''waka 4999!''') assert '_exported_func_from_response_file_1' in open('src.cpp.o.js').read() @sync def test_add_function(self): self.set_setting('INVOKE_RUN', 0) self.set_setting('RESERVED_FUNCTION_POINTERS', 1) self.set_setting('EXPORTED_RUNTIME_METHODS', ['callMain']) src = path_from_root('tests', 'interop', 'test_add_function.cpp') post_js = path_from_root('tests', 'interop', 'test_add_function_post.js') self.emcc_args += ['--post-js', post_js] print('basics') self.do_run_in_out_file_test('tests', 'interop', 'test_add_function') if '-g' not in self.emcc_args and not self.is_wasm_backend(): print('with --closure') old = list(self.emcc_args) self.emcc_args += ['--closure', '1'] self.do_run_in_out_file_test('tests', 'interop', 'test_add_function') self.emcc_args = old print(old) print('with ALIASING_FUNCTION_POINTERS') self.set_setting('ALIASING_FUNCTION_POINTERS', 1) self.do_run_in_out_file_test('tests', 'interop', 'test_add_function') self.clear_setting('ALIASING_FUNCTION_POINTERS') print('with RESERVED_FUNCTION_POINTERS=0') self.set_setting('RESERVED_FUNCTION_POINTERS', 0) if self.is_wasm_backend(): self.do_run(open(src).read(), 'Unable to grow wasm table', assert_returncode=None) print('- with table growth') self.set_setting('ALLOW_TABLE_GROWTH', 1) self.emcc_args += ['-DGROWTH'] # enable costly assertions to verify correct table behavior self.set_setting('ASSERTIONS', 2) self.do_run_in_out_file_test('tests', 'interop', 'test_add_function') else: self.do_run(open(src).read(), 'Finished up all reserved function pointers. Use a higher value for RESERVED_FUNCTION_POINTERS.', assert_returncode=None) self.assertNotContained('jsCall_', open('src.cpp.o.js').read()) if not self.get_setting('WASM') and not self.is_wasm_backend(): # with emulation, we don't need to reserve, except with wasm where # we still do. 
print('- with function pointer emulation') self.set_setting('EMULATED_FUNCTION_POINTERS', 1) self.do_run_in_out_file_test('tests', 'interop', 'test_add_function') def test_getFuncWrapper_sig_alias(self): src = r''' #include <stdio.h> #include <emscripten.h> void func1(int a) { printf("func1\n"); } void func2(int a, int b) { printf("func2\n"); } int main() { EM_ASM({ getFuncWrapper($0, 'vi')(0); getFuncWrapper($1, 'vii')(0, 0); }, func1, func2); return 0; } ''' self.do_run(src, 'func1\nfunc2\n') def test_emulate_function_pointer_casts(self): self.set_setting('EMULATE_FUNCTION_POINTER_CASTS', 1) self.do_run(open(path_from_root('tests', 'core', 'test_emulate_function_pointer_casts.cpp')).read(), ('|1.266,1|', # asm.js, double <-> int '|1.266,1413754136|')) # wasm, reinterpret the bits @no_wasm2js('TODO: nicely printed names in wasm2js') @parameterized({ 'normal': ([],), 'noexcept': (['-fno-exceptions'],) }) def test_demangle_stacks(self, extra_args): self.emcc_args += extra_args self.set_setting('DEMANGLE_SUPPORT', 1) self.set_setting('ASSERTIONS', 1) # ensure function names are preserved self.emcc_args += ['--profiling-funcs', '--llvm-opts', '0'] self.do_run_in_out_file_test('tests', 'core', 'test_demangle_stacks', assert_returncode=None) if not self.has_changed_setting('ASSERTIONS'): print('without assertions, the stack is not printed, but a message suggesting assertions is') self.set_setting('ASSERTIONS', 0) self.do_run_in_out_file_test('tests', 'core', 'test_demangle_stacks_noassert', assert_returncode=None) def test_demangle_stacks_symbol_map(self): self.set_setting('DEMANGLE_SUPPORT', 1) if '-O' in str(self.emcc_args) and '-O0' not in self.emcc_args and '-O1' not in self.emcc_args and '-g' not in self.emcc_args: self.emcc_args += ['--llvm-opts', '0'] else: self.skipTest("without opts, we don't emit a symbol map") self.emcc_args += ['--emit-symbol-map'] self.do_run(open(path_from_root('tests', 'core', 'test_demangle_stacks.cpp')).read(), 'abort', assert_returncode=None) # make sure the shortened name is the right one full_aborter = None short_aborter = None for line in open('src.cpp.o.js.symbols').readlines(): if ':' not in line: continue # split by the first ':' (wasm backend demangling may include more :'s later on) short, full = line.split(':', 1) if 'Aborter' in full: short_aborter = short full_aborter = full self.assertIsNotNone(full_aborter) self.assertIsNotNone(short_aborter) print('full:', full_aborter, 'short:', short_aborter) if SPIDERMONKEY_ENGINE and os.path.exists(SPIDERMONKEY_ENGINE[0]): output = run_js('src.cpp.o.js', engine=SPIDERMONKEY_ENGINE, stderr=PIPE, full_output=True, assert_returncode=None) # we may see the full one, if -g, or the short one if not if ' ' + short_aborter + ' ' not in output and ' ' + full_aborter + ' ' not in output: # stack traces may also be ' name ' or 'name@' etc if '\n' + short_aborter + ' ' not in output and '\n' + full_aborter + ' ' not in output and 'wasm-function[' + short_aborter + ']' not in output: if '\n' + short_aborter + '@' not in output and '\n' + full_aborter + '@' not in output: self.assertContained(' ' + short_aborter + ' ' + '\n' + ' ' + full_aborter + ' ', output) def test_tracing(self): self.emcc_args += ['--tracing'] self.do_run_in_out_file_test('tests', 'core', 'test_tracing') @no_wasm_backend('https://github.com/emscripten-core/emscripten/issues/9527') def test_eval_ctors(self): if '-O2' not in str(self.emcc_args) or '-O1' in str(self.emcc_args): self.skipTest('need js optimizations') if not self.get_setting('WASM'): 
      self.skipTest('this test uses wasm binaries')

    orig_args = self.emcc_args

    print('leave printf in ctor')
    self.emcc_args = orig_args + ['-s', 'EVAL_CTORS=1']
    self.do_run(r'''
      #include <stdio.h>
      struct C {
        C() { printf("constructing!\n"); } // don't remove this!
      };
      C c;
      int main() {}
    ''', "constructing!\n")

    def get_code_size():
      if self.is_wasm():
        # Use the number of functions as a proxy for code size
        return self.count_wasm_contents('src.cpp.o.wasm', 'funcs')
      else:
        return os.path.getsize('src.cpp.o.js')

    def get_mem_size():
      if self.is_wasm():
        # Use the size of the memory data segments as a proxy for memory size
        return self.count_wasm_contents('src.cpp.o.wasm', 'memory-data')
      if self.uses_memory_init_file():
        return os.path.getsize('src.cpp.o.js.mem')
      # otherwise we ignore memory size
      return 0

    def do_test(test):
      self.emcc_args = orig_args + ['-s', 'EVAL_CTORS=1']
      test()
      ec_code_size = get_code_size()
      ec_mem_size = get_mem_size()
      self.emcc_args = orig_args
      test()
      code_size = get_code_size()
      mem_size = get_mem_size()
      if mem_size:
        print('mem: ', mem_size, '=>', ec_mem_size)
        self.assertGreater(ec_mem_size, mem_size)
      print('code:', code_size, '=>', ec_code_size)
      self.assertLess(ec_code_size, code_size)

    print('remove ctor that just assigns to memory')

    def test1():
      self.do_run(r'''
        #include <stdio.h>
        struct C {
          int x;
          C() {
            volatile int y = 10;
            y++;
            x = y;
          }
        };
        C c;
        int main() {
          printf("x: %d\n", c.x);
        }
      ''', "x: 11\n")
    do_test(test1)

    if self.is_wasm_backend():
      # The wasm backend currently exports a single initializer, so the ctor
      # evaluation is all or nothing. In addition, it doesn't currently do DCE
      # of libcxx symbols (because they are marked as visibility(default)),
      # and because of that we end up not being able to eval ctors unless all
      # libcxx constructors can be eval'd.
      return

    print('libcxx - remove 2 ctors from iostream code')
    src = open(path_from_root('tests', 'hello_libcxx.cpp')).read()
    output = 'hello, world!'
def test2(): self.do_run(src, output) do_test(test2) print('assertions too') self.set_setting('ASSERTIONS', 1) self.do_run(src, output) self.set_setting('ASSERTIONS', 0) print('remove just some, leave others') def test3(): self.do_run(r''' #include <iostream> #include <string> class std_string { public: std_string(): ptr(nullptr) { std::cout << "std_string()\n"; } std_string(const char* s): ptr(s) { std::cout << "std_string(const char* s)" << std::endl; } std_string(const std_string& s): ptr(s.ptr) { std::cout << "std_string(const std_string& s) " << std::endl; } const char* data() const { return ptr; } private: const char* ptr; }; const std_string txtTestString("212121\0"); const std::string s2text("someweirdtext"); int main() { std::cout << s2text << std::endl; std::cout << txtTestString.data() << std::endl; std::cout << txtTestString.data() << std::endl; return 0; } ''', '''std_string(const char* s) someweirdtext 212121 212121 ''') # noqa do_test(test3) def test_embind(self): self.emcc_args += ['--bind'] src = r''' #include <stdio.h> #include <emscripten/val.h> using namespace emscripten; int main() { val Math = val::global("Math"); // two ways to call Math.abs printf("abs(-10): %d\n", Math.call<int>("abs", -10)); printf("abs(-11): %d\n", Math["abs"](-11).as<int>()); return 0; } ''' self.do_run(src, 'abs(-10): 10\nabs(-11): 11') def test_embind_2(self): self.emcc_args += ['--bind', '--post-js', 'post.js'] create_test_file('post.js', ''' function printLerp() { out('lerp ' + Module.lerp(100, 200, 66) + '.'); } ''') src = r''' #include <stdio.h> #include <emscripten.h> #include <emscripten/bind.h> using namespace emscripten; int lerp(int a, int b, int t) { return (100 - t) * a + t * b; } EMSCRIPTEN_BINDINGS(my_module) { function("lerp", &lerp); } int main(int argc, char **argv) { EM_ASM(printLerp()); return 0; } ''' self.do_run(src, 'lerp 166') def test_embind_3(self): self.emcc_args += ['--bind', '--post-js', 'post.js'] create_test_file('post.js', ''' function ready() { try { Module.compute(new Uint8Array([1,2,3])); } catch(e) { out(e); } } ''') src = r''' #include <emscripten.h> #include <emscripten/bind.h> using namespace emscripten; int compute(int array[]) { return 0; } EMSCRIPTEN_BINDINGS(my_module) { function("compute", &compute, allow_raw_pointers()); } int main(int argc, char **argv) { EM_ASM(ready()); return 0; } ''' self.do_run(src, 'UnboundTypeError: Cannot call compute due to unbound types: Pi') @no_wasm_backend('long doubles are f128s in wasm backend') def test_embind_4(self): self.emcc_args += ['--bind', '--post-js', 'post.js'] create_test_file('post.js', ''' function printFirstElement() { out(Module.getBufferView()[0]); } ''') src = r''' #include <emscripten.h> #include <emscripten/bind.h> #include <emscripten/val.h> #include <stdio.h> using namespace emscripten; const size_t kBufferSize = 1024; long double buffer[kBufferSize]; val getBufferView(void) { val v = val(typed_memory_view(kBufferSize, buffer)); return v; } EMSCRIPTEN_BINDINGS(my_module) { function("getBufferView", &getBufferView); } int main(int argc, char **argv) { buffer[0] = 107; EM_ASM(printFirstElement()); return 0; } ''' self.do_run(src, '107') def test_embind_5(self): self.emcc_args += ['--bind'] self.do_run_in_out_file_test('tests', 'core', 'test_embind_5') def test_embind_custom_marshal(self): self.emcc_args += ['--bind', '--pre-js', path_from_root('tests', 'embind', 'test_custom_marshal.js')] self.do_run_in_out_file_test('tests', 'embind', 'test_custom_marshal', assert_identical=True) def 
test_embind_float_constants(self): self.emcc_args += ['--bind'] self.do_run_from_file(path_from_root('tests', 'embind', 'test_float_constants.cpp'), path_from_root('tests', 'embind', 'test_float_constants.out')) def test_embind_negative_constants(self): self.emcc_args += ['--bind'] self.do_run_from_file(path_from_root('tests', 'embind', 'test_negative_constants.cpp'), path_from_root('tests', 'embind', 'test_negative_constants.out')) def test_embind_unsigned(self): self.emcc_args += ['--bind'] self.do_run_from_file(path_from_root('tests', 'embind', 'test_unsigned.cpp'), path_from_root('tests', 'embind', 'test_unsigned.out')) @no_asan('FIXME #11158') def test_embind_val(self): self.emcc_args += ['--bind'] self.do_run_from_file(path_from_root('tests', 'embind', 'test_val.cpp'), path_from_root('tests', 'embind', 'test_val.out')) def test_embind_no_rtti(self): create_test_file('pre.js', ''' Module = {}; Module['postRun'] = function() { out("dotest retured: " + Module.dotest()); }; ''') src = r''' #include <emscripten/bind.h> #include <emscripten/val.h> #include <stdio.h> int main(int argc, char** argv){ printf("418\n"); return 0; } int test() { return 42; } EMSCRIPTEN_BINDINGS(my_module) { emscripten::function("dotest", &test); } ''' self.emcc_args += ['--bind', '-fno-rtti', '-DEMSCRIPTEN_HAS_UNBOUND_TYPE_NAMES=0', '--pre-js', 'pre.js'] self.do_run(src, '418\ndotest retured: 42\n') def test_embind_no_rtti_followed_by_rtti(self): create_test_file('pre.js', ''' Module = {}; Module['postRun'] = function() { out("dotest retured: " + Module.dotest()); }; ''') src = r''' #include <emscripten/bind.h> #include <emscripten/val.h> #include <stdio.h> int main(int argc, char** argv){ printf("418\n"); return 0; } int test() { return 42; } EMSCRIPTEN_BINDINGS(my_module) { emscripten::function("dotest", &test); } ''' self.emcc_args += ['--bind', '-fno-rtti', '-frtti', '--pre-js', 'pre.js'] self.do_run(src, '418\ndotest retured: 42\n') @sync def test_webidl(self): if self.run_name == 'asm2': self.emcc_args += ['--closure', '1', '-g1'] # extra testing # avoid closure minified names competing with our test code in the global name space self.set_setting('MODULARIZE', 1) def do_test_in_mode(mode, allow_memory_growth): print('testing mode', mode, ', memory growth =', allow_memory_growth) # Force IDL checks mode os.environ['IDL_CHECKS'] = mode run_process([PYTHON, path_from_root('tools', 'webidl_binder.py'), path_from_root('tests', 'webidl', 'test.idl'), 'glue']) self.assertExists('glue.cpp') self.assertExists('glue.js') # Export things on "TheModule". This matches the typical use pattern of the bound library # being used as Box2D.* or Ammo.*, and we cannot rely on "Module" being always present (closure may remove it). 
create_test_file('export.js', ''' // test purposes: remove printErr output, whose order is unpredictable when compared to print err = err = function(){}; ''') self.emcc_args += ['-s', 'EXPORTED_FUNCTIONS=["_malloc"]', '--post-js', 'glue.js', '--post-js', 'export.js'] if allow_memory_growth: self.emcc_args += ['-s', 'ALLOW_MEMORY_GROWTH', '-Wno-almost-asm'] shutil.copyfile(path_from_root('tests', 'webidl', 'test.h'), 'test.h') shutil.copyfile(path_from_root('tests', 'webidl', 'test.cpp'), 'test.cpp') src = open('test.cpp').read() def post(filename): with open(filename, 'a') as f: f.write('\n\n') if self.run_name == 'asm2': f.write('var TheModule = Module();\n') else: f.write('var TheModule = Module;\n') f.write('\n\n') if allow_memory_growth: f.write("var isMemoryGrowthAllowed = true;") else: f.write("var isMemoryGrowthAllowed = false;") f.write(open(path_from_root('tests', 'webidl', 'post.js')).read()) f.write('\n\n') output = open(path_from_root('tests', 'webidl', "output_%s.txt" % mode)).read() self.do_run(src, output, post_build=post, output_nicerizer=(lambda out, err: out)) do_test_in_mode('ALL', False) do_test_in_mode('FAST', False) do_test_in_mode('DEFAULT', False) do_test_in_mode('ALL', True) ### Tests for tools @no_wasm2js('TODO: source maps in wasm2js') def test_source_map(self): if '-g' not in self.emcc_args: self.emcc_args.append('-g') src = ''' #include <stdio.h> #include <assert.h> __attribute__((noinline)) int foo() { printf("hi"); // line 6 return 1; // line 7 } int main() { printf("%d", foo()); // line 11 return 0; // line 12 } ''' create_test_file('src.cpp', src) out_filename = 'a.out.js' wasm_filename = 'a.out.wasm' no_maps_filename = 'no-maps.out.js' assert '-g4' not in self.emcc_args building.emcc('src.cpp', self.serialize_settings() + self.emcc_args + self.emcc_args, out_filename) # the file name may find its way into the generated code, so make sure we # can do an apples-to-apples comparison by compiling with the same file name shutil.move(out_filename, no_maps_filename) with open(no_maps_filename) as f: no_maps_file = f.read() no_maps_file = re.sub(' *//[@#].*$', '', no_maps_file, flags=re.MULTILINE) self.emcc_args.append('-g4') building.emcc(os.path.abspath('src.cpp'), self.serialize_settings() + self.emcc_args + self.emcc_args, out_filename, stderr=PIPE) map_referent = out_filename if not self.get_setting('WASM') else wasm_filename # after removing the @line and @sourceMappingURL comments, the build # result should be identical to the non-source-mapped debug version. # this is worth checking because the parser AST swaps strings for token # objects when generating source maps, so we want to make sure the # optimizer can deal with both types. map_filename = map_referent + '.map' def encode_utf8(data): if isinstance(data, dict): for key in data: data[key] = encode_utf8(data[key]) return data elif isinstance(data, list): for i in range(len(data)): data[i] = encode_utf8(data[i]) return data elif isinstance(data, type(u'')): return data.encode('utf8') else: return data def source_map_file_loc(name): if shared.Settings.WASM_BACKEND: return name # in fastcomp, we have the absolute path, which is not good return os.path.abspath(name) data = json.load(open(map_filename)) if str is bytes: # Python 2 compatibility data = encode_utf8(data) if hasattr(data, 'file'): # the file attribute is optional, but if it is present it needs to refer # the output file. 
self.assertPathsIdentical(map_referent, data['file']) assert len(data['sources']) == 1, data['sources'] self.assertPathsIdentical(source_map_file_loc('src.cpp'), data['sources'][0]) if hasattr(data, 'sourcesContent'): # the sourcesContent attribute is optional, but if it is present it # needs to containt valid source text. self.assertTextDataIdentical(src, data['sourcesContent'][0]) mappings = json.loads(jsrun.run_js( path_from_root('tools', 'source-maps', 'sourcemap2json.js'), shared.NODE_JS, [map_filename])) if str is bytes: # Python 2 compatibility mappings = encode_utf8(mappings) seen_lines = set() for m in mappings: self.assertPathsIdentical(source_map_file_loc('src.cpp'), m['source']) seen_lines.add(m['originalLine']) # ensure that all the 'meaningful' lines in the original code get mapped # when optimizing, the binaryen optimizer may remove some of them (by inlining, etc.) if is_optimizing(self.emcc_args): assert seen_lines.issuperset([11, 12]), seen_lines else: assert seen_lines.issuperset([6, 7, 11, 12]), seen_lines @no_wasm2js('TODO: source maps in wasm2js') @no_fastcomp('DWARF is only supported in upstream') def test_dwarf(self): self.emcc_args.append('-g') create_test_file('src.cpp', ''' #include <emscripten.h> EM_JS(int, out_to_js, (int x), {}) void foo() { out_to_js(0); // line 5 out_to_js(1); // line 6 out_to_js(2); // line 7 // A silly possible recursion to avoid binaryen doing any inlining. if (out_to_js(3)) foo(); } int main() { foo(); } ''') js_filename = 'a.out.js' wasm_filename = 'a.out.wasm' building.emcc('src.cpp', self.serialize_settings() + self.emcc_args, js_filename) LLVM_DWARFDUMP = os.path.join(LLVM_ROOT, 'llvm-dwarfdump') out = run_process([LLVM_DWARFDUMP, wasm_filename, '-all'], stdout=PIPE).stdout # parse the sections sections = {} curr_section_name = '' curr_section_body = '' def add_section(): if curr_section_name: sections[curr_section_name] = curr_section_body for line in out.splitlines(): if ' contents:' in line: # a new section, a line like ".debug_str contents:" add_section() curr_section_name = line.split(' ')[0] curr_section_body = '' else: # possibly a line in a section if curr_section_name: curr_section_body += line + '\n' add_section() # make sure the right sections exist self.assertIn('.debug_abbrev', sections) self.assertIn('.debug_info', sections) self.assertIn('.debug_line', sections) self.assertIn('.debug_str', sections) self.assertIn('.debug_ranges', sections) # verify some content in the sections self.assertIn('"src.cpp"', sections['.debug_info']) # the line section looks like this: # Address Line Column File ISA Discriminator Flags # ------------------ ------ ------ ------ --- ------------- ------------- # 0x000000000000000b 5 0 3 0 0 is_stmt src_to_addr = {} for line in sections['.debug_line'].splitlines(): if line.startswith('0x'): while ' ' in line: line = line.replace(' ', ' ') addr, line, col = line.split(' ')[:3] key = (int(line), int(col)) src_to_addr.setdefault(key, []).append(addr) # each of the calls must remain in the binary, and be mapped self.assertIn((5, 9), src_to_addr) self.assertIn((6, 9), src_to_addr) self.assertIn((7, 9), src_to_addr) def get_dwarf_addr(line, col): addrs = src_to_addr[(line, col)] assert len(addrs) == 1, 'we assume the simple calls have one address' return int(addrs[0], 0) # the lines must appear in sequence (as calls to JS, the optimizer cannot # reorder them) self.assertLess(get_dwarf_addr(5, 9), get_dwarf_addr(6, 9)) self.assertLess(get_dwarf_addr(6, 9), get_dwarf_addr(7, 9)) # get the wat, printing 
with -g which has binary offsets wat = run_process([os.path.join(building.get_binaryen_bin(), 'wasm-opt'), wasm_filename, '-g', '--print'], stdout=PIPE).stdout # we expect to see a pattern like this, as in both debug and opt builds # there isn't much that can change with such calls to JS (they can't be # reordered or anything else): # # ;; code offset: 0x? # (drop # ;; code offset: 0x? # (call $out_to_js # ;; code offset: 0x? # (local.get ?) or (i32.const ?) # ) # ) # # In stacky stream of instructions form, it is # local.get or i32.const # call $out_to_js # drop # get_wat_addr gets the address of one of the 3 interesting calls, by its # index (0,1,2). def get_wat_addr(call_index): # find the call_index-th call call_loc = -1 for i in range(call_index + 1): call_loc = wat.find('call $out_to_js', call_loc + 1) assert call_loc > 0 # the call begins with the local.get/i32.const printed below it, which is # the first instruction in the stream, so it has the lowest address start_addr_loc = wat.find('0x', call_loc) assert start_addr_loc > 0 start_addr_loc_end = wat.find('\n', start_addr_loc) start_addr = int(wat[start_addr_loc:start_addr_loc_end], 0) # the call ends with the drop, which is the last in the stream, at the # highest address end_addr_loc = wat.rfind('drop', 0, call_loc) assert end_addr_loc > 0 end_addr_loc = wat.rfind('0x', 0, end_addr_loc) assert end_addr_loc > 0 end_addr_loc_end = wat.find('\n', end_addr_loc) assert end_addr_loc_end > 0 end_addr = int(wat[end_addr_loc:end_addr_loc_end], 0) return (start_addr, end_addr) # match up the DWARF and the wat for i in range(3): dwarf_addr = get_dwarf_addr(5 + i, 9) start_wat_addr, end_wat_addr = get_wat_addr(i) # the dwarf may match any of the 3 instructions that form the stream of # of instructions implementing the call in the source code, in theory self.assertLessEqual(start_wat_addr, dwarf_addr) self.assertLessEqual(dwarf_addr, end_wat_addr) def test_modularize_closure_pre(self): # test that the combination of modularize + closure + pre-js works. in that mode, # closure should not minify the Module object in a way that the pre-js cannot use it. self.emcc_args += [ '--pre-js', path_from_root('tests', 'core', 'modularize_closure_pre.js'), '--closure', '1', '-g1', '-s', 'MODULARIZE=1', ] def post(filename): with open(filename, 'a') as f: f.write('\n\n') f.write('var TheModule = Module();\n') self.do_run_in_out_file_test('tests', 'core', 'modularize_closure_pre', post_build=post) @no_wasm('wasmifying destroys debug info and stack tracability') @no_wasm2js('source maps support') def test_emscripten_log(self): self.banned_js_engines = [V8_ENGINE] # v8 doesn't support console.log self.emcc_args += ['-s', 'DEMANGLE_SUPPORT=1'] if self.get_setting('ASM_JS'): # XXX Does not work in SpiderMonkey since callstacks cannot be captured when running in asm.js, see https://bugzilla.mozilla.org/show_bug.cgi?id=947996 self.banned_js_engines += [SPIDERMONKEY_ENGINE] if '-g' not in self.emcc_args: self.emcc_args.append('-g') self.emcc_args += ['-DRUN_FROM_JS_SHELL'] self.do_run(open(path_from_root('tests', 'emscripten_log', 'emscripten_log.cpp')).read(), '''test print 123 12.345679 9.123457 1.353180 12345678 9123456 1353179 12.345679 9123456 1353179 12345678 9.123457 1353179 12345678 9123456 1.353180 12345678 9.123457 1.353180 12.345679 9123456 1.353180 12.345679 9.123457 1353179 Success! 
''') # test closure compiler as well if self.run_name == 'asm2': print('closure') self.emcc_args += ['--closure', '1', '-g1'] # extra testing self.do_run_in_out_file_test('tests', 'emscripten_log', 'emscripten_log_with_closure') def test_float_literals(self): self.do_run_in_out_file_test('tests', 'test_float_literals') def test_exit_status(self): # needs to flush stdio streams self.set_setting('EXIT_RUNTIME', 1) src = r''' #include <stdio.h> #include <stdlib.h> static void cleanup() { printf("cleanup\n"); } int main() { atexit(cleanup); // this atexit should still be called printf("hello, world!\n"); // Unusual exit status to make sure it's working! if (CAPITAL_EXIT) { _Exit(118); } else { exit(118); } } ''' create_test_file('pre.js', ''' Module.preInit = function() { addOnExit(function () { out('I see exit status: ' + EXITSTATUS); }); } ''') self.emcc_args += ['--pre-js', 'pre.js'] self.do_run(src.replace('CAPITAL_EXIT', '0'), 'hello, world!\ncleanup\nI see exit status: 118', assert_returncode=None) self.do_run(src.replace('CAPITAL_EXIT', '1'), 'hello, world!\ncleanup\nI see exit status: 118', assert_returncode=None) def test_noexitruntime(self): src = r''' #include <emscripten.h> #include <stdio.h> static int testPre = TEST_PRE; struct Global { Global() { printf("in Global()\n"); if (testPre) { EM_ASM(noExitRuntime = true;); } } ~Global() { printf("ERROR: in ~Global()\n"); } } global; int main() { if (!testPre) { EM_ASM(noExitRuntime = true;); } printf("in main()\n"); } ''' self.do_run(src.replace('TEST_PRE', '0'), 'in Global()\nin main()') self.do_run(src.replace('TEST_PRE', '1'), 'in Global()\nin main()') def test_minmax(self): self.do_run(open(path_from_root('tests', 'test_minmax.c')).read(), 'NAN != NAN\nSuccess!') def test_locale(self): self.do_run_from_file(path_from_root('tests', 'test_locale.c'), path_from_root('tests', 'test_locale.out')) def test_vswprintf_utf8(self): self.do_run_from_file(path_from_root('tests', 'vswprintf_utf8.c'), path_from_root('tests', 'vswprintf_utf8.out')) @no_asan('asan is not compatible with asyncify stack operations; may also need to not instrument asan_c_load_4, TODO') def test_async(self): # needs to flush stdio streams self.set_setting('EXIT_RUNTIME', 1) self.banned_js_engines = [SPIDERMONKEY_ENGINE, V8_ENGINE] # needs setTimeout which only node has if self.is_wasm_backend(): self.set_setting('ASYNCIFY', 1) else: self.skipTest('fastcomp Asyncify was removed') src = r''' #include <stdio.h> #include <emscripten.h> void f(void *p) { *(int*)p = 99; printf("!"); } int main() { int i = 0; printf("Hello"); emscripten_async_call(f, &i, 1); printf("World"); emscripten_sleep(100); printf("%d\n", i); } ''' self.do_run(src, 'HelloWorld!99') if self.is_wasm_backend(): print('check bad ccall use') src = r''' #include <stdio.h> #include <emscripten.h> int main() { printf("Hello"); emscripten_sleep(100); printf("World\n"); } ''' self.set_setting('ASSERTIONS', 1) self.set_setting('INVOKE_RUN', 0) create_test_file('pre.js', ''' Module['onRuntimeInitialized'] = function() { try { ccall('main', 'number', ['number', 'string'], [2, 'waka']); var never = true; } catch(e) { out(e); assert(!never); } }; ''') self.emcc_args += ['--pre-js', 'pre.js'] self.do_run(src, 'The call to main is running asynchronously.') print('check reasonable ccall use') src = r''' #include <stdio.h> #include <emscripten.h> int main() { printf("Hello"); emscripten_sleep(100); printf("World\n"); } ''' create_test_file('pre.js', ''' Module['onRuntimeInitialized'] = function() { ccall('main', null, 
['number', 'string'], [2, 'waka'], { async: true }); }; ''') self.do_run(src, 'HelloWorld') print('check ccall promise') self.set_setting('EXPORTED_FUNCTIONS', ['_stringf', '_floatf']) src = r''' #include <stdio.h> #include <emscripten.h> extern "C" { const char* stringf(char* param) { emscripten_sleep(20); printf(param); return "second"; } double floatf() { emscripten_sleep(20); emscripten_sleep(20); return 6.4; } } ''' create_test_file('pre.js', r''' Module['onRuntimeInitialized'] = function() { ccall('stringf', 'string', ['string'], ['first\n'], { async: true }) .then(function(val) { console.log(val); ccall('floatf', 'number', null, null, { async: true }).then(console.log); }); }; ''') self.do_run(src, 'first\nsecond\n6.4') @no_wasm_backend('ASYNCIFY coroutines are not yet supported in the LLVM wasm backend') def do_test_coroutine(self, additional_settings): # needs to flush stdio streams self.set_setting('EXIT_RUNTIME', 1) src = open(path_from_root('tests', 'test_coroutines.cpp')).read() for (k, v) in additional_settings.items(): self.set_setting(k, v) self.do_run(src, '*leaf-0-100-1-101-1-102-2-103-3-104-5-105-8-106-13-107-21-108-34-109-*') @no_wasm_backend('ASYNCIFY coroutines are not yet supported in the LLVM wasm backend') @no_fastcomp('ASYNCIFY has been removed from fastcomp') def test_coroutine_asyncify(self): self.do_test_coroutine({'ASYNCIFY': 1}) @no_asan('asyncify stack operations confuse asan') @no_fastcomp('Fibers are not implemented for fastcomp') def test_fibers_asyncify(self): self.set_setting('ASYNCIFY', 1) src = open(path_from_root('tests', 'test_fibers.cpp')).read() self.do_run(src, '*leaf-0-100-1-101-1-102-2-103-3-104-5-105-8-106-13-107-21-108-34-109-*') @no_wasm_backend('ASYNCIFY is not supported in the LLVM wasm backend') @no_fastcomp('ASYNCIFY has been removed from fastcomp') def test_asyncify_unused(self): # test a program not using asyncify, but the pref is set self.set_setting('ASYNCIFY', 1) self.do_run_in_out_file_test('tests', 'core', 'test_hello_world') @parameterized({ 'normal': ([], True), 'blacklist_a': (['-s', 'ASYNCIFY_BLACKLIST=["foo(int, double)"]'], False), 'blacklist_b': (['-s', 'ASYNCIFY_BLACKLIST=["bar()"]'], True), 'blacklist_c': (['-s', 'ASYNCIFY_BLACKLIST=["baz()"]'], False), 'whitelist_a': (['-s', 'ASYNCIFY_WHITELIST=["main","__original_main","foo(int, double)","baz()","c_baz","Structy::funcy()","bar()"]'], True), 'whitelist_b': (['-s', 'ASYNCIFY_WHITELIST=["main","__original_main","foo(int, double)","baz()","c_baz","Structy::funcy()"]'], True), 'whitelist_c': (['-s', 'ASYNCIFY_WHITELIST=["main","__original_main","foo(int, double)","baz()","c_baz"]'], False), 'whitelist_d': (['-s', 'ASYNCIFY_WHITELIST=["foo(int, double)","baz()","c_baz","Structy::funcy()"]'], False), 'whitelist_b_response': ([], True, '["main","__original_main","foo(int, double)","baz()","c_baz","Structy::funcy()"]'), 'whitelist_c_response': ([], False, '["main","__original_main","foo(int, double)","baz()","c_baz"]'), }) @no_asan('asan is not compatible with asyncify stack operations; may also need to not instrument asan_c_load_4, TODO') @no_fastcomp('new asyncify only') def test_asyncify_lists(self, args, should_pass, response=None): if response is not None: create_test_file('response.file', response) self.emcc_args += ['-s', 'ASYNCIFY_WHITELIST=@response.file'] self.set_setting('ASYNCIFY', 1) self.emcc_args += args try: self.do_run_in_out_file_test('tests', 'core', 'test_asyncify_lists', assert_identical=True) if not should_pass: should_pass = True raise Exception('should 
not have passed') except Exception: if should_pass: raise @no_asan('asyncify stack operations confuse asan') @no_fastcomp('wasm-backend specific feature') def test_emscripten_scan_registers(self): self.set_setting('ASYNCIFY', 1) self.do_run_in_out_file_test('tests', 'core', 'emscripten_scan_registers') @no_fastcomp('wasm-backend specific feature') def test_asyncify_assertions(self): self.set_setting('ASYNCIFY', 1) self.set_setting('ASYNCIFY_IMPORTS', ['suspend']) self.set_setting('ASSERTIONS', 1) self.do_run_in_out_file_test('tests', 'core', 'asyncify_assertions') @no_asan('asyncify stack operations confuse asan') @no_fastcomp('wasm-backend specific feature') @no_wasm2js('TODO: lazy loading in wasm2js') @parameterized({ 'conditional': (True,), 'unconditional': (False,), }) def test_emscripten_lazy_load_code(self, conditional): self.set_setting('ASYNCIFY', 1) self.set_setting('ASYNCIFY_LAZY_LOAD_CODE', 1) self.set_setting('ASYNCIFY_IGNORE_INDIRECT', 1) self.set_setting('MALLOC', 'emmalloc') self.emcc_args += ['--profiling-funcs'] # so that we can find the functions for the changes below if conditional: self.emcc_args += ['-DCONDITIONAL'] self.do_run_in_out_file_test('tests', 'core', 'emscripten_lazy_load_code', args=['0']) first_size = os.path.getsize('src.cpp.o.wasm') second_size = os.path.getsize('src.cpp.o.wasm.lazy.wasm') print('first wasm size', first_size) print('second wasm size', second_size) if not conditional and is_optimizing(self.emcc_args): # If the call to lazy-load is unconditional, then the optimizer can dce # out more than half self.assertLess(first_size, 0.5 * second_size) with open('src.cpp.o.wasm', 'rb') as f: with open('src.cpp.o.wasm.lazy.wasm', 'rb') as g: self.assertNotEqual(f.read(), g.read()) # attempts to "break" the wasm by adding an unreachable in $foo_end. returns whether we found it. def break_wasm(name): wat = run_process([os.path.join(building.get_binaryen_bin(), 'wasm-dis'), name], stdout=PIPE).stdout lines = wat.splitlines() wat = None for i in range(len(lines)): if '(func $foo_end ' in lines[i]: j = i + 1 while '(local ' in lines[j]: j += 1 # we found the first line after the local defs lines[j] = '(unreachable)' + lines[j] wat = '\n'.join(lines) break if wat is None: # $foo_end is not present in the wasm, nothing to break shutil.copyfile(name, name + '.orig') return False with open('wat.wat', 'w') as f: f.write(wat) shutil.move(name, name + '.orig') run_process([os.path.join(building.get_binaryen_bin(), 'wasm-as'), 'wat.wat', '-o', name, '-g']) return True def verify_working(args=['0']): self.assertContained('foo_end', run_js('src.cpp.o.js', args=args)) def verify_broken(args=['0']): self.assertNotContained('foo_end', run_js('src.cpp.o.js', args=args, stderr=STDOUT, assert_returncode=None)) # the first-loaded wasm will not reach the second call, since we call it after lazy-loading. 
# verify that by changing the first wasm to throw in that function found_foo_end = break_wasm('src.cpp.o.wasm') if not conditional and is_optimizing(self.emcc_args): self.assertFalse(found_foo_end, 'should have optimizd out $foo_end') verify_working() # but breaking the second wasm actually breaks us break_wasm('src.cpp.o.wasm.lazy.wasm') verify_broken() # restore shutil.copyfile('src.cpp.o.wasm.orig', 'src.cpp.o.wasm') shutil.copyfile('src.cpp.o.wasm.lazy.wasm.orig', 'src.cpp.o.wasm.lazy.wasm') verify_working() if conditional: # if we do not call the lazy load function, then we do not need the lazy wasm, # and we do the second call in the first wasm os.remove('src.cpp.o.wasm.lazy.wasm') verify_broken() verify_working(['42']) break_wasm('src.cpp.o.wasm') verify_broken() # Test basic wasm2js functionality in all core compilation modes. @no_fastcomp('wasm-backend specific feature') @no_asan('no wasm2js support yet in asan') def test_wasm2js(self): if self.get_setting('WASM') == 0: self.skipTest('redundant to test wasm2js in wasm2js* mode') self.set_setting('WASM', 0) self.do_run_in_out_file_test('tests', 'core', 'test_hello_world') # a mem init file is emitted just like with JS expect_memory_init_file = self.uses_memory_init_file() see_memory_init_file = os.path.exists('src.c.o.js.mem') assert expect_memory_init_file == see_memory_init_file, 'memory init file expectation wrong: %s' % expect_memory_init_file if see_memory_init_file: with open('src.c.o.js.mem', 'rb') as f: self.assertTrue(f.read()[-1] != b'\0') @no_fastcomp('wasm-backend specific feature') @no_asan('no wasm2js support yet in asan') def test_maybe_wasm2js(self): if self.get_setting('WASM') == 0: self.skipTest('redundant to test wasm2js in wasm2js* mode') self.set_setting('MAYBE_WASM2JS', 1) # see that running as wasm works self.do_run_in_out_file_test('tests', 'core', 'test_hello_world') # run wasm2js, bundle the code, and use the wasm2js path cmd = [PYTHON, path_from_root('tools', 'maybe_wasm2js.py'), 'src.c.o.js', 'src.c.o.wasm'] if is_optimizing(self.emcc_args): cmd += ['-O2'] run_process(cmd, stdout=open('do_wasm2js.js', 'w')).stdout # remove the wasm to make sure we never use it again os.remove('src.c.o.wasm') # verify that it runs self.assertContained('hello, world!', run_js('do_wasm2js.js')) @no_fastcomp('wasm-backend specific feature') @no_asan('no wasm2js support yet in asan') def test_wasm2js_fallback(self): if self.get_setting('WASM') == 0: self.skipTest('redundant to test wasm2js in wasm2js* mode') for args in [[], ['-s', 'MINIMAL_RUNTIME=1']]: cmd = [EMCC, path_from_root('tests', 'small_hello_world.c'), '-s', 'WASM=2'] + args run_process(cmd) # First run with WebAssembly support enabled # Move the Wasm2js fallback away to test it is not accidentally getting loaded. os.rename('a.out.wasm.js', 'a.out.wasm.js.unused') self.assertContained('hello!', run_js('a.out.js')) os.rename('a.out.wasm.js.unused', 'a.out.wasm.js') # Then disable WebAssembly support in VM, and try again.. Should still work with Wasm2JS fallback. open('b.out.js', 'w').write('WebAssembly = undefined;\n' + open('a.out.js', 'r').read()) os.remove('a.out.wasm') # Also delete the Wasm file to test that it is not attempted to be loaded. 
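      # note: -s WASM=2 emits both a.out.wasm and the a.out.wasm.js fallback at
      # link time, which is why the same build can be exercised here both with
      # and without WebAssembly support in the VM.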
self.assertContained('hello!', run_js('b.out.js')) def test_cxx_self_assign(self): # See https://github.com/emscripten-core/emscripten/pull/2688 and http://llvm.org/bugs/show_bug.cgi?id=18735 self.do_run(r''' #include <map> #include <stdio.h> int main() { std::map<int, int> m; m[0] = 1; m = m; // size should still be one after self assignment if (m.size() == 1) { printf("ok.\n"); } } ''', 'ok.') def test_memprof_requirements(self): # This test checks for the global variables required to run the memory # profiler. It would fail if these variables were made no longer global # or if their identifiers were changed. create_test_file('main.cpp', ''' extern "C" { void check_memprof_requirements(); } int main() { check_memprof_requirements(); return 0; } ''') create_test_file('lib.js', ''' mergeInto(LibraryManager.library, { check_memprof_requirements: function() { if (typeof STATIC_BASE === 'number' && typeof STACK_BASE === 'number' && typeof STACK_MAX === 'number' && typeof STACKTOP === 'number' && typeof DYNAMIC_BASE === 'number' && typeof DYNAMICTOP_PTR === 'number') { out('able to run memprof'); } else { out('missing the required variables to run memprof'); } } }); ''') self.emcc_args += ['--js-library', 'lib.js'] self.do_run(open('main.cpp').read(), 'able to run memprof') def test_fs_dict(self): self.set_setting('FORCE_FILESYSTEM', 1) self.emcc_args += ['-lidbfs.js'] self.emcc_args += ['-lnodefs.js'] create_test_file('pre.js', ''' Module = {}; Module['preRun'] = function() { out(typeof FS.filesystems['MEMFS']); out(typeof FS.filesystems['IDBFS']); out(typeof FS.filesystems['NODEFS']); // Globals console.log(typeof MEMFS); console.log(typeof IDBFS); console.log(typeof NODEFS); }; ''') self.emcc_args += ['--pre-js', 'pre.js'] self.do_run('int main() { return 0; }', 'object\nobject\nobject\nobject\nobject\nobject') def test_fs_dict_none(self): # if IDBFS and NODEFS are not enabled, they are not present. self.set_setting('FORCE_FILESYSTEM', 1) self.set_setting('ASSERTIONS', 1) create_test_file('pre.js', ''' Module = {}; Module['preRun'] = function() { out(typeof FS.filesystems['MEMFS']); out(typeof FS.filesystems['IDBFS']); out(typeof FS.filesystems['NODEFS']); // Globals if (ASSERTIONS) { console.log(typeof MEMFS); console.log(IDBFS); console.log(NODEFS); FS.mkdir('/working1'); try { FS.mount(IDBFS, {}, '/working1'); } catch (e) { console.log('|' + e + '|'); } } }; ''') self.emcc_args += ['--pre-js', 'pre.js'] expected = '''\ object undefined undefined object IDBFS is no longer included by default; build with -lidbfs.js NODEFS is no longer included by default; build with -lnodefs.js |IDBFS is no longer included by default; build with -lidbfs.js|''' self.do_run('int main() { return 0; }', expected) @sync @no_wasm_backend("https://github.com/emscripten-core/emscripten/issues/9039") def test_stack_overflow_check(self): args = self.emcc_args + ['-s', 'TOTAL_STACK=1048576'] self.emcc_args = args + ['-s', 'STACK_OVERFLOW_CHECK=2', '-s', 'ASSERTIONS=0'] self.do_run(open(path_from_root('tests', 'stack_overflow.cpp')).read(), 'Stack overflow! Attempted to allocate', assert_returncode=None) self.emcc_args = args + ['-s', 'ASSERTIONS=1'] self.do_run(open(path_from_root('tests', 'stack_overflow.cpp')).read(), 'Stack overflow! 
Attempted to allocate', assert_returncode=None) @no_wasm_backend('uses BINARYEN_TRAP_MODE (the wasm backend only supports non-trapping)') def test_binaryen_trap_mode(self): if not self.is_wasm(): self.skipTest('wasm test') TRAP_OUTPUTS = ('trap', 'RuntimeError') default = 'allow' print('default is', default) for mode in ['js', 'clamp', 'allow', '']: if mode == 'js' and self.is_wasm_backend(): # wasm backend does not use asm2wasm imports, which js trap mode requires continue print('mode:', mode) self.set_setting('BINARYEN_TRAP_MODE', mode or default) if not mode: mode = default print(' idiv') self.do_run(open(path_from_root('tests', 'wasm', 'trap-idiv.cpp')).read(), { 'js': '|0|', 'clamp': '|0|', 'allow': TRAP_OUTPUTS }[mode], assert_returncode=None) print(' f2i') self.do_run(open(path_from_root('tests', 'wasm', 'trap-f2i.cpp')).read(), { 'js': '|1337|\n|4294967295|', # JS did an fmod 2^32 | normal 'clamp': '|-2147483648|\n|4294967295|', 'allow': TRAP_OUTPUTS }[mode], assert_returncode=None) @also_with_standalone_wasm def test_sbrk(self): self.do_run(open(path_from_root('tests', 'sbrk_brk.cpp')).read(), 'OK.') def test_brk(self): self.emcc_args += ['-DTEST_BRK=1'] self.do_run(open(path_from_root('tests', 'sbrk_brk.cpp')).read(), 'OK.') # Tests that we can use the dlmalloc mallinfo() function to obtain information # about malloc()ed blocks and compute how much memory is used/freed. @no_asan('mallinfo is not part of ASan malloc') def test_mallinfo(self): self.do_run(open(path_from_root('tests', 'mallinfo.cpp')).read(), 'OK.') @no_asan('cannot replace malloc/free with ASan') def test_wrap_malloc(self): self.do_run(open(path_from_root('tests', 'wrap_malloc.cpp')).read(), 'OK.') def test_environment(self): self.set_setting('ASSERTIONS', 1) def test(): self.do_run_in_out_file_test('tests', 'core', 'test_hello_world', assert_returncode=None) js = open('src.c.o.js').read() assert ('require(' in js) == ('node' in self.get_setting('ENVIRONMENT')), 'we should have require() calls only if node js specified' for engine in JS_ENGINES: print(engine) # set us to test in just this engine self.banned_js_engines = [e for e in JS_ENGINES if e != engine] # tell the compiler to build with just that engine if engine == NODE_JS: right = 'node' wrong = 'shell' else: right = 'shell' wrong = 'node' # test with the right env self.set_setting('ENVIRONMENT', right) print('ENVIRONMENT =', self.get_setting('ENVIRONMENT')) test() # test with the wrong env self.set_setting('ENVIRONMENT', wrong) print('ENVIRONMENT =', self.get_setting('ENVIRONMENT')) try: test() raise Exception('unexpected success') except Exception as e: self.assertContained('not compiled for this environment', str(e)) # test with a combined env self.set_setting('ENVIRONMENT', right + ',' + wrong) print('ENVIRONMENT =', self.get_setting('ENVIRONMENT')) test() def test_dfe(self): if not self.supports_js_dfe(): self.skipTest('dfe-only') self.set_setting('ELIMINATE_DUPLICATE_FUNCTIONS', 1) self.do_run_in_out_file_test('tests', 'core', 'test_hello_world') self.emcc_args += ['-g2'] # test for issue #6331 self.do_run_in_out_file_test('tests', 'core', 'test_hello_world') def test_postrun_exception(self): # verify that an exception thrown in postRun() will not trigger the # compilation failed handler, and will be printed to stderr. 
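    # ('failed to asynchronously prepare wasm' is the message the wasm
    # compilation-failure handler reports; asserting its absence below shows
    # the postRun exception is not misattributed to a compile failure.)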
self.add_post_run('ThisFunctionDoesNotExist()') src = open(path_from_root('tests', 'core', 'test_hello_world.c')).read() self.build(src, self.get_dir(), 'src.c') output = run_js('src.c.o.js', assert_returncode=None, stderr=STDOUT) self.assertNotContained('failed to asynchronously prepare wasm', output) self.assertContained('hello, world!', output) self.assertContained('ThisFunctionDoesNotExist is not defined', output) # Tests that building with -s DECLARE_ASM_MODULE_EXPORTS=0 works def test_no_declare_asm_module_exports(self): self.set_setting('DECLARE_ASM_MODULE_EXPORTS', 0) self.set_setting('WASM_ASYNC_COMPILATION', 0) self.maybe_closure() self.do_run(open(path_from_root('tests', 'declare_asm_module_exports.cpp')).read(), 'jsFunction: 1') js = open('src.cpp.o.js').read() occurances = js.count('cFunction') if is_optimizing(self.emcc_args) and '-g' not in self.emcc_args: # In optimized builds only the single reference cFunction that exists in the EM_ASM should exist if self.is_wasm(): self.assertEqual(occurances, 1) else: # With js the asm module itself also contains a reference for the cFunction name self.assertEqual(occurances, 2) else: print(occurances) # Tests that building with -s DECLARE_ASM_MODULE_EXPORTS=0 works def test_minimal_runtime_no_declare_asm_module_exports(self): self.set_setting('DECLARE_ASM_MODULE_EXPORTS', 0) self.set_setting('WASM_ASYNC_COMPILATION', 0) self.maybe_closure() self.set_setting('MINIMAL_RUNTIME', 1) self.do_run(open(path_from_root('tests', 'declare_asm_module_exports.cpp')).read(), 'jsFunction: 1') # Tests that -s MINIMAL_RUNTIME=1 works well in different build modes @parameterized({ 'default': ([],), 'streaming': (['-s', 'MINIMAL_RUNTIME_STREAMING_WASM_COMPILATION=1'],), 'streaming_inst': (['-s', 'MINIMAL_RUNTIME_STREAMING_WASM_INSTANTIATION=1'],), 'no_export': (['-s', 'DECLARE_ASM_MODULE_EXPORTS=0'],) }) def test_minimal_runtime_hello_world(self, args): # TODO: Support for non-Node.js shells has not yet been added to MINIMAL_RUNTIME self.banned_js_engines = [V8_ENGINE, SPIDERMONKEY_ENGINE] self.emcc_args = ['-s', 'MINIMAL_RUNTIME=1'] + args self.set_setting('MINIMAL_RUNTIME', 1) self.maybe_closure() self.do_run(open(path_from_root('tests', 'small_hello_world.c')).read(), 'hello') # Test that printf() works in MINIMAL_RUNTIME=1 @parameterized({ 'fs': (['-s', 'FORCE_FILESYSTEM=1'],), 'nofs': (['-s', 'NO_FILESYSTEM=1'],), }) def test_minimal_runtime_hello_printf(self, args): self.emcc_args = ['-s', 'MINIMAL_RUNTIME=1'] + args self.maybe_closure() self.do_run(open(path_from_root('tests', 'hello_world.c')).read(), 'hello, world!') # Tests that -s MINIMAL_RUNTIME=1 works well with SAFE_HEAP def test_minimal_runtime_safe_heap(self): self.emcc_args = ['-s', 'MINIMAL_RUNTIME=1', '-s', 'SAFE_HEAP=1'] self.maybe_closure() self.do_run(open(path_from_root('tests', 'small_hello_world.c')).read(), 'hello') # Tests global initializer with -s MINIMAL_RUNTIME=1 def test_minimal_runtime_global_initializer(self): self.set_setting('MINIMAL_RUNTIME', 1) self.maybe_closure() self.do_run(open(path_from_root('tests', 'test_global_initializer.cpp')).read(), 't1 > t0: 1') @no_fastcomp('return address not supported on fastcomp') @no_optimize('return address test cannot work with optimizations') def test_return_address(self): self.emcc_args += ['-s', 'USE_OFFSET_CONVERTER'] self.do_run(open(path_from_root('tests', 'core', 'test_return_address.cpp')).read(), 'passed') @no_wasm2js('TODO: sanitizers in wasm2js') @no_fastcomp('ubsan not supported on fastcomp') 
@no_asan('-fsanitize-minimal-runtime cannot be used with ASan') def test_ubsan_minimal_too_many_errors(self): self.emcc_args += ['-fsanitize=undefined', '-fsanitize-minimal-runtime'] if self.get_setting('WASM') == 0: if is_optimizing(self.emcc_args): self.skipTest('test can only be run without optimizations on asm.js') # Need to use `-g` to get proper line numbers in asm.js self.emcc_args += ['-g'] self.do_run(open(path_from_root('tests', 'core', 'test_ubsan_minimal_too_many_errors.c')).read(), expected_output='ubsan: add-overflow\n' * 20 + 'ubsan: too many errors\n') @no_wasm2js('TODO: sanitizers in wasm2js') @no_fastcomp('ubsan not supported on fastcomp') @no_asan('-fsanitize-minimal-runtime cannot be used with ASan') def test_ubsan_minimal_errors_same_place(self): self.emcc_args += ['-fsanitize=undefined', '-fsanitize-minimal-runtime'] if self.get_setting('WASM') == 0: if is_optimizing(self.emcc_args): self.skipTest('test can only be run without optimizations on asm.js') # Need to use `-g` to get proper line numbers in asm.js self.emcc_args += ['-g'] self.do_run(open(path_from_root('tests', 'core', 'test_ubsan_minimal_errors_same_place.c')).read(), expected_output='ubsan: add-overflow\n' * 5) @parameterized({ 'fsanitize_undefined': (['-fsanitize=undefined'],), 'fsanitize_integer': (['-fsanitize=integer'],), 'fsanitize_overflow': (['-fsanitize=signed-integer-overflow'],), }) @no_fastcomp('ubsan not supported on fastcomp') @no_wasm2js('TODO: sanitizers in wasm2js') def test_ubsan_full_overflow(self, args): self.emcc_args += args self.do_run(open(path_from_root('tests', 'core', 'test_ubsan_full_overflow.c')).read(), assert_all=True, expected_output=[ "src.cpp:3:5: runtime error: signed integer overflow: 2147483647 + 1 cannot be represented in type 'int'", "src.cpp:7:7: runtime error: signed integer overflow: 2147483647 + 1 cannot be represented in type 'int'", ]) @parameterized({ 'fsanitize_undefined': (['-fsanitize=undefined'],), 'fsanitize_return': (['-fsanitize=return'],), }) @no_wasm2js('TODO: sanitizers in wasm2js') @no_fastcomp('ubsan not supported on fastcomp') def test_ubsan_full_no_return(self, args): self.emcc_args += ['-Wno-return-type'] + args self.do_run(open(path_from_root('tests', 'core', 'test_ubsan_full_no_return.c')).read(), expected_output='src.cpp:1:5: runtime error: execution reached the end of a value-returning function without returning a value', assert_returncode=None) @parameterized({ 'fsanitize_undefined': (['-fsanitize=undefined'],), 'fsanitize_integer': (['-fsanitize=integer'],), 'fsanitize_shift': (['-fsanitize=shift'],), }) @no_fastcomp('ubsan not supported on fastcomp') @no_wasm2js('TODO: sanitizers in wasm2js') def test_ubsan_full_left_shift(self, args): self.emcc_args += args self.do_run(open(path_from_root('tests', 'core', 'test_ubsan_full_left_shift.c')).read(), assert_all=True, expected_output=[ 'src.cpp:3:5: runtime error: left shift of negative value -1', "src.cpp:7:5: runtime error: left shift of 16 by 29 places cannot be represented in type 'int'" ]) @parameterized({ 'fsanitize_undefined': (['-fsanitize=undefined'],), 'fsanitize_null': (['-fsanitize=null'],), }) @no_fastcomp('ubsan not supported on fastcomp') @no_wasm2js('TODO: sanitizers in wasm2js') def test_ubsan_full_null_ref(self, args): self.emcc_args += args self.do_run(open(path_from_root('tests', 'core', 'test_ubsan_full_null_ref.cpp')).read(), assert_all=True, expected_output=[ "src.cpp:3:12: runtime error: reference binding to null pointer of type 'int'", "src.cpp:4:13: runtime error: 
reference binding to null pointer of type 'int'", "src.cpp:5:14: runtime error: reference binding to null pointer of type 'int'", ]) @parameterized({ 'fsanitize_undefined': (['-fsanitize=undefined'],), 'fsanitize_vptr': (['-fsanitize=vptr'],), }) @no_fastcomp('ubsan not supported on fastcomp') @no_wasm2js('TODO: sanitizers in wasm2js') def test_ubsan_full_static_cast(self, args): self.emcc_args += args self.do_run(open(path_from_root('tests', 'core', 'test_ubsan_full_static_cast.cpp')).read(), assert_all=True, expected_output=[ "src.cpp:18:10: runtime error: downcast of address", "which does not point to an object of type 'R'", ]) @parameterized({ 'g': ('-g', [ "src.cpp:3:12: runtime error: reference binding to null pointer of type 'int'", 'in main', ]), 'g4': ('-g4', [ "src.cpp:3:12: runtime error: reference binding to null pointer of type 'int'", 'in main ', 'src.cpp:3:8' ]), }) @no_fastcomp('ubsan not supported on fastcomp') @no_wasm2js('TODO: sanitizers in wasm2js') def test_ubsan_full_stack_trace(self, g_flag, expected_output): self.emcc_args += ['-fsanitize=null', g_flag, '-s', 'ALLOW_MEMORY_GROWTH=1'] if g_flag == '-g4': if not self.get_setting('WASM'): self.skipTest('wasm2js has no source map support') elif '-Oz' in self.emcc_args: self.skipTest('-Oz breaks stack traces') def modify_env(filename): with open(filename) as f: contents = f.read() contents = 'Module = {UBSAN_OPTIONS: "print_stacktrace=1"};' + contents with open(filename, 'w') as f: f.write(contents) self.do_run(open(path_from_root('tests', 'core', 'test_ubsan_full_null_ref.cpp')).read(), post_build=modify_env, assert_all=True, expected_output=expected_output) def test_template_class_deduction(self): self.emcc_args += ['-std=c++17'] self.do_run_in_out_file_test('tests', 'core', 'test_template_class_deduction') @parameterized({ 'c': ['test_asan_no_error.c'], 'cpp': ['test_asan_no_error.cpp'], }) @no_fastcomp('asan not supported on fastcomp') def test_asan_no_error(self, name): self.emcc_args += ['-fsanitize=address', '-s', 'ALLOW_MEMORY_GROWTH=1'] self.do_run(open(path_from_root('tests', 'core', name)).read(), basename=name, expected_output=[''], assert_returncode=None) # note: these tests have things like -fno-builtin-memset in order to avoid # clang optimizing things away. for example, a memset might be optimized into # stores, and then the stores identified as dead, which leaves nothing for # asan to test. here we want to test asan itself, so we work around that. 
@parameterized({ 'use_after_free_c': ('test_asan_use_after_free.c', [ 'AddressSanitizer: heap-use-after-free on address', ]), 'use_after_free_cpp': ('test_asan_use_after_free.cpp', [ 'AddressSanitizer: heap-use-after-free on address', ]), 'use_after_return': ('test_asan_use_after_return.c', [ 'AddressSanitizer: stack-use-after-return on address', ], ['-Wno-return-stack-address']), 'static_buffer_overflow': ('test_asan_static_buffer_overflow.c', [ 'AddressSanitizer: global-buffer-overflow on address', ], ['-fno-builtin-memset']), 'heap_buffer_overflow_c': ('test_asan_heap_buffer_overflow.c', [ 'AddressSanitizer: heap-buffer-overflow on address', ], ['-fno-builtin-memset']), 'heap_buffer_overflow_cpp': ('test_asan_heap_buffer_overflow.cpp', [ 'AddressSanitizer: heap-buffer-overflow on address', ], ['-fno-builtin-memset']), 'stack_buffer_overflow': ('test_asan_stack_buffer_overflow.c', [ 'AddressSanitizer: stack-buffer-overflow' ], ['-fno-builtin-memset']), 'stack_buffer_overflow_js': ('test_asan_stack_buffer_overflow_js.c', [ 'AddressSanitizer: stack-buffer-overflow' ], ['-fno-builtin-memset']), 'bitfield_unround_size': ('test_asan_bitfield_unround_size.c', [ 'AddressSanitizer: stack-buffer-overflow' ], ['-fno-builtin-memset']), 'bitfield_unround_offset': ('test_asan_bitfield_unround_offset.c', [ 'AddressSanitizer: stack-buffer-overflow' ], ['-fno-builtin-memset']), 'bitfield_round': ('test_asan_bitfield_round.c', [ 'AddressSanitizer: stack-buffer-overflow' ], ['-fno-builtin-memset']), 'memset_null': ('test_asan_memset_null.c', [ 'AddressSanitizer: null-pointer-dereference on address 0x00000001' ], ['-fno-builtin-memset']), 'memset_freed': ('test_asan_memset_freed.c', [ 'AddressSanitizer: heap-use-after-free on address' ], ['-fno-builtin-memset']), 'strcpy': ('test_asan_strcpy.c', [ 'AddressSanitizer: heap-buffer-overflow on address' ], ['-fno-builtin-strcpy']), 'memcpy': ('test_asan_memcpy.c', [ 'AddressSanitizer: heap-buffer-overflow on address' ], ['-fno-builtin-memcpy']), 'memchr': ('test_asan_memchr.c', [ 'AddressSanitizer: global-buffer-overflow on address' ], ['-fno-builtin-memchr']), 'vector': ('test_asan_vector.cpp', [ 'AddressSanitizer: container-overflow on address' ]), }) @no_fastcomp('asan not supported on fastcomp') def test_asan(self, name, expected_output, cflags=None): if '-Oz' in self.emcc_args: self.skipTest('-Oz breaks source maps') if not self.get_setting('WASM'): self.skipTest('wasm2js has no ASan support') self.emcc_args += ['-fsanitize=address', '-s', 'ALLOW_MEMORY_GROWTH=1'] if cflags: self.emcc_args += cflags self.do_run(open(path_from_root('tests', 'core', name)).read(), basename='src.c' if name.endswith('.c') else 'src.cpp', expected_output=expected_output, assert_all=True, check_for_error=False, assert_returncode=None) @no_wasm2js('TODO: ASAN in wasm2js') @no_fastcomp('asan not supported on fastcomp') def test_asan_js_stack_op(self): self.emcc_args += ['-fsanitize=address', '-s', 'ALLOW_MEMORY_GROWTH=1'] self.do_run(open(path_from_root('tests', 'core', 'test_asan_js_stack_op.c')).read(), basename='src.c', expected_output='Hello, World!') @no_fastcomp('WASM backend stack protection') def test_safe_stack(self): self.set_setting('STACK_OVERFLOW_CHECK', 2) self.set_setting('TOTAL_STACK', 65536) self.do_run(open(path_from_root('tests', 'core', 'test_safe_stack.c')).read(), expected_output=['abort(stack overflow)', '__handle_stack_overflow'], assert_returncode=None) @no_fastcomp('WASM backend stack protection') def test_safe_stack_alloca(self): 
self.set_setting('STACK_OVERFLOW_CHECK', 2) self.set_setting('TOTAL_STACK', 65536) self.do_run(open(path_from_root('tests', 'core', 'test_safe_stack_alloca.c')).read(), expected_output=['abort(stack overflow)', '__handle_stack_overflow'], assert_returncode=None) @needs_dlfcn @no_fastcomp('WASM backend stack protection') def test_safe_stack_dylink(self): self.set_setting('STACK_OVERFLOW_CHECK', 2) self.set_setting('TOTAL_STACK', 65536) self.dylink_test(r''' #include <stdio.h> extern void sidey(); int main() { sidey(); } ''', ''' #include <string.h> int f(int *b) { int a[64]; memset(b, 0, 2048 * sizeof(int)); return f(a); } void sidey() { int a[2048]; f(a); } ''', ['abort(stack overflow)', '__handle_stack_overflow'], assert_returncode=None) def test_fpic_static(self): self.emcc_args.append('-fPIC') self.do_run_in_out_file_test('tests', 'core', 'test_hello_world') @node_pthreads def test_pthreads_create(self): def test(): self.do_run_in_out_file_test('tests', 'core', 'pthread', 'create') test() # with a pool, we can synchronously depend on workers being available self.set_setting('PTHREAD_POOL_SIZE', '2') self.emcc_args += ['-DPOOL'] test() @no_fastcomp('new wasm backend atomics') def test_emscripten_atomics_stub(self): self.do_run_in_out_file_test('tests', 'core', 'pthread', 'emscripten_atomics') @no_asan('incompatibility with atomics') @no_fastcomp('new wasm backend atomics') @node_pthreads def test_emscripten_atomics(self): self.set_setting('USE_PTHREADS', '1') self.do_run_in_out_file_test('tests', 'core', 'pthread', 'emscripten_atomics') # Tests the emscripten_get_exported_function() API. def test_emscripten_get_exported_function(self): # Could also test with -s ALLOW_TABLE_GROWTH=1 self.set_setting('RESERVED_FUNCTION_POINTERS', 2) self.emcc_args += ['-lexports.js'] self.do_run_in_out_file_test('tests', 'core', 'test_get_exported_function') # Tests the emscripten_get_exported_function() API. def test_minimal_runtime_emscripten_get_exported_function(self): # Could also test with -s ALLOW_TABLE_GROWTH=1 self.set_setting('RESERVED_FUNCTION_POINTERS', 2) self.emcc_args += ['-lexports.js', '-s', 'MINIMAL_RUNTIME=1'] self.do_run_in_out_file_test('tests', 'core', 'test_get_exported_function') # Marked as impure since the WASI reactor modules (modules without main) # are not yet suppored by the wasm engines we test against. @also_with_impure_standalone_wasm def test_undefined_main(self): # Traditionally in emscripten we allow main to be undefined. This allows programs with a main # and libraries without a main to be compiled identically. # However we are trying to move away from that model to a more explicit opt-out model. 
See: # https://github.com/emscripten-core/emscripten/issues/9640 if not self.get_setting('LLD_REPORT_UNDEFINED') and not self.get_setting('STRICT') and not self.get_setting('STANDALONE_WASM'): self.do_run_in_out_file_test('tests', 'core', 'test_ctors_no_main') # Disabling IGNORE_MISSING_MAIN should cause link to fail due to missing main self.set_setting('IGNORE_MISSING_MAIN', 0) err = self.expect_fail([EMCC, path_from_root('tests', 'core', 'test_ctors_no_main.cpp')] + self.get_emcc_args()) self.assertContained('error: entry symbol not defined (pass --no-entry to suppress): main', err) # If we pass --no-entry or set EXPORTED_FUNCTIONS to empty should never see any errors self.emcc_args.append('--no-entry') self.do_run_in_out_file_test('tests', 'core', 'test_ctors_no_main') self.emcc_args.remove('--no-entry') self.set_setting('EXPORTED_FUNCTIONS', []) self.do_run_in_out_file_test('tests', 'core', 'test_ctors_no_main') # Tests the operation of API found in #include <emscripten/math.h> def test_emscripten_math(self): self.do_run_in_out_file_test('tests', 'core', 'test_emscripten_math') # Generate tests for everything def make_run(name, emcc_args, settings=None, env=None): if env is None: env = {} if settings is None: settings = {} TT = type(name, (TestCoreBase,), dict(run_name=name, env=env, __module__=__name__)) # noqa def tearDown(self): try: super(TT, self).tearDown() finally: for k, v in self.env.items(): del os.environ[k] TT.tearDown = tearDown def setUp(self): super(TT, self).setUp() for k, v in self.env.items(): assert k not in os.environ, k + ' should not be in environment' os.environ[k] = v os.chdir(self.get_dir()) # Ensure the directory exists and go there for k, v in settings.items(): self.set_setting(k, v) self.emcc_args += emcc_args # avoid various compiler warnings in our test output self.emcc_args += [ '-Wno-dynamic-class-memaccess', '-Wno-format', '-Wno-format-extra-args', '-Wno-format-security', '-Wno-pointer-bool-conversion', '-Wno-unused-volatile-lvalue', '-Wno-c++11-compat-deprecated-writable-strings', '-Wno-invalid-pp-token', '-Wno-shift-negative-value' ] TT.setUp = setUp return TT # Main asm.js test modes if not shared.Settings.WASM_BACKEND: asm0 = make_run('asm0', emcc_args=[], settings={'ASM_JS': 2, 'WASM': 0}) asm2 = make_run('asm2', emcc_args=['-O2'], settings={'WASM': 0}) asm3 = make_run('asm3', emcc_args=['-O3'], settings={'WASM': 0}) asm2g = make_run('asm2g', emcc_args=['-O2', '-g'], settings={'WASM': 0, 'ASSERTIONS': 1, 'SAFE_HEAP': 1}) # Main wasm test modes wasm0 = make_run('wasm0', emcc_args=['-O0']) wasm0g = make_run('wasm0g', emcc_args=['-O0', '-g']) wasm1 = make_run('wasm1', emcc_args=['-O1']) wasm2 = make_run('wasm2', emcc_args=['-O2']) wasm2g = make_run('wasm2g', emcc_args=['-O2', '-g']) wasm3 = make_run('wasm3', emcc_args=['-O3']) wasms = make_run('wasms', emcc_args=['-Os']) wasmz = make_run('wasmz', emcc_args=['-Oz']) wasmlto0 = make_run('wasmlto0', emcc_args=['-flto', '-O0', '--llvm-lto', '1']) wasmlto1 = make_run('wasmlto1', emcc_args=['-flto', '-O1', '--llvm-lto', '1']) wasmlto2 = make_run('wasmlto2', emcc_args=['-flto', '-O2', '--llvm-lto', '1']) wasmlto3 = make_run('wasmlto3', emcc_args=['-flto', '-O3', '--llvm-lto', '1']) wasmltos = make_run('wasmltos', emcc_args=['-flto', '-Os', '--llvm-lto', '1']) wasmltoz = make_run('wasmltoz', emcc_args=['-flto', '-Oz', '--llvm-lto', '1']) if shared.Settings.WASM_BACKEND: wasm2js0 = make_run('wasm2js0', emcc_args=['-O0'], settings={'WASM': 0}) wasm2js1 = make_run('wasm2js1', emcc_args=['-O1'], settings={'WASM': 
0}) wasm2js2 = make_run('wasm2js2', emcc_args=['-O2'], settings={'WASM': 0}) wasm2js3 = make_run('wasm2js3', emcc_args=['-O3'], settings={'WASM': 0}) wasm2jss = make_run('wasm2jss', emcc_args=['-Os'], settings={'WASM': 0}) wasm2jsz = make_run('wasm2jsz', emcc_args=['-Oz'], settings={'WASM': 0}) # Secondary test modes - run directly when there is a specific need # features simd2 = make_run('simd2', emcc_args=['-O2', '-msimd128']) bulkmem2 = make_run('bulkmem2', emcc_args=['-O2', '-mbulk-memory']) # asm.js asm2f = make_run('asm2f', emcc_args=['-Oz', '-Wno-almost-asm'], settings={'PRECISE_F32': 1, 'ALLOW_MEMORY_GROWTH': 1, 'WASM': 0}) asm2nn = make_run('asm2nn', emcc_args=['-O2'], settings={'WASM': 0}, env={'EMCC_NATIVE_OPTIMIZER': '0'}) # wasm wasm2s = make_run('wasm2s', emcc_args=['-O2'], settings={'SAFE_HEAP': 1}) wasm2ss = make_run('wasm2ss', emcc_args=['-O2'], settings={'STACK_OVERFLOW_CHECK': 2}) # Add DEFAULT_TO_CXX=0 strict = make_run('strict', emcc_args=[], settings={'STRICT': 1}) if shared.Settings.WASM_BACKEND: lsan = make_run('lsan', emcc_args=['-fsanitize=leak'], settings={'ALLOW_MEMORY_GROWTH': 1}) asan = make_run('asan', emcc_args=['-fsanitize=address'], settings={'ALLOW_MEMORY_GROWTH': 1, 'ASAN_SHADOW_SIZE': 128 * 1024 * 1024}) asani = make_run('asani', emcc_args=['-fsanitize=address', '--pre-js', os.path.join(os.path.dirname(__file__), 'asan-no-leak.js')], settings={'ALLOW_MEMORY_GROWTH': 1}) # Experimental modes (not tested by CI) lld = make_run('lld', emcc_args=[], settings={'LLD_REPORT_UNDEFINED': 1}) # TestCoreBase is just a shape for the specific subclasses, we don't test it itself del TestCoreBase # noqa
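The make_run factory above builds one test class per build mode at import time by calling type() on a shared base class and overriding setUp/tearDown to apply per-mode settings and environment variables, after which the base class is deleted so only the generated modes are collected. A minimal standalone sketch of that pattern follows; BaseCase, make_mode and the mode names are illustrative stand-ins, not Emscripten's actual classes.

import os
import unittest


class BaseCase(unittest.TestCase):
    # Shared test logic; generated modes only differ in flags/env.
    flags = []
    env = {}

    def setUp(self):
        self._saved = {k: os.environ.get(k) for k in self.env}
        os.environ.update(self.env)

    def tearDown(self):
        for k, old in self._saved.items():
            if old is None:
                os.environ.pop(k, None)
            else:
                os.environ[k] = old

    def test_mode_is_configured(self):
        # Stand-in for a real build-and-run check.
        self.assertIsInstance(self.flags, list)


def make_mode(name, flags, env=None):
    # type() creates a new TestCase subclass at import time, so the test
    # runner discovers one module-level class per mode automatically.
    return type(name, (BaseCase,), {"flags": flags, "env": env or {}, "__module__": __name__})


opt0 = make_mode("opt0", ["-O0"])
opt2 = make_mode("opt2", ["-O2"], env={"EXAMPLE_NATIVE_OPTIMIZER": "0"})

# Only the generated modes should be collected, mirroring `del TestCoreBase` above.
del BaseCase

if __name__ == "__main__":
    unittest.main()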
tests/test_core.py
313,518
Copyright 2013 The Emscripten Authors. All rights reserved. Emscripten is available under two separate licenses, the MIT license and the University of Illinois/NCSA Open Source License. Both these licenses can be found in the LICENSE file. decorators for limiting which modes a test can run in without EMTEST_ALL_ENGINES set we only run tests in a single VM by default. in some tests we know that cross-VM differences may happen and so are worth testing, and they should be marked with this decorator Tests exception handling in emscripten exception handling mode, and if possible, new wasm EH mode. Wasm EH is currently supported only in wasm backend and V8 Async wasm compilation can't work in some tests, they are set up synchronously test is set up synchronously Also run the test with -s STANDALONE. If we have wasm runtimes, also run in them (regardless we also check that the js+wasm combo works in js vms). Standalone mode is only supported in the wasm backend, and not in all modes there. we will not legalize the JS ffi interface, so we must use BigInt support in order for JS to have a chance to run this without trapping when it sees an i64 on the ffi. Similar to also_with_standalone_wasm, but suitable for tests that cannot run in a wasm VM yet, as they are not 100% standalone. We can still run them with the JS code though. Standalone mode is only supported in the wasm backend, and not in all modes there. we will not legalize the JS ffi interface, so we must use BigInt support in order for JS to have a chance to run this without trapping when it sees an i64 on the ffi. Similar to also_with_standalone_wasm, but suitable for tests that can *only* run in a wasm VM, or in non-standalone mode, but not in standalone mode with our JS. Standalone mode is only supported in the wasm backend, and not in all modes there. A simple check whether the compiler arguments cause optimization. whether the test mode supports duplicate function elimination in js wasm does this when optimizing anyhow, and the wasm backend always optimizes the wasm even if it does wasm2js later Use closure in some tests for some additional coverage Depending on whether 'configure' or 'cmake' is used to build, Bullet places output files in different directory structures. Force a nondefault --host= so that the configure script will interpret that we are doing cross-compilation and skip attempting to run the generated executable with './a.out', which would fail since we are building a .js file. must not emit this unneeded internal thing stuff that also needs sign corrections needs to flush stdio streams needs to flush stdio streams intrinsics should be lowered out extra coverages No other configuration is supported, so always run this. A good test of i64 math TODO: A version of this with int64s as well TODO: continue to the next part here Test for undefined behavior in C. This is not legitimate code, but does exist expected to fail without emulation needs to flush stdio streams Test that fmodf with -s PRECISE_F32=1 properly validates as asm.js (% operator cannot take in f32, only f64) needs to flush stdio streams Generated code includes getelementptr (getelementptr, 0, 1), i.e., GEP as the first param to GEP No compiling from C/C++ - just process an existing .o/.ll/.bc file. 
to be able to find the generated code in newer clang+llvm, the internal calls to malloc in emmalloc may be optimized under the assumption that they are external, so like in system_libs.py where we build malloc, we need to disable builtin here too Test case against https://github.com/emscripten-core/emscripten/issues/10363 Test for binaryen regression: https://github.com/WebAssembly/binaryen/issues/2180 needs to flush stdio streams Can't use safe heap with ASan Wasm does not add an underscore to function names. For wasm, the mismatches are fixed in fixImports() function in JS glue code. otherwise it is inlined and not identified check that an empty whitelist works properly (as in, same as exceptions disabled) big change when we disable exception catching of the function full disable can remove a little bit more Wasm does not add an underscore to function names. For wasm, the mismatches are fixed in fixImports() function in JS glue code. otherwise it is inlined and not identified Wasm does not add an underscore to function names. For wasm, the mismatches are fixed in fixImports() function in JS glue code. otherwise it is inlined and not identified needs to flush stdio streams needs to flush stdio streams needs to flush stdio streams Throwing null will cause an ignorable null pointer access. TODO Make setjmp-longjmp also use Wasm exception handling iostream stuff must be globally constructed before user global constructors, so iostream works in global constructors Marked as impure since the WASI reactor modules (modules without main) are not yet suppored by the wasm engines we test against. needs to flush stdio streams needs to flush stdio streams tests wasm_libc_rt needs to flush stdio streams in node.js we allocate argv[0] on the stack, which means the length of the program directory influences how much stack we need, and so long random temp dir names can lead to random failures. The stack size was increased here to avoid that. in node.js we allocate argv[0] on the stack, which means the length of the program directory influences how much stack we need, and so long random temp dir names can lead to random failures. The stack size was increased here to avoid that. this tests recursive global structs => nontrivial postSets for relocation Has invalid writes between printouts test EXPORT_ALL (this is not compatible with asan, which doesn't support dynamic linking at all or the LINKING flag) timer limitations in v8 shell needs to flush stdio streams Use closure here for some additional coverage with assertions, a nice message is shown TODO: test only worked in non-fastcomp only supports EM_ASM opts will eliminate the comments TODO: test only worked in non-fastcomp only supports EM_ASM Tests various different ways to invoke the EM_ASM(), EM_ASM_INT() and EM_ASM_DOUBLE() macros. Tests various different ways to invoke the MAIN_THREAD_EM_ASM(), MAIN_THREAD_EM_ASM_INT() and MAIN_THREAD_EM_ASM_DOUBLE() macros. This test is identical to test_em_asm_2, just search-replaces EM_ASM to MAIN_THREAD_EM_ASM on the test file. That way if new test cases are added to test_em_asm_2.cpp for EM_ASM, they will also get tested in MAIN_THREAD_EM_ASM form. Tests MAIN_THREAD_EM_ASM_INT() function call with different signatures. Verify that EM_ASM macros support getting called with multiple arities. Maybe tests will later be joined into larger compilation units? Then this must still be compiled separately from other code using EM_ASM macros with arities 1-3. Otherwise this may incorrectly report a success. 
Tests that -s MINIMAL_RUNTIME=1 builds can utilize -s ALLOW_MEMORY_GROWTH=1 option. Fail without memory growth Win with it verify NO_DYNAMIC_EXECUTION is compatible with closure With typed arrays in particular, it is dangerous to use more memory than INITIAL_MEMORY, since we then need to enlarge the heap(s). Fail without memory growth Win with it Make sure ALLOW_MEMORY_GROWTH generates different code (should be less optimized) Tracing of memory growths should work With typed arrays in particular, it is dangerous to use more memory than INITIAL_MEMORY, since we then need to enlarge the heap(s). Fail without memory growth Win with it Make sure ALLOW_MEMORY_GROWTH generates different code (should be less optimized) checks handling of malloc failure properly check that memory growth does not exceed the wasm mem max limit check that memory growth does not exceed the wasm mem max limit and is exactly or one step below the wasm mem max test that C++ new properly errors if we fail to malloc when growth is enabled, with or without growth struct self-ref part 1: make sure that normally, passing structs by value works safe heap prints a warning that messes up our output. needs atexit tests strtoll for hex strings (0x...) tests strtoll for decimal strings (0x...) tests strtoll for binary strings (0x...) tests strtoll for decimal strings (0x...) tests strtoll for hex strings (0x...) tests strtoll for decimal strings (0x...) tests strtoll for binary strings (0x...) tests strtoll for decimal strings (0x...) Confirms they are called in the proper reverse order also tests thread exit (__cxa_thread_atexit) Run the test with different time zone settings if possible. It seems that the TZ environment variable does not work all the time (at least it's not well respected by Node.js on Windows), but it does no harm either. Confirms they are called in reverse order needs to flush stdio streams Some programs intentionally segfault themselves, we should compile that into a throw clang generated code is vulnerable to this, as it uses memcpy for assignments, with hardcoded numbers of bytes (llvm-gcc copies items one by one). Currently broken under V8_ENGINE but not node remove extra node output Bloated memory; same layout as C/C++ emcc emits a wasm in this case Failing under v8 since: https://chromium-review.googlesource.com/712595 this test is not actually valid - it fails natively. the child should fail to be loaded, not load and successfully see the parent print_ints func TODO(https://github.com/emscripten-core/emscripten/issues/11121) We link with C++ stdlibs, even when linking with emcc for historical reasons. We can remove this if this issues is fixed. ensure there aren't too many globals; we don't want unnamed_addr wasm backend includes alias in NAMED_GLOBALS will be exhausted without functional malloc/free TODO: make this work. need to forward tempRet0 across modules TODO Enable @with_both_exception_handling (the test is not working now) verify that dlopen does not allocate already used handles XXX in wasm each lib load currently takes 5MB; default INITIAL_MEMORY=16MB is thus not enough side settings side is just a library main settings main is just a library test the reverse as well Verify that a function pointer can be passed back and forth and invoked on both sides. 
test dynamic linking of a module with multiple function pointers, stored statically TODO: this in wasm one module uses libcxx, need to force its inclusion when it isn't the main https://github.com/emscripten-core/emscripten/issues/10571 issue 3465 test hyper-dynamic linking, and test duplicate warnings in wasm, we can't flip as the side would have an EM_ASM, which we don't support yet TODO .a linking must force all .o files inside it, when in a shared module contents of libfourth.a must be included, even if they aren't referred to! @needs_dlfcn def test_dylink_bullet(self): self.emcc_args += ['-I' + path_from_root('tests', 'bullet', 'src')] side = self.get_bullet_library(self, True) self.dylink_test(main=open(path_from_root('tests', 'bullet', 'Demos', 'HelloWorld', 'HelloWorld.cpp')).read(), side=side, expected=[open(path_from_root('tests', 'bullet', 'output.txt')).read(), different roundings open(path_from_root('tests', 'bullet', 'output2.txt')).read(), open(path_from_root('tests', 'bullet', 'output3.txt')).read()]) Verify that objects created in one module and be dynamic_cast<> correctly in the another module. Each module will define its own copy of certain COMDAT symbols such as each classs's typeinfo, but at runtime they should both use the same one. needs to flush stdio streams Run one test without assertions, for additional coverage doubles needs to flush stdio streams use i16s in printf needs to flush stdio streams closure can generate variables called 'gc', which pick up js shell stuff Use closure here, to test we don't break FS stuff ensure we test --closure 1 --memory-init-file 1 (-g would disable closure) Use closure 2 here for some additional coverage Test for Module.stdin etc. needs to flush stdio streams https://bugs.chromium.org/p/v8/issues/detail?id=6881 https://bugs.chromium.org/p/v8/issues/detail?id=6881 only node handles utf well Test that invalid character in UTF8 does not cause decoding to crash. Test that invalid character in UTF8 does not cause decoding to crash. needs to flush stdio streams needs to flush stdio streams , 1]: regression check for issue 273 TODO(sbc): It seems that INCLUDE_FULL_LIBRARY will generally generate undefined symbols at link time so perhaps have it imply this setting? see issue 2334 Enables strict mode, which may catch some strict-mode-only errors so that users can safely work with strict JavaScript if enabled. engines has different error stack format Node.js fs.chmod is nearly no-op on Windows FIXME symlinks on node.js on non-linux behave differently (e.g. on Windows they require administrative privileges) so skip testing those bits on that combination. Several differences/bugs on non-linux including https://github.com/nodejs/node/issues/18014 0 if root user Also, other detected discrepancies if you do end up running this test on NODEFS: test expects /, but Windows gives \ as path slashes. Calling readlink() on a non-link gives error 22 EINVAL on Unix, but simply error 0 OK on Windows. Also, other detected discrepancies if you do end up running this test on NODEFS: test expects /, but Windows gives \ as path slashes. Calling readlink() on a non-link gives error 22 EINVAL on Unix, but simply error 0 OK on Windows. 
uses constants from ERRNO_CODES avoid errors when linking in full library i64s in the API, which we'd need to legalize for JS, so in standalone mode all we can test is wasm VMs v8 lacks monotonic time the fastcomp implementation is incorrect in one way node, can find itself properly spidermonkey, v8 node, can find itself properly spidermonkey, v8 the wasm backend has lock-free atomics, but not asm.js or asm2wasm TODO: test with USE_PTHREADS in wasm backend as well libc++ tests add some timing nondeterminism here, not that we need it, but whatever Verify that this build is identical to the previous one Same but for the wasm file. 'Medium' tests TODO: Should we remove this test? slower, and fail on 64-bit needed with typed arrays slower, and fail on 64-bit needed with typed arrays Linked version TODO: do this in other passes too, passing their opts into emcc emcc should build in dlmalloc automatically, and do all the sign correction etc. for it The same for new and all its variants present part of the symbols of dlmalloc, not all present part of the symbols of dlmalloc, not all. malloc is harder to link than new which is weak. needs atexit needs to flush stdio streams uses register keyword Use closure here for some additional coverage remove -g, so we have one test without it by default TODO: wrappers for wasm modules extra coverage Improves test readability Improves test readability Tests invoking the SIMD API via x86 SSE1 xmmintrin.h header (_mm_x() functions) Tests invoking the SIMD API via x86 SSE2 emmintrin.h header (_mm_x() functions) Tests invoking the SIMD API via x86 SSE3 pmmintrin.h header (_mm_x() functions) Tests invoking the SIMD API via x86 SSSE3 tmmintrin.h header (_mm_x() functions) Tests invoking the SIMD API via x86 SSE4.1 smmintrin.h header (_mm_x() functions) Tests invoking the SIMD API via x86 SSE4.2 nmmintrin.h header (_mm_x() functions) Tests invoking the SIMD API via x86 AVX avxintrin.h header (_mm_x() functions) flip for some more coverage here Not needed for js, but useful for debugging Main github issue 324 without registerize (which -g disables), we generate huge amounts of code newer clang has a warning for implicit conversions that lose information, which happens in sqlite (see 9138) temporarily ignore unknown flags, which lets the above flag be used on our CI which doesn't yet have the new clang with that flag more source maps coverage Called thus so it runs late in the alphabetical cycle... it is long extra testing for ASSERTIONS == 2 different roundings Make sure that DFE ends up eliminating more than 200 functions (if we can view source) remove -g, so we have one test without it by default configure_args=['--enable-tiff=no', '--enable-jp3d=no', '--enable-png=no'], no -j 2, since parallel builds can fail We use doubles in JS, so we get slightly different values than native code. So we check our output by comparing the average pixel difference Get the image generated by JS, from the JSON.stringify'd array Evaluate the output as a python array Our output may be signed, so unsign it Get the correct output Compare them print '[image stats:', js_mean, image_mean, true_mean, diff_mean, num, ']' to compare the versions The real test for valid output is in image_compare extra testing The python build contains several undefined symbols lifetime stuff and their vars must be culled Test cases in separate files. Note that these files may contain invalid .ll! They are only valid enough for us to read for test purposes, not for llvm-as to process. 
needs to flush stdio streams These tests don't end up linking with libc due to a behaviour in emcc where the llvm-link step is skipped when the input is a single object file. Since most of them `printf` (which comes from JS) but depends on `strlen` (which comes from musl) these tests almost all have an undefined `strlen`, which happens to not get called. TODO(sbc): Remove the special case from emcc what bypasses llvm-link and then remove this line? The following tests link to libc, whereas others link with -nostdlib invalid ir pnacl limitations in ExpandStructRegs pnacl limitations in ExpandGetElementPtr current fastcomp limitations FIXME assumes malloc exists in JS casts a function pointer from (i32, i32)* to (i64)*, which happens to work in asm.js but is a general function pointer undefined behavior TODO: test only worked in non-fastcomp (well, these cases) no libc is linked in; with FILESYSTEM=0 we have a chance at printfing anyhow Optional source checking, a python script that gets a global generated with the source noqa LLVM LTO bug puts exists before LTO, but is not used; LTO cleans it out, but then creates uses to it (printf=>puts) XXX https://llvm.org/bugs/show_bug.cgi?id=23814 if os.path.basename(name) != '4.c': continue pnacl legalization issue, see https://code.google.com/p/nativeclient/issues/detail?id=4027 LLVM LTO bug Autodebug the code rebuild .bc TODO: use code in do_autodebug_post for this Run a test that should work, generating some code Add an ll hook, to force ll generation Compare to each other, and to expected output Test using build_ll_hook Autodebug does not work with too much shadow memory. Memory consumed by autodebug depends on the size of the WASM linear memory. With a large shadow memory, the JS engine runs out of memory. test that the program both works and also emits some of the logging (but without the specific output, as it is logging the actual locals used and so forth, which will change between opt modes and updates of llvm etc.) Integration tests test dyncall (and other runtime methods in support.js) can be exported these used to be exported, but no longer are by default see that direct usage (not on module) works. we don't export, but the use keeps it alive through JSDCE see that with assertions, we get a nice error message see that when we export them, things work on the module these used to be exported, but no longer are by default see that direct usage (not on module) works. we don't export, but the use keeps it alive through JSDCE see that with assertions, we get a nice error message see that when we export them, things work on the module these used to be exported, but no longer are by default see that direct usage (not on module) works. we don't export, but the use keeps it alive through JSDCE see that with assertions, we get a nice error message see that when we export them, things work on the module Sanity check that it works and the dead function is emitted Kill off the dead function, and check a code path using it aborts Kill off a library function, check code aborts This should expand into -Wl,--start-group <objfile> -Wl,--end-group enable costly assertions to verify correct table behavior with emulation, we don't need to reserve, except with wasm where we still do. 
asm.js, double <-> int wasm, reinterpret the bits ensure function names are preserved make sure the shortened name is the right one split by the first ':' (wasm backend demangling may include more :'s later on) we may see the full one, if -g, or the short one if not stack traces may also be ' name ' or 'name@' etc Use number of functions as a for code size Use number of functions as a for code size otherwise we ignore memory size The wasm backend currently exports a single initalizer so the ctor evaluation is all or nothing. As well as that it doesn't currently do DCE of libcxx symbols (because the are marked as visibility(defaault) and because of that we end up not being able to eval ctors unless all libcxx constrcutors can be eval'd noqa extra testing avoid closure minified names competing with our test code in the global name space Force IDL checks mode Export things on "TheModule". This matches the typical use pattern of the bound library being used as Box2D.* or Ammo.*, and we cannot rely on "Module" being always present (closure may remove it). Tests for tools the file name may find its way into the generated code, so make sure we can do an apples-to-apples comparison by compiling with the same file name after removing the @line and @sourceMappingURL comments, the build result should be identical to the non-source-mapped debug version. this is worth checking because the parser AST swaps strings for token objects when generating source maps, so we want to make sure the optimizer can deal with both types. in fastcomp, we have the absolute path, which is not good Python 2 compatibility the file attribute is optional, but if it is present it needs to refer the output file. the sourcesContent attribute is optional, but if it is present it needs to containt valid source text. Python 2 compatibility ensure that all the 'meaningful' lines in the original code get mapped when optimizing, the binaryen optimizer may remove some of them (by inlining, etc.) parse the sections a new section, a line like ".debug_str contents:" possibly a line in a section make sure the right sections exist verify some content in the sections the line section looks like this: Address Line Column File ISA Discriminator Flags ------------------ ------ ------ ------ --- ------------- ------------- 0x000000000000000b 5 0 3 0 0 is_stmt each of the calls must remain in the binary, and be mapped the lines must appear in sequence (as calls to JS, the optimizer cannot reorder them) get the wat, printing with -g which has binary offsets we expect to see a pattern like this, as in both debug and opt builds there isn't much that can change with such calls to JS (they can't be reordered or anything else): ;; code offset: 0x? (drop ;; code offset: 0x? (call $out_to_js ;; code offset: 0x? (local.get ?) or (i32.const ?) ) ) In stacky stream of instructions form, it is local.get or i32.const call $out_to_js drop get_wat_addr gets the address of one of the 3 interesting calls, by its index (0,1,2). find the call_index-th call the call begins with the local.get/i32.const printed below it, which is the first instruction in the stream, so it has the lowest address the call ends with the drop, which is the last in the stream, at the highest address match up the DWARF and the wat the dwarf may match any of the 3 instructions that form the stream of of instructions implementing the call in the source code, in theory test that the combination of modularize + closure + pre-js works. 
in that mode, closure should not minify the Module object in a way that the pre-js cannot use it. v8 doesn't support console.log XXX Does not work in SpiderMonkey since callstacks cannot be captured when running in asm.js, see https://bugzilla.mozilla.org/show_bug.cgi?id=947996 test closure compiler as well extra testing needs to flush stdio streams needs to flush stdio streams needs setTimeout which only node has needs to flush stdio streams test a program not using asyncify, but the pref is set so that we can find the functions for the changes below If the call to lazy-load is unconditional, then the optimizer can dce out more than half attempts to "break" the wasm by adding an unreachable in $foo_end. returns whether we found it. we found the first line after the local defs $foo_end is not present in the wasm, nothing to break the first-loaded wasm will not reach the second call, since we call it after lazy-loading. verify that by changing the first wasm to throw in that function but breaking the second wasm actually breaks us restore if we do not call the lazy load function, then we do not need the lazy wasm, and we do the second call in the first wasm Test basic wasm2js functionality in all core compilation modes. a mem init file is emitted just like with JS see that running as wasm works run wasm2js, bundle the code, and use the wasm2js path remove the wasm to make sure we never use it again verify that it runs First run with WebAssembly support enabled Move the Wasm2js fallback away to test it is not accidentally getting loaded. Then disable WebAssembly support in VM, and try again.. Should still work with Wasm2JS fallback. Also delete the Wasm file to test that it is not attempted to be loaded. See https://github.com/emscripten-core/emscripten/pull/2688 and http://llvm.org/bugs/show_bug.cgi?id=18735 This test checks for the global variables required to run the memory profiler. It would fail if these variables were made no longer global or if their identifiers were changed. if IDBFS and NODEFS are not enabled, they are not present. wasm backend does not use asm2wasm imports, which js trap mode requires JS did an fmod 2^32 | normal Tests that we can use the dlmalloc mallinfo() function to obtain information about malloc()ed blocks and compute how much memory is used/freed. set us to test in just this engine tell the compiler to build with just that engine test with the right env test with the wrong env test with a combined env test for issue 6331 verify that an exception thrown in postRun() will not trigger the compilation failed handler, and will be printed to stderr. Tests that building with -s DECLARE_ASM_MODULE_EXPORTS=0 works In optimized builds only the single reference cFunction that exists in the EM_ASM should exist With js the asm module itself also contains a reference for the cFunction name Tests that building with -s DECLARE_ASM_MODULE_EXPORTS=0 works Tests that -s MINIMAL_RUNTIME=1 works well in different build modes TODO: Support for non-Node.js shells has not yet been added to MINIMAL_RUNTIME Test that printf() works in MINIMAL_RUNTIME=1 Tests that -s MINIMAL_RUNTIME=1 works well with SAFE_HEAP Tests global initializer with -s MINIMAL_RUNTIME=1 Need to use `-g` to get proper line numbers in asm.js Need to use `-g` to get proper line numbers in asm.js note: these tests have things like -fno-builtin-memset in order to avoid clang optimizing things away. 
for example, a memset might be optimized into stores, and then the stores identified as dead, which leaves nothing for asan to test. here we want to test asan itself, so we work around that. with a pool, we can synchronously depend on workers being available Tests the emscripten_get_exported_function() API. Could also test with -s ALLOW_TABLE_GROWTH=1 Tests the emscripten_get_exported_function() API. Could also test with -s ALLOW_TABLE_GROWTH=1 Marked as impure since the WASI reactor modules (modules without main) are not yet suppored by the wasm engines we test against. Traditionally in emscripten we allow main to be undefined. This allows programs with a main and libraries without a main to be compiled identically. However we are trying to move away from that model to a more explicit opt-out model. See: https://github.com/emscripten-core/emscripten/issues/9640 Disabling IGNORE_MISSING_MAIN should cause link to fail due to missing main If we pass --no-entry or set EXPORTED_FUNCTIONS to empty should never see any errors Tests the operation of API found in include <emscripten/math.h> Generate tests for everything noqa Ensure the directory exists and go there avoid various compiler warnings in our test output Main asm.js test modes Main wasm test modes Secondary test modes - run directly when there is a specific need features asm.js wasm Add DEFAULT_TO_CXX=0 Experimental modes (not tested by CI) TestCoreBase is just a shape for the specific subclasses, we don't test it itself noqa
28,778
en
0.873127
"""
License: This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at https://mozilla.org/MPL/2.0/.
"""
from collections.abc import MutableMapping
import posixpath

import boto3
import botocore
from botocore.exceptions import ClientError
from s3fs import S3FileSystem

from hub.exceptions import S3Exception
from hub.log import logger


class S3Storage(MutableMapping):
    def __init__(
        self,
        s3fs: S3FileSystem,
        url: str = None,
        public=False,
        aws_access_key_id=None,
        aws_secret_access_key=None,
        aws_session_token=None,
        parallel=25,
        endpoint_url=None,
        aws_region=None,
    ):
        self.s3fs = s3fs
        self.root = {}
        self.url = url
        self.public = public
        self.parallel = parallel
        self.aws_region = aws_region
        self.endpoint_url = endpoint_url
        self.bucket = url.split("/")[2]
        self.path = "/".join(url.split("/")[3:])
        if self.bucket == "s3:":
            # FIXME for some reason this is wasabi case here, probably url is something like wasabi://s3://...
            self.bucket = url.split("/")[4]
            self.path = "/".join(url.split("/")[5:])
        self.bucketpath = posixpath.join(self.bucket, self.path)
        self.protocol = "object"

        client_config = botocore.config.Config(
            max_pool_connections=parallel,
        )

        self.client = boto3.client(
            "s3",
            aws_access_key_id=aws_access_key_id,
            aws_secret_access_key=aws_secret_access_key,
            aws_session_token=aws_session_token,
            config=client_config,
            endpoint_url=endpoint_url,
            region_name=aws_region,
        )
        self.resource = boto3.resource(
            "s3",
            aws_access_key_id=aws_access_key_id,
            aws_secret_access_key=aws_secret_access_key,
            aws_session_token=aws_session_token,
            config=client_config,
            endpoint_url=endpoint_url,
            region_name=aws_region,
        )

    def __setitem__(self, path, content):
        try:
            path = posixpath.join(self.path, path)
            content = bytearray(memoryview(content))
            attrs = {
                "Bucket": self.bucket,
                "Body": content,
                "Key": path,
                "ContentType": ("application/octet-stream"),
            }
            self.client.put_object(**attrs)
        except Exception as err:
            logger.error(err)
            raise S3Exception(err)

    def __getitem__(self, path):
        try:
            path = posixpath.join(self.path, path)
            resp = self.client.get_object(
                Bucket=self.bucket,
                Key=path,
            )
            x = resp["Body"].read()
            return x
        except ClientError as err:
            if err.response["Error"]["Code"] == "NoSuchKey":
                raise KeyError(err)
            else:
                raise
        except Exception as err:
            logger.error(err)
            raise S3Exception(err)

    def __delitem__(self, path):
        try:
            path = posixpath.join(self.bucketpath, path)
            self.s3fs.rm(path, recursive=True)
        except Exception as err:
            logger.error(err)
            raise S3Exception(err)

    def __len__(self):
        return len(self.s3fs.ls(self.bucketpath, detail=False, refresh=True))

    def __iter__(self):
        items = self.s3fs.ls(self.bucketpath, detail=False, refresh=True)
        yield from [item[len(self.bucketpath) + 1 :] for item in items]
hub/store/s3_storage.py
3,735
License: This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain one at https://mozilla.org/MPL/2.0/. FIXME for some reason this is wasabi case here, probably url is something like wasabi://s3://...
301
en
0.924941
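Since S3Storage above implements the MutableMapping interface, object reads and writes use plain dictionary syntax on top of boto3's put_object/get_object and s3fs listing calls. A minimal usage sketch, assuming the hub and s3fs packages are installed, AWS credentials are available in the environment, and the bucket exists; the bucket name and keys below are illustrative.

from s3fs import S3FileSystem

from hub.store.s3_storage import S3Storage

fs = S3FileSystem()  # resolves credentials from the environment in this sketch
store = S3Storage(fs, url="s3://example-bucket/datasets/example")

store["chunks/0"] = b"\x00" * 1024   # __setitem__ -> client.put_object
raw = store["chunks/0"]              # __getitem__ -> client.get_object
print(len(raw), len(store))          # __len__ -> s3fs.ls on the bucket/prefix

for key in store:                    # __iter__ yields keys relative to the prefix
    print(key)

del store["chunks/0"]                # __delitem__ -> s3fs.rm (recursive)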
""" Implementation of the `CID spec <https://github.com/multiformats/cid>`_. This module differs from other modules of :mod:`~multiformats`, in that the functionality is completely encapsulated by a single class :class:`CID`, which is imported from top level instead of the module itself: >>> from multiformats import CID """ from typing import Any, cast, FrozenSet, Tuple, Type, TypeVar, Union from typing_extensions import Literal, Final from typing_validation import validate from bases import base58btc from multiformats import varint, multicodec, multibase, multihash from multiformats.multicodec import Multicodec from multiformats.multibase import Multibase from multiformats.multihash import Multihash, _validate_raw_digest_size from multiformats.varint import BytesLike, byteslike _CIDSubclass = TypeVar("_CIDSubclass", bound="CID") CIDVersion = Literal[0, 1] CIDVersionNumbers: Final[FrozenSet[int]] = frozenset({0, 1}) def _binary_cid_from_str(cid: str) -> Tuple[bytes, Multibase]: if len(cid) == 46 and cid.startswith("Qm"): # CIDv0 to be decoded as base58btc return base58btc.decode(cid), multibase.get("base58btc") mb, b = multibase.decode_raw(cid) if b[0] == 0x12: # CIDv0 may not be multibase encoded (0x12 is the first byte of sha2-256 multihashes) # CIDv18 (first byte 18=0x12) will be skipped to prevent ambiguity raise ValueError("CIDv0 may not be multibase encoded (found multibase encoded bytes starting with 0x12).") return b, mb def _CID_validate_multibase(base: Union[str, Multibase]) -> Multibase: if isinstance(base, str): base = multibase.get(base) else: multibase.validate_multibase(base) return base def _CID_validate_multicodec(codec: Union[str, int, Multicodec]) -> Multicodec: if isinstance(codec, str): codec = multicodec.get(codec) elif isinstance(codec, int): codec = multicodec.get(code=codec) else: multicodec.validate_multicodec(codec) return codec def _CID_validate_multihash(hashfun: Union[str, int, Multihash]) -> Multihash: if isinstance(hashfun, str): hashfun = multihash.get(hashfun) elif isinstance(hashfun, int): hashfun = multihash.get(code=hashfun) else: pass return hashfun def _CID_validate_raw_digest(raw_digest: Union[str, BytesLike], hashfun: Multihash) -> bytes: if isinstance(raw_digest, str): raw_digest = bytes.fromhex(raw_digest) else: validate(raw_digest, BytesLike) if not isinstance(raw_digest, bytes): raw_digest = bytes(raw_digest) _, max_digest_size = hashfun.implementation _validate_raw_digest_size(hashfun.name, raw_digest, max_digest_size) return raw_digest def _CID_validate_multihash_digest(digest: Union[str, BytesLike]) -> Tuple[Multihash, bytes]: if isinstance(digest, str): digest = bytes.fromhex(digest) raw_digest: BytesLike code, raw_digest = multihash.unwrap_raw(digest) hashfun = _CID_validate_multihash(code) raw_digest = _CID_validate_raw_digest(raw_digest, hashfun) return hashfun, raw_digest def _CID_validate_version(version: int, base: Multibase, codec: Multicodec, hashfun: Multihash) -> int: if version in (2, 3): raise ValueError("CID versions 2 and 3 are reserved for future use.") if version not in (0, 1): raise ValueError(f"CID version {version} is not allowed.") if version == 0: if base.name != 'base58btc': raise ValueError(f"CIDv0 multibase must be 'base58btc', found {repr(base.name)} instead.") if codec.name != "dag-pb": raise ValueError(f"CIDv0 multicodec must be 'dag-pb', found {repr(codec.name)} instead.") if hashfun.name != "sha2-256": raise ValueError(f"CIDv0 multihash must be 'sha2-256', found {repr(hashfun.name)} instead.") return version 
class CID: """ Container class for `Content IDentifiers <https://github.com/multiformats/cid>`_. CIDs can be explicitly instantiated by passing multibase, CID version, multicodec and multihash digest to the constructor: >>> cid = CID("base58btc", 1, "raw", ... "12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95") >>> str(cid) 'zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA' Alternatively, a pair of multihash codec and raw hash digest can be passed in lieu of the multihash digest: >>> raw_digest = bytes.fromhex( ... "6e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95") >>> cid = CID("base58btc", 1, "raw", ("sha2-256", raw_digest)) >>> str(cid) 'zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA' The multihash digest and raw digest values can be passed either as :obj:`bytes`-like objects or as the corresponding hex strings: >>> isinstance(raw_digest, bytes) True >>> raw_digest.hex() '6e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95' Note: the hex strings are not multibase encoded. Calling :obj:`bytes` on an instance of this class returns its binary representation, as a :obj:`bytes` object: >>> cid = CID("base58btc", 1, "raw", ... "12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95") >>> raw_digest.hex() '6e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95' >>> bytes(cid).hex() '015512206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95' #^^ 0x01 = CIDv1 # ^^ 0x55 = 'raw' codec >>> bytes(cid) :param base: default multibase to use when encoding this CID :type base: :obj:`str` or :class:`~multiformats.multibase.Multibase` :param version: the CID version :type version: 0 or 1 :param codec: the content multicodec :type codec: :obj:`str`, :obj:`int` or :class:`~multiformats.multicodec.Multicodec` :param digest: the content multihash digest, or a pair of multihash codec and raw content digest :type digest: see below The ``digest`` parameter can be specified in the following ways: - as a :obj:`str`, in which case it is treated as a hex-string and converted to :obj:`bytes` using :obj:`bytes.fromhex` - as a :obj:`~multiformats.varint.BytesLike`, in which case it is converted to :obj:`bytes` directly - as a pair ``(multihash_codec, raw_digest)`` of a multihash and raw hash digest, which are used to produce a multihash digest via the :meth:`~multiformats.multihash.Multihash.wrap` metho If ``digest`` is specified by a pair, the ``multihash_codec`` value can be specified in the following ways: - by multihash multicodec name, as a :obj:`str` - by multihash multicodec code, as a :obj:`int` - as a :class:`~multiformats.multihash.Multihash` object If ``digest`` is specified by a pair, the ``raw_digest`` value can be specified in the following ways: - as a :obj:`str`, in which case it is treated as a hex-string and converted to :obj:`bytes` using :obj:`bytes.fromhex` - as a :obj:`~multiformats.varint.BytesLike`, in which case it is converted to :obj:`bytes` directly :raises ValueError: if the CID version is unsupported :raises ValueError: if version is 0 but base is not 'base58btc' or codec is not 'dag-pb' :raises KeyError: if the multibase, multicodec or multihash are unknown """ _base: Multibase _version: CIDVersion _codec: Multicodec _hashfun: Multihash _digest: bytes __slots__ = ("__weakref__", "_base", "_version", "_codec", "_hashfun", "_digest") def __new__(cls: Type[_CIDSubclass], base: Union[str, Multibase], version: int, codec: Union[str, int, Multicodec], digest: Union[str, BytesLike, Tuple[Union[str, int, 
Multihash], Union[str, BytesLike]]], ) -> _CIDSubclass: # pylint: disable = too-many-arguments base = _CID_validate_multibase(base) codec = _CID_validate_multicodec(codec) raw_digest: Union[str, bytes] hashfun: Union[str, int, Multihash] if isinstance(digest, (str,)+byteslike): hashfun, raw_digest = _CID_validate_multihash_digest(digest) else: validate(digest, Tuple[Union[str, int, Multihash], Union[str, BytesLike]]) hashfun, raw_digest = digest hashfun = _CID_validate_multihash(hashfun) raw_digest = _CID_validate_raw_digest(raw_digest, hashfun) version = _CID_validate_version(version, base, codec, hashfun) if isinstance(digest, bytes): return CID._new_instance(cls, base, version, codec, hashfun, digest) return CID._new_instance(cls, base, version, codec, hashfun, (hashfun, raw_digest)) @staticmethod def _new_instance(CID_subclass: Type[_CIDSubclass], base: Multibase, version: int, codec: Multicodec, hashfun: Multihash, digest: Union[bytes, Tuple[Multihash, bytes]], ) -> _CIDSubclass: # pylint: disable = too-many-arguments instance: _CIDSubclass = super().__new__(CID_subclass) instance._base = base assert version in (0, 1) instance._version = cast(Literal[0, 1], version) instance._codec = codec instance._hashfun = hashfun if isinstance(digest, bytes): instance._digest = digest elif isinstance(digest, byteslike): instance._digest = bytes(digest) else: _hashfun, raw_digest = digest if not isinstance(raw_digest, bytes): raw_digest = bytes(raw_digest) assert _hashfun == hashfun, "You passed different multihashes to a _new_instance call with digest as a pair." instance._digest = hashfun.wrap(raw_digest) return instance @property def version(self) -> CIDVersion: """ CID version. Example usage: >>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA" >>> cid = CID.decode(s) >>> cid.version 1 """ return self._version @property def base(self) -> Multibase: """ Multibase used to encode the CID: - if a CIDv1 was decoded from a multibase-encoded string, the encoding multibase is used - if a CIDv1 was decoded from a bytestring, the 'base58btc' multibase is used - for a CIDv0, 'base58btc' is always used Example usage: >>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA" >>> cid = CID.decode(s) >>> cid.base Multibase(name='base58btc', code='z', status='default', description='base58 bitcoin') """ return self._base @property def codec(self) -> Multicodec: """ Codec that the multihash digest refers to. Example usage: >>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA" >>> cid = CID.decode(s) >>> cid.codec Multicodec(name='raw', tag='ipld', code='0x55', status='permanent', description='raw binary') """ return self._codec @property def hashfun(self) -> Multihash: """ Multihash used to produce the multihash digest. Example usage: >>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA" >>> cid = CID.decode(s) >>> cid.hashfun Multicodec(name='sha2-256', tag='multihash', code='0x12', status='permanent', description='') """ return self._hashfun @property def digest(self) -> bytes: """ Multihash digest. Example usage: >>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA" >>> cid = CID.decode(s) >>> cid.digest.hex() '12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95' """ return self._digest @property def raw_digest(self) -> bytes: """ Raw hash digest, decoded from the multihash digest. 
Example usage: >>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA" >>> cid = CID.decode(s) >>> cid.raw_digest.hex() '6e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95' """ return multihash.unwrap(self._digest) @property def human_readable(self) -> str: """ Human-readable representation of the CID. Example usage: >>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA" >>> cid = CID.decode(s) >>> cid.human_readable 'base58btc - cidv1 - raw - (sha2-256 : 256 : 6E6FF7950A36187A801613426E858DCE686CD7D7E3C0FC42EE0330072D245C95)' """ raw_digest = self.raw_digest hashfun_str = f"({self.hashfun.name} : {len(raw_digest)*8} : {raw_digest.hex().upper()})" return f"{self.base.name} - cidv{self.version} - {self.codec.name} - {hashfun_str}" def encode(self, base: Union[None, str, Multibase] = None) -> str: """ Encodes the CID using a given multibase. If :obj:`None` is given, the CID's own multibase is used by default. Example usage: >>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA" >>> cid = CID.decode(s) >>> cid.encode() # default: cid.base 'zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA' >>> cid.encode("base32") 'bafkreidon73zkcrwdb5iafqtijxildoonbwnpv7dyd6ef3qdgads2jc4su' :param base: the multibase to be used for encoding :type base: :obj:`None`, :obj:`str` or :class:`~multiformats.multibase.Multibase`, *optional* :raises KeyError: see :meth:`multiformats.multibase.Multibase.encode` """ if self.version == 0: if base is not None: raise ValueError("CIDv0 cannot be multibase-encoded, please set multibase=None.") return base58btc.encode(bytes(self)) if base is None or base == self.base: base = self.base # use CID's own multibase as default else: if isinstance(base, str): base = multibase.get(base) else: multibase.validate_multibase(base) return base.encode(bytes(self)) def set(self, *, base: Union[None, str, Multibase] = None, version: Union[None, int] = None, codec: Union[None, str, int, Multicodec] = None ) -> "CID": """ Returns a new CID obtained by setting new values for one or more of: ``base``, ``version``, or ``codec``. Example usage: >>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA" >>> cid = CID.decode(s) >>> cid CID('base58btc', 1, 'raw', '12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95') >>> cid.set(base="base32") CID('base32', 1, 'raw', '12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95') >>> cid.set(codec="dag-cbor") CID('base58btc', 1, 'dag-cbor', '12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95') >>> cid.set(version=0, codec="dag-pb") CID('base58btc', 0, 'dag-pb', '12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95') >>> cid CID('base58btc', 1, 'raw', '12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95') # Note: 'CID.set' returns new instances, # the original 'cid' instance is unchanged If setting ``version`` to 0, ``base`` must be 'base58btc' and ``codec`` must be 'dag-pb'. >>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA" >>> cid = CID.decode(s) >>> cid CID('base58btc', 1, 'raw', '12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95') >>> cid.set(version=0, codec="dag-pb") CID('base58btc', 0, 'dag-pb', '12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95') >>> cid.set(version=0) ValueError: CIDv0 multicodec must be 'dag-pb', found 'raw' instead. 
>>> cid.set(version=0, codec="dag-pb", base="base32") ValueError: CIDv0 multibase must be 'base58btc', found 'base32' instead :param base: the new CID multibase, or :obj:`None` if multibase unchanged :type base: :obj:`None`, :obj:`str` or :class:`~multiformats.multibase.Multibase`, *optional* :param version: the new CID version, or :obj:`None` if version unchanged :type version: :obj:`None`, 0 or 1, *optional* :param codec: the new content multicodec, or :obj:`None` if multicodec unchanged :type codec: :obj:`None`, :obj:`str` or :class:`~multiformats.multicodec.Multicodec`, *optional* :raises KeyError: if the multibase or multicodec are unknown """ hashfun = self.hashfun digest = self.digest if base is not None and base not in (self.base, self.base.name): base = _CID_validate_multibase(base) else: base = self.base if codec is not None and codec not in (self.codec, self.codec.name, self.codec.code): codec = _CID_validate_multicodec(codec) else: codec = self.codec if version is not None and version != self.version: _CID_validate_version(version, base, codec, hashfun) else: version = self.version return CID._new_instance(CID, base, version, codec, hashfun, digest) def __bytes__(self) -> bytes: if self.version == 0: return self.digest return varint.encode(self.version)+varint.encode(self.codec.code)+self.digest def __str__(self) -> str: return self.encode() def __repr__(self) -> str: mb = self.base.name v = self.version mc = self.codec.name d = self.digest return f"CID({repr(mb)}, {v}, {repr(mc)}, {repr(d.hex())})" @property def _as_tuple(self) -> Tuple[Type["CID"], int, Multicodec, bytes]: return (CID, self.version, self.codec, self.digest) def __hash__(self) -> int: return hash(self._as_tuple) def __eq__(self, other: Any) -> bool: if self is other: return True if not isinstance(other, CID): return NotImplemented return self._as_tuple == other._as_tuple @staticmethod def decode(cid: Union[str, BytesLike]) -> "CID": """ Decodes a CID from a bytestring or a hex string (which will be converted to :obj:`bytes` using :obj:`bytes.fromhex`). Note: the hex string is not multibase encoded. Example usage for CIDv1 multibase-encoded string: >>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA" >>> CID.decode(s) CID('base58btc', 1, 'raw', '12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95') Example usage for CIDv1 bytestring (multibase always set to 'base58btc'): >>> b = bytes.fromhex( ... "015512206e6ff7950a36187a801613426e85" ... "8dce686cd7d7e3c0fc42ee0330072d245c95") >>> CID.decode(b) CID('base58btc', 1, 'raw', '12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95') Example usage for CIDv0 base58-encoded string: >>> s = "QmbWqxBEKC3P8tqsKc98xmWNzrzDtRLMiMPL8wBuTGsMnR" >>> CID.decode(s) CID('base58btc', 0, 'dag-pb', '1220c3c4733ec8affd06cf9e9ff50ffc6bcd2ec85a6170004bb709669c31de94391a') Example usage for CIDv0 bytestring (multibase always set to 'base58btc'): >>> b = bytes.fromhex( ... "1220c3c4733ec8affd06cf9e9ff50ffc6b" ... 
"cd2ec85a6170004bb709669c31de94391a") >>> CID.decode(b) CID('base58btc', 0, 'dag-pb', '1220c3c4733ec8affd06cf9e9ff50ffc6bcd2ec85a6170004bb709669c31de94391a') :param cid: the CID bytes or multibase-encoded string :type cid: :obj:`str` or :obj:`~multiformats.varint.BytesLike` :raises ValueError: if the CID is malformed or the CID version is unsupported :raises KeyError: if the multibase, multicodec or multihash are unknown """ if isinstance(cid, str): cid, mb = _binary_cid_from_str(cid) else: mb = multibase.get("base58btc") validate(cid, BytesLike) cid = memoryview(cid) # if len(cid) == 34 and cid.startswith(b"\x12\x20"): if len(cid) == 34 and cid[0] == 0x12 and cid[1] == 0x20: v = 0 # CID version mc_code = 0x70 # multicodec.get("dag-pb") digest = cid # multihash digest is what's left else: v, _, cid = varint.decode_raw(cid) # CID version if v == 0: raise ValueError("CIDv0 is malformed.") if v in (2, 3): raise ValueError("CID versions 2 and 3 are reserved for future use.") if v != 1: raise ValueError(f"CIDv{v} is currently not supported.") mc_code, _, cid = multicodec.unwrap_raw(cid) # multicodec digest = cid # multihash digest is what's left mc = multicodec.get(code=mc_code) mh_code, _ = multihash.unwrap_raw(digest) mh = multihash.get(code=mh_code) return CID._new_instance(CID, mb, v, mc, mh, digest) @staticmethod def peer_id(pk_bytes: Union[str, BytesLike]) -> "CID": """ Wraps the raw hash of a public key into a `PeerID <https://docs.libp2p.io/concepts/peer-id/>`_, as a CIDv1. The ``pk_bytes`` argument should be the binary public key, encoded according to the `PeerID spec <https://github.com/libp2p/specs/blob/master/peer-ids/peer-ids.md>`_. This can be passed as a bytestring or as a hex string (which will be converted to :obj:`bytes` using :obj:`bytes.fromhex`). Note: the hex string is not multibase encoded. Example usage with Ed25519 public key: >>> pk_bytes = bytes.fromhex( ... "1498b5467a63dffa2dc9d9e069caf075d16fc33fdd4c3b01bfadae6433767d93") ... # a 32-byte Ed25519 public key >>> peer_id = CID.peer_id(pk_bytes) >>> peer_id CID('base32', 1, 'libp2p-key', '00201498b5467a63dffa2dc9d9e069caf075d16fc33fdd4c3b01bfadae6433767d93') #^^ 0x00 = 'identity' multihash used (public key length <= 42) # ^^ 0x20 = 32-bytes of raw hash digestlength >>> str(peer_id) 'bafzaaiautc2um6td375c3soz4bu4v4dv2fx4gp65jq5qdp5nvzsdg5t5sm' Snippet showing how to obtain the `Ed25519 <https://cryptography.io/en/latest/hazmat/primitives/asymmetric/ed25519/>`_ public key bytestring using the `cryptography <https://github.com/pyca/cryptography>`_ library: >>> from cryptography.hazmat.primitives.asymmetric.ed25519 import Ed25519PrivateKey >>> from cryptography.hazmat.primitives.serialization import Encoding, PublicFormat >>> private_key = Ed25519PrivateKey.generate() >>> public_key = private_key.public_key() >>> pk_bytes = public_key.public_bytes( ... encoding=Encoding.Raw, ... format=PublicFormat.Raw ... ) >>> pk_bytes.hex() "1498b5467a63dffa2dc9d9e069caf075d16fc33fdd4c3b01bfadae6433767d93" Example usage with DER-encoded RSA public key: >>> pk_bytes = bytes.fromhex( ... "30820122300d06092a864886f70d01010105000382010f003082010a02820101" ... "009a56a5c11e2705d0bfe0cd1fa66d5e519095cc741b62ed99ddf129c32e046e" ... "5ba3958bb8a068b05a95a6a0623cc3c889b1581793cd84a34cc2307e0dd74c70" ... "b4f230c74e5063ecd8e906d372be4eba13f47d04427a717ac78cb12b4b9c2ab5" ... "591f36f98021a70f84d782c36c51819054228ff35a45efa3f82b27849ec89036" ... "26b4a4c4b40f9f74b79caf55253687124c79cb10cd3bc73f0c44fbd341e5417d" ... 
"2e85e900d22849d2bc85ca6bf037f1f5b4f9759b4b6942fccdf1140b30ea7557" ... "87deb5c373c5953c14d64b523959a76a32a599903974a98cf38d4aaac7e359f8" ... "6b00a91dcf424bf794592139e7097d7e65889259227c07155770276b6eda4cec" ... "370203010001") ... # a 294-byte RSA public key >>> peer_id = CID.peer_id(pk_bytes) >>> peer_id CID('base32', 1, 'libp2p-key', '1220c1a6513ffb14f202f75453c49666a5b9d7ed9a1a068891daf824d477573f829f') #^^ 0x12 = 'sha2-256' multihash used (public key length > 42) # ^^ 0x20 = 32-bytes of raw hash digest length >>> str(peer_id) 'bafzbeigbuzit76yu6ibpovctyslgnjnz27wzugqgrci5v6be2r3vop4ct4' Snippet showing how to obtain the `RSA <https://cryptography.io/en/latest/hazmat/primitives/asymmetric/rsa/>`_ public key bytestring using the `cryptography <https://github.com/pyca/cryptography>`_ library: >>> from cryptography.hazmat.primitives.asymmetric import rsa >>> from cryptography.hazmat.primitives.serialization import Encoding, PublicFormat >>> private_key = rsa.generate_private_key( ... public_exponent=65537, ... key_size=2048, ... ) >>> public_key = private_key.public_key() >>> pk_bytes = public_key.public_bytes( ... encoding=Encoding.DER, ... format=PublicFormat.SubjectPublicKeyInfo ... ) >>> pk_bytes.hex() "30820122300d06092a864886f70d01010105000382010f003082010a02820101" "009a56a5c11e2705d0bfe0cd1fa66d5e519095cc741b62ed99ddf129c32e046e" "5ba3958bb8a068b05a95a6a0623cc3c889b1581793cd84a34cc2307e0dd74c70" "b4f230c74e5063ecd8e906d372be4eba13f47d04427a717ac78cb12b4b9c2ab5" "591f36f98021a70f84d782c36c51819054228ff35a45efa3f82b27849ec89036" "26b4a4c4b40f9f74b79caf55253687124c79cb10cd3bc73f0c44fbd341e5417d" "2e85e900d22849d2bc85ca6bf037f1f5b4f9759b4b6942fccdf1140b30ea7557" "87deb5c373c5953c14d64b523959a76a32a599903974a98cf38d4aaac7e359f8" "6b00a91dcf424bf794592139e7097d7e65889259227c07155770276b6eda4cec" "370203010001" :param pk_bytes: the public key bytes :type pk_bytes: :obj:`str` or :obj:`~multiformats.varint.BytesLike` :raises ValueError: if ``pk_bytes`` is passed as a string and is not the hex-string of some bytes """ if isinstance(pk_bytes, str): pk_bytes = bytes.fromhex(pk_bytes) else: validate(pk_bytes, BytesLike) if len(pk_bytes) <= 42: mh = multihash.get("identity") digest = multihash.digest(pk_bytes, mh) else: mh = multihash.get("sha2-256") digest = multihash.digest(pk_bytes, mh) mc = multicodec.get(code=0x72) # multicodec.get("libp2p-key") mb = multibase.get("base32") return CID._new_instance(CID, mb, 1, mc, mh, digest)
multiformats/cid/__init__.py
28,154
Container class for `Content IDentifiers <https://github.com/multiformats/cid>`_. CIDs can be explicitly instantiated by passing multibase, CID version, multicodec and multihash digest to the constructor: >>> cid = CID("base58btc", 1, "raw", ... "12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95") >>> str(cid) 'zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA' Alternatively, a pair of multihash codec and raw hash digest can be passed in lieu of the multihash digest: >>> raw_digest = bytes.fromhex( ... "6e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95") >>> cid = CID("base58btc", 1, "raw", ("sha2-256", raw_digest)) >>> str(cid) 'zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA' The multihash digest and raw digest values can be passed either as :obj:`bytes`-like objects or as the corresponding hex strings: >>> isinstance(raw_digest, bytes) True >>> raw_digest.hex() '6e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95' Note: the hex strings are not multibase encoded. Calling :obj:`bytes` on an instance of this class returns its binary representation, as a :obj:`bytes` object: >>> cid = CID("base58btc", 1, "raw", ... "12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95") >>> raw_digest.hex() '6e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95' >>> bytes(cid).hex() '015512206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95' #^^ 0x01 = CIDv1 # ^^ 0x55 = 'raw' codec >>> bytes(cid) :param base: default multibase to use when encoding this CID :type base: :obj:`str` or :class:`~multiformats.multibase.Multibase` :param version: the CID version :type version: 0 or 1 :param codec: the content multicodec :type codec: :obj:`str`, :obj:`int` or :class:`~multiformats.multicodec.Multicodec` :param digest: the content multihash digest, or a pair of multihash codec and raw content digest :type digest: see below The ``digest`` parameter can be specified in the following ways: - as a :obj:`str`, in which case it is treated as a hex-string and converted to :obj:`bytes` using :obj:`bytes.fromhex` - as a :obj:`~multiformats.varint.BytesLike`, in which case it is converted to :obj:`bytes` directly - as a pair ``(multihash_codec, raw_digest)`` of a multihash and raw hash digest, which are used to produce a multihash digest via the :meth:`~multiformats.multihash.Multihash.wrap` metho If ``digest`` is specified by a pair, the ``multihash_codec`` value can be specified in the following ways: - by multihash multicodec name, as a :obj:`str` - by multihash multicodec code, as a :obj:`int` - as a :class:`~multiformats.multihash.Multihash` object If ``digest`` is specified by a pair, the ``raw_digest`` value can be specified in the following ways: - as a :obj:`str`, in which case it is treated as a hex-string and converted to :obj:`bytes` using :obj:`bytes.fromhex` - as a :obj:`~multiformats.varint.BytesLike`, in which case it is converted to :obj:`bytes` directly :raises ValueError: if the CID version is unsupported :raises ValueError: if version is 0 but base is not 'base58btc' or codec is not 'dag-pb' :raises KeyError: if the multibase, multicodec or multihash are unknown Multibase used to encode the CID: - if a CIDv1 was decoded from a multibase-encoded string, the encoding multibase is used - if a CIDv1 was decoded from a bytestring, the 'base58btc' multibase is used - for a CIDv0, 'base58btc' is always used Example usage: >>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA" >>> cid = CID.decode(s) >>> cid.base 
Multibase(name='base58btc', code='z', status='default', description='base58 bitcoin') Codec that the multihash digest refers to. Example usage: >>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA" >>> cid = CID.decode(s) >>> cid.codec Multicodec(name='raw', tag='ipld', code='0x55', status='permanent', description='raw binary') Decodes a CID from a bytestring or a hex string (which will be converted to :obj:`bytes` using :obj:`bytes.fromhex`). Note: the hex string is not multibase encoded. Example usage for CIDv1 multibase-encoded string: >>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA" >>> CID.decode(s) CID('base58btc', 1, 'raw', '12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95') Example usage for CIDv1 bytestring (multibase always set to 'base58btc'): >>> b = bytes.fromhex( ... "015512206e6ff7950a36187a801613426e85" ... "8dce686cd7d7e3c0fc42ee0330072d245c95") >>> CID.decode(b) CID('base58btc', 1, 'raw', '12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95') Example usage for CIDv0 base58-encoded string: >>> s = "QmbWqxBEKC3P8tqsKc98xmWNzrzDtRLMiMPL8wBuTGsMnR" >>> CID.decode(s) CID('base58btc', 0, 'dag-pb', '1220c3c4733ec8affd06cf9e9ff50ffc6bcd2ec85a6170004bb709669c31de94391a') Example usage for CIDv0 bytestring (multibase always set to 'base58btc'): >>> b = bytes.fromhex( ... "1220c3c4733ec8affd06cf9e9ff50ffc6b" ... "cd2ec85a6170004bb709669c31de94391a") >>> CID.decode(b) CID('base58btc', 0, 'dag-pb', '1220c3c4733ec8affd06cf9e9ff50ffc6bcd2ec85a6170004bb709669c31de94391a') :param cid: the CID bytes or multibase-encoded string :type cid: :obj:`str` or :obj:`~multiformats.varint.BytesLike` :raises ValueError: if the CID is malformed or the CID version is unsupported :raises KeyError: if the multibase, multicodec or multihash are unknown Multihash digest. Example usage: >>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA" >>> cid = CID.decode(s) >>> cid.digest.hex() '12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95' Encodes the CID using a given multibase. If :obj:`None` is given, the CID's own multibase is used by default. Example usage: >>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA" >>> cid = CID.decode(s) >>> cid.encode() # default: cid.base 'zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA' >>> cid.encode("base32") 'bafkreidon73zkcrwdb5iafqtijxildoonbwnpv7dyd6ef3qdgads2jc4su' :param base: the multibase to be used for encoding :type base: :obj:`None`, :obj:`str` or :class:`~multiformats.multibase.Multibase`, *optional* :raises KeyError: see :meth:`multiformats.multibase.Multibase.encode` Multihash used to produce the multihash digest. Example usage: >>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA" >>> cid = CID.decode(s) >>> cid.hashfun Multicodec(name='sha2-256', tag='multihash', code='0x12', status='permanent', description='') Human-readable representation of the CID. Example usage: >>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA" >>> cid = CID.decode(s) >>> cid.human_readable 'base58btc - cidv1 - raw - (sha2-256 : 256 : 6E6FF7950A36187A801613426E858DCE686CD7D7E3C0FC42EE0330072D245C95)' Wraps the raw hash of a public key into a `PeerID <https://docs.libp2p.io/concepts/peer-id/>`_, as a CIDv1. The ``pk_bytes`` argument should be the binary public key, encoded according to the `PeerID spec <https://github.com/libp2p/specs/blob/master/peer-ids/peer-ids.md>`_. 
This can be passed as a bytestring or as a hex string (which will be converted to :obj:`bytes` using :obj:`bytes.fromhex`). Note: the hex string is not multibase encoded. Example usage with Ed25519 public key: >>> pk_bytes = bytes.fromhex( ... "1498b5467a63dffa2dc9d9e069caf075d16fc33fdd4c3b01bfadae6433767d93") ... # a 32-byte Ed25519 public key >>> peer_id = CID.peer_id(pk_bytes) >>> peer_id CID('base32', 1, 'libp2p-key', '00201498b5467a63dffa2dc9d9e069caf075d16fc33fdd4c3b01bfadae6433767d93') #^^ 0x00 = 'identity' multihash used (public key length <= 42) # ^^ 0x20 = 32-bytes of raw hash digestlength >>> str(peer_id) 'bafzaaiautc2um6td375c3soz4bu4v4dv2fx4gp65jq5qdp5nvzsdg5t5sm' Snippet showing how to obtain the `Ed25519 <https://cryptography.io/en/latest/hazmat/primitives/asymmetric/ed25519/>`_ public key bytestring using the `cryptography <https://github.com/pyca/cryptography>`_ library: >>> from cryptography.hazmat.primitives.asymmetric.ed25519 import Ed25519PrivateKey >>> from cryptography.hazmat.primitives.serialization import Encoding, PublicFormat >>> private_key = Ed25519PrivateKey.generate() >>> public_key = private_key.public_key() >>> pk_bytes = public_key.public_bytes( ... encoding=Encoding.Raw, ... format=PublicFormat.Raw ... ) >>> pk_bytes.hex() "1498b5467a63dffa2dc9d9e069caf075d16fc33fdd4c3b01bfadae6433767d93" Example usage with DER-encoded RSA public key: >>> pk_bytes = bytes.fromhex( ... "30820122300d06092a864886f70d01010105000382010f003082010a02820101" ... "009a56a5c11e2705d0bfe0cd1fa66d5e519095cc741b62ed99ddf129c32e046e" ... "5ba3958bb8a068b05a95a6a0623cc3c889b1581793cd84a34cc2307e0dd74c70" ... "b4f230c74e5063ecd8e906d372be4eba13f47d04427a717ac78cb12b4b9c2ab5" ... "591f36f98021a70f84d782c36c51819054228ff35a45efa3f82b27849ec89036" ... "26b4a4c4b40f9f74b79caf55253687124c79cb10cd3bc73f0c44fbd341e5417d" ... "2e85e900d22849d2bc85ca6bf037f1f5b4f9759b4b6942fccdf1140b30ea7557" ... "87deb5c373c5953c14d64b523959a76a32a599903974a98cf38d4aaac7e359f8" ... "6b00a91dcf424bf794592139e7097d7e65889259227c07155770276b6eda4cec" ... "370203010001") ... # a 294-byte RSA public key >>> peer_id = CID.peer_id(pk_bytes) >>> peer_id CID('base32', 1, 'libp2p-key', '1220c1a6513ffb14f202f75453c49666a5b9d7ed9a1a068891daf824d477573f829f') #^^ 0x12 = 'sha2-256' multihash used (public key length > 42) # ^^ 0x20 = 32-bytes of raw hash digest length >>> str(peer_id) 'bafzbeigbuzit76yu6ibpovctyslgnjnz27wzugqgrci5v6be2r3vop4ct4' Snippet showing how to obtain the `RSA <https://cryptography.io/en/latest/hazmat/primitives/asymmetric/rsa/>`_ public key bytestring using the `cryptography <https://github.com/pyca/cryptography>`_ library: >>> from cryptography.hazmat.primitives.asymmetric import rsa >>> from cryptography.hazmat.primitives.serialization import Encoding, PublicFormat >>> private_key = rsa.generate_private_key( ... public_exponent=65537, ... key_size=2048, ... ) >>> public_key = private_key.public_key() >>> pk_bytes = public_key.public_bytes( ... encoding=Encoding.DER, ... format=PublicFormat.SubjectPublicKeyInfo ... 
) >>> pk_bytes.hex() "30820122300d06092a864886f70d01010105000382010f003082010a02820101" "009a56a5c11e2705d0bfe0cd1fa66d5e519095cc741b62ed99ddf129c32e046e" "5ba3958bb8a068b05a95a6a0623cc3c889b1581793cd84a34cc2307e0dd74c70" "b4f230c74e5063ecd8e906d372be4eba13f47d04427a717ac78cb12b4b9c2ab5" "591f36f98021a70f84d782c36c51819054228ff35a45efa3f82b27849ec89036" "26b4a4c4b40f9f74b79caf55253687124c79cb10cd3bc73f0c44fbd341e5417d" "2e85e900d22849d2bc85ca6bf037f1f5b4f9759b4b6942fccdf1140b30ea7557" "87deb5c373c5953c14d64b523959a76a32a599903974a98cf38d4aaac7e359f8" "6b00a91dcf424bf794592139e7097d7e65889259227c07155770276b6eda4cec" "370203010001" :param pk_bytes: the public key bytes :type pk_bytes: :obj:`str` or :obj:`~multiformats.varint.BytesLike` :raises ValueError: if ``pk_bytes`` is passed as a string and is not the hex-string of some bytes Raw hash digest, decoded from the multihash digest. Example usage: >>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA" >>> cid = CID.decode(s) >>> cid.raw_digest.hex() '6e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95' Returns a new CID obtained by setting new values for one or more of: ``base``, ``version``, or ``codec``. Example usage: >>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA" >>> cid = CID.decode(s) >>> cid CID('base58btc', 1, 'raw', '12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95') >>> cid.set(base="base32") CID('base32', 1, 'raw', '12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95') >>> cid.set(codec="dag-cbor") CID('base58btc', 1, 'dag-cbor', '12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95') >>> cid.set(version=0, codec="dag-pb") CID('base58btc', 0, 'dag-pb', '12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95') >>> cid CID('base58btc', 1, 'raw', '12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95') # Note: 'CID.set' returns new instances, # the original 'cid' instance is unchanged If setting ``version`` to 0, ``base`` must be 'base58btc' and ``codec`` must be 'dag-pb'. >>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA" >>> cid = CID.decode(s) >>> cid CID('base58btc', 1, 'raw', '12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95') >>> cid.set(version=0, codec="dag-pb") CID('base58btc', 0, 'dag-pb', '12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95') >>> cid.set(version=0) ValueError: CIDv0 multicodec must be 'dag-pb', found 'raw' instead. >>> cid.set(version=0, codec="dag-pb", base="base32") ValueError: CIDv0 multibase must be 'base58btc', found 'base32' instead :param base: the new CID multibase, or :obj:`None` if multibase unchanged :type base: :obj:`None`, :obj:`str` or :class:`~multiformats.multibase.Multibase`, *optional* :param version: the new CID version, or :obj:`None` if version unchanged :type version: :obj:`None`, 0 or 1, *optional* :param codec: the new content multicodec, or :obj:`None` if multicodec unchanged :type codec: :obj:`None`, :obj:`str` or :class:`~multiformats.multicodec.Multicodec`, *optional* :raises KeyError: if the multibase or multicodec are unknown CID version. Example usage: >>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA" >>> cid = CID.decode(s) >>> cid.version 1 Implementation of the `CID spec <https://github.com/multiformats/cid>`_. 
This module differs from other modules of :mod:`~multiformats`, in that the functionality is completely encapsulated by a single class :class:`CID`, which is imported from top level instead of the module itself: >>> from multiformats import CID CIDv0 to be decoded as base58btc CIDv0 may not be multibase encoded (0x12 is the first byte of sha2-256 multihashes) CIDv18 (first byte 18=0x12) will be skipped to prevent ambiguity pylint: disable = too-many-arguments pylint: disable = too-many-arguments use CID's own multibase as default if len(cid) == 34 and cid.startswith(b"\x12\x20"): CID version multicodec.get("dag-pb") multihash digest is what's left CID version multicodec multihash digest is what's left multicodec.get("libp2p-key")
14,288
en
0.435841
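A minimal usage sketch for the CID class above, using only behaviour and values that appear in its own doctests (decode, the version/codec/hashfun properties, encode with an explicit multibase, and set returning a new instance):

from multiformats import CID

cid = CID.decode("zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA")
assert cid.version == 1
assert cid.codec.name == "raw"
assert cid.hashfun.name == "sha2-256"

# Re-encode the same CID with a different multibase.
print(cid.encode("base32"))
# 'bafkreidon73zkcrwdb5iafqtijxildoonbwnpv7dyd6ef3qdgads2jc4su'

# Derive a new CID with a different default multibase; the original is unchanged.
cid32 = cid.set(base="base32")
assert str(cid32) == cid.encode("base32")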
import argparse import torch import os import numpy as np import random as rd from models import GCN from utils import get_folder_path from base_solver import BaseSolver MODEL = 'GCN' parser = argparse.ArgumentParser() # Dataset params parser.add_argument("--dataset", type=str, default='Movielens', help="") parser.add_argument("--dataset_name", type=str, default='1m', help="") parser.add_argument("--if_use_features", type=bool, default=False, help="") parser.add_argument("--num_core", type=int, default=10, help="") parser.add_argument("--num_feat_core", type=int, default=10, help="") parser.add_argument("--train_ratio", type=float, default=0.8, help="") # Model params parser.add_argument("--dropout", type=float, default=0.5, help="") parser.add_argument("--emb_dim", type=int, default=64, help="") parser.add_argument("--repr_dim", type=int, default=16, help="") parser.add_argument("--hidden_size", type=int, default=64, help="") # Train params parser.add_argument("--num_negative_samples", type=int, default=5, help="") parser.add_argument("--init_eval", type=bool, default=True, help="") parser.add_argument("--device", type=str, default='cuda', help="") parser.add_argument("--gpu_idx", type=str, default='0', help="") parser.add_argument("--runs", type=int, default=100, help="") parser.add_argument("--epochs", type=int, default=100, help="") parser.add_argument("--opt", type=str, default='adam', help="") parser.add_argument("--loss", type=str, default='mse', help="") parser.add_argument("--batch_size", type=int, default=4, help="") parser.add_argument("--lr", type=float, default=1e-4, help="") parser.add_argument("--weight_decay", type=float, default=1e-3, help="") parser.add_argument("--early_stopping", type=int, default=60, help="") parser.add_argument("--save_epochs", type=list, default=[10, 40, 80], help="") parser.add_argument("--save_every_epoch", type=int, default=40, help="") args = parser.parse_args() # Setup data and weights file path data_folder, weights_folder, logger_folder = \ get_folder_path(model=MODEL, dataset=args.dataset + args.dataset_name) # Setup device if not torch.cuda.is_available() or args.device == 'cpu': device = 'cpu' else: device = 'cuda:{}'.format(args.gpu_idx) # Setup args dataset_args = { 'root': data_folder, 'dataset': args.dataset, 'name': args.dataset_name, 'if_use_features': args.if_use_features, 'num_core': args.num_core, 'num_feat_core': args.num_feat_core, 'train_ratio': args.train_ratio } model_args = { 'if_use_features': args.if_use_features, 'emb_dim': args.emb_dim, 'hidden_size': args.hidden_size, 'repr_dim': args.repr_dim, 'dropout': args.dropout } train_args = { 'init_eval': args.init_eval, 'num_negative_samples': args.num_negative_samples, 'opt': args.opt, 'loss': args.loss, 'runs': args.runs, 'epochs': args.epochs, 'batch_size': args.batch_size, 'weight_decay': args.weight_decay, 'lr': args.lr, 'device': device, 'weights_folder': os.path.join(weights_folder, str(model_args)), 'logger_folder': os.path.join(logger_folder, str(model_args)), 'save_epochs': args.save_epochs, 'save_every_epoch': args.save_every_epoch } print('dataset params: {}'.format(dataset_args)) print('task params: {}'.format(model_args)) print('train params: {}'.format(train_args)) class GCNSolver(BaseSolver): def __init__(self, GCN, dataset_args, model_args, train_args): super(GCNSolver, self).__init__(GCN, dataset_args, model_args, train_args) def prepare_model_input(self, data, if_use_features=False): edge_index_np = np.hstack(data.edge_index_nps[0].values()) edge_index_np = 
np.hstack([edge_index_np, np.flip(edge_index_np, 0)]) edge_index = torch.from_numpy(edge_index_np).long().to(self.train_args['device']) kwargs = {'edge_index': edge_index} if if_use_features: kwargs['x'] = data.x return kwargs def train_negative_sampling(self, u_nid, train_pos_unid_inid_map, test_pos_unid_inid_map, neg_unid_inid_map, data): """ Unliked popular movie negative sampling: :param u_nid: :param train_pos_unid_inid_map: :param test_pos_unid_inid_map: :param neg_unid_inid_map: :param data: :return: """ num_pos_samples = len(train_pos_unid_inid_map[u_nid]) negative_inids = test_pos_unid_inid_map[u_nid] + neg_unid_inid_map[u_nid] nid_occs = np.array([data.item_nid_occs[0][nid] for nid in negative_inids]) nid_occs = nid_occs / np.sum(nid_occs) negative_inids = rd.choices(population=negative_inids, weights=nid_occs, k=num_pos_samples * 5) return negative_inids def generate_candidates(self, train_pos_unid_inid_map, test_pos_unid_inid_map, neg_unid_inid_map, u_nid): pos_i_nids = test_pos_unid_inid_map[u_nid] neg_i_nids = np.array(neg_unid_inid_map[u_nid]) neg_i_nids_indices = np.array(rd.sample(range(neg_i_nids.shape[0]), 99), dtype=int) return pos_i_nids, list(neg_i_nids[neg_i_nids_indices]) if __name__ == '__main__': solver = GCNSolver(GCN, dataset_args, model_args, train_args) solver.run()
benchmark/recsys/gcn_solver.py
5,204
Unliked popular movie negative sampling: :param u_nid: :param train_pos_unid_inid_map: :param test_pos_unid_inid_map: :param neg_unid_inid_map: :param data: :return: Dataset params Model params Train params Setup data and weights file path Setup device Setup args
265
en
0.487988
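A small standalone sketch of the popularity-weighted negative sampling performed by train_negative_sampling above; the item ids and occurrence counts here are illustrative placeholders, not values from the solver:

import numpy as np
import random as rd

# Hypothetical occurrence counts for five candidate negative items.
candidate_items = [11, 12, 13, 14, 15]
occurrences = np.array([50, 10, 5, 30, 5], dtype=float)

# Normalise occurrences into sampling weights, then draw negatives with
# replacement, as the solver does with k = num_pos_samples * 5.
weights = occurrences / occurrences.sum()
negatives = rd.choices(population=candidate_items, weights=weights, k=5)
print(negatives)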
"""add x,y to markers Revision ID: 20f14f4f1de7 Revises: 21b54c24a2c8 Create Date: 2018-10-01 23:27:21.307860 """ # revision identifiers, used by Alembic. revision = '20f14f4f1de7' down_revision = '21b54c24a2c8' branch_labels = None depends_on = None import sqlalchemy as sa from alembic import op def upgrade(): ### commands auto generated by Alembic - please adjust! ### op.add_column('markers', sa.Column('x', sa.Float(), nullable=True)) op.add_column('markers', sa.Column('y', sa.Float(), nullable=True)) ### end Alembic commands ### def downgrade(): ### commands auto generated by Alembic - please adjust! ### op.drop_column('markers', 'y') op.drop_column('markers', 'x') ### end Alembic commands ###
alembic/versions/20f14f4f1de7_add_x_y_to_markers.py
747
add x,y to markers Revision ID: 20f14f4f1de7 Revises: 21b54c24a2c8 Create Date: 2018-10-01 23:27:21.307860 revision identifiers, used by Alembic. commands auto generated by Alembic - please adjust! end Alembic commands commands auto generated by Alembic - please adjust! end Alembic commands
297
en
0.626255
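A hedged sketch of applying this revision programmatically with Alembic's Python API, assuming a standard alembic.ini at the project root (the equivalent CLI forms would be "alembic upgrade 20f14f4f1de7" and "alembic downgrade 21b54c24a2c8"):

from alembic import command
from alembic.config import Config

cfg = Config("alembic.ini")  # assumed location of the Alembic config

# Apply the markers x/y column migration, then roll it back.
command.upgrade(cfg, "20f14f4f1de7")
command.downgrade(cfg, "21b54c24a2c8")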
def mergeSort(_list):
    n = len(_list)
    if n > 1:
        mid = n // 2  # integer midpoint
        left = _list[:mid]
        right = _list[mid:]
        mergeSort(left)
        mergeSort(right)
        i = j = k = 0
        # compare the left and right halves, merging the smaller element first
        while i < len(left) and j < len(right):
            if left[i] < right[j]:
                _list[k] = left[i]
                i += 1
            else:
                _list[k] = right[j]
                j += 1
            k += 1
        # copy over whatever remains in either half
        while i < len(left):
            _list[k] = left[i]
            i += 1
            k += 1
        while j < len(right):
            _list[k] = right[j]
            j += 1
            k += 1
sorting/0853426_HW1_merge.py
735
integer midpoint compare the left and right halves, merging the smaller element first copy over whatever remains in either half
39
zh
0.680809
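A quick usage check for the in-place mergeSort above:

data = [5, 2, 9, 1, 5, 6]
mergeSort(data)
assert data == sorted([5, 2, 9, 1, 5, 6])
print(data)  # [1, 2, 5, 5, 6, 9]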
# # This file is part of SEQGIBBS # (https://github.com/I-Bouros/seqgibbs.git) which is released # under the MIT license. See accompanying LICENSE for copyright # notice and full license details. # import unittest import scipy.stats import numpy as np import numpy.testing as npt import seqgibbs as gibbs def fun(x): """ Function returning the parameters of the normal sampler. mean = product of elements of x variance = exp(|x|)/(1+exp(|x|)). """ return np.prod(x), np.exp(np.sum(x))/(np.exp(np.sum(x))+1) def another_fun(x): """ Function returning the parameters of the normal sampler. mean = sum of elements of x variance = exp(|x|)/(1+exp(|x|)). """ return np.sum(x), np.exp(np.sum(x))/(np.exp(np.sum(x))+1) class TestSysGibbsAlgoClass(unittest.TestCase): """ Test the 'SysGibbsAlgo' class. """ def test__init__(self): sampler = gibbs.SysGibbsAlgo(num_dim=2) self.assertEqual(sampler.num_dim, 2) self.assertEqual(len(sampler.one_d_samplers), 0) self.assertEqual(len(sampler.chain_states), 1) npt.assert_array_equal(sampler.initial_state, np.zeros(2)) npt.assert_array_equal(sampler.current_state, np.zeros(2)) with self.assertRaises(TypeError): gibbs.SysGibbsAlgo('0', np.ones(2)) with self.assertRaises(ValueError): gibbs.SysGibbsAlgo(0, np.ones(2)) with self.assertRaises(ValueError): gibbs.SysGibbsAlgo(3, np.ones(2)) with self.assertRaises(ValueError): gibbs.SysGibbsAlgo(2, [[1], [2]]) def test_change_initial_state(self): sampler = gibbs.SysGibbsAlgo(num_dim=2) sampler.change_initial_state(new_state=np.array([2, 0])) npt.assert_array_equal(sampler.initial_state, np.array([2, 0])) with self.assertRaises(ValueError): sampler.change_initial_state(new_state=np.array([[1], [2]])) with self.assertRaises(ValueError): sampler.change_initial_state(new_state=np.array([1, 2, 0])) def test_add_1_d_sampler(self): sampler = gibbs.SysGibbsAlgo(num_dim=2, initial_state=np.array([2, 3])) new_1_d_sampler = gibbs.OneDimSampler(scipy.stats.norm.rvs, fun) sampler.add_1_d_sampler(new_1_d_sampler) self.assertEqual(len(sampler.one_d_samplers), 1) with self.assertRaises(TypeError): sampler.add_1_d_sampler(0) def test_run(self): sampler = gibbs.SysGibbsAlgo( num_dim=2, initial_state=np.array([2, 3])) # Feed in the two partial conditional samplers first_1_d_sampler = gibbs.OneDimSampler(scipy.stats.norm.rvs, fun) second_1_d_sampler = gibbs.OneDimSampler( scipy.stats.norm.rvs, another_fun) sampler.add_1_d_sampler(first_1_d_sampler) sampler.add_1_d_sampler(second_1_d_sampler) # Run 3 complete scan cycles of the algorithm sampler.run(num_cycles=3) last_state = sampler.chain_states[-1] self.assertEqual(len(sampler.chain_states), 4) self.assertEqual(len(last_state), len(sampler.initial_state)) npt.assert_array_equal(last_state, sampler.current_state) # Run 3 more complete scan cycles of the algorithm sampler.run(num_cycles=3, mode='continue') self.assertEqual(len(sampler.chain_states), 7) # Rerun for 3 complete scan cycles of the algorithm sampler.run(num_cycles=3, mode='restart') self.assertEqual(len(sampler.chain_states), 4) with self.assertRaises(ValueError): sampler.run(num_cycles=3, mode='0') with self.assertRaises(TypeError): sampler.run(num_cycles=3.5) with self.assertRaises(ValueError): sampler.run(num_cycles=0, mode='restart') class TestRandGibbsAlgoClass(unittest.TestCase): """ Test the 'RandGibbsAlgo' class. 
""" def test__init__(self): sampler = gibbs.RandGibbsAlgo(num_dim=2) self.assertEqual(sampler.num_dim, 2) self.assertEqual(len(sampler.one_d_samplers), 0) self.assertEqual(len(sampler.chain_states), 1) npt.assert_array_equal(sampler.initial_state, np.zeros(2)) npt.assert_array_equal(sampler.current_state, np.zeros(2)) with self.assertRaises(ValueError): gibbs.RandGibbsAlgo(3, dimen_prob=np.ones(2)) with self.assertRaises(ValueError): gibbs.RandGibbsAlgo(2, dimen_prob=[[1], [2]]) def test_change_dimen_prob(self): sampler = gibbs.RandGibbsAlgo(num_dim=3) sampler.change_dimen_prob(new_probs=np.array([2, 0, 1])) npt.assert_array_equal( sampler.dimen_prob, np.array([2, 0, 1])/np.sum(np.array([2, 0, 1]))) with self.assertRaises(ValueError): sampler.change_dimen_prob(new_probs=np.array([[2], [0], [1]])) with self.assertRaises(ValueError): sampler.change_dimen_prob(new_probs=np.array([2, 1])) def test_run(self): sampler = gibbs.RandGibbsAlgo( num_dim=2, initial_state=np.array([2, 3]), dimen_prob=np.array([2, 5])) # Feed in the two partial conditional samplers first_1_d_sampler = gibbs.OneDimSampler(scipy.stats.norm.rvs, fun) second_1_d_sampler = gibbs.OneDimSampler( scipy.stats.norm.rvs, another_fun) sampler.add_1_d_sampler(first_1_d_sampler) sampler.add_1_d_sampler(second_1_d_sampler) # Run 3 complete scan cycles of the algorithm sampler.run(num_cycles=3) last_state = sampler.chain_states[-1] self.assertEqual(len(sampler.chain_states), 4) self.assertEqual(len(last_state), len(sampler.initial_state)) npt.assert_array_equal(last_state, sampler.current_state) # Run 3 more complete scan cycles of the algorithm sampler.run(num_cycles=3, mode='continue') self.assertEqual(len(sampler.chain_states), 7) # Rerun for 3 complete scan cycles of the algorithm sampler.run(num_cycles=3, mode='restart') self.assertEqual(len(sampler.chain_states), 4)
seqgibbs/tests/test_samplers.py
6,196
Test the 'RandGibbsAlgo' class. Test the 'SysGibbsAlgo' class. Function returning the parameters of the normal sampler. mean = sum of elements of x variance = exp(|x|)/(1+exp(|x|)). Function returning the parameters of the normal sampler. mean = product of elements of x variance = exp(|x|)/(1+exp(|x|)). This file is part of SEQGIBBS (https://github.com/I-Bouros/seqgibbs.git) which is released under the MIT license. See accompanying LICENSE for copyright notice and full license details. Feed in the two partial conditional samplers Run 3 complete scan cycles of the algorithm Run 3 more complete scan cycles of the algorithm Rerun for 3 complete scan cycles of the algorithm Feed in the two partial conditional samplers Run 3 complete scan cycles of the algorithm Run 3 more complete scan cycles of the algorithm Rerun for 3 complete scan cycles of the algorithm
884
en
0.778963
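A minimal sketch of driving the samplers these tests exercise, using only constructors and methods that appear in the tests themselves (fun and another_fun are the same conditional-parameter functions defined at the top of the test module):

import numpy as np
import scipy.stats
import seqgibbs as gibbs

def fun(x):
    # mean = product of elements, variance = exp(sum)/(1+exp(sum)), as in the tests
    return np.prod(x), np.exp(np.sum(x)) / (np.exp(np.sum(x)) + 1)

def another_fun(x):
    # mean = sum of elements, same variance form
    return np.sum(x), np.exp(np.sum(x)) / (np.exp(np.sum(x)) + 1)

sampler = gibbs.SysGibbsAlgo(num_dim=2, initial_state=np.array([2, 3]))
sampler.add_1_d_sampler(gibbs.OneDimSampler(scipy.stats.norm.rvs, fun))
sampler.add_1_d_sampler(gibbs.OneDimSampler(scipy.stats.norm.rvs, another_fun))

sampler.run(num_cycles=10)
chain = np.asarray(sampler.chain_states)  # initial state plus one state per cycle
print(chain.shape)  # expected (11, 2)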
"""Implementation of group based authorization API. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import grp import logging _LOGGER = logging.getLogger(__name__) def _group(template, resource, action, proid): """Render group template.""" return template.format( resource=resource, action=action, proid=proid ) class API: """Group based authorization REST api.""" def __init__(self, **kwargs): groups = kwargs.get('groups', []) for group in groups: _LOGGER.info('Using authorization template: %s', group) # TODO: add schema validation. def authorize(user, action, resource, resource_id, payload): """Authorize user/action/resource""" del payload _LOGGER.info( 'Authorize: %s %s %s %s', user, action, resource, resource_id ) proid = None if resource_id: proid = resource_id.partition('.')[0] why = [] for group_template in groups: group_name = _group( group_template, action=action, resource=resource, proid=proid ) _LOGGER.info('Check authorization group: %s', group_name) try: group = grp.getgrnam(group_name) username = user.partition('@')[0] members = group.gr_mem _LOGGER.info( 'Authorized: User %s is member of %s.', username, group_name ) if username in members: return True, why else: why.append( '{} not member of {}'.format( username, group_name ) ) except KeyError: _LOGGER.info('Group does not exist: %s', group_name) why.append('no such group: {}'.format(group_name)) return False, why self.authorize = authorize
lib/python/treadmill/api/authz/group.py
2,387
Group based authorization REST api. Render group template. Authorize user/action/resource Implementation of group based authorization API. TODO: add schema validation.
169
en
0.5706
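A hedged sketch of wiring up the group-template authorizer above. The template string, group name, user, and resource id here are hypothetical; the import path assumes treadmill is installed, and grp.getgrnam only succeeds for UNIX groups that actually exist on the host:

from treadmill.api.authz import group

# Hypothetical template: user 'alice@REALM' asking to 'create' an 'app' under
# proid 'proid1' is authorized if she belongs to group 'treadmill-proid1-app-create'.
authz = group.API(groups=['treadmill-{proid}-{resource}-{action}'])

allowed, why = authz.authorize(
    user='alice@REALM',
    action='create',
    resource='app',
    resource_id='proid1.myapp',
    payload=None,
)
print(allowed, why)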
#!/usr/bin/env python # -*- coding: utf-8 -*- ############################################################################### # # Copyright 2012-2018 EMBL - European Bioinformatics Institute # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Python Client Automatically generated with: # https://github.com/ebi-wp/webservice-clients-generator # # EMBOSS pepwindow (REST) web service Python client using xmltramp2. # # For further information see: # https://www.ebi.ac.uk/Tools/webservices/ # ############################################################################### from __future__ import print_function import os import sys import time import requests import platform from xmltramp2 import xmltramp from optparse import OptionParser try: from urllib.parse import urlparse, urlencode from urllib.request import urlopen, Request from urllib.error import HTTPError from urllib.request import __version__ as urllib_version except ImportError: from urlparse import urlparse from urllib import urlencode from urllib2 import urlopen, Request, HTTPError from urllib2 import __version__ as urllib_version # allow unicode(str) to be used in python 3 try: unicode('') except NameError: unicode = str # Base URL for service baseUrl = u'https://www.ebi.ac.uk/Tools/services/rest/emboss_pepwindow' version = u'2019-07-03 12:51' # Set interval for checking status pollFreq = 3 # Output level outputLevel = 1 # Debug level debugLevel = 0 # Number of option arguments. numOpts = len(sys.argv) # Process command-line options parser = OptionParser(add_help_option=False) # Tool specific options (Try to print all the commands automatically) parser.add_option('--sequence', type=str, help=('The sequence to be analysed can be entered directly into this form.' 'The sequence can be in GCG, FASTA, PIR, NBRF, PHYLIP or' 'UniProtKB/Swiss-Prot format. Partially formatted sequences are not' 'accepted..')) parser.add_option('--windowsize', type=int, help=('Window size for averaging (smoothing) the hydropathy plot. 
Use an' 'integer between 1 and 200.')) parser.add_option('--normalize', action='store_true', help=('Normalize data values (mean = 0.0, standard deviation = 1.0)')) # General options parser.add_option('-h', '--help', action='store_true', help='Show this help message and exit.') parser.add_option('--email', help='E-mail address.') parser.add_option('--title', help='Job title.') parser.add_option('--outfile', help='File name for results.') parser.add_option('--outformat', help='Output format for results.') parser.add_option('--asyncjob', action='store_true', help='Asynchronous mode.') parser.add_option('--jobid', help='Job identifier.') parser.add_option('--polljob', action="store_true", help='Get job result.') parser.add_option('--pollFreq', type='int', default=3, help='Poll frequency in seconds (default 3s).') parser.add_option('--status', action="store_true", help='Get job status.') parser.add_option('--resultTypes', action='store_true', help='Get result types.') parser.add_option('--params', action='store_true', help='List input parameters.') parser.add_option('--paramDetail', help='Get details for parameter.') parser.add_option('--quiet', action='store_true', help='Decrease output level.') parser.add_option('--verbose', action='store_true', help='Increase output level.') parser.add_option('--version', action='store_true', help='Prints out the version of the Client and exit.') parser.add_option('--debugLevel', type='int', default=debugLevel, help='Debugging level.') parser.add_option('--baseUrl', default=baseUrl, help='Base URL for service.') (options, args) = parser.parse_args() # Increase output level if options.verbose: outputLevel += 1 # Decrease output level if options.quiet: outputLevel -= 1 # Debug level if options.debugLevel: debugLevel = options.debugLevel if options.pollFreq: pollFreq = options.pollFreq if options.baseUrl: baseUrl = options.baseUrl # Debug print def printDebugMessage(functionName, message, level): if (level <= debugLevel): print(u'[' + functionName + u'] ' + message, file=sys.stderr) # User-agent for request (see RFC2616). def getUserAgent(): printDebugMessage(u'getUserAgent', u'Begin', 11) # Agent string for urllib2 library. urllib_agent = u'Python-urllib/%s' % urllib_version clientRevision = version # Prepend client specific agent string. try: pythonversion = platform.python_version() pythonsys = platform.system() except ValueError: pythonversion, pythonsys = "Unknown", "Unknown" user_agent = u'EBI-Sample-Client/%s (%s; Python %s; %s) %s' % ( clientRevision, os.path.basename(__file__), pythonversion, pythonsys, urllib_agent) printDebugMessage(u'getUserAgent', u'user_agent: ' + user_agent, 12) printDebugMessage(u'getUserAgent', u'End', 11) return user_agent # Wrapper for a REST (HTTP GET) request def restRequest(url): printDebugMessage(u'restRequest', u'Begin', 11) printDebugMessage(u'restRequest', u'url: ' + url, 11) try: # Set the User-agent. user_agent = getUserAgent() http_headers = {u'User-Agent': user_agent} req = Request(url, None, http_headers) # Make the request (HTTP GET). reqH = urlopen(req) resp = reqH.read() contenttype = reqH.info() if (len(resp) > 0 and contenttype != u"image/png;charset=UTF-8" and contenttype != u"image/jpeg;charset=UTF-8" and contenttype != u"application/gzip;charset=UTF-8"): try: result = unicode(resp, u'utf-8') except UnicodeDecodeError: result = resp else: result = resp reqH.close() # Errors are indicated by HTTP status codes. 
except HTTPError as ex: result = requests.get(url).content printDebugMessage(u'restRequest', u'End', 11) return result # Get input parameters list def serviceGetParameters(): printDebugMessage(u'serviceGetParameters', u'Begin', 1) requestUrl = baseUrl + u'/parameters' printDebugMessage(u'serviceGetParameters', u'requestUrl: ' + requestUrl, 2) xmlDoc = restRequest(requestUrl) doc = xmltramp.parse(xmlDoc) printDebugMessage(u'serviceGetParameters', u'End', 1) return doc[u'id':] # Print list of parameters def printGetParameters(): printDebugMessage(u'printGetParameters', u'Begin', 1) idList = serviceGetParameters() for id_ in idList: print(id_) printDebugMessage(u'printGetParameters', u'End', 1) # Get input parameter information def serviceGetParameterDetails(paramName): printDebugMessage(u'serviceGetParameterDetails', u'Begin', 1) printDebugMessage(u'serviceGetParameterDetails', u'paramName: ' + paramName, 2) requestUrl = baseUrl + u'/parameterdetails/' + paramName printDebugMessage(u'serviceGetParameterDetails', u'requestUrl: ' + requestUrl, 2) xmlDoc = restRequest(requestUrl) doc = xmltramp.parse(xmlDoc) printDebugMessage(u'serviceGetParameterDetails', u'End', 1) return doc # Print description of a parameter def printGetParameterDetails(paramName): printDebugMessage(u'printGetParameterDetails', u'Begin', 1) doc = serviceGetParameterDetails(paramName) print(unicode(doc.name) + u"\t" + unicode(doc.type)) print(doc.description) if hasattr(doc, 'values'): for value in doc.values: print(value.value) if unicode(value.defaultValue) == u'true': print(u'default') print(u"\t" + unicode(value.label)) if hasattr(value, u'properties'): for wsProperty in value.properties: print(u"\t" + unicode(wsProperty.key) + u"\t" + unicode(wsProperty.value)) printDebugMessage(u'printGetParameterDetails', u'End', 1) # Submit job def serviceRun(email, title, params): printDebugMessage(u'serviceRun', u'Begin', 1) # Insert e-mail and title into params params[u'email'] = email if title: params[u'title'] = title requestUrl = baseUrl + u'/run/' printDebugMessage(u'serviceRun', u'requestUrl: ' + requestUrl, 2) # Get the data for the other options requestData = urlencode(params) printDebugMessage(u'serviceRun', u'requestData: ' + requestData, 2) # Errors are indicated by HTTP status codes. try: # Set the HTTP User-agent. user_agent = getUserAgent() http_headers = {u'User-Agent': user_agent} req = Request(requestUrl, None, http_headers) # Make the submission (HTTP POST). 
reqH = urlopen(req, requestData.encode(encoding=u'utf_8', errors=u'strict')) jobId = unicode(reqH.read(), u'utf-8') reqH.close() except HTTPError as ex: print(xmltramp.parse(unicode(ex.read(), u'utf-8'))[0][0]) quit() printDebugMessage(u'serviceRun', u'jobId: ' + jobId, 2) printDebugMessage(u'serviceRun', u'End', 1) return jobId # Get job status def serviceGetStatus(jobId): printDebugMessage(u'serviceGetStatus', u'Begin', 1) printDebugMessage(u'serviceGetStatus', u'jobId: ' + jobId, 2) requestUrl = baseUrl + u'/status/' + jobId printDebugMessage(u'serviceGetStatus', u'requestUrl: ' + requestUrl, 2) status = restRequest(requestUrl) printDebugMessage(u'serviceGetStatus', u'status: ' + status, 2) printDebugMessage(u'serviceGetStatus', u'End', 1) return status # Print the status of a job def printGetStatus(jobId): printDebugMessage(u'printGetStatus', u'Begin', 1) status = serviceGetStatus(jobId) if outputLevel > 0: print("Getting status for job %s" % jobId) print(status) if outputLevel > 0 and status == "FINISHED": print("To get results: python %s --polljob --jobid %s" "" % (os.path.basename(__file__), jobId)) printDebugMessage(u'printGetStatus', u'End', 1) # Get available result types for job def serviceGetResultTypes(jobId): printDebugMessage(u'serviceGetResultTypes', u'Begin', 1) printDebugMessage(u'serviceGetResultTypes', u'jobId: ' + jobId, 2) requestUrl = baseUrl + u'/resulttypes/' + jobId printDebugMessage(u'serviceGetResultTypes', u'requestUrl: ' + requestUrl, 2) xmlDoc = restRequest(requestUrl) doc = xmltramp.parse(xmlDoc) printDebugMessage(u'serviceGetResultTypes', u'End', 1) return doc[u'type':] # Print list of available result types for a job. def printGetResultTypes(jobId): printDebugMessage(u'printGetResultTypes', u'Begin', 1) if outputLevel > 0: print("Getting result types for job %s" % jobId) resultTypeList = serviceGetResultTypes(jobId) if outputLevel > 0: print("Available result types:") for resultType in resultTypeList: print(resultType[u'identifier']) if hasattr(resultType, u'label'): print(u"\t", resultType[u'label']) if hasattr(resultType, u'description'): print(u"\t", resultType[u'description']) if hasattr(resultType, u'mediaType'): print(u"\t", resultType[u'mediaType']) if hasattr(resultType, u'fileSuffix'): print(u"\t", resultType[u'fileSuffix']) if outputLevel > 0: print("To get results:\n python %s --polljob --jobid %s\n" " python %s --polljob --outformat <type> --jobid %s" "" % (os.path.basename(__file__), jobId, os.path.basename(__file__), jobId)) printDebugMessage(u'printGetResultTypes', u'End', 1) # Get result def serviceGetResult(jobId, type_): printDebugMessage(u'serviceGetResult', u'Begin', 1) printDebugMessage(u'serviceGetResult', u'jobId: ' + jobId, 2) printDebugMessage(u'serviceGetResult', u'type_: ' + type_, 2) requestUrl = baseUrl + u'/result/' + jobId + u'/' + type_ result = restRequest(requestUrl) printDebugMessage(u'serviceGetResult', u'End', 1) return result # Client-side poll def clientPoll(jobId): printDebugMessage(u'clientPoll', u'Begin', 1) result = u'PENDING' while result == u'RUNNING' or result == u'PENDING': result = serviceGetStatus(jobId) if outputLevel > 0: print(result) if result == u'RUNNING' or result == u'PENDING': time.sleep(pollFreq) printDebugMessage(u'clientPoll', u'End', 1) # Get result for a jobid # Allows more than one output file written when 'outformat' is defined. 
def getResult(jobId): printDebugMessage(u'getResult', u'Begin', 1) printDebugMessage(u'getResult', u'jobId: ' + jobId, 1) if outputLevel > 1: print("Getting results for job %s" % jobId) # Check status and wait if necessary clientPoll(jobId) # Get available result types resultTypes = serviceGetResultTypes(jobId) for resultType in resultTypes: # Derive the filename for the result if options.outfile: filename = (options.outfile + u'.' + unicode(resultType[u'identifier']) + u'.' + unicode(resultType[u'fileSuffix'])) else: filename = (jobId + u'.' + unicode(resultType[u'identifier']) + u'.' + unicode(resultType[u'fileSuffix'])) # Write a result file outformat_parm = str(options.outformat).split(',') for outformat_type in outformat_parm: outformat_type = outformat_type.replace(' ', '') if outformat_type == 'None': outformat_type = None if not outformat_type or outformat_type == unicode(resultType[u'identifier']): if outputLevel > 1: print("Getting %s" % unicode(resultType[u'identifier'])) # Get the result result = serviceGetResult(jobId, unicode(resultType[u'identifier'])) if (unicode(resultType[u'mediaType']) == u"image/png" or unicode(resultType[u'mediaType']) == u"image/jpeg" or unicode(resultType[u'mediaType']) == u"application/gzip"): fmode = 'wb' else: fmode = 'w' try: fh = open(filename, fmode) fh.write(result) fh.close() except TypeError: fh.close() fh = open(filename, "wb") fh.write(result) fh.close() if outputLevel > 0: print("Creating result file: " + filename) printDebugMessage(u'getResult', u'End', 1) # Read a file def readFile(filename): printDebugMessage(u'readFile', u'Begin', 1) fh = open(filename, 'r') data = fh.read() fh.close() printDebugMessage(u'readFile', u'End', 1) return data def print_usage(): print("""\ EMBL-EBI EMBOSS pepwindow Python Client: Sequence statistics and plots with pepwindow. [Required (for job submission)] --email E-mail address. --sequence The sequence to be analysed can be entered directly into this form. The sequence can be in GCG, FASTA, PIR, NBRF, PHYLIP or UniProtKB/Swiss-Prot format. Partially formatted sequences are not accepted. [Optional] --windowsize Window size for averaging (smoothing) the hydropathy plot. Use an integer between 1 and 200. --normalize Normalize data values (mean = 0.0, standard deviation = 1.0). [General] -h, --help Show this help message and exit. --asyncjob Forces to make an asynchronous query. --title Title for job. --status Get job status. --resultTypes Get available result types for job. --polljob Poll for the status of a job. --pollFreq Poll frequency in seconds (default 3s). --jobid JobId that was returned when an asynchronous job was submitted. --outfile File name for results (default is JobId; for STDOUT). --outformat Result format(s) to retrieve. It accepts comma-separated values. --params List input parameters. --paramDetail Display details for input parameter. --verbose Increase output. --version Prints out the version of the Client and exit. --quiet Decrease output. --baseUrl Base URL. Defaults to: https://www.ebi.ac.uk/Tools/services/rest/emboss_pepwindow Synchronous job: The results/errors are returned as soon as the job is finished. Usage: python emboss_pepwindow.py --email <your@email.com> [options...] <SeqFile|SeqID(s)> Returns: results as an attachment Asynchronous job: Use this if you want to retrieve the results at a later time. The results are stored for up to 24 hours. Usage: python emboss_pepwindow.py --asyncjob --email <your@email.com> [options...] 
<SeqFile|SeqID(s)> Returns: jobid Check status of Asynchronous job: Usage: python emboss_pepwindow.py --status --jobid <jobId> Retrieve job data: Use the jobid to query for the status of the job. If the job is finished, it also returns the results/errors. Usage: python emboss_pepwindow.py --polljob --jobid <jobId> [--outfile string] Returns: string indicating the status of the job and if applicable, results as an attachment. Further information: https://www.ebi.ac.uk/Tools/webservices and https://github.com/ebi-wp/webservice-clients Support/Feedback: https://www.ebi.ac.uk/support/""") # No options... print help. if numOpts < 2: print_usage() elif options.help: print_usage() # List parameters elif options.params: printGetParameters() # Get parameter details elif options.paramDetail: printGetParameterDetails(options.paramDetail) # Print Client version elif options.version: print("Revision: %s" % version) sys.exit() # Submit job elif options.email and not options.jobid: params = {} if len(args) == 1 and "true" not in args and "false" not in args: if os.path.exists(args[0]): # Read file into content params[u'sequence'] = readFile(args[0]) else: # Argument is a sequence id params[u'sequence'] = args[0] elif len(args) == 2 and "true" not in args and "false" not in args: if os.path.exists(args[0]) and os.path.exists(args[1]): # Read file into content params[u'asequence'] = readFile(args[0]) params[u'bsequence'] = readFile(args[1]) else: # Argument is a sequence id params[u'asequence'] = args[0] params[u'bsequence'] = args[0] elif hasattr(options, "sequence") or (hasattr(options, "asequence") and hasattr(options, "bsequence")): # Specified via option if hasattr(options, "sequence"): if os.path.exists(options.sequence): # Read file into content params[u'sequence'] = readFile(options.sequence) else: # Argument is a sequence id params[u'sequence'] = options.sequence elif hasattr(options, "asequence") and hasattr(options, "bsequence"): if os.path.exists(options.asequence) and os.path.exists(options.bsequence): # Read file into content params[u'asequence'] = readFile(options.asequence) params[u'bsequence'] = readFile(options.bsequence) else: # Argument is a sequence id params[u'asequence'] = options.asequence params[u'bsequence'] = options.bsequence # Pass default values and fix bools (without default value) if options.windowsize: params['windowsize'] = options.windowsize if not options.normalize: params['normalize'] = 'false' if options.normalize: params['normalize'] = options.normalize # Submit the job jobId = serviceRun(options.email, options.title, params) if options.asyncjob: # Async mode print(jobId) if outputLevel > 0: print("To check status: python %s --status --jobid %s" "" % (os.path.basename(__file__), jobId)) else: # Sync mode if outputLevel > 0: print("JobId: " + jobId, file=sys.stderr) else: print(jobId) time.sleep(pollFreq) getResult(jobId) # Get job status elif options.jobid and options.status: printGetStatus(options.jobid) elif options.jobid and (options.resultTypes or options.polljob): status = serviceGetStatus(options.jobid) if status == 'PENDING' or status == 'RUNNING': print("Error: Job status is %s. " "To get result types the job must be finished." % status) quit() # List result types for job if options.resultTypes: printGetResultTypes(options.jobid) # Get results for job elif options.polljob: getResult(options.jobid) else: # Checks for 'email' parameter if not options.email: print('\nParameter "--email" is missing in your command. 
It is required!\n') print(u'Error: unrecognised argument combination', file=sys.stderr) print_usage()
python/emboss_pepwindow.py
21,872
!/usr/bin/env python -*- coding: utf-8 -*- Copyright 2012-2018 EMBL - European Bioinformatics Institute Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Python Client Automatically generated with: https://github.com/ebi-wp/webservice-clients-generator EMBOSS pepwindow (REST) web service Python client using xmltramp2. For further information see: https://www.ebi.ac.uk/Tools/webservices/ allow unicode(str) to be used in python 3 Base URL for service Set interval for checking status Output level Debug level Number of option arguments. Process command-line options Tool specific options (Try to print all the commands automatically) General options Increase output level Decrease output level Debug level Debug print User-agent for request (see RFC2616). Agent string for urllib2 library. Prepend client specific agent string. Wrapper for a REST (HTTP GET) request Set the User-agent. Make the request (HTTP GET). Errors are indicated by HTTP status codes. Get input parameters list Print list of parameters Get input parameter information Print description of a parameter Submit job Insert e-mail and title into params Get the data for the other options Errors are indicated by HTTP status codes. Set the HTTP User-agent. Make the submission (HTTP POST). Get job status Print the status of a job Get available result types for job Print list of available result types for a job. Get result Client-side poll Get result for a jobid Allows more than one output file written when 'outformat' is defined. Check status and wait if necessary Get available result types Derive the filename for the result Write a result file Get the result Read a file No options... print help. List parameters Get parameter details Print Client version Submit job Read file into content Argument is a sequence id Read file into content Argument is a sequence id Specified via option Read file into content Argument is a sequence id Read file into content Argument is a sequence id Pass default values and fix bools (without default value) Submit the job Async mode Sync mode Get job status List result types for job Get results for job Checks for 'email' parameter
2,616
en
0.676914
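The pepwindow client above wraps the usual EBI job-dispatcher cycle: POST the parameters to /run, poll /status/{jobId} until the job leaves the PENDING/RUNNING states, then download each result type from /result/{jobId}/{type}. Below is a minimal standard-library sketch of that cycle; the endpoint layout mirrors the base URL documented in the help text, while the helper names and the 'out' result type are illustrative assumptions rather than part of the generated client.

# Minimal sketch of the submit / poll / fetch cycle used by the EBI REST clients.
# The /run, /status and /result paths follow the base URL documented above;
# function names and the "out" result type are illustrative only.
import time
import urllib.parse
import urllib.request

BASE_URL = "https://www.ebi.ac.uk/Tools/services/rest/emboss_pepwindow"

def submit_job(email: str, sequence: str, title: str = "") -> str:
    data = urllib.parse.urlencode(
        {"email": email, "title": title, "sequence": sequence}
    ).encode()
    with urllib.request.urlopen(BASE_URL + "/run", data=data) as resp:
        return resp.read().decode()          # the service answers with the job id as plain text

def job_status(job_id: str) -> str:
    with urllib.request.urlopen(f"{BASE_URL}/status/{job_id}") as resp:
        return resp.read().decode().strip()  # e.g. PENDING, RUNNING, FINISHED, ERROR

def fetch_result(job_id: str, result_type: str) -> bytes:
    with urllib.request.urlopen(f"{BASE_URL}/result/{job_id}/{result_type}") as resp:
        return resp.read()

def run_sync(email: str, sequence: str, poll_freq: float = 3.0) -> bytes:
    job_id = submit_job(email, sequence)
    status = job_status(job_id)
    while status in ("PENDING", "RUNNING"):
        time.sleep(poll_freq)                # same default interval as the client above
        status = job_status(job_id)
    if status != "FINISHED":
        raise RuntimeError(f"job {job_id} ended with status {status}")
    return fetch_result(job_id, "out")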
# ccm node from __future__ import absolute_import, with_statement import os import re import shutil import signal import stat import subprocess import time import yaml from six import iteritems, print_ from ccmlib import common, extension from ccmlib.node import Node, NodeError, ToolError class DseNode(Node): """ Provides interactions to a DSE node. """ def __init__(self, name, cluster, auto_bootstrap, thrift_interface, storage_interface, jmx_port, remote_debug_port, initial_token, save=True, binary_interface=None, byteman_port='0', environment_variables=None): super(DseNode, self).__init__(name, cluster, auto_bootstrap, thrift_interface, storage_interface, jmx_port, remote_debug_port, initial_token, save, binary_interface, byteman_port, environment_variables=environment_variables) self.get_cassandra_version() self._dse_config_options = {} if self.cluster.hasOpscenter(): self._copy_agent() def get_install_cassandra_root(self): return os.path.join(self.get_install_dir(), 'resources', 'cassandra') def get_node_cassandra_root(self): return os.path.join(self.get_path(), 'resources', 'cassandra') def get_conf_dir(self): """ Returns the path to the directory where Cassandra config are located """ return os.path.join(self.get_path(), 'resources', 'cassandra', 'conf') def get_tool(self, toolname): return common.join_bin(os.path.join(self.get_install_dir(), 'resources', 'cassandra'), 'bin', toolname) def get_tool_args(self, toolname): return [common.join_bin(os.path.join(self.get_install_dir(), 'resources', 'cassandra'), 'bin', 'dse'), toolname] def get_env(self): (node_ip, _) = self.network_interfaces['binary'] return common.make_dse_env(self.get_install_dir(), self.get_path(), node_ip) def get_cassandra_version(self): return common.get_dse_cassandra_version(self.get_install_dir()) def set_workloads(self, workloads): self.workloads = workloads self._update_config() if 'solr' in self.workloads: self.__generate_server_xml() if 'graph' in self.workloads: (node_ip, _) = self.network_interfaces['binary'] conf_file = os.path.join(self.get_path(), 'resources', 'dse', 'conf', 'dse.yaml') with open(conf_file, 'r') as f: data = yaml.load(f) graph_options = data['graph'] graph_options['gremlin_server']['host'] = node_ip self.set_dse_configuration_options({'graph': graph_options}) self.__update_gremlin_config_yaml() if 'dsefs' in self.workloads: dsefs_options = {'dsefs_options': {'enabled': 'true', 'work_dir': os.path.join(self.get_path(), 'dsefs'), 'data_directories': [{'dir': os.path.join(self.get_path(), 'dsefs', 'data')}]}} self.set_dse_configuration_options(dsefs_options) if 'spark' in self.workloads: self._update_spark_env() def set_dse_configuration_options(self, values=None): if values is not None: for k, v in iteritems(values): self._dse_config_options[k] = v self.import_dse_config_files() def watch_log_for_alive(self, nodes, from_mark=None, timeout=720, filename='system.log'): """ Watch the log of this node until it detects that the provided other nodes are marked UP. This method works similarly to watch_log_for_death. We want to provide a higher default timeout when this is called on DSE. 
""" super(DseNode, self).watch_log_for_alive(nodes, from_mark=from_mark, timeout=timeout, filename=filename) def get_launch_bin(self): cdir = self.get_install_dir() launch_bin = common.join_bin(cdir, 'bin', 'dse') # Copy back the dse scripts since profiling may have modified it the previous time shutil.copy(launch_bin, self.get_bin_dir()) return common.join_bin(self.get_path(), 'bin', 'dse') def add_custom_launch_arguments(self, args): args.append('cassandra') for workload in self.workloads: if 'hadoop' in workload: args.append('-t') if 'solr' in workload: args.append('-s') if 'spark' in workload: args.append('-k') if 'cfs' in workload: args.append('-c') if 'graph' in workload: args.append('-g') def start(self, join_ring=True, no_wait=False, verbose=False, update_pid=True, wait_other_notice=True, replace_token=None, replace_address=None, jvm_args=None, wait_for_binary_proto=False, profile_options=None, use_jna=False, quiet_start=False, allow_root=False, set_migration_task=True): process = super(DseNode, self).start(join_ring, no_wait, verbose, update_pid, wait_other_notice, replace_token, replace_address, jvm_args, wait_for_binary_proto, profile_options, use_jna, quiet_start, allow_root, set_migration_task) if self.cluster.hasOpscenter(): self._start_agent() def _start_agent(self): agent_dir = os.path.join(self.get_path(), 'datastax-agent') if os.path.exists(agent_dir): self._write_agent_address_yaml(agent_dir) self._write_agent_log4j_properties(agent_dir) args = [os.path.join(agent_dir, 'bin', common.platform_binary('datastax-agent'))] subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) def stop(self, wait=True, wait_other_notice=False, gently=True): if self.cluster.hasOpscenter(): self._stop_agent() return super(DseNode, self).stop(wait, wait_other_notice, gently) def _stop_agent(self): agent_dir = os.path.join(self.get_path(), 'datastax-agent') if os.path.exists(agent_dir): pidfile = os.path.join(agent_dir, 'datastax-agent.pid') if os.path.exists(pidfile): with open(pidfile, 'r') as f: pid = int(f.readline().strip()) f.close() if pid is not None: try: os.kill(pid, signal.SIGKILL) except OSError: pass os.remove(pidfile) def nodetool(self, cmd, username=None, password=None, capture_output=True, wait=True): if password is not None: cmd = '-pw {} '.format(password) + cmd if username is not None: cmd = '-u {} '.format(username) + cmd return super(DseNode, self).nodetool(cmd) def dsetool(self, cmd): env = self.get_env() extension.append_to_client_env(self, env) dsetool = common.join_bin(self.get_install_dir(), 'bin', 'dsetool') args = [dsetool, '-h', 'localhost', '-j', str(self.jmx_port)] args += cmd.split() p = subprocess.Popen(args, env=env) p.wait() def dse(self, dse_options=None): if dse_options is None: dse_options = [] env = self.get_env() extension.append_to_client_env(self, env) env['JMX_PORT'] = self.jmx_port dse = common.join_bin(self.get_install_dir(), 'bin', 'dse') args = [dse] args += dse_options p = subprocess.Popen(args, env=env) p.wait() def hadoop(self, hadoop_options=None): if hadoop_options is None: hadoop_options = [] env = self.get_env() env['JMX_PORT'] = self.jmx_port dse = common.join_bin(self.get_install_dir(), 'bin', 'dse') args = [dse, 'hadoop'] args += hadoop_options p = subprocess.Popen(args, env=env) p.wait() def hive(self, hive_options=None): if hive_options is None: hive_options = [] env = self.get_env() env['JMX_PORT'] = self.jmx_port dse = common.join_bin(self.get_install_dir(), 'bin', 'dse') args = [dse, 'hive'] args += hive_options p = 
subprocess.Popen(args, env=env) p.wait() def pig(self, pig_options=None): if pig_options is None: pig_options = [] env = self.get_env() env['JMX_PORT'] = self.jmx_port dse = common.join_bin(self.get_install_dir(), 'bin', 'dse') args = [dse, 'pig'] args += pig_options p = subprocess.Popen(args, env=env) p.wait() def sqoop(self, sqoop_options=None): if sqoop_options is None: sqoop_options = [] env = self.get_env() env['JMX_PORT'] = self.jmx_port dse = common.join_bin(self.get_install_dir(), 'bin', 'dse') args = [dse, 'sqoop'] args += sqoop_options p = subprocess.Popen(args, env=env) p.wait() def spark(self, spark_options=None): if spark_options is None: spark_options = [] env = self.get_env() env['JMX_PORT'] = self.jmx_port dse = common.join_bin(self.get_install_dir(), 'bin', 'dse') args = [dse, 'spark'] args += spark_options p = subprocess.Popen(args, env=env) p.wait() def import_dse_config_files(self): self._update_config() if not os.path.isdir(os.path.join(self.get_path(), 'resources', 'dse', 'conf')): os.makedirs(os.path.join(self.get_path(), 'resources', 'dse', 'conf')) common.copy_directory(os.path.join(self.get_install_dir(), 'resources', 'dse', 'conf'), os.path.join(self.get_path(), 'resources', 'dse', 'conf')) self.__update_yaml() def copy_config_files(self): for product in ['dse', 'cassandra', 'hadoop', 'hadoop2-client', 'sqoop', 'hive', 'tomcat', 'spark', 'shark', 'mahout', 'pig', 'solr', 'graph']: src_conf = os.path.join(self.get_install_dir(), 'resources', product, 'conf') dst_conf = os.path.join(self.get_path(), 'resources', product, 'conf') if not os.path.isdir(src_conf): continue if os.path.isdir(dst_conf): common.rmdirs(dst_conf) shutil.copytree(src_conf, dst_conf) if product == 'solr': src_web = os.path.join(self.get_install_dir(), 'resources', product, 'web') dst_web = os.path.join(self.get_path(), 'resources', product, 'web') if os.path.isdir(dst_web): common.rmdirs(dst_web) shutil.copytree(src_web, dst_web) if product == 'tomcat': src_lib = os.path.join(self.get_install_dir(), 'resources', product, 'lib') dst_lib = os.path.join(self.get_path(), 'resources', product, 'lib') if os.path.isdir(dst_lib): common.rmdirs(dst_lib) if os.path.exists(src_lib): shutil.copytree(src_lib, dst_lib) src_webapps = os.path.join(self.get_install_dir(), 'resources', product, 'webapps') dst_webapps = os.path.join(self.get_path(), 'resources', product, 'webapps') if os.path.isdir(dst_webapps): common.rmdirs(dst_webapps) shutil.copytree(src_webapps, dst_webapps) src_lib = os.path.join(self.get_install_dir(), 'resources', product, 'gremlin-console', 'conf') dst_lib = os.path.join(self.get_path(), 'resources', product, 'gremlin-console', 'conf') if os.path.isdir(dst_lib): common.rmdirs(dst_lib) if os.path.exists(src_lib): shutil.copytree(src_lib, dst_lib) def import_bin_files(self): cassandra_bin_dir = os.path.join(self.get_path(), 'resources', 'cassandra', 'bin') shutil.rmtree(cassandra_bin_dir, ignore_errors=True) os.makedirs(cassandra_bin_dir) common.copy_directory(os.path.join(self.get_install_dir(), 'bin'), self.get_bin_dir()) common.copy_directory(os.path.join(self.get_install_dir(), 'resources', 'cassandra', 'bin'), cassandra_bin_dir) self.export_dse_home_in_dse_env_sh() def export_dse_home_in_dse_env_sh(self): ''' Due to the way CCM lays out files, separating the repository from the node(s) confs, the `dse-env.sh` script of each node needs to have its DSE_HOME var set and exported. 
Since DSE 4.5.x, the stock `dse-env.sh` file includes a commented-out place to do exactly this, intended for installers. Basically: read in the file, write it back out and add the two lines. 'sstableloader' is an example of a node script that depends on this, when used in a CCM-built cluster. ''' with open(self.get_bin_dir() + "/dse-env.sh", "r") as dse_env_sh: buf = dse_env_sh.readlines() with open(self.get_bin_dir() + "/dse-env.sh", "w") as out_file: for line in buf: out_file.write(line) if line == "# This is here so the installer can force set DSE_HOME\n": out_file.write("DSE_HOME=" + self.get_install_dir() + "\nexport DSE_HOME\n") def _update_log4j(self): super(DseNode, self)._update_log4j() conf_file = os.path.join(self.get_conf_dir(), common.LOG4J_CONF) append_pattern = 'log4j.appender.V.File=' log_file = os.path.join(self.get_path(), 'logs', 'solrvalidation.log') if common.is_win(): log_file = re.sub("\\\\", "/", log_file) common.replace_in_file(conf_file, append_pattern, append_pattern + log_file) append_pattern = 'log4j.appender.A.File=' log_file = os.path.join(self.get_path(), 'logs', 'audit.log') if common.is_win(): log_file = re.sub("\\\\", "/", log_file) common.replace_in_file(conf_file, append_pattern, append_pattern + log_file) append_pattern = 'log4j.appender.B.File=' log_file = os.path.join(self.get_path(), 'logs', 'audit', 'dropped-events.log') if common.is_win(): log_file = re.sub("\\\\", "/", log_file) common.replace_in_file(conf_file, append_pattern, append_pattern + log_file) def __update_yaml(self): conf_file = os.path.join(self.get_path(), 'resources', 'dse', 'conf', 'dse.yaml') with open(conf_file, 'r') as f: data = yaml.load(f) data['system_key_directory'] = os.path.join(self.get_path(), 'keys') # Get a map of combined cluster and node configuration with the node # configuration taking precedence. full_options = common.merge_configuration( self.cluster._dse_config_options, self._dse_config_options, delete_empty=False) # Merge options with original yaml data. 
data = common.merge_configuration(data, full_options) with open(conf_file, 'w') as f: yaml.safe_dump(data, f, default_flow_style=False) def __generate_server_xml(self): server_xml = os.path.join(self.get_path(), 'resources', 'tomcat', 'conf', 'server.xml') if os.path.isfile(server_xml): os.remove(server_xml) with open(server_xml, 'w+') as f: f.write('<Server port="8005" shutdown="SHUTDOWN">\n') f.write(' <Service name="Solr">\n') f.write(' <Connector port="8983" address="%s" protocol="HTTP/1.1" connectionTimeout="20000" maxThreads = "200" URIEncoding="UTF-8"/>\n' % self.network_interfaces['thrift'][0]) f.write(' <Engine name="Solr" defaultHost="localhost">\n') f.write(' <Host name="localhost" appBase="../solr/web"\n') f.write(' unpackWARs="true" autoDeploy="true"\n') f.write(' xmlValidation="false" xmlNamespaceAware="false">\n') f.write(' </Host>\n') f.write(' </Engine>\n') f.write(' </Service>\n') f.write('</Server>\n') f.close() def __update_gremlin_config_yaml(self): (node_ip, _) = self.network_interfaces['binary'] conf_file = os.path.join(self.get_path(), 'resources', 'graph', 'gremlin-console', 'conf', 'remote.yaml') with open(conf_file, 'r') as f: data = yaml.load(f) data['hosts'] = [node_ip] with open(conf_file, 'w') as f: yaml.safe_dump(data, f, default_flow_style=False) def _get_directories(self): dirs = [] for i in ['data', 'commitlogs', 'saved_caches', 'logs', 'bin', 'keys', 'resources', os.path.join('data', 'hints')]: dirs.append(os.path.join(self.get_path(), i)) return dirs def _copy_agent(self): agent_source = os.path.join(self.get_install_dir(), 'datastax-agent') agent_target = os.path.join(self.get_path(), 'datastax-agent') if os.path.exists(agent_source) and not os.path.exists(agent_target): shutil.copytree(agent_source, agent_target) def _write_agent_address_yaml(self, agent_dir): address_yaml = os.path.join(agent_dir, 'conf', 'address.yaml') if not os.path.exists(address_yaml): with open(address_yaml, 'w+') as f: (ip, port) = self.network_interfaces['thrift'] jmx = self.jmx_port f.write('stomp_interface: 127.0.0.1\n') f.write('local_interface: %s\n' % ip) f.write('agent_rpc_interface: %s\n' % ip) f.write('agent_rpc_broadcast_address: %s\n' % ip) f.write('cassandra_conf: %s\n' % os.path.join(self.get_path(), 'resources', 'cassandra', 'conf', 'cassandra.yaml')) f.write('cassandra_install: %s\n' % self.get_path()) f.write('cassandra_logs: %s\n' % os.path.join(self.get_path(), 'logs')) f.write('thrift_port: %s\n' % port) f.write('jmx_port: %s\n' % jmx) f.close() def _write_agent_log4j_properties(self, agent_dir): log4j_properties = os.path.join(agent_dir, 'conf', 'log4j.properties') with open(log4j_properties, 'w+') as f: f.write('log4j.rootLogger=INFO,R\n') f.write('log4j.logger.org.apache.http=OFF\n') f.write('log4j.logger.org.eclipse.jetty.util.log=WARN,R\n') f.write('log4j.appender.R=org.apache.log4j.RollingFileAppender\n') f.write('log4j.appender.R.maxFileSize=20MB\n') f.write('log4j.appender.R.maxBackupIndex=5\n') f.write('log4j.appender.R.layout=org.apache.log4j.PatternLayout\n') f.write('log4j.appender.R.layout.ConversionPattern=%5p [%t] %d{ISO8601} %m%n\n') f.write('log4j.appender.R.File=./log/agent.log\n') f.close() def _update_spark_env(self): try: node_num = re.search(u'node(\d+)', self.name).group(1) except AttributeError: node_num = 0 conf_file = os.path.join(self.get_path(), 'resources', 'spark', 'conf', 'spark-env.sh') env = self.get_env() content = [] with open(conf_file, 'r') as f: for line in f.readlines(): for spark_var in env.keys(): if 
line.startswith('export %s=' % spark_var) or line.startswith('export %s=' % spark_var, 2): line = 'export %s=%s\n' % (spark_var, env[spark_var]) break content.append(line) with open(conf_file, 'w') as f: f.writelines(content) # starting with DSE 5.0 (Spark 1.6) we need to set a unique # spark.shuffle.service.port for each node if self.cluster.version() > '5.0': print_('Writing shuffle') defaults_file = os.path.join(self.get_path(), 'resources', 'spark', 'conf', 'spark-defaults.conf') with open(defaults_file, 'a') as f: port_num = 7737 + int(node_num) f.write("\nspark.shuffle.service.port %s\n" % port_num)
ccmlib/dse_node.py
20,390
Provides interactions to a DSE node. Due to the way CCM lays out files, separating the repository from the node(s) confs, the `dse-env.sh` script of each node needs to have its DSE_HOME var set and exported. Since DSE 4.5.x, the stock `dse-env.sh` file includes a commented-out place to do exactly this, intended for installers. Basically: read in the file, write it back out and add the two lines. 'sstableloader' is an example of a node script that depends on this, when used in a CCM-built cluster. Returns the path to the directory where Cassandra config are located Watch the log of this node until it detects that the provided other nodes are marked UP. This method works similarly to watch_log_for_death. We want to provide a higher default timeout when this is called on DSE. ccm node Copy back the dse scripts since profiling may have modified it the previous time Get a map of combined cluster and node configuration with the node configuration taking precedence. Merge options with original yaml data. starting with DSE 5.0 (Spark 1.6) we need to set a unique spark.shuffle.service.port for each node
1,114
en
0.866257
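DseNode.__update_yaml above loads resources/dse/conf/dse.yaml, merges the cluster-level and node-level option dictionaries with the node values taking precedence, and writes the result back with yaml.safe_dump. The following is a simplified sketch of that merge-and-rewrite step, assuming PyYAML is available; merge_options is an illustrative stand-in, not the actual ccmlib.common.merge_configuration implementation.

# Simplified stand-in for the config merge performed by __update_yaml: override values
# win, nested dicts are merged recursively, and the file is rewritten in place.
import yaml

def merge_options(base: dict, override: dict) -> dict:
    merged = dict(base)
    for key, value in override.items():
        if isinstance(value, dict) and isinstance(merged.get(key), dict):
            merged[key] = merge_options(merged[key], value)
        else:
            merged[key] = value
    return merged

def update_dse_yaml(conf_file: str, cluster_opts: dict, node_opts: dict) -> None:
    with open(conf_file) as f:
        data = yaml.safe_load(f) or {}
    # node options take precedence over cluster options, both over the stock file
    data = merge_options(data, merge_options(cluster_opts, node_opts))
    with open(conf_file, "w") as f:
        yaml.safe_dump(data, f, default_flow_style=False)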
#!/usr/bin/env python import os import cv2 import numpy as np from enum import Enum import math class Calc (Enum): OPENCV = 1 GSL_MULTI_ROOT = 2 GSL_MULTI_FIT = 3 image_file_name = "Man2_10deg.png" use_calc = Calc.GSL_MULTI_FIT #use_calc = Calc.GSL_MULTI_ROOT #use_calc = Calc.OPENCV def get_project_xy(A, R, X, Y, Z): P = np.array([X, Y, Z, 1]) pp = A.dot(R.dot(P)) return [pp[0]/pp[2], pp[1]/pp[2]] def get_project_uv(A, R, X, Y, Z): fx, fy, cx, cy = A[0][0], A[1][1], A[0][2], A[1][2] r11, r12, r13, t1 = R[0][0], R[0][1], R[0][2], R[0][3] r21, r22, r23, t2 = R[1][0], R[1][1], R[1][2], R[1][3] r31, r32, r33, t3 = R[2][0], R[2][1], R[2][2], R[2][3] s = r31 * X + r32 * Y + r33 * Z + t3 # print("%f * %f + %f * %f + %f * %f + %f = %f\n" % (r31, X, r32, Y, r33, Z, t3, s)) u = ((fx*r11 + cx*r31)*X + (fx*r12 + cx*r32)*Y + (fx*r13 + cx*r33)*Z + fx*t1 + cx*t3)/s v = ((fy*r21 + cy*r31)*X + (fy*r22 + cy*r32)*Y +(fy*r23 + cy*r33)*Z + fy*t2 + cy*t3)/s # print("%f/%f" % ((fx*r11 + cx*r31)*X + (fx*r12 + cx*r32)*Y + (fx*r13 + cx*r33)*Z + fx*t1 + cx*t3, s)) # print("%f/%f" % ((fy*r21 + cy*r31)*X + (fy*r22 + cy*r32)*Y +(fy*r23 + cy*r33)*Z + fy*t2 + cy*t3, s)) return u, v def get_rot_tran_matrix2(M): a = [] for i in range(0, 12): a.append(float(M[i])) R = np.array([[a[0], a[1], a[2], a[9]], [a[3], a[4], a[5], a[10]], [a[6], a[7], a[8], a[11]]]) return R def print_rotation_angle(RT): R = RT[:, 0:3] # print('R:', R) V = R.dot(np.array([0, 0, 1])) print('\033[92mV:', V) print('phi = %f degree' % math.degrees(math.atan(V[0] / V[2]))) print('theta = %f degree' % (math.sqrt(V[0]**2 + V[2]**2))) print('\033[0m') def verification_rot_tran_matrix(A, R, u, v, X, Y, Z): P = np.array([X, Y, Z, 1], dtype="double") pp = A.dot(R.dot(P)) diff = np.fabs(u - pp[0]/pp[2]) + np.fabs(v - pp[1]/pp[2]) print(u, v, '<->', pp[0]/pp[2], pp[1]/pp[2]) return diff def verification_rot_tran_matrix_2(A, R, u, v, X, Y, Z): ud, vd = get_project_uv(A, R, X, Y, Z) print(u, v, '<->', ud, vd) def get_rot_tran_matrix(img_pnts, mod_pnts, cam_matrix): # s = 1 (u1, v1) = img_pnts[0] # nose tip (u2, v2) = img_pnts[1] # left eye (u3, v3) = img_pnts[2] # right eye (u4, v4) = img_pnts[3] # left mouth (u5, v5) = img_pnts[4] # right mouth (X1, Y1, Z1) = model_points[0] (X2, Y2, Z2) = model_points[1] (X3, Y3, Z3) = model_points[2] (X4, Y4, Z4) = model_points[3] (X5, Y5, Z5) = model_points[4] fx = cam_matrix[0][0] fy = cam_matrix[1][1] cx = cam_matrix[0][2] cy = cam_matrix[1][2] r31, r32, r33, t3 = 0, 0, 0, 1 D = np.array([[X1, Y1, Z1, 1], [X2, Y2, Z2, 1], [X3, Y3, Z3, 1], [X4, Y4, Z4, 1]]) D1 = np.array([[(v1 - cy) / fy, Y1, Z1, 1], [(v2 - cy) / fy, Y2, Z2, 1], [(v3 - cy) / fy, Y3, Z3, 1], [(v4 - cy) / fy, Y4, Z4, 1]]) D2 = np.array([[X1, (v1 - cy) / fy, Z1, 1], [X2, (v2 - cy) / fy, Z2, 1], [X3, (v3 - cy) / fy, Z3, 1], [X4, (v4 - cy) / fy, Z4, 1]]) D3 = np.array([[X1, Y1, (v1 - cy) / fy, 1], [X2, Y2, (v2 - cy) / fy, 1], [X3, Y3, (v3 - cy) / fy, 1], [X4, Y4, (v4 - cy) / fy, 1]]) D4 = np.array([[X1, Y1, Z1, (v1 - cy) / fy], [X2, Y2, Z2, (v2 - cy) / fy], [X3, Y3, Z3, (v3 - cy) / fy], [X4, Y4, Z4, (v4 - cy) / fy]]) r21 = np.linalg.det(D1) / np.linalg.det(D) r22 = np.linalg.det(D2) / np.linalg.det(D) r23 = np.linalg.det(D3) / np.linalg.det(D) t2 = np.linalg.det(D4) / np.linalg.det(D) D1 = np.array([[(u1 - cx) / fx, Y1, Z1, 1], [(u2 - cx) / fx, Y2, Z2, 1], [(u3 - cx) / fx, Y3, Z3, 1], [(u4 - cx) / fx, Y4, Z4, 1]]) D2 = np.array([[X1, (u1 - cx) / fx, Z1, 1], [X2, (u2 - cx) / fx, Z2, 1], [X3, (u3 - cx) / fx, Z3, 1], [X4, (u4 - cx) / fx, Z4, 1]]) D3 = np.array([[X1, 
Y1, (u1 - cx) / fx, 1], [X2, Y2, (u2 - cx) / fx, 1], [X3, Y3, (u3 - cx) / fx, 1], [X4, Y4, (u4 - cx) / fx, 1]]) D4 = np.array([[X1, Y1, Z1, (u1 - cx) / fx], [X2, Y2, Z2, (v2 - cy) / fy], [X3, Y3, Z3, (u3 - cx) / fx], [X4, Y4, Z4, (u4 - cx) / fx]]) r11 = np.linalg.det(D1) / np.linalg.det(D) r12 = np.linalg.det(D2) / np.linalg.det(D) r13 = np.linalg.det(D3) / np.linalg.det(D) t1 = np.linalg.det(D4) / np.linalg.det(D) R = np.array([[r11, r12, r13, t1], [r21, r22, r23, t2], [r31, r32, r33, t3]]) return R if __name__ == '__main__': # 3D model points. model_points = np.array([ (0.0, 0.0, 0.0), # Nose tip (-50.0, -40.0, 20.0), # Left eye left corner (50.0, -40.0, 20.0), # Right eye right corner (-27.5, 30.0, 10.0), # Left Mouth corner (27.5, 30.0, 10.0) # Right mouth corner ]) index = 4 points_file = "points.txt" image_file = [] key_points = [] matrix = [] if not os.path.exists(points_file): print('do not have file %s' % points_file) exit(0) points_f = open(points_file, 'r') for line in points_f: a = line.split('|') b = a[0].split(',') image_file.append(b[0]) key_points.append(b[1:11]) matrix.append(a[1].split(',')) points_f.close() image_points = np.array([ (int(key_points[index][0]), int(key_points[index][5])), # Nose tip (int(key_points[index][1]), int(key_points[index][6])), # Left eye left corner (int(key_points[index][2]), int(key_points[index][7])), # Right eye right corner (int(key_points[index][3]), int(key_points[index][8])), # Left Mouth corner (int(key_points[index][4]), int(key_points[index][9])) # Right mouth corner ], dtype="double") # Read Image im = cv2.imread(image_file[index]) size = im.shape # Camera internals focal_length = size[1] center = (size[1] / 2, size[0] / 2) camera_matrix = np.array( [[focal_length, 0, center[0]], [0, focal_length, center[1]], [0, 0, 1]], dtype="double") R = get_rot_tran_matrix2(matrix[index]) # read gsl result print("\033[94m----check----") for i in range(0, 5): verification_rot_tran_matrix_2(camera_matrix, R, image_points[i][0], image_points[i][1], model_points[i][0], model_points[i][1], model_points[i][2]) print("----end-----\033[0m") print_rotation_angle(R) print("rotation_matrix:\n {0}".format(R)) # draw axes axis_length = 100.0 if False: Z_pnt = get_project_uv(camera_matrix, R, 0, 0, axis_length) Y_pnt = get_project_uv(camera_matrix, R, 0, axis_length, 0) X_pnt = get_project_uv(camera_matrix, R, axis_length, 0, 0) Org_pnt = get_project_uv(camera_matrix, R, 0, 0, 0) else: Z_pnt = get_project_xy(camera_matrix, R, 0, 0, axis_length) Y_pnt = get_project_xy(camera_matrix, R, 0, axis_length, 0) X_pnt = get_project_xy(camera_matrix, R, axis_length, 0, 0) Org_pnt = get_project_xy(camera_matrix, R, 0, 0, 0) #print('Rt:\033[93m', R, '\033[0m') # print('X:\033[93m', R[:, 0:3].dot(np.array([axis_length, 0, 0])), '\033[0m') # print('Y:\033[93m', R[:, 0:3].dot(np.array([0, axis_length, 0])), '\033[0m') # print('Z:\033[93m', R[:, 0:3].dot(np.array([0, 0, axis_length])), '\033[0m') for p in image_points: cv2.circle(im, (int(p[0]), int(p[1])), 3, (0, 0, 255), -1) p1 = (int(Org_pnt[0]), int(Org_pnt[1])) p2 = (int(Z_pnt[0]), int(Z_pnt[1])) cv2.line(im, p1, p2, (255, 0, 0), 2) #blue:Z p1 = (int(Org_pnt[0]), int(Org_pnt[1])) p2 = (int(Y_pnt[0]), int(Y_pnt[1])) cv2.line(im, p1, p2, (0, 255, 0), 2) #green:Y p1 = (int(Org_pnt[0]), int(Org_pnt[1])) p2 = (int(X_pnt[0]), int(X_pnt[1])) cv2.line(im, p1, p2, (0, 255, 255), 2) #yellow: X # Display image cv2.imshow("Output", im) cv2.waitKey(0)
opencv/src/face_motion1.py
7,963
!/usr/bin/env pythonuse_calc = Calc.GSL_MULTI_ROOTuse_calc = Calc.OPENCV print("%f * %f + %f * %f + %f * %f + %f = %f\n" % (r31, X, r32, Y, r33, Z, t3, s)) print("%f/%f" % ((fx*r11 + cx*r31)*X + (fx*r12 + cx*r32)*Y + (fx*r13 + cx*r33)*Z + fx*t1 + cx*t3, s)) print("%f/%f" % ((fy*r21 + cy*r31)*X + (fy*r22 + cy*r32)*Y +(fy*r23 + cy*r33)*Z + fy*t2 + cy*t3, s)) print('R:', R) s = 1 nose tip left eye right eye left mouth right mouth 3D model points. Nose tip Left eye left corner Right eye right corner Left Mouth corner Right mouth corner Nose tip Left eye left corner Right eye right corner Left Mouth corner Right mouth corner Read Image Camera internals read gsl result draw axesprint('Rt:\033[93m', R, '\033[0m') print('X:\033[93m', R[:, 0:3].dot(np.array([axis_length, 0, 0])), '\033[0m') print('Y:\033[93m', R[:, 0:3].dot(np.array([0, axis_length, 0])), '\033[0m') print('Z:\033[93m', R[:, 0:3].dot(np.array([0, 0, axis_length])), '\033[0m')blue:Zgreen:Yyellow: X Display image
1,003
en
0.271966
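get_project_xy and get_project_uv in the script above are two spellings of the same pinhole projection: the 3x4 [R|t] matrix maps a homogeneous model point into camera space, the 3x3 intrinsic matrix A maps that onto the image plane, and dividing by the third component gives pixel coordinates. Here is a worked numeric sketch; the intrinsics and pose are made-up values, and only the model point is taken from the script's model_points list.

# Worked example of the projection used by get_project_xy. The numbers are invented
# for illustration: fx = fy = 800, principal point (320, 240), identity rotation,
# translation 400 units along the optical axis.
import numpy as np

A = np.array([[800.0,   0.0, 320.0],
              [  0.0, 800.0, 240.0],
              [  0.0,   0.0,   1.0]])                     # intrinsics: fx, fy, cx, cy

Rt = np.hstack([np.eye(3), np.array([[0.0], [0.0], [400.0]])])  # [R | t], 3x4

P = np.array([50.0, -40.0, 20.0, 1.0])    # right-eye corner from the model_points list, homogeneous
p = A @ (Rt @ P)                          # camera projection
u, v = p[0] / p[2], p[1] / p[2]           # divide by the homogeneous coordinate
print(u, v)                               # approximately (415.24, 163.81)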
""" Get attributes about images Inspired by https://github.com/CSAILVision/places365/blob/master/run_placesCNN_unified.py """ from pathlib import Path import argparse from typing import List, Iterator, Tuple, Optional, Union, Dict import hashlib import json from multiprocessing import Pool import urllib.request import sys import csv from tqdm.auto import tqdm import torch from torchvision import transforms as trn from torch import nn from torch.utils.data._utils.collate import default_collate from torch.nn import functional as F import numpy as np import cv2 from PIL import Image import argtyped from torch.utils.data import Dataset, DataLoader import scripts.wideresnet as wideresnet csv.field_size_limit(sys.maxsize) TSV_FIELDNAMES = [ "listing_id", "photo_id", "category", "attributes", "is_indoor", ] class Arguments(argtyped.Arguments, underscore=True): outfile: Path = Path("places365/detect.tsv") images: Path = Path("images") batch_size: int = 100 visualize: bool = False num_cat: int = 5 num_attr: int = 10 num_splits: int = 1 start: int = 0 num_workers: int = -1 # hacky way to deal with the Pytorch 1.0 update def recursion_change_bn(module: nn.Module) -> nn.Module: if isinstance(module, nn.BatchNorm2d): module.track_running_stats = 1 # type: ignore else: for i, (name, module1) in enumerate(module._modules.items()): # type: ignore module1 = recursion_change_bn(module1) return module def download_url(url, cache_dir): stem = hashlib.sha1(str(url).encode()) filename = cache_dir / stem.hexdigest() if not filename.is_file(): urllib.request.urlretrieve(url, filename) return filename def load_labels( cache_dir: Union[Path, str] ) -> Tuple[Tuple[str, ...], np.ndarray, List[str], np.ndarray]: """ prepare all the labels """ # indoor and outdoor relevant filename_io = download_url( "https://raw.githubusercontent.com/csailvision/places365/master/IO_places365.txt", cache_dir, ) with open(filename_io) as f: lines = f.readlines() labels_IO = [] for line in lines: items = line.rstrip().split() labels_IO.append(int(items[-1]) - 1) # 0 is indoor, 1 is outdoor labels_IO = np.array(labels_IO) # scene category relevant filename_category = download_url( "https://raw.githubusercontent.com/csailvision/places365/master/categories_places365.txt", cache_dir, ) _classes = list() with open(filename_category) as class_file: for line in class_file: _classes.append(line.strip().split(" ")[0][3:]) classes = tuple(_classes) # scene attribute relevant filename_attribute = download_url( "https://raw.githubusercontent.com/csailvision/places365/master/labels_sunattribute.txt", cache_dir, ) with open(filename_attribute) as f: lines = f.readlines() labels_attribute = [item.rstrip() for item in lines] filename_W = download_url( "http://places2.csail.mit.edu/models_places365/W_sceneattribute_wideresnet18.npy", cache_dir, ) W_attribute = np.load(filename_W) return classes, labels_IO, labels_attribute, W_attribute def get_tf(): # load the image transformer tf = trn.Compose( [ trn.Resize((224, 224)), trn.ToTensor(), trn.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]), ] ) return tf class NormalizeInverse(trn.Normalize): """ Undoes the normalization and returns the reconstructed images in the input domain. 
""" def __init__(self, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]): mean = torch.tensor(mean) std = torch.tensor(std) std_inv = 1 / (std + 1e-7) # type: ignore mean_inv = -mean * std_inv super().__init__(mean=mean_inv, std=std_inv) def __call__(self, array: np.ndarray): tensor = torch.tensor(array) tensor = super().__call__(tensor.clone()) array = np.transpose(np.uint8(255 * tensor.numpy()), (1, 2, 0)) return array class Hooker: def __init__(self, model: nn.Module, features_names=("layer4", "avgpool")): self.features: List[np.ndarray] = [] # this is the last conv layer of the resnet for name in features_names: model._modules.get(name).register_forward_hook(self) # type: ignore def __call__(self, module: nn.Module, input, output): self.features.append(output.data.cpu().numpy()) def reset(self): self.features = [] # load the model def load_model(cache_dir: Union[Path, str]) -> nn.Module: # this model has a last conv feature map as 14x14 model_file = download_url( "http://places2.csail.mit.edu/models_places365/wideresnet18_places365.pth.tar", cache_dir, ) model = wideresnet.resnet18(num_classes=365) checkpoint = torch.load(model_file, map_location=lambda storage, loc: storage) state_dict = { str.replace(k, "module.", ""): v for k, v in checkpoint["state_dict"].items() } model.load_state_dict(state_dict) # hacky way to deal with the upgraded batchnorm2D and avgpool layers... for i, (name, module) in enumerate(model._modules.items()): # type: ignore module = recursion_change_bn(model) # type: ignore model.avgpool = torch.nn.AvgPool2d(kernel_size=14, stride=1, padding=0) # type: ignore model.eval() return model def search_locations(image_folder: Path) -> List[Path]: return [f for f in image_folder.iterdir() if f.is_dir()] def load_photo_paths(locations: List[Path]) -> Iterator[Path]: for location in tqdm(locations): for photo in location.glob("*.jpg"): yield photo def load_photos(images: Path, cache_dir: Union[Path, str]) -> List[Union[str, Path]]: photo_cache = Path(cache_dir) / "photos.txt" if photo_cache.is_file(): with open(photo_cache, "r") as fid: photos: List[Union[str, Path]] = [l.strip() for l in fid.readlines()] else: print("Preloading every images") photos = list(images.rglob("*.jpg")) with open(photo_cache, "w") as fid: fid.writelines(f"{l}\n" for l in photos) return photos class ImageDataset(Dataset): def __init__(self, photos: List[Union[Path, str]]): self.photos = photos self.tf = get_tf() # image transformer def __len__(self): return len(self.photos) def __getitem__( self, index: int ) -> Optional[Tuple[torch.Tensor, torch.Tensor, torch.Tensor]]: path = Path(self.photos[index]) try: image = Image.open(path) image = image.convert("RGB") except: return None tensor = self.tf(image) listing_id, photo_id = map(int, path.stem.split("-")) return torch.tensor(listing_id), torch.tensor(photo_id), tensor def collate_fn(batch: Tuple): batch = tuple([b for b in batch if b is not None]) if not batch: return None return default_collate(batch) def class_activation_map( feature_conv: np.ndarray, weight_softmax: np.ndarray, class_idx: List[int] ): # generate the class activation maps upsample to 256x256 size_upsample = (256, 256) nc, h, w = feature_conv.shape output_cam = [] for _ in class_idx: cam = weight_softmax[class_idx].dot(feature_conv.reshape((nc, h * w))) cam = cam.reshape(h, w) cam = cam - np.min(cam) cam_img = cam / np.max(cam) cam_img = np.uint8(255 * cam_img) output_cam.append(cv2.resize(cam_img, size_upsample)) # type: ignore return output_cam def get_key(listing_id, 
photo_id) -> str: return f"{listing_id}_{photo_id}" def is_indoor(idx, labels_io): # vote for the indoor or outdoor io_image = np.mean(labels_io[idx[:10]]) ans = bool(io_image < 0.5) return io_image, ans def softmax(x): """Compute softmax values for each sets of scores in x.""" e_x = np.exp(x - np.max(x)) return e_x / e_x.sum(axis=0) # type: ignore @torch.no_grad() def run_model( batch: List[torch.Tensor], model, hook, classes: Tuple[str, ...], labels_IO: np.ndarray, labels_attribute: List[str], W_attribute: np.ndarray, num_cat: int, num_attr: int, weight_softmax: Optional[np.ndarray] = None, ) -> List[Dict]: listing_ids, photo_ids, input_img = batch # forward pass logit = model.forward(input_img.cuda()) h_x = F.softmax(logit, 1) detections = [] for i, p in enumerate(h_x): # type: ignore listing_id = int(listing_ids[i]) photo_id = int(photo_ids[i]) key = get_key(listing_id, photo_id) probs, idx = p.sort(0, True) # type: ignore probs = probs.detach().cpu().numpy() idx = idx.detach().cpu().numpy() # scene category category = [(probs[j], classes[idx[j]]) for j in range(0, num_cat)] # output the scene attributes ft = [np.squeeze(f[i]) for f in hook.features] responses_attribute = softmax(W_attribute.dot(ft[1])) idx_a = np.argsort(responses_attribute) attributes = [ (responses_attribute[idx_a[j]], labels_attribute[idx_a[j]]) for j in range(-1, -num_attr, -1) ] detections.append( { "listing_id": listing_id, "photo_id": photo_id, "category": category, "attributes": attributes, "is_indoor": is_indoor(idx, labels_IO), } ) # generate class activation mapping if weight_softmax is not None: ca_map = class_activation_map(ft[0], weight_softmax, [idx[0]])[0] # render the CAM and output img = NormalizeInverse()(input_img[i]) height, width, _ = img.shape # type: ignore heatmap = cv2.applyColorMap( # type: ignore cv2.resize(ca_map, (width, height)), cv2.COLORMAP_JET # type: ignore ) result = heatmap * 0.4 + img * 0.5 # type: ignore cv2.imwrite(f"examples/{key}-heatmap.jpg", result) # type: ignore cv2.imwrite(f"examples/{key}-image.jpg", img[:, :, ::-1]) # type: ignore hook.reset() return detections class NumpyEncoder(json.JSONEncoder): """ Special json encoder for numpy types """ def default(self, obj): if isinstance(obj, np.integer): return int(obj) elif isinstance(obj, np.floating): return float(obj) elif isinstance(obj, np.ndarray): return obj.tolist() return json.JSONEncoder.default(self, obj) def save_json(data, filename: Union[str, Path]): with open(filename, "w") as fid: json.dump(data, fid, indent=2, cls=NumpyEncoder) def detection(args: Arguments, proc_id: int, cache_dir: Union[Path, str]): # load the labels classes, labels_IO, labels_attribute, W_attribute = load_labels(cache_dir) model = load_model(cache_dir) hook = Hooker(model) # load the transformer # get the softmax weight params = list(model.parameters()) if args.visualize: weight_softmax = params[-2].data.numpy() weight_softmax[weight_softmax < 0] = 0 else: weight_softmax = None photos = load_photos(args.images, cache_dir) print("The dataset contains a total of", len(photos)) photos = photos[proc_id :: args.num_splits] print("The split", proc_id, "over", args.num_splits, "contains", len(photos), "photos") dataset = ImageDataset(photos) dataloader = DataLoader( dataset, batch_size=args.batch_size, num_workers=args.num_workers, collate_fn=collate_fn, # type: ignore ) model = model.cuda() filename = args.outfile.parent / f"{args.outfile.stem}.{proc_id}.tsv" print(f"Start split {proc_id} on {len(dataset)} photos") with open(filename, "wt") as 
tsvfile: writer = csv.DictWriter(tsvfile, delimiter="\t", fieldnames=TSV_FIELDNAMES) for batch in tqdm(dataloader): if batch is None: continue detections = run_model( batch, model, hook, classes, labels_IO, labels_attribute, W_attribute, num_cat=args.num_cat, num_attr=args.num_attr, weight_softmax=weight_softmax, ) for d in detections: writer.writerow(d) if __name__ == "__main__": args = Arguments() print(args.to_string()) cache_dir = Path.home() / ".cache" / args.outfile.parent.name cache_dir.mkdir(exist_ok=True, parents=True) local_rank = int(os.environ.get("LOCAL_RANK", "-1")) # assumption: local_rank was undefined in the original; read the process rank from the environment, defaulting to -1 so a single process starts at args.start start = max(local_rank, 0) + args.start detection(args, start, cache_dir)
scripts/detect_room.py
13,053
Undoes the normalization and returns the reconstructed images in the input domain. Special json encoder for numpy types prepare all the labels Compute softmax values for each sets of scores in x. Get attributes about images Inspired by https://github.com/CSAILVision/places365/blob/master/run_placesCNN_unified.py hacky way to deal with the Pytorch 1.0 update type: ignore type: ignore indoor and outdoor relevant 0 is indoor, 1 is outdoor scene category relevant scene attribute relevant load the image transformer type: ignore this is the last conv layer of the resnet type: ignore load the model this model has a last conv feature map as 14x14 hacky way to deal with the upgraded batchnorm2D and avgpool layers... type: ignore type: ignore type: ignore image transformer generate the class activation maps upsample to 256x256 type: ignore vote for the indoor or outdoor type: ignore forward pass type: ignore type: ignore scene category output the scene attributes generate class activation mapping render the CAM and output type: ignore type: ignore type: ignore type: ignore type: ignore type: ignore load the labels load the transformer get the softmax weight type: ignore
1,181
en
0.684663
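The Hooker class above works by registering forward hooks on the 'layer4' and 'avgpool' submodules, so every forward pass also stashes those intermediate activations for the attribute prediction and the class activation map. A minimal sketch of that hook pattern follows, using a stock torchvision resnet18 as a stand-in for the Places365 wide resnet (torchvision 0.13 or newer is assumed for the weights argument).

# Forward hooks capture a named submodule's output on every forward pass.
import torch
from torchvision import models

features = []

def save_output(module, inputs, output):
    # called automatically after the hooked module runs
    features.append(output.detach().cpu())

model = models.resnet18(weights=None).eval()
model.layer4.register_forward_hook(save_output)   # last convolutional block
model.avgpool.register_forward_hook(save_output)  # pooled features

with torch.no_grad():
    model(torch.randn(1, 3, 224, 224))

print([f.shape for f in features])  # [torch.Size([1, 512, 7, 7]), torch.Size([1, 512, 1, 1])]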
''' Task 1 Use a loop to print five lines of zeros, and number each line. ''' # for i in range(1, 6): # print(f'{i} --', '0'*i) ####### # print('num 1') # i = 0 # while i < 5: # i += 1 # print('line', i, 'is 0') # # print('') ####### ''' Task 2 The user enters 10 digits in a loop. Find how many fives the user entered. ''' # amount = 0 # for i in range(10): # n = input() # if '5' in n: # amount += 1 # print(amount) ######### # print('num 2') # sum = 0 # for i in range(10): # answer = int(input('Enter a digit: ')) # if answer == 5: # sum += 1 # # print('Number of fives =', sum) # print('') ######### ''' Task 3 Find the sum of the numbers from 1 to 100. Print the result. ''' # sum = 0 # # for i in range(1,101): # sum+=i # print(sum) ############# # print('num 3') # sum = 0 # for i in range(1,101): # sum+=i # print(sum) # print('') ################ ''' Task 4 Find the product of the numbers from 1 to 10. Print the result. ''' # n = 1 # for i in range(2, 11): # n *= i # print(n) ################ # print('num 4') # comp = 1 # for i in range(1,11): # comp *= i # print(comp) # print('') ################ ''' Task 5 Print each digit of a number on its own line. ''' # integer_number = 2129 # # #print(integer_number%10,integer_number//10) # # while integer_number>0: # print(integer_number%10) # integer_number = integer_number//10 ################ # # print('num 5') # integer_number = 2129 # while integer_number>0: # print(integer_number%10) # integer_number = integer_number//10 # print('') # ################ ''' Task 6 Find the sum of the digits of a number. ''' # inter_number = 12345 # n = 0 # for i in str(inter_number): # n += int(i) # print(n) ################ # inter_number = 12345 # n = 0 # for i in str(inter_number): # n += int(i) # print(n) ################ ''' Task 7 Find the product of the digits of a number. ''' # n = 1 # for i in str(inter_number): # n *= int(i) # print(n) ################ # inter_number = 12345 # x = 1 # for i in str(inter_number): # x *= int(i) # print(x) ################ ''' Task 8 Answer the question: does the number contain the digit 5? ''' # integer_number = 213413 # while integer_number>0: # if integer_number%10 == 5: # print('Yes') # break # integer_number = integer_number//10 # else: print('No') ################ # print('num 8') # integer_number = 2134135 # while integer_number>0: # if integer_number%10 == 5: # print('Yes') # break # integer_number = integer_number//10 # else: print('No') # print('') ################ ''' Task 9 Find the largest digit in a number ''' # inter_number = 59675 # maximum = 0 # # print(max(str(inter_number))) # while inter_number != 0: # if inter_number % 10 > maximum: # maximum = inter_number % 10 # inter_number //= 10 # print(maximum) ################ # print('num 9') # integer_number = 59675 # tmp = 0 # while integer_number>0: # if integer_number%10 >= tmp: # tmp = integer_number%10 # integer_number = integer_number//10 # print(tmp) # print('') ################ ''' Task 10 Count how many times the digit 5 appears in a number ''' # inter_number = 595675 # count = 0 # while inter_number != 0: # if inter_number % 10 == 5: # count += 1 # inter_number //= 10 # print(count) ################ # print('num 10') # integer_number = 595675 # tmp = 0 # while integer_number>0: # if integer_number%10 == 5: # tmp += 1 # integer_number = integer_number//10 # print(tmp) # print('') ################
example-l2.py
4,165
Task 1 Use a loop to print five lines of zeros, and number each line. for i in range(1, 6): print(f'{i} --', '0'*i) print('num 1') i = 0 while i < 5: i += 1 print('line', i, 'is 0') print('') amount = 0 for i in range(10): n = input() if '5' in n: amount += 1 print(amount) print('num 2') sum = 0 for i in range(10): answer = int(input('Enter a digit: ')) if answer == 5: sum += 1 print('Number of fives =', sum) print('') sum = 0 for i in range(1,101): sum+=i print(sum) print('num 3') sum = 0 for i in range(1,101): sum+=i print(sum) print('') n = 1 for i in range(2, 11): n *= i print(n) print('num 4') comp = 1 for i in range(1,11): comp *= i print(comp) print('') integer_number = 2129 print(integer_number%10,integer_number//10) while integer_number>0: print(integer_number%10) integer_number = integer_number//10 print('num 5') integer_number = 2129 while integer_number>0: print(integer_number%10) integer_number = integer_number//10 print('') inter_number = 12345 n = 0 for i in str(inter_number): n += int(i) print(n) inter_number = 12345 n = 0 for i in str(inter_number): n += int(i) print(n) n = 1 for i in str(inter_number): n *= int(i) print(n) inter_number = 12345 x = 1 for i in str(inter_number): x *= int(i) print(x) integer_number = 213413 while integer_number>0: if integer_number%10 == 5: print('Yes') break integer_number = integer_number//10 else: print('No') print('num 8') integer_number = 2134135 while integer_number>0: if integer_number%10 == 5: print('Yes') break integer_number = integer_number//10 else: print('No') print('') inter_number = 59675 maximum = 0 print(max(str(inter_number))) while inter_number != 0: if inter_number % 10 > maximum: maximum = inter_number % 10 inter_number //= 10 print(maximum) print('num 9') integer_number = 59675 tmp = 0 while integer_number>0: if integer_number%10 >= tmp: tmp = integer_number%10 integer_number = integer_number//10 print(tmp) print('') inter_number = 595675 count = 0 while inter_number != 0: if inter_number % 10 == 5: count += 1 inter_number //= 10 print(count) print('num 10') integer_number = 595675 tmp = 0 while integer_number>0: if integer_number%10 == 5: tmp += 1 integer_number = integer_number//10 print(tmp) print('')
2,451
en
0.313657
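Every digit exercise in the file above reuses the same idea: take the last digit with % 10 and drop it with // 10. The small helper below is an illustrative consolidation (not part of the exercises) that yields the digits once and covers the sum, maximum and counting variants.

# One generator over the digits of a number, built on the same % 10 / // 10 loop.
def digits(n: int):
    n = abs(n)
    while True:
        yield n % 10
        n //= 10
        if n == 0:
            break

number = 595675
print(sum(digits(number)))                        # digit sum: 37
print(max(digits(number)))                        # largest digit: 9
print(sum(1 for d in digits(number) if d == 5))   # how many fives: 3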
'''Write a program that reads a sentence from the keyboard and shows how many times the letter "A" appears, the position where it appears for the first time and the position where it appears for the last time.''' frase=str(input('Type a sentence: ')).upper().strip() print('the letter A appears {} times'.format(frase.count('A'))) print('it appears for the first time at position: {}'.format(frase.find('A')+1)) print('it appears for the last time at position: {}'.format(frase.rfind('A')+1))
ex26.py
470
Write a program that reads a sentence from the keyboard and shows how many times the letter "A" appears, the position where it appears for the first time and the position where it appears for the last time.
178
pt
0.998701
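The same count/find/rfind logic used in the exercise above generalises to listing every position of the letter with enumerate. A small illustrative variant follows, reusing the upper()/strip() normalisation; the sample sentence is made up.

# All positions of the letter A (1-based), not just the first and last.
phrase = "Abra a janela, Ana".upper().strip()
positions = [i + 1 for i, ch in enumerate(phrase) if ch == "A"]
print(f"the letter A appears {len(positions)} times")   # 7
if positions:
    print(f"first occurrence at position {positions[0]}")   # 1
    print(f"last occurrence at position {positions[-1]}")   # 18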
from django.shortcuts import render, redirect from django.core.urlresolvers import reverse from .forms import EventCreateForm, NoteCreateForm, PropertyCreateForm, FileUploadForm,AlertCreateForm from models import Event, Property, Note, File, Alert from wsgiref.util import FileWrapper from django.http import HttpResponse, HttpResponseBadRequest from django.contrib.auth.decorators import login_required import datetime, mimetypes # Create your views here. @login_required(login_url='/accounts/login/') def index(request): return render(request, "main/base.html") @login_required(login_url='/accounts/login/') def dashboard(request): return render(request, "main/base.html") def properties(request): context = { "properties" : Property.objects.filter(user = request.user) } return render (request, "main/properties.html", context) def sidebar(request): today = datetime.datetime.now() yesterday = today - datetime.timedelta(days=1) print today alerts = Alert.objects.filter(event__property__user=request.user, when__gt=yesterday).order_by("when")[:5] notes = Note.objects.filter(event__property__user=request.user).order_by("-created_at")[:5] for alert in alerts: print alert.when context = { "alerts" : alerts, "notes" : notes, "today" : today } return render(request, "main/sidebar.html", context) def add_property(request): if request.method == "POST": prop_form = PropertyCreateForm(request.POST) if prop_form.is_valid(): prop = prop_form.save(commit=False) prop.user = request.user prop.save() context = { "properties" : Property.objects.filter(user = request.user) } print "valid form" return HttpResponse(prop.id) else: context={ 'form':prop_form } print "invalid form" return HttpResponseBadRequest(render (request,'main/add_property.html',context)) else: context={ 'form':PropertyCreateForm() } print "GET" return render(request,'main/add_property.html',context) def event(request,event_id, prop_id): property = Property.objects.get(pk=prop_id) event = Event.objects.get(pk=event_id) notes = event.note_set.all() alerts = event.alert_set.all() context={ 'prop_id':prop_id, 'event':event, "notes":notes, 'alerts':alerts, 'property':property } return render(request, 'main/event.html',context) def events(request, prop_id): property = Property.objects.get(pk=prop_id) events = property.event_set.all() context={ 'property' : property, 'events':events } return render(request, 'main/events.html',context) def get_file(request,file_id): file = File.objects.get(pk=file_id) mimetype = mimetypes.guess_type(file.docfile.name) response = HttpResponse(content_type=mimetype[0]) response['Content-Disposition'] = 'inline; filename=' + file.docfile.name.split('/')[-1] response['Accept-Ranges'] = 'bytes' response['Content-Length'] = file.docfile.size response.write(file.docfile.read()) return response def add_event(request, prop_id): if request.POST: event_form = EventCreateForm(request.POST) if event_form.is_valid(): event = event_form.save(commit=False) event.property = Property.objects.get(pk=prop_id) event.save() return HttpResponse(event.id) else: context={ 'form':event_form, 'prop_id':prop_id } return HttpResponseBadRequest(render (request,'main/add_event.html',context)) else: context={ 'form':EventCreateForm(), 'prop_id':prop_id } return render(request,'main/add_event.html',context) def note(request,event_id,prop_id,note_id): note = Note.objects.get(pk=note_id) documents = note.file_set.all() docNames = [] for document in documents: docNames.append((document.id,document.docfile.name.split('/')[-1])) print docNames form = 
FileUploadForm() property = Property.objects.get(pk=prop_id) event = Event.objects.get(pk=event_id) context={'form':form, 'documents': documents,'event_id':event_id, 'prop_id':prop_id,"note_id":note.id, 'note':note, 'event':event, 'property':property, "docNames":docNames} return render(request, 'main/note.html', context) def notes(request,event_id,prop_id): event = Event.objects.get(pk=event_id) notes = event.note_set.all() context={ 'event_id':event_id, 'prop_id':prop_id, } return render(request, 'main/note.html', context) def add_note(request,prop_id, event_id): if request.POST: note_form = NoteCreateForm(request.POST) if note_form.is_valid(): note = note_form.save(commit=False) note.event = Event.objects.get(pk=event_id) note.save() return HttpResponse(note.id) else: context={ 'form':note_form, 'prop_id':prop_id, 'event_id':event_id } return HttpResponseBadRequest(render (request,'main/add_note.html',context)) else: context={ 'form':NoteCreateForm(), 'prop_id':prop_id, 'event_id':event_id } return render(request,'main/add_note.html',context) def update_note(request,prop_id, event_id): print ('update') if request.POST: name = request.POST['name'] name = request.POST['comment'] note = Event.objects.get(pk=event_id) note.name=name note.comment=comment note.save() return HttpResponse(note.id) def add_file(request,prop_id, event_id, note_id): if request.method == 'POST': form = FileUploadForm(request.POST, request.FILES) note = Note.objects.get(pk=note_id) if form.is_valid(): newdoc = File(docfile=request.FILES['docfile'] ) newdoc.note = note newdoc.save() return HttpResponse("added file") else: form = FileUploadForm() documents = File.objects.all() context={'form':form, 'documents': documents,'event_id':event_id, 'prop_id':prop_id,"note_id":note_id} return HttpResponseBadRequest(render (request,'main/note.html',context)) def alert(request,event_id,prop_id,alert_id): alert = Alert.objects.get(pk=alert_id) form = AlertCreateForm() property = Property.objects.get(pk=prop_id) event = Event.objects.get(pk=event_id) context={'form':form, 'event_id':event_id, 'prop_id':prop_id,"alert_id":alert.id, 'alert':alert, 'property':property, 'event':event} return render(request, 'main/alert.html', context) def add_alert(request,prop_id, event_id): if request.POST: alert_form = AlertCreateForm(request.POST) if alert_form.is_valid(): alert = alert_form.save(commit=False) alert.event = Event.objects.get(pk=event_id) alert.save() return HttpResponse(alert.id) else: context={ 'form':alert_form, 'prop_id':prop_id, 'event_id':event_id } return HttpResponseBadRequest(render (request,'main/add_alert.html',context)) else: context={ 'form':AlertCreateForm(), 'prop_id':prop_id, 'event_id':event_id } return render(request,'main/add_alert.html',context)
apps/main/views.py
6,702
Create your views here.
23
en
0.928092
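The get_file view in views.py above streams a stored upload back to the browser: it guesses the content type from the stored name, sets Content-Disposition and Content-Length, and writes the file body into the response. A condensed sketch of that pattern is below, assuming a model instance with a docfile FileField like the File model imported above; the helper name is illustrative.

# Stream a stored FileField back to the client with sensible headers.
import mimetypes
from django.http import HttpResponse

def download_file(file_obj) -> HttpResponse:
    content_type, _ = mimetypes.guess_type(file_obj.docfile.name)
    response = HttpResponse(content_type=content_type or "application/octet-stream")
    filename = file_obj.docfile.name.rsplit("/", 1)[-1]
    response["Content-Disposition"] = f'inline; filename="{filename}"'
    response["Content-Length"] = file_obj.docfile.size
    response.write(file_obj.docfile.read())
    return response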
#!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright (c) 2017 F5 Networks Inc. # GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = r''' --- module: bigip_monitor_http short_description: Manages F5 BIG-IP LTM http monitors description: Manages F5 BIG-IP LTM http monitors. version_added: 2.5 options: name: description: - Monitor name. required: True parent: description: - The parent template of this monitor template. Once this value has been set, it cannot be changed. By default, this value is the C(http) parent on the C(Common) partition. default: "/Common/http" send: description: - The send string for the monitor call. When creating a new monitor, if this value is not provided, the default C(GET /\r\n) will be used. receive: description: - The receive string for the monitor call. receive_disable: description: - This setting works like C(receive), except that the system marks the node or pool member disabled when its response matches the C(receive_disable) string but not C(receive). To use this setting, you must specify both C(receive_disable) and C(receive). ip: description: - IP address part of the IP/port definition. If this parameter is not provided when creating a new monitor, then the default value will be '*'. port: description: - Port address part of the IP/port definition. If this parameter is not provided when creating a new monitor, then the default value will be '*'. Note that if specifying an IP address, a value between 1 and 65535 must be specified. interval: description: - The interval specifying how frequently the monitor instance of this template will run. If this parameter is not provided when creating a new monitor, then the default value will be 5. This value B(must) be less than the C(timeout) value. timeout: description: - The number of seconds in which the node or service must respond to the monitor request. If the target responds within the set time period, it is considered up. If the target does not respond within the set time period, it is considered down. You can change this number to any number you want, however, it should be 3 times the interval number of seconds plus 1 second. If this parameter is not provided when creating a new monitor, then the default value will be 16. time_until_up: description: - Specifies the amount of time in seconds after the first successful response before a node will be marked up. A value of 0 will cause a node to be marked up immediately after a valid response is received from the node. If this parameter is not provided when creating a new monitor, then the default value will be 0. target_username: description: - Specifies the user name, if the monitored target requires authentication. target_password: description: - Specifies the password, if the monitored target requires authentication. partition: description: - Device partition to manage resources on. default: Common version_added: 2.5 state: description: - When C(present), ensures that the monitor exists. - When C(absent), ensures the monitor is removed. 
default: present choices: - present - absent version_added: 2.5 notes: - Requires BIG-IP software version >= 12 extends_documentation_fragment: f5 author: - Tim Rupp (@caphrim007) ''' EXAMPLES = r''' - name: Create HTTP Monitor bigip_monitor_http: state: present ip: 10.10.10.10 server: lb.mydomain.com user: admin password: secret name: my_http_monitor delegate_to: localhost - name: Remove HTTP Monitor bigip_monitor_http: state: absent server: lb.mydomain.com user: admin password: secret name: my_http_monitor delegate_to: localhost - name: Include a username and password in the HTTP monitor bigip_monitor_http: state: absent server: lb.mydomain.com user: admin password: secret name: my_http_monitor target_username: monitor_user target_password: monitor_pass delegate_to: localhost ''' RETURN = r''' parent: description: New parent template of the monitor. returned: changed type: string sample: http ip: description: The new IP of IP/port definition. returned: changed type: string sample: 10.12.13.14 interval: description: The new interval in which to run the monitor check. returned: changed type: int sample: 2 timeout: description: The new timeout in which the remote system must respond to the monitor. returned: changed type: int sample: 10 time_until_up: description: The new time in which to mark a system as up after first successful response. returned: changed type: int sample: 2 ''' from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.basic import env_fallback try: from library.module_utils.network.f5.bigip import HAS_F5SDK from library.module_utils.network.f5.bigip import F5Client from library.module_utils.network.f5.common import F5ModuleError from library.module_utils.network.f5.common import AnsibleF5Parameters from library.module_utils.network.f5.common import cleanup_tokens from library.module_utils.network.f5.common import fq_name from library.module_utils.network.f5.common import f5_argument_spec from library.module_utils.network.f5.common import is_valid_ip try: from library.module_utils.network.f5.common import iControlUnexpectedHTTPError except ImportError: HAS_F5SDK = False except ImportError: from ansible.module_utils.network.f5.bigip import HAS_F5SDK from ansible.module_utils.network.f5.bigip import F5Client from ansible.module_utils.network.f5.common import F5ModuleError from ansible.module_utils.network.f5.common import AnsibleF5Parameters from ansible.module_utils.network.f5.common import cleanup_tokens from ansible.module_utils.network.f5.common import fq_name from ansible.module_utils.network.f5.common import f5_argument_spec from ansible.module_utils.network.f5.common import is_valid_ip try: from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError except ImportError: HAS_F5SDK = False class Parameters(AnsibleF5Parameters): api_map = { 'timeUntilUp': 'time_until_up', 'defaultsFrom': 'parent', 'recv': 'receive', 'recvDisable': 'receive_disable' } api_attributes = [ 'timeUntilUp', 'defaultsFrom', 'interval', 'timeout', 'recv', 'send', 'destination', 'username', 'password', 'recvDisable' ] returnables = [ 'parent', 'send', 'receive', 'ip', 'port', 'interval', 'timeout', 'time_until_up', 'receive_disable' ] updatables = [ 'destination', 'send', 'receive', 'interval', 'timeout', 'time_until_up', 'target_username', 'target_password', 'receive_disable' ] def to_return(self): result = {} try: for returnable in self.returnables: result[returnable] = getattr(self, returnable) result = self._filter_params(result) except Exception: pass 
return result @property def destination(self): if self.ip is None and self.port is None: return None destination = '{0}:{1}'.format(self.ip, self.port) return destination @destination.setter def destination(self, value): ip, port = value.split(':') self._values['ip'] = ip self._values['port'] = port @property def interval(self): if self._values['interval'] is None: return None # Per BZ617284, the BIG-IP UI does not raise a warning about this. # So I do if 1 > int(self._values['interval']) > 86400: raise F5ModuleError( "Interval value must be between 1 and 86400" ) return int(self._values['interval']) @property def timeout(self): if self._values['timeout'] is None: return None return int(self._values['timeout']) @property def ip(self): if self._values['ip'] is None: return None if self._values['ip'] in ['*', '0.0.0.0']: return '*' elif is_valid_ip(self._values['ip']): return self._values['ip'] else: raise F5ModuleError( "The provided 'ip' parameter is not an IP address." ) @property def port(self): if self._values['port'] is None: return None elif self._values['port'] == '*': return '*' return int(self._values['port']) @property def time_until_up(self): if self._values['time_until_up'] is None: return None return int(self._values['time_until_up']) @property def parent(self): if self._values['parent'] is None: return None result = fq_name(self.partition, self._values['parent']) return result @property def type(self): return 'http' @property def username(self): return self._values['target_username'] @property def password(self): return self._values['target_password'] class Changes(Parameters): pass class Difference(object): def __init__(self, want, have=None): self.want = want self.have = have def compare(self, param): try: result = getattr(self, param) return result except AttributeError: result = self.__default(param) return result @property def parent(self): if self.want.parent != self.have.parent: raise F5ModuleError( "The parent monitor cannot be changed" ) @property def destination(self): if self.want.ip is None and self.want.port is None: return None if self.want.port is None: self.want.update({'port': self.have.port}) if self.want.ip is None: self.want.update({'ip': self.have.ip}) if self.want.port in [None, '*'] and self.want.ip != '*': raise F5ModuleError( "Specifying an IP address requires that a port number be specified" ) if self.want.destination != self.have.destination: return self.want.destination @property def interval(self): if self.want.timeout is not None and self.want.interval is not None: if self.want.interval >= self.want.timeout: raise F5ModuleError( "Parameter 'interval' must be less than 'timeout'." ) elif self.want.timeout is not None: if self.have.interval >= self.want.timeout: raise F5ModuleError( "Parameter 'interval' must be less than 'timeout'." ) elif self.want.interval is not None: if self.want.interval >= self.have.timeout: raise F5ModuleError( "Parameter 'interval' must be less than 'timeout'." 
) if self.want.interval != self.have.interval: return self.want.interval def __default(self, param): attr1 = getattr(self.want, param) try: attr2 = getattr(self.have, param) if attr1 != attr2: return attr1 except AttributeError: return attr1 class ModuleManager(object): def __init__(self, *args, **kwargs): self.module = kwargs.get('module', None) self.client = kwargs.get('client', None) self.have = None self.want = Parameters(params=self.module.params) self.changes = Changes() def _set_changed_options(self): changed = {} for key in Parameters.returnables: if getattr(self.want, key) is not None: changed[key] = getattr(self.want, key) if changed: self.changes = Changes(params=changed) def _update_changed_options(self): diff = Difference(self.want, self.have) updatables = Parameters.updatables changed = dict() for k in updatables: change = diff.compare(k) if change is None: continue else: if isinstance(change, dict): changed.update(change) else: changed[k] = change if changed: self.changes = Changes(params=changed) return True return False def _announce_deprecations(self): warnings = [] if self.want: warnings += self.want._values.get('__warnings', []) if self.have: warnings += self.have._values.get('__warnings', []) for warning in warnings: self.module.deprecate( msg=warning['msg'], version=warning['version'] ) def exec_module(self): changed = False result = dict() state = self.want.state try: if state == "present": changed = self.present() elif state == "absent": changed = self.absent() except iControlUnexpectedHTTPError as e: raise F5ModuleError(str(e)) changes = self.changes.to_return() result.update(**changes) result.update(dict(changed=changed)) self._announce_deprecations() return result def present(self): if self.exists(): return self.update() else: return self.create() def create(self): self._set_changed_options() if self.want.timeout is None: self.want.update({'timeout': 16}) if self.want.interval is None: self.want.update({'interval': 5}) if self.want.time_until_up is None: self.want.update({'time_until_up': 0}) if self.want.ip is None: self.want.update({'ip': '*'}) if self.want.port is None: self.want.update({'port': '*'}) if self.want.send is None: self.want.update({'send': 'GET /\r\n'}) if self.module.check_mode: return True self.create_on_device() return True def should_update(self): result = self._update_changed_options() if result: return True return False def update(self): self.have = self.read_current_from_device() if not self.should_update(): return False if self.module.check_mode: return True self.update_on_device() return True def absent(self): if self.exists(): return self.remove() return False def remove(self): if self.module.check_mode: return True self.remove_from_device() if self.exists(): raise F5ModuleError("Failed to delete the monitor.") return True def read_current_from_device(self): resource = self.client.api.tm.ltm.monitor.https.http.load( name=self.want.name, partition=self.want.partition ) result = resource.attrs return Parameters(params=result) def exists(self): result = self.client.api.tm.ltm.monitor.https.http.exists( name=self.want.name, partition=self.want.partition ) return result def update_on_device(self): params = self.want.api_params() result = self.client.api.tm.ltm.monitor.https.http.load( name=self.want.name, partition=self.want.partition ) result.modify(**params) def create_on_device(self): params = self.want.api_params() self.client.api.tm.ltm.monitor.https.http.create( name=self.want.name, partition=self.want.partition, **params ) def 
remove_from_device(self): result = self.client.api.tm.ltm.monitor.https.http.load( name=self.want.name, partition=self.want.partition ) if result: result.delete() class ArgumentSpec(object): def __init__(self): self.supports_check_mode = True argument_spec = dict( name=dict(required=True), parent=dict(default='/Common/http'), send=dict(), receive=dict(), receive_disable=dict(required=False), ip=dict(), port=dict(type='int'), interval=dict(type='int'), timeout=dict(type='int'), time_until_up=dict(type='int'), target_username=dict(), target_password=dict(no_log=True), state=dict( default='present', choices=['present', 'absent'] ), partition=dict( default='Common', fallback=(env_fallback, ['F5_PARTITION']) ) ) self.argument_spec = {} self.argument_spec.update(f5_argument_spec) self.argument_spec.update(argument_spec) def main(): spec = ArgumentSpec() module = AnsibleModule( argument_spec=spec.argument_spec, supports_check_mode=spec.supports_check_mode ) if not HAS_F5SDK: module.fail_json(msg="The python f5-sdk module is required") try: client = F5Client(**module.params) mm = ModuleManager(module=module, client=client) results = mm.exec_module() cleanup_tokens(client) module.exit_json(**results) except F5ModuleError as ex: cleanup_tokens(client) module.fail_json(msg=str(ex)) if __name__ == '__main__': main()
f5-ansible/library/modules/bigip_monitor_http.py
18,453
!/usr/bin/python -*- coding: utf-8 -*- Copyright (c) 2017 F5 Networks Inc. GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) Per BZ617284, the BIG-IP UI does not raise a warning about this. So I do
237
en
0.679923
import argparse
import os
from solver import Solver
from data_loader import *
#from data_loader import *
from torch.backends import cudnn
from torch.utils import data
from torchvision import transforms as T


def main(config):
    cudnn.benchmark = True
    if not os.path.exists(config.model_path):
        os.makedirs(config.model_path)
    if not os.path.exists(config.result_path):
        os.makedirs(config.result_path)
    config.result_path = os.path.join(config.result_path)
    if not os.path.exists(config.result_path):
        os.makedirs(config.result_path)

    if config.train_dataset == 'african':
        config.img_size = (640, 640)
    elif config.train_dataset == 'asian':
        config.img_size = (640, 480)
    elif config.train_dataset == 'mobile':
        config.img_size = (384, 384)

    print(config)

    train_loader = data.DataLoader(
        Train_dataset(root=config.train_path, dataset_type=config.train_dataset,
                      img_size=config.img_size, transform=traindata_augmentation, mode='train'),
        batch_size=config.batch_size, shuffle=True, num_workers=config.num_workers)
    valid_loader = data.DataLoader(
        Train_valid_dataset(root=config.valid_path, dataset_type=config.train_dataset,
                            img_size=config.img_size, transform=testdata_augmentation, mode='valid'),
        batch_size=config.batch_size, shuffle=True, num_workers=config.num_workers)

    if config.test_mode == 1:
        test1_loader = data.DataLoader(
            Test1_dataset(root=config.test_path, dataset_type=config.train_dataset,
                          img_size=config.img_size, transform=testdata_augmentation, mode='test'),
            batch_size=config.batch_size, shuffle=False, num_workers=config.num_workers)
    elif config.test_mode == 2:
        test2_loader = data.DataLoader(
            Test2_dataset(root=config.test_path, dataset_type=config.train_dataset,
                          img_size=config.img_size, transform=testdata_augmentation, mode='test'),
            batch_size=config.batch_size, shuffle=False, num_workers=config.num_workers)

    if config.test_mode == 1:
        solver = Solver(config, train_loader, valid_loader, test1_loader)
    elif config.test_mode == 2:
        solver = Solver(config, train_loader, valid_loader, test2_loader)

    if config.mode == 'train':
        solver.train()
    elif config.mode == 'test' and config.test_mode == 1:
        solver.test_1()
    elif config.mode == 'test' and config.test_mode == 2:
        solver.test_2()


if __name__ == '__main__':
    parser = argparse.ArgumentParser()

    parser.add_argument('--num_epochs', type=int, default=100)
    parser.add_argument('--batch_size', type=int, default=1)
    parser.add_argument('--num_workers', type=int, default=2)
    parser.add_argument('--lr', type=float, default=0.0002)
    parser.add_argument('--beta1', type=float, default=0.9)    # momentum1 in Adam
    parser.add_argument('--beta2', type=float, default=0.999)  # momentum2 in Adam
    parser.add_argument('--mode', type=str, default='train')
    parser.add_argument('--model_path', type=str, default='./models/african_best.pth')
    parser.add_argument('--img_size', type=tuple, default=(640, 640))
    parser.add_argument('--train_path', type=str, default='./dataset/train/')
    parser.add_argument('--valid_path', type=str, default='./dataset/valid/')
    parser.add_argument('--test_path', type=str, default='./dataset/test/')
    parser.add_argument('--test_mode', type=int, default=1, help='1 or 2')  # If test_mode == 1, evaluation metrics are computed during testing; if == 2, they are not.
    parser.add_argument('--result_path', type=str, default='./result/')
    parser.add_argument('--train_dataset', type=str, default='african',
                        help='choose train dataset: african, asian or mobile')
    config = parser.parse_args()
    main(config)
main.py
4,135
from data_loader import * momentum1 in Adam momentum2 in Adam If test_mode == 1, evaluation metrics are computed during testing; if == 2, they are not.
107
zh
0.357554
import os import shutil from thlib.side.Qt import QtWidgets as QtGui from thlib.side.Qt import QtGui as Qt4Gui from thlib.side.Qt import QtCore from thlib.environment import env_inst, env_tactic, cfg_controls, env_read_config, env_write_config, dl import thlib.global_functions as gf import thlib.tactic_classes as tc from thlib.ui.misc.ui_watch_folders import Ui_ProjectWatchFolder class Ui_projectWatchFoldersWidget(QtGui.QDialog, Ui_ProjectWatchFolder): def __init__(self, project, parent=None): super(self.__class__, self).__init__(parent=parent) self.project = project self.watch_folders_dict = self.get_watch_folders_dict() self.watched_items = set() env_inst.watch_folders[self.project.get_code()] = self self.setupUi(self) self.create_ui() def create_ui(self): self.watchFoldersTreeWidget.setStyleSheet('QTreeView::item {padding: 2px;}') self.setSizeGripEnabled(True) self.setWindowTitle('Watched Assets for Project: {0}'.format(self.project.info.get('title'))) self.create_fs_watcher() self.create_watch_folders_tree_context_menu() self.controls_actions() self.readSettings() self.watchEnabledCheckBox.setEnabled(False) def create_fs_watcher(self): self.fs_watcher = gf.FSObserver() self.fs_watcher.set_created_signal(self.handle_watch_created_event) def create_watch_folders_tree_context_menu(self): self.watchFoldersTreeWidget.setContextMenuPolicy(QtCore.Qt.CustomContextMenu) self.watchFoldersTreeWidget.customContextMenuRequested.connect(self.open_menu) def watch_items_menu(self): # TODO Make this work enable_watch = QtGui.QAction('Enable Watch', self.watchFoldersTreeWidget) enable_watch.setIcon(gf.get_icon('eye')) # enable_watch.triggered.connect(self.open_file_from_tree) disable_watch = QtGui.QAction('Disable Watch', self.watchFoldersTreeWidget) disable_watch.setIcon(gf.get_icon('eye-slash')) # disable_watch.triggered.connect(self.open_file_from_tree) edit_watch = QtGui.QAction('Edit Watch', self.watchFoldersTreeWidget) edit_watch.setIcon(gf.get_icon('edit')) # edit_watch.triggered.connect(self.open_file_from_tree) delete_watch = QtGui.QAction('Delete Watch', self.watchFoldersTreeWidget) delete_watch.setIcon(gf.get_icon('remove')) # edit_watch.triggered.connect(self.open_file_from_tree) menu = QtGui.QMenu() menu.addAction(enable_watch) menu.addAction(disable_watch) menu.addAction(edit_watch) menu.addAction(delete_watch) return menu def open_menu(self): item = self.watchFoldersTreeWidget.currentItem() if item: if item.data(0, QtCore.Qt.UserRole): menu = self.watch_items_menu() if menu: menu.exec_(Qt4Gui.QCursor.pos()) def add_item_to_fs_watch(self, skey, path=None, recursive=True): watch_dict = self.get_watch_dict_by_skey(skey) if not path: path = watch_dict['path'] paths = [] for repo in watch_dict['rep']: abs_path = env_tactic.get_base_dir(repo)['value'][0] + '/' + path paths.append(gf.form_path(abs_path)) self.fs_watcher.append_watch(watch_name=skey, paths=paths, repos=watch_dict['rep'], pipeline=watch_dict['asset_pipeline'], recursive=recursive) def remove_item_from_fs_watch(self, skey): self.fs_watcher.remove_watch(watch_name=skey) def handle_watch_created_event(self, event, watch): dl.log(u'File dropped to watch folder {}'.format(event.src_path), group_id='watch_folder') self.show() self.setWindowState(self.windowState() & ~QtCore.Qt.WindowMinimized | QtCore.Qt.WindowActive) QtGui.QDialog.activateWindow(self) self.show() self.hide() search_key = watch.watch_name pipeline = watch.pipeline commit_path = gf.extract_dirname(event.src_path) if watch.path == commit_path: context = 'publish' else: 
context = gf.form_path(commit_path, 'linux').split('/')[-1] description = 'From watch folder' skey_dict = tc.split_search_key(search_key) checkin_widget = env_inst.get_check_tree( project_code=skey_dict['project_code'], tab_code='checkin_out', wdg_code=skey_dict['pipeline_code'], ) checkin_widget.do_creating_ui() match_template = gf.MatchTemplate(['$FILENAME.$EXT']) files_objects_dict = match_template.get_files_objects([event.src_path]) stypes = self.project.get_stypes() current_stype = stypes.get(skey_dict['pipeline_code']) pipelines = current_stype.get_pipeline() checkin_mode = None if pipelines: # here we do pipelines routine current_pipeline = pipelines.get(pipeline) if not current_pipeline: # looks like we don't have pipeline with Search Type name, so we take first of all # Also this is totally wrong, cause we should know exactly the pipeline and its processes, so need to write proper pipeline_code when creating watch folder current_pipeline = list(pipelines.values())[0] current_process = current_pipeline.get_pipeline_process(context) if current_process: checkin_mode = current_process.get('checkin_mode') else: context = 'publish' checkin_widget.checkin_file_objects( search_key=search_key, context=context, description=description, files_objects=files_objects_dict.get('file'), checkin_type=checkin_mode, keep_file_name=False ) else: # here we go with publish, without pipeline checkin_widget.checkin_file_objects( search_key=search_key, context='publish', description=description, files_objects=files_objects_dict.get('file'), checkin_type=checkin_mode, keep_file_name=False ) def controls_actions(self): pass def fill_watch_folders_tree_widget(self): self.watchFoldersTreeWidget.clear() if self.watch_folders_dict: for i, asset_skey in enumerate(self.watch_folders_dict.get('assets_skeys')): root_item = QtGui.QTreeWidgetItem() root_item.setData(0, QtCore.Qt.UserRole, asset_skey) root_item.setText(1, self.watch_folders_dict['assets_stypes'][i]) root_item.setText(2, self.watch_folders_dict['assets_names'][i]) repos_names = [] for repo in self.watch_folders_dict['repos'][i]: repos_names.append(env_tactic.get_base_dir(repo)['value'][1]) root_item.setText(3, ', '.join(repos_names)) # setting actual watch status if self.watch_folders_dict['statuses'][i]: if self.check_for_item_in_watch(asset_skey): root_item.setText(0, 'Watching') self.start_watch_by_skey(asset_skey) else: root_item.setText(0, 'Waiting') else: root_item.setText(0, 'Stopped') self.stop_watch_by_skey(asset_skey) self.watchFoldersTreeWidget.addTopLevelItem(root_item) self.watchFoldersTreeWidget.resizeColumnToContents(0) self.watchFoldersTreeWidget.resizeColumnToContents(1) self.watchFoldersTreeWidget.resizeColumnToContents(2) self.watchFoldersTreeWidget.resizeColumnToContents(3) if self.watched_items: self.start_watching() else: self.stop_watching() def start_watching(self): if not self.fs_watcher.is_started(): self.fs_watcher.start() def stop_watching(self): if self.fs_watcher.is_started(): self.fs_watcher.stop() def stop_watch_by_skey(self, skey): for item in self.watched_items: if item.get_search_key() == skey: self.remove_item_from_fs_watch(skey) item.watchFolderToolButton.setChecked(False) def start_watch_by_skey(self, skey): for item in self.watched_items: if item.get_search_key() == skey: self.add_item_to_fs_watch(skey, item.get_watch_folder_path(), True) item.watchFolderToolButton.setChecked(True) def check_for_item_in_watch(self, skey): for item in self.watched_items: if item.get_search_key() == skey: if 
item.is_have_watch_folder(): return True def add_item_to_watch(self, sobject_item): # checking if watch folder exists watch_dict = self.get_watch_dict_by_skey(sobject_item.get_search_key()) all_folders_exists = True base_dirs = env_tactic.get_all_base_dirs() for key, val in base_dirs: if val['value'][4] and val['value'][3] in watch_dict['rep']: abs_path = u'{0}/{1}'.format(val['value'][0], watch_dict['path']) if not os.path.exists(gf.form_path(abs_path)): all_folders_exists = False dl.warning('Folders structure for: {0} is not created. ' 'Watch will be ignored.'.format(abs_path), group_id='watch_folders_ui') if all_folders_exists: self.watched_items.add(sobject_item) self.fill_watch_folders_tree_widget() def remove_item_from_watch(self, sobject_item): self.watched_items.discard(sobject_item) def add_asset_to_watch(self, sobject_item): # in case of some bugs double checking if not self.get_watch_dict_by_skey(sobject_item.sobject.get_search_key()): self.create_repo_editor_ui(sobject_item) else: sobject_item.check_watch_folder() def edit_aseet_watch(self, sobject_item): watch_dict = self.get_watch_dict_by_skey(sobject_item.sobject.get_search_key()) if watch_dict: self.create_repo_editor_ui(sobject_item, mode='edit') else: sobject_item.check_watch_folder(True) def delete_aseet_from_watch(self, sobject_item): watch_dict = self.get_watch_dict_by_skey(sobject_item.sobject.get_search_key()) if watch_dict: self.delete_watch_from_watch_folders_dict(sobject_item) else: sobject_item.check_watch_folder(True) @gf.catch_error def create_watch_folders(self, repos_list, sobject_item): # creating base folders with paths for repo in repos_list: abs_path = env_tactic.get_base_dir(repo)['value'][0] + '/' + sobject_item.get_watch_folder_path() # creating folder for publish if not os.path.exists(gf.form_path(abs_path)): os.makedirs(gf.form_path(abs_path)) # creating folders by processes if sobject_item.get_process_list(include_hierarchy=True): for process in sobject_item.get_process_list(include_hierarchy=True): process_abs_path = abs_path + '/' + process if not os.path.exists(gf.form_path(process_abs_path)): os.makedirs(gf.form_path(process_abs_path)) @gf.catch_error def delete_watch_folders_and_files(self, repos_list, sobject_item): def onerror(func, path, exc_info): """ Error handler for ``shutil.rmtree``. If the error is due to an access error (read only file) it attempts to add write permission and then retries. If the error is for another reason it re-raises the error. Usage : ``shutil.rmtree(path, onerror=onerror)`` """ import stat if not os.access(path, os.W_OK): # Is the error an access error ? 
os.chmod(path, stat.S_IWUSR) func(path) # else: # raise for repo in repos_list: abs_path = env_tactic.get_base_dir(repo)['value'][0] + '/' + sobject_item.get_watch_folder_path() if os.path.exists(gf.form_path(abs_path)): shutil.rmtree(gf.form_path(abs_path), ignore_errors=True, onerror=onerror) def add_watch_to_watch_folders_dict(self, repos_list, sobject_item): self.watch_folders_dict['assets_names'].append(sobject_item.get_title()) self.watch_folders_dict['assets_codes'].append(sobject_item.sobject.info.get('code')) self.watch_folders_dict['assets_stypes'].append(sobject_item.stype.get_pretty_name()) self.watch_folders_dict['assets_skeys'].append(sobject_item.sobject.get_search_key()) self.watch_folders_dict['assets_pipelines'].append(sobject_item.sobject.get_pipeline_code()) self.watch_folders_dict['paths'].append(sobject_item.get_watch_folder_path()) self.watch_folders_dict['repos'].append(repos_list) self.watch_folders_dict['statuses'].append(True) self.create_watch_folders(repos_list, sobject_item) sobject_item.check_watch_folder() self.writeSettings() def save_watch_to_watch_folders_dict(self, repos_list, sobject_item): watch_dict = self.get_watch_dict_by_skey(sobject_item.sobject.get_search_key()) if watch_dict: idx = watch_dict['idx'] self.watch_folders_dict['assets_names'][idx] = sobject_item.get_title() self.watch_folders_dict['assets_codes'][idx] = sobject_item.sobject.info.get('code') self.watch_folders_dict['assets_stypes'][idx] = sobject_item.stype.get_pretty_name() self.watch_folders_dict['assets_skeys'][idx] = sobject_item.sobject.get_search_key() self.watch_folders_dict['assets_pipelines'][idx] = sobject_item.sobject.get_pipeline_code() self.watch_folders_dict['paths'][idx] = sobject_item.get_watch_folder_path() self.watch_folders_dict['repos'][idx] = repos_list self.create_watch_folders(repos_list, sobject_item) sobject_item.check_watch_folder() self.writeSettings() def edit_watch_to_watch_folders_dict(self, sobject_item, asset_name=None, asset_code=None,asset_stype=None, asset_skey=None, asset_pipeline=None, path=None, repo=None, status=False): watch_dict = self.get_watch_dict_by_skey(sobject_item.sobject.get_search_key()) if watch_dict: idx = watch_dict['idx'] if asset_name: self.watch_folders_dict['assets_names'][idx] = sobject_item.get_title() if asset_code: self.watch_folders_dict['assets_codes'][idx] = sobject_item.sobject.info.get('code') if asset_stype: self.watch_folders_dict['assets_stypes'][idx] = sobject_item.stype.get_pretty_name() if asset_skey: self.watch_folders_dict['assets_skeys'][idx] = sobject_item.sobject.get_search_key() if asset_pipeline: self.watch_folders_dict['assets_pipelines'][idx] = sobject_item.sobject.get_pipeline_code() if path: self.watch_folders_dict['paths'][idx] = path if repo: self.watch_folders_dict['repos'][idx] = repo self.watch_folders_dict['statuses'][idx] = status sobject_item.check_watch_folder() self.fill_watch_folders_tree_widget() self.writeSettings() def delete_watch_from_watch_folders_dict(self, sobject_item): buttons = (('Remove', QtGui.QMessageBox.YesRole), ('Keep', QtGui.QMessageBox.ActionRole), ('Cancel', QtGui.QMessageBox.NoRole)) reply = gf.show_message_predefined( 'Remove Watch Folder dirs from repos?', 'Watch Folder Directories and Files can also be removed from Your Repositories' '<br>Remove or Keep this Dirs and Files?</br>', buttons=buttons, message_type='question' ) delete_files = False delete_watch_folder = False if reply == QtGui.QMessageBox.YesRole: delete_files = True delete_watch_folder = True elif reply 
== QtGui.QMessageBox.ActionRole: delete_files = False delete_watch_folder = True if delete_watch_folder: self.stop_watch_by_skey(sobject_item.sobject.get_search_key()) idx = self.get_watch_dict_by_skey(sobject_item.sobject.get_search_key())['idx'] self.watch_folders_dict['assets_names'].pop(idx) self.watch_folders_dict['assets_codes'].pop(idx) self.watch_folders_dict['assets_stypes'].pop(idx) self.watch_folders_dict['assets_skeys'].pop(idx) self.watch_folders_dict['assets_pipelines'].pop(idx) self.watch_folders_dict['paths'].pop(idx) repos = self.watch_folders_dict['repos'].pop(idx) self.watch_folders_dict['statuses'].pop(idx) sobject_item.check_watch_folder(True) self.writeSettings() if delete_files: self.delete_watch_folders_and_files(repos, sobject_item) def create_repo_editor_ui(self, sobject_item, mode='create'): add_watch_ui = Ui_repositoryEditorWidget(sobject_item=sobject_item, mode=mode, parent=env_inst.ui_main) add_watch_ui.saved_signal.connect(self.add_watch_to_watch_folders_dict) add_watch_ui.edited_signal.connect(self.save_watch_to_watch_folders_dict) add_watch_ui.exec_() def set_watch_folders_from_dict(self, watch_folders_dict=None): if watch_folders_dict: print('FILLING WATCH FOLDER') def get_watch_dict_by_skey(self, skey): if self.watch_folders_dict: for i, asset_skey in enumerate(self.watch_folders_dict.get('assets_skeys')): if skey == asset_skey: return { 'asset_code': self.watch_folders_dict['assets_codes'][i], 'asset_name': self.watch_folders_dict['assets_names'][i], 'asset_stype': self.watch_folders_dict['assets_stypes'][i], 'asset_skey': self.watch_folders_dict['assets_skeys'][i], 'asset_pipeline': self.watch_folders_dict['assets_pipelines'][i], 'path': self.watch_folders_dict['paths'][i], 'rep': self.watch_folders_dict['repos'][i], 'status': self.watch_folders_dict['statuses'][i], 'idx': i, } @staticmethod def get_watch_folders_dict(): return { 'assets_codes': [], 'assets_names': [], 'assets_stypes': [], 'assets_skeys': [], 'assets_pipelines': [], 'paths': [], 'repos': [], 'statuses': [], } def set_settings_from_dict(self, settings_dict=None): ref_settings_dict = { 'watch_folders_dict': self.watch_folders_dict, } settings = gf.check_config(ref_settings_dict, settings_dict) self.watch_folders_dict = settings['watch_folders_dict'] def get_settings_dict(self): settings_dict = { 'watch_folders_dict': self.watch_folders_dict, } return settings_dict def readSettings(self): self.set_settings_from_dict(env_read_config( filename='ui_watch_folder', unique_id='ui_main/{0}/{1}'.format(self.project.get_type(), self.project.get_code()), long_abs_path=True)) def writeSettings(self): env_write_config( self.get_settings_dict(), filename='ui_watch_folder', unique_id='ui_main/{0}/{1}'.format(self.project.get_type(), self.project.get_code()), long_abs_path=True) def showEvent(self, event): event.accept() self.fill_watch_folders_tree_widget() def closeEvent(self, event): self.writeSettings() event.accept() class Ui_repositoryEditorWidget(QtGui.QDialog): saved_signal = QtCore.Signal(object, object) edited_signal = QtCore.Signal(object, object) def __init__(self, sobject_item, mode='create', parent=None): super(self.__class__, self).__init__(parent=parent) self.sobject_item = sobject_item self.mode = mode self.saved = False self.exclude_repo_list = self.get_exclude_repo_list() self.create_ui() def create_ui(self): if self.mode == 'create': self.setWindowTitle('Choose Repositories to Watch') else: self.setWindowTitle('Editing Watch Folders') self.resize(600, 420) 
self.setSizeGripEnabled(True) self.creat_layout() self.create_repo_path_line_edit() self.create_repo_combo_box() self.create_repos_tree_widget() self.create_buttons() if self.mode == 'edit': self.fill_repo_combo_box(self.exclude_repo_list) self.fill_repo_tree_widget(self.exclude_repo_list) else: self.fill_repo_combo_box() self.fill_repo_tree_widget() self.check_save_ability() self.controls_actions() def controls_actions(self): self.add_new_button.clicked.connect(self.add_new_repo) self.remove_button.clicked.connect(self.delete_selected_repo) self.save_button.clicked.connect(self.save_and_close) self.close_button.clicked.connect(self.close) def creat_layout(self): self.main_layout = QtGui.QGridLayout() self.main_layout.setContentsMargins(9, 9, 9, 9) self.main_layout.setColumnStretch(0, 1) self.setLayout(self.main_layout) def create_repos_tree_widget(self): self.repos_tree_widget = QtGui.QTreeWidget() self.repos_tree_widget.setSelectionMode(QtGui.QAbstractItemView.ExtendedSelection) self.repos_tree_widget.setEditTriggers(QtGui.QAbstractItemView.NoEditTriggers) self.repos_tree_widget.setRootIsDecorated(False) self.repos_tree_widget.setHeaderHidden(True) self.repos_tree_widget.setObjectName('repos_tree_widget') self.repos_tree_widget.setStyleSheet(gf.get_qtreeview_style()) self.main_layout.addWidget(self.repos_tree_widget, 2, 0, 2, 1) def create_repo_path_line_edit(self): self.repo_path_line_edit_layout = QtGui.QHBoxLayout() self.repo_path_line_edit_layout.addWidget(QtGui.QLabel('Relative Watch Path:')) self.repo_path_line_edit = QtGui.QLineEdit() self.repo_path_line_edit_layout.addWidget(self.repo_path_line_edit) if self.mode == 'create': paths = tc.get_dirs_with_naming(self.sobject_item.get_search_key(), process_list=['watch_folder']) self.repo_path_line_edit.setText(paths['versionless'][0]) elif self.mode == 'edit': self.repo_path_line_edit.setText(self.sobject_item.get_watch_folder_path()) self.main_layout.addLayout(self.repo_path_line_edit_layout, 0, 0, 1, 2) def create_repo_combo_box(self): self.repo_combo_box = QtGui.QComboBox() self.main_layout.addWidget(self.repo_combo_box, 1, 0, 1, 1) def check_save_ability(self): if self.repos_tree_widget.topLevelItemCount() < 1: self.save_button.setEnabled(False) else: self.save_button.setEnabled(True) def get_exclude_repo_list(self): watch_folder_ui = env_inst.watch_folders.get(self.sobject_item.project.get_code()) watch_dict = watch_folder_ui.get_watch_dict_by_skey(self.sobject_item.get_search_key()) if watch_dict: return watch_dict['rep'] else: return [] def fill_repo_combo_box(self, exlude_list=None): self.repo_combo_box.clear() if not exlude_list: exlude_list = [] base_dirs = env_tactic.get_all_base_dirs() # Default repo states for key, val in base_dirs: if val['value'][4] and val['value'][3] not in exlude_list: self.repo_combo_box.addItem(val['value'][1]) self.repo_combo_box.setItemData(self.repo_combo_box.count() - 1, val) self.repo_combo_box.addItem('All Repos') current_repo = gf.get_value_from_config(cfg_controls.get_checkin(), 'repositoryComboBox') if current_repo: self.repo_combo_box.setCurrentIndex(current_repo) def fill_repo_tree_widget(self, exlude_list=None): self.repos_tree_widget.clear() if not exlude_list: exlude_list = [] base_dirs = env_tactic.get_all_base_dirs() # Default repo states for key, val in base_dirs: if val['value'][4] and val['value'][3] in exlude_list: root_item = QtGui.QTreeWidgetItem() root_item.setText(0, val['value'][1]) root_item.setData(0, QtCore.Qt.UserRole, val) 
self.repos_tree_widget.addTopLevelItem(root_item) def create_buttons(self): self.add_new_button = QtGui.QPushButton('Add') self.add_new_button.setMinimumWidth(90) self.remove_button = QtGui.QPushButton('Remove') self.remove_button.setMinimumWidth(90) self.save_button = QtGui.QPushButton('Save and Close') self.save_button.setMinimumWidth(90) self.close_button = QtGui.QPushButton('Cancel') self.close_button.setMinimumWidth(90) self.main_layout.addWidget(self.add_new_button, 1, 1, 1, 1) self.main_layout.addWidget(self.remove_button, 2, 1, 1, 1) self.main_layout.addWidget(self.save_button, 4, 0, 1, 1) self.main_layout.addWidget(self.close_button, 4, 1, 1, 1) spacer = QtGui.QSpacerItem(0, 0, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding) self.main_layout.addItem(spacer, 3, 1, 1, 1) def add_new_repo(self): current_repo_index = self.repo_combo_box.currentIndex() current_repo = self.repo_combo_box.itemData(current_repo_index) if current_repo: self.repo_combo_box.removeItem(current_repo_index) root_item = QtGui.QTreeWidgetItem() root_item.setText(0, current_repo['value'][1]) root_item.setData(0, QtCore.Qt.UserRole, current_repo) self.exclude_repo_list.append(current_repo['value'][3]) self.repos_tree_widget.addTopLevelItem(root_item) else: for i in range(self.repo_combo_box.count()-1): current_repo = self.repo_combo_box.itemData(i) root_item = QtGui.QTreeWidgetItem() root_item.setText(0, current_repo['value'][1]) root_item.setData(0, QtCore.Qt.UserRole, current_repo) self.exclude_repo_list.append(current_repo['value'][3]) self.repos_tree_widget.addTopLevelItem(root_item) self.fill_repo_combo_box(self.exclude_repo_list) self.check_save_ability() def delete_selected_repo(self): current_repo_item = self.repos_tree_widget.currentItem() if current_repo_item: current_repo = current_repo_item.data(0, QtCore.Qt.UserRole) self.exclude_repo_list.remove(current_repo['value'][3]) self.repos_tree_widget.takeTopLevelItem(self.repos_tree_widget.currentIndex().row()) self.fill_repo_combo_box(self.exclude_repo_list) self.check_save_ability() def set_saved(self): self.saved = True def save_and_close(self): self.set_saved() params = (self.get_repos_list(), self.sobject_item) self.sobject_item.set_watch_folder_path(str(self.repo_path_line_edit.text())) if self.mode == 'create': self.saved_signal.emit(*params) if self.mode == 'edit': self.edited_signal.emit(*params) self.close() def get_repos_list(self): repos_list = [] for i in range(self.repos_tree_widget.topLevelItemCount()): top_item = self.repos_tree_widget.topLevelItem(i) repo_dict = top_item.data(0, QtCore.Qt.UserRole) repos_list.append(repo_dict['value'][3]) return repos_list
thlib/ui_classes/ui_watch_folder_classes.py
28,918
Error handler for ``shutil.rmtree``. If the error is due to an access error (read only file) it attempts to add write permission and then retries. If the error is for another reason it re-raises the error. Usage : ``shutil.rmtree(path, onerror=onerror)`` TODO Make this work enable_watch.triggered.connect(self.open_file_from_tree) disable_watch.triggered.connect(self.open_file_from_tree) edit_watch.triggered.connect(self.open_file_from_tree) edit_watch.triggered.connect(self.open_file_from_tree) here we do pipelines routine looks like we don't have pipeline with Search Type name, so we take first of all Also this is totally wrong, cause we should know exactly the pipeline and its processes, so need to write proper pipeline_code when creating watch folder here we go with publish, without pipeline setting actual watch status checking if watch folder exists in case of some bugs double checking creating base folders with paths creating folder for publish creating folders by processes Is the error an access error ? else: raise Default repo states Default repo states
1,081
en
0.780217
# !/usr/bin/env python
# coding: utf-8

'''
Description:
Given a binary tree, return all root-to-leaf paths.
For example, given the following binary tree:
   1
 /   \
2     3
 \
  5
All root-to-leaf paths are: ["1->2->5", "1->3"]

Tags: Tree, Depth-first Search
'''

# Definition for a binary tree node.
# class TreeNode:
#     def __init__(self, x):
#         self.val = x
#         self.left = None
#         self.right = None

class Solution:
    # @param {TreeNode} root
    # @return {string[]}
    def binaryTreePaths(self, root):
        result, path = [], []
        # Recurse from the root; the helper appends one path string per leaf.
        self.binaryTreePathsRecu(root, path, result)
        return result

    def binaryTreePathsRecu(self, node, path, result):
        if node is None:
            return
        if node.left is node.right is None:
            ans = ''
            for n in path:
                ans += str(n.val) + '->'
            result.append(ans + str(node.val))
        if node.left:
            path.append(node)
            self.binaryTreePathsRecu(node.left, path, result)
            path.pop()
        if node.right:
            path.append(node)
            self.binaryTreePathsRecu(node.right, path, result)
            path.pop()
python/Tree/257_binary_tree_paths.py
1,254
Description: Given a binary tree, return all root-to-leaf paths. For example, given the following binary tree: 1 / 2 3 5 All root-to-leaf paths are: ["1->2->5", "1->3"] Tags: Tree, Depth-first Search !/usr/bin/env python coding: utf-8 Definition for a binary tree node. class TreeNode: def __init__(self, x): self.val = x self.left = None self.right = None @param {TreeNode} root @return {string[]}
495
en
0.623565
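A minimal usage sketch for the corrected solution above, assuming the TreeNode class from the commented-out definition in the file; it rebuilds the example tree from the docstring:

class TreeNode:
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None

# Example tree from the docstring: 1 -> (2, 3), 2 -> (None, 5)
root = TreeNode(1)
root.left = TreeNode(2)
root.right = TreeNode(3)
root.left.right = TreeNode(5)
print(Solution().binaryTreePaths(root))  # expected: ['1->2->5', '1->3']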
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: anze@reciprocitylabs.com
# Maintained By: anze@reciprocitylabs.com

"""Add finished/verified dates to cycle tasks

Revision ID: 13e52f6a9deb
Revises: 18bdb0671010
Create Date: 2016-01-04 13:52:43.017848

"""

from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql

# revision identifiers, used by Alembic.
revision = '13e52f6a9deb'
down_revision = '18bdb0671010'


def upgrade():
    op.add_column('cycle_task_group_object_tasks',
                  sa.Column('finished_date', sa.DateTime(), nullable=True))
    op.add_column('cycle_task_group_object_tasks',
                  sa.Column('verified_date', sa.DateTime(), nullable=True))
    op.execute("""
        UPDATE cycle_task_group_object_tasks
        SET finished_date = updated_at
        WHERE status = "Finished"
    """)
    op.execute("""
        UPDATE cycle_task_group_object_tasks
        SET verified_date = updated_at,
            finished_date = updated_at
        WHERE status = "Verified"
    """)


def downgrade():
    op.drop_column('cycle_task_group_object_tasks', 'verified_date')
    op.drop_column('cycle_task_group_object_tasks', 'finished_date')
src/ggrc_workflows/migrations/versions/20160104135243_13e52f6a9deb_add_finished_verified_dates_to_cycle_.py
1,247
Add finished/verified dates to cycle tasks Revision ID: 13e52f6a9deb Revises: 18bdb0671010 Create Date: 2016-01-04 13:52:43.017848 Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file> Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file> Created By: anze@reciprocitylabs.com Maintained By: anze@reciprocitylabs.com revision identifiers, used by Alembic.
403
en
0.678294
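A hedged sketch of how a revision like the one above is typically applied with Alembic's command API; the alembic.ini path is an assumption about the surrounding project, which may instead run migrations through its own manage script:

from alembic import command
from alembic.config import Config

# Upgrade the database to this specific revision (use "head" for the latest);
# assumes an alembic.ini that points at the project's migrations directory.
cfg = Config("alembic.ini")
command.upgrade(cfg, "13e52f6a9deb")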
# Time:  O(n^2)
# Space: O(1)

class Solution(object):
    def triangleNumber(self, nums):
        """
        :type nums: List[int]
        :rtype: int
        """
        result = 0
        nums.sort()
        for i in reversed(xrange(2, len(nums))):
            left, right = 0, i-1
            while left < right:
                if nums[left]+nums[right] > nums[i]:
                    result += right-left
                    right -= 1
                else:
                    left += 1
        return result


# Time:  O(n^2)
# Space: O(1)
class Solution2(object):
    def triangleNumber(self, nums):
        """
        :type nums: List[int]
        :rtype: int
        """
        result = 0
        nums.sort()
        for i in xrange(len(nums)-2):
            if nums[i] == 0:
                continue
            k = i+2
            for j in xrange(i+1, len(nums)-1):
                while k < len(nums) and nums[i] + nums[j] > nums[k]:
                    k += 1
                result += k-j-1
        return result
Python/valid-triangle-number.py
1,033
:type nums: List[int] :rtype: int :type nums: List[int] :rtype: int Time: O(n^2) Space: O(1) Time: O(n^2) Space: O(1)
121
en
0.280425
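A brief usage sketch for the two triangle-counting solutions above; the file is written for Python 2 (xrange, print statement), so the calls below assume a Python 2 interpreter:

# [2, 2, 3, 4] contains exactly three valid triangles:
# (2, 2, 3) and the two (2, 3, 4) combinations using either 2.
print Solution().triangleNumber([2, 2, 3, 4])   # expected: 3
print Solution2().triangleNumber([2, 2, 3, 4])  # expected: 3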
# Distributed under the MIT License. # See LICENSE.txt for details. import numpy as np from Evolution.Systems.CurvedScalarWave.Characteristics import ( char_speed_vpsi, char_speed_vzero, char_speed_vplus, char_speed_vminus) def error(face_mesh_velocity, normal_covector, normal_vector, psi, phi, inertial_coords, gamma1, gamma2, lapse, shift, dt_psi, dt_pi, dt_phi, d_psi, d_phi): return None def dt_psi_constraint_preserving_spherical_radiation( face_mesh_velocity, normal_covector, normal_vector, psi, phi, inertial_coords, gamma1, gamma2, lapse, shift, dt_psi, dt_pi, dt_phi, d_psi, d_phi): char_speed_psi = char_speed_vpsi(gamma1, lapse, shift, normal_covector) if face_mesh_velocity is not None: char_speed_psi -= np.dot(normal_covector, face_mesh_velocity) return np.dot(normal_vector, d_psi - phi) * min(0., char_speed_psi) def dt_phi_constraint_preserving_spherical_radiation( face_mesh_velocity, normal_covector, normal_vector, psi, phi, inertial_coords, gamma1, gamma2, lapse, shift, dt_psi, dt_pi, dt_phi, d_psi, d_phi): char_speed_zero = char_speed_vzero(gamma1, lapse, shift, normal_covector) if face_mesh_velocity is not None: char_speed_zero -= np.dot(normal_covector, face_mesh_velocity) return 0.5 * np.einsum("ij,j", d_phi.T - d_phi, normal_vector) * min( 0, char_speed_zero) def dt_pi_constraint_preserving_spherical_radiation( face_mesh_velocity, normal_covector, normal_vector, psi, phi, inertial_coords, gamma1, gamma2, lapse, shift, dt_psi, dt_pi, dt_phi, d_psi, d_phi): dt_psi_correction = dt_psi_constraint_preserving_spherical_radiation( face_mesh_velocity, normal_covector, normal_vector, psi, phi, inertial_coords, gamma1, gamma2, lapse, shift, dt_psi, dt_pi, dt_phi, d_psi, d_phi) inv_radius = 1. / np.linalg.norm(inertial_coords) bc_dt_pi = (2. * inv_radius**2 * psi + 4. * inv_radius * dt_psi + 4. * inv_radius * np.dot(normal_vector, phi) + 2. * np.dot(normal_vector, dt_phi) + np.dot(shift, dt_phi) + np.einsum("i,j,ij", normal_vector, normal_vector, d_phi)) bc_dt_pi /= lapse return bc_dt_pi - dt_pi + gamma2 * dt_psi_correction
tests/Unit/Evolution/Systems/CurvedScalarWave/BoundaryConditions/ConstraintPreservingSphericalRadiation.py
2,278
Distributed under the MIT License. See LICENSE.txt for details.
63
en
0.534709
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # # Copyright 2017-2020 AVSystem <avsystem@avsystem.com> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import urllib.request import argparse import collections import logging import sys import os from xml.etree import ElementTree from itertools import groupby from operator import attrgetter class Lwm2mObjectEntry: """ LwM2M Object Registry entry. Available attributes are the same as tag names in the DDF XML structure. """ def __init__(self, tree): self._tree = tree def __getattr__(self, name): node = self._tree.find(name) if node is not None and node.text is not None: return node.text.strip() return self._tree.get(name) def __lt__(self, other): return (self.ObjectID, self.Ver) < (other.ObjectID, other.Ver) def _read_url(url: str) -> bytes: # we need to change the User-Agent - default one causes the server # to respond with 403 Forbidden req = urllib.request.Request(url, headers={'User-Agent': 'Mozilla/5.0'}) with urllib.request.urlopen(req) as f: return f.read() class Lwm2mObjectRegistry: def __init__(self, repo_url='https://raw.githubusercontent.com/OpenMobileAlliance/lwm2m-registry/test'): self.repo_url = repo_url ddf_url = repo_url + '/DDF.xml' root = ElementTree.fromstring(_read_url(ddf_url)) entries = (Lwm2mObjectEntry(obj) for obj in root.findall('Item')) grouped = ((int(key), list(group)) for key, group in groupby(entries, attrgetter('ObjectID'))) self.objects = collections.OrderedDict(grouped) def _print_object_list(): for oid, objs in Lwm2mObjectRegistry().objects.items(): for obj in objs: print('%d\t%s\t%s' % (oid, obj.Ver, obj.Name)) def get_object_definition(urn_or_oid, version): urn = urn_or_oid.strip() if urn.startswith('urn:oma:lwm2m:'): oid = int(urn.split(':')[-1]) else: oid = int(urn) try: registry = Lwm2mObjectRegistry() objects = registry.objects[oid] available_versions_message = 'Available versions for object with ID %d: %s' % ( oid, ', '.join(str(obj.Ver) for obj in objects)) if version is None: if (len(objects) > 1): logging.info('%s; defaulting to maximum available version: %s' % ( available_versions_message, max(objects).Ver)) object_ddf_url = max(objects).DDF else: object_ddf_url = next(obj for obj in objects if obj.Ver == version).DDF if not object_ddf_url: raise ValueError("Object with ID = %d doesn't have attached XML definition" % oid) if not object_ddf_url.startswith('http'): object_ddf_url = registry.repo_url + '/' + object_ddf_url return _read_url(object_ddf_url).decode('utf-8-sig') except KeyError: raise ValueError('Object with ID = %d not found' % oid) except StopIteration: raise ValueError(available_versions_message) def _print_object_definition(urn_or_oid, version): print(get_object_definition(urn_or_oid, version)) if __name__ == '__main__': logging.getLogger().setLevel(logging.INFO) parser = argparse.ArgumentParser(description="Accesses LwM2M Object registry") parser.add_argument("-l", "--list", action='store_true', help="List all registered LwM2M Objects") parser.add_argument("-g", "--get-xml", type=str, 
metavar='urn_or_oid', help="Get Object definition XML by URN or ID") parser.add_argument("-v", "--object-version", metavar='ver', type=str, help= "Explicitly choose version of an object if there exists more than one with the same ObjectID. Applicable only " "with --get-xml argument. Without --object-version specified, most up to date version is chosen.") args = parser.parse_args() if args.list and args.get_xml is not None: print('conflicting options: --list, --get-xml', file=sys.stderr) sys.exit(1) if args.object_version is not None and args.get_xml is None: print('--object-version option is applicable only with --get-xml', file=sys.stderr) sys.exit(1) if args.list: _print_object_list() elif args.get_xml is not None: _print_object_definition(args.get_xml, args.object_version) else: parser.print_usage() sys.exit(1)
tools/lwm2m_object_registry.py
4,934
LwM2M Object Registry entry. Available attributes are the same as tag names in the DDF XML structure. !/usr/bin/env python3 -*- coding: utf-8 -*- Copyright 2017-2020 AVSystem <avsystem@avsystem.com> Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. we need to change the User-Agent - default one causes the server to respond with 403 Forbidden
817
en
0.797518
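A short usage sketch for the registry helpers above; it assumes the script is importable as lwm2m_object_registry and that network access to the lwm2m-registry GitHub mirror is available, since every call fetches DDF XML over HTTP:

from lwm2m_object_registry import get_object_definition

# Fetch the DDF XML for object ID 3 (the URN form 'urn:oma:lwm2m:oma:3' also works);
# version=None picks the newest registered version when several exist.
print(get_object_definition('3', None))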
#!/usr/bin/python3 import os, argparse, difflib lookup = { "flare-form-field": "viur-form-bone", "flare-form-submit": "viur-form-submit", "flare-form": "viur-form", "boneField": "ViurFormBone", "sendForm": "ViurFormSubmit", "viurForm": "ViurForm", "boneSelector": "BoneSelector", "moduleWidgetSelector": "ModuleWidgetSelector", "displayDelegateSelector": "DisplayDelegateSelector", "from flare.forms.formtags import": "from flare.viur import", "from flare.forms": "from flare.viur", } if __name__ == "__main__": # Get arguments ap = argparse.ArgumentParser( description="Flare source code porting tool" ) ap.add_argument( "project_root", type=str, help="Flare project root" ) ap.add_argument( "-d", "--dryrun", action="store_true", help="Dry-run for testing, don't modify files" ) ap.add_argument( "-x", "--daredevil", action="store_true", help="Don't make backups of files, just replace and deal with it" ) args = ap.parse_args() # Iterate all files in current folder for root, dirs, files in os.walk(args.project_root): # Ignore ViUR library folders if any(ignore in root for ignore in ["flare"]): continue for filename in files: # Ignore anything without a .py-extension ext = os.path.splitext(filename)[1].lower()[1:] if ext not in ["py"]: continue filename = os.path.join(root, filename) with open(filename, "r") as f: original_content = content = f.read() count = 0 for k, v in lookup.items(): if k in content: content = content.replace(k, v) count += 1 if count: if not args.dryrun: if not args.daredevil: os.rename(filename, filename + ".bak") with open(filename, "w") as f: f.write(content) print("Modified %r" % filename) else: print( "\n".join( difflib.unified_diff( original_content.splitlines(), content.splitlines(), filename, filename ) ) )
tools/flare-update.py
2,576
!/usr/bin/python3 Get arguments Iterate all files in current folder Ignore ViUR library folders Ignore anything without a .py-extension
135
en
0.494719
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###################################################################
# Author: Mu yanru
# Date  : 2019.3
# Email : muyanru345@163.com
###################################################################

"""MDockWidget"""

from dayu_widgets.qt import QDockWidget


class MDockWidget(QDockWidget):
    """
    Just apply the qss. No more extend.
    """

    def __init__(self, title='', parent=None, flags=0):
        super(MDockWidget, self).__init__(title, parent=parent, flags=flags)
dayu_widgets/dock_widget.py
529
Just apply the qss. No more extend. MDockWidget !/usr/bin/env python -*- coding: utf-8 -*- Author: Mu yanru Date : 2019.3 Email : muyanru345@163.com
150
en
0.374809
# This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. from gaiatest import GaiaTestCase from gaiatest.apps.ftu.app import Ftu from gaiatest.apps.homescreen.app import Homescreen class TestFtu(GaiaTestCase): def setUp(self): GaiaTestCase.setUp(self) self.ftu = Ftu(self.marionette) self.ftu.launch() def test_ftu_with_tour(self): """ https://moztrap.mozilla.org/manage/case/6119/ """ # Go through the FTU setup as quickly as possible to get to the Tour section self.ftu.run_ftu_setup_with_default_values() # Take the tour self.ftu.tap_take_tour() # Walk through the tour self.assertEqual(self.ftu.step1_header_text, "Swipe up and down to browse your apps and bookmarks. Tap and hold an icon to delete, move, or edit it.") self.ftu.tap_tour_next() self.assertEqual(self.ftu.step2_header_text, "Tap to expand and collapse app groups. Drag an app into a new space to create a group.") self.ftu.tap_tour_next() self.assertEqual(self.ftu.step3_header_text, "Swipe down to access recent notifications, usage information and settings.") self.ftu.tap_tour_next() self.assertEqual(self.ftu.step4_header_text, "Drag from the left edge of your screen to return to recently used apps.") self.ftu.tap_tour_next() self.assertEqual(self.ftu.step5_header_text, "Tap on the search box anytime to start a search or go to a website.") # Try going back a step self.ftu.tap_back() self.assertEqual(self.ftu.step4_header_text, "Drag from the left edge of your screen to return to recently used apps.") self.ftu.tap_tour_next() self.assertEqual(self.ftu.step5_header_text, "Tap on the search box anytime to start a search or go to a website.") self.ftu.tap_tour_next() self.ftu.wait_for_finish_tutorial_section() self.ftu.tap_lets_go_button() # Switch back to top level now that FTU app is gone self.wait_for_condition(lambda m: self.apps.displayed_app.name == Homescreen.name)
tests/python/gaia-ui-tests/gaiatest/tests/functional/ftu/test_ftu_with_tour.py
2,257
https://moztrap.mozilla.org/manage/case/6119/ This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain one at http://mozilla.org/MPL/2.0/. Go through the FTU setup as quickly as possible to get to the Tour section Take the tour Walk through the tour Try going back a step Switch back to top level now that FTU app is gone
423
en
0.922057
import os import pytest from cassis import * FIXTURE_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "test_files") # Small xmi @pytest.fixture def small_xmi_path(): return os.path.join(FIXTURE_DIR, "xmi", "small_cas.xmi") @pytest.fixture def small_xmi(small_xmi_path): with open(small_xmi_path, "r") as f: return f.read() # CAS with inheritance @pytest.fixture def cas_with_inheritance_xmi_path(): return os.path.join(FIXTURE_DIR, "xmi", "cas_with_inheritance.xmi") @pytest.fixture def cas_with_inheritance_xmi(cas_with_inheritance_xmi_path): with open(cas_with_inheritance_xmi_path, "r") as f: return f.read() # Small type system @pytest.fixture def small_typesystem_path(): return os.path.join(FIXTURE_DIR, "typesystems", "small_typesystem.xml") @pytest.fixture def small_typesystem_xml(small_typesystem_path): with open(small_typesystem_path, "r") as f: return f.read() # Small type system with document annotation @pytest.fixture def small_typesystem_with_predefined_types_path(): return os.path.join(FIXTURE_DIR, "typesystems", "small_typesystem_with_predefined_types.xml") @pytest.fixture def small_typesystem_with_predefined_types_xml(small_typesystem_with_predefined_types_path): with open(small_typesystem_with_predefined_types_path, "r") as f: return f.read() # Type system with types without namespace # https://github.com/dkpro/dkpro-cassis/issues/43 @pytest.fixture def typesystem_has_types_with_no_namespace_path(): return os.path.join(FIXTURE_DIR, "typesystems", "typesystem_has_types_with_no_namespace.xml") @pytest.fixture def typesystem_has_types_with_no_namespace_xml(typesystem_has_types_with_no_namespace_path): with open(typesystem_has_types_with_no_namespace_path, "r") as f: return f.read() # Type system with inheritance @pytest.fixture def typesystem_with_inheritance_path(): return os.path.join(FIXTURE_DIR, "typesystems", "typesystem_with_inheritance.xml") @pytest.fixture def typesystem_with_inheritance_xml(typesystem_with_inheritance_path): with open(typesystem_with_inheritance_path, "r") as f: return f.read() @pytest.fixture def dkpro_typesystem_path(): return os.path.join(FIXTURE_DIR, "typesystems", "important-dkpro-types.xml") @pytest.fixture def dkpro_typesystem_xml(dkpro_typesystem_path): with open(dkpro_typesystem_path, "r") as f: return f.read() # Annotations @pytest.fixture def tokens(small_typesystem_xml): typesystem = load_typesystem(small_typesystem_xml) TokenType = typesystem.get_type("cassis.Token") return [ TokenType(xmiID=3, sofa=1, begin=0, end=3, id="0", pos="NNP"), TokenType(xmiID=4, sofa=1, begin=4, end=10, id="1", pos="VBD"), TokenType(xmiID=5, sofa=1, begin=11, end=14, id="2", pos="IN"), TokenType(xmiID=6, sofa=1, begin=15, end=18, id="3", pos="DT"), TokenType(xmiID=7, sofa=1, begin=19, end=24, id="4", pos="NN"), TokenType(xmiID=8, sofa=1, begin=25, end=26, id="5", pos="."), TokenType(xmiID=9, sofa=1, begin=27, end=30, id="6", pos="DT"), TokenType(xmiID=10, sofa=1, begin=31, end=36, id="7", pos="NN"), TokenType(xmiID=11, sofa=1, begin=37, end=40, id="8", pos="VBD"), TokenType(xmiID=12, sofa=1, begin=41, end=45, id="9", pos="JJ"), TokenType(xmiID=13, sofa=1, begin=46, end=47, id="10", pos="."), ] @pytest.fixture def sentences(small_typesystem_xml): typesystem = load_typesystem(small_typesystem_xml) SentenceType = typesystem.get_type("cassis.Sentence") return [ SentenceType(xmiID=14, sofa=1, begin=0, end=26, id="0"), SentenceType(xmiID=15, sofa=1, begin=27, end=47, id="1"), ]
tests/fixtures.py
3,769
Small xmi CAS with inheritance Small type system Small type system with document annotation Type system with types without namespace https://github.com/dkpro/dkpro-cassis/issues/43 Type system with inheritance Annotations
221
en
0.790077
# Download the Python helper library from twilio.com/docs/python/install
from twilio.rest import Client

# Your Account Sid and Auth Token from twilio.com/console
api_key_sid = "SKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
api_key_secret = "your_api_key_secret"

client = Client(api_key_sid, api_key_secret)

publishedtrack = client.video.rooms('DailyStandup').participants.get(
    'Alice').published_tracks.get('Camera')

print(publishedtrack.fetch().date_created)
video/rooms/participants/published-track/retrieve-track-published-by-participant/retrieve-track-published-by-participant.py
457
Download the Python helper library from twilio.com/docs/python/install Your Account Sid and Auth Token from twilio.com/console
126
en
0.766585
import argparse
import importlib
import mmcv
import numpy as np
import os
import os.path as osp
import time
import torch
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import get_dist_info, init_dist, load_checkpoint

from openselfsup.datasets import build_dataloader, build_dataset
from openselfsup.models import build_model
from openselfsup.models.utils import MultiPooling
from openselfsup.utils import dist_forward_collect, nondist_forward_collect
from openselfsup.utils import get_root_logger


class ExtractProcess(object):

    def __init__(self,
                 pool_type='specified',
                 backbone='resnet50',
                 layer_indices=(0, 1, 2, 3, 4)):
        self.multi_pooling = MultiPooling(
            pool_type, in_indices=layer_indices, backbone=backbone)

    def _forward_func(self, model, **x):
        backbone_feats = model(mode='extract', **x)
        pooling_feats = self.multi_pooling(backbone_feats)
        flat_feats = [xx.view(xx.size(0), -1) for xx in pooling_feats]
        feat_dict = {'feat{}'.format(i + 1): feat.cpu() \
            for i, feat in enumerate(flat_feats)}
        return feat_dict

    def extract(self, model, data_loader, distributed=False):
        model.eval()
        func = lambda **x: self._forward_func(model, **x)
        if distributed:
            rank, world_size = get_dist_info()
            results = dist_forward_collect(func, data_loader, rank,
                                           len(data_loader.dataset))
        else:
            results = nondist_forward_collect(func, data_loader,
                                              len(data_loader.dataset))
        return results


def parse_args():
    parser = argparse.ArgumentParser(
        description='OpenSelfSup extract features of a model')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('--checkpoint', default=None, help='checkpoint file')
    parser.add_argument(
        '--pretrained',
        default='random',
        help='pretrained model file, exclusive to --checkpoint')
    parser.add_argument(
        '--dataset-config',
        default='benchmarks/extract_info/voc07.py',
        help='extract dataset config file path')
    parser.add_argument(
        '--layer-ind',
        type=str,
        help='layer indices, separated by comma, e.g., "0,1,2,3,4"')
    parser.add_argument(
        '--work_dir',
        type=str,
        default=None,
        help='the dir to save logs and models')
    parser.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    parser.add_argument(
        '--port',
        type=int,
        default=29500,
        help='port only works when launcher=="slurm"')
    args = parser.parse_args()
    if 'LOCAL_RANK' not in os.environ:
        os.environ['LOCAL_RANK'] = str(args.local_rank)
    return args


def main():
    args = parse_args()
    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # update configs according to CLI args
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir
    layer_ind = [int(idx) for idx in args.layer_ind.split(',')]
    cfg.model.backbone.out_indices = layer_ind

    # checkpoint and pretrained are exclusive
    assert args.pretrained == "random" or args.checkpoint is None, \
        "Checkpoint and pretrained are exclusive."

    # check memcached package exists
    if importlib.util.find_spec('mc') is None:
        for field in ['train', 'val', 'test']:
            if hasattr(cfg.data, field):
                getattr(cfg.data, field).data_source.memcached = False

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        if args.launcher == 'slurm':
            cfg.dist_params['port'] = args.port
        init_dist(args.launcher, **cfg.dist_params)

    # create work_dir
    mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))

    # logger
    timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    log_file = osp.join(cfg.work_dir, 'extract_{}.log'.format(timestamp))
    logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)

    # build the dataloader
    dataset_cfg = mmcv.Config.fromfile(args.dataset_config)
    dataset = build_dataset(dataset_cfg.data.extract)
    data_loader = build_dataloader(
        dataset,
        imgs_per_gpu=dataset_cfg.data.imgs_per_gpu,
        workers_per_gpu=dataset_cfg.data.workers_per_gpu,
        dist=distributed,
        shuffle=False)

    # specify pretrained model
    if args.pretrained != 'random':
        assert isinstance(args.pretrained, str)
        cfg.model.pretrained = args.pretrained

    # build the model and load checkpoint
    model = build_model(cfg.model)
    if args.checkpoint is not None:
        logger.info("Use checkpoint: {} to extract features".format(
            args.checkpoint))
        load_checkpoint(model, args.checkpoint, map_location='cpu')
    elif args.pretrained != "random":
        logger.info('Use pretrained model: {} to extract features'.format(
            args.pretrained))
    else:
        logger.info('No checkpoint or pretrained is given, use random init.')

    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
    else:
        model = MMDistributedDataParallel(
            model.cuda(),
            device_ids=[torch.cuda.current_device()],
            broadcast_buffers=False)

    # build extraction processor
    extractor = ExtractProcess(
        pool_type='specified', backbone='resnet50', layer_indices=layer_ind)

    # run
    outputs = extractor.extract(model, data_loader, distributed=distributed)
    rank, _ = get_dist_info()
    mmcv.mkdir_or_exist("{}/features/".format(args.work_dir))
    if rank == 0:
        for key, val in outputs.items():
            split_num = len(dataset_cfg.split_name)
            split_at = dataset_cfg.split_at
            for ss in range(split_num):
                output_file = "{}/features/{}_{}.npy".format(
                    args.work_dir, dataset_cfg.split_name[ss], key)
                if ss == 0:
                    np.save(output_file, val[:split_at[0]])
                elif ss == split_num - 1:
                    np.save(output_file, val[split_at[-1]:])
                else:
                    np.save(output_file, val[split_at[ss - 1]:split_at[ss]])


if __name__ == '__main__':
    main()
tools/extract.py
6,719
set cudnn_benchmark update configs according to CLI args checkpoint and pretrained are exclusive check memcached package exists init distributed env first, since logger depends on the dist info. create work_dir logger build the dataloader specify pretrained model build the model and load checkpoint build extraction processor run
330
en
0.65318
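A small standalone sketch of the naming convention the script above uses when saving features: the --layer-ind string is split into stage indices, each pooled feature map is flattened per sample, and the results are keyed feat1..featN before being written to <work_dir>/features/<split>_<key>.npy. The arrays below are random placeholders, not real backbone activations.

# Illustration of the feat1..featN naming used by tools/extract.py above;
# the pooled features here are dummy arrays.
import numpy as np

layer_ind_arg = "0,1,2,3,4"                      # value given to --layer-ind
layer_ind = [int(idx) for idx in layer_ind_arg.split(',')]

# pretend pooled outputs for each requested backbone stage (2 samples each)
pooling_feats = [np.random.rand(2, 8, 4, 4) for _ in layer_ind]
flat_feats = [x.reshape(x.shape[0], -1) for x in pooling_feats]
feat_dict = {'feat{}'.format(i + 1): feat for i, feat in enumerate(flat_feats)}

for key, val in feat_dict.items():
    # each entry would become <work_dir>/features/<split>_<key>.npy
    print(key, val.shape)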
"""Temkin Approximation isotherm model.""" import numpy import scipy from ..utilities.exceptions import CalculationError from .base_model import IsothermBaseModel class TemkinApprox(IsothermBaseModel): r""" Asymptotic approximation to the Temkin isotherm. .. math:: n(p) = n_m \frac{K p}{1 + K p} + n_m \theta (\frac{K p}{1 + K p})^2 (\frac{K p}{1 + K p} -1) Notes ----- The Temkin adsorption isotherm [#]_, like the Langmuir model, considers a surface with n_m identical adsorption sites, but takes into account adsorbate- adsorbate interactions by assuming that the enthalpy of adsorption is a linear function of the coverage. The Temkin isotherm is derived [#]_ using a mean-field argument and used an asymptotic approximation to obtain an explicit equation for the loading. Here, :math:`n_m` and K have the same physical meaning as in the Langmuir model. The additional parameter :math:`\theta` describes the strength of the adsorbate-adsorbate interactions (:math:`\theta < 0` for attractions). References ---------- .. [#] V. P. M.I. Tempkin, Kinetics of ammonia synthesis on promoted iron catalyst, Acta Phys. Chim. USSR 12 (1940) 327–356. .. [#] Phys. Chem. Chem. Phys., 2014,16, 5499-5513 """ # Model parameters name = 'TemkinApprox' calculates = 'loading' param_names = ["n_m", "K", "tht"] param_bounds = { "n_m": [0, numpy.inf], "K": [0, numpy.inf], "tht": [0, numpy.inf], } def __init__(self): """Instantiation function.""" self.params = {"n_m": numpy.nan, "K": numpy.nan, "tht": numpy.nan} def loading(self, pressure): """ Calculate loading at specified pressure. Parameters ---------- pressure : float The pressure at which to calculate the loading. Returns ------- float Loading at specified pressure. """ lang_load = self.params["K"] * pressure / (1.0 + self.params["K"] * pressure) return self.params["n_m"] * (lang_load + self.params["tht"] * lang_load ** 2 * (lang_load - 1)) def pressure(self, loading): """ Calculate pressure at specified loading. For the TemkinApprox model, the pressure will be computed numerically as no analytical inversion is possible. Parameters ---------- loading : float The loading at which to calculate the pressure. Returns ------- float Pressure at specified loading. """ def fun(x): return self.loading(x) - loading opt_res = scipy.optimize.root(fun, 0, method='hybr') if not opt_res.success: raise CalculationError(""" Root finding for value {0} failed. """.format(loading)) return opt_res.x def spreading_pressure(self, pressure): r""" Calculate spreading pressure at specified gas pressure. Function that calculates spreading pressure by solving the following integral at each point i. .. math:: \pi = \int_{0}^{p_i} \frac{n_i(p_i)}{p_i} dp_i The integral for the TemkinApprox model is solved analytically. .. math:: \pi = n_m \Big( \ln{(1 + K p)} + \frac{\theta (2 K p + 1)}{2(1 + K p)^2}\Big) Parameters ---------- pressure : float The pressure at which to calculate the spreading pressure. Returns ------- float Spreading pressure at specified pressure. """ one_plus_kp = 1.0 + self.params["K"] * pressure return self.params["n_m"] * (numpy.log(one_plus_kp) + self.params["tht"] * (2.0 * self.params["K"] * pressure + 1.0) / (2.0 * one_plus_kp ** 2)) def initial_guess(self, pressure, loading): """ Return initial guess for fitting. Parameters ---------- pressure : ndarray Pressure data. loading : ndarray Loading data. Returns ------- dict Dictionary of initial guesses for the parameters. 
""" saturation_loading, langmuir_k = super().initial_guess(pressure, loading) guess = {"n_m": saturation_loading, "K": langmuir_k, "tht": 0.0} for param in guess: if guess[param] < self.param_bounds[param][0]: guess[param] = self.param_bounds[param][0] if guess[param] > self.param_bounds[param][1]: guess[param] = self.param_bounds[param][1] return guess
src/pygaps/modelling/temkinapprox.py
4,816
Asymptotic approximation to the Temkin isotherm. .. math:: n(p) = n_m \frac{K p}{1 + K p} + n_m \theta (\frac{K p}{1 + K p})^2 (\frac{K p}{1 + K p} -1) Notes ----- The Temkin adsorption isotherm [#]_, like the Langmuir model, considers a surface with n_m identical adsorption sites, but takes into account adsorbate- adsorbate interactions by assuming that the enthalpy of adsorption is a linear function of the coverage. The Temkin isotherm is derived [#]_ using a mean-field argument and used an asymptotic approximation to obtain an explicit equation for the loading. Here, :math:`n_m` and K have the same physical meaning as in the Langmuir model. The additional parameter :math:`\theta` describes the strength of the adsorbate-adsorbate interactions (:math:`\theta < 0` for attractions). References ---------- .. [#] V. P. M.I. Tempkin, Kinetics of ammonia synthesis on promoted iron catalyst, Acta Phys. Chim. USSR 12 (1940) 327–356. .. [#] Phys. Chem. Chem. Phys., 2014,16, 5499-5513 Instantiation function. Return initial guess for fitting. Parameters ---------- pressure : ndarray Pressure data. loading : ndarray Loading data. Returns ------- dict Dictionary of initial guesses for the parameters. Calculate loading at specified pressure. Parameters ---------- pressure : float The pressure at which to calculate the loading. Returns ------- float Loading at specified pressure. Calculate pressure at specified loading. For the TemkinApprox model, the pressure will be computed numerically as no analytical inversion is possible. Parameters ---------- loading : float The loading at which to calculate the pressure. Returns ------- float Pressure at specified loading. Calculate spreading pressure at specified gas pressure. Function that calculates spreading pressure by solving the following integral at each point i. .. math:: \pi = \int_{0}^{p_i} \frac{n_i(p_i)}{p_i} dp_i The integral for the TemkinApprox model is solved analytically. .. math:: \pi = n_m \Big( \ln{(1 + K p)} + \frac{\theta (2 K p + 1)}{2(1 + K p)^2}\Big) Parameters ---------- pressure : float The pressure at which to calculate the spreading pressure. Returns ------- float Spreading pressure at specified pressure. Temkin Approximation isotherm model. Model parameters
2,328
en
0.695467
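As a quick numerical check of the TemkinApprox expressions above, a standalone sketch that evaluates the loading and the analytical spreading pressure at a single pressure point; the values chosen for n_m, K and tht are arbitrary and only for illustration.

# Worked example of the TemkinApprox formulas; parameters are illustrative only.
import numpy

n_m, K, tht = 3.0, 0.5, 0.1    # made-up example parameters
pressure = 2.0

# n(p) = n_m * (Kp/(1+Kp) + tht * (Kp/(1+Kp))**2 * (Kp/(1+Kp) - 1))
lang_load = K * pressure / (1.0 + K * pressure)
loading = n_m * (lang_load + tht * lang_load ** 2 * (lang_load - 1))

# pi(p) = n_m * (ln(1+Kp) + tht * (2Kp + 1) / (2 * (1+Kp)**2))
one_plus_kp = 1.0 + K * pressure
spreading = n_m * (numpy.log(one_plus_kp)
                   + tht * (2.0 * K * pressure + 1.0) / (2.0 * one_plus_kp ** 2))

print(loading, spreading)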
__author__ = "Johannes Köster" __copyright__ = "Copyright 2015-2019, Johannes Köster" __email__ = "koester@jimmy.harvard.edu" __license__ = "MIT" import html import os import shutil import textwrap import time import tarfile from collections import defaultdict, Counter from itertools import chain, filterfalse, groupby from functools import partial from pathlib import Path import uuid import math from snakemake.io import PeriodicityDetector, wait_for_files, is_flagged from snakemake.jobs import Reason, JobFactory, GroupJobFactory, Job from snakemake.exceptions import MissingInputException from snakemake.exceptions import MissingRuleException, AmbiguousRuleException from snakemake.exceptions import CyclicGraphException, MissingOutputException from snakemake.exceptions import IncompleteFilesException, ImproperOutputException from snakemake.exceptions import PeriodicWildcardError from snakemake.exceptions import RemoteFileException, WorkflowError, ChildIOException from snakemake.exceptions import InputFunctionException from snakemake.logging import logger from snakemake.common import DYNAMIC_FILL, group_into_chunks from snakemake.deployment import conda, singularity from snakemake.output_index import OutputIndex from snakemake import workflow class Batch: """Definition of a batch for calculating only a partial DAG.""" def __init__(self, rulename: str, idx: int, batches: int): assert idx <= batches assert idx > 0 self.rulename = rulename self.idx = idx self.batches = batches def get_batch(self, items: list): """Return the defined batch of the given items. Items are usually input files.""" # make sure that we always consider items in the same order if len(items) < self.batches: raise WorkflowError( "Batching rule {} has less input files than batches. " "Please choose a smaller number of batches.".format(self.rulename) ) items = sorted(items) batch_len = math.floor(len(items) / self.batches) # self.batch is one-based, hence we have to subtract 1 idx = self.idx - 1 i = idx * batch_len if self.is_final: # extend the last batch to cover rest of list return items[i:] else: return items[i : i + batch_len] @property def is_final(self): return self.idx == self.batches def __str__(self): return "{}/{} (rule {})".format(self.idx, self.batches, self.rulename) class DAG: """Directed acyclic graph of jobs.""" def __init__( self, workflow, rules=None, dryrun=False, targetfiles=None, targetrules=None, forceall=False, forcerules=None, forcefiles=None, priorityfiles=None, priorityrules=None, untilfiles=None, untilrules=None, omitfiles=None, omitrules=None, ignore_ambiguity=False, force_incomplete=False, ignore_incomplete=False, notemp=False, keep_remote_local=False, batch=None, ): self.dryrun = dryrun self.dependencies = defaultdict(partial(defaultdict, set)) self.depending = defaultdict(partial(defaultdict, set)) self._needrun = set() self._priority = dict() self._reason = defaultdict(Reason) self._finished = set() self._dynamic = set() self._len = 0 self.workflow = workflow self.rules = set(rules) self.ignore_ambiguity = ignore_ambiguity self.targetfiles = targetfiles self.targetrules = targetrules self.priorityfiles = priorityfiles self.priorityrules = priorityrules self.targetjobs = set() self.prioritytargetjobs = set() self._ready_jobs = set() self.notemp = notemp self.keep_remote_local = keep_remote_local self._jobid = dict() self.job_cache = dict() self.conda_envs = dict() self.container_imgs = dict() self._progress = 0 self._group = dict() self.job_factory = JobFactory() self.group_job_factory = 
GroupJobFactory() self.forcerules = set() self.forcefiles = set() self.untilrules = set() self.untilfiles = set() self.omitrules = set() self.omitfiles = set() self.updated_subworkflow_files = set() if forceall: self.forcerules.update(self.rules) elif forcerules: self.forcerules.update(forcerules) if forcefiles: self.forcefiles.update(forcefiles) if untilrules: self.untilrules.update(set(rule.name for rule in untilrules)) if untilfiles: self.untilfiles.update(untilfiles) if omitrules: self.omitrules.update(set(rule.name for rule in omitrules)) if omitfiles: self.omitfiles.update(omitfiles) self.has_dynamic_rules = any(rule.dynamic_output for rule in self.rules) self.omitforce = set() self.batch = batch if batch is not None and not batch.is_final: # Since not all input files of a batching rule are considered, we cannot run # beyond that rule. # For the final batch, we do not need to omit anything. self.omitrules.add(batch.rulename) self.force_incomplete = force_incomplete self.ignore_incomplete = ignore_incomplete self.periodic_wildcard_detector = PeriodicityDetector() self.update_output_index() def init(self, progress=False): """ Initialise the DAG. """ for job in map(self.rule2job, self.targetrules): job = self.update([job], progress=progress) self.targetjobs.add(job) for file in self.targetfiles: job = self.update(self.file2jobs(file), file=file, progress=progress) self.targetjobs.add(job) self.cleanup() self.update_needrun() self.set_until_jobs() self.delete_omitfrom_jobs() self.update_jobids() self.check_directory_outputs() # check if remaining jobs are valid for i, job in enumerate(self.jobs): job.is_valid() def check_directory_outputs(self): """Check that no output file is contained in a directory output of the same or another rule.""" outputs = sorted( { (path(f), job) for job in self.jobs for f in job.output for path in (os.path.abspath, os.path.realpath) } ) for i in range(len(outputs) - 1): (a, job_a), (b, job_b) = outputs[i : i + 2] try: common = os.path.commonpath([a, b]) except ValueError: # commonpath raises error if windows drives are different. 
continue if a != b and common == os.path.commonpath([a]) and job_a != job_b: raise ChildIOException(parent=outputs[i], child=outputs[i + 1]) @property def checkpoint_jobs(self): for job in self.needrun_jobs: if job.is_checkpoint: yield job def update_checkpoint_outputs(self): workflow.checkpoints.future_output = set( f for job in self.checkpoint_jobs for f in job.output ) def update_jobids(self): for job in self.jobs: if job not in self._jobid: self._jobid[job] = len(self._jobid) def cleanup_workdir(self): for io_dir in set( os.path.dirname(io_file) for job in self.jobs for io_file in chain(job.output, job.input) if not os.path.exists(io_file) ): if os.path.exists(io_dir) and not len(os.listdir(io_dir)): os.removedirs(io_dir) def cleanup(self): self.job_cache.clear() final_jobs = set(self.jobs) todelete = [job for job in self.dependencies if job not in final_jobs] for job in todelete: del self.dependencies[job] try: del self.depending[job] except KeyError: pass def create_conda_envs( self, dryrun=False, forceall=False, init_only=False, quiet=False ): # First deduplicate based on job.conda_env_file jobs = self.jobs if forceall else self.needrun_jobs env_set = { (job.conda_env_file, job.container_img_url) for job in jobs if job.conda_env_file } # Then based on md5sum values self.conda_envs = dict() for (env_file, simg_url) in env_set: simg = None if simg_url and self.workflow.use_singularity: assert ( simg_url in self.container_imgs ), "bug: must first pull singularity images" simg = self.container_imgs[simg_url] env = conda.Env( env_file, self, container_img=simg, cleanup=self.workflow.conda_cleanup_pkgs, ) self.conda_envs[(env_file, simg_url)] = env if not init_only: for env in self.conda_envs.values(): if not dryrun or not quiet: env.create(dryrun) def pull_container_imgs(self, dryrun=False, forceall=False, quiet=False): # First deduplicate based on job.conda_env_file jobs = self.jobs if forceall else self.needrun_jobs img_set = {job.container_img_url for job in jobs if job.container_img_url} for img_url in img_set: img = singularity.Image(img_url, self) if not dryrun or not quiet: img.pull(dryrun) self.container_imgs[img_url] = img def update_output_index(self): """Update the OutputIndex.""" self.output_index = OutputIndex(self.rules) def check_incomplete(self): """Check if any output files are incomplete. This is done by looking up markers in the persistence module.""" if not self.ignore_incomplete: incomplete = self.incomplete_files if incomplete: if self.force_incomplete: logger.debug("Forcing incomplete files:") logger.debug("\t" + "\n\t".join(incomplete)) self.forcefiles.update(incomplete) else: raise IncompleteFilesException(incomplete) def incomplete_external_jobid(self, job): """Return the external jobid of the job if it is marked as incomplete. Returns None, if job is not incomplete, or if no external jobid has been registered or if force_incomplete is True. """ if self.force_incomplete: return None jobids = self.workflow.persistence.external_jobids(job) if len(jobids) == 1: return jobids[0] elif len(jobids) > 1: raise WorkflowError( "Multiple different external jobids registered " "for output files of incomplete job {} ({}). This job " "cannot be resumed. 
Execute Snakemake with --rerun-incomplete " "to fix this issue.".format(job.jobid, jobids) ) def check_dynamic(self): """Check dynamic output and update downstream rules if necessary.""" if self.has_dynamic_rules: for job in filter( lambda job: (job.dynamic_output and not self.needrun(job)), self.jobs ): self.update_dynamic(job) self.postprocess() def is_edit_notebook_job(self, job): return self.workflow.edit_notebook and job.targetfile in self.targetfiles @property def dynamic_output_jobs(self): """Iterate over all jobs with dynamic output files.""" return (job for job in self.jobs if job.dynamic_output) @property def jobs(self): """ All jobs in the DAG. """ for job in self.bfs(self.dependencies, *self.targetjobs): yield job @property def needrun_jobs(self): """ Jobs that need to be executed. """ for job in filter( self.needrun, self.bfs(self.dependencies, *self.targetjobs, stop=self.noneedrun_finished), ): yield job @property def local_needrun_jobs(self): """Iterate over all jobs that need to be run and are marked as local.""" return filter(lambda job: job.is_local, self.needrun_jobs) @property def finished_jobs(self): """ Iterate over all jobs that have been finished.""" for job in filter(self.finished, self.bfs(self.dependencies, *self.targetjobs)): yield job @property def ready_jobs(self): """Jobs that are ready to execute.""" return self._ready_jobs def needrun(self, job): """Return whether a given job needs to be executed.""" return job in self._needrun def priority(self, job): """Return priority of given job.""" return self._priority[job] def noneedrun_finished(self, job): """ Return whether a given job is finished or was not required to run at all. """ return not self.needrun(job) or self.finished(job) def reason(self, job): """ Return the reason of the job execution. """ return self._reason[job] def finished(self, job): """ Return whether a job is finished. """ return job in self._finished def dynamic(self, job): """ Return whether a job is dynamic (i.e. it is only a placeholder for those that are created after the job with dynamic output has finished. """ if job.is_group(): for j in job: if j in self._dynamic: return True else: return job in self._dynamic def requested_files(self, job): """Return the files a job requests.""" return set(*self.depending[job].values()) @property def incomplete_files(self): """Return list of incomplete files.""" return list( chain( *( job.output for job in filter( self.workflow.persistence.incomplete, filterfalse(self.needrun, self.jobs), ) ) ) ) @property def newversion_files(self): """Return list of files where the current version is newer than the recorded version. """ return list( chain( *( job.output for job in filter(self.workflow.persistence.newversion, self.jobs) ) ) ) def missing_temp(self, job): """ Return whether a temp file that is input of the given job is missing. """ for job_, files in self.depending[job].items(): if self.needrun(job_) and any(not f.exists for f in files): return True return False def check_and_touch_output( self, job, wait=3, ignore_missing_output=False, no_touch=False, force_stay_on_remote=False, ): """ Raise exception if output files of job are missing. 
""" expanded_output = [job.shadowed_path(path) for path in job.expanded_output] if job.benchmark: expanded_output.append(job.benchmark) if not ignore_missing_output: try: wait_for_files( expanded_output, latency_wait=wait, force_stay_on_remote=force_stay_on_remote, ignore_pipe=True, ) except IOError as e: raise MissingOutputException( str(e) + "\nThis might be due to " "filesystem latency. If that is the case, consider to increase the " "wait time with --latency-wait.", rule=job.rule, ) # Ensure that outputs are of the correct type (those flagged with directory() # are directories and not files and vice versa). for f in expanded_output: if (f.is_directory and not os.path.isdir(f)) or ( os.path.isdir(f) and not f.is_directory ): raise ImproperOutputException(job.rule, [f]) # It is possible, due to archive expansion or cluster clock skew, that # the files appear older than the input. But we know they must be new, # so touch them to update timestamps. This also serves to touch outputs # when using the --touch flag. # Note that if the input files somehow have a future date then this will # not currently be spotted and the job will always be re-run. if not no_touch: for f in expanded_output: # This won't create normal files if missing, but will create # the flag file for directories. if f.exists_local: f.touch() def unshadow_output(self, job, only_log=False): """ Move files from shadow directory to real output paths. """ if not job.shadow_dir or not job.expanded_output: return files = job.log if only_log else chain(job.expanded_output, job.log) for real_output in files: shadow_output = job.shadowed_path(real_output).file # Remake absolute symlinks as relative if os.path.islink(shadow_output): dest = os.readlink(shadow_output) if os.path.isabs(dest): rel_dest = os.path.relpath(dest, job.shadow_dir) os.remove(shadow_output) os.symlink(rel_dest, shadow_output) if os.path.realpath(shadow_output) == os.path.realpath(real_output): continue logger.debug( "Moving shadow output {} to destination {}".format( shadow_output, real_output ) ) shutil.move(shadow_output, real_output) shutil.rmtree(job.shadow_dir) def check_periodic_wildcards(self, job): """Raise an exception if a wildcard of the given job appears to be periodic, indicating a cyclic dependency.""" for wildcard, value in job.wildcards_dict.items(): periodic_substring = self.periodic_wildcard_detector.is_periodic(value) if periodic_substring is not None: raise PeriodicWildcardError( "The value {} in wildcard {} is periodically repeated ({}). " "This would lead to an infinite recursion. " "To avoid this, e.g. restrict the wildcards in this rule to certain values.".format( periodic_substring, wildcard, value ), rule=job.rule, ) def handle_protected(self, job): """ Write-protect output files that are marked with protected(). """ for f in job.expanded_output: if f in job.protected_output: logger.info("Write-protecting output file {}.".format(f)) f.protect() def handle_touch(self, job): """ Touches those output files that are marked for touching. """ for f in job.expanded_output: if f in job.touch_output: f = job.shadowed_path(f) logger.info("Touching output file {}.".format(f)) f.touch_or_create() assert os.path.exists(f) def temp_input(self, job): for job_, files in self.dependencies[job].items(): for f in filter(job_.temp_output.__contains__, files): yield f def temp_size(self, job): """Return the total size of temporary input files of the job. If none, return 0. 
""" return sum(f.size for f in self.temp_input(job)) def handle_temp(self, job): """ Remove temp files if they are no longer needed. Update temp_mtimes. """ if self.notemp: return is_temp = lambda f: is_flagged(f, "temp") # handle temp input needed = lambda job_, f: any( f in files for j, files in self.depending[job_].items() if not self.finished(j) and self.needrun(j) and j != job ) def unneeded_files(): # temp input for job_, files in self.dependencies[job].items(): tempfiles = set(f for f in job_.expanded_output if is_temp(f)) yield from filterfalse(partial(needed, job_), tempfiles & files) # temp output if not job.dynamic_output and ( job not in self.targetjobs or job.rule.name == self.workflow.first_rule ): tempfiles = ( f for f in job.expanded_output if is_temp(f) and f not in self.targetfiles ) yield from filterfalse(partial(needed, job), tempfiles) for f in unneeded_files(): logger.info("Removing temporary output file {}.".format(f)) f.remove(remove_non_empty_dir=True) def handle_log(self, job, upload_remote=True): for f in job.log: if not f.exists_local: # If log file was not created during job, create an empty one. f.touch_or_create() if upload_remote and f.is_remote and not f.should_stay_on_remote: f.upload_to_remote() if not f.exists_remote: raise RemoteFileException( "The file upload was attempted, but it does not " "exist on remote. Check that your credentials have " "read AND write permissions." ) def handle_remote(self, job, upload=True): """ Remove local files if they are no longer needed and upload. """ if upload: # handle output files files = list(job.expanded_output) if job.benchmark: files.append(job.benchmark) for f in files: if f.is_remote and not f.should_stay_on_remote: f.upload_to_remote() remote_mtime = f.mtime # immediately force local mtime to match remote, # since conversions from S3 headers are not 100% reliable # without this, newness comparisons may fail down the line f.touch(times=(remote_mtime, remote_mtime)) if not f.exists_remote: raise RemoteFileException( "The file upload was attempted, but it does not " "exist on remote. Check that your credentials have " "read AND write permissions." ) if not self.keep_remote_local: # handle input files needed = lambda job_, f: any( f in files for j, files in self.depending[job_].items() if not self.finished(j) and self.needrun(j) and j != job ) def unneeded_files(): putative = ( lambda f: f.is_remote and not f.protected and not f.should_keep_local ) generated_input = set() for job_, files in self.dependencies[job].items(): generated_input |= files for f in filter(putative, files): if not needed(job_, f): yield f for f, f_ in zip(job.output, job.rule.output): if putative(f) and not needed(job, f) and not f in self.targetfiles: if f in job.dynamic_output: for f_ in job.expand_dynamic(f_): yield f_ else: yield f for f in filter(putative, job.input): # TODO what about remote inputs that are used by multiple jobs? if f not in generated_input: yield f for f in unneeded_files(): if f.exists_local: logger.info("Removing local output file: {}".format(f)) f.remove() def jobid(self, job): """Return job id of given job.""" if job.is_group(): return job.jobid else: return self._jobid[job] def update( self, jobs, file=None, visited=None, skip_until_dynamic=False, progress=False ): """ Update the DAG by adding given jobs and their dependencies. 
""" if visited is None: visited = set() producer = None exceptions = list() jobs = sorted(jobs, reverse=not self.ignore_ambiguity) cycles = list() for job in jobs: logger.dag_debug(dict(status="candidate", job=job)) if file in job.input: cycles.append(job) continue if job in visited: cycles.append(job) continue try: self.check_periodic_wildcards(job) self.update_( job, visited=set(visited), skip_until_dynamic=skip_until_dynamic, progress=progress, ) # TODO this might fail if a rule discarded here is needed # elsewhere if producer: if job < producer or self.ignore_ambiguity: break elif producer is not None: raise AmbiguousRuleException(file, job, producer) producer = job except ( MissingInputException, CyclicGraphException, PeriodicWildcardError, WorkflowError, ) as ex: exceptions.append(ex) except RecursionError as e: raise WorkflowError( e, "If building the DAG exceeds the recursion limit, " "this is likely due to a cyclic dependency." "E.g. you might have a sequence of rules that " "can generate their own input. Try to make " "the output files more specific. " "A common pattern is to have different prefixes " "in the output files of different rules." + "\nProblematic file pattern: {}".format(file) if file else "", ) if producer is None: if cycles: job = cycles[0] raise CyclicGraphException(job.rule, file, rule=job.rule) if len(exceptions) > 1: raise WorkflowError(*exceptions) elif len(exceptions) == 1: raise exceptions[0] else: logger.dag_debug(dict(status="selected", job=producer)) logger.dag_debug( dict( file=file, msg="Producer found, hence exceptions are ignored.", exception=WorkflowError(*exceptions), ) ) n = len(self.dependencies) if progress and n % 1000 == 0 and n and self._progress != n: logger.info("Processed {} potential jobs.".format(n)) self._progress = n return producer def update_(self, job, visited=None, skip_until_dynamic=False, progress=False): """ Update the DAG by adding the given job and its dependencies. """ if job in self.dependencies: return if visited is None: visited = set() visited.add(job) dependencies = self.dependencies[job] potential_dependencies = self.collect_potential_dependencies(job) skip_until_dynamic = skip_until_dynamic and not job.dynamic_output missing_input = set() producer = dict() exceptions = dict() for file, jobs in potential_dependencies.items(): # If possible, obtain inventory information starting from # given file and store it in the IOCache. # This should provide faster access to existence and mtime information # than querying file by file. If the file type does not support inventory # information, this call is a no-op. file.inventory() if not jobs: # no producing job found if not file.exists: # file not found, hence missing input missing_input.add(file) # file found, no problem continue try: selected_job = self.update( jobs, file=file, visited=visited, skip_until_dynamic=skip_until_dynamic or file in job.dynamic_input, progress=progress, ) producer[file] = selected_job except ( MissingInputException, CyclicGraphException, PeriodicWildcardError, WorkflowError, ) as ex: if not file.exists: self.delete_job(job, recursive=False) # delete job from tree raise ex else: logger.dag_debug( dict( file=file, msg="No producers found, but file is present on disk.", exception=ex, ) ) for file, job_ in producer.items(): dependencies[job_].add(file) self.depending[job_][job].add(file) if self.is_batch_rule(job.rule) and self.batch.is_final: # For the final batch, ensure that all input files from # previous batches are present on disk. 
if any( f for f in job.input if f not in potential_dependencies and not f.exists ): raise WorkflowError( "Unable to execute batch {} because not all previous batches " "have been completed before or files have been deleted.".format( self.batch ) ) if missing_input: self.delete_job(job, recursive=False) # delete job from tree raise MissingInputException(job.rule, missing_input) if skip_until_dynamic: self._dynamic.add(job) def update_needrun(self): """ Update the information whether a job needs to be executed. """ output_mintime = dict() def update_output_mintime(job): try: return output_mintime[job] except KeyError: for job_ in chain([job], self.depending[job]): try: t = output_mintime[job_] except KeyError: t = job_.output_mintime if t is not None: output_mintime[job] = t return output_mintime[job] = None def update_needrun(job): reason = self.reason(job) noinitreason = not reason updated_subworkflow_input = self.updated_subworkflow_files.intersection( job.input ) if ( job not in self.omitforce and job.rule in self.forcerules or not self.forcefiles.isdisjoint(job.output) ): reason.forced = True elif updated_subworkflow_input: reason.updated_input.update(updated_subworkflow_input) elif job in self.targetjobs: # TODO find a way to handle added/removed input files here? if not job.output and not job.benchmark: if job.input: if job.rule.norun: reason.updated_input_run.update( [f for f in job.input if not f.exists] ) else: reason.nooutput = True else: reason.noio = True else: if job.rule in self.targetrules: missing_output = job.missing_output() else: missing_output = job.missing_output( requested=set(chain(*self.depending[job].values())) | self.targetfiles ) reason.missing_output.update(missing_output) if not reason: output_mintime_ = output_mintime.get(job) if output_mintime_: updated_input = [ f for f in job.input if f.exists and f.is_newer(output_mintime_) ] reason.updated_input.update(updated_input) if noinitreason and reason: reason.derived = False reason = self.reason _needrun = self._needrun dependencies = self.dependencies depending = self.depending _needrun.clear() candidates = list(self.jobs) # Update the output mintime of all jobs. # We traverse them in BFS (level order) starting from target jobs. # Then, we check output mintime of job itself and all direct descendants, # which have already been visited in the level before. # This way, we achieve a linear runtime. 
for job in candidates: update_output_mintime(job) # update prior reason for all candidate jobs for job in candidates: update_needrun(job) queue = list(filter(reason, candidates)) visited = set(queue) while queue: job = queue.pop(0) _needrun.add(job) for job_, files in dependencies[job].items(): missing_output = job_.missing_output(requested=files) reason(job_).missing_output.update(missing_output) if missing_output and not job_ in visited: visited.add(job_) queue.append(job_) for job_, files in depending[job].items(): if job_ in candidates: reason(job_).updated_input_run.update(files) if not job_ in visited: visited.add(job_) queue.append(job_) # update len including finished jobs (because they have already increased the job counter) self._len = len(self._finished | self._needrun) def in_until(self, job): """Return whether given job has been specified via --until.""" return job.rule.name in self.untilrules or not self.untilfiles.isdisjoint( job.output ) def in_omitfrom(self, job): """Return whether given job has been specified via --omit-from.""" return job.rule.name in self.omitrules or not self.omitfiles.isdisjoint( job.output ) def until_jobs(self): """Returns a generator of jobs specified by untiljobs.""" return (job for job in self.jobs if self.in_until(job)) def omitfrom_jobs(self): """Returns a generator of jobs specified by omitfromjobs.""" return (job for job in self.jobs if self.in_omitfrom(job)) def downstream_of_omitfrom(self): """Returns the downstream of --omit-from rules or files and themselves.""" return self.bfs(self.depending, *self.omitfrom_jobs()) def delete_omitfrom_jobs(self): """Removes jobs downstream of jobs specified by --omit-from.""" if not self.omitrules and not self.omitfiles: return downstream_jobs = list( self.downstream_of_omitfrom() ) # need to cast as list before deleting jobs for job in downstream_jobs: self.delete_job(job, recursive=False, add_dependencies=True) def set_until_jobs(self): """Removes jobs downstream of jobs specified by --omit-from.""" if not self.untilrules and not self.untilfiles: return self.targetjobs = set(self.until_jobs()) def update_priority(self): """ Update job priorities. """ prioritized = ( lambda job: job.rule in self.priorityrules or not self.priorityfiles.isdisjoint(job.output) ) for job in self.needrun_jobs: self._priority[job] = job.rule.priority for job in self.bfs( self.dependencies, *filter(prioritized, self.needrun_jobs), stop=self.noneedrun_finished, ): self._priority[job] = Job.HIGHEST_PRIORITY def update_groups(self): groups = dict() for job in self.needrun_jobs: if job.group is None: continue stop = lambda j: j.group != job.group # BFS into depending needrun jobs if in same group # Note: never go up here (into depending), because it may contain # jobs that have been sorted out due to e.g. ruleorder. 
group = self.group_job_factory.new( job.group, ( job for job in self.bfs(self.dependencies, job, stop=stop) if self.needrun(job) ), ) # merge with previously determined groups if present for j in group: if j in groups: other = groups[j] other.merge(group) group = other # update assignment for j in group: if j not in groups: groups[j] = group self._group = groups self._update_group_components() def _update_group_components(self): # span connected components if requested for groupid, conn_components in groupby( set(self._group.values()), key=lambda group: group.groupid ): n_components = self.workflow.group_components.get(groupid, 1) if n_components > 1: for chunk in group_into_chunks(n_components, conn_components): if len(chunk) > 1: primary = chunk[0] for secondary in chunk[1:]: primary.merge(secondary) for j in primary: self._group[j] = primary def update_ready(self, jobs=None): """Update information whether a job is ready to execute. Given jobs must be needrun jobs! """ if jobs is None: jobs = self.needrun_jobs candidate_groups = set() for job in jobs: if not self.finished(job) and self._ready(job): if job.group is None: self._ready_jobs.add(job) else: group = self._group[job] group.finalize() candidate_groups.add(group) self._ready_jobs.update( group for group in candidate_groups if all(self._ready(job) for job in group) ) def get_jobs_or_groups(self): visited_groups = set() for job in self.jobs: if job.group is None: yield job else: group = self._group[job] if group in visited_groups: continue visited_groups.add(group) yield group def close_remote_objects(self): """Close all remote objects.""" for job in self.jobs: if not self.needrun(job): job.close_remote() def postprocess(self): """Postprocess the DAG. This has to be invoked after any change to the DAG topology.""" self.update_jobids() self.update_needrun() self.update_priority() self.handle_pipes() self.update_groups() self.update_ready() self.close_remote_objects() self.update_checkpoint_outputs() def handle_pipes(self): """Use pipes to determine job groups. Check if every pipe has exactly one consumer""" for job in self.needrun_jobs: candidate_groups = set() if job.group is not None: candidate_groups.add(job.group) all_depending = set() has_pipe = False for f in job.output: if is_flagged(f, "pipe"): if job.is_run: raise WorkflowError( "Rule defines pipe output but " "uses a 'run' directive. This is " "not possible for technical " "reasons. Consider using 'shell' or " "'script'.", rule=job.rule, ) has_pipe = True depending = [ j for j, files in self.depending[job].items() if f in files ] if len(depending) > 1: raise WorkflowError( "Output file {} is marked as pipe " "but more than one job depends on " "it. Make sure that any pipe " "output is only consumed by one " "job".format(f), rule=job.rule, ) elif len(depending) == 0: raise WorkflowError( "Output file {} is marked as pipe " "but it has no consumer. This is " "invalid because it can lead to " "a dead lock.".format(f), rule=job.rule, ) depending = depending[0] if depending.is_run: raise WorkflowError( "Rule consumes pipe input but " "uses a 'run' directive. This is " "not possible for technical " "reasons. 
Consider using 'shell' or " "'script'.", rule=job.rule, ) all_depending.add(depending) if depending.group is not None: candidate_groups.add(depending.group) if not has_pipe: continue if len(candidate_groups) > 1: raise WorkflowError( "An output file is marked as " "pipe, but consuming jobs " "are part of conflicting " "groups.", rule=job.rule, ) elif candidate_groups: # extend the candidate group to all involved jobs group = candidate_groups.pop() else: # generate a random unique group name group = str(uuid.uuid4()) job.group = group for j in all_depending: j.group = group def _ready(self, job): """Return whether the given job is ready to execute.""" group = self._group.get(job, None) if group is None: is_external_needrun_dep = self.needrun else: def is_external_needrun_dep(j): g = self._group.get(j, None) return self.needrun(j) and (g is None or g != group) return self._finished.issuperset( filter(is_external_needrun_dep, self.dependencies[job]) ) def update_checkpoint_dependencies(self, jobs=None): """Update dependencies of checkpoints.""" updated = False self.update_checkpoint_outputs() if jobs is None: jobs = [job for job in self.jobs if not self.needrun(job)] for job in jobs: if job.is_checkpoint: depending = list(self.depending[job]) # re-evaluate depending jobs, replace and update DAG for j in depending: logger.info("Updating job {} ({}).".format(self.jobid(j), j)) newjob = j.updated() self.replace_job(j, newjob, recursive=False) updated = True if updated: # This has to be done for each checkpoint, # otherwise, jobs may be missing in the end. self.postprocess() return updated def finish(self, job, update_dynamic=True): """Finish a given job (e.g. remove from ready jobs, mark depending jobs as ready).""" try: self._ready_jobs.remove(job) except KeyError: pass if job.is_group(): jobs = job else: jobs = [job] self._finished.update(jobs) updated_dag = False if update_dynamic: updated_dag = self.update_checkpoint_dependencies(jobs) # mark depending jobs as ready # skip jobs that are marked as until jobs self.update_ready( j for job in jobs for j in self.depending[job] if not self.in_until(job) and self.needrun(j) ) for job in jobs: if update_dynamic and job.dynamic_output: logger.info("Dynamically updating jobs") newjob = self.update_dynamic(job) if newjob: # simulate that this job ran and was finished before self.omitforce.add(newjob) self._needrun.add(newjob) self._finished.add(newjob) updated_dag = True self.postprocess() self.handle_protected(newjob) self.handle_touch(newjob) if updated_dag: # We might have new jobs, so we need to ensure that all conda envs # and singularity images are set up. if self.workflow.use_singularity: self.pull_container_imgs() if self.workflow.use_conda: self.create_conda_envs() def new_job(self, rule, targetfile=None, format_wildcards=None): """Create new job for given rule and (optional) targetfile. 
This will reuse existing jobs with the same wildcards.""" key = (rule, targetfile) if key in self.job_cache: assert targetfile is not None return self.job_cache[key] wildcards_dict = rule.get_wildcards(targetfile) job = self.job_factory.new( rule, self, wildcards_dict=wildcards_dict, format_wildcards=format_wildcards, targetfile=targetfile, ) self.cache_job(job) return job def cache_job(self, job): for f in job.products: self.job_cache[(job.rule, f)] = job def update_dynamic(self, job): """Update the DAG by evaluating the output of the given job that contains dynamic output files.""" dynamic_wildcards = job.dynamic_wildcards if not dynamic_wildcards: # this happens e.g. in dryrun if output is not yet present return depending = list( filter(lambda job_: not self.finished(job_), self.bfs(self.depending, job)) ) newrule, non_dynamic_wildcards = job.rule.dynamic_branch( dynamic_wildcards, input=False ) self.specialize_rule(job.rule, newrule) # no targetfile needed for job newjob = self.new_job(newrule, format_wildcards=non_dynamic_wildcards) self.replace_job(job, newjob) for job_ in depending: needs_update = any( f.get_wildcard_names() & dynamic_wildcards.keys() for f in job_.rule.dynamic_input ) if needs_update: newrule_ = job_.rule.dynamic_branch(dynamic_wildcards) if newrule_ is not None: self.specialize_rule(job_.rule, newrule_) if not self.dynamic(job_): logger.debug("Updating job {}.".format(job_)) newjob_ = self.new_job( newrule_, targetfile=job_.output[0] if job_.output else None ) unexpected_output = self.reason( job_ ).missing_output.intersection(newjob.existing_output) if unexpected_output: logger.warning( "Warning: the following output files of rule {} were not " "present when the DAG was created:\n{}".format( newjob_.rule, unexpected_output ) ) self.replace_job(job_, newjob_) return newjob def delete_job(self, job, recursive=True, add_dependencies=False): """Delete given job from DAG.""" if job in self.targetjobs: self.targetjobs.remove(job) if add_dependencies: for _job in self.dependencies[job]: self.targetjobs.add(_job) for job_ in self.depending[job]: del self.dependencies[job_][job] del self.depending[job] for job_ in self.dependencies[job]: depending = self.depending[job_] del depending[job] if not depending and recursive: self.delete_job(job_) del self.dependencies[job] if job in self._needrun: self._len -= 1 self._needrun.remove(job) del self._reason[job] if job in self._finished: self._finished.remove(job) if job in self._dynamic: self._dynamic.remove(job) if job in self._ready_jobs: self._ready_jobs.remove(job) # remove from cache for f in job.output: try: del self.job_cache[(job.rule, f)] except KeyError: pass def replace_job(self, job, newjob, recursive=True): """Replace given job with new job.""" add_to_targetjobs = job in self.targetjobs depending = list(self.depending[job].items()) if self.finished(job): self._finished.add(newjob) self.delete_job(job, recursive=recursive) if add_to_targetjobs: self.targetjobs.add(newjob) self.cache_job(newjob) self.update([newjob]) logger.debug("Replace {} with dynamic branch {}".format(job, newjob)) for job_, files in depending: # if not job_.dynamic_input: logger.debug("updating depending job {}".format(job_)) self.dependencies[job_][newjob].update(files) self.depending[newjob][job_].update(files) def specialize_rule(self, rule, newrule): """Specialize the given rule by inserting newrule into the DAG.""" assert newrule is not None self.rules.add(newrule) self.update_output_index() def is_batch_rule(self, rule): """Return True if 
the underlying rule is to be used for batching the DAG.""" return self.batch is not None and rule.name == self.batch.rulename def collect_potential_dependencies(self, job): """Collect all potential dependencies of a job. These might contain ambiguities. The keys of the returned dict represent the files to be considered.""" dependencies = defaultdict(list) # use a set to circumvent multiple jobs for the same file # if user specified it twice file2jobs = self.file2jobs input_files = list(job.unique_input) if self.is_batch_rule(job.rule): # only consider the defined partition of the input files input_batch = self.batch.get_batch(input_files) if len(input_batch) != len(input_files): logger.info( "Considering only batch {} for DAG computation.\n" "All jobs beyond the batching rule are omitted until the final batch.\n" "Don't forget to run the other batches too.".format(self.batch) ) input_files = input_batch for file in input_files: # omit the file if it comes from a subworkflow if file in job.subworkflow_input: continue try: if file in job.dependencies: jobs = [self.new_job(job.dependencies[file], targetfile=file)] else: jobs = file2jobs(file) dependencies[file].extend(jobs) except MissingRuleException as ex: # no dependency found dependencies[file] = [] return dependencies def bfs(self, direction, *jobs, stop=lambda job: False): """Perform a breadth-first traversal of the DAG.""" queue = list(jobs) visited = set(queue) while queue: job = queue.pop(0) if stop(job): # stop criterion reached for this node continue yield job for job_, _ in direction[job].items(): if not job_ in visited: queue.append(job_) visited.add(job_) def level_bfs(self, direction, *jobs, stop=lambda job: False): """Perform a breadth-first traversal of the DAG, but also yield the level together with each job.""" queue = [(job, 0) for job in jobs] visited = set(jobs) while queue: job, level = queue.pop(0) if stop(job): # stop criterion reached for this node continue yield level, job level += 1 for job_, _ in direction[job].items(): if not job_ in visited: queue.append((job_, level)) visited.add(job_) def dfs(self, direction, *jobs, stop=lambda job: False, post=True): """Perform depth-first traversal of the DAG.""" visited = set() def _dfs(job): """Inner function for DFS traversal.""" if stop(job): return if not post: yield job for job_ in direction[job]: if not job_ in visited: visited.add(job_) for j in _dfs(job_): yield j if post: yield job for job in jobs: for job_ in self._dfs(direction, job, visited, stop=stop, post=post): yield job_ def new_wildcards(self, job): """Return wildcards that are newly introduced in this job, compared to its ancestors.""" new_wildcards = set(job.wildcards.items()) for job_ in self.dependencies[job]: if not new_wildcards: return set() for wildcard in job_.wildcards.items(): new_wildcards.discard(wildcard) return new_wildcards def rule2job(self, targetrule): """Generate a new job from a given rule.""" if targetrule.has_wildcards(): raise WorkflowError( "Target rules may not contain wildcards. Please specify concrete files or a rule without wildcards." 
) return self.new_job(targetrule) def file2jobs(self, targetfile): rules = self.output_index.match(targetfile) jobs = [] exceptions = list() for rule in rules: if rule.is_producer(targetfile): try: jobs.append(self.new_job(rule, targetfile=targetfile)) except InputFunctionException as e: exceptions.append(e) if not jobs: if exceptions: raise exceptions[0] raise MissingRuleException(targetfile) return jobs def rule_dot2(self): dag = defaultdict(list) visited = set() preselect = set() def preselect_parents(job): for parent in self.depending[job]: if parent in preselect: continue preselect.add(parent) preselect_parents(parent) def build_ruledag(job, key=lambda job: job.rule.name): if job in visited: return visited.add(job) deps = sorted(self.dependencies[job], key=key) deps = [ ( group[0] if preselect.isdisjoint(group) else preselect.intersection(group).pop() ) for group in (list(g) for _, g in groupby(deps, key)) ] dag[job].extend(deps) preselect_parents(job) for dep in deps: build_ruledag(dep) for job in self.targetjobs: build_ruledag(job) return self._dot(dag.keys(), print_wildcards=False, print_types=False, dag=dag) def rule_dot(self): graph = defaultdict(set) for job in self.jobs: graph[job.rule].update(dep.rule for dep in self.dependencies[job]) return self._dot(graph) def dot(self): def node2style(job): if not self.needrun(job): return "rounded,dashed" if self.dynamic(job) or job.dynamic_input: return "rounded,dotted" return "rounded" def format_wildcard(wildcard): name, value = wildcard if DYNAMIC_FILL in value: value = "..." return "{}: {}".format(name, value) node2rule = lambda job: job.rule node2label = lambda job: "\\n".join( chain( [job.rule.name], sorted(map(format_wildcard, self.new_wildcards(job))) ) ) dag = {job: self.dependencies[job] for job in self.jobs} return self._dot( dag, node2rule=node2rule, node2style=node2style, node2label=node2label ) def _dot( self, graph, node2rule=lambda node: node, node2style=lambda node: "rounded", node2label=lambda node: node, ): # color rules huefactor = 2 / (3 * len(self.rules)) rulecolor = { rule: "{:.2f} 0.6 0.85".format(i * huefactor) for i, rule in enumerate(self.rules) } # markup node_markup = '\t{}[label = "{}", color = "{}", style="{}"];'.format edge_markup = "\t{} -> {}".format # node ids ids = {node: i for i, node in enumerate(graph)} # calculate nodes nodes = [ node_markup( ids[node], node2label(node), rulecolor[node2rule(node)], node2style(node), ) for node in graph ] # calculate edges edges = [ edge_markup(ids[dep], ids[node]) for node, deps in graph.items() for dep in deps ] return textwrap.dedent( """\ digraph snakemake_dag {{ graph[bgcolor=white, margin=0]; node[shape=box, style=rounded, fontname=sans, \ fontsize=10, penwidth=2]; edge[penwidth=2, color=grey]; {items} }}\ """ ).format(items="\n".join(nodes + edges)) def filegraph_dot( self, node2rule=lambda node: node, node2style=lambda node: "rounded", node2label=lambda node: node, ): # NOTE: This is code from the rule_dot method. 
# This method could be split like there as well, however, # it cannot easily reuse the _dot method due to the different node type graph = defaultdict(set) for job in self.jobs: graph[job.rule].update(dep.rule for dep in self.dependencies[job]) # node ids ids = {node: i for i, node in enumerate(graph)} # Compute colors for rules def hsv_to_htmlhexrgb(h, s, v): """Convert hsv colors to hex-encoded rgb colors usable by html.""" import colorsys hex_r, hex_g, hex_b = (round(255 * x) for x in colorsys.hsv_to_rgb(h, s, v)) return "#{hex_r:0>2X}{hex_g:0>2X}{hex_b:0>2X}".format( hex_r=hex_r, hex_g=hex_g, hex_b=hex_b ) huefactor = 2 / (3 * len(self.rules)) rulecolor = { rule: hsv_to_htmlhexrgb(i * huefactor, 0.6, 0.85) for i, rule in enumerate(self.rules) } def resolve_input_functions(input_files): """Iterate over all input files and replace input functions with a fixed string. """ files = [] for f in input_files: if callable(f): files.append("<input function>") # NOTE: This is a workaround. It would be more informative # to show the code of the input function here (if it is # short enough). This cannot be easily done with the inspect # module, since the line numbers in the Snakefile do not # behave as expected. One (complicated) solution for this # would be to find the Snakefile and directly extract the # code of the function. else: files.append(repr(f).strip("'")) return files def html_node(node_id, node, color): """Assemble a html style node for graphviz""" input_files = resolve_input_functions(node._input) output_files = [repr(f).strip("'") for f in node._output] input_header = ( '<b><font point-size="14">&#8618; input</font></b>' if input_files else "" ) output_header = ( '<b><font point-size="14">output &rarr;</font></b>' if output_files else "" ) html_node = [ '{node_id} [ shape=none, margin=0, label=<<table border="2" color="{color}" cellspacing="3" cellborder="0">'.format( node_id=node_id, color=color ), "<tr><td>", '<b><font point-size="18">{node.name}</font></b>'.format(node=node), "</td></tr>", "<hr/>", '<tr><td align="left"> {input_header} </td></tr>'.format( input_header=input_header ), ] for filename in sorted(input_files): # Escape html relevant chars like '<' and '>' in filenames # These can be added by input functions etc. and cannot be # displayed in graphviz HTML nodes. 
in_file = html.escape(filename) html_node.extend( [ "<tr>", '<td align="left"><font face="monospace">{in_file}</font></td>'.format( in_file=in_file ), "</tr>", ] ) html_node.append("<hr/>") html_node.append( '<tr><td align="right"> {output_header} </td> </tr>'.format( output_header=output_header ) ) for filename in sorted(output_files): out_file = html.escape(filename) html_node.extend( [ "<tr>", '<td align="left"><font face="monospace">{out_file}</font></td>' "</tr>".format(out_file=out_file), ] ) html_node.append("</table>>]") return "\n".join(html_node) nodes = [ html_node(ids[node], node, rulecolor[node2rule(node)]) for node in graph ] # calculate edges edge_markup = "\t{} -> {}".format edges = [ edge_markup(ids[dep], ids[node], ids[dep], ids[node]) for node, deps in graph.items() for dep in deps ] return textwrap.dedent( """\ digraph snakemake_dag {{ graph[bgcolor=white, margin=0]; node[shape=box, style=rounded, fontname=sans, \ fontsize=10, penwidth=2]; edge[penwidth=2, color=grey]; {items} }}\ """ ).format(items="\n".join(nodes + edges)) def summary(self, detailed=False): if detailed: yield "output_file\tdate\trule\tversion\tlog-file(s)\tinput-file(s)\tshellcmd\tstatus\tplan" else: yield "output_file\tdate\trule\tversion\tlog-file(s)\tstatus\tplan" for job in self.jobs: output = job.rule.output if self.dynamic(job) else job.expanded_output for f in output: rule = self.workflow.persistence.rule(f) rule = "-" if rule is None else rule version = self.workflow.persistence.version(f) version = "-" if version is None else str(version) date = time.ctime(f.mtime) if f.exists else "-" pending = "update pending" if self.reason(job) else "no update" log = self.workflow.persistence.log(f) log = "-" if log is None else ",".join(log) input = self.workflow.persistence.input(f) input = "-" if input is None else ",".join(input) shellcmd = self.workflow.persistence.shellcmd(f) shellcmd = "-" if shellcmd is None else shellcmd # remove new line characters, leading and trailing whitespace shellcmd = shellcmd.strip().replace("\n", "; ") status = "ok" if not f.exists: status = "missing" elif self.reason(job).updated_input: status = "updated input files" elif self.workflow.persistence.version_changed(job, file=f): status = "version changed to {}".format(job.rule.version) elif self.workflow.persistence.code_changed(job, file=f): status = "rule implementation changed" elif self.workflow.persistence.input_changed(job, file=f): status = "set of input files changed" elif self.workflow.persistence.params_changed(job, file=f): status = "params changed" if detailed: yield "\t".join( (f, date, rule, version, log, input, shellcmd, status, pending) ) else: yield "\t".join((f, date, rule, version, log, status, pending)) def archive(self, path): """Archives workflow such that it can be re-run on a different system. Archiving includes git versioned files (i.e. Snakefiles, config files, ...), ancestral input files and conda environments. 
""" if path.endswith(".tar"): mode = "x" elif path.endswith("tar.bz2"): mode = "x:bz2" elif path.endswith("tar.xz"): mode = "x:xz" elif path.endswith("tar.gz"): mode = "x:gz" else: raise WorkflowError( "Unsupported archive format " "(supported: .tar, .tar.gz, .tar.bz2, .tar.xz)" ) if os.path.exists(path): raise WorkflowError("Archive already exists:\n" + path) self.create_conda_envs(forceall=True) try: workdir = Path(os.path.abspath(os.getcwd())) with tarfile.open(path, mode=mode, dereference=True) as archive: archived = set() def add(path): if workdir not in Path(os.path.abspath(path)).parents: logger.warning( "Path {} cannot be archived: " "not within working directory.".format(path) ) else: f = os.path.relpath(path) if f not in archived: archive.add(f) archived.add(f) logger.info("archived " + f) logger.info( "Archiving snakefiles, scripts and files under " "version control..." ) for f in self.workflow.get_sources(): add(f) logger.info("Archiving external input files...") for job in self.jobs: # input files for f in job.input: if not any( f in files for files in self.dependencies[job].values() ): # this is an input file that is not created by any job add(f) logger.info("Archiving conda environments...") envs = set() for job in self.jobs: if job.conda_env_file: env_archive = job.archive_conda_env() envs.add(env_archive) for env in envs: add(env) except (Exception, BaseException) as e: os.remove(path) raise e def clean(self, only_temp=False, dryrun=False): """Removes files generated by the workflow.""" for job in self.jobs: for f in job.output: if not only_temp or is_flagged(f, "temp"): # The reason for the second check is that dangling # symlinks fail f.exists. if f.exists or os.path.islink(f): if f.protected: logger.error("Skipping write-protected file {}.".format(f)) else: msg = "Deleting {}" if not dryrun else "Would delete {}" logger.info(msg.format(f)) if not dryrun: # Remove non-empty dirs if flagged as temp() f.remove(remove_non_empty_dir=only_temp) def list_untracked(self): """List files in the workdir that are not in the dag.""" used_files = set() files_in_cwd = set() for job in self.jobs: used_files.update( os.path.relpath(file) for file in chain(job.local_input, job.local_output, job.log) ) for root, dirs, files in os.walk(os.getcwd()): # Ignore hidden files and don't traverse into hidden dirs files_in_cwd.update( [ os.path.relpath(os.path.join(root, f)) for f in files if not f[0] == "." ] ) dirs[:] = [d for d in dirs if not d[0] == "."] for f in sorted(list(files_in_cwd - used_files)): logger.info(f) def d3dag(self, max_jobs=10000): def node(job): jobid = self.jobid(job) return { "id": jobid, "value": { "jobid": jobid, "label": job.rule.name, "rule": job.rule.name, }, } def edge(a, b): return {"u": self.jobid(a), "v": self.jobid(b)} jobs = list(self.jobs) if len(jobs) > max_jobs: logger.info( "Job-DAG is too large for visualization (>{} jobs).".format(max_jobs) ) else: logger.d3dag( nodes=[node(job) for job in jobs], edges=[ edge(dep, job) for job in jobs for dep in self.dependencies[job] if self.needrun(dep) ], ) def stats(self): rules = Counter() rules.update(job.rule for job in self.needrun_jobs) rules.update(job.rule for job in self.finished_jobs) yield "Job counts:" yield "\tcount\tjobs" for rule, count in sorted(rules.most_common(), key=lambda item: item[0].name): yield "\t{}\t{}".format(count, rule) yield "\t{}".format(len(self)) def __str__(self): return self.dot() def __len__(self): return self._len
snakemake/dag.py
74,934
Definition of a batch for calculating only a partial DAG. Directed acyclic graph of jobs. Inner function for DFS traversal. Return whether the given job is ready to execute. Archives workflow such that it can be re-run on a different system. Archiving includes git versioned files (i.e. Snakefiles, config files, ...), ancestral input files and conda environments. Perform a breadth-first traversal of the DAG. Raise exception if output files of job are missing. Check that no output file is contained in a directory output of the same or another rule. Check dynamic output and update downstream rules if necessary. Check if any output files are incomplete. This is done by looking up markers in the persistence module. Raise an exception if a wildcard of the given job appears to be periodic, indicating a cyclic dependency. Removes files generated by the workflow. Close all remote objects. Collect all potential dependencies of a job. These might contain ambiguities. The keys of the returned dict represent the files to be considered. Delete given job from DAG. Removes jobs downstream of jobs specified by --omit-from. Perform depth-first traversal of the DAG. Returns the downstream of --omit-from rules or files and themselves. Return whether a job is dynamic (i.e. it is only a placeholder for those that are created after the job with dynamic output has finished. Iterate over all jobs with dynamic output files. Finish a given job (e.g. remove from ready jobs, mark depending jobs as ready). Return whether a job is finished. Iterate over all jobs that have been finished. Return the defined batch of the given items. Items are usually input files. Use pipes to determine job groups. Check if every pipe has exactly one consumer Write-protect output files that are marked with protected(). Remove local files if they are no longer needed and upload. Remove temp files if they are no longer needed. Update temp_mtimes. Touches those output files that are marked for touching. Convert hsv colors to hex-encoded rgb colors usable by html. Assemble a html style node for graphviz Return whether given job has been specified via --omit-from. Return whether given job has been specified via --until. Return the external jobid of the job if it is marked as incomplete. Returns None, if job is not incomplete, or if no external jobid has been registered or if force_incomplete is True. Return list of incomplete files. Initialise the DAG. Return True if the underlying rule is to be used for batching the DAG. Return job id of given job. All jobs in the DAG. Perform a breadth-first traversal of the DAG, but also yield the level together with each job. List files in the workdir that are not in the dag. Iterate over all jobs that need to be run and are marked as local. Return whether a temp file that is input of the given job is missing. Return whether a given job needs to be executed. Jobs that need to be executed. Create new job for given rule and (optional) targetfile. This will reuse existing jobs with the same wildcards. Return wildcards that are newly introduced in this job, compared to its ancestors. Return list of files where the current version is newer than the recorded version. Return whether a given job is finished or was not required to run at all. Returns a generator of jobs specified by omitfromjobs. Postprocess the DAG. This has to be invoked after any change to the DAG topology. Return priority of given job. Jobs that are ready to execute. Return the reason of the job execution. Replace given job with new job. 
Return the files a job requests. Iterate over all input files and replace input functions with a fixed string. Generate a new job from a given rule. Removes jobs downstream of jobs specified by --omit-from. Specialize the given rule by inserting newrule into the DAG. Return the total size of temporary input files of the job. If none, return 0. Move files from shadow directory to real output paths. Returns a generator of jobs specified by untiljobs. Update the DAG by adding given jobs and their dependencies. Update the DAG by adding the given job and its dependencies. Update dependencies of checkpoints. Update the DAG by evaluating the output of the given job that contains dynamic output files. Update the information whether a job needs to be executed. Update the OutputIndex. Update job priorities. Update information whether a job is ready to execute. Given jobs must be needrun jobs! make sure that we always consider items in the same order self.batch is one-based, hence we have to subtract 1 extend the last batch to cover rest of list Since not all input files of a batching rule are considered, we cannot run beyond that rule. For the final batch, we do not need to omit anything. check if remaining jobs are valid commonpath raises error if windows drives are different. First deduplicate based on job.conda_env_file Then based on md5sum values First deduplicate based on job.conda_env_file Ensure that outputs are of the correct type (those flagged with directory() are directories and not files and vice versa). It is possible, due to archive expansion or cluster clock skew, that the files appear older than the input. But we know they must be new, so touch them to update timestamps. This also serves to touch outputs when using the --touch flag. Note that if the input files somehow have a future date then this will not currently be spotted and the job will always be re-run. This won't create normal files if missing, but will create the flag file for directories. Remake absolute symlinks as relative handle temp input temp input temp output If log file was not created during job, create an empty one. handle output files immediately force local mtime to match remote, since conversions from S3 headers are not 100% reliable without this, newness comparisons may fail down the line handle input files TODO what about remote inputs that are used by multiple jobs? TODO this might fail if a rule discarded here is needed elsewhere If possible, obtain inventory information starting from given file and store it in the IOCache. This should provide faster access to existence and mtime information than querying file by file. If the file type does not support inventory information, this call is a no-op. no producing job found file not found, hence missing input file found, no problem delete job from tree For the final batch, ensure that all input files from previous batches are present on disk. delete job from tree TODO find a way to handle added/removed input files here? Update the output mintime of all jobs. We traverse them in BFS (level order) starting from target jobs. Then, we check output mintime of job itself and all direct descendants, which have already been visited in the level before. This way, we achieve a linear runtime. 
update prior reason for all candidate jobs update len including finished jobs (because they have already increased the job counter) need to cast as list before deleting jobs BFS into depending needrun jobs if in same group Note: never go up here (into depending), because it may contain jobs that have been sorted out due to e.g. ruleorder. merge with previously determined groups if present update assignment span connected components if requested extend the candidate group to all involved jobs generate a random unique group name re-evaluate depending jobs, replace and update DAG This has to be done for each checkpoint, otherwise, jobs may be missing in the end. mark depending jobs as ready skip jobs that are marked as until jobs simulate that this job ran and was finished before We might have new jobs, so we need to ensure that all conda envs and singularity images are set up. this happens e.g. in dryrun if output is not yet present no targetfile needed for job remove from cache if not job_.dynamic_input: use a set to circumvent multiple jobs for the same file if user specified it twice only consider the defined partition of the input files omit the file if it comes from a subworkflow no dependency found stop criterion reached for this node stop criterion reached for this node color rules markup node ids calculate nodes calculate edges NOTE: This is code from the rule_dot method. This method could be split like there as well, however, it cannot easily reuse the _dot method due to the different node type node ids Compute colors for rules NOTE: This is a workaround. It would be more informative to show the code of the input function here (if it is short enough). This cannot be easily done with the inspect module, since the line numbers in the Snakefile do not behave as expected. One (complicated) solution for this would be to find the Snakefile and directly extract the code of the function. Escape html relevant chars like '<' and '>' in filenames These can be added by input functions etc. and cannot be displayed in graphviz HTML nodes. calculate edges remove new line characters, leading and trailing whitespace input files this is an input file that is not created by any job The reason for the second check is that dangling symlinks fail f.exists. Remove non-empty dirs if flagged as temp() Ignore hidden files and don't traverse into hidden dirs
9,218
en
0.88887
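The graphviz export in the dag.py record above assigns each rule its own colour by spreading hues evenly over part of the HSV colour wheel. A minimal, self-contained sketch of that colouring step, using only the standard-library colorsys module (the function name rule_colors is ours, not Snakemake's):

import colorsys

def rule_colors(rule_names, saturation=0.6, value=0.85):
    # Spread hues over two thirds of the colour wheel, mirroring the
    # huefactor = 2 / (3 * len(rules)) used by the DAG dot/HTML export,
    # and return one "#RRGGBB" string per rule name.
    huefactor = 2 / (3 * len(rule_names))
    colors = {}
    for i, name in enumerate(rule_names):
        r, g, b = (round(255 * x)
                   for x in colorsys.hsv_to_rgb(i * huefactor, saturation, value))
        colors[name] = "#{:02X}{:02X}{:02X}".format(r, g, b)
    return colors

# e.g. rule_colors(["all", "align", "sort"]) yields three distinct hex colours.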
#!/usr/bin/env python3 import matplotlib.pyplot as plt import numpy as np pool_forward = __import__('1-pool_forward').pool_forward if __name__ == "__main__": np.random.seed(0) lib = np.load('../data/MNIST.npz') X_train = lib['X_train'] m, h, w = X_train.shape X_train_a = X_train.reshape((-1, h, w, 1)) X_train_b = 1 - X_train_a X_train_c = np.concatenate((X_train_a, X_train_b), axis=3) print(X_train_c.shape) plt.imshow(X_train_c[0, :, :, 0]) plt.show() plt.imshow(X_train_c[0, :, :, 1]) plt.show() A = pool_forward(X_train_c, (2, 2), stride=(2, 2)) print(A.shape) plt.imshow(A[0, :, :, 0]) plt.show() plt.imshow(A[0, :, :, 1]) plt.show()
supervised_learning/0x07-cnn/1-main.py
716
!/usr/bin/env python3
21
fr
0.448822
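The driver script above imports pool_forward from 1-pool_forward.py, which is not part of this record. A plausible implementation compatible with the call pool_forward(X_train_c, (2, 2), stride=(2, 2)) is sketched below; it is an assumption about that module, not its actual contents:

import numpy as np

def pool_forward(A_prev, kernel_shape, stride=(1, 1), mode='max'):
    """Forward pass over a pooling layer (hypothetical sketch).

    A_prev: ndarray of shape (m, h_prev, w_prev, c)
    kernel_shape: (kh, kw); stride: (sh, sw); mode: 'max' or 'avg'
    """
    m, h_prev, w_prev, c = A_prev.shape
    kh, kw = kernel_shape
    sh, sw = stride
    h_out = (h_prev - kh) // sh + 1
    w_out = (w_prev - kw) // sw + 1
    out = np.zeros((m, h_out, w_out, c))
    for i in range(h_out):
        for j in range(w_out):
            window = A_prev[:, i * sh:i * sh + kh, j * sw:j * sw + kw, :]
            out[:, i, j, :] = (window.max(axis=(1, 2)) if mode == 'max'
                               else window.mean(axis=(1, 2)))
    return out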
from starlette.datastructures import URL from dashboard.pagination import PageControl, get_page_controls, get_page_number def test_single_page_does_not_include_any_pagination_controls(): """ When there is only a single page, no pagination controls should render. """ url = URL("/") controls = get_page_controls(url, current_page=1, total_pages=1) assert controls == [] def test_first_page_in_pagination_controls(): """ First page in pagination controls, should render as: Previous [1] 2 3 4 5 Next """ url = URL("/") controls = get_page_controls(url, current_page=1, total_pages=5) assert controls == [ PageControl(text="Previous", is_disabled=True), PageControl(text="1", is_active=True, url=URL("/")), PageControl(text="2", url=URL("/?page=2")), PageControl(text="3", url=URL("/?page=3")), PageControl(text="4", url=URL("/?page=4")), PageControl(text="5", url=URL("/?page=5")), PageControl(text="Next", url=URL("/?page=2")), ] def test_second_page_in_pagination_controls(): """ Second page in pagination controls, should render as: Previous 1 [2] 3 4 5 Next """ url = URL("/") controls = get_page_controls(url, current_page=2, total_pages=5) assert controls == [ PageControl(text="Previous", url=URL("/")), # No query parameter needed. PageControl(text="1", url=URL("/")), PageControl(text="2", is_active=True, url=URL("/?page=2")), PageControl(text="3", url=URL("/?page=3")), PageControl(text="4", url=URL("/?page=4")), PageControl(text="5", url=URL("/?page=5")), PageControl(text="Next", url=URL("/?page=3")), ] def test_middle_page_in_pagination_controls(): """ Middle page in pagination controls, should render as: Previous 1 2 [3] 4 5 Next """ url = URL("/?page=3") controls = get_page_controls(url, current_page=3, total_pages=5) assert controls == [ PageControl(text="Previous", url=URL("/?page=2")), PageControl(text="1", url=URL("/")), PageControl(text="2", url=URL("/?page=2")), PageControl(text="3", is_active=True, url=URL("/?page=3")), PageControl(text="4", url=URL("/?page=4")), PageControl(text="5", url=URL("/?page=5")), PageControl(text="Next", url=URL("/?page=4")), ] def test_last_page_in_pagination_controls(): """ Last page in pagination controls, should render as: Previous 1 2 3 4 [5] Next """ url = URL("/?page=5") controls = get_page_controls(url, current_page=5, total_pages=5) assert controls == [ PageControl(text="Previous", url=URL("/?page=4")), PageControl(text="1", url=URL("/")), PageControl(text="2", url=URL("/?page=2")), PageControl(text="3", url=URL("/?page=3")), PageControl(text="4", url=URL("/?page=4")), PageControl(text="5", url=URL("/?page=5"), is_active=True), PageControl(text="Next", is_disabled=True), ] def test_first_page_in_long_pagination_controls(): """ First page in long pagination controls, should render as: Previous [1] 2 3 4 5 ... 49 50 Next """ url = URL("/") controls = get_page_controls(url, current_page=1, total_pages=50) assert controls == [ PageControl(text="Previous", is_disabled=True), PageControl(text="1", is_active=True, url=URL("/")), PageControl(text="2", url=URL("/?page=2")), PageControl(text="3", url=URL("/?page=3")), PageControl(text="4", url=URL("/?page=4")), PageControl(text="5", url=URL("/?page=5")), PageControl(text="…", is_disabled=True), PageControl(text="49", url=URL("/?page=49")), PageControl(text="50", url=URL("/?page=50")), PageControl(text="Next", url=URL("/?page=2")), ] def test_last_page_in_long_pagination_controls(): """ Last page in long pagination controls, should render as: Previous 1 2 ... 
46 47 48 49 [50] Next """ url = URL("/?page=50") controls = get_page_controls(url, current_page=50, total_pages=50) assert controls == [ PageControl(text="Previous", url=URL("/?page=49")), PageControl(text="1", url=URL("/")), PageControl(text="2", url=URL("/?page=2")), PageControl(text="…", is_disabled=True), PageControl(text="46", url=URL("/?page=46")), PageControl(text="47", url=URL("/?page=47")), PageControl(text="48", url=URL("/?page=48")), PageControl(text="49", url=URL("/?page=49")), PageControl(text="50", is_active=True, url=URL("/?page=50")), PageControl(text="Next", is_disabled=True), ] def test_ellipsis_fill_in(): """ If an ellipsis marker can be replaced with a single page marker, then we should do so. """ url = URL("/?page=6") controls = get_page_controls(url, current_page=6, total_pages=11) assert controls == [ PageControl(text="Previous", url=URL("/?page=5")), PageControl(text="1", url=URL("/")), PageControl(text="2", url=URL("/?page=2")), PageControl(text="3", url=URL("/?page=3")), # Ellipsis fill-in case. PageControl(text="4", url=URL("/?page=4")), PageControl(text="5", url=URL("/?page=5")), PageControl(text="6", url=URL("/?page=6"), is_active=True), PageControl(text="7", url=URL("/?page=7")), PageControl(text="8", url=URL("/?page=8")), PageControl(text="9", url=URL("/?page=9")), # Ellipsis fill-in case. PageControl(text="10", url=URL("/?page=10")), PageControl(text="11", url=URL("/?page=11")), PageControl(text="Next", url=URL("/?page=7")), ] def test_default_page_number(): url = URL("/") page = get_page_number(url=url) assert page == 1 def test_explicit_page_number(): url = URL("/?page=2") page = get_page_number(url=url) assert page == 2 def test_invalid_page_number(): url = URL("/?page=invalid") page = get_page_number(url=url) assert page == 1
tests/test_pagination.py
6,089
If an ellipsis marker can be replaced with a single page marker, then we should do so. First page in long pagination controls, should render as: Previous [1] 2 3 4 5 ... 49 50 Next First page in pagination controls, should render as: Previous [1] 2 3 4 5 Next Last page in long pagination controls, should render as: Previous 1 2 ... 46 47 48 49 [50] Next Last page in pagination controls, should render as: Previous 1 2 3 4 [5] Next Middle page in pagination controls, should render as: Previous 1 2 [3] 4 5 Next Second page in pagination controls, should render as: Previous 1 [2] 3 4 5 Next When there is only a single page, no pagination controls should render. No query parameter needed. Ellipsis fill-in case. Ellipsis fill-in case.
740
en
0.606416
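The tests above pin down get_page_number completely: a missing parameter and a non-integer value both fall back to page 1. A sketch of an implementation that satisfies them (the real dashboard.pagination code is not shown in this record, so this is an assumption):

from urllib.parse import parse_qs

from starlette.datastructures import URL

def get_page_number(url: URL) -> int:
    # Read "?page=" from the URL's query string; default to 1 when the
    # parameter is absent or not a valid integer.
    params = parse_qs(url.query)
    try:
        return int(params.get("page", ["1"])[0])
    except ValueError:
        return 1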
# Generated by Django 2.2 on 2019-04-18 10:46 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('core', '0006_auto_20190417_2232'), ] operations = [ migrations.AlterField( model_name='question', name='order', field=models.IntegerField(), ), ]
core/migrations/0007_auto_20190418_0646.py
372
Generated by Django 2.2 on 2019-04-18 10:46
43
en
0.480933
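For context, the AlterField above corresponds to a model field along these lines; the surrounding Question model is hypothetical, only the order field is implied by the migration:

from django.db import models

class Question(models.Model):
    # After migration 0007 the ordering column is a plain, required integer.
    order = models.IntegerField()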
# Generated by Django 2.2.1 on 2020-03-26 05:17 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('webapi', '0015_auto_20200326_0955'), ] operations = [ migrations.AlterField( model_name='property', name='property_name', field=models.CharField(default='', max_length=255), ), migrations.AlterField( model_name='unit', name='symbol', field=models.CharField(default='', max_length=255), ), ]
src/webapi/migrations/0016_auto_20200326_1417.py
571
Generated by Django 2.2.1 on 2020-03-26 05:17
45
en
0.572193
# MIT License # # Copyright (c) 2018 Haoxintong # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. """""" import os import time import mxnet as mx import numpy as np from gluonfr.loss import ArcLoss from mxnet.gluon.data.vision import MNIST from mxnet import nd, gluon, metric as mtc, autograd as ag from examples.mnist.net.lenet import LeNetPlus from examples.mnist.utils import transform_train, transform_val, plot_result os.environ['MXNET_GLUON_REPO'] = 'https://apache-mxnet.s3.cn-north-1.amazonaws.com.cn/' os.environ['MXNET_ENABLE_GPU_P2P'] = '0' def validate(net, val_data, ctx, loss, plot=False): metric = mtc.Accuracy() val_loss = 0 ebs = [] lbs = [] for i, batch in enumerate(val_data): data = gluon.utils.split_and_load(batch[0], ctx_list=ctx, batch_axis=0, even_split=False) labels = gluon.utils.split_and_load(batch[1], ctx_list=ctx, batch_axis=0, even_split=False) ots = [net(X) for X in data] embedds = [ot[0] for ot in ots] outputs = [ot[1] for ot in ots] losses = [loss(yhat, y) for yhat, y in zip(outputs, labels)] metric.update(labels, outputs) val_loss += sum([l.mean().asscalar() for l in losses]) / len(losses) if plot: for es, ls in zip(embedds, labels): assert len(es) == len(ls) for idx in range(len(es)): ebs.append(es[idx].asnumpy()) lbs.append(ls[idx].asscalar()) if plot: ebs = np.vstack(ebs) lbs = np.hstack(lbs) _, val_acc = metric.get() return val_acc, val_loss / len(val_data), ebs, lbs def train(): epochs = 100 lr = 0.1 lr_steps = [40, 70, np.inf] momentum = 0.9 wd = 5e-4 plot_period = 5 ctx = [mx.gpu(i) for i in range(2)] batch_size = 256 margin_s = 5 margin_m = 0.2 train_set = MNIST(train=True, transform=transform_train) train_data = gluon.data.DataLoader(train_set, batch_size, True, num_workers=4, last_batch='discard') val_set = MNIST(train=False, transform=transform_val) val_data = gluon.data.DataLoader(val_set, batch_size, shuffle=False, num_workers=4) net = LeNetPlus(embedding_size=64, feature_norm=True, weight_norm=True) net.initialize(init=mx.init.MSRAPrelu(), ctx=ctx) # net.load_parameters("./pretrained_mnist.params", ctx=ctx) net.hybridize() loss = ArcLoss(s=margin_s, m=margin_m, classes=10) train_params = net.collect_params() trainer = gluon.Trainer(train_params, 'sgd', {'learning_rate': lr, 'momentum': momentum, 'wd': wd}) lr_counter = 0 metric = mtc.Accuracy() num_batch = len(train_data) for epoch in range(epochs+1): if epoch == lr_steps[lr_counter]: trainer.set_learning_rate(trainer.learning_rate * 0.1) lr_counter += 1 # if (epoch % plot_period) == 0: # plot = True # else: plot 
= False train_loss = 0 metric.reset() tic = time.time() ebs = [] lbs = [] for batch in train_data: data = gluon.utils.split_and_load(batch[0], ctx_list=ctx, batch_axis=0, even_split=False) labels = gluon.utils.split_and_load(batch[1], ctx_list=ctx, batch_axis=0, even_split=False) with ag.record(): ots = [net(X) for X in data] embedds = [ot[0] for ot in ots] outputs = [ot[1] for ot in ots] losses = [loss(yhat, y) for yhat, y in zip(outputs, labels)] for l in losses: ag.backward(l) if plot: for es, ls in zip(embedds, labels): assert len(es) == len(ls) for idx in range(len(es)): ebs.append(es[idx].asnumpy()) lbs.append(ls[idx].asscalar()) trainer.step(batch_size) metric.update(labels, outputs) train_loss += sum([l.mean().asscalar() for l in losses]) / len(losses) _, train_acc = metric.get() train_loss /= num_batch val_acc, val_loss, val_ebs, val_lbs = validate(net, val_data, ctx, loss, plot) if plot: ebs = np.vstack(ebs) lbs = np.hstack(lbs) plot_result(ebs, lbs, os.path.join("../../resources", "arcloss-train-epoch{}.png".format(epoch))) plot_result(val_ebs, val_lbs, os.path.join("../../resources", "arcloss-val-epoch{}.png".format(epoch))) toc = time.time() print('[epoch % 3d] train accuracy: %.6f, train loss: %.6f | ' 'val accuracy: %.6f, val loss: %.6f, time: %.6f' % (epoch, train_acc, train_loss, val_acc, val_loss, toc - tic)) # if epoch == 10: # net.save_parameters("./pretrained_mnist.params") # net.save_parameters("./models/attention%d-cifar10-epoch-%d.params" % (args.num_layers, epoch)) if __name__ == '__main__': train()
examples/mnist/train_mnist_arcloss.py
5,996
MIT License Copyright (c) 2018 Haoxintong Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. net.load_parameters("./pretrained_mnist.params", ctx=ctx) if (epoch % plot_period) == 0: plot = True else: if epoch == 10: net.save_parameters("./pretrained_mnist.params") net.save_parameters("./models/attention%d-cifar10-epoch-%d.params" % (args.num_layers, epoch))
1,341
en
0.76587
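ArcLoss(s=margin_s, m=margin_m, classes=10) from gluonfr applies an additive angular margin before the usual softmax cross-entropy. As a rough, framework-free sketch of that margin arithmetic (the generic ArcFace formulation, not the gluonfr source):

import numpy as np

def arcface_logits(cos_theta, labels, s=5.0, m=0.2):
    # cos_theta: (batch, classes) cosine similarities between L2-normalised
    # embeddings and class weights; add the angular margin m only to each
    # sample's target class, then rescale by s.
    theta = np.arccos(np.clip(cos_theta, -1.0, 1.0))
    margin = np.zeros_like(cos_theta)
    margin[np.arange(len(labels)), labels] = m
    return s * np.cos(theta + margin)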
""" A mechanism for plotting field values along a line through a dataset """ #----------------------------------------------------------------------------- # Copyright (c) 2017, yt Development Team. # # Distributed under the terms of the Modified BSD License. # # The full license is in the file COPYING.txt, distributed with this software. #----------------------------------------------------------------------------- import numpy as np from collections import defaultdict from yt.funcs import \ iterable, \ mylog from yt.units.unit_object import \ Unit from yt.units.yt_array import \ YTArray from yt.visualization.base_plot_types import \ PlotMPL from yt.visualization.plot_container import \ PlotContainer, \ PlotDictionary, \ log_transform, \ linear_transform, \ invalidate_plot class LineBuffer(object): r""" LineBuffer(ds, start_point, end_point, npoints, label = None) This takes a data source and implements a protocol for generating a 'pixelized', fixed-resolution line buffer. In other words, LineBuffer takes a starting point, ending point, and number of sampling points and can subsequently generate YTArrays of field values along the sample points. Parameters ---------- ds : :class:`yt.data_objects.static_output.Dataset` This is the dataset object holding the data that can be sampled by the LineBuffer start_point : n-element list, tuple, ndarray, or YTArray Contains the coordinates of the first point for constructing the LineBuffer. Must contain n elements where n is the dimensionality of the dataset. end_point : n-element list, tuple, ndarray, or YTArray Contains the coordinates of the first point for constructing the LineBuffer. Must contain n elements where n is the dimensionality of the dataset. npoints : int How many points to sample between start_point and end_point Examples -------- >>> lb = yt.LineBuffer(ds, (.25, 0, 0), (.25, 1, 0), 100) >>> lb[('all', 'u')].max() 0.11562424257143075 dimensionless """ def __init__(self, ds, start_point, end_point, npoints, label=None): self.ds = ds self.start_point = _validate_point(start_point, ds, start=True) self.end_point = _validate_point(end_point, ds) self.npoints = npoints self.label = label self.data = {} def keys(self): return self.data.keys() def __setitem__(self, item, val): self.data[item] = val def __getitem__(self, item): if item in self.data: return self.data[item] mylog.info("Making a line buffer with %d points of %s" % \ (self.npoints, item)) self.points, self.data[item] = self.ds.coordinates.pixelize_line(item, self.start_point, self.end_point, self.npoints) return self.data[item] def __delitem__(self, item): del self.data[item] class LinePlotDictionary(PlotDictionary): def __init__(self, data_source): super(LinePlotDictionary, self).__init__(data_source) self.known_dimensions = {} def _sanitize_dimensions(self, item): field = self.data_source._determine_fields(item)[0] finfo = self.data_source.ds.field_info[field] dimensions = Unit( finfo.units, registry=self.data_source.ds.unit_registry).dimensions if dimensions not in self.known_dimensions: self.known_dimensions[dimensions] = item ret_item = item else: ret_item = self.known_dimensions[dimensions] return ret_item def __getitem__(self, item): ret_item = self._sanitize_dimensions(item) return super(LinePlotDictionary, self).__getitem__(ret_item) def __setitem__(self, item, value): ret_item = self._sanitize_dimensions(item) super(LinePlotDictionary, self).__setitem__(ret_item, value) def __contains__(self, item): ret_item = self._sanitize_dimensions(item) return 
super(LinePlotDictionary, self).__contains__(ret_item) class LinePlot(PlotContainer): r""" A class for constructing line plots Parameters ---------- ds : :class:`yt.data_objects.static_output.Dataset` This is the dataset object corresponding to the simulation output to be plotted. fields : string / tuple, or list of strings / tuples The name(s) of the field(s) to be plotted. start_point : n-element list, tuple, ndarray, or YTArray Contains the coordinates of the first point for constructing the line. Must contain n elements where n is the dimensionality of the dataset. end_point : n-element list, tuple, ndarray, or YTArray Contains the coordinates of the first point for constructing the line. Must contain n elements where n is the dimensionality of the dataset. npoints : int How many points to sample between start_point and end_point for constructing the line plot figure_size : int or two-element iterable of ints Size in inches of the image. Default: 5 (5x5) fontsize : int Font size for all text in the plot. Default: 14 field_labels : dictionary Keys should be the field names. Values should be latex-formattable strings used in the LinePlot legend Default: None Example ------- >>> import yt >>> >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030') >>> >>> plot = yt.LinePlot(ds, 'density', [0, 0, 0], [1, 1, 1], 512) >>> plot.add_legend('density') >>> plot.set_x_unit('cm') >>> plot.set_unit('density', 'kg/cm**3') >>> plot.save() """ _plot_type = 'line_plot' def __init__(self, ds, fields, start_point, end_point, npoints, figure_size=5., fontsize=14., field_labels=None): """ Sets up figure and axes """ line = LineBuffer(ds, start_point, end_point, npoints, label=None) self.lines = [line] self._initialize_instance(self, ds, fields, figure_size, fontsize, field_labels) self._setup_plots() @classmethod def _initialize_instance(cls, obj, ds, fields, figure_size=5., fontsize=14., field_labels=None): obj._x_unit = None obj._y_units = {} obj._titles = {} data_source = ds.all_data() obj.fields = data_source._determine_fields(fields) obj.plots = LinePlotDictionary(data_source) obj.include_legend = defaultdict(bool) super(LinePlot, obj).__init__(data_source, figure_size, fontsize) for f in obj.fields: finfo = obj.data_source.ds._get_field_info(*f) if finfo.take_log: obj._field_transform[f] = log_transform else: obj._field_transform[f] = linear_transform if field_labels is None: obj.field_labels = {} else: obj.field_labels = field_labels for f in obj.fields: if f not in obj.field_labels: obj.field_labels[f] = f[1] @classmethod def from_lines(cls, ds, fields, lines, figure_size=5., font_size=14., field_labels=None): """ A class method for constructing a line plot from multiple sampling lines Parameters ---------- ds : :class:`yt.data_objects.static_output.Dataset` This is the dataset object corresponding to the simulation output to be plotted. fields : field name or list of field names The name(s) of the field(s) to be plotted. lines : list of :class:`yt.visualization.line_plot.LineBuffer` instances The lines from which to sample data figure_size : int or two-element iterable of ints Size in inches of the image. Default: 5 (5x5) fontsize : int Font size for all text in the plot. Default: 14 field_labels : dictionary Keys should be the field names. 
Values should be latex-formattable strings used in the LinePlot legend Default: None Example -------- >>> ds = yt.load('SecondOrderTris/RZ_p_no_parts_do_nothing_bcs_cone_out.e', step=-1) >>> fields = [field for field in ds.field_list if field[0] == 'all'] >>> lines = [] >>> lines.append(yt.LineBuffer(ds, [0.25, 0, 0], [0.25, 1, 0], 100, label='x = 0.25')) >>> lines.append(yt.LineBuffer(ds, [0.5, 0, 0], [0.5, 1, 0], 100, label='x = 0.5')) >>> plot = yt.LinePlot.from_lines(ds, fields, lines) >>> plot.save() """ obj = cls.__new__(cls) obj.lines = lines cls._initialize_instance(obj, ds, fields, figure_size, font_size, field_labels) obj._setup_plots() return obj def _get_plot_instance(self, field): fontscale = self._font_properties._size / 14. top_buff_size = 0.35*fontscale x_axis_size = 1.35*fontscale y_axis_size = 0.7*fontscale right_buff_size = 0.2*fontscale if iterable(self.figure_size): figure_size = self.figure_size else: figure_size = (self.figure_size, self.figure_size) xbins = np.array([x_axis_size, figure_size[0], right_buff_size]) ybins = np.array([y_axis_size, figure_size[1], top_buff_size]) size = [xbins.sum(), ybins.sum()] x_frac_widths = xbins/size[0] y_frac_widths = ybins/size[1] axrect = ( x_frac_widths[0], y_frac_widths[0], x_frac_widths[1], y_frac_widths[1], ) try: plot = self.plots[field] except KeyError: plot = PlotMPL(self.figure_size, axrect, None, None) self.plots[field] = plot return plot def _setup_plots(self): if self._plot_valid: return for plot in self.plots.values(): plot.axes.cla() for line in self.lines: dimensions_counter = defaultdict(int) for field in self.fields: finfo = self.ds.field_info[field] dimensions = Unit(finfo.units, registry=self.ds.unit_registry).dimensions dimensions_counter[dimensions] += 1 for field in self.fields: # get plot instance plot = self._get_plot_instance(field) # calculate x and y x, y = self.ds.coordinates.pixelize_line( field, line.start_point, line.end_point, line.npoints) # scale x and y to proper units if self._x_unit is None: unit_x = x.units else: unit_x = self._x_unit if field in self._y_units: unit_y = self._y_units[field] else: unit_y = y.units x = x.to(unit_x) y = y.to(unit_y) # determine legend label str_seq = [] str_seq.append(line.label) str_seq.append(self.field_labels[field]) delim = "; " legend_label = delim.join(filter(None, str_seq)) # apply plot to matplotlib axes plot.axes.plot(x, y, label=legend_label) # apply log transforms if requested if self._field_transform[field] != linear_transform: if (y < 0).any(): plot.axes.set_yscale('symlog') else: plot.axes.set_yscale('log') # set font properties plot._set_font_properties(self._font_properties, None) # set x and y axis labels axes_unit_labels = self._get_axes_unit_labels(unit_x, unit_y) if self._xlabel is not None: x_label = self._xlabel else: x_label = r'$\rm{Path\ Length' + axes_unit_labels[0]+'}$' if self._ylabel is not None: y_label = self._ylabel else: finfo = self.ds.field_info[field] dimensions = Unit(finfo.units, registry=self.ds.unit_registry).dimensions if dimensions_counter[dimensions] > 1: y_label = (r'$\rm{Multiple\ Fields}$' + r'$\rm{' + axes_unit_labels[1]+'}$') else: y_label = (finfo.get_latex_display_name() + r'$\rm{' + axes_unit_labels[1]+'}$') plot.axes.set_xlabel(x_label) plot.axes.set_ylabel(y_label) # apply title if field in self._titles: plot.axes.set_title(self._titles[field]) # apply legend dim_field = self.plots._sanitize_dimensions(field) if self.include_legend[dim_field]: plot.axes.legend() self._plot_valid = True @invalidate_plot def 
annotate_legend(self, field): """ Adds a legend to the `LinePlot` instance. The `_sanitize_dimensions` call ensures that a legend label will be added for every field of a multi-field plot """ dim_field = self.plots._sanitize_dimensions(field) self.include_legend[dim_field] = True @invalidate_plot def set_x_unit(self, unit_name): """Set the unit to use along the x-axis Parameters ---------- unit_name: str The name of the unit to use for the x-axis unit """ self._x_unit = unit_name @invalidate_plot def set_unit(self, field, unit_name): """Set the unit used to plot the field Parameters ---------- field: str or field tuple The name of the field to set the units for unit_name: str The name of the unit to use for this field """ self._y_units[self.data_source._determine_fields(field)[0]] = unit_name @invalidate_plot def annotate_title(self, field, title): """Set the unit used to plot the field Parameters ---------- field: str or field tuple The name of the field to set the units for title: str The title to use for the plot """ self._titles[self.data_source._determine_fields(field)[0]] = title def _validate_point(point, ds, start=False): if not iterable(point): raise RuntimeError( "Input point must be array-like" ) if not isinstance(point, YTArray): point = ds.arr(point, 'code_length') if len(point.shape) != 1: raise RuntimeError( "Input point must be a 1D array" ) if point.shape[0] < ds.dimensionality: raise RuntimeError( "Input point must have an element for each dimension" ) # need to pad to 3D elements to avoid issues later if point.shape[0] < 3: if start: val = 0 else: val = 1 point = np.append(point.d, [val]*(3-ds.dimensionality))*point.uq return point
yt/visualization/line_plot.py
15,757
LineBuffer(ds, start_point, end_point, npoints, label = None) This takes a data source and implements a protocol for generating a 'pixelized', fixed-resolution line buffer. In other words, LineBuffer takes a starting point, ending point, and number of sampling points and can subsequently generate YTArrays of field values along the sample points. Parameters ---------- ds : :class:`yt.data_objects.static_output.Dataset` This is the dataset object holding the data that can be sampled by the LineBuffer start_point : n-element list, tuple, ndarray, or YTArray Contains the coordinates of the first point for constructing the LineBuffer. Must contain n elements where n is the dimensionality of the dataset. end_point : n-element list, tuple, ndarray, or YTArray Contains the coordinates of the first point for constructing the LineBuffer. Must contain n elements where n is the dimensionality of the dataset. npoints : int How many points to sample between start_point and end_point Examples -------- >>> lb = yt.LineBuffer(ds, (.25, 0, 0), (.25, 1, 0), 100) >>> lb[('all', 'u')].max() 0.11562424257143075 dimensionless A class for constructing line plots Parameters ---------- ds : :class:`yt.data_objects.static_output.Dataset` This is the dataset object corresponding to the simulation output to be plotted. fields : string / tuple, or list of strings / tuples The name(s) of the field(s) to be plotted. start_point : n-element list, tuple, ndarray, or YTArray Contains the coordinates of the first point for constructing the line. Must contain n elements where n is the dimensionality of the dataset. end_point : n-element list, tuple, ndarray, or YTArray Contains the coordinates of the first point for constructing the line. Must contain n elements where n is the dimensionality of the dataset. npoints : int How many points to sample between start_point and end_point for constructing the line plot figure_size : int or two-element iterable of ints Size in inches of the image. Default: 5 (5x5) fontsize : int Font size for all text in the plot. Default: 14 field_labels : dictionary Keys should be the field names. Values should be latex-formattable strings used in the LinePlot legend Default: None Example ------- >>> import yt >>> >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030') >>> >>> plot = yt.LinePlot(ds, 'density', [0, 0, 0], [1, 1, 1], 512) >>> plot.add_legend('density') >>> plot.set_x_unit('cm') >>> plot.set_unit('density', 'kg/cm**3') >>> plot.save() Sets up figure and axes Adds a legend to the `LinePlot` instance. The `_sanitize_dimensions` call ensures that a legend label will be added for every field of a multi-field plot Set the unit used to plot the field Parameters ---------- field: str or field tuple The name of the field to set the units for title: str The title to use for the plot A class method for constructing a line plot from multiple sampling lines Parameters ---------- ds : :class:`yt.data_objects.static_output.Dataset` This is the dataset object corresponding to the simulation output to be plotted. fields : field name or list of field names The name(s) of the field(s) to be plotted. lines : list of :class:`yt.visualization.line_plot.LineBuffer` instances The lines from which to sample data figure_size : int or two-element iterable of ints Size in inches of the image. Default: 5 (5x5) fontsize : int Font size for all text in the plot. Default: 14 field_labels : dictionary Keys should be the field names. 
Values should be latex-formattable strings used in the LinePlot legend Default: None Example -------- >>> ds = yt.load('SecondOrderTris/RZ_p_no_parts_do_nothing_bcs_cone_out.e', step=-1) >>> fields = [field for field in ds.field_list if field[0] == 'all'] >>> lines = [] >>> lines.append(yt.LineBuffer(ds, [0.25, 0, 0], [0.25, 1, 0], 100, label='x = 0.25')) >>> lines.append(yt.LineBuffer(ds, [0.5, 0, 0], [0.5, 1, 0], 100, label='x = 0.5')) >>> plot = yt.LinePlot.from_lines(ds, fields, lines) >>> plot.save() Set the unit used to plot the field Parameters ---------- field: str or field tuple The name of the field to set the units for unit_name: str The name of the unit to use for this field Set the unit to use along the x-axis Parameters ---------- unit_name: str The name of the unit to use for the x-axis unit A mechanism for plotting field values along a line through a dataset ----------------------------------------------------------------------------- Copyright (c) 2017, yt Development Team. Distributed under the terms of the Modified BSD License. The full license is in the file COPYING.txt, distributed with this software.----------------------------------------------------------------------------- get plot instance calculate x and y scale x and y to proper units determine legend label apply plot to matplotlib axes apply log transforms if requested set font properties set x and y axis labels apply title apply legend need to pad to 3D elements to avoid issues later
5,121
en
0.599818
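LineBuffer and LinePlot both defer the actual sampling to ds.coordinates.pixelize_line, which is not defined in this file. Conceptually it evaluates the field at evenly spaced positions between the two endpoints; a stand-alone sketch of that coordinate generation (an assumption about pixelize_line, not yt's implementation):

import numpy as np

def sample_line_points(start_point, end_point, npoints):
    # Evenly spaced coordinates from start_point to end_point, one row per
    # sample; the field would then be looked up at each of these positions.
    start = np.asarray(start_point, dtype=float)
    end = np.asarray(end_point, dtype=float)
    t = np.linspace(0.0, 1.0, npoints)[:, None]
    return (1.0 - t) * start + t * end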
import pandas as pd import numpy as np import scipy.io import dpsimpy class Reader: def __init__(self, mpc_file_path, mpc_name = 'mpc'): # read input file (returns multidimensional dict) self.mpc_raw = scipy.io.loadmat(mpc_file_path) self.mpc_name = mpc_name def process_mpc(self): version_idx = 0 base_pow_idx = 1 bus_data_idx = 2 gen_data_idx = 3 branch_data_idx = 4 # gencost_data_idx= 5 # Process raw mpc data and create corresponding dataframes # Version self.mpc_version = self.mpc_raw[self.mpc_name][0][0][version_idx] # System frequency (not included in mpc but needed for setting dpsimpy component parameters i.e inductances, capacitances ..) self.mpc_freq = 50 self.mpc_omega = 2*np.pi*50 # Base power (MVA) self.mpc_base_power_MVA = self.mpc_raw[self.mpc_name][0][0][base_pow_idx][0][0] #### Busses mpc_bus_raw = self.mpc_raw[self.mpc_name][0][0][bus_data_idx] bus_data_header = ["bus_i", "type", "Pd", "Qd", "Gs", "Bs", "area", "Vm", "Va", "baseKV", "zone", "Vmax", "Vmin"] self.mpc_bus_data = pd.DataFrame(mpc_bus_raw, columns = bus_data_header) # scipy.io.loadmat loads all matrix entries as double. Convert specific columns back to int self.mpc_bus_data['bus_i'] = self.mpc_bus_data['bus_i'].astype(int) self.mpc_bus_data['type'] = self.mpc_bus_data['type'].astype(int) self.mpc_bus_data['area'] = self.mpc_bus_data['area'].astype(int) self.mpc_bus_data['zone'] = self.mpc_bus_data['zone'].astype(int) #### Generators mpc_gen_raw = self.mpc_raw[self.mpc_name][0][0][gen_data_idx] gen_data_header = ["bus", "Pg", "Qg", "Qmax", "Qmin", "Vg", "mBase", "status", "Pmax", "Pmin", "Pc1", "Pc2", "Qc1min", "Qc1max", "Qc2min", "Qc2max", "ramp_agc", "ramp_10", "ramp_30", "ramp_q", "apf"] self.mpc_gen_data = pd.DataFrame(mpc_gen_raw, columns = gen_data_header) self.mpc_gen_data['bus'] = self.mpc_gen_data['bus'].astype(int) self.mpc_gen_data['status'] = self.mpc_gen_data['status'].astype(int) #### Branches # extract only first 13 columns since following columns include results mpc_branch_raw = self.mpc_raw[self.mpc_name][0][0][branch_data_idx][:, :13] branch_data_header = ["fbus", "tbus", "r", "x", "b", "rateA", "rateB", "rateC", "ratio", "angle", "status", "angmin", "angmax"] self.mpc_branch_data = pd.DataFrame(mpc_branch_raw, columns = branch_data_header) self.mpc_branch_data['fbus'] = self.mpc_branch_data['fbus'].astype(int) self.mpc_branch_data['tbus'] = self.mpc_branch_data['tbus'].astype(int) self.mpc_branch_data['status'] = self.mpc_branch_data['status'].astype(int) #### TODO Generator costs def create_dpsim_objects(self): self.process_mpc() # return values: nodes and components dpsimpy_busses_dict = {} dpsimpy_comp_dict = {} # default multiplier for matpower data mw_w = 1e6 kv_v = 1e3 # Nodes bus = 0 load = 0 generator = 0 inj = 0 for index, bus in self.mpc_bus_data.iterrows(): # create dpsimpy busses bus = bus + 1 bus_index = str(self.mpc_bus_data.at[index,'bus_i']) bus_name = bus_index dpsimpy_busses_dict[bus_name] = dpsimpy.sp.SimNode(bus_name, dpsimpy.PhaseType.Single) # for each bus type create corresponding dpsimpy component # 1 = PQ, 2 = PV, 3 = ref, 4 = isolated bus_type = self.mpc_bus_data.at[index,'type'] # Loads if bus_type == 1: load = load + 1 load_name = "load%s" %load load_p = self.mpc_bus_data.at[index,'Pd'] * mw_w load_q = self.mpc_bus_data.at[index,'Qd'] * mw_w load_baseV = self.mpc_bus_data.at[index,'baseKV'] * kv_v dpsimpy_comp_dict[load_name] = [dpsimpy.sp.ph1.Load(load_name, dpsimpy.LogLevel.info)] dpsimpy_comp_dict[load_name][0].set_parameters(load_p, load_q, load_baseV) 
dpsimpy_comp_dict[load_name][0].modify_power_flow_bus_type(dpsimpy.PowerflowBusType.PQ) # add connections dpsimpy_comp_dict[load_name].append([dpsimpy_busses_dict[bus_index]]) # [to bus] # Generators elif bus_type == 2: generator = generator + 1 gen_name = "gen%s" %generator # relevant data from self.mpc_gen_data. Identification with bus number available in mpc_bus_data and mpc_gen_data gen = self.mpc_gen_data.loc[self.mpc_gen_data['bus'] == self.mpc_bus_data.at[index,'bus_i']] gen_baseS = gen['mBase']*mw_w # gen base MVA default is mpc.baseMVA gen_baseV = self.mpc_bus_data.at[index,'baseKV']*kv_v # gen base kV gen_v = gen['Vg']*gen_baseV # gen set point voltage (gen['Vg'] in p.u.) gen_p = gen['Pg']*mw_w # gen ini. active power (gen['Pg'] in MVA) # gen_q = gen['Qg']*mw_w # gen ini. reactive power (gen['Qg'] in MVAr) gen_nom_s = abs(complex(gen['Pmax'], gen['Qmax'])) # gen nominal power (set default to mpc.baseMVA ? ) dpsimpy_comp_dict[gen_name] = [dpsimpy.sp.ph1.SynchronGenerator(gen_name, dpsimpy.LogLevel.info)] dpsimpy_comp_dict[gen_name][0].set_parameters(gen_nom_s, gen_baseV, gen_p, gen_v, dpsimpy.PowerflowBusType.PV) dpsimpy_comp_dict[gen_name][0].set_base_voltage(gen_baseV) # add connections dpsimpy_comp_dict[gen_name].append([dpsimpy_busses_dict[bus_index]]) # [to bus] # Network injection (slack bus) elif bus_type == 3: inj = inj + 1 extnet_name = "extnet%s" %inj # relevant data from self.mpc_gen_data. Identification with bus number available in mpc_bus_data and mpc_gen_data extnet = self.mpc_gen_data.loc[self.mpc_gen_data['bus'] == self.mpc_bus_data.at[index,'bus_i']] # extnet_baseS= extnet['mBase']*mw_w # default is mpc.baseMVA extnet_baseV = self.mpc_bus_data.at[index,'baseKV']*kv_v extnet_v = extnet['Vg']*extnet_baseV dpsimpy_comp_dict[extnet_name] = [dpsimpy.sp.ph1.NetworkInjection(extnet_name, dpsimpy.LogLevel.info)] dpsimpy_comp_dict[extnet_name][0].set_parameters(extnet_v) dpsimpy_comp_dict[extnet_name][0].set_base_voltage(extnet_baseV) dpsimpy_comp_dict[extnet_name][0].modify_power_flow_bus_type(dpsimpy.PowerflowBusType.VD) # add connections dpsimpy_comp_dict[extnet_name].append([dpsimpy_busses_dict[bus_index]]) # [to bus] #isolated elif bus_type == 4: print("isolated bus type") else: print("bus type error") ### branches #### line = 0 trafo = 0 for index, branch in self.mpc_branch_data.iterrows(): branch_ratio = self.mpc_branch_data.at[index,'ratio'] # Lines if branch_ratio == 0: line = line + 1 line_name = "line%s_%s-%s" %(line, self.mpc_branch_data.at[index,'fbus'] , self.mpc_branch_data.at[index,'tbus']) line_fbus = self.mpc_branch_data.at[index,'fbus'] line_tbus = self.mpc_branch_data.at[index,'tbus'] tmp_fbus = self.mpc_bus_data.loc[self.mpc_bus_data['bus_i'] == line_fbus] tmp_tbus = self.mpc_bus_data.loc[self.mpc_bus_data['bus_i'] == line_tbus] line_fbus_baseV = self.mpc_bus_data.at[tmp_fbus.first_valid_index(),'baseKV']*kv_v line_tbus_baseV = self.mpc_bus_data.at[tmp_tbus.first_valid_index(),'baseKV']*kv_v line_baseZ = line_tbus_baseV*line_tbus_baseV / (self.mpc_base_power_MVA*mw_w) line_r = self.mpc_branch_data.at[index,'r'] * line_baseZ line_x = self.mpc_branch_data.at[index,'x'] * line_baseZ line_b = self.mpc_branch_data.at[index,'b'] / line_baseZ line_l = line_x / self.mpc_omega line_c = line_b / self.mpc_omega line_g = 0 # line conductance is not included in mpc dpsimpy_comp_dict[line_name] = [dpsimpy.sp.ph1.PiLine(line_name, dpsimpy.LogLevel.info)] dpsimpy_comp_dict[line_name][0].set_parameters(line_r, line_l, line_c, line_g) 
dpsimpy_comp_dict[line_name][0].set_base_voltage(line_tbus_baseV) # add connections dpsimpy_comp_dict[line_name].append([dpsimpy_busses_dict[str(line_fbus)], dpsimpy_busses_dict[str(line_tbus)]]) # Transformers else: trafo = trafo + 1 transf_name = "transformer%s_%s-%s" %(trafo, self.mpc_branch_data.at[index,'fbus'] , self.mpc_branch_data.at[index,'tbus']) transf_s = self.mpc_branch_data.at[index,'rateA']*mw_w # Matpower: Used to specify branch flow limits. By default these are limits on apparent power with units in MV transf_fbus = self.mpc_branch_data.at[index,'fbus'] transf_tbus = self.mpc_branch_data.at[index,'tbus'] tmp_fbus = self.mpc_bus_data.loc[self.mpc_bus_data['bus_i'] == transf_fbus] tmp_tbus = self.mpc_bus_data.loc[self.mpc_bus_data['bus_i'] == transf_tbus] transf_fbus_baseV = self.mpc_bus_data.at[tmp_fbus.first_valid_index(),'baseKV']*kv_v transf_tbus_baseV = self.mpc_bus_data.at[tmp_tbus.first_valid_index(),'baseKV']*kv_v transf_primary_v = self.mpc_bus_data.at[tmp_fbus.first_valid_index(),'Vm']*transf_fbus_baseV transf_secondary_v = self.mpc_bus_data.at[tmp_tbus.first_valid_index(),'Vm']*transf_tbus_baseV transf_offNom_ratio = self.mpc_branch_data.at[index,'ratio'] transf_primary_v = transf_primary_v/ transf_offNom_ratio transf_ratio = transf_primary_v / transf_secondary_v transf_baseZ = transf_tbus_baseV*transf_tbus_baseV / (self.mpc_base_power_MVA*mw_w) transf_r = self.mpc_branch_data.at[index,'r']* transf_baseZ transf_x = self.mpc_branch_data.at[index,'x']* transf_baseZ transf_l = transf_x / self.mpc_omega dpsimpy_comp_dict[transf_name] = [dpsimpy.sp.ph1.Transformer(transf_name, dpsimpy.LogLevel.info)] dpsimpy_comp_dict[transf_name][0].set_parameters(transf_primary_v, transf_secondary_v, np.abs(transf_ratio), np.angle(transf_ratio), transf_r, transf_l) dpsimpy_comp_dict[transf_name][0].set_base_voltage(transf_tbus_baseV) print(transf_primary_v, transf_secondary_v, np.abs(transf_ratio), np.angle(transf_ratio), transf_r, transf_l) print(transf_tbus_baseV) # add connections dpsimpy_comp_dict[transf_name].append([dpsimpy_busses_dict[str(transf_fbus)], dpsimpy_busses_dict[str(transf_tbus)]]) return dpsimpy_busses_dict, dpsimpy_comp_dict def load_mpc(self): dpsimpy_busses_dict, dpsimpy_comp_dict = self.create_dpsim_objects() system_comp = [] system_nodes = [] for key, value in dpsimpy_comp_dict.items(): dpsimpy_component = value[0] connection_nodes = value[1] dpsimpy_component.connect(connection_nodes) system_comp.append(dpsimpy_component) for n in connection_nodes: if n in system_nodes: continue else: system_nodes.append(n) system = dpsimpy.SystemTopology(self.mpc_freq, system_nodes, system_comp) return system
python/src/dpsim/matpower.py
12,214
read input file (returns multidimensional dict) gencost_data_idx= 5 Process raw mpc data and create corresponding dataframes Version System frequency (not included in mpc but needed for setting dpsimpy component parameters i.e inductances, capacitances ..) Base power (MVA) Busses scipy.io.loadmat loads all matrix entries as double. Convert specific columns back to int Generators Branches extract only first 13 columns since following columns include results TODO Generator costs return values: nodes and components default multiplier for matpower data Nodes create dpsimpy busses for each bus type create corresponding dpsimpy component 1 = PQ, 2 = PV, 3 = ref, 4 = isolated Loads add connections [to bus] Generators relevant data from self.mpc_gen_data. Identification with bus number available in mpc_bus_data and mpc_gen_data gen base MVA default is mpc.baseMVA gen base kV gen set point voltage (gen['Vg'] in p.u.) gen ini. active power (gen['Pg'] in MVA) gen_q = gen['Qg']*mw_w gen ini. reactive power (gen['Qg'] in MVAr) gen nominal power (set default to mpc.baseMVA ? ) add connections [to bus] Network injection (slack bus) relevant data from self.mpc_gen_data. Identification with bus number available in mpc_bus_data and mpc_gen_data extnet_baseS= extnet['mBase']*mw_w default is mpc.baseMVA add connections [to bus]isolated branches Lines line conductance is not included in mpc add connections Transformers Matpower: Used to specify branch flow limits. By default these are limits on apparent power with units in MV add connections
1,552
en
0.671577
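The branch handling above converts MATPOWER per-unit impedances to SI values via the base impedance Z_base = V_base^2 / S_base. Pulled out as a small helper for clarity (the function name and the standalone form are ours; the arithmetic mirrors create_dpsim_objects for a pi-line):

import numpy as np

def branch_si_parameters(r_pu, x_pu, b_pu, base_kv, base_mva, freq=50.0):
    # Z_base in ohm, then series resistance/inductance and shunt capacitance.
    omega = 2 * np.pi * freq
    z_base = (base_kv * 1e3) ** 2 / (base_mva * 1e6)
    r = r_pu * z_base              # ohm
    l = (x_pu * z_base) / omega    # henry
    c = (b_pu / z_base) / omega    # farad
    return r, l, c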
from __future__ import print_function import contextlib import imp import os import shutil import subprocess import sys import tempfile from unittest import skip from ctypes import * import numpy as np try: import setuptools except ImportError: setuptools = None import llvmlite.binding as ll from numba import unittest_support as unittest from numba.pycc import main from numba.pycc.decorators import clear_export_registry from numba.pycc.platform import find_shared_ending, find_pyext_ending from numba.pycc.platform import _external_compiler_ok # if suitable compilers are not present then skip. _skip_reason = 'AOT compatible compilers missing' _skip_missing_compilers = unittest.skipIf(not _external_compiler_ok, _skip_reason) from .matmul_usecase import has_blas from .support import TestCase, tag, import_dynamic, temp_directory base_path = os.path.dirname(os.path.abspath(__file__)) def unset_macosx_deployment_target(): """Unset MACOSX_DEPLOYMENT_TARGET because we are not building portable libraries """ if 'MACOSX_DEPLOYMENT_TARGET' in os.environ: del os.environ['MACOSX_DEPLOYMENT_TARGET'] class BasePYCCTest(TestCase): def setUp(self): unset_macosx_deployment_target() self.tmpdir = temp_directory('test_pycc') # Make sure temporary files and directories created by # distutils don't clutter the top-level /tmp tempfile.tempdir = self.tmpdir def tearDown(self): tempfile.tempdir = None # Since we're executing the module-under-test several times # from the same process, we must clear the exports registry # between invocations. clear_export_registry() @contextlib.contextmanager def check_c_ext(self, extdir, name): sys.path.append(extdir) try: lib = import_dynamic(name) yield lib finally: sys.path.remove(extdir) sys.modules.pop(name, None) @_skip_missing_compilers class TestLegacyAPI(BasePYCCTest): def test_pycc_ctypes_lib(self): """ Test creating a C shared library object using pycc. """ source = os.path.join(base_path, 'compile_with_pycc.py') cdll_modulename = 'test_dll_legacy' + find_shared_ending() cdll_path = os.path.join(self.tmpdir, cdll_modulename) if os.path.exists(cdll_path): os.unlink(cdll_path) main(args=['--debug', '-o', cdll_path, source]) lib = CDLL(cdll_path) lib.mult.argtypes = [POINTER(c_double), c_void_p, c_double, c_double] lib.mult.restype = c_int lib.multf.argtypes = [POINTER(c_float), c_void_p, c_float, c_float] lib.multf.restype = c_int res = c_double() lib.mult(byref(res), None, 123, 321) self.assertEqual(res.value, 123 * 321) res = c_float() lib.multf(byref(res), None, 987, 321) self.assertEqual(res.value, 987 * 321) def test_pycc_pymodule(self): """ Test creating a CPython extension module using pycc. """ self.skipTest("lack of environment can make the extension crash") source = os.path.join(base_path, 'compile_with_pycc.py') modulename = 'test_pyext_legacy' out_modulename = os.path.join(self.tmpdir, modulename + find_pyext_ending()) if os.path.exists(out_modulename): os.unlink(out_modulename) main(args=['--debug', '--python', '-o', out_modulename, source]) with self.check_c_ext(self.tmpdir, modulename) as lib: res = lib.multi(123, 321) self.assertPreciseEqual(res, 123 * 321) res = lib.multf(987, 321) self.assertPreciseEqual(res, 987.0 * 321.0) def test_pycc_bitcode(self): """ Test creating a LLVM bitcode file using pycc. 
""" modulename = os.path.join(base_path, 'compile_with_pycc') bitcode_modulename = os.path.join(self.tmpdir, 'test_bitcode_legacy.bc') if os.path.exists(bitcode_modulename): os.unlink(bitcode_modulename) main(args=['--debug', '--llvm', '-o', bitcode_modulename, modulename + '.py']) # Sanity check bitcode file contents with open(bitcode_modulename, "rb") as f: bc = f.read() bitcode_wrapper_magic = b'\xde\xc0\x17\x0b' bitcode_magic = b'BC\xc0\xde' self.assertTrue(bc.startswith((bitcode_magic, bitcode_wrapper_magic)), bc) @_skip_missing_compilers class TestCC(BasePYCCTest): def setUp(self): super(TestCC, self).setUp() from . import compile_with_pycc self._test_module = compile_with_pycc imp.reload(self._test_module) @contextlib.contextmanager def check_cc_compiled(self, cc): #cc.verbose = True cc.output_dir = self.tmpdir cc.compile() with self.check_c_ext(self.tmpdir, cc.name) as lib: yield lib def check_cc_compiled_in_subprocess(self, lib, code): prolog = """if 1: import sys sys.path.insert(0, %(path)r) import %(name)s as lib """ % {'name': lib.__name__, 'path': os.path.dirname(lib.__file__)} code = prolog.strip(' ') + code subprocess.check_call([sys.executable, '-c', code]) def test_cc_properties(self): cc = self._test_module.cc self.assertEqual(cc.name, 'pycc_test_simple') # Inferred output directory d = self._test_module.cc.output_dir self.assertTrue(os.path.isdir(d), d) # Inferred output filename f = self._test_module.cc.output_file self.assertFalse(os.path.exists(f), f) self.assertTrue(os.path.basename(f).startswith('pycc_test_simple.'), f) if sys.platform.startswith('linux'): self.assertTrue(f.endswith('.so'), f) if sys.version_info >= (3,): self.assertIn('.cpython', f) def test_compile(self): with self.check_cc_compiled(self._test_module.cc) as lib: res = lib.multi(123, 321) self.assertPreciseEqual(res, 123 * 321) res = lib.multf(987, 321) self.assertPreciseEqual(res, 987.0 * 321.0) res = lib.square(5) self.assertPreciseEqual(res, 25) self.assertIs(lib.get_none(), None) with self.assertRaises(ZeroDivisionError): lib.div(1, 0) def check_compile_for_cpu(self, cpu_name): cc = self._test_module.cc cc.target_cpu = cpu_name with self.check_cc_compiled(cc) as lib: res = lib.multi(123, 321) self.assertPreciseEqual(res, 123 * 321) self.assertEqual(lib.multi.__module__, 'pycc_test_simple') def test_compile_for_cpu(self): # Compiling for the host CPU should always succeed self.check_compile_for_cpu(ll.get_host_cpu_name()) def test_compile_for_cpu_host(self): # Compiling for the host CPU should always succeed self.check_compile_for_cpu("host") @tag('important') def test_compile_helperlib(self): with self.check_cc_compiled(self._test_module.cc_helperlib) as lib: res = lib.power(2, 7) self.assertPreciseEqual(res, 128) for val in (-1, -1 + 0j, np.complex128(-1)): res = lib.sqrt(val) self.assertPreciseEqual(res, 1j) for val in (4, 4.0, np.float64(4)): res = lib.np_sqrt(val) self.assertPreciseEqual(res, 2.0) res = lib.spacing(1.0) self.assertPreciseEqual(res, 2**-52) # Implicit seeding at startup should guarantee a non-pathological # start state. 
self.assertNotEqual(lib.random(-1), lib.random(-1)) res = lib.random(42) expected = np.random.RandomState(42).random_sample() self.assertPreciseEqual(res, expected) res = lib.size(np.float64([0] * 3)) self.assertPreciseEqual(res, 3) code = """if 1: from numpy.testing import assert_equal, assert_allclose res = lib.power(2, 7) assert res == 128 res = lib.random(42) assert_allclose(res, %(expected)s) res = lib.spacing(1.0) assert_allclose(res, 2**-52) """ % {'expected': expected} self.check_cc_compiled_in_subprocess(lib, code) @tag('important') def test_compile_nrt(self): with self.check_cc_compiled(self._test_module.cc_nrt) as lib: # Sanity check self.assertPreciseEqual(lib.zero_scalar(1), 0.0) res = lib.zeros(3) self.assertEqual(list(res), [0, 0, 0]) if has_blas: res = lib.vector_dot(4) self.assertPreciseEqual(res, 30.0) code = """if 1: res = lib.zero_scalar(1) assert res == 0.0 res = lib.zeros(3) assert list(res) == [0, 0, 0] if %(has_blas)s: res = lib.vector_dot(4) assert res == 30.0 """ % dict(has_blas=has_blas) self.check_cc_compiled_in_subprocess(lib, code) @_skip_missing_compilers class TestDistutilsSupport(TestCase): def setUp(self): unset_macosx_deployment_target() # Copy the test project into a temp directory to avoid # keeping any build leftovers in the source tree self.tmpdir = temp_directory('test_pycc_distutils') source_dir = os.path.join(base_path, 'pycc_distutils_usecase') self.usecase_dir = os.path.join(self.tmpdir, 'work') shutil.copytree(source_dir, self.usecase_dir) def check_setup_py(self, setup_py_file): # Compute PYTHONPATH to ensure the child processes see this Numba import numba numba_path = os.path.abspath(os.path.dirname( os.path.dirname(numba.__file__))) env = dict(os.environ) if env.get('PYTHONPATH', ''): env['PYTHONPATH'] = numba_path + os.pathsep + env['PYTHONPATH'] else: env['PYTHONPATH'] = numba_path def run_python(args): p = subprocess.Popen([sys.executable] + args, cwd=self.usecase_dir, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env=env) out, _ = p.communicate() rc = p.wait() if rc != 0: self.fail("python failed with the following output:\n%s" % out.decode('utf-8', 'ignore')) run_python([setup_py_file, "build_ext", "--inplace"]) code = """if 1: import pycc_compiled_module as lib assert lib.get_const() == 42 res = lib.ones(3) assert list(res) == [1.0, 1.0, 1.0] """ run_python(["-c", code]) def test_setup_py_distutils(self): if sys.version_info < (3,) and sys.platform == "win32": # See e.g. https://stackoverflow.com/questions/28931875/problems-finding-vcvarsall-bat-when-using-distutils self.skipTest("must use setuptools to build extensions for Python 2") self.check_setup_py("setup_distutils.py") @unittest.skipIf(setuptools is None, "test needs setuptools") def test_setup_py_setuptools(self): self.check_setup_py("setup_setuptools.py") if __name__ == "__main__": unittest.main()
numba/tests/test_pycc.py
11,860
Test creating a LLVM bitcode file using pycc. Test creating a C shared library object using pycc. Test creating a CPython extension module using pycc. Unset MACOSX_DEPLOYMENT_TARGET because we are not building portable libraries if suitable compilers are not present then skip. Make sure temporary files and directories created by distutils don't clutter the top-level /tmp Since we're executing the module-under-test several times from the same process, we must clear the exports registry between invocations. Sanity check bitcode file contentscc.verbose = True Inferred output directory Inferred output filename Compiling for the host CPU should always succeed Compiling for the host CPU should always succeed Implicit seeding at startup should guarantee a non-pathological start state. Sanity check Copy the test project into a temp directory to avoid keeping any build leftovers in the source tree Compute PYTHONPATH to ensure the child processes see this Numba See e.g. https://stackoverflow.com/questions/28931875/problems-finding-vcvarsall-bat-when-using-distutils
1,073
en
0.819663
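The test suite above drives a companion fixture, compile_with_pycc.py, that is not part of this excerpt. For orientation, a minimal module in the style those tests expect could look like the sketch below; the exported names and signatures are illustrative guesses built on numba's documented AOT API, not the actual fixture.

# Hypothetical AOT module in the shape TestCC exercises (names are assumptions).
from numba.pycc import CC

cc = CC('pycc_test_simple')

@cc.export('multi', 'i8(i8, i8)')
def multi(a, b):
    return a * b

@cc.export('multf', 'f8(f8, f8)')
def multf(a, b):
    return a * b

if __name__ == '__main__':
    # Builds the extension module, which check_cc_compiled() then imports.
    cc.compile()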
from typing import List from flake8_functions_names.custom_types import FuncdefInfo from flake8_functions_names.utils.imports import is_module_installed from flake8_functions_names.words import VERBS, PURE_VERBS, BLACKLISTED_WORDS_IN_FUNCTIONS_NAMES def validate_returns_bool_if_names_said_so(funcdef: FuncdefInfo) -> List[str]: if funcdef.is_name_looks_like_question and funcdef.return_type != 'bool': return [ f'FNE001 Name of the function says that it should ' f'return bool, but it returns {funcdef.return_type}', ] return [] def validate_has_property_and_no_verbs(funcdef: FuncdefInfo) -> List[str]: # noqa: FNE007 if funcdef.has_property_decorator and any(w in VERBS for w in funcdef.name_words): verbs = [w for w in funcdef.name_words if w in VERBS] return [ f'FNE002 The method has a @property decorator, ' f"but has a verb in its name ({', '.join(verbs)})", ] return [] def validate_save_to(funcdef: FuncdefInfo) -> List[str]: if 'save' in funcdef.name_words and 'to' not in funcdef.name_words: return [ 'FNE003 Name of the function uses "save", but does not use "to"', ] return [] def validate_load_from(funcdef: FuncdefInfo) -> List[str]: if 'load' in funcdef.name_words and 'from' not in funcdef.name_words: return [ 'FNE004 Name of the function uses "load", but does not use "from"', ] return [] def validate_returns_bool_and_name_shows_it(funcdef: FuncdefInfo) -> List[str]: # noqa: FNE007 if ( funcdef.return_type == 'bool' and not funcdef.is_name_looks_like_question and not funcdef.is_buildin_dundner_method_that_returns_bool ): return [ "FNE005 Return type of the function is bool, but the name doesn't show it", ] return [] def validate_names_says_its_pure_and_its_pure( # noqa: CFQ003, FNE007 funcdef: FuncdefInfo, ) -> List[str]: if ( is_module_installed('deal') and not funcdef.has_deal_pure_decorator and any(w in PURE_VERBS for w in funcdef.name_words) ): return [ 'FNE006 Name of the function says that it works with data, ' 'so it should be pure, but it has no @deal.pure()', ] return [] def validate_no_blacklisted_words_in_name(funcdef: FuncdefInfo) -> List[str]: blacklisted_words = [w for w in funcdef.name_words if w in BLACKLISTED_WORDS_IN_FUNCTIONS_NAMES] if blacklisted_words: return [ f'FNE007 "{blacklisted_words[0]}" is not recommended in function names', ] return [] def validate_name_not_endswith_first_argument_name(funcdef: FuncdefInfo) -> List[str]: if funcdef.arguments_names and funcdef.name.endswith(f'_{funcdef.arguments_names[0]}'): return [ "FNE008 Name of the function ends with its first argument name", ] return []
flake8_functions_names/validators.py
2,971
noqa: FNE007 noqa: FNE007 noqa: CFQ003, FNE007
46
te
0.235659
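To make the checks above concrete, here are a few made-up function definitions that these validators would flag when the plugin runs under flake8; the names are invented and the codes are taken from the messages in the file above.

# Hypothetical examples of names the validators complain about.

def is_empty(items) -> int:      # FNE001: question-style name, but the return type is not bool
    return len(items)

def user_active(user) -> bool:   # FNE005: returns bool, but the name does not read as a question
    return user.active

def save_report(report):         # FNE003: uses "save" without "to"
    report.write()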
#!/usr/bin/python import cgi import cgitb import json import parse_enumeration cgitb.enable() form = cgi.FieldStorage() # Get data from fields callback = form.getvalue('callback') email = form.getvalue('email') if (email is None): email = "<ul><li>hello, world!</li></ul>" print "Content-type: application/json" print response = parse_enumeration.parse_enumerations(email) d = json.JSONEncoder().encode((response)) if (callback): print callback+'(' + d + ');' else: print d
src/process-request.py
494
!/usr/bin/python Get data from fields
37
en
0.499878
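The CGI handler above is Python 2 (bare print statements, no parentheses). A rough Python 3 port of the same JSONP flow is sketched below; parse_enumeration is the project-local module the script imports and is assumed to expose the same parse_enumerations() call.

# Rough Python 3 sketch of the same request flow (not the original script).
import cgi
import json
import parse_enumeration  # project-local module, assumed unchanged

form = cgi.FieldStorage()
callback = form.getvalue('callback')
email = form.getvalue('email') or "<ul><li>hello, world!</li></ul>"

print("Content-type: application/json")
print()

payload = json.dumps(parse_enumeration.parse_enumerations(email))
print(f"{callback}({payload});" if callback else payload)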
#Punto 10 cambiar datos lista=[] datos=int(input("cantidad de datos: ")) for i in range (0,datos): alt=float(input("ingrese alturas: ")) lista.append(alt) print("la altura maxima es ", max(lista)) ################################## lista=[] numero=int(input("numero 1 para agregar una altura y numero 2 para buscar el numero mas grande")) n=0 while True: if(numero==1): n=float(input("altura")) numero=int(input("numero 1 para agregar una altura y numero 2 para buscar el numero mas grande")) lista.append(n) elif(numero==2): print("la mayor altura:", max(lista)) break
Taller de Estrucuras de Control Repeticion/Punto10.py
632
Punto 10 cambiar datos
22
es
0.888637
# Generated by Django 2.2.13 on 2020-09-16 14:47 # Third-party from django.db import migrations class Migration(migrations.Migration): dependencies = [ ("licenses", "0004_auto_20200902_1302"), ] operations = [ migrations.AlterUniqueTogether( name="translatedlicensename", unique_together=None, ), migrations.RemoveField( model_name="translatedlicensename", name="license", ), migrations.DeleteModel( name="LicenseLogo", ), migrations.DeleteModel( name="TranslatedLicenseName", ), ]
licenses/migrations/0005_auto_20200916_1047.py
647
Generated by Django 2.2.13 on 2020-09-16 14:47 Third-party
58
en
0.762247
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union import warnings from azure.core.async_paging import AsyncItemPaged, AsyncList from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod from azure.mgmt.core.exceptions import ARMErrorFormat from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling from ... import models as _models T = TypeVar('T') ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] class AgentPoolsOperations: """AgentPoolsOperations async operations. You should not instantiate this class directly. Instead, you should create a Client instance that instantiates it for you and attaches it as an attribute. :ivar models: Alias to model classes used in this operation group. :type models: ~azure.mgmt.containerregistry.v2019_06_01_preview.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. """ models = _models def __init__(self, client, config, serializer, deserializer) -> None: self._client = client self._serialize = serializer self._deserialize = deserializer self._config = config async def get( self, resource_group_name: str, registry_name: str, agent_pool_name: str, **kwargs: Any ) -> "_models.AgentPool": """Gets the detailed information for a given agent pool. :param resource_group_name: The name of the resource group to which the container registry belongs. :type resource_group_name: str :param registry_name: The name of the container registry. :type registry_name: str :param agent_pool_name: The name of the agent pool. 
:type agent_pool_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: AgentPool, or the result of cls(response) :rtype: ~azure.mgmt.containerregistry.v2019_06_01_preview.models.AgentPool :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.AgentPool"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2019-06-01-preview" accept = "application/json" # Construct URL url = self.get.metadata['url'] # type: ignore path_format_arguments = { 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1), 'registryName': self._serialize.url("registry_name", registry_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'), 'agentPoolName': self._serialize.url("agent_pool_name", agent_pool_name, 'str', max_length=20, min_length=3, pattern=r'^[a-zA-Z0-9-]*$'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') request = self._client.get(url, query_parameters, header_parameters) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize('AgentPool', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/agentPools/{agentPoolName}'} # type: ignore async def _create_initial( self, resource_group_name: str, registry_name: str, agent_pool_name: str, agent_pool: "_models.AgentPool", **kwargs: Any ) -> "_models.AgentPool": cls = kwargs.pop('cls', None) # type: ClsType["_models.AgentPool"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2019-06-01-preview" content_type = kwargs.pop("content_type", "application/json") accept = "application/json" # Construct URL url = self._create_initial.metadata['url'] # type: ignore path_format_arguments = { 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1), 'registryName': self._serialize.url("registry_name", registry_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'), 'agentPoolName': self._serialize.url("agent_pool_name", agent_pool_name, 'str', max_length=20, min_length=3, pattern=r'^[a-zA-Z0-9-]*$'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: 
Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') body_content_kwargs = {} # type: Dict[str, Any] body_content = self._serialize.body(agent_pool, 'AgentPool') body_content_kwargs['content'] = body_content request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 201]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if response.status_code == 200: deserialized = self._deserialize('AgentPool', pipeline_response) if response.status_code == 201: deserialized = self._deserialize('AgentPool', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized _create_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/agentPools/{agentPoolName}'} # type: ignore async def begin_create( self, resource_group_name: str, registry_name: str, agent_pool_name: str, agent_pool: "_models.AgentPool", **kwargs: Any ) -> AsyncLROPoller["_models.AgentPool"]: """Creates an agent pool for a container registry with the specified parameters. :param resource_group_name: The name of the resource group to which the container registry belongs. :type resource_group_name: str :param registry_name: The name of the container registry. :type registry_name: str :param agent_pool_name: The name of the agent pool. :type agent_pool_name: str :param agent_pool: The parameters of an agent pool that needs to scheduled. :type agent_pool: ~azure.mgmt.containerregistry.v2019_06_01_preview.models.AgentPool :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. 
:return: An instance of AsyncLROPoller that returns either AgentPool or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerregistry.v2019_06_01_preview.models.AgentPool] :raises ~azure.core.exceptions.HttpResponseError: """ polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod] cls = kwargs.pop('cls', None) # type: ClsType["_models.AgentPool"] lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] if cont_token is None: raw_result = await self._create_initial( resource_group_name=resource_group_name, registry_name=registry_name, agent_pool_name=agent_pool_name, agent_pool=agent_pool, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): deserialized = self._deserialize('AgentPool', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized path_format_arguments = { 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1), 'registryName': self._serialize.url("registry_name", registry_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'), 'agentPoolName': self._serialize.url("agent_pool_name", agent_pool_name, 'str', max_length=20, min_length=3, pattern=r'^[a-zA-Z0-9-]*$'), } if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) elif polling is False: polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/agentPools/{agentPoolName}'} # type: ignore async def _delete_initial( self, resource_group_name: str, registry_name: str, agent_pool_name: str, **kwargs: Any ) -> None: cls = kwargs.pop('cls', None) # type: ClsType[None] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2019-06-01-preview" accept = "application/json" # Construct URL url = self._delete_initial.metadata['url'] # type: ignore path_format_arguments = { 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1), 'registryName': self._serialize.url("registry_name", registry_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'), 'agentPoolName': self._serialize.url("agent_pool_name", agent_pool_name, 'str', max_length=20, min_length=3, pattern=r'^[a-zA-Z0-9-]*$'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = 
self._serialize.header("accept", accept, 'str') request = self._client.delete(url, query_parameters, header_parameters) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 202, 204]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {}) _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/agentPools/{agentPoolName}'} # type: ignore async def begin_delete( self, resource_group_name: str, registry_name: str, agent_pool_name: str, **kwargs: Any ) -> AsyncLROPoller[None]: """Deletes a specified agent pool resource. :param resource_group_name: The name of the resource group to which the container registry belongs. :type resource_group_name: str :param registry_name: The name of the container registry. :type registry_name: str :param agent_pool_name: The name of the agent pool. :type agent_pool_name: str :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. 
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[None] :raises ~azure.core.exceptions.HttpResponseError: """ polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod] cls = kwargs.pop('cls', None) # type: ClsType[None] lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] if cont_token is None: raw_result = await self._delete_initial( resource_group_name=resource_group_name, registry_name=registry_name, agent_pool_name=agent_pool_name, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): if cls: return cls(pipeline_response, None, {}) path_format_arguments = { 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1), 'registryName': self._serialize.url("registry_name", registry_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'), 'agentPoolName': self._serialize.url("agent_pool_name", agent_pool_name, 'str', max_length=20, min_length=3, pattern=r'^[a-zA-Z0-9-]*$'), } if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) elif polling is False: polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/agentPools/{agentPoolName}'} # type: ignore async def _update_initial( self, resource_group_name: str, registry_name: str, agent_pool_name: str, update_parameters: "_models.AgentPoolUpdateParameters", **kwargs: Any ) -> "_models.AgentPool": cls = kwargs.pop('cls', None) # type: ClsType["_models.AgentPool"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2019-06-01-preview" content_type = kwargs.pop("content_type", "application/json") accept = "application/json" # Construct URL url = self._update_initial.metadata['url'] # type: ignore path_format_arguments = { 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1), 'registryName': self._serialize.url("registry_name", registry_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'), 'agentPoolName': self._serialize.url("agent_pool_name", agent_pool_name, 'str', max_length=20, min_length=3, pattern=r'^[a-zA-Z0-9-]*$'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 
'str') header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') body_content_kwargs = {} # type: Dict[str, Any] body_content = self._serialize.body(update_parameters, 'AgentPoolUpdateParameters') body_content_kwargs['content'] = body_content request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 201]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if response.status_code == 200: deserialized = self._deserialize('AgentPool', pipeline_response) if response.status_code == 201: deserialized = self._deserialize('AgentPool', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized _update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/agentPools/{agentPoolName}'} # type: ignore async def begin_update( self, resource_group_name: str, registry_name: str, agent_pool_name: str, update_parameters: "_models.AgentPoolUpdateParameters", **kwargs: Any ) -> AsyncLROPoller["_models.AgentPool"]: """Updates an agent pool with the specified parameters. :param resource_group_name: The name of the resource group to which the container registry belongs. :type resource_group_name: str :param registry_name: The name of the container registry. :type registry_name: str :param agent_pool_name: The name of the agent pool. :type agent_pool_name: str :param update_parameters: The parameters for updating an agent pool. :type update_parameters: ~azure.mgmt.containerregistry.v2019_06_01_preview.models.AgentPoolUpdateParameters :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. 
:return: An instance of AsyncLROPoller that returns either AgentPool or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerregistry.v2019_06_01_preview.models.AgentPool] :raises ~azure.core.exceptions.HttpResponseError: """ polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod] cls = kwargs.pop('cls', None) # type: ClsType["_models.AgentPool"] lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] if cont_token is None: raw_result = await self._update_initial( resource_group_name=resource_group_name, registry_name=registry_name, agent_pool_name=agent_pool_name, update_parameters=update_parameters, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): deserialized = self._deserialize('AgentPool', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized path_format_arguments = { 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1), 'registryName': self._serialize.url("registry_name", registry_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'), 'agentPoolName': self._serialize.url("agent_pool_name", agent_pool_name, 'str', max_length=20, min_length=3, pattern=r'^[a-zA-Z0-9-]*$'), } if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) elif polling is False: polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/agentPools/{agentPoolName}'} # type: ignore def list( self, resource_group_name: str, registry_name: str, **kwargs: Any ) -> AsyncIterable["_models.AgentPoolListResult"]: """Lists all the agent pools for a specified container registry. :param resource_group_name: The name of the resource group to which the container registry belongs. :type resource_group_name: str :param registry_name: The name of the container registry. 
:type registry_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either AgentPoolListResult or the result of cls(response) :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerregistry.v2019_06_01_preview.models.AgentPoolListResult] :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.AgentPoolListResult"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2019-06-01-preview" accept = "application/json" def prepare_request(next_link=None): # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') if not next_link: # Construct URL url = self.list.metadata['url'] # type: ignore path_format_arguments = { 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1), 'registryName': self._serialize.url("registry_name", registry_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') request = self._client.get(url, query_parameters, header_parameters) else: url = next_link query_parameters = {} # type: Dict[str, Any] request = self._client.get(url, query_parameters, header_parameters) return request async def extract_data(pipeline_response): deserialized = self._deserialize('AgentPoolListResult', pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return deserialized.next_link or None, AsyncList(list_of_elem) async def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response) map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) return pipeline_response return AsyncItemPaged( get_next, extract_data ) list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/agentPools'} # type: ignore async def get_queue_status( self, resource_group_name: str, registry_name: str, agent_pool_name: str, **kwargs: Any ) -> "_models.AgentPoolQueueStatus": """Gets the count of queued runs for a given agent pool. :param resource_group_name: The name of the resource group to which the container registry belongs. :type resource_group_name: str :param registry_name: The name of the container registry. :type registry_name: str :param agent_pool_name: The name of the agent pool. 
:type agent_pool_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: AgentPoolQueueStatus, or the result of cls(response) :rtype: ~azure.mgmt.containerregistry.v2019_06_01_preview.models.AgentPoolQueueStatus :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.AgentPoolQueueStatus"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2019-06-01-preview" accept = "application/json" # Construct URL url = self.get_queue_status.metadata['url'] # type: ignore path_format_arguments = { 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1), 'registryName': self._serialize.url("registry_name", registry_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'), 'agentPoolName': self._serialize.url("agent_pool_name", agent_pool_name, 'str', max_length=20, min_length=3, pattern=r'^[a-zA-Z0-9-]*$'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') request = self._client.post(url, query_parameters, header_parameters) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize('AgentPoolQueueStatus', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized get_queue_status.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/agentPools/{agentPoolName}/listQueueStatus'} # type: ignore
sdk/containerregistry/azure-mgmt-containerregistry/azure/mgmt/containerregistry/v2019_06_01_preview/aio/operations/_agent_pools_operations.py
33,712
AgentPoolsOperations async operations. You should not instantiate this class directly. Instead, you should create a Client instance that instantiates it for you and attaches it as an attribute. :ivar models: Alias to model classes used in this operation group. :type models: ~azure.mgmt.containerregistry.v2019_06_01_preview.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. Lists all the agent pools for a specified container registry. :param resource_group_name: The name of the resource group to which the container registry belongs. :type resource_group_name: str :param registry_name: The name of the container registry. :type registry_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either AgentPoolListResult or the result of cls(response) :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerregistry.v2019_06_01_preview.models.AgentPoolListResult] :raises: ~azure.core.exceptions.HttpResponseError coding=utf-8 -------------------------------------------------------------------------- Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License. See License.txt in the project root for license information. Code generated by Microsoft (R) AutoRest Code Generator. Changes may cause incorrect behavior and will be lost if the code is regenerated. -------------------------------------------------------------------------- type: ClsType["_models.AgentPool"] Construct URL type: ignore Construct parameters type: Dict[str, Any] Construct headers type: Dict[str, Any] type: ignore type: ClsType["_models.AgentPool"] Construct URL type: ignore Construct parameters type: Dict[str, Any] Construct headers type: Dict[str, Any] type: Dict[str, Any] type: ignore type: Union[bool, AsyncPollingMethod] type: ClsType["_models.AgentPool"] type: Optional[str] type: ignore type: ClsType[None] Construct URL type: ignore Construct parameters type: Dict[str, Any] Construct headers type: Dict[str, Any] type: ignore type: Union[bool, AsyncPollingMethod] type: ClsType[None] type: Optional[str] type: ignore type: ClsType["_models.AgentPool"] Construct URL type: ignore Construct parameters type: Dict[str, Any] Construct headers type: Dict[str, Any] type: Dict[str, Any] type: ignore type: Union[bool, AsyncPollingMethod] type: ClsType["_models.AgentPool"] type: Optional[str] type: ignore type: ClsType["_models.AgentPoolListResult"] Construct headers type: Dict[str, Any] Construct URL type: ignore Construct parameters type: Dict[str, Any] type: Dict[str, Any] type: ignore type: ClsType["_models.AgentPoolQueueStatus"] Construct URL type: ignore Construct parameters type: Dict[str, Any] Construct headers type: Dict[str, Any] type: ignore
2,925
en
0.44389
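These generated operations are normally reached through the management client rather than instantiated directly. Under the usual azure-mgmt track 2 layout, an async caller would look roughly like the sketch below; the import path, the availability of agent_pools on the default API version, the attribute names on the returned models, and all resource names are assumptions, not verified against this exact package version.

# Illustrative async usage (client wiring and resource names are assumptions).
import asyncio
from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.containerregistry.aio import ContainerRegistryManagementClient

async def main():
    async with DefaultAzureCredential() as credential:
        async with ContainerRegistryManagementClient(credential, "<subscription-id>") as client:
            # list() returns an AsyncItemPaged, so it is consumed with async for.
            async for pool in client.agent_pools.list("<resource-group>", "<registry-name>"):
                status = await client.agent_pools.get_queue_status(
                    "<resource-group>", "<registry-name>", pool.name)
                print(pool.name, status.count)

asyncio.run(main())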
import dis import unittest from test.support.bytecode_helper import BytecodeTestCase def count_instr_recursively(f, opname): count = 0 for instr in dis.get_instructions(f): if instr.opname == opname: count += 1 if hasattr(f, '__code__'): f = f.__code__ for c in f.co_consts: if hasattr(c, 'co_code'): count += count_instr_recursively(c, opname) return count class TestTranforms(BytecodeTestCase): def check_jump_targets(self, code): instructions = list(dis.get_instructions(code)) targets = {instr.offset: instr for instr in instructions} for instr in instructions: if 'JUMP_' not in instr.opname: continue tgt = targets[instr.argval] # jump to unconditional jump if tgt.opname in ('JUMP_ABSOLUTE', 'JUMP_FORWARD'): self.fail(f'{instr.opname} at {instr.offset} ' f'jumps to {tgt.opname} at {tgt.offset}') # unconditional jump to RETURN_VALUE if (instr.opname in ('JUMP_ABSOLUTE', 'JUMP_FORWARD') and tgt.opname == 'RETURN_VALUE'): self.fail(f'{instr.opname} at {instr.offset} ' f'jumps to {tgt.opname} at {tgt.offset}') # JUMP_IF_*_OR_POP jump to conditional jump if '_OR_POP' in instr.opname and 'JUMP_IF_' in tgt.opname: self.fail(f'{instr.opname} at {instr.offset} ' f'jumps to {tgt.opname} at {tgt.offset}') def check_lnotab(self, code): "Check that the lnotab byte offsets are sensible." code = dis._get_code_object(code) lnotab = list(dis.findlinestarts(code)) # Don't bother checking if the line info is sensible, because # most of the line info we can get at comes from lnotab. min_bytecode = min(t[0] for t in lnotab) max_bytecode = max(t[0] for t in lnotab) self.assertGreaterEqual(min_bytecode, 0) self.assertLess(max_bytecode, len(code.co_code)) # This could conceivably test more (and probably should, as there # aren't very many tests of lnotab), if peepholer wasn't scheduled # to be replaced anyway. 
def test_unot(self): # UNARY_NOT POP_JUMP_IF_FALSE --> POP_JUMP_IF_TRUE' def unot(x): if not x == 2: del x self.assertNotInBytecode(unot, 'UNARY_NOT') self.assertNotInBytecode(unot, 'POP_JUMP_IF_FALSE') self.assertInBytecode(unot, 'POP_JUMP_IF_TRUE') self.check_lnotab(unot) def test_elim_inversion_of_is_or_in(self): for line, cmp_op, invert in ( ('not a is b', 'IS_OP', 1,), ('not a is not b', 'IS_OP', 0,), ('not a in b', 'CONTAINS_OP', 1,), ('not a not in b', 'CONTAINS_OP', 0,), ): code = compile(line, '', 'single') self.assertInBytecode(code, cmp_op, invert) self.check_lnotab(code) def test_global_as_constant(self): # LOAD_GLOBAL None/True/False --> LOAD_CONST None/True/False def f(): x = None x = None return x def g(): x = True return x def h(): x = False return x for func, elem in ((f, None), (g, True), (h, False)): self.assertNotInBytecode(func, 'LOAD_GLOBAL') self.assertInBytecode(func, 'LOAD_CONST', elem) self.check_lnotab(func) def f(): 'Adding a docstring made this test fail in Py2.5.0' return None self.assertNotInBytecode(f, 'LOAD_GLOBAL') self.assertInBytecode(f, 'LOAD_CONST', None) self.check_lnotab(f) def test_while_one(self): # Skip over: LOAD_CONST trueconst POP_JUMP_IF_FALSE xx def f(): while 1: pass return list for elem in ('LOAD_CONST', 'POP_JUMP_IF_FALSE'): self.assertNotInBytecode(f, elem) for elem in ('JUMP_ABSOLUTE',): self.assertInBytecode(f, elem) self.check_lnotab(f) def test_pack_unpack(self): for line, elem in ( ('a, = a,', 'LOAD_CONST',), ('a, b = a, b', 'ROT_TWO',), ('a, b, c = a, b, c', 'ROT_THREE',), ): code = compile(line,'','single') self.assertInBytecode(code, elem) self.assertNotInBytecode(code, 'BUILD_TUPLE') self.assertNotInBytecode(code, 'UNPACK_TUPLE') self.check_lnotab(code) def test_folding_of_tuples_of_constants(self): for line, elem in ( ('a = 1,2,3', (1, 2, 3)), ('("a","b","c")', ('a', 'b', 'c')), ('a,b,c = 1,2,3', (1, 2, 3)), ('(None, 1, None)', (None, 1, None)), ('((1, 2), 3, 4)', ((1, 2), 3, 4)), ): code = compile(line,'','single') self.assertInBytecode(code, 'LOAD_CONST', elem) self.assertNotInBytecode(code, 'BUILD_TUPLE') self.check_lnotab(code) # Long tuples should be folded too. code = compile(repr(tuple(range(10000))),'','single') self.assertNotInBytecode(code, 'BUILD_TUPLE') # One LOAD_CONST for the tuple, one for the None return value load_consts = [instr for instr in dis.get_instructions(code) if instr.opname == 'LOAD_CONST'] self.assertEqual(len(load_consts), 2) self.check_lnotab(code) # Bug 1053819: Tuple of constants misidentified when presented with: # . . . opcode_with_arg 100 unary_opcode BUILD_TUPLE 1 . . . 
# The following would segfault upon compilation def crater(): (~[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, ],) self.check_lnotab(crater) def test_folding_of_lists_of_constants(self): for line, elem in ( # in/not in constants with BUILD_LIST should be folded to a tuple: ('a in [1,2,3]', (1, 2, 3)), ('a not in ["a","b","c"]', ('a', 'b', 'c')), ('a in [None, 1, None]', (None, 1, None)), ('a not in [(1, 2), 3, 4]', ((1, 2), 3, 4)), ): code = compile(line, '', 'single') self.assertInBytecode(code, 'LOAD_CONST', elem) self.assertNotInBytecode(code, 'BUILD_LIST') self.check_lnotab(code) def test_folding_of_sets_of_constants(self): for line, elem in ( # in/not in constants with BUILD_SET should be folded to a frozenset: ('a in {1,2,3}', frozenset({1, 2, 3})), ('a not in {"a","b","c"}', frozenset({'a', 'c', 'b'})), ('a in {None, 1, None}', frozenset({1, None})), ('a not in {(1, 2), 3, 4}', frozenset({(1, 2), 3, 4})), ('a in {1, 2, 3, 3, 2, 1}', frozenset({1, 2, 3})), ): code = compile(line, '', 'single') self.assertNotInBytecode(code, 'BUILD_SET') self.assertInBytecode(code, 'LOAD_CONST', elem) self.check_lnotab(code) # Ensure that the resulting code actually works: def f(a): return a in {1, 2, 3} def g(a): return a not in {1, 2, 3} self.assertTrue(f(3)) self.assertTrue(not f(4)) self.check_lnotab(f) self.assertTrue(not g(3)) self.assertTrue(g(4)) self.check_lnotab(g) def test_folding_of_binops_on_constants(self): for line, elem in ( ('a = 2+3+4', 9), # chained fold ('"@"*4', '@@@@'), # check string ops ('a="abc" + "def"', 'abcdef'), # check string ops ('a = 3**4', 81), # binary power ('a = 3*4', 12), # binary multiply ('a = 13//4', 3), # binary floor divide ('a = 14%4', 2), # binary modulo ('a = 2+3', 5), # binary add ('a = 13-4', 9), # binary subtract ('a = (12,13)[1]', 13), # binary subscr ('a = 13 << 2', 52), # binary lshift ('a = 13 >> 2', 3), # binary rshift ('a = 13 & 7', 5), # binary and ('a = 13 ^ 7', 10), # binary xor ('a = 13 | 7', 15), # binary or ): code = compile(line, '', 'single') self.assertInBytecode(code, 'LOAD_CONST', elem) for instr in dis.get_instructions(code): self.assertFalse(instr.opname.startswith('BINARY_')) self.check_lnotab(code) # Verify that unfoldables are skipped code = compile('a=2+"b"', '', 'single') self.assertInBytecode(code, 'LOAD_CONST', 2) self.assertInBytecode(code, 'LOAD_CONST', 'b') self.check_lnotab(code) # Verify that large sequences do not result from folding code = compile('a="x"*10000', '', 'single') self.assertInBytecode(code, 'LOAD_CONST', 10000) self.assertNotIn("x"*10000, code.co_consts) self.check_lnotab(code) code = compile('a=1<<1000', '', 'single') self.assertInBytecode(code, 'LOAD_CONST', 1000) self.assertNotIn(1<<1000, code.co_consts) self.check_lnotab(code) code = compile('a=2**1000', '', 'single') self.assertInBytecode(code, 'LOAD_CONST', 1000) self.assertNotIn(2**1000, code.co_consts) self.check_lnotab(code) def test_binary_subscr_on_unicode(self): # valid code get optimized code = compile('"foo"[0]', '', 'single') self.assertInBytecode(code, 'LOAD_CONST', 'f') self.assertNotInBytecode(code, 'BINARY_SUBSCR') self.check_lnotab(code) code = compile('"\u0061\uffff"[1]', '', 'single') self.assertInBytecode(code, 'LOAD_CONST', '\uffff') self.assertNotInBytecode(code,'BINARY_SUBSCR') 
self.check_lnotab(code) # With PEP 393, non-BMP char get optimized code = compile('"\U00012345"[0]', '', 'single') self.assertInBytecode(code, 'LOAD_CONST', '\U00012345') self.assertNotInBytecode(code, 'BINARY_SUBSCR') self.check_lnotab(code) # invalid code doesn't get optimized # out of range code = compile('"fuu"[10]', '', 'single') self.assertInBytecode(code, 'BINARY_SUBSCR') self.check_lnotab(code) def test_folding_of_unaryops_on_constants(self): for line, elem in ( ('-0.5', -0.5), # unary negative ('-0.0', -0.0), # -0.0 ('-(1.0-1.0)', -0.0), # -0.0 after folding ('-0', 0), # -0 ('~-2', 1), # unary invert ('+1', 1), # unary positive ): code = compile(line, '', 'single') self.assertInBytecode(code, 'LOAD_CONST', elem) for instr in dis.get_instructions(code): self.assertFalse(instr.opname.startswith('UNARY_')) self.check_lnotab(code) # Check that -0.0 works after marshaling def negzero(): return -(1.0-1.0) for instr in dis.get_instructions(negzero): self.assertFalse(instr.opname.startswith('UNARY_')) self.check_lnotab(negzero) # Verify that unfoldables are skipped for line, elem, opname in ( ('-"abc"', 'abc', 'UNARY_NEGATIVE'), ('~"abc"', 'abc', 'UNARY_INVERT'), ): code = compile(line, '', 'single') self.assertInBytecode(code, 'LOAD_CONST', elem) self.assertInBytecode(code, opname) self.check_lnotab(code) def test_elim_extra_return(self): # RETURN LOAD_CONST None RETURN --> RETURN def f(x): return x self.assertNotInBytecode(f, 'LOAD_CONST', None) returns = [instr for instr in dis.get_instructions(f) if instr.opname == 'RETURN_VALUE'] self.assertEqual(len(returns), 1) self.check_lnotab(f) def test_elim_jump_to_return(self): # JUMP_FORWARD to RETURN --> RETURN def f(cond, true_value, false_value): # Intentionally use two-line expression to test issue37213. return (true_value if cond else false_value) self.check_jump_targets(f) self.assertNotInBytecode(f, 'JUMP_FORWARD') self.assertNotInBytecode(f, 'JUMP_ABSOLUTE') returns = [instr for instr in dis.get_instructions(f) if instr.opname == 'RETURN_VALUE'] self.assertEqual(len(returns), 2) self.check_lnotab(f) def test_elim_jump_to_uncond_jump(self): # POP_JUMP_IF_FALSE to JUMP_FORWARD --> POP_JUMP_IF_FALSE to non-jump def f(): if a: # Intentionally use two-line expression to test issue37213. if (c or d): foo() else: baz() self.check_jump_targets(f) self.check_lnotab(f) def test_elim_jump_to_uncond_jump2(self): # POP_JUMP_IF_FALSE to JUMP_ABSOLUTE --> POP_JUMP_IF_FALSE to non-jump def f(): while a: # Intentionally use two-line expression to test issue37213. if (c or d): a = foo() self.check_jump_targets(f) self.check_lnotab(f) def test_elim_jump_to_uncond_jump3(self): # Intentionally use two-line expressions to test issue37213. 
# JUMP_IF_FALSE_OR_POP to JUMP_IF_FALSE_OR_POP --> JUMP_IF_FALSE_OR_POP to non-jump def f(a, b, c): return ((a and b) and c) self.check_jump_targets(f) self.check_lnotab(f) self.assertEqual(count_instr_recursively(f, 'JUMP_IF_FALSE_OR_POP'), 2) # JUMP_IF_TRUE_OR_POP to JUMP_IF_TRUE_OR_POP --> JUMP_IF_TRUE_OR_POP to non-jump def f(a, b, c): return ((a or b) or c) self.check_jump_targets(f) self.check_lnotab(f) self.assertEqual(count_instr_recursively(f, 'JUMP_IF_TRUE_OR_POP'), 2) # JUMP_IF_FALSE_OR_POP to JUMP_IF_TRUE_OR_POP --> POP_JUMP_IF_FALSE to non-jump def f(a, b, c): return ((a and b) or c) self.check_jump_targets(f) self.check_lnotab(f) self.assertNotInBytecode(f, 'JUMP_IF_FALSE_OR_POP') self.assertInBytecode(f, 'JUMP_IF_TRUE_OR_POP') self.assertInBytecode(f, 'POP_JUMP_IF_FALSE') # JUMP_IF_TRUE_OR_POP to JUMP_IF_FALSE_OR_POP --> POP_JUMP_IF_TRUE to non-jump def f(a, b, c): return ((a or b) and c) self.check_jump_targets(f) self.check_lnotab(f) self.assertNotInBytecode(f, 'JUMP_IF_TRUE_OR_POP') self.assertInBytecode(f, 'JUMP_IF_FALSE_OR_POP') self.assertInBytecode(f, 'POP_JUMP_IF_TRUE') def test_elim_jump_after_return1(self): # Eliminate dead code: jumps immediately after returns can't be reached def f(cond1, cond2): if cond1: return 1 if cond2: return 2 while 1: return 3 while 1: if cond1: return 4 return 5 return 6 self.assertNotInBytecode(f, 'JUMP_FORWARD') self.assertNotInBytecode(f, 'JUMP_ABSOLUTE') returns = [instr for instr in dis.get_instructions(f) if instr.opname == 'RETURN_VALUE'] self.assertLessEqual(len(returns), 6) self.check_lnotab(f) def test_elim_jump_after_return2(self): # Eliminate dead code: jumps immediately after returns can't be reached def f(cond1, cond2): while 1: if cond1: return 4 self.assertNotInBytecode(f, 'JUMP_FORWARD') # There should be one jump for the while loop. returns = [instr for instr in dis.get_instructions(f) if instr.opname == 'JUMP_ABSOLUTE'] self.assertEqual(len(returns), 1) returns = [instr for instr in dis.get_instructions(f) if instr.opname == 'RETURN_VALUE'] self.assertLessEqual(len(returns), 2) self.check_lnotab(f) def test_make_function_doesnt_bail(self): def f(): def g()->1+1: pass return g self.assertNotInBytecode(f, 'BINARY_ADD') self.check_lnotab(f) def test_constant_folding(self): # Issue #11244: aggressive constant folding. 
exprs = [ '3 * -5', '-3 * 5', '2 * (3 * 4)', '(2 * 3) * 4', '(-1, 2, 3)', '(1, -2, 3)', '(1, 2, -3)', '(1, 2, -3) * 6', 'lambda x: x in {(3 * -5) + (-1 - 6), (1, -2, 3) * 2, None}', ] for e in exprs: code = compile(e, '', 'single') for instr in dis.get_instructions(code): self.assertFalse(instr.opname.startswith('UNARY_')) self.assertFalse(instr.opname.startswith('BINARY_')) self.assertFalse(instr.opname.startswith('BUILD_')) self.check_lnotab(code) def test_in_literal_list(self): def containtest(): return x in [a, b] self.assertEqual(count_instr_recursively(containtest, 'BUILD_LIST'), 0) self.check_lnotab(containtest) def test_iterate_literal_list(self): def forloop(): for x in [a, b]: pass self.assertEqual(count_instr_recursively(forloop, 'BUILD_LIST'), 0) self.check_lnotab(forloop) def test_condition_with_binop_with_bools(self): def f(): if True or False: return 1 return 0 self.assertEqual(f(), 1) self.check_lnotab(f) def test_if_with_if_expression(self): # Check bpo-37289 def f(x): if (True if x else False): return True return False self.assertTrue(f(True)) self.check_lnotab(f) def test_trailing_nops(self): # Check the lnotab of a function that even after trivial # optimization has trailing nops, which the lnotab adjustment has to # handle properly (bpo-38115). def f(x): while 1: return 3 while 1: return 5 return 6 self.check_lnotab(f) def test_assignment_idiom_in_comprehensions(self): def listcomp(): return [y for x in a for y in [f(x)]] self.assertEqual(count_instr_recursively(listcomp, 'FOR_ITER'), 1) def setcomp(): return {y for x in a for y in [f(x)]} self.assertEqual(count_instr_recursively(setcomp, 'FOR_ITER'), 1) def dictcomp(): return {y: y for x in a for y in [f(x)]} self.assertEqual(count_instr_recursively(dictcomp, 'FOR_ITER'), 1) def genexpr(): return (y for x in a for y in [f(x)]) self.assertEqual(count_instr_recursively(genexpr, 'FOR_ITER'), 1) class TestBuglets(unittest.TestCase): def test_bug_11510(self): # folded constant set optimization was commingled with the tuple # unpacking optimization which would fail if the set had duplicate # elements so that the set length was unexpected def f(): x, y = {1, 1} return x, y with self.assertRaises(ValueError): f() if __name__ == "__main__": unittest.main()
www/src/Lib/test/test_peepholer.py
20,531
Check that the lnotab byte offsets are sensible. Adding a docstring made this test fail in Py2.5.0 jump to unconditional jump unconditional jump to RETURN_VALUE JUMP_IF_*_OR_POP jump to conditional jump Don't bother checking if the line info is sensible, because most of the line info we can get at comes from lnotab. This could conceivably test more (and probably should, as there aren't very many tests of lnotab), if peepholer wasn't scheduled to be replaced anyway. UNARY_NOT POP_JUMP_IF_FALSE --> POP_JUMP_IF_TRUE' LOAD_GLOBAL None/True/False --> LOAD_CONST None/True/False Skip over: LOAD_CONST trueconst POP_JUMP_IF_FALSE xx Long tuples should be folded too. One LOAD_CONST for the tuple, one for the None return value Bug 1053819: Tuple of constants misidentified when presented with: . . . opcode_with_arg 100 unary_opcode BUILD_TUPLE 1 . . . The following would segfault upon compilation in/not in constants with BUILD_LIST should be folded to a tuple: in/not in constants with BUILD_SET should be folded to a frozenset: Ensure that the resulting code actually works: chained fold check string ops check string ops binary power binary multiply binary floor divide binary modulo binary add binary subtract binary subscr binary lshift binary rshift binary and binary xor binary or Verify that unfoldables are skipped Verify that large sequences do not result from folding valid code get optimized With PEP 393, non-BMP char get optimized invalid code doesn't get optimized out of range unary negative -0.0 -0.0 after folding -0 unary invert unary positive Check that -0.0 works after marshaling Verify that unfoldables are skipped RETURN LOAD_CONST None RETURN --> RETURN JUMP_FORWARD to RETURN --> RETURN Intentionally use two-line expression to test issue37213. POP_JUMP_IF_FALSE to JUMP_FORWARD --> POP_JUMP_IF_FALSE to non-jump Intentionally use two-line expression to test issue37213. POP_JUMP_IF_FALSE to JUMP_ABSOLUTE --> POP_JUMP_IF_FALSE to non-jump Intentionally use two-line expression to test issue37213. Intentionally use two-line expressions to test issue37213. JUMP_IF_FALSE_OR_POP to JUMP_IF_FALSE_OR_POP --> JUMP_IF_FALSE_OR_POP to non-jump JUMP_IF_TRUE_OR_POP to JUMP_IF_TRUE_OR_POP --> JUMP_IF_TRUE_OR_POP to non-jump JUMP_IF_FALSE_OR_POP to JUMP_IF_TRUE_OR_POP --> POP_JUMP_IF_FALSE to non-jump JUMP_IF_TRUE_OR_POP to JUMP_IF_FALSE_OR_POP --> POP_JUMP_IF_TRUE to non-jump Eliminate dead code: jumps immediately after returns can't be reached Eliminate dead code: jumps immediately after returns can't be reached There should be one jump for the while loop. Issue 11244: aggressive constant folding. Check bpo-37289 Check the lnotab of a function that even after trivial optimization has trailing nops, which the lnotab adjustment has to handle properly (bpo-38115). folded constant set optimization was commingled with the tuple unpacking optimization which would fail if the set had duplicate elements so that the set length was unexpected
2,985
en
0.852215
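A quick way to see the constant folding these tests assert is to disassemble a constant expression directly. This is a minimal illustration, not part of the test file; it assumes only the standard-library dis module and a CPython build whose compiler performs constant folding.

import dis

code = compile('2 * (3 * 4)', '<demo>', 'eval')
dis.dis(code)   # with folding: a single LOAD_CONST 24, no BINARY_MULTIPLY

opnames = {instr.opname for instr in dis.get_instructions(code)}
assert not any(name.startswith('BINARY_') for name in opnames)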
import sys
import os
import shutil
import zipfile

'''
Author: Benny Megidish
Description: This program extracts all the drawing, image and 3D design files out of a 123dx file
             Argument naming conventions are used like in java (camelCase)
'''

numOfFileExtracted = 0


def _extract3d(zipFileDir, destDirectory, outputFileName):
    ''' a wrapper function for the recursive file extraction function '''
    with zipfile.ZipFile(zipFileDir) as zipFile:
        _extract3dRecursively(zipFile.namelist(), zipFile, destDirectory, outputFileName)


def _extract3dRecursively(fileList, baseZipFile, destDirectory, outputFileName, numOfFileExtracted=0):
    ''' extracts all the illustrations and models from the 123dx file recursively '''
    imageExtList = ['.jpg', '.png']
    fusionExtList = ['.smt', '.smb', '.sat', '.igs', '.dxf', '.stp', '.stl']

    for member in fileList:
        if os.path.isdir(member):
            # traverse zip
            _extract3dRecursively(os.listdir(member), baseZipFile, destDirectory, outputFileName)
        else:
            fileExt = os.path.splitext(member)[1]
            fileName = os.path.splitext(os.path.basename(member))[0]

            # extract only drawing images and 3D files
            if fileExt in (fusionExtList + imageExtList):
                fullFileName = ''.join([outputFileName, "_", fileName, fileExt])

                # find unique file name
                while os.path.exists(os.path.join(destDirectory, fullFileName)):
                    fileName += "#"
                    fullFileName = ''.join([outputFileName, "_", fileName, fileExt])

                # copy file (taken from zipfile's extract)
                source = baseZipFile.open(member)
                target = open(os.path.join(destDirectory, fullFileName), "wb")  # was file() / test for exceptions

                with source, target:
                    shutil.copyfileobj(source, target)

                numOfFileExtracted += 1


def _execute(srcDirectory, destDirectory, filename):
    ''' converts the file into a fusion 360 file (this file might be usable in other CAD software as well) '''
    outputFileName = os.path.splitext(os.path.basename(filename))[0]
    newFileName = outputFileName + '.zip'
    oldFilePath = os.path.join(srcDirectory, filename)
    newFilePath = os.path.join(srcDirectory, newFileName)

    # convert to zip
    os.rename(oldFilePath, newFilePath)

    # extract files
    print('Extracting %s' % oldFilePath)
    _extract3d(newFilePath, destDirectory, outputFileName)

    # convert back to 123dx
    os.rename(newFilePath, oldFilePath)

    # delete zip
    # os.remove(newFilePath)


def convert(filepath=None):
    args = sys.argv
    usage = 'USAGE: %s [123D FILE PATH OR DIRECTORY]' % args[0]
    directory = os.path.dirname(os.path.realpath(__file__))
    succeeded = False

    # define working directory and file path
    if filepath:
        directory = os.path.dirname(filepath)
    elif len(args) == 2:
        directory = os.path.dirname(args[1])
        filepath = args[1]
    else:
        print(usage)
        print('Using current directory..')

    extractDirectory = os.path.join(directory, '3DFiles')

    # ensure all the variables defined correctly
    if os.path.isdir(directory) or (filepath and filepath.endswith(".123dx")):
        # create output dir if needed
        if not os.path.exists(extractDirectory):
            os.makedirs(extractDirectory)
    else:
        print(usage)
        # exit(-1)  # in case we are running as a script, exit it
        return False

    # start the conversion process
    if filepath and filepath.endswith(".123dx"):
        # single file
        if os.path.exists(filepath):
            _execute(directory, extractDirectory, filepath)
            succeeded = True
        else:
            print('Failed, %s does not exist' % filepath)
    elif os.path.isdir(directory):
        # directory
        for filename in os.listdir(directory):
            if filename.endswith(".123dx"):
                _execute(directory, extractDirectory, filename)
                succeeded = True

        if not succeeded:
            print('Failed, could not find *.123dx file in %s' % directory)

    if succeeded:
        print('Succeeded, you can find your model files inside the 3DFiles folder')

    return succeeded


if __name__ == '__main__':
    convert()
fusion123/converter.py
4,428
converts the file into fusion 360 file (this file might be usable in other CAD software as well) a wrapper function for the recursive file extraction function extracts all the illustations and models from the 123dx file recursively traverse zip extract only drawing images and 3D files find unique file name copy file (taken from zipfile's extract) was file() / test for exceptions covert to zip extract files covert back to 123dx delete zip os.remove(newFilePath) define working directory and file path ensure all the variables defined correctly create output dir if needed exit(-1) incase we are running as a script, exit it start the convertion process single file directory
690
en
0.842755
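The converter above relies on a 123dx file being an ordinary zip archive. A shorter way to peek inside one, without renaming anything on disk, is sketched below; the file name is a placeholder and the extension filter mirrors the lists used in _extract3dRecursively.

import os
import zipfile

WANTED = {'.jpg', '.png', '.smt', '.smb', '.sat', '.igs', '.dxf', '.stp', '.stl'}

def list_extractable(path_to_123dx):
    """Return archive members whose extension matches the converter's filter."""
    with zipfile.ZipFile(path_to_123dx) as archive:
        return [name for name in archive.namelist()
                if os.path.splitext(name)[1].lower() in WANTED]

# Example (hypothetical file name):
# print(list_extractable('model.123dx'))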
# Definition for a binary tree node.
# class TreeNode:
#     def __init__(self, x):
#         self.val = x
#         self.left = None
#         self.right = None


class Solution:
    def sumOfLeftLeaves(self, root):
        """
        :type root: TreeNode
        :rtype: int
        """
        if not root:
            return 0
        if root.left and not root.left.left and not root.left.right:
            return root.left.val + self.sumOfLeftLeaves(root.right)
        return self.sumOfLeftLeaves(root.left) + self.sumOfLeftLeaves(root.right)
Python/404sum_of_left_leaves.py
553
:type root: TreeNode :rtype: int Definition for a binary tree node. class TreeNode: def __init__(self, x): self.val = x self.left = None self.right = None
184
en
0.522728
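For reference, a small driver for the solution above; the TreeNode class is re-declared here because the original keeps it commented out. Using the classic example tree [3, 9, 20, null, null, 15, 7], the left leaves are 9 and 15, so the expected sum is 24.

class TreeNode:
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None

root = TreeNode(3)
root.left = TreeNode(9)
root.right = TreeNode(20)
root.right.left = TreeNode(15)
root.right.right = TreeNode(7)

print(Solution().sumOfLeftLeaves(root))  # 24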
# Copyright (c) 2010 Chris Moyer http://coredumped.org/ # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. import xml.sax import cgi from boto.compat import six, StringIO class ResponseGroup(xml.sax.ContentHandler): """A Generic "Response Group", which can be anything from the entire list of Items to specific response elements within an item""" def __init__(self, connection=None, nodename=None): """Initialize this Item""" self._connection = connection self._nodename = nodename self._nodepath = [] self._curobj = None self._xml = StringIO() def __repr__(self): return '<%s: %s>' % (self.__class__.__name__, self.__dict__) # # Attribute Functions # def get(self, name): return self.__dict__.get(name) def set(self, name, value): self.__dict__[name] = value def to_xml(self): return "<%s>%s</%s>" % (self._nodename, self._xml.getvalue(), self._nodename) # # XML Parser functions # def startElement(self, name, attrs, connection): self._xml.write("<%s>" % name) self._nodepath.append(name) if len(self._nodepath) == 1: obj = ResponseGroup(self._connection) self.set(name, obj) self._curobj = obj elif self._curobj: self._curobj.startElement(name, attrs, connection) return None def endElement(self, name, value, connection): self._xml.write("%s</%s>" % (cgi.escape(value).replace("&amp;amp;", "&amp;"), name)) if len(self._nodepath) == 0: return obj = None curval = self.get(name) if len(self._nodepath) == 1: if value or not curval: self.set(name, value) if self._curobj: self._curobj = None #elif len(self._nodepath) == 2: #self._curobj = None elif self._curobj: self._curobj.endElement(name, value, connection) self._nodepath.pop() return None class Item(ResponseGroup): """A single Item""" def __init__(self, connection=None): """Initialize this Item""" ResponseGroup.__init__(self, connection, "Item") class ItemSet(ResponseGroup): """A special ResponseGroup that has built-in paging, and only creates new Items on the "Item" tag""" def __init__(self, connection, action, params, page=0): ResponseGroup.__init__(self, connection, "Items") self.objs = [] self.iter = None self.page = page self.action = action self.params = params self.curItem = None self.total_results = 0 self.total_pages = 0 self.is_valid = False self.errors = [] def startElement(self, name, attrs, connection): if name == "Item": self.curItem = Item(self._connection) elif self.curItem is not None: self.curItem.startElement(name, attrs, connection) return None def endElement(self, name, value, connection): if name == 'TotalResults': self.total_results = value elif name == 
'TotalPages': self.total_pages = value elif name == 'IsValid': if value == 'True': self.is_valid = True elif name == 'Code': self.errors.append({'Code': value, 'Message': None}) elif name == 'Message': self.errors[-1]['Message'] = value elif name == 'Item': self.objs.append(self.curItem) self._xml.write(self.curItem.to_xml()) self.curItem = None elif self.curItem is not None: self.curItem.endElement(name, value, connection) return None def __next__(self): """Special paging functionality""" if self.iter is None: self.iter = iter(self.objs) try: return next(self.iter) except StopIteration: self.iter = None self.objs = [] if int(self.page) < int(self.total_pages): self.page += 1 self._connection.get_response(self.action, self.params, self.page, self) return next(self) else: raise next = __next__ def __iter__(self): return self def to_xml(self): """Override to first fetch everything""" for item in self: pass return ResponseGroup.to_xml(self)
desktop/core/ext-py/boto-2.46.1/boto/ecs/item.py
5,510
A single Item A special ResponseGroup that has built-in paging, and only creates new Items on the "Item" tag A Generic "Response Group", which can be anything from the entire list of Items to specific response elements within an item Initialize this Item Initialize this Item Special paging functionality Override to first fetch everything Copyright (c) 2010 Chris Moyer http://coredumped.org/ Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, dis- tribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the fol- lowing conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. Attribute Functions XML Parser functionselif len(self._nodepath) == 2:self._curobj = None
1,490
en
0.841497
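The interesting part of ItemSet is its __next__ method: iteration is transparent across result pages because, once the current batch is exhausted, the iterator asks the connection for the next page and keeps going. Stripped of the boto/XML machinery, the same pattern looks roughly like the sketch below, where fetch_page stands in for ItemSet's call back into connection.get_response.

class PagedResults:
    """Iterate items across pages, fetching the next page lazily."""

    def __init__(self, fetch_page, total_pages):
        self.fetch_page = fetch_page      # callable: page number -> list of items
        self.total_pages = total_pages
        self.page = 1
        self._batch = iter(fetch_page(self.page))

    def __iter__(self):
        return self

    def __next__(self):
        try:
            return next(self._batch)
        except StopIteration:
            if self.page < self.total_pages:
                self.page += 1
                self._batch = iter(self.fetch_page(self.page))
                return next(self)
            raise

# Example: three pages of two items each.
# list(PagedResults(lambda p: ['item-%d-%d' % (p, i) for i in range(2)], 3))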
# -*- coding: utf-8 -*- """DNACenterAPI topology API fixtures and tests. Copyright (c) 2019 Cisco and/or its affiliates. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ import pytest from tests.environment import DNA_CENTER_VERSION from tests.models.schema_validator import json_schema_validate pytestmark = pytest.mark.skipif(DNA_CENTER_VERSION != '1.3.1', reason='version does not match') def is_valid_get_vlan_details(obj): json_schema_validate('jsd_6284db4649aa8d31_v1_3_1').validate(obj) return True def get_vlan_details(api): endpoint_result = api.topology.get_vlan_details( ) return endpoint_result @pytest.mark.topology def test_get_vlan_details(api): assert is_valid_get_vlan_details( get_vlan_details(api) ) def get_vlan_details_default(api): endpoint_result = api.topology.get_vlan_details( ) return endpoint_result @pytest.mark.topology def test_get_vlan_details_default(api): try: assert is_valid_get_vlan_details( get_vlan_details_default(api) ) except Exception as original_e: with pytest.raises(TypeError, match="but instead we received None"): raise original_e def is_valid_get_site_topology(obj): json_schema_validate('jsd_9ba14a9e441b8a60_v1_3_1').validate(obj) return True def get_site_topology(api): endpoint_result = api.topology.get_site_topology( ) return endpoint_result @pytest.mark.topology def test_get_site_topology(api): assert is_valid_get_site_topology( get_site_topology(api) ) def get_site_topology_default(api): endpoint_result = api.topology.get_site_topology( ) return endpoint_result @pytest.mark.topology def test_get_site_topology_default(api): try: assert is_valid_get_site_topology( get_site_topology_default(api) ) except Exception as original_e: with pytest.raises(TypeError, match="but instead we received None"): raise original_e def is_valid_get_physical_topology(obj): json_schema_validate('jsd_b2b8cb91459aa58f_v1_3_1').validate(obj) return True def get_physical_topology(api): endpoint_result = api.topology.get_physical_topology( node_type='string' ) return endpoint_result @pytest.mark.topology def test_get_physical_topology(api): assert is_valid_get_physical_topology( get_physical_topology(api) ) def get_physical_topology_default(api): endpoint_result = api.topology.get_physical_topology( node_type=None ) return endpoint_result @pytest.mark.topology def test_get_physical_topology_default(api): try: assert is_valid_get_physical_topology( get_physical_topology_default(api) ) except Exception as original_e: with pytest.raises(TypeError, match="but instead we received None"): raise original_e def 
is_valid_get_topology_details(obj): json_schema_validate('jsd_b9b48ac8463a8aba_v1_3_1').validate(obj) return True def get_topology_details(api): endpoint_result = api.topology.get_topology_details( vlan_id='string' ) return endpoint_result @pytest.mark.topology def test_get_topology_details(api): assert is_valid_get_topology_details( get_topology_details(api) ) def get_topology_details_default(api): endpoint_result = api.topology.get_topology_details( vlan_id='string' ) return endpoint_result @pytest.mark.topology def test_get_topology_details_default(api): try: assert is_valid_get_topology_details( get_topology_details_default(api) ) except Exception as original_e: with pytest.raises(TypeError, match="but instead we received None"): raise original_e def is_valid_get_l3_topology_details(obj): json_schema_validate('jsd_c2b5fb764d888375_v1_3_1').validate(obj) return True def get_l3_topology_details(api): endpoint_result = api.topology.get_l3_topology_details( topology_type='string' ) return endpoint_result @pytest.mark.topology def test_get_l3_topology_details(api): assert is_valid_get_l3_topology_details( get_l3_topology_details(api) ) def get_l3_topology_details_default(api): endpoint_result = api.topology.get_l3_topology_details( topology_type='string' ) return endpoint_result @pytest.mark.topology def test_get_l3_topology_details_default(api): try: assert is_valid_get_l3_topology_details( get_l3_topology_details_default(api) ) except Exception as original_e: with pytest.raises(TypeError, match="but instead we received None"): raise original_e def is_valid_get_overall_network_health(obj): json_schema_validate('jsd_ca91da84401abba1_v1_3_1').validate(obj) return True def get_overall_network_health(api): endpoint_result = api.topology.get_overall_network_health( timestamp=0 ) return endpoint_result @pytest.mark.topology def test_get_overall_network_health(api): assert is_valid_get_overall_network_health( get_overall_network_health(api) ) def get_overall_network_health_default(api): endpoint_result = api.topology.get_overall_network_health( timestamp=None ) return endpoint_result @pytest.mark.topology def test_get_overall_network_health_default(api): try: assert is_valid_get_overall_network_health( get_overall_network_health_default(api) ) except Exception as original_e: with pytest.raises(TypeError, match="but instead we received None"): raise original_e
tests/api/v1_3_1/test_topology.py
6,686
DNACenterAPI topology API fixtures and tests. Copyright (c) 2019 Cisco and/or its affiliates. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -*- coding: utf-8 -*-
1,142
en
0.879781
# Copyright (c) 2020 Huawei Technologies Co., Ltd # Copyright (c) 2019, Facebook CORPORATION. # All rights reserved. # # Licensed under the BSD 3-Clause License (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://opensource.org/licenses/BSD-3-Clause # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch import numpy as np import sys from common_utils import TestCase, run_tests from common_device_type import dtypes, instantiate_device_type_tests from util_test import create_common_tensor class TestTril(TestCase): def test_tril(self, device): dtype_list = [np.float32, np.float16] format_list = [0, 3, 4] shape_list = [[5, 5],[4, 5, 6]] diagonal_list = [-1, 0, 1] shape_format = [ [i, j, k, l] for i in dtype_list for j in format_list for k in shape_list for l in diagonal_list ] for item in shape_format: cpu_input, npu_input = create_common_tensor(item[:-1], 0, 100) cpu_output = self.cpu_op_exec(cpu_input, item[-1]) npu_output = self.npu_op_exec(npu_input, item[-1]) self.assertRtolEqual(cpu_output, npu_output) def test_tril_inplace(self, device): dtype_list = [np.float32, np.float16] format_list = [0, 3, 4] shape_list = [[5, 5], [4, 5, 6]] diagonal_list = [-1, 0, 1] shape_format = [ [i, j, k, l] for i in dtype_list for j in format_list for k in shape_list for l in diagonal_list ] for item in shape_format: cpu_input, npu_input = create_common_tensor(item[:-1], 0, 100) cpu_output = self.cpu_op_inplace_exec(cpu_input, item[-1]) npu_output = self.npu_op_inplace_exec(npu_input, item[-1]) self.assertRtolEqual(cpu_output, npu_output) def cpu_op_exec(self, input, diagonal=0): output = torch.tril(input, diagonal) output = output.numpy() return output def npu_op_exec(self, input, diagonal=0): output = torch.tril(input, diagonal) output = output.to("cpu") output = output.numpy() return output def cpu_op_inplace_exec(self, input, diagonal=0): output = input.tril_(diagonal) output = output.numpy() return output def npu_op_inplace_exec(self, input, diagonal=0): output = input.tril_(diagonal) output = output.to("cpu") output = output.numpy() return output instantiate_device_type_tests(TestTril, globals(), except_for="cpu") if __name__ == "__main__": run_tests()
test/test_npu/test_network_ops/test_tril.py
2,923
Copyright (c) 2020 Huawei Technologies Co., Ltd Copyright (c) 2019, Facebook CORPORATION. All rights reserved. Licensed under the BSD 3-Clause License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at https://opensource.org/licenses/BSD-3-Clause Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
625
en
0.873972
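The operator exercised here is torch.tril, which zeroes out everything above a chosen diagonal; the test compares the NPU output against the CPU result. A tiny CPU-only illustration of the two code paths the test drives (out-of-place and in-place):

import torch

x = torch.arange(1., 10.).reshape(3, 3)
print(torch.tril(x))               # keep the main diagonal and below
print(torch.tril(x, diagonal=-1))  # keep strictly below the main diagonal
print(x.tril_(1))                  # in-place variant, first super-diagonal included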
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- import uuid from msrest.pipeline import ClientRawResponse from msrestazure.azure_exceptions import CloudError from msrest.polling import LROPoller, NoPolling from msrestazure.polling.arm_polling import ARMPolling from .. import models class PublicIPPrefixesOperations(object): """PublicIPPrefixesOperations operations. :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. :ivar api_version: Client API version. Constant value: "2018-10-01". """ models = models def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer self.api_version = "2018-10-01" self.config = config def _delete_initial( self, resource_group_name, public_ip_prefix_name, custom_headers=None, raw=False, **operation_config): # Construct URL url = self.delete.metadata['url'] path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'publicIpPrefixName': self._serialize.url("public_ip_prefix_name", public_ip_prefix_name, 'str'), 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str') } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') # Construct headers header_parameters = {} if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if self.config.accept_language is not None: header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request request = self._client.delete(url, query_parameters, header_parameters) response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200, 202, 204]: exp = CloudError(response) exp.request_id = response.headers.get('x-ms-request-id') raise exp if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response def delete( self, resource_group_name, public_ip_prefix_name, custom_headers=None, raw=False, polling=True, **operation_config): """Deletes the specified public IP prefix. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param public_ip_prefix_name: The name of the PublicIpPrefix. 
:type public_ip_prefix_name: str :param dict custom_headers: headers that will be added to the request :param bool raw: The poller return type is ClientRawResponse, the direct response alongside the deserialized response :param polling: True for ARMPolling, False for no polling, or a polling object for personal polling strategy :return: An instance of LROPoller that returns None or ClientRawResponse<None> if raw==True :rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]] :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>` """ raw_result = self._delete_initial( resource_group_name=resource_group_name, public_ip_prefix_name=public_ip_prefix_name, custom_headers=custom_headers, raw=True, **operation_config ) def get_long_running_output(response): if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response lro_delay = operation_config.get( 'long_running_operation_timeout', self.config.long_running_operation_timeout) if polling is True: polling_method = ARMPolling(lro_delay, **operation_config) elif polling is False: polling_method = NoPolling() else: polling_method = polling return LROPoller(self._client, raw_result, get_long_running_output, polling_method) delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIpPrefixName}'} def get( self, resource_group_name, public_ip_prefix_name, expand=None, custom_headers=None, raw=False, **operation_config): """Gets the specified public IP prefix in a specified resource group. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param public_ip_prefix_name: The name of the PublicIPPrefx. :type public_ip_prefix_name: str :param expand: Expands referenced resources. :type expand: str :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. 
:return: PublicIPPrefix or ClientRawResponse if raw=true :rtype: ~azure.mgmt.network.v2018_10_01.models.PublicIPPrefix or ~msrest.pipeline.ClientRawResponse :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>` """ # Construct URL url = self.get.metadata['url'] path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'publicIpPrefixName': self._serialize.url("public_ip_prefix_name", public_ip_prefix_name, 'str'), 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str') } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') if expand is not None: query_parameters['$expand'] = self._serialize.query("expand", expand, 'str') # Construct headers header_parameters = {} header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if self.config.accept_language is not None: header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request request = self._client.get(url, query_parameters, header_parameters) response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) exp.request_id = response.headers.get('x-ms-request-id') raise exp deserialized = None if response.status_code == 200: deserialized = self._deserialize('PublicIPPrefix', response) if raw: client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response return deserialized get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIpPrefixName}'} def _create_or_update_initial( self, resource_group_name, public_ip_prefix_name, parameters, custom_headers=None, raw=False, **operation_config): # Construct URL url = self.create_or_update.metadata['url'] path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'publicIpPrefixName': self._serialize.url("public_ip_prefix_name", public_ip_prefix_name, 'str'), 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str') } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') # Construct headers header_parameters = {} header_parameters['Accept'] = 'application/json' header_parameters['Content-Type'] = 'application/json; charset=utf-8' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if self.config.accept_language is not None: header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct body body_content = self._serialize.body(parameters, 'PublicIPPrefix') # Construct and send request request = self._client.put(url, query_parameters, header_parameters, body_content) response = self._client.send(request, stream=False, **operation_config) if 
response.status_code not in [200, 201]: exp = CloudError(response) exp.request_id = response.headers.get('x-ms-request-id') raise exp deserialized = None if response.status_code == 200: deserialized = self._deserialize('PublicIPPrefix', response) if response.status_code == 201: deserialized = self._deserialize('PublicIPPrefix', response) if raw: client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response return deserialized def create_or_update( self, resource_group_name, public_ip_prefix_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config): """Creates or updates a static or dynamic public IP prefix. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param public_ip_prefix_name: The name of the public IP prefix. :type public_ip_prefix_name: str :param parameters: Parameters supplied to the create or update public IP prefix operation. :type parameters: ~azure.mgmt.network.v2018_10_01.models.PublicIPPrefix :param dict custom_headers: headers that will be added to the request :param bool raw: The poller return type is ClientRawResponse, the direct response alongside the deserialized response :param polling: True for ARMPolling, False for no polling, or a polling object for personal polling strategy :return: An instance of LROPoller that returns PublicIPPrefix or ClientRawResponse<PublicIPPrefix> if raw==True :rtype: ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2018_10_01.models.PublicIPPrefix] or ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2018_10_01.models.PublicIPPrefix]] :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>` """ raw_result = self._create_or_update_initial( resource_group_name=resource_group_name, public_ip_prefix_name=public_ip_prefix_name, parameters=parameters, custom_headers=custom_headers, raw=True, **operation_config ) def get_long_running_output(response): deserialized = self._deserialize('PublicIPPrefix', response) if raw: client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response return deserialized lro_delay = operation_config.get( 'long_running_operation_timeout', self.config.long_running_operation_timeout) if polling is True: polling_method = ARMPolling(lro_delay, **operation_config) elif polling is False: polling_method = NoPolling() else: polling_method = polling return LROPoller(self._client, raw_result, get_long_running_output, polling_method) create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIpPrefixName}'} def _update_tags_initial( self, resource_group_name, public_ip_prefix_name, tags=None, custom_headers=None, raw=False, **operation_config): parameters = models.TagsObject(tags=tags) # Construct URL url = self.update_tags.metadata['url'] path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'publicIpPrefixName': self._serialize.url("public_ip_prefix_name", public_ip_prefix_name, 'str'), 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str') } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') # Construct headers header_parameters = {} 
header_parameters['Accept'] = 'application/json' header_parameters['Content-Type'] = 'application/json; charset=utf-8' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if self.config.accept_language is not None: header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct body body_content = self._serialize.body(parameters, 'TagsObject') # Construct and send request request = self._client.patch(url, query_parameters, header_parameters, body_content) response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) exp.request_id = response.headers.get('x-ms-request-id') raise exp deserialized = None if response.status_code == 200: deserialized = self._deserialize('PublicIPPrefix', response) if raw: client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response return deserialized def update_tags( self, resource_group_name, public_ip_prefix_name, tags=None, custom_headers=None, raw=False, polling=True, **operation_config): """Updates public IP prefix tags. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param public_ip_prefix_name: The name of the public IP prefix. :type public_ip_prefix_name: str :param tags: Resource tags. :type tags: dict[str, str] :param dict custom_headers: headers that will be added to the request :param bool raw: The poller return type is ClientRawResponse, the direct response alongside the deserialized response :param polling: True for ARMPolling, False for no polling, or a polling object for personal polling strategy :return: An instance of LROPoller that returns PublicIPPrefix or ClientRawResponse<PublicIPPrefix> if raw==True :rtype: ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2018_10_01.models.PublicIPPrefix] or ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2018_10_01.models.PublicIPPrefix]] :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>` """ raw_result = self._update_tags_initial( resource_group_name=resource_group_name, public_ip_prefix_name=public_ip_prefix_name, tags=tags, custom_headers=custom_headers, raw=True, **operation_config ) def get_long_running_output(response): deserialized = self._deserialize('PublicIPPrefix', response) if raw: client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response return deserialized lro_delay = operation_config.get( 'long_running_operation_timeout', self.config.long_running_operation_timeout) if polling is True: polling_method = ARMPolling(lro_delay, **operation_config) elif polling is False: polling_method = NoPolling() else: polling_method = polling return LROPoller(self._client, raw_result, get_long_running_output, polling_method) update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIpPrefixName}'} def list_all( self, custom_headers=None, raw=False, **operation_config): """Gets all the public IP prefixes in a subscription. 
:param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. :return: An iterator like instance of PublicIPPrefix :rtype: ~azure.mgmt.network.v2018_10_01.models.PublicIPPrefixPaged[~azure.mgmt.network.v2018_10_01.models.PublicIPPrefix] :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>` """ def internal_paging(next_link=None, raw=False): if not next_link: # Construct URL url = self.list_all.metadata['url'] path_format_arguments = { 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str') } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') else: url = next_link query_parameters = {} # Construct headers header_parameters = {} header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if self.config.accept_language is not None: header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request request = self._client.get(url, query_parameters, header_parameters) response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) exp.request_id = response.headers.get('x-ms-request-id') raise exp return response # Deserialize response deserialized = models.PublicIPPrefixPaged(internal_paging, self._deserialize.dependencies) if raw: header_dict = {} client_raw_response = models.PublicIPPrefixPaged(internal_paging, self._deserialize.dependencies, header_dict) return client_raw_response return deserialized list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/publicIPPrefixes'} def list( self, resource_group_name, custom_headers=None, raw=False, **operation_config): """Gets all public IP prefixes in a resource group. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. 
:return: An iterator like instance of PublicIPPrefix :rtype: ~azure.mgmt.network.v2018_10_01.models.PublicIPPrefixPaged[~azure.mgmt.network.v2018_10_01.models.PublicIPPrefix] :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>` """ def internal_paging(next_link=None, raw=False): if not next_link: # Construct URL url = self.list.metadata['url'] path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str') } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') else: url = next_link query_parameters = {} # Construct headers header_parameters = {} header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if self.config.accept_language is not None: header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request request = self._client.get(url, query_parameters, header_parameters) response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) exp.request_id = response.headers.get('x-ms-request-id') raise exp return response # Deserialize response deserialized = models.PublicIPPrefixPaged(internal_paging, self._deserialize.dependencies) if raw: header_dict = {} client_raw_response = models.PublicIPPrefixPaged(internal_paging, self._deserialize.dependencies, header_dict) return client_raw_response return deserialized list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes'}
azure-mgmt-network/azure/mgmt/network/v2018_10_01/operations/public_ip_prefixes_operations.py
24,488
PublicIPPrefixesOperations operations. :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. :ivar api_version: Client API version. Constant value: "2018-10-01". Creates or updates a static or dynamic public IP prefix. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param public_ip_prefix_name: The name of the public IP prefix. :type public_ip_prefix_name: str :param parameters: Parameters supplied to the create or update public IP prefix operation. :type parameters: ~azure.mgmt.network.v2018_10_01.models.PublicIPPrefix :param dict custom_headers: headers that will be added to the request :param bool raw: The poller return type is ClientRawResponse, the direct response alongside the deserialized response :param polling: True for ARMPolling, False for no polling, or a polling object for personal polling strategy :return: An instance of LROPoller that returns PublicIPPrefix or ClientRawResponse<PublicIPPrefix> if raw==True :rtype: ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2018_10_01.models.PublicIPPrefix] or ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2018_10_01.models.PublicIPPrefix]] :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>` Deletes the specified public IP prefix. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param public_ip_prefix_name: The name of the PublicIpPrefix. :type public_ip_prefix_name: str :param dict custom_headers: headers that will be added to the request :param bool raw: The poller return type is ClientRawResponse, the direct response alongside the deserialized response :param polling: True for ARMPolling, False for no polling, or a polling object for personal polling strategy :return: An instance of LROPoller that returns None or ClientRawResponse<None> if raw==True :rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]] :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>` Gets the specified public IP prefix in a specified resource group. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param public_ip_prefix_name: The name of the PublicIPPrefx. :type public_ip_prefix_name: str :param expand: Expands referenced resources. :type expand: str :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. :return: PublicIPPrefix or ClientRawResponse if raw=true :rtype: ~azure.mgmt.network.v2018_10_01.models.PublicIPPrefix or ~msrest.pipeline.ClientRawResponse :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>` Gets all public IP prefixes in a resource group. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. 
:return: An iterator like instance of PublicIPPrefix :rtype: ~azure.mgmt.network.v2018_10_01.models.PublicIPPrefixPaged[~azure.mgmt.network.v2018_10_01.models.PublicIPPrefix] :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>` Gets all the public IP prefixes in a subscription. :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. :return: An iterator like instance of PublicIPPrefix :rtype: ~azure.mgmt.network.v2018_10_01.models.PublicIPPrefixPaged[~azure.mgmt.network.v2018_10_01.models.PublicIPPrefix] :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>` Updates public IP prefix tags. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param public_ip_prefix_name: The name of the public IP prefix. :type public_ip_prefix_name: str :param tags: Resource tags. :type tags: dict[str, str] :param dict custom_headers: headers that will be added to the request :param bool raw: The poller return type is ClientRawResponse, the direct response alongside the deserialized response :param polling: True for ARMPolling, False for no polling, or a polling object for personal polling strategy :return: An instance of LROPoller that returns PublicIPPrefix or ClientRawResponse<PublicIPPrefix> if raw==True :rtype: ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2018_10_01.models.PublicIPPrefix] or ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2018_10_01.models.PublicIPPrefix]] :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>` coding=utf-8 -------------------------------------------------------------------------- Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License. See License.txt in the project root for license information. Code generated by Microsoft (R) AutoRest Code Generator. Changes may cause incorrect behavior and will be lost if the code is regenerated. -------------------------------------------------------------------------- Construct URL Construct parameters Construct headers Construct and send request Construct URL Construct parameters Construct headers Construct and send request Construct URL Construct parameters Construct headers Construct body Construct and send request Construct URL Construct parameters Construct headers Construct body Construct and send request Construct URL Construct parameters Construct headers Construct and send request Deserialize response Construct URL Construct parameters Construct headers Construct and send request Deserialize response
6,300
en
0.471362
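In practice these generated operations are reached through NetworkManagementClient rather than instantiated directly, and the long-running calls return an LROPoller whose result() blocks until ARM finishes provisioning. The sketch below is only a rough usage outline: the subscription id, resource group, prefix name and location are placeholders, and the exact credential class depends on which azure packages are installed alongside this SDK generation.

from azure.common.credentials import ServicePrincipalCredentials
from azure.mgmt.network import NetworkManagementClient

credentials = ServicePrincipalCredentials(client_id='...', secret='...', tenant='...')
client = NetworkManagementClient(credentials, '<subscription-id>')

poller = client.public_ip_prefixes.create_or_update(
    'my-rg', 'my-prefix',
    {
        'location': 'westeurope',
        'public_ip_address_version': 'IPv4',
        'prefix_length': 28,
        'sku': {'name': 'Standard'},
    })
prefix = poller.result()   # blocks until the LRO completes
print(prefix.ip_prefix)

client.public_ip_prefixes.delete('my-rg', 'my-prefix').result()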
# -*- coding:utf8 -*- # File : env.py # Author : Jiayuan Mao # Email : maojiayuan@gmail.com # Date : 12/29/16 # # This file is part of TensorArtist. from ...core import get_logger from ...core.event import EventManager, register_event, trigger_event from ...core.utils.meta import notnone_property from ..graph.env import Env from ..graph.node import as_tftensor logger = get_logger(__file__) __all__ = ['TrainerEnvBase', 'SimpleTrainerEnv'] class TrainerEnvBase(Env): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._snapshot_parts = dict() self._runtime = dict() self.add_snapshot_part('variables', self.__dump_network_variable, self.__load_network_variable) self.add_snapshot_part('runtime', self.__dump_runtime, self.__load_runtime) def __dump_network_variable(self): return self.network.fetch_all_variables_dict() def __load_network_variable(self, variables): self.network.assign_all_variables_dict(variables) def __dump_runtime(self): return self._runtime.copy() def __load_runtime(self, runtime): self._runtime = runtime @property def runtime(self): return self._runtime def add_snapshot_part(self, identifier, dump, load): self._snapshot_parts[identifier] = (dump, load) def get_snapshot_parts_ref(self): return self._snapshot_parts def load_snapshot(self, snapshot): for k, v in snapshot.items(): if k not in self._snapshot_parts: logger.warning('Ignored snapshot part: {}.'.format(k)) else: loader = self._snapshot_parts[k][1] loader(v) return self def dump_snapshot(self): snapshot = dict() for identifier, (d, l) in self._snapshot_parts.items(): snapshot[identifier] = d() return snapshot def register_event(self, name, callback, *args, priority=EventManager.DEF_PRIORITY, **kwargs): register_event(self, name, callback, *args, priority=priority, **kwargs) return self def trigger_event(self, name, *args, **kwargs): trigger_event(self, name, self, *args, **kwargs) return self class SimpleTrainerEnv(TrainerEnvBase): _optimizer = None @notnone_property def optimizer(self): return self._optimizer def set_optimizer(self, opt): self._optimizer = opt return self def make_optimizable_func(self, loss=None): loss = loss or self.network.loss loss = as_tftensor(loss) func = self.make_func() func.add_extra_op(self.optimizer.minimize(loss)) return func
TensorArtist/tartist/nn/train/env.py
2,706
-*- coding:utf8 -*- File : env.py Author : Jiayuan Mao Email : maojiayuan@gmail.com Date : 12/29/16 This file is part of TensorArtist.
140
en
0.705652
"""Tcp client for synchronous uhd message tcp port""" import threading import Queue import time import socket import struct import numpy as np class _TcpSyncClient(threading.Thread): """Thead for message polling""" queue = Queue.Queue() q_quit = Queue.Queue() ip_address = None port = None def __init__(self, ip_address, port, packet_size, packet_type): super(_TcpSyncClient, self).__init__() self.ip_address = ip_address self.port = port self.packet_size = packet_size self.packet_type = packet_type def __exit__(self): self.stop() def run(self): """connect and poll messages to queue""" #Establish connection sock = None print("Connecting to synchronous uhd message tcp port " + str(self.port)) while self.q_quit.empty(): try: sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.connect((self.ip_address, self.port)) break except socket.error: print("connecting to synchronous uhd message tcp port " + str(self.port)) #traceback.print_exc() sock.close() time.sleep(0.5) print("Connected to synchronous uhd message tcp port " + str(self.port)) #Read messages sock.settimeout(None) s = "" while self.q_quit.empty(): try: #concatenate to one package while self.q_quit.empty(): s += sock.recv(self.packet_size) if (len(s)) >= self.packet_size: break res_tuple = struct.unpack( self.packet_type, s[:self.packet_size]) s = s[self.packet_size:] self.queue.put(res_tuple) except socket.timeout: self.stop() traceback.print_exc() pass sock.close() def stop(self): """stop thread""" print("stop tcp_sync uhd message tcp thread") self.q_quit.put("end") class UhdSyncMsg(object): """Creates a thread to connect to the synchronous uhd messages tcp port""" def __init__(self, ip_address = "127.0.0.1", port = 47009, packet_size = 3, packet_type = "fff"): self.tcpa = _TcpSyncClient(ip_address, port, packet_size, packet_type) self.tcpa.start() def __exit__(self): self.tcpa.stop() def stop(self): """stop tcp thread""" self.tcpa.stop() def get_msgs(self, num): """get received messages as string of integer""" out = [] while len(out) < num: out.append(self.tcpa.queue.get()) return out def get_msgs_fft(self, num): """ get received messages as string of integer apply fftshift to message """ out = [] while len(out) < num: out.append(self.tcpa.queue.get()) return [np.fft.fftshift(np.array(o)) for o in out] def get_res(self): """get received messages as string of integer""" out = [] while not self.tcpa.queue.empty(): out.append(self.tcpa.queue.get()) return out def has_msg(self): """Checks if one or more messages were received and empties the message queue""" return self.get_res() != ""
src/tcp_sync.py
3,403
Creates a thread to connect to the synchronous uhd messages tcp port Thead for message polling get received messages as string of integer get received messages as string of integer apply fftshift to message get received messages as string of integer Checks if one or more messages were received and empties the message queue connect and poll messages to queue stop thread stop tcp thread Tcp client for synchronous uhd message tcp port Establish connectiontraceback.print_exc()Read messagesconcatenate to one package
517
en
0.872349
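From a measurement script the class above is typically created once, polled for a batch of tuples, then stopped. Note that packet_size is a byte count, so the constructor defaults (packet_size=3 with packet_type="fff") are inconsistent: three 32-bit floats occupy 12 bytes. A small sketch, assuming a UHD application is publishing on the default port:

msg_port = UhdSyncMsg(ip_address="127.0.0.1", port=47009,
                      packet_size=3 * 4, packet_type="fff")
try:
    first_ten = msg_port.get_msgs(10)   # blocks until ten tuples have arrived
    print(first_ten)
finally:
    msg_port.stop()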
#!/usr/bin/env python3
# Packet MAC Sniffer
# Author Yehia Elghaly

import socket
import textwrap
import struct
from colorama import Fore, Back, Style


def main():
    connection = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.ntohs(3))
    while True:
        read_data, addr = connection.recvfrom(65536)
        send_mac, recv_mac, protocol, packet_data = ethernet(read_data)
        print('\nEthernet Data:')
        print(Fore.GREEN + 'Destination: {}, Source: {}, Protocol: {}'.format(send_mac, recv_mac, protocol))


def ethernet(packet_data):
    send_mac, recv_mac, protocol = struct.unpack('!6s 6s H', packet_data[:14])
    return read_mac_addr(send_mac), read_mac_addr(recv_mac), socket.htons(protocol), packet_data[:14]


def read_mac_addr(bytes):
    bytes_s = map('{:02x}'.format, bytes)
    return ':'.join(bytes_s).upper()


main()
Chapter 06/Packet-Sniffer-MAC.py
817
!/usr/bin/env python3 Packet MAC Sniffer Author Yehia Elghaly
61
en
0.361829
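A self-contained sketch, assumed rather than taken from Packet-Sniffer-MAC.py (which needs root and an AF_PACKET socket), of the 14-byte Ethernet header layout the sniffer unpacks: 6 bytes destination MAC, 6 bytes source MAC, then a 2-byte big-endian EtherType.

import struct


def parse_ethernet_header(frame):
    # destination MAC (6 bytes), source MAC (6 bytes), EtherType (2 bytes)
    dest, src, ethertype = struct.unpack('!6s 6s H', frame[:14])
    to_str = lambda b: ':'.join('{:02x}'.format(x) for x in b).upper()
    return to_str(dest), to_str(src), ethertype, frame[14:]


# Hand-built frame: broadcast destination, a made-up source MAC, IPv4 (0x0800).
frame = struct.pack('!6s 6s H', b'\xff' * 6, b'\x02\x00\x00\x00\x00\x01', 0x0800) + b'payload'
print(parse_ethernet_header(frame))
# ('FF:FF:FF:FF:FF:FF', '02:00:00:00:00:01', 2048, b'payload')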
# -*- coding: utf-8 -*- from .domainconfig import DomainConfig # noqa from .resourceconfig import ResourceConfig # noqa
eve_sqlalchemy/config/__init__.py
123
-*- coding: utf-8 -*- noqa noqa
31
en
0.606218
#!/usr/bin/env python3 # coding: utf-8 # Copyright 2019 Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """sparse softmax cross entropy with logits""" from akg.ops.nn import sparse_softmax_cross_entropy_with_logits as loss from akg.ops.nn import sparse_softmax_cross_entropy_with_logits_ad as loss_ad def SparseSoftmaxCrossEntropyWithLogits(features, labels, is_grad=False, sens=1.0): """sparse softmax cross entropy with logits""" if is_grad: return loss_ad.sparse_softmax_cross_entropy_with_logits_ad(labels, features, reduction='mean', grad_scale=sens) return loss.sparse_softmax_cross_entropy_with_logits(labels, features, reduction='mean')
python/akg/ms/cce/sparse_softmax_cross_entropy_with_logits.py
1,190
sparse softmax cross entropy with logits sparse softmax cross entropy with logits !/usr/bin/env python3 coding: utf-8 Copyright 2019 Huawei Technologies Co., Ltd Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
680
en
0.799611
import ast import copy import json import os import re from collections import OrderedDict from dataclasses import fields from urllib.parse import urlparse import supervisely_lib as sly import sly_globals as g from functools import lru_cache def camel_to_snake(string_to_process): return re.sub(r'(?<!^)(?=[A-Z])', ' ', string_to_process).lower() def process_info_for_showing(info_data): processed_info = {} for key, value in info_data.items(): processed_info[camel_to_snake(key).title()] = value return processed_info def remove_keys_from_dict(keys_to_remove, data): for key in keys_to_remove: data.pop(key, None) def sly_annotation_to_bbox(sly_label): rect: sly.Rectangle = sly_label.geometry.to_bbox() return [rect.top, rect.left, rect.bottom - rect.top, rect.right - rect.left] def generate_data_for_nn_app(images_ids, figures_ids, annotations, padding): data_for_inference = [] for index, (image_id, figure_id, label) in enumerate(zip(images_ids, figures_ids, annotations)): if label is None: raise ValueError( f"Label with id={figure_id} not found. Maybe cached annotation differs from the actual one. " f"Please clear cache on settings tab") image_info = g.spawn_api.image.get_info_by_id(image_id) image_url = image_info.full_storage_url bbox = sly_annotation_to_bbox(label) data_for_inference.append( { 'index': index, 'url': image_url, 'bbox': bbox, 'figure_id': figure_id } ) return data_for_inference def generate_data_for_calculator_app(embeddings_by_indexes, top_n): data_for_calculator = { 'embeddings': [current_row['embedding'] for current_row in embeddings_by_indexes], 'top_k': top_n } return data_for_calculator def add_embeddings_to_cache_by_figures(embeddings_by_indexes, data_for_nn): for current_embedding in embeddings_by_indexes: current_figure_id = data_for_nn[current_embedding['index']]['figure_id'] g.figures2embeddings[current_figure_id] = current_embedding['embedding'] def calculate_nearest_labels(images_ids, annotations, figures_ids, top_n=5, padding=0): data_for_nn = generate_data_for_nn_app(images_ids=images_ids, annotations=annotations, figures_ids=figures_ids, padding=padding) response = g.api.task.send_request(g.nn_session_id, "inference", data={ 'input_data': data_for_nn }, timeout=99999) embeddings_by_indexes = ast.literal_eval(json.loads(response)) # [{'index': 0, 'embedding': [...], ..}, ..] if len(embeddings_by_indexes) != len(data_for_nn): raise ValueError(f'Data error. 
Check that the label is selected correctly.') add_embeddings_to_cache_by_figures(embeddings_by_indexes, data_for_nn) data_for_calculator = generate_data_for_calculator_app(embeddings_by_indexes, top_n) response = g.api.task.send_request(g.calculator_session_id, "calculate_similarity", data={ 'input_data': data_for_calculator }, timeout=99999) nearest_labels = ast.literal_eval(json.loads(response)) # { # 'pred_dist': [[1.0, ..], ..], # 'pred_labels': [['label1', ..], ..], # 'pred_urls': [['image_url1', ..], ..], # } return nearest_labels def get_resized_image(image_storage_url, height): parsed_link = urlparse(image_storage_url) return f'{parsed_link.scheme}://{parsed_link.netloc}' \ f'/previews/q/ext:jpeg/resize:fill:0:{height}:0/q:0/plain{parsed_link.path}' def get_unique_elements(elements_list): used = set() return [x for x in elements_list if x not in used and (used.add(x) or True)] def generate_data_to_show(nearest_labels): unique_labels = get_unique_elements(nearest_labels['pred_labels']) data_to_show = {pred_label: {} for pred_label in unique_labels} data_to_show = OrderedDict(data_to_show) for dist, label in zip(nearest_labels['pred_dist'], nearest_labels['pred_labels']): data_to_show[label]['dist'] = data_to_show[label].get('dist', 0) + dist if data_to_show[label].get('url', None) is None: data_to_show[label]['url'] = get_urls_by_label(label) if data_to_show[label].get('description', None) is None: data_to_show[label]['description'] = get_item_description_by_label(label) return dict(data_to_show) def add_info_to_disable_buttons(data_to_show, assigned_tags, fields, state): reference_disabled = True selected_figure_id = fields.get('state.selectedFigureId', -1) if selected_figure_id not in g.figures_in_reference: reference_disabled = False data_to_show = OrderedDict(data_to_show) for label, data in data_to_show.items(): if label in assigned_tags or (len(assigned_tags) > 0 and state['tagPerImage']): data_to_show[label].update({'assignDisabled': True, 'referenceDisabled': reference_disabled}) else: data_to_show[label].update({'assignDisabled': False, 'referenceDisabled': reference_disabled}) return dict(data_to_show) def get_meta(project_id, from_server=False): if from_server is True or project_id not in g.project2meta: meta_json = g.spawn_api.project.get_meta(project_id) meta = sly.ProjectMeta.from_json(meta_json) g.project2meta[project_id] = meta else: meta = g.project2meta[project_id] return meta def update_project_meta(project_id, project_meta: sly.ProjectMeta): sly.logger.info(f'update_project_meta: {project_id=}, {g.spawn_user_login=}') g.spawn_api.project.update_meta(project_id, project_meta.to_json()) def _get_or_create_tag_meta(project_id, tag_meta): for get_from_server_flag in [False, True]: # check tag in local and remote metas project_meta = get_meta(project_id, from_server=get_from_server_flag) project_tag_meta: sly.TagMeta = project_meta.get_tag_meta(tag_meta.name) sly.logger.info(f'_get_or_create_tag_meta: {project_tag_meta is None=}, {get_from_server_flag=}') if project_tag_meta is not None: break if project_tag_meta is None: project_meta = project_meta.add_tag_meta(tag_meta) # add tag to newest meta update_project_meta(project_id, project_meta) project_meta = get_meta(project_id, from_server=True) project_tag_meta = project_meta.get_tag_meta(tag_meta.name) return project_tag_meta def _assign_tag_to_object(project_id, figure_id, tag_meta): project_tag_meta: sly.TagMeta = _get_or_create_tag_meta(project_id, tag_meta) 
g.api.advanced.add_tag_to_object(project_tag_meta.sly_id, figure_id) def assign_to_object(project_id, figure_id, class_name): sly.logger.info(f'assign_to_object: {project_id=}, {figure_id=}, {class_name=}') tag_meta = sly.TagMeta(class_name, sly.TagValueType.NONE) _assign_tag_to_object(project_id, figure_id, tag_meta) def get_image_path(image_id): info = get_image_info(image_id) local_path = os.path.join(g.cache_path, f"{info.id}{sly.fs.get_file_name_with_ext(info.name)}") if not sly.fs.file_exists(local_path): g.spawn_api.image.download_path(image_id, local_path) return local_path # @lru_cache(maxsize=10) def get_annotation(project_id, image_id, optimize=False): if image_id not in g.image2ann or not optimize: ann_json = g.spawn_api.annotation.download(image_id).annotation ann = sly.Annotation.from_json(ann_json, get_meta(project_id)) g.image2ann[image_id] = ann else: ann = g.image2ann[image_id] g.figures_on_frame_count = len(ann.labels) return ann def get_image_info(image_id): info = None if image_id not in g.image2info: info = g.spawn_api.image.get_info_by_id(image_id) g.image2info[image_id] = info else: info = g.image2info[image_id] return info def clear(): g.project2meta.clear() # image2info.clear() g.image2ann.clear() def convert_dict_to_list(data_to_show): data_to_show_list = [] for key, value in data_to_show.items(): value['current_label'] = key data_to_show_list.append(value) return data_to_show_list def get_assigned_tags_names_by_label_annotation(label_annotation): assigned_tags = label_annotation.tags.to_json() return [assigned_tag.get('name', None) for assigned_tag in assigned_tags if assigned_tag.get('name', None) is not None] def get_tag_id_by_tag_name(label_annotation, tag_name): assigned_tags = label_annotation.tags for current_tag in assigned_tags: if current_tag.name == tag_name: return current_tag.sly_id # return None return None def sort_by_dist(data_to_show): sorted_predictions_by_dist = sorted(data_to_show, key=lambda d: d['dist'], reverse=True) for index, row in enumerate(sorted_predictions_by_dist): row['index'] = index sorted_predictions_by_dist[index] = row return sorted_predictions_by_dist def get_item_description_by_label(current_label): item = copy.deepcopy(g.items_database.get(current_label, {})) keys_to_clear = ['url'] for current_key in keys_to_clear: try: item.pop(current_key) except: pass return item def update_review_tags_tab(assigned_tags, fields): items_for_review = [] for current_tag in assigned_tags: items_for_review.append({ 'current_label': current_tag, 'url': get_urls_by_label(current_tag), 'removingDisabled': False, 'description': get_item_description_by_label(current_tag) }) if len(items_for_review) == 0: fields['state.tagsForReview'] = None else: fields['state.tagsForReview'] = items_for_review def update_card_buttons(card_name, assigned_tags, fields, state): current_card = fields.get(f"state.{card_name}", None) if current_card is None: current_card = g.api.task.get_field(g.task_id, f"state.{card_name}") if current_card: assign_disabled = True reference_disabled = True if current_card.get('current_label', '') not in assigned_tags and not ( len(assigned_tags) > 0 and state['tagPerImage']): assign_disabled = False selected_figure_id = fields.get('state.selectedFigureId', -1) if selected_figure_id not in g.figures_in_reference: reference_disabled = False set_buttons(assign_disabled=assign_disabled, reference_disabled=reference_disabled, card_name=card_name, fields=fields) def upload_data_to_tabs(nearest_labels, label_annotation, fields, state): 
assigned_tags = get_assigned_tags_names_by_label_annotation(label_annotation) update_review_tags_tab(assigned_tags, fields) # Review tags tab update_card_buttons('lastAssignedTag', assigned_tags, fields, state) # Last assigned tab update_card_buttons('selectedDatabaseItem', assigned_tags, fields, state) # Database tab nearest_labels = {key: value[0] for key, value in nearest_labels.items()} # NN Prediction tab data_to_show = generate_data_to_show(nearest_labels) data_to_show = add_info_to_disable_buttons(data_to_show, assigned_tags, fields, state) data_to_show = convert_dict_to_list(data_to_show) data_to_show = sort_by_dist(data_to_show) fields['data.predicted'] = data_to_show def get_urls_by_label(selected_label): label_info = g.items_database[selected_label] return [{'preview': get_resized_image(current_url, g.items_preview_size)} for current_url in label_info['url']][:g.items_preview_count] def remove_from_object(project_id, figure_id, tag_name, tag_id): project_meta = get_meta(project_id) project_tag_meta: sly.TagMeta = project_meta.get_tag_meta(tag_name) if project_tag_meta is None: raise RuntimeError(f"Tag {tag_name} not found in project meta") g.api.advanced.remove_tag_from_object(project_tag_meta.sly_id, figure_id, tag_id) def set_button_flag(card_name, flag_name, flag_value, fields): current_card = g.api.task.get_field(g.task_id, f"state.{card_name}") if current_card: fields[f"state.{card_name}.{flag_name}"] = flag_value def set_buttons(assign_disabled, reference_disabled, card_name, fields): set_button_flag(flag_name='assignDisabled', flag_value=assign_disabled, card_name=card_name, fields=fields) set_button_flag(flag_name='referenceDisabled', flag_value=reference_disabled, card_name=card_name, fields=fields) def get_tagged_objects_count_on_frame(annotation): tagged_objects = 0 for label in annotation.labels: if len(label.tags) > 0: tagged_objects += 1 return tagged_objects
supervisely/labeling-tool/src/sly_functions.py
13,053
[{'index': 0, 'embedding': [...], ..}, ..] { 'pred_dist': [[1.0, ..], ..], 'pred_labels': [['label1', ..], ..], 'pred_urls': [['image_url1', ..], ..], } check tag in local and remote metas add tag to newest meta @lru_cache(maxsize=10) image2info.clear() return None Review tags tab Last assigned tab Database tab NN Prediction tab
342
en
0.246448
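A tiny illustration, with made-up inputs, of what camel_to_snake plus the .title() call in process_info_for_showing above actually produces: the regex inserts a space before each interior capital, so camelCase keys come out as display titles rather than snake_case names.

import re


def camel_to_title(key):
    # same regex as camel_to_snake above, followed by the .title() call
    return re.sub(r'(?<!^)(?=[A-Z])', ' ', key).lower().title()


print(camel_to_title('imageId'), '|', camel_to_title('fullStorageUrl'))
# Image Id | Full Storage Url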
# Author: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr> # Denis Engemann <denis.engemann@gmail.com> # Andrew Dykstra <andrew.r.dykstra@gmail.com> # Mads Jensen <mje.mads@gmail.com> # # License: BSD (3-clause) import os.path as op from copy import deepcopy import warnings import numpy as np from scipy import fftpack from numpy.testing import (assert_array_almost_equal, assert_equal, assert_array_equal, assert_allclose) from nose.tools import assert_true, assert_raises, assert_not_equal from mne import (equalize_channels, pick_types, read_evokeds, write_evokeds, grand_average, combine_evoked) from mne.evoked import _get_peak, EvokedArray from mne.epochs import EpochsArray from mne.utils import _TempDir, requires_pandas, slow_test, requires_version from mne.io.meas_info import create_info from mne.externals.six.moves import cPickle as pickle warnings.simplefilter('always') fname = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data', 'test-ave.fif') fname_gz = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data', 'test-ave.fif.gz') @requires_version('scipy', '0.14') def test_savgol_filter(): """Test savgol filtering """ h_freq = 10. evoked = read_evokeds(fname, 0) freqs = fftpack.fftfreq(len(evoked.times), 1. / evoked.info['sfreq']) data = np.abs(fftpack.fft(evoked.data)) match_mask = np.logical_and(freqs >= 0, freqs <= h_freq / 2.) mismatch_mask = np.logical_and(freqs >= h_freq * 2, freqs < 50.) assert_raises(ValueError, evoked.savgol_filter, evoked.info['sfreq']) evoked.savgol_filter(h_freq) data_filt = np.abs(fftpack.fft(evoked.data)) # decent in pass-band assert_allclose(np.mean(data[:, match_mask], 0), np.mean(data_filt[:, match_mask], 0), rtol=1e-4, atol=1e-2) # suppression in stop-band assert_true(np.mean(data[:, mismatch_mask]) > np.mean(data_filt[:, mismatch_mask]) * 5) def test_hash_evoked(): """Test evoked hashing """ ave = read_evokeds(fname, 0) ave_2 = read_evokeds(fname, 0) assert_equal(hash(ave), hash(ave_2)) # do NOT use assert_equal here, failing output is terrible assert_true(pickle.dumps(ave) == pickle.dumps(ave_2)) ave_2.data[0, 0] -= 1 assert_not_equal(hash(ave), hash(ave_2)) @slow_test def test_io_evoked(): """Test IO for evoked data (fif + gz) with integer and str args """ tempdir = _TempDir() ave = read_evokeds(fname, 0) write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave) ave2 = read_evokeds(op.join(tempdir, 'evoked-ave.fif'))[0] # This not being assert_array_equal due to windows rounding assert_true(np.allclose(ave.data, ave2.data, atol=1e-16, rtol=1e-3)) assert_array_almost_equal(ave.times, ave2.times) assert_equal(ave.nave, ave2.nave) assert_equal(ave._aspect_kind, ave2._aspect_kind) assert_equal(ave.kind, ave2.kind) assert_equal(ave.last, ave2.last) assert_equal(ave.first, ave2.first) assert_true(repr(ave)) # test compressed i/o ave2 = read_evokeds(fname_gz, 0) assert_true(np.allclose(ave.data, ave2.data, atol=1e-16, rtol=1e-8)) # test str access condition = 'Left Auditory' assert_raises(ValueError, read_evokeds, fname, condition, kind='stderr') assert_raises(ValueError, read_evokeds, fname, condition, kind='standard_error') ave3 = read_evokeds(fname, condition) assert_array_almost_equal(ave.data, ave3.data, 19) # test read_evokeds and write_evokeds types = ['Left Auditory', 'Right Auditory', 'Left visual', 'Right visual'] aves1 = read_evokeds(fname) aves2 = read_evokeds(fname, [0, 1, 2, 3]) aves3 = read_evokeds(fname, types) write_evokeds(op.join(tempdir, 'evoked-ave.fif'), aves1) aves4 = read_evokeds(op.join(tempdir, 'evoked-ave.fif')) 
for aves in [aves2, aves3, aves4]: for [av1, av2] in zip(aves1, aves): assert_array_almost_equal(av1.data, av2.data) assert_array_almost_equal(av1.times, av2.times) assert_equal(av1.nave, av2.nave) assert_equal(av1.kind, av2.kind) assert_equal(av1._aspect_kind, av2._aspect_kind) assert_equal(av1.last, av2.last) assert_equal(av1.first, av2.first) assert_equal(av1.comment, av2.comment) # test warnings on bad filenames with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') fname2 = op.join(tempdir, 'test-bad-name.fif') write_evokeds(fname2, ave) read_evokeds(fname2) assert_true(len(w) == 2) def test_shift_time_evoked(): """ Test for shifting of time scale """ tempdir = _TempDir() # Shift backward ave = read_evokeds(fname, 0) ave.shift_time(-0.1, relative=True) write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave) # Shift forward twice the amount ave_bshift = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0) ave_bshift.shift_time(0.2, relative=True) write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave_bshift) # Shift backward again ave_fshift = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0) ave_fshift.shift_time(-0.1, relative=True) write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave_fshift) ave_normal = read_evokeds(fname, 0) ave_relative = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0) assert_true(np.allclose(ave_normal.data, ave_relative.data, atol=1e-16, rtol=1e-3)) assert_array_almost_equal(ave_normal.times, ave_relative.times, 10) assert_equal(ave_normal.last, ave_relative.last) assert_equal(ave_normal.first, ave_relative.first) # Absolute time shift ave = read_evokeds(fname, 0) ave.shift_time(-0.3, relative=False) write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave) ave_absolute = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0) assert_true(np.allclose(ave_normal.data, ave_absolute.data, atol=1e-16, rtol=1e-3)) assert_equal(ave_absolute.first, int(-0.3 * ave.info['sfreq'])) def test_evoked_resample(): """Test for resampling of evoked data """ tempdir = _TempDir() # upsample, write it out, read it in ave = read_evokeds(fname, 0) sfreq_normal = ave.info['sfreq'] ave.resample(2 * sfreq_normal) write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave) ave_up = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0) # compare it to the original ave_normal = read_evokeds(fname, 0) # and compare the original to the downsampled upsampled version ave_new = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0) ave_new.resample(sfreq_normal) assert_array_almost_equal(ave_normal.data, ave_new.data, 2) assert_array_almost_equal(ave_normal.times, ave_new.times) assert_equal(ave_normal.nave, ave_new.nave) assert_equal(ave_normal._aspect_kind, ave_new._aspect_kind) assert_equal(ave_normal.kind, ave_new.kind) assert_equal(ave_normal.last, ave_new.last) assert_equal(ave_normal.first, ave_new.first) # for the above to work, the upsampling just about had to, but # we'll add a couple extra checks anyway assert_true(len(ave_up.times) == 2 * len(ave_normal.times)) assert_true(ave_up.data.shape[1] == 2 * ave_normal.data.shape[1]) def test_evoked_detrend(): """Test for detrending evoked data """ ave = read_evokeds(fname, 0) ave_normal = read_evokeds(fname, 0) ave.detrend(0) ave_normal.data -= np.mean(ave_normal.data, axis=1)[:, np.newaxis] picks = pick_types(ave.info, meg=True, eeg=True, exclude='bads') assert_true(np.allclose(ave.data[picks], ave_normal.data[picks], rtol=1e-8, atol=1e-16)) @requires_pandas def test_to_data_frame(): """Test evoked Pandas exporter""" ave = 
read_evokeds(fname, 0) assert_raises(ValueError, ave.to_data_frame, picks=np.arange(400)) df = ave.to_data_frame() assert_true((df.columns == ave.ch_names).all()) df = ave.to_data_frame(index=None).reset_index('time') assert_true('time' in df.columns) assert_array_equal(df.values[:, 1], ave.data[0] * 1e13) assert_array_equal(df.values[:, 3], ave.data[2] * 1e15) def test_evoked_proj(): """Test SSP proj operations """ for proj in [True, False]: ave = read_evokeds(fname, condition=0, proj=proj) assert_true(all(p['active'] == proj for p in ave.info['projs'])) # test adding / deleting proj if proj: assert_raises(ValueError, ave.add_proj, [], {'remove_existing': True}) assert_raises(ValueError, ave.del_proj, 0) else: projs = deepcopy(ave.info['projs']) n_proj = len(ave.info['projs']) ave.del_proj(0) assert_true(len(ave.info['projs']) == n_proj - 1) ave.add_proj(projs, remove_existing=False) assert_true(len(ave.info['projs']) == 2 * n_proj - 1) ave.add_proj(projs, remove_existing=True) assert_true(len(ave.info['projs']) == n_proj) ave = read_evokeds(fname, condition=0, proj=False) data = ave.data.copy() ave.apply_proj() assert_allclose(np.dot(ave._projector, data), ave.data) def test_get_peak(): """Test peak getter """ evoked = read_evokeds(fname, condition=0, proj=True) assert_raises(ValueError, evoked.get_peak, ch_type='mag', tmin=1) assert_raises(ValueError, evoked.get_peak, ch_type='mag', tmax=0.9) assert_raises(ValueError, evoked.get_peak, ch_type='mag', tmin=0.02, tmax=0.01) assert_raises(ValueError, evoked.get_peak, ch_type='mag', mode='foo') assert_raises(RuntimeError, evoked.get_peak, ch_type=None, mode='foo') assert_raises(ValueError, evoked.get_peak, ch_type='misc', mode='foo') ch_idx, time_idx = evoked.get_peak(ch_type='mag') assert_true(ch_idx in evoked.ch_names) assert_true(time_idx in evoked.times) ch_idx, time_idx = evoked.get_peak(ch_type='mag', time_as_index=True) assert_true(time_idx < len(evoked.times)) data = np.array([[0., 1., 2.], [0., -3., 0]]) times = np.array([.1, .2, .3]) ch_idx, time_idx = _get_peak(data, times, mode='abs') assert_equal(ch_idx, 1) assert_equal(time_idx, 1) ch_idx, time_idx = _get_peak(data * -1, times, mode='neg') assert_equal(ch_idx, 0) assert_equal(time_idx, 2) ch_idx, time_idx = _get_peak(data, times, mode='pos') assert_equal(ch_idx, 0) assert_equal(time_idx, 2) assert_raises(ValueError, _get_peak, data + 1e3, times, mode='neg') assert_raises(ValueError, _get_peak, data - 1e3, times, mode='pos') def test_drop_channels_mixin(): """Test channels-dropping functionality """ evoked = read_evokeds(fname, condition=0, proj=True) drop_ch = evoked.ch_names[:3] ch_names = evoked.ch_names[3:] ch_names_orig = evoked.ch_names dummy = evoked.drop_channels(drop_ch, copy=True) assert_equal(ch_names, dummy.ch_names) assert_equal(ch_names_orig, evoked.ch_names) assert_equal(len(ch_names_orig), len(evoked.data)) evoked.drop_channels(drop_ch) assert_equal(ch_names, evoked.ch_names) assert_equal(len(ch_names), len(evoked.data)) def test_pick_channels_mixin(): """Test channel-picking functionality """ evoked = read_evokeds(fname, condition=0, proj=True) ch_names = evoked.ch_names[:3] ch_names_orig = evoked.ch_names dummy = evoked.pick_channels(ch_names, copy=True) assert_equal(ch_names, dummy.ch_names) assert_equal(ch_names_orig, evoked.ch_names) assert_equal(len(ch_names_orig), len(evoked.data)) evoked.pick_channels(ch_names) assert_equal(ch_names, evoked.ch_names) assert_equal(len(ch_names), len(evoked.data)) evoked = read_evokeds(fname, condition=0, proj=True) 
assert_true('meg' in evoked) assert_true('eeg' in evoked) evoked.pick_types(meg=False, eeg=True) assert_true('meg' not in evoked) assert_true('eeg' in evoked) assert_true(len(evoked.ch_names) == 60) def test_equalize_channels(): """Test equalization of channels """ evoked1 = read_evokeds(fname, condition=0, proj=True) evoked2 = evoked1.copy() ch_names = evoked1.ch_names[2:] evoked1.drop_channels(evoked1.ch_names[:1]) evoked2.drop_channels(evoked2.ch_names[1:2]) my_comparison = [evoked1, evoked2] equalize_channels(my_comparison) for e in my_comparison: assert_equal(ch_names, e.ch_names) def test_evoked_arithmetic(): """Test evoked arithmetic """ ev = read_evokeds(fname, condition=0) ev1 = EvokedArray(np.ones_like(ev.data), ev.info, ev.times[0], nave=20) ev2 = EvokedArray(-np.ones_like(ev.data), ev.info, ev.times[0], nave=10) # combine_evoked([ev1, ev2]) should be the same as ev1 + ev2: # data should be added according to their `nave` weights # nave = ev1.nave + ev2.nave ev = ev1 + ev2 assert_equal(ev.nave, ev1.nave + ev2.nave) assert_allclose(ev.data, 1. / 3. * np.ones_like(ev.data)) ev = ev1 - ev2 assert_equal(ev.nave, ev1.nave + ev2.nave) assert_equal(ev.comment, ev1.comment + ' - ' + ev2.comment) assert_allclose(ev.data, np.ones_like(ev1.data)) # default comment behavior if evoked.comment is None old_comment1 = ev1.comment old_comment2 = ev2.comment ev1.comment = None with warnings.catch_warnings(record=True): warnings.simplefilter('always') ev = ev1 - ev2 assert_equal(ev.comment, 'unknown') ev1.comment = old_comment1 ev2.comment = old_comment2 # equal weighting ev = combine_evoked([ev1, ev2], weights='equal') assert_allclose(ev.data, np.zeros_like(ev1.data)) # combine_evoked([ev1, ev2], weights=[1, 0]) should yield the same as ev1 ev = combine_evoked([ev1, ev2], weights=[1, 0]) assert_equal(ev.nave, ev1.nave) assert_allclose(ev.data, ev1.data) # simple subtraction (like in oddball) ev = combine_evoked([ev1, ev2], weights=[1, -1]) assert_allclose(ev.data, 2 * np.ones_like(ev1.data)) assert_raises(ValueError, combine_evoked, [ev1, ev2], weights='foo') assert_raises(ValueError, combine_evoked, [ev1, ev2], weights=[1]) # grand average evoked1, evoked2 = read_evokeds(fname, condition=[0, 1], proj=True) ch_names = evoked1.ch_names[2:] evoked1.info['bads'] = ['EEG 008'] # test interpolation evoked1.drop_channels(evoked1.ch_names[:1]) evoked2.drop_channels(evoked2.ch_names[1:2]) gave = grand_average([evoked1, evoked2]) assert_equal(gave.data.shape, [len(ch_names), evoked1.data.shape[1]]) assert_equal(ch_names, gave.ch_names) assert_equal(gave.nave, 2) def test_array_epochs(): """Test creating evoked from array """ tempdir = _TempDir() # creating rng = np.random.RandomState(42) data1 = rng.randn(20, 60) sfreq = 1e3 ch_names = ['EEG %03d' % (i + 1) for i in range(20)] types = ['eeg'] * 20 info = create_info(ch_names, sfreq, types) evoked1 = EvokedArray(data1, info, tmin=-0.01) # save, read, and compare evokeds tmp_fname = op.join(tempdir, 'evkdary-ave.fif') evoked1.save(tmp_fname) evoked2 = read_evokeds(tmp_fname)[0] data2 = evoked2.data assert_allclose(data1, data2) assert_allclose(evoked1.times, evoked2.times) assert_equal(evoked1.first, evoked2.first) assert_equal(evoked1.last, evoked2.last) assert_equal(evoked1.kind, evoked2.kind) assert_equal(evoked1.nave, evoked2.nave) # now compare with EpochsArray (with single epoch) data3 = data1[np.newaxis, :, :] events = np.c_[10, 0, 1] evoked3 = EpochsArray(data3, info, events=events, tmin=-0.01).average() assert_allclose(evoked1.data, evoked3.data) 
assert_allclose(evoked1.times, evoked3.times) assert_equal(evoked1.first, evoked3.first) assert_equal(evoked1.last, evoked3.last) assert_equal(evoked1.kind, evoked3.kind) assert_equal(evoked1.nave, evoked3.nave) # test match between channels info and data ch_names = ['EEG %03d' % (i + 1) for i in range(19)] types = ['eeg'] * 19 info = create_info(ch_names, sfreq, types) assert_raises(ValueError, EvokedArray, data1, info, tmin=-0.01) def test_add_channels(): """Test evoked splitting / re-appending channel types """ evoked = read_evokeds(fname, condition=0) evoked.info['buffer_size_sec'] = None evoked_eeg = evoked.pick_types(meg=False, eeg=True, copy=True) evoked_meg = evoked.pick_types(meg=True, copy=True) evoked_stim = evoked.pick_types(meg=False, stim=True, copy=True) evoked_eeg_meg = evoked.pick_types(meg=True, eeg=True, copy=True) evoked_new = evoked_meg.add_channels([evoked_eeg, evoked_stim], copy=True) assert_true(all(ch in evoked_new.ch_names for ch in evoked_stim.ch_names + evoked_meg.ch_names)) evoked_new = evoked_meg.add_channels([evoked_eeg], copy=True) assert_true(ch in evoked_new.ch_names for ch in evoked.ch_names) assert_array_equal(evoked_new.data, evoked_eeg_meg.data) assert_true(all(ch not in evoked_new.ch_names for ch in evoked_stim.ch_names)) # Now test errors evoked_badsf = evoked_eeg.copy() evoked_badsf.info['sfreq'] = 3.1415927 evoked_eeg = evoked_eeg.crop(-.1, .1) assert_raises(RuntimeError, evoked_meg.add_channels, [evoked_badsf]) assert_raises(AssertionError, evoked_meg.add_channels, [evoked_eeg]) assert_raises(ValueError, evoked_meg.add_channels, [evoked_meg]) assert_raises(AssertionError, evoked_meg.add_channels, evoked_badsf)
python-packages/mne-python-0.10/mne/tests/test_evoked.py
17,942
Test evoked splitting / re-appending channel types Test creating evoked from array Test channels-dropping functionality Test equalization of channels Test evoked arithmetic Test for detrending evoked data Test SSP proj operations Test for resampling of evoked data Test peak getter Test evoked hashing Test IO for evoked data (fif + gz) with integer and str args Test channel-picking functionality Test savgol filtering Test for shifting of time scale Test evoked Pandas exporter Author: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr> Denis Engemann <denis.engemann@gmail.com> Andrew Dykstra <andrew.r.dykstra@gmail.com> Mads Jensen <mje.mads@gmail.com> License: BSD (3-clause) decent in pass-band suppression in stop-band do NOT use assert_equal here, failing output is terrible This not being assert_array_equal due to windows rounding test compressed i/o test str access test read_evokeds and write_evokeds test warnings on bad filenames Shift backward Shift forward twice the amount Shift backward again Absolute time shift upsample, write it out, read it in compare it to the original and compare the original to the downsampled upsampled version for the above to work, the upsampling just about had to, but we'll add a couple extra checks anyway test adding / deleting proj combine_evoked([ev1, ev2]) should be the same as ev1 + ev2: data should be added according to their `nave` weights nave = ev1.nave + ev2.nave default comment behavior if evoked.comment is None equal weighting combine_evoked([ev1, ev2], weights=[1, 0]) should yield the same as ev1 simple subtraction (like in oddball) grand average test interpolation creating save, read, and compare evokeds now compare with EpochsArray (with single epoch) test match between channels info and data Now test errors
1,888
en
0.720923
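A short numeric check, not part of the original test file, of the nave-weighted averaging that test_evoked_arithmetic asserts: data of +1 with nave=20 combined with data of -1 with nave=10 should average to 1/3, assuming the weights are proportional to nave as the test's comments state.

import numpy as np

data1, nave1 = np.ones((2, 3)), 20
data2, nave2 = -np.ones((2, 3)), 10
combined = (nave1 * data1 + nave2 * data2) / (nave1 + nave2)
assert np.allclose(combined, 1.0 / 3.0)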
import sqlite3 import mmap import os import sys import copy import math import tempfile from tqdm import tqdm from scipy.stats import norm from expiringdict import ExpiringDict cache = ExpiringDict(max_len=100000,max_age_seconds=600) def get_num_lines(file_path): fp = open(file_path, "r+") buf = mmap.mmap(fp.fileno(), 0) lines = 0 while buf.readline(): lines += 1 return lines class ExtendedNaiveBayes: def __init__(self,family,path_to_data="."): self.family = family self.db_name = os.path.join(path_to_data,family+".nb.db") def fit(self,csv_file): db = sqlite3.connect(":memory:") c = db.cursor() try: c.execute('''CREATE TABLE data (loc TEXT, mac TEXT, val INTEGER, count INTEGER)''') db.commit() except sqlite3.OperationalError: pass headers = [] with open(csv_file,"r") as f: for i,line in enumerate(tqdm(f, total=get_num_lines(csv_file))): line = line.strip() if i == 0: headers = line.split(",") continue loc = "" for j,signal in enumerate(line.split(",")): if j == 0: loc = signal continue if signal.strip() == "": continue mac = headers[j] val = int(round(float(signal.strip()))) c.execute('''SELECT count FROM data WHERE loc = ? AND mac = ? AND val = ?''',(loc, mac, val )) count = c.fetchone() if count == None: c.execute('''INSERT INTO data(loc,mac,val,count) VALUES(?,?,?,?)''', (loc,mac,val,1)) else: c.execute('''UPDATE data SET count = ? WHERE loc = ? AND mac = ? AND val = ?''',(count[0]+1,loc,mac,val)) db.commit() # with open("dump.sql","w") as f: # for line in db.iterdump(): # f.write('%s\n' % line) f = tempfile.TemporaryFile() for line in db.iterdump(): f.write('{}\n'.format(line).encode('utf-8')) db.close() # Write disk to file try: os.remove(self.db_name) except: pass db = sqlite3.connect(self.db_name) c = db.cursor() f.seek(0) c.executescript(f.read().decode('utf-8')) f.close() db.commit() db.close() # os.remove("dump.sql") def get_locations(self): db = sqlite3.connect(self.db_name) c = db.cursor() c.execute('''SELECT loc FROM data GROUP BY loc''') locs = c.fetchall() db.close() locations = [] for l in locs: locations.append(l[0]) return locations def prob_mac_given_loc(self,mac,val,loc,positive): """ Determine the P(mac=val | loc) (positive) Determine the P(mac=val | ~loc) (not positive) """ name = "{}{}{}{}".format(mac,val,loc,positive) cached = cache.get(name) if cached != None: return cached P = 0.005 nameData = "{}{}{}".format(mac,loc,positive) cached = cache.get(nameData) if cached != None: if val in cached: P = cached[val] return P # First find all the values for mac at loc db = sqlite3.connect(self.db_name) c = db.cursor() if positive: c.execute('''SELECT val,count FROM data WHERE loc = ? AND mac = ?''',(loc,mac)) else: c.execute('''SELECT val,count FROM data WHERE loc != ? 
AND mac = ?''',(loc,mac)) val_to_count = {} for row in c.fetchall(): val_to_count[row[0]] = row[1] db.close() # apply gaussian filter new_val_to_count = copy.deepcopy(val_to_count) width = 3 for v in val_to_count: for x in range(-1*width**3,width**3+1): addend = int(round(100*norm.pdf(0,loc=x,scale=width))) if addend <= 0 : continue if v+x not in new_val_to_count: new_val_to_count[v+x] = 0 new_val_to_count[v+x] = new_val_to_count[v+x]+addend total = 0 for v in new_val_to_count: total += new_val_to_count[v] for v in new_val_to_count: new_val_to_count[v] = new_val_to_count[v] / total # 0.5% chance for anything P = 0.005 if val in new_val_to_count: P = new_val_to_count[val] cache[name] = P cache[nameData] = new_val_to_count return P def predict_proba(self,header_unfiltered,csv_data_unfiltered): header = [] csv_data = [] for i,dat in enumerate(csv_data_unfiltered): if dat == 0: continue csv_data.append(dat) header.append(header_unfiltered[i]) locations = self.get_locations() num_locations = len(locations) NA = 1/num_locations NnotA = 1-NA Ps = {} for i,mac in enumerate(header): val = int(round(float(csv_data[i]))) for location in locations: if location not in Ps: Ps[location] = [] PA = self.prob_mac_given_loc(mac,val,location,True) PnotA = self.prob_mac_given_loc(mac,val,location,False) P = PA*NA / (PA*NA + PnotA*NnotA) Ps[location].append(math.log(P)) P_sum = 0 for location in Ps: P_sum += math.exp(sum(Ps[location])) d = {} for location in Ps: d[location] = math.exp(sum(Ps[location]))/P_sum return [(k, d[k]) for k in sorted(d, key=d.get, reverse=True)] def testit(): a =ExtendedNaiveBayes("testing1") print("fitting data") file_to_test = "reverse.csv" a.fit(file_to_test) print("done") with open(file_to_test,"r") as f: for i,line in enumerate(f): line = line.strip() if i == 0: headers = line.split(",") continue headers_submit = [] csv_data_submit = [] loc = "" for j,signal in enumerate(line.split(",")): if j == 0: loc = signal continue if signal.strip() == "": continue headers_submit.append(headers[j]) csv_data_submit.append(int(round(float(signal.strip())))) print(loc) a.predict_proba(headers_submit,csv_data_submit)
server/ai/src/naive_bayes.py
6,872
Determine the P(mac=val | loc) (positive) Determine the P(mac=val | ~loc) (not positive) with open("dump.sql","w") as f: for line in db.iterdump(): f.write('%s\n' % line) Write disk to file os.remove("dump.sql") First find all the values for mac at loc apply gaussian filter 0.5% chance for anything
313
en
0.660589
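A standalone, simplified sketch of the Gaussian smoothing step inside prob_mac_given_loc above: each observed RSSI value spreads a discretised normal kernel over its neighbours, and the smoothed histogram is then renormalised into probabilities. The width of 3 mirrors the original default; the input counts are made up.

from scipy.stats import norm


def smooth_counts(val_to_count, width=3):
    smoothed = dict(val_to_count)
    for v in val_to_count:
        for x in range(-width ** 3, width ** 3 + 1):
            addend = int(round(100 * norm.pdf(0, loc=x, scale=width)))
            if addend <= 0:
                continue
            smoothed[v + x] = smoothed.get(v + x, 0) + addend
    total = sum(smoothed.values())
    return {v: count / total for v, count in smoothed.items()}


print(smooth_counts({-60: 3, -62: 1}))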
from pathlib import Path import tvm from tvm import autotvm from tvm import relay from tvm.autotvm.tuner import GATuner from tvm.autotvm.tuner import GridSearchTuner from tvm.autotvm.tuner import RandomTuner from tvm.autotvm.tuner import XGBTuner from rl_tuner.ga_dqn_tuner import GADQNTuner from rl_tuner.ga_dqn_tuner_debug import GADQNTuner as GADQNTunerDebug from .get_model import get_model def tune_model(mod, params, tune_settings, target, model_name): """ Tune a model for a specified number of trials along with other tune settings. Tune settings are specified using a json configuration, as per the TVM tools readme. """ early_stopping = tune_settings['early_stopping'] number = tune_settings["number"] save_path = tune_settings["save_path"] save_name = tune_settings["save_name"] repeat = tune_settings["repeat"] debug = tune_settings.get("debug_gadqn") or False trials = tune_settings["trials"] tuner = tune_settings["tuner"] target = tvm.target.Target(target) tasks = autotvm.task.extract_from_program( mod["main"], target=target, target_host="llvm", params=params) runner = autotvm.LocalRunner( number=number, repeat=repeat) measure_option = autotvm.measure_option( builder=autotvm.LocalBuilder(build_func="default"), runner=runner) for i, tsk in enumerate(tasks): prefix = "[Task %2d/%2d] " % (i + 1, len(tasks)) # Create a tuner if tuner in ("xgb", "xgb-rank"): tuner_obj = XGBTuner(tsk, loss_type="rank") elif tuner == "xgb_knob": tuner_obj = XGBTuner(tsk, loss_type="rank", feature_type="knob") elif tuner == "ga": tuner_obj = GATuner(tsk, pop_size=50) elif tuner == "random": tuner_obj = RandomTuner(tsk) elif tuner == "gridsearch": tuner_obj = GridSearchTuner(tsk) elif tuner == "ga-dqn" and debug: tuner_obj = GADQNTunerDebug(tsk) elif tuner == "ga-dqn": tuner_obj = GADQNTuner(tsk) else: raise ValueError("invalid tuner: %s " % tuner) abs_path = Path(save_path + save_name).resolve() abs_path.mkdir(exist_ok=True, parents=True) abs_path_str = str(abs_path) tuner_obj.tune( n_trial=min(trials, len(tsk.config_space)), early_stopping=early_stopping, measure_option=measure_option, callbacks=[ autotvm.callback.progress_bar(trials, prefix=prefix), autotvm.callback.log_to_file(abs_path_str + f"/tuning_record_model={model_name}.json"), ], ) # Save debug info for rl tuner only if tuner == "ga-dqn" and debug: tuner_obj.save_model(save_path, save_name + f"_model={model_name}_layer={i}") del tuner_obj def tune_models(data): """ Auto tune all models referenced in the json configuration. """ target_string = data['target'] tune_settings = data['autotuner_settings'] for model in data['models']: trace, input_shapes = get_model(model['name'], model['type']) mod, params = relay.frontend.from_pytorch(trace, input_shapes) print(f"Tuning model {model['name']}, using strategy {tune_settings['tuner']}") tune_model(mod, params, tune_settings, target_string, model['name'])
tools/tune_model.py
3,430
Tune a model for a specified number of trials along with other tune settings. Tune settings are specified using a json configuration, as per the TVM tools readme. Auto tune all models referenced in the json configuration. Create a tuner Save debug info for rl tuner only
272
en
0.766502
#!/usr/bin/env python # Author: Nick Zwart # Date: 2016jun01 # Backup all the projects of a git-hub style website via git mirroring. # https://www.garron.me/en/bits/backup-git-bare-repo.html import os import sys import time import gitlab # external GitLab API import github # external GitHub API import shutil import hashlib import optparse import subprocess class GitWebsiteTypeAPI: '''The abstract class to template each git-based website api. ''' def __init__(self, token, url): self._token = token self._url = url def numProjects(self): # return the number of projects pass def projectPath(self, index): # return the full path for each project including group i.e. # <user/group-directory>/<repository-name> # e.g. # nckz/BackupHub pass def projectURL(self, index): # return the ssh-url that assumes ssh-keys have been distributed e.g. # git@git<lab/hub>.com:<user/group>/<repo-name>.git # e.g. # git@github.com:nckz/BackupHub.git pass class GitLabAPI(GitWebsiteTypeAPI): def __init__(self, token, url): GitWebsiteTypeAPI.__init__(self, token, url) # authenticate a gitlab session self._gl = gitlab.Gitlab(self._url, self._token) self._gl.auth() # list all projects self._projects = self._gl.projects.list(all=True) def numProjects(self): return len(self._projects) def projectPath(self, index): return self._projects[index].path_with_namespace def projectURL(self, index): return self._projects[index].ssh_url_to_repo class GitHubAPI(GitWebsiteTypeAPI): def __init__(self, token, url=''): GitWebsiteTypeAPI.__init__(self, token, url) # authenticate a gitlab session self._gh = github.Github(self._token) # list all projects self._projects = self._gh.get_user().get_repos() def numProjects(self): return len([i for i in self._projects]) def projectPath(self, index): return self._projects[index].full_name def projectURL(self, index): return self._projects[index].ssh_url class GitBareMirror: '''A simple git interface for managing bare-mirroed repos that backup url accessible upstream repos. ''' def __init__(self, path, url, overwrite=False, moveAside=False): self._path = path self._origin_url = url self._overwrite = overwrite self._moveAside = moveAside if self.validExistingRepo(): self.update() else: self.createMirroredRepo() def validExistingRepo(self): try: assert os.path.isdir(self._path), ('The supplied directory ' 'does not exist.') # move to the existing repo and check if its bare os.chdir(self._path) cmd = subprocess.Popen('git rev-parse --is-bare-repository', shell=True, stdout=subprocess.PIPE) cmd.wait() # Error checking assert cmd.returncode != 128, ('The supplied directory ' 'exists, but is not a git repo.') assert cmd.returncode == 0, 'There was an unhandled git error.' 
firstline = cmd.stdout.readlines()[0].decode('utf8') assert 'false' not in firstline, ('The supplied directory ' 'is NOT a bare repo.') assert 'true' in firstline, ('Unable to verify that the repo is ' 'bare.') # check if the existing repo has the same origin url # -prevent name collision if group/org namespace isn't used cmd = subprocess.Popen('git config --get remote.origin.url', shell=True, stdout=subprocess.PIPE) cmd.wait() firstline = cmd.stdout.readlines()[0].decode('utf8') assert self._origin_url in firstline, ('The existing repo ' 'has a url that differs from the supplied origin url.') return True except AssertionError as err: print('The given path does not contain a valid repo by:', err) return False def update(self): cmd = subprocess.Popen('git remote update', shell=True, stdout=subprocess.PIPE) cmd.wait() assert cmd.returncode == 0, 'ERROR: git error' print('SUCCESS (updated)') def createMirroredRepo(self): # Handle existing directories based on user options: # move the dir to a unique name, remove it, or fail w/ exception if self._moveAside and os.path.exists(self._path): parentPath = os.path.dirname(self._path) dirContents = str(os.listdir(parentPath)).encode('utf8') newNameExt = hashlib.md5(dirContents).hexdigest() newName = self._path+'_'+newNameExt+'_bu' print('MOVING PATH', self._path, newName) shutil.move(self._path, newName) elif self._overwrite and os.path.exists(self._path): print('REMOVING PATH', self._path) shutil.rmtree(self._path) else: assert not os.path.exists(self._path), ('ERROR: the supplied path ' 'already exists, unable to create mirror.') os.makedirs(self._path) os.chdir(self._path) cmd = subprocess.Popen('git clone --mirror ' + str(self._origin_url) + ' .', shell=True, stdout=subprocess.PIPE) cmd.wait() print('SUCCESS (new mirror)') if __name__ == '__main__': # parse input args parser = optparse.OptionParser() parser.add_option('--path', dest='backupPath', action='store', type='string', default=os.path.expanduser('~/backup'), help='The directory to store the backups.') parser.add_option('--ignore-errors', dest='ignoreErrors', action='store_true', default=False, help='Continue to backup other repos if one has failed.') parser.add_option('--overwrite', dest='overwrite', action='store_true', default=False, help='Overwrite existing directories.') parser.add_option('--move-aside', dest='moveAside', action='store_true', default=False, help='Move existing directories aside with a tempfile extension.') parser.add_option('--token', dest='token', action='store', type='string', default=None, help='The token required to access the target git web api.') parser.add_option('--website', dest='website', action='store', type='string', default=None, help='The hub website where the git repos are stored.') parser.add_option('--github', dest='github', action='store_true', default=False, help='Connect to GitHub.') parser.add_option('--gitlab', dest='gitlab', action='store_true', default=True, help='Connect to GitLab (default).') options, args = parser.parse_args(sys.argv) localtime = time.asctime( time.localtime(time.time()) ) print("BackupHub Start:", localtime) assert options.token is not None if options.github: options.gitlab = False if options.gitlab: assert options.website is not None # Check for existing backup directory and make one if it doesn't exist. if not os.path.isdir(options.backupPath): print('The specified backup path doesn\'t exist.') sys.exit(1) # Get the repository info from the git web api. 
if options.github: webapi = GitHubAPI(options.token) elif options.gitlab: webapi = GitLabAPI(options.token, options.website) # Display whats going on as the repos get either updated or newly mirrored. print('Repository:') for i in range(webapi.numProjects()): try: curPath = os.path.join(options.backupPath, webapi.projectPath(i)) curURL = webapi.projectURL(i) print('\nSyncing: ', curURL, curPath) repo = GitBareMirror(curPath, curURL, overwrite=options.overwrite, moveAside=options.moveAside) except Exception as err: if options.ignoreErrors: print(err) else: raise localtime = time.asctime( time.localtime(time.time()) ) print("BackupHub Finished:", localtime)
BackupHub.py
8,467
A simple git interface for managing bare-mirroed repos that backup url accessible upstream repos. The abstract class to template each git-based website api. !/usr/bin/env python Author: Nick Zwart Date: 2016jun01 Backup all the projects of a git-hub style website via git mirroring. https://www.garron.me/en/bits/backup-git-bare-repo.html external GitLab API external GitHub API return the number of projects return the full path for each project including group i.e. <user/group-directory>/<repository-name> e.g. nckz/BackupHub return the ssh-url that assumes ssh-keys have been distributed e.g. git@git<lab/hub>.com:<user/group>/<repo-name>.git e.g. git@github.com:nckz/BackupHub.git authenticate a gitlab session list all projects authenticate a gitlab session list all projects move to the existing repo and check if its bare Error checking check if the existing repo has the same origin url -prevent name collision if group/org namespace isn't used Handle existing directories based on user options: move the dir to a unique name, remove it, or fail w/ exception parse input args Check for existing backup directory and make one if it doesn't exist. Get the repository info from the git web api. Display whats going on as the repos get either updated or newly mirrored.
1,293
en
0.608423
import os from src.multi_site_inputs_parser import multi_site_csv_parser from src.parse_api_responses_to_csv import parse_responses_to_csv_with_template from src.post_and_poll import get_api_results from src.parse_api_responses_to_excel import parse_api_responses_to_excel """ Change these values """ ############################################################################################################## API_KEY = 'DEMO KEY' # REPLACE WITH YOUR API KEY inputs_path = os.path.join('inputs') outputs_path = os.path.join('Ref_Profile_Full_Service_Restaurant') output_template = os.path.join(outputs_path, 'results_template.csv') output_file = os.path.join(outputs_path, 'results_summary.csv') ############################################################################################################## server = 'https://developer.nrel.gov/api/reopt/v1' path_to_inputs = os.path.join(inputs_path, 'baseline_scenarios_full_service_restaurant.csv') list_of_posts = multi_site_csv_parser(path_to_inputs, api_url=server, API_KEY=API_KEY) responses = [] for post in list_of_posts: responses.append(get_api_results( post, results_file=os.path.join(outputs_path, post['Scenario']['description'] + '.json'), api_url=server, API_KEY=API_KEY) ) """ Two options for making a summary of scenarios: 1. Write to a csv using a template with column headers for desired summary keys (scalar values only) 2. Write all inputs, outputs, and dispatch to an Excel spreadsheet """ parse_responses_to_csv_with_template(csv_template=output_template, responses=responses, output_csv=output_file, input_csv=path_to_inputs, n_custom_columns=2) parse_api_responses_to_excel(responses, spreadsheet='results_summary.xlsx')
multi_site/baseline_scenario_full_service_restaurant.py
1,767
REPLACE WITH YOUR API KEY
25
en
0.701228
import math import sys def example_1(): """ THIS IS A LONG COMMENT AND should be wrapped to fit within a 72 character limit """ long_1 = """LONG CODE LINES should be wrapped within 79 character to prevent page cutoff stuff""" long_2 = """This IS a long string that looks gross and goes beyond what it should""" some_tuple =(1, 2, 3, 'a') some_variable={"long": long_1, 'other':[math.pi, 100,200, 300, 9999292929292, long_2], "more": {"inner": "THIS whole logical line should be wrapped"}, "data": [444,5555,222,3,3,4,4,5,5,5,5,5,5,5]} return (some_tuple, some_variable) def example_2(): return {"has_key() is deprecated": True} class Example_3(object): def __init__(self, bar): self.bar = bar def bar_func(self): if self.bar: self.bar += 1 self.bar = self.bar * self.bar return self.bar else: some_string = """ INDENTATION IN MULTIPLE STRINGS SHOULD NOT BE TOUCHED only actual code should be reindented, THIS IS MORE CODE """ return (sys.path, some_string)
lambdata/code_review.py
1,233
THIS IS A LONG COMMENT AND should be wrapped to fit within a 72 character limit
80
en
0.913089
from rest_framework.test import APIRequestFactory from rest_framework import status from django.test import TestCase from django.urls import reverse from ..models import User from ..serializer import UserSerializer from ..views import UserViewSet import ipapi class UsersApiRootTestCase(TestCase): def test_api_root_should_reply_200(self): """ GET /api/v1/ should return an hyperlink to the users view and return a successful status 200 OK. """ request = APIRequestFactory().get("/api/v1/") user_list_view = UserViewSet.as_view({"get": "list"}) response = user_list_view(request) self.assertEqual(status.HTTP_200_OK, response.status_code) class UsersApiTestCase(TestCase): """ Factorize the tests setup to use a pool of existing users. """ def setUp(self): self.factory = APIRequestFactory() self.users = [ User.objects.create( first_name="Riri", last_name="Duck", email="riri.duck@ricardo.ch", password="dummy"), User.objects.create( first_name="Fifi", last_name="Duck", email="fifi.duck@ricardo.ch", password="dummy"), User.objects.create( first_name="Loulou", last_name="Duck", email="loulou.duck@ricardo.ch", password="dummy") ] class GetAllUsersTest(UsersApiTestCase): """ Test GET /api/v1/users """ def test_list_all_users_should_retrieve_all_users_and_reply_200(self): """ GET /api/v1/users should return all the users (or empty if no users found) and return a successful status 200 OK. """ users = User.objects.all().order_by("id") request = self.factory.get(reverse("v1:user-list")) serializer = UserSerializer(users, many=True, context={'request': request}) user_list_view = UserViewSet.as_view({"get": "list"}) response = user_list_view(request) self.assertEqual(len(self.users), len(response.data["results"])) self.assertEqual(serializer.data, response.data["results"]) self.assertEqual(status.HTTP_200_OK, response.status_code) class GetSingleUserTest(UsersApiTestCase): """ Test GET /api/v1/users/:id """ def test_get_user_when_id_valid_should_retrieve_user_and_reply_200(self): riri = User.objects.create( first_name="Riri", last_name="Duck", email="riri.duck@ricardo.ch", password="dummy") user = User.objects.get(pk=riri.pk) request = self.factory.get(reverse("v1:user-detail", kwargs={"pk": riri.pk})) serializer = UserSerializer(user, context={'request': request}) user_detail_view = UserViewSet.as_view({"get": "retrieve"}) response = user_detail_view(request, pk=riri.pk) self.assertEqual(serializer.data, response.data) self.assertEqual(status.HTTP_200_OK, response.status_code) def test_get_user_when_id_invalid_should_reply_404(self): request = self.factory.get(reverse("v1:user-detail", kwargs={"pk": 100})) user_detail_view = UserViewSet.as_view({"get": "retrieve"}) response = user_detail_view(request, pk=100) self.assertEqual(status.HTTP_404_NOT_FOUND, response.status_code) class CreateNewUserTest(UsersApiTestCase): """ Test POST /api/v1/users Override 'REMOTE_ADDR' to set IP address to Switzerland or another country for testing purpose. 
""" def test_post_user_when_from_Switzerland_and_data_valid_should_create_user_and_reply_201(self): initial_users_count = len(self.users) valid_data = { "first_name": "Casper", "last_name": "Canterville", "email": "c@sper.com", "password": "dummy", } request = self.factory.post( reverse("v1:user-list"), data=valid_data, REMOTE_ADDR='2.16.92.0' ) user_detail_view = UserViewSet.as_view({"post": "create"}) response = user_detail_view(request) self.assertEqual(status.HTTP_201_CREATED, response.status_code) new_users_count = User.objects.count() self.assertEqual(initial_users_count+1, new_users_count) def test_post_user_when_id_invalid_should_not_create_user_and_reply_400(self): initial_users_count = len(self.users) invalid_data = { "first_name": "Casper", "last_name": "Canterville", "email": "", "password": "dummy", } request = self.factory.post( reverse("v1:user-list"), data=invalid_data, REMOTE_ADDR='2.16.92.0' ) user_detail_view = UserViewSet.as_view({"post": "create"}) response = user_detail_view(request) self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code) users_count = User.objects.count() self.assertEqual(initial_users_count, users_count) def test_post_user_when_data_valid_but_email_already_used_should_not_create_user_and_reply_400(self): initial_users_count = len(self.users) valid_data_with_used_email = { "first_name": "Casper", "last_name": "Canterville", "email": "riri.duck@ricardo.ch", "password": "dummy", } request = self.factory.post( reverse("v1:user-list"), data=valid_data_with_used_email, REMOTE_ADDR='2.16.92.0' ) user_detail_view = UserViewSet.as_view({"post": "create"}) response = user_detail_view(request) self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code) new_users_count = User.objects.count() self.assertEqual(initial_users_count, new_users_count) def test_post_user_when_IP_not_in_Switzerland_should_not_create_user_and_reply_403(self): initial_users_count = len(self.users) valid_data = { "first_name": "Casper", "last_name": "Canterville", "email": "c@sper.com", "password": "dummy", } request = self.factory.post( reverse("v1:user-list"), data=valid_data, REMOTE_ADDR='2.16.8.0' # Spain ) user_detail_view = UserViewSet.as_view({"post": "create"}) response = user_detail_view(request) self.assertEqual(status.HTTP_403_FORBIDDEN, response.status_code) self.assertTrue(len(response.data['detail']) > 0) users_count = User.objects.count() self.assertEqual(initial_users_count, users_count) class UpdateSinglUserTest(UsersApiTestCase): """ Test PUT|PATCH /api/v1/user/:id """ def test_patch_user_when_id_valid_should_patch_user_and_reply_200(self): riri = User.objects.create( first_name="Riri", last_name="Duck", email="riri.duck@ricardo.ch", password="dummy") request = self.factory.patch( reverse("v1:user-detail", kwargs={"pk": riri.pk}), data={"email": "riri@ricardo.ch"} ) user_detail_view = UserViewSet.as_view({"patch": "partial_update"}) response = user_detail_view(request, pk=riri.pk) self.assertEqual(status.HTTP_200_OK, response.status_code) def test_patch_user_when_id_invalid_should_not_patch_user_and_reply_404(self): riri = User.objects.create( first_name="Riri", last_name="Duck", email="riri.duck@ricardo.ch", password="dummy") request = self.factory.patch( reverse("v1:user-detail", kwargs={"pk": 100}), data={"email": "riri@ricardo.ch"} ) user_detail_view = UserViewSet.as_view({"patch": "partial_update"}) response = user_detail_view(request, pk=100) self.assertEqual(status.HTTP_404_NOT_FOUND, response.status_code) def 
test_put_when_invalid_data_should_not_update_user_and_reply_400(self): riri = User.objects.create( first_name="Riri", last_name="Duck", email="riri.duck@ricardo.ch", password="dummy") invalid_payload = { "first_name": "", "last_name": "Duck", "email": "riri.duck@ricardo.ch" } request = self.factory.put( reverse("v1:user-detail", kwargs={"pk": riri.pk}), data=invalid_payload ) user_detail_view = UserViewSet.as_view({"put": "update"}) response = user_detail_view(request, pk=riri.pk) self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code) class DeleteSingleUserTest(UsersApiTestCase): """ Test DELETE /api/v1/user/:id """ def test_delete_user_when_id_valid_should_delete_user_and_reply_204(self): initial_users_count = len(self.users) user_to_delete = self.users[0] request = self.factory.delete(reverse("v1:user-detail", kwargs={"pk": user_to_delete.pk})) user_detail_view = UserViewSet.as_view({"delete": "destroy"}) response = user_detail_view(request, pk=user_to_delete.pk) self.assertEqual(status.HTTP_204_NO_CONTENT, response.status_code) new_users_count = User.objects.count() self.assertEqual(initial_users_count-1, new_users_count) def test_delete_user_when_id_invalid_should_reply_404(self): request = self.factory.delete(reverse("v1:user-detail", kwargs={"pk": 100})) user_detail_view = UserViewSet.as_view({"delete": "destroy"}) response = user_detail_view(request, pk=100) self.assertEqual(status.HTTP_404_NOT_FOUND, response.status_code)
users_django/users/tests/test_views.py
9,607
Test POST /api/v1/users Override 'REMOTE_ADDR' to set IP address to Switzerland or another country for testing purpose. Test DELETE /api/v1/user/:id Test GET /api/v1/users Test GET /api/v1/users/:id Test PUT|PATCH /api/v1/user/:id Factorize the tests setup to use a pool of existing users. GET /api/v1/ should return an hyperlink to the users view and return a successful status 200 OK. GET /api/v1/users should return all the users (or empty if no users found) and return a successful status 200 OK. Spain
522
en
0.603275
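The viewset tests above drive the endpoints through APIRequestFactory. As a complementary illustration (not part of the original suite), the sketch below shows how the same create-user flow could be exercised through DRF's APIClient if it were added alongside these tests; the "v1:user-list" route, the User model, and the REMOTE_ADDR override for a Swiss IP are taken from the tests above, while the class and method names here are hypothetical.

from django.test import TestCase
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APIClient

from ..models import User


class CreateUserWithApiClientTest(TestCase):
    """Hypothetical companion test using APIClient instead of APIRequestFactory."""

    def test_post_user_with_client_should_reply_201(self):
        # Default environ values (including REMOTE_ADDR) apply to every request made by this client.
        client = APIClient(REMOTE_ADDR="2.16.92.0")
        payload = {
            "first_name": "Casper",
            "last_name": "Canterville",
            "email": "c@sper.com",
            "password": "dummy",
        }
        response = client.post(reverse("v1:user-list"), payload, format="json")
        self.assertEqual(status.HTTP_201_CREATED, response.status_code)
        self.assertTrue(User.objects.filter(email="c@sper.com").exists())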
# Credits to Ozan Sener # https://github.com/intel-isl/MultiObjectiveOptimization import numpy as np import torch class MGDASolver: MAX_ITER = 250 STOP_CRIT = 1e-5 @staticmethod def _min_norm_element_from2(v1v1, v1v2, v2v2): """ Analytical solution for min_{c} |cx_1 + (1-c)x_2|_2^2 d is the distance (objective) optimzed v1v1 = <x1,x1> v1v2 = <x1,x2> v2v2 = <x2,x2> """ if v1v2 >= v1v1: # Case: Fig 1, third column gamma = 0.999 cost = v1v1 return gamma, cost if v1v2 >= v2v2: # Case: Fig 1, first column gamma = 0.001 cost = v2v2 return gamma, cost # Case: Fig 1, second column gamma = -1.0 * ((v1v2 - v2v2) / (v1v1 + v2v2 - 2 * v1v2)) cost = v2v2 + gamma * (v1v2 - v2v2) return gamma, cost @staticmethod def _min_norm_2d(vecs: list, dps): """ Find the minimum norm solution as combination of two points This is correct only in 2D ie. min_c |\sum c_i x_i|_2^2 st. \sum c_i = 1 , 1 >= c_1 >= 0 for all i, c_i + c_j = 1.0 for some i, j """ dmin = 1e8 sol = 0 for i in range(len(vecs)): for j in range(i + 1, len(vecs)): if (i, j) not in dps: dps[(i, j)] = 0.0 for k in range(len(vecs[i])): dps[(i, j)] += torch.dot(vecs[i][k].view(-1), vecs[j][k].view(-1)).detach() dps[(j, i)] = dps[(i, j)] if (i, i) not in dps: dps[(i, i)] = 0.0 for k in range(len(vecs[i])): dps[(i, i)] += torch.dot(vecs[i][k].view(-1), vecs[i][k].view(-1)).detach() if (j, j) not in dps: dps[(j, j)] = 0.0 for k in range(len(vecs[i])): dps[(j, j)] += torch.dot(vecs[j][k].view(-1), vecs[j][k].view(-1)).detach() c, d = MGDASolver._min_norm_element_from2(dps[(i, i)], dps[(i, j)], dps[(j, j)]) if d < dmin: dmin = d sol = [(i, j), c, d] return sol, dps @staticmethod def _projection2simplex(y): """ Given y, it solves argmin_z |y-z|_2 st \sum z = 1 , 1 >= z_i >= 0 for all i """ m = len(y) sorted_y = np.flip(np.sort(y), axis=0) tmpsum = 0.0 tmax_f = (np.sum(y) - 1.0) / m for i in range(m - 1): tmpsum += sorted_y[i] tmax = (tmpsum - 1) / (i + 1.0) if tmax > sorted_y[i + 1]: tmax_f = tmax break return np.maximum(y - tmax_f, np.zeros(y.shape)) @staticmethod def _next_point(cur_val, grad, n): proj_grad = grad - (np.sum(grad) / n) tm1 = -1.0 * cur_val[proj_grad < 0] / proj_grad[proj_grad < 0] tm2 = (1.0 - cur_val[proj_grad > 0]) / (proj_grad[proj_grad > 0]) skippers = np.sum(tm1 < 1e-7) + np.sum(tm2 < 1e-7) t = 1 if len(tm1[tm1 > 1e-7]) > 0: t = np.min(tm1[tm1 > 1e-7]) if len(tm2[tm2 > 1e-7]) > 0: t = min(t, np.min(tm2[tm2 > 1e-7])) next_point = proj_grad * t + cur_val next_point = MGDASolver._projection2simplex(next_point) return next_point @staticmethod def find_min_norm_element(vecs: list): """ Given a list of vectors (vecs), this method finds the minimum norm element in the convex hull as min |u|_2 st. u = \sum c_i vecs[i] and \sum c_i = 1. 
It is quite geometric, and the main idea is the fact that if d_{ij} = min |u|_2 st u = c x_i + (1-c) x_j; the solution lies in (0, d_{i,j})Hence, we find the best 2-task solution , and then run the projected gradient descent until convergence """ # Solution lying at the combination of two points dps = {} init_sol, dps = MGDASolver._min_norm_2d(vecs, dps) n = len(vecs) sol_vec = np.zeros(n) sol_vec[init_sol[0][0]] = init_sol[1] sol_vec[init_sol[0][1]] = 1 - init_sol[1] if n < 3: # This is optimal for n=2, so return the solution return sol_vec, init_sol[2] iter_count = 0 grad_mat = np.zeros((n, n)) for i in range(n): for j in range(n): grad_mat[i, j] = dps[(i, j)] while iter_count < MGDASolver.MAX_ITER: grad_dir = -1.0 * np.dot(grad_mat, sol_vec) new_point = MGDASolver._next_point(sol_vec, grad_dir, n) # Re-compute the inner products for line search v1v1 = 0.0 v1v2 = 0.0 v2v2 = 0.0 for i in range(n): for j in range(n): v1v1 += sol_vec[i] * sol_vec[j] * dps[(i, j)] v1v2 += sol_vec[i] * new_point[j] * dps[(i, j)] v2v2 += new_point[i] * new_point[j] * dps[(i, j)] nc, nd = MGDASolver._min_norm_element_from2(v1v1.item(), v1v2.item(), v2v2.item()) # try: new_sol_vec = nc * sol_vec + (1 - nc) * new_point # except AttributeError: # print(sol_vec) change = new_sol_vec - sol_vec if np.sum(np.abs(change)) < MGDASolver.STOP_CRIT: return sol_vec, nd sol_vec = new_sol_vec @staticmethod def find_min_norm_element_FW(vecs): """ Given a list of vectors (vecs), this method finds the minimum norm element in the convex hull as min |u|_2 st. u = \sum c_i vecs[i] and \sum c_i = 1. It is quite geometric, and the main idea is the fact that if d_{ij} = min |u|_2 st u = c x_i + (1-c) x_j; the solution lies in (0, d_{i,j})Hence, we find the best 2-task solution, and then run the Frank Wolfe until convergence """ # Solution lying at the combination of two points dps = {} init_sol, dps = MGDASolver._min_norm_2d(vecs, dps) n = len(vecs) sol_vec = np.zeros(n) sol_vec[init_sol[0][0]] = init_sol[1] sol_vec[init_sol[0][1]] = 1 - init_sol[1] if n < 3: # This is optimal for n=2, so return the solution return sol_vec, init_sol[2] iter_count = 0 grad_mat = np.zeros((n, n)) for i in range(n): for j in range(n): grad_mat[i, j] = dps[(i, j)] while iter_count < MGDASolver.MAX_ITER: t_iter = np.argmin(np.dot(grad_mat, sol_vec)) v1v1 = np.dot(sol_vec, np.dot(grad_mat, sol_vec)) v1v2 = np.dot(sol_vec, grad_mat[:, t_iter]) v2v2 = grad_mat[t_iter, t_iter] nc, nd = MGDASolver._min_norm_element_from2(v1v1, v1v2, v2v2) new_sol_vec = nc * sol_vec new_sol_vec[t_iter] += 1 - nc change = new_sol_vec - sol_vec if np.sum(np.abs(change)) < MGDASolver.STOP_CRIT: return sol_vec, nd sol_vec = new_sol_vec @classmethod def get_scales(cls, grads, losses, normalization_type, tasks): scale = {} gn = gradient_normalizers(grads, losses, normalization_type) for t in tasks: for gr_i in range(len(grads[t])): grads[t][gr_i] = grads[t][gr_i] / (gn[t] + 1e-5) sol, min_norm = cls.find_min_norm_element([grads[t] for t in tasks]) for zi, t in enumerate(tasks): scale[t] = float(sol[zi]) return scale def gradient_normalizers(grads, losses, normalization_type): gn = {} if normalization_type == 'l2': for t in grads: gn[t] = torch.sqrt( torch.stack([gr.pow(2).sum().data for gr in grads[t]]).sum()) elif normalization_type == 'loss': for t in grads: gn[t] = min(losses[t].mean(), 10.0) elif normalization_type == 'loss+': for t in grads: gn[t] = min(losses[t].mean() * torch.sqrt( torch.stack([gr.pow(2).sum().data for gr in grads[t]]).sum()), 10) elif normalization_type == 
'none' or normalization_type == 'eq': for t in grads: gn[t] = 1.0 else: raise ValueError('ERROR: Invalid Normalization Type') return gn
utils/min_norm_solvers.py
8,857
Find the minimum norm solution as combination of two points This is correct only in 2D ie. min_c |\sum c_i x_i|_2^2 st. \sum c_i = 1 , 1 >= c_1 >= 0 for all i, c_i + c_j = 1.0 for some i, j Analytical solution for min_{c} |cx_1 + (1-c)x_2|_2^2 d is the distance (objective) optimzed v1v1 = <x1,x1> v1v2 = <x1,x2> v2v2 = <x2,x2> Given y, it solves argmin_z |y-z|_2 st \sum z = 1 , 1 >= z_i >= 0 for all i Given a list of vectors (vecs), this method finds the minimum norm element in the convex hull as min |u|_2 st. u = \sum c_i vecs[i] and \sum c_i = 1. It is quite geometric, and the main idea is the fact that if d_{ij} = min |u|_2 st u = c x_i + (1-c) x_j; the solution lies in (0, d_{i,j})Hence, we find the best 2-task solution , and then run the projected gradient descent until convergence Given a list of vectors (vecs), this method finds the minimum norm element in the convex hull as min |u|_2 st. u = \sum c_i vecs[i] and \sum c_i = 1. It is quite geometric, and the main idea is the fact that if d_{ij} = min |u|_2 st u = c x_i + (1-c) x_j; the solution lies in (0, d_{i,j})Hence, we find the best 2-task solution, and then run the Frank Wolfe until convergence Credits to Ozan Sener https://github.com/intel-isl/MultiObjectiveOptimization Case: Fig 1, third column Case: Fig 1, first column Case: Fig 1, second column Solution lying at the combination of two points This is optimal for n=2, so return the solution Re-compute the inner products for line search try: except AttributeError: print(sol_vec) Solution lying at the combination of two points This is optimal for n=2, so return the solution
1,617
en
0.816489
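A small usage sketch of the solver above, assuming the module is importable as utils.min_norm_solvers. Two toy tasks share the same parameters; per-task gradients are collected as lists of tensors (one per parameter), which is the layout get_scales expects, and the returned per-task weights are then used to combine the losses for a single backward pass. The task names and toy objectives are illustrative only.

import torch
from utils.min_norm_solvers import MGDASolver

torch.manual_seed(0)
# Shared parameters (two tensors, standing in for two layers of a model).
params = [torch.randn(4, requires_grad=True), torch.randn(2, requires_grad=True)]

def toy_loss(shift):
    # Each task pulls the shared parameters towards a different target value.
    return sum(((p - shift) ** 2).sum() for p in params)

tasks = ['task_a', 'task_b']
losses = {'task_a': toy_loss(0.0), 'task_b': toy_loss(1.0)}

# Per-task gradients w.r.t. every shared parameter, detached as the solver expects.
grads = {}
for t in tasks:
    g = torch.autograd.grad(losses[t], params, retain_graph=True)
    grads[t] = [gi.detach() for gi in g]

# Convex weights (summing to 1) that minimise the norm of the combined gradient.
scale = MGDASolver.get_scales(grads, losses, normalization_type='l2', tasks=tasks)
print(scale)

# The weighted loss can then be backpropagated once through the shared parameters.
total = sum(scale[t] * losses[t] for t in tasks)
total.backward()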
"""This module contains logic for refreshing materialized views. Materialized views don't get refreshed automatically after a bucardo initial sync. This module detects them and refreshes them. Classes exported: MatViews: Identify materialized views and refresh them on the secondary database. """ import psycopg2 from psycopg2 import sql from plugins import Plugin class MatViews(Plugin): """Identify materialized views and refresh them on the secondary database. Materialized views are identified based on the namespaces specified in the config. Methods exported: refresh: find and refresh materialized views """ def __init__(self, cfg): """Create configuration settings that may not already be set. The user can either define the relevant namespaces specifically for the mat_views plugin, or the mat_views plugin can draw on the settings in the bucardo section of the config. If neither exists, the script will throw an error. Keyword arguments: cfg: contents of the config file as a dictionary """ super(MatViews, self).__init__(cfg) # Override or inherit certain params from the parent, depending on the config. self._set_inheritable_params('mat_views') def refresh(self): """Refresh materialized views. First, this method finds the namespaces being replicated, by referring to the config for schemas and tables. Then it finds any materialized views in the namespaces. Then it refreshes the materialized views. """ print('Finding materialized views.') # 'm' is for "materialized view". views = self._find_objects('m', self.repl_objects) if views: conn = psycopg2.connect(self.secondary_schema_owner_conn_pg_format) for view in views: print(f'Refreshing {view[0]}.{view[1]}') query = sql.SQL('REFRESH MATERIALIZED VIEW {schema}.{table}').format( schema=sql.Identifier(view[0]), table=sql.Identifier(view[1]) ) try: with conn.cursor() as cur: cur.execute(query) conn.commit() except Exception: conn.close() raise conn.close() print('Done refreshing views.') else: print('No materialized views found.')
plugins/mat_views/__init__.py
2,518
Identify materialized views and refresh them on the secondary database. Materialized views are identified based on the namespaces specified in the config. Methods exported: refresh: find and refresh materialized views Create configuration settings that may not already be set. The user can either define the relevant namespaces specifically for the mat_views plugin, or the mat_views plugin can draw on the settings in the bucardo section of the config. If neither exists, the script will throw an error. Keyword arguments: cfg: contents of the config file as a dictionary Refresh materialized views. First, this method finds the namespaces being replicated, by referring to the config for schemas and tables. Then it finds any materialized views in the namespaces. Then it refreshes the materialized views. This module contains logic for refreshing materialized views. Materialized views don't get refreshed automatically after a bucardo initial sync. This module detects them and refreshes them. Classes exported: MatViews: Identify materialized views and refresh them on the secondary database. Override or inherit certain params from the parent, depending on the config. 'm' is for "materialized view".
1,219
en
0.800008
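Because the plugin above depends on an internal Plugin base class and a bucardo-style config, a self-contained sketch of the same refresh pattern is given below. It assumes only a psycopg2 DSN and a list of (schema, view) pairs obtained elsewhere, and mirrors the identifier-safe REFRESH MATERIALIZED VIEW statement used in refresh().

import psycopg2
from psycopg2 import sql

def refresh_matviews(dsn, views):
    """Refresh each (schema, name) materialized view over a single connection."""
    conn = psycopg2.connect(dsn)
    try:
        for schema, name in views:
            # Identifiers are composed with psycopg2.sql to avoid quoting problems.
            query = sql.SQL('REFRESH MATERIALIZED VIEW {schema}.{table}').format(
                schema=sql.Identifier(schema),
                table=sql.Identifier(name),
            )
            with conn.cursor() as cur:
                cur.execute(query)
            conn.commit()
    finally:
        conn.close()

# Hypothetical call with an illustrative DSN and view name:
# refresh_matviews('dbname=app host=replica', [('public', 'daily_totals')])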
# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: list_translate_rule.proto import sys _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() from monitor_sdk.model.monitor import translate_rule_pb2 as monitor__sdk_dot_model_dot_monitor_dot_translate__rule__pb2 DESCRIPTOR = _descriptor.FileDescriptor( name='list_translate_rule.proto', package='translate', syntax='proto3', serialized_options=None, serialized_pb=_b('\n\x19list_translate_rule.proto\x12\ttranslate\x1a.monitor_sdk/model/monitor/translate_rule.proto\";\n\x18ListTranslateRuleRequest\x12\x0c\n\x04page\x18\x01 \x01(\x05\x12\x11\n\tpage_size\x18\x02 \x01(\x05\"\x8c\x01\n\x19ListTranslateRuleResponse\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\x0b\n\x03msg\x18\x02 \x01(\t\x12\r\n\x05total\x18\x03 \x01(\x05\x12\x0c\n\x04page\x18\x04 \x01(\x05\x12\x11\n\tpage_size\x18\x05 \x01(\x05\x12$\n\x04\x64\x61ta\x18\x06 \x03(\x0b\x32\x16.monitor.TransalteRule\"\x88\x01\n ListTranslateRuleResponseWrapper\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\x13\n\x0b\x63odeExplain\x18\x02 \x01(\t\x12\r\n\x05\x65rror\x18\x03 \x01(\t\x12\x32\n\x04\x64\x61ta\x18\x04 \x01(\x0b\x32$.translate.ListTranslateRuleResponseb\x06proto3') , dependencies=[monitor__sdk_dot_model_dot_monitor_dot_translate__rule__pb2.DESCRIPTOR,]) _LISTTRANSLATERULEREQUEST = _descriptor.Descriptor( name='ListTranslateRuleRequest', full_name='translate.ListTranslateRuleRequest', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='page', full_name='translate.ListTranslateRuleRequest.page', index=0, number=1, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='page_size', full_name='translate.ListTranslateRuleRequest.page_size', index=1, number=2, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=88, serialized_end=147, ) _LISTTRANSLATERULERESPONSE = _descriptor.Descriptor( name='ListTranslateRuleResponse', full_name='translate.ListTranslateRuleResponse', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='code', full_name='translate.ListTranslateRuleResponse.code', index=0, number=1, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='msg', full_name='translate.ListTranslateRuleResponse.msg', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), 
_descriptor.FieldDescriptor( name='total', full_name='translate.ListTranslateRuleResponse.total', index=2, number=3, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='page', full_name='translate.ListTranslateRuleResponse.page', index=3, number=4, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='page_size', full_name='translate.ListTranslateRuleResponse.page_size', index=4, number=5, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='data', full_name='translate.ListTranslateRuleResponse.data', index=5, number=6, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=150, serialized_end=290, ) _LISTTRANSLATERULERESPONSEWRAPPER = _descriptor.Descriptor( name='ListTranslateRuleResponseWrapper', full_name='translate.ListTranslateRuleResponseWrapper', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='code', full_name='translate.ListTranslateRuleResponseWrapper.code', index=0, number=1, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='codeExplain', full_name='translate.ListTranslateRuleResponseWrapper.codeExplain', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='error', full_name='translate.ListTranslateRuleResponseWrapper.error', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='data', full_name='translate.ListTranslateRuleResponseWrapper.data', index=3, number=4, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=293, serialized_end=429, ) _LISTTRANSLATERULERESPONSE.fields_by_name['data'].message_type = monitor__sdk_dot_model_dot_monitor_dot_translate__rule__pb2._TRANSALTERULE _LISTTRANSLATERULERESPONSEWRAPPER.fields_by_name['data'].message_type = 
_LISTTRANSLATERULERESPONSE DESCRIPTOR.message_types_by_name['ListTranslateRuleRequest'] = _LISTTRANSLATERULEREQUEST DESCRIPTOR.message_types_by_name['ListTranslateRuleResponse'] = _LISTTRANSLATERULERESPONSE DESCRIPTOR.message_types_by_name['ListTranslateRuleResponseWrapper'] = _LISTTRANSLATERULERESPONSEWRAPPER _sym_db.RegisterFileDescriptor(DESCRIPTOR) ListTranslateRuleRequest = _reflection.GeneratedProtocolMessageType('ListTranslateRuleRequest', (_message.Message,), { 'DESCRIPTOR' : _LISTTRANSLATERULEREQUEST, '__module__' : 'list_translate_rule_pb2' # @@protoc_insertion_point(class_scope:translate.ListTranslateRuleRequest) }) _sym_db.RegisterMessage(ListTranslateRuleRequest) ListTranslateRuleResponse = _reflection.GeneratedProtocolMessageType('ListTranslateRuleResponse', (_message.Message,), { 'DESCRIPTOR' : _LISTTRANSLATERULERESPONSE, '__module__' : 'list_translate_rule_pb2' # @@protoc_insertion_point(class_scope:translate.ListTranslateRuleResponse) }) _sym_db.RegisterMessage(ListTranslateRuleResponse) ListTranslateRuleResponseWrapper = _reflection.GeneratedProtocolMessageType('ListTranslateRuleResponseWrapper', (_message.Message,), { 'DESCRIPTOR' : _LISTTRANSLATERULERESPONSEWRAPPER, '__module__' : 'list_translate_rule_pb2' # @@protoc_insertion_point(class_scope:translate.ListTranslateRuleResponseWrapper) }) _sym_db.RegisterMessage(ListTranslateRuleResponseWrapper) # @@protoc_insertion_point(module_scope)
monitor_sdk/api/translate/list_translate_rule_pb2.py
9,182
-*- coding: utf-8 -*- Generated by the protocol buffer compiler. DO NOT EDIT! source: list_translate_rule.proto @@protoc_insertion_point(imports) @@protoc_insertion_point(class_scope:translate.ListTranslateRuleRequest) @@protoc_insertion_point(class_scope:translate.ListTranslateRuleResponse) @@protoc_insertion_point(class_scope:translate.ListTranslateRuleResponseWrapper) @@protoc_insertion_point(module_scope)
413
en
0.324519
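The module above is compiler-generated and should not be edited; a brief usage sketch is shown instead, assuming the monitor_sdk package is importable. It constructs the request message, round-trips it through the protobuf wire format, and populates the nested response wrapper, using only field names declared in the descriptor.

from monitor_sdk.api.translate import list_translate_rule_pb2 as pb

# Build a paging request and serialize it to wire-format bytes.
request = pb.ListTranslateRuleRequest(page=1, page_size=20)
payload = request.SerializeToString()

# Parse the bytes back into a fresh message.
decoded = pb.ListTranslateRuleRequest()
decoded.ParseFromString(payload)
assert decoded.page == 1 and decoded.page_size == 20

# The wrapper nests the list response under its `data` field.
wrapper = pb.ListTranslateRuleResponseWrapper(code=0, codeExplain="", error="")
wrapper.data.code = 0
wrapper.data.total = 0
wrapper.data.page = 1
wrapper.data.page_size = 20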
"""NAO robot class""" from .robot import Robot import torch class Pose_Assumption(Robot): def __init__(self, env_params): super(Pose_Assumption, self).__init__(env_params) env_params = self.ingest_params2(env_params) self.target = env_params["target error"] self.joints = env_params["joints to move"] self.target_angles = env_params["target angles"] self.default_pose = "LyingBack" self.penalty = 0 # State self.error = float('inf') # Initial state self.assume_pose(self.default_pose) self.set_stiffness() def ingest_params2(self, env_params): if "target error" not in env_params: env_params["target error"] = 0.1 if "joints to move" not in env_params: env_params["joints to move"] = ["HeadYaw", "HeadPitch", "RShoulderPitch","RShoulderRoll", "RElbowYaw", "RElbowRoll", "RWristYaw", "RHipYawPitch", "RHipRoll", "RHipPitch", "RKneePitch", "RAnklePitch", "RAnkleRoll", "LShoulderPitch","LShoulderRoll", "LElbowYaw", "LElbowRoll", "LWristYaw", "LHipYawPitch", "LHipRoll", "LHipPitch", "LKneePitch", "LAnklePitch", "LAnkleRoll" ] # NOTE: joints must be named individually if "target angles" not in env_params: env_params["target angles"] = [0.0, 0.153, 0.66, 0.914, 0.994, 0.721, 0.08432, -0.512, -0.04, -0.8299, 0.317, 0.288, -0.268, 0.99, 0.175, -1.234, -0.819, -1.286, -0.58287, 0.118, 0.2899, -0.09, 0.6, -0.046 ] return env_params def set_stiffness(self): time = 1.0 # Seconds value = 0.7 # Stiffness (max 1/min 0, higher is looser) self.motion.stiffnessInterpolation(self.joints, value, time) def step(self): """In this function the robot will return to default pose, to be ready for the new command. """ origin = [0.4] # Arbitrary input self.observation = torch.tensor(origin, dtype=self.precision, device = self.device) def evaluate(self, inference): """Evaluates the predicted pose.""" self.reset_state() values = self.process_inference(inference) self.apply(values) angles = self.get_joints() self.calc_error(angles) return self.error def reset_state(self): self.penalty = 0 self.error = float('inf') def process_inference(self, inference): """Ensures safety of the predicted angles.""" values = [a.item() for a in inference] for idx, value in enumerate(values): name = self.joints[idx] limits = self.motion.getLimits(name) min_angle = limits[0][0] max_angle = limits[0][1] max_vel = limits[0][2] # Unenforced max_tor = limits[0][3] # Unenforced value = self.cap_angle(value, min_angle, max_angle) values[idx] = [value] return values def apply(self, angles): """Applies the pose to the robot.""" self.set_joints(angles) def cap_angle(self, x, a, b): penalty = 10 # Safety penalty if x<a: self.penalty += penalty x = a elif x>b: self.penalty += penalty x = b return x def calc_error(self, angles): """Calculate the error between predicted and target angles, and add the safety penalties. """ errors = [abs(x-y) for x,y in zip(angles, self.target_angles)] error = sum(errors) error += self.penalty self.error = torch.tensor(error) #
environments/nao/pose_assumption.py
4,637
Applies the pose to the robot. Calculate the error between predicted and target angles, and add the safety penalties. Evaluates the predicted pose. Ensures safety of the predicted angles. In this function the robot will return to default pose, to be ready for the new command. NAO robot class State Initial state NOTE: joints must be named individually Seconds Stiffness (max 1/min 0, higher is looser) Arbitrary input Unenforced Unenforced Safety penalty
457
en
0.82033
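Running Pose_Assumption requires a NAOqi-connected Robot base class that is not shown here, so the sketch below reproduces only the scoring logic from process_inference/cap_angle/calc_error in a robot-free form: predicted joint angles are clipped to per-joint limits, each violation adds a fixed safety penalty, and the error is the summed absolute deviation from the target angles. The limits and angle values are hypothetical.

import torch

def score_pose(predicted, targets, limits, penalty_per_violation=10):
    """Clip predictions to joint limits, penalise violations, and sum the error."""
    penalty = 0
    clipped = []
    for value, (lo, hi) in zip(predicted, limits):
        if value < lo or value > hi:
            penalty += penalty_per_violation
        clipped.append(min(max(value, lo), hi))
    error = sum(abs(x - y) for x, y in zip(clipped, targets)) + penalty
    return torch.tensor(float(error))

# Hypothetical two-joint example: the second prediction exceeds its upper limit.
print(score_pose(predicted=[0.2, 2.5],
                 targets=[0.0, 0.9],
                 limits=[(-2.0, 2.0), (-1.5, 1.5)]))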
__package__ = "blackhat.bin.installable" from ...helpers import Result from ...lib.input import ArgParser from ...lib.output import output from ...lib.ifaddrs import getifaddrs __COMMAND__ = "ifconfig" __DESCRIPTION__ = "" __DESCRIPTION_LONG__ = "" __VERSION__ = "1.2" def parse_args(args=[], doc=False): """ Handle parsing of arguments and flags. Generates docs using help from `ArgParser` Args: args (list): argv passed to the binary doc (bool): If the function should generate and return manpage Returns: Processed args and a copy of the `ArgParser` object if not `doc` else a `string` containing the generated manpage """ parser = ArgParser(prog=__COMMAND__, description=f"{__COMMAND__} - {__DESCRIPTION__}") parser.add_argument("--version", action="store_true", help=f"output version information and exit") args = parser.parse_args(args) arg_helps_with_dups = parser._actions arg_helps = [] [arg_helps.append(x) for x in arg_helps_with_dups if x not in arg_helps] NAME = f"**NAME*/\n\t{__COMMAND__} - {__DESCRIPTION__}" SYNOPSIS = f"**SYNOPSIS*/\n\t{__COMMAND__} [OPTION]... " DESCRIPTION = f"**DESCRIPTION*/\n\t{__DESCRIPTION__}\n\n" for item in arg_helps: # Its a positional argument if len(item.option_strings) == 0: # If the argument is optional: if item.nargs == "?": SYNOPSIS += f"[{item.dest.upper()}] " else: SYNOPSIS += f"{item.dest.upper()} " else: # Boolean flag if item.nargs == 0: if len(item.option_strings) == 1: DESCRIPTION += f"\t**{' '.join(item.option_strings)}*/\t{item.help}\n\n" else: DESCRIPTION += f"\t**{' '.join(item.option_strings)}*/\n\t\t{item.help}\n\n" elif item.nargs == "+": DESCRIPTION += f"\t**{' '.join(item.option_strings)}*/=[{item.dest.upper()}]...\n\t\t{item.help}\n\n" else: DESCRIPTION += f"\t**{' '.join(item.option_strings)}*/={item.dest.upper()}\n\t\t{item.help}\n\n" if doc: return f"{NAME}\n\n{SYNOPSIS}\n\n{DESCRIPTION}\n\n" else: return args, parser def main(args: list, pipe: bool) -> Result: """ # TODO: Add docstring for manpage """ args, parser = parse_args(args) if parser.error_message: if not args.version: return output(f"{__COMMAND__}: {parser.error_message}", pipe, success=False) if args.version: return output(f"ifconfig (blackhat netutils) {__VERSION__}", pipe) # If we specific -h/--help, args will be empty, so exit gracefully if not args: return output("", pipe) else: result = getifaddrs() return output(result.data.ifa_addr, pipe)
client/blackhat/bin/installable/ifconfig.py
2,858
# TODO: Add docstring for manpage Handle parsing of arguments and flags. Generates docs using help from `ArgParser` Args: args (list): argv passed to the binary doc (bool): If the function should generate and return manpage Returns: Processed args and a copy of the `ArgParser` object if not `doc` else a `string` containing the generated manpage Its a positional argument If the argument is optional: Boolean flag If we specific -h/--help, args will be empty, so exit gracefully
495
en
0.413812
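A minimal invocation sketch, assuming the surrounding blackhat package (with the output helper and getifaddrs referenced above) is importable; outside that environment the module's relative imports will fail. It simply calls main() the way the toolchain's shell presumably would.

from blackhat.bin.installable.ifconfig import main

# Print the version banner; returns a Result produced by output().
main(["--version"], pipe=False)

# With no flags, execution falls through to getifaddrs() and prints the interface addresses.
main([], pipe=False)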
# -*- coding: utf-8 -*- import numpy as np import astropy.units as u import pkg_resources from astropy.io import ascii from astropy.modeling.tabular import tabular_model from .baseclasses import BaseAtttauVModel from .helpers import _test_valid_x_range __all__ = ["WG00"] x_range_WG00 = [0.1, 3.0001] class WG00(BaseAtttauVModel): r""" Attenuation curve of Witt & Gordon (2000) Parameters ---------- tau_v: float optical depth in V band Raises ------ InputParameterError Input Av values outside of defined range Notes ----- From Witt & Gordon (2000, ApJ, Volume 528, pp. 799-816) Example: .. plot:: :include-source: import numpy as np import matplotlib.pyplot as plt import astropy.units as u from dust_attenuation.radiative_transfer import WG00 fig, ax = plt.subplots(1,2, figsize=(10,6)) # generate the curves and plot them # Use 1/microns for a better sampling x = np.arange(0.35,10.0,0.1)/u.micron x_Vband = 0.55 # microns tau_Vs = [0.25,0.4,1.1,17.0,46.0] for tau_V in tau_Vs[::-1]: att_model = WG00(tau_V = tau_V, geometry = 'cloudy', dust_type = 'mw', dust_distribution = 'clumpy') ax[0].plot(x,att_model(1/x),label=r'$\tau_V$ = %.2f mag' % (tau_V)) ax[1].plot(x,att_model(1/x)/att_model(x_Vband), label=r'$\tau_V$ = %.2f mag' % (tau_V)) ax[0].set_xlabel(r'$x$ [$\mu m^{-1}$]') ax[0].set_ylabel(r'$Att(x)$ [mag]') ax[1].set_xlabel(r'$x$ [$\mu m^{-1}$]') ax[1].set_ylabel(r'$Att(x)/Att_V$') ax[0].legend(loc='best') ax[1].legend(loc='best') fig.suptitle(r'CLOUDY / MW / clumpy model',size=15) plt.tight_layout() fig.subplots_adjust(top=0.88) plt.show() """ tau_V_range = [0.25, 50.0] x_range = x_range_WG00 def __init__( self, tau_V, geometry="dusty", dust_type="mw", dust_distribution="clumpy" ): """ Load the attenuation curves for a given geometry, dust type and dust distribution. 
Parameters ---------- tau_V: float optical depth in V band geometry: string 'shell', 'cloudy' or 'dusty' dust_type: string 'mw' or 'smc' dust_distribution: string 'homogeneous' or 'clumpy' Returns ------- Attx: np array (float) Att(x) attenuation curve [mag] """ # Ensure strings are lower cases self.geometry = geometry.lower() self.dust_type = dust_type.lower() self.dust_distribution = dust_distribution.lower() data_path = pkg_resources.resource_filename("dust_attenuation", "data/WG00/") data = ascii.read(data_path + self.geometry + ".txt", header_start=0) if self.dust_type == "mw": start = 0 elif self.dust_type == "smc": start = 25 # Column names tau_colname = "tau" tau_att_colname = "tau_att" fsca_colname = "f(sca)" fdir_colname = "f(dir)" fesc_colname = "f(esc)" if self.dust_distribution == "clumpy": tau_att_colname += "_c" fsca_colname += "_c" fdir_colname += "_c" fesc_colname += "_c" elif self.dust_distribution == "homogeneous": tau_att_colname += "_h" fsca_colname += "_h" fdir_colname += "_h" fesc_colname += "_h" tau_att_list = [] tau_list = [] fsca_list = [] fdir_list = [] fesc_list = [] len_data = len(data["lambda"]) # number of lines between 2 models steps = 25 counter = start while counter < len_data: tau_att_list.append( np.array(data[tau_att_colname][counter : counter + steps]) ) tau_list.append(np.array(data[tau_colname][counter : counter + steps])) fsca_list.append(np.array(data[fsca_colname][counter : counter + steps])) fdir_list.append(np.array(data[fdir_colname][counter : counter + steps])) fesc_list.append(np.array(data[fesc_colname][counter : counter + steps])) counter += int(2 * steps) # Convert to np.array and take transpose to have (wvl, tau_V) tau_att_table = np.array(tau_att_list).T tau_table = np.array(tau_list).T fsca_table = np.array(fsca_list).T fdir_table = np.array(fdir_list).T fesc_table = np.array(fesc_list).T # wavelength grid. 
It is the same for all the models wvl = np.array(data["lambda"][0:25]) self.wvl_grid = wvl # Grid for the optical depth tau_V_grid = np.array( [ 0.25, 0.5, 0.75, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 6.0, 7.0, 8.0, 9.0, 10.0, 15.0, 20.0, 25.0, 30.0, 35.0, 40.0, 45.0, 50.0, ] ) # Create a 2D tabular model for tau_att and all flux fraction tab = tabular_model(2, name="2D_table") # Values corresponding to the x and y grid points gridpoints = (wvl, tau_V_grid) self.model = tab( gridpoints, lookup_table=tau_att_table, name="tau_att_WG00", bounds_error=False, fill_value=None, method="linear", ) self.tau = tab( gridpoints, lookup_table=tau_table, name="tau_WG00", bounds_error=False, fill_value=None, method="linear", ) self.fsca = tab( gridpoints, lookup_table=fsca_table, name="fsca_WG00", bounds_error=False, fill_value=None, method="linear", ) self.fdir = tab( gridpoints, lookup_table=fdir_table, name="fdir_WG00", bounds_error=False, fill_value=None, method="linear", ) self.fesc = tab( gridpoints, lookup_table=fesc_table, name="fesc_WG00", bounds_error=False, fill_value=None, method="linear", ) # In Python 2: super(WG00, self) # In Python 3: super() but super(WG00, self) still works super(WG00, self).__init__(tau_V=tau_V) def evaluate(self, x, tau_V): """ WG00 function Parameters ---------- x: float expects either x in units of wavelengths or frequency or assumes wavelengths in [micron] internally microns are used tau_V: float optical depth in V band Returns ------- Attx: np array (float) Att(x) attenuation curve [mag] Raises ------ ValueError Input x values outside of defined range """ # convert to wavenumbers (1/micron) if x input in units # otherwise, assume x in appropriate wavenumber units with u.add_enabled_equivalencies(u.spectral()): x_quant = u.Quantity(x, u.micron, dtype=np.float64) # strip the quantity to avoid needing to add units to all the # polynomical coefficients x = x_quant.value # check that the wavenumbers are within the defined range _test_valid_x_range(x, self.x_range, "WG00") # setup the ax vectors n_x = len(x) xinterp = 1e4 * x yinterp = tau_V * np.ones(n_x) taux = self.model(xinterp, yinterp) # Convert optical depth to attenuation Attx = 1.086 * taux return Attx def get_extinction(self, x, tau_V): """ Return the extinction at a given wavelength and V-band optical depth. Parameters ---------- x: float expects either x in units of wavelengths or frequency or assumes wavelengths in [micron] internally microns are used tau_V: float optical depth in V band Returns ------- ext: np array (float) ext(x) extinction curve [mag] Raises ------ ValueError Input x values outside of defined range """ # convert to wavenumbers (1/micron) if x input in units # otherwise, assume x in appropriate wavenumber units with u.add_enabled_equivalencies(u.spectral()): x_quant = u.Quantity(x, u.micron, dtype=np.float64) # strip the quantity to avoid needing to add units to all the # polynomical coefficients x = x_quant.value # check that the wavenumbers are within the defined range _test_valid_x_range(x, self.x_range, "WG00") # setup the ax vectors x = np.atleast_1d(x) n_x = len(x) xinterp = 1e4 * x yinterp = tau_V * np.ones(n_x) return self.tau(xinterp, yinterp) * 1.086 def get_fsca(self, x, tau_V): """ Return the scattered flux fraction at a given wavelength and V-band optical depth. 
Parameters ---------- x: float expects either x in units of wavelengths or frequency or assumes wavelengths in [micron] internally microns are used tau_V: float optical depth in V band Returns ------- fsca: np array (float) fsca(x) scattered flux fraction Raises ------ ValueError Input x values outside of defined range """ # convert to wavenumbers (1/micron) if x input in units # otherwise, assume x in appropriate wavenumber units with u.add_enabled_equivalencies(u.spectral()): x_quant = u.Quantity(x, u.micron, dtype=np.float64) # strip the quantity to avoid needing to add units to all the # polynomical coefficients x = x_quant.value # check that the wavenumbers are within the defined range _test_valid_x_range(x, self.x_range, "WG00") # setup the ax vectors x = np.atleast_1d(x) n_x = len(x) xinterp = 1e4 * x yinterp = tau_V * np.ones(n_x) return self.fsca(xinterp, yinterp) def get_fdir(self, x, tau_V): """ Return the direct attenuated stellar flux fraction at a given wavelength and V-band optical depth. Parameters ---------- x: float expects either x in units of wavelengths or frequency or assumes wavelengths in [micron] internally microns are used tau_V: float optical depth in V band Returns ------- fsca: np array (float) fsca(x) scattered flux fraction Raises ------ ValueError Input x values outside of defined range """ # convert to wavenumbers (1/micron) if x input in units # otherwise, assume x in appropriate wavenumber units with u.add_enabled_equivalencies(u.spectral()): x_quant = u.Quantity(x, u.micron, dtype=np.float64) # strip the quantity to avoid needing to add units to all the # polynomical coefficients x = x_quant.value # check that the wavenumbers are within the defined range _test_valid_x_range(x, self.x_range, "WG00") # setup the ax vectors x = np.atleast_1d(x) n_x = len(x) xinterp = 1e4 * x yinterp = tau_V * np.ones(n_x) return self.fdir(xinterp, yinterp) def get_fesc(self, x, tau_V): """ Return the total escaping flux fraction at a given wavelength and V-band optical depth. Parameters ---------- x: float expects either x in units of wavelengths or frequency or assumes wavelengths in [micron] internally microns are used tau_V: float optical depth in V band Returns ------- fsca: np array (float) fsca(x) scattered flux fraction Raises ------ ValueError Input x values outside of defined range """ # convert to wavenumbers (1/micron) if x input in units # otherwise, assume x in appropriate wavenumber units with u.add_enabled_equivalencies(u.spectral()): x_quant = u.Quantity(x, u.micron, dtype=np.float64) # strip the quantity to avoid needing to add units to all the # polynomical coefficients x = x_quant.value # check that the wavenumbers are within the defined range _test_valid_x_range(x, self.x_range, "WG00") # setup the ax vectors x = np.atleast_1d(x) n_x = len(x) xinterp = 1e4 * x yinterp = tau_V * np.ones(n_x) return self.fesc(xinterp, yinterp) def get_albedo(self, x): """ Return the albedo in function of wavelength for the corresponding dust type (SMC or MW). The albedo gives the probability a photon is scattered from a dust grain. 
Parameters ---------- x: float expects either x in units of wavelengths or frequency or assumes wavelengths in [micron] internally microns are used Returns ------- albedo: np array (float) alb(x) albedo Raises ------ ValueError Input x values outside of defined range """ # convert to wavenumbers (1/micron) if x input in units # otherwise, assume x in appropriate wavenumber units with u.add_enabled_equivalencies(u.spectral()): x_quant = u.Quantity(x, u.micron, dtype=np.float64) # strip the quantity to avoid needing to add units to all the # polynomical coefficients x = x_quant.value # check that the wavenumbers are within the defined range _test_valid_x_range(x, self.x_range, "WG00") # setup the ax vectors x = np.atleast_1d(x) alb_MW = np.array( [ 0.320, 0.409, 0.481, 0.526, 0.542, 0.536, 0.503, 0.432, 0.371, 0.389, 0.437, 0.470, 0.486, 0.499, 0.506, 0.498, 0.502, 0.491, 0.481, 0.500, 0.473, 0.457, 0.448, 0.424, 0.400, ] ) alb_SMC = np.array( [ 0.400, 0.449, 0.473, 0.494, 0.508, 0.524, 0.529, 0.528, 0.523, 0.520, 0.516, 0.511, 0.505, 0.513, 0.515, 0.498, 0.494, 0.489, 0.484, 0.493, 0.475, 0.465, 0.439, 0.417, 0.400, ] ) if self.dust_type == "smc": albedo = alb_SMC elif self.dust_type == "mw": albedo = alb_MW tab = tabular_model(1, name="Tabular1D") alb_fit = tab( self.wvl_grid, lookup_table=albedo, name="albedo", bounds_error=False, fill_value=None, method="linear", ) xinterp = 1e4 * x return alb_fit(xinterp) def get_scattering_phase_function(self, x): """ Return the scattering phase function in function of wavelength for the corresponding dust type (SMC or MW). The scattering phase function gives the angle at which the photon scatters. Parameters ---------- x: float expects either x in units of wavelengths or frequency or assumes wavelengths in [micron] internally microns are used Returns ------- g: np array (float) g(x) scattering phase function Raises ------ ValueError Input x values outside of defined range """ # convert to wavenumbers (1/micron) if x input in units # otherwise, assume x in appropriate wavenumber units with u.add_enabled_equivalencies(u.spectral()): x_quant = u.Quantity(x, u.micron, dtype=np.float64) # strip the quantity to avoid needing to add units to all the # polynomical coefficients x = x_quant.value # check that the wavenumbers are within the defined range _test_valid_x_range(x, self.x_range, "WG00") # setup the ax vectors x = np.atleast_1d(x) g_MW = np.array( [ 0.800, 0.783, 0.767, 0.756, 0.745, 0.736, 0.727, 0.720, 0.712, 0.707, 0.702, 0.697, 0.691, 0.685, 0.678, 0.646, 0.624, 0.597, 0.563, 0.545, 0.533, 0.511, 0.480, 0.445, 0.420, ] ) g_SMC = np.array( [ 0.800, 0.783, 0.767, 0.756, 0.745, 0.736, 0.727, 0.720, 0.712, 0.707, 0.702, 0.697, 0.691, 0.685, 0.678, 0.646, 0.624, 0.597, 0.563, 0.545, 0.533, 0.511, 0.480, 0.445, 0.420, ] ) if self.dust_type == "smc": g = g_SMC elif self.dust_type == "mw": g = g_MW tab = tabular_model(1, name="Tabular1D") g_fit = tab( self.wvl_grid, lookup_table=g, name="albedo", bounds_error=False, fill_value=None, method="linear", ) xinterp = 1e4 * x return g_fit(xinterp)
dust_attenuation/radiative_transfer.py
19,491
Attenuation curve of Witt & Gordon (2000) Parameters ---------- tau_v: float optical depth in V band Raises ------ InputParameterError Input Av values outside of defined range Notes ----- From Witt & Gordon (2000, ApJ, Volume 528, pp. 799-816) Example: .. plot:: :include-source: import numpy as np import matplotlib.pyplot as plt import astropy.units as u from dust_attenuation.radiative_transfer import WG00 fig, ax = plt.subplots(1,2, figsize=(10,6)) # generate the curves and plot them # Use 1/microns for a better sampling x = np.arange(0.35,10.0,0.1)/u.micron x_Vband = 0.55 # microns tau_Vs = [0.25,0.4,1.1,17.0,46.0] for tau_V in tau_Vs[::-1]: att_model = WG00(tau_V = tau_V, geometry = 'cloudy', dust_type = 'mw', dust_distribution = 'clumpy') ax[0].plot(x,att_model(1/x),label=r'$\tau_V$ = %.2f mag' % (tau_V)) ax[1].plot(x,att_model(1/x)/att_model(x_Vband), label=r'$\tau_V$ = %.2f mag' % (tau_V)) ax[0].set_xlabel(r'$x$ [$\mu m^{-1}$]') ax[0].set_ylabel(r'$Att(x)$ [mag]') ax[1].set_xlabel(r'$x$ [$\mu m^{-1}$]') ax[1].set_ylabel(r'$Att(x)/Att_V$') ax[0].legend(loc='best') ax[1].legend(loc='best') fig.suptitle(r'CLOUDY / MW / clumpy model',size=15) plt.tight_layout() fig.subplots_adjust(top=0.88) plt.show() Load the attenuation curves for a given geometry, dust type and dust distribution. Parameters ---------- tau_V: float optical depth in V band geometry: string 'shell', 'cloudy' or 'dusty' dust_type: string 'mw' or 'smc' dust_distribution: string 'homogeneous' or 'clumpy' Returns ------- Attx: np array (float) Att(x) attenuation curve [mag] WG00 function Parameters ---------- x: float expects either x in units of wavelengths or frequency or assumes wavelengths in [micron] internally microns are used tau_V: float optical depth in V band Returns ------- Attx: np array (float) Att(x) attenuation curve [mag] Raises ------ ValueError Input x values outside of defined range Return the albedo in function of wavelength for the corresponding dust type (SMC or MW). The albedo gives the probability a photon is scattered from a dust grain. Parameters ---------- x: float expects either x in units of wavelengths or frequency or assumes wavelengths in [micron] internally microns are used Returns ------- albedo: np array (float) alb(x) albedo Raises ------ ValueError Input x values outside of defined range Return the extinction at a given wavelength and V-band optical depth. Parameters ---------- x: float expects either x in units of wavelengths or frequency or assumes wavelengths in [micron] internally microns are used tau_V: float optical depth in V band Returns ------- ext: np array (float) ext(x) extinction curve [mag] Raises ------ ValueError Input x values outside of defined range Return the direct attenuated stellar flux fraction at a given wavelength and V-band optical depth. Parameters ---------- x: float expects either x in units of wavelengths or frequency or assumes wavelengths in [micron] internally microns are used tau_V: float optical depth in V band Returns ------- fsca: np array (float) fsca(x) scattered flux fraction Raises ------ ValueError Input x values outside of defined range Return the total escaping flux fraction at a given wavelength and V-band optical depth. 
Parameters ---------- x: float expects either x in units of wavelengths or frequency or assumes wavelengths in [micron] internally microns are used tau_V: float optical depth in V band Returns ------- fsca: np array (float) fsca(x) scattered flux fraction Raises ------ ValueError Input x values outside of defined range Return the scattered flux fraction at a given wavelength and V-band optical depth. Parameters ---------- x: float expects either x in units of wavelengths or frequency or assumes wavelengths in [micron] internally microns are used tau_V: float optical depth in V band Returns ------- fsca: np array (float) fsca(x) scattered flux fraction Raises ------ ValueError Input x values outside of defined range Return the scattering phase function in function of wavelength for the corresponding dust type (SMC or MW). The scattering phase function gives the angle at which the photon scatters. Parameters ---------- x: float expects either x in units of wavelengths or frequency or assumes wavelengths in [micron] internally microns are used Returns ------- g: np array (float) g(x) scattering phase function Raises ------ ValueError Input x values outside of defined range -*- coding: utf-8 -*- Ensure strings are lower cases Column names number of lines between 2 models Convert to np.array and take transpose to have (wvl, tau_V) wavelength grid. It is the same for all the models Grid for the optical depth Create a 2D tabular model for tau_att and all flux fraction Values corresponding to the x and y grid points In Python 2: super(WG00, self) In Python 3: super() but super(WG00, self) still works convert to wavenumbers (1/micron) if x input in units otherwise, assume x in appropriate wavenumber units strip the quantity to avoid needing to add units to all the polynomical coefficients check that the wavenumbers are within the defined range setup the ax vectors Convert optical depth to attenuation convert to wavenumbers (1/micron) if x input in units otherwise, assume x in appropriate wavenumber units strip the quantity to avoid needing to add units to all the polynomical coefficients check that the wavenumbers are within the defined range setup the ax vectors convert to wavenumbers (1/micron) if x input in units otherwise, assume x in appropriate wavenumber units strip the quantity to avoid needing to add units to all the polynomical coefficients check that the wavenumbers are within the defined range setup the ax vectors convert to wavenumbers (1/micron) if x input in units otherwise, assume x in appropriate wavenumber units strip the quantity to avoid needing to add units to all the polynomical coefficients check that the wavenumbers are within the defined range setup the ax vectors convert to wavenumbers (1/micron) if x input in units otherwise, assume x in appropriate wavenumber units strip the quantity to avoid needing to add units to all the polynomical coefficients check that the wavenumbers are within the defined range setup the ax vectors convert to wavenumbers (1/micron) if x input in units otherwise, assume x in appropriate wavenumber units strip the quantity to avoid needing to add units to all the polynomical coefficients check that the wavenumbers are within the defined range setup the ax vectors convert to wavenumbers (1/micron) if x input in units otherwise, assume x in appropriate wavenumber units strip the quantity to avoid needing to add units to all the polynomical coefficients check that the wavenumbers are within the defined range setup the ax vectors
7,138
en
0.565069
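A short usage sketch following the class docstring above: evaluate the clumpy Milky Way dust in the 'cloudy' geometry at a few wavelengths (in microns, inside the model's 0.1-3.0 micron range) and query some of the radiative-transfer fractions. tau_V = 1.1 is one of the values used in the docstring plot.

import numpy as np
import astropy.units as u
from dust_attenuation.radiative_transfer import WG00

att_model = WG00(tau_V=1.1, geometry='cloudy', dust_type='mw',
                 dust_distribution='clumpy')

wavelengths = np.array([0.15, 0.3, 0.55, 1.0, 2.0]) * u.micron

print(att_model(wavelengths))               # attenuation Att(x) in magnitudes
print(att_model.get_fesc(wavelengths, 1.1)) # total escaping flux fraction
print(att_model.get_albedo(wavelengths))    # grain albedo for the MW dust type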
""" Extract CLOS / NLOS lookup. Written by Ed Oughton. March 2021 """ import os import configparser import json import math import glob import random import numpy as np import pandas as pd import geopandas as gpd import pyproj from shapely.geometry import Point, Polygon, box, LineString from shapely.ops import transform import rasterio # import networkx as nx from rasterio.warp import calculate_default_transform, reproject, Resampling from rasterio.mask import mask from rasterstats import zonal_stats, gen_zonal_stats from tqdm import tqdm grass7bin = r'"C:\Program Files\GRASS GIS 7.8\grass78.bat"' os.environ['GRASSBIN'] = grass7bin os.environ['PATH'] += ';' + r"C:\Program Files\GRASS GIS 7.8\lib" from grass_session import Session from grass.script import core as gcore CONFIG = configparser.ConfigParser() CONFIG.read(os.path.join(os.path.dirname(__file__), "script_config.ini")) BASE_PATH = CONFIG["file_locations"]["base_path"] DATA_RAW = os.path.join(BASE_PATH, "raw") DATA_INTERMEDIATE = os.path.join(BASE_PATH, "intermediate") DATA_PROCESSED = os.path.join(BASE_PATH, "processed") def load_raster_tile_lookup(iso3): """ Load in the preprocessed raster tile lookup. Parameters ---------- iso3 : string Country iso3 code. Returns ------- lookup : dict A lookup table containing raster tile boundary coordinates as the keys, and the file paths as the values. """ path = os.path.join(DATA_INTERMEDIATE, iso3, 'raster_lookup.csv') data = pd.read_csv(path) data = data.to_records('dicts') lookup = {} for item in data: coords = (item['x1'], item['y1'], item['x2'], item['y2']) lookup[coords] = item['path'] return lookup def generate_grid(iso3, side_length): """ Generate a spatial grid for the chosen country. """ directory = os.path.join(DATA_INTERMEDIATE, iso3, 'grid') if not os.path.exists(directory): os.makedirs(directory) filename = 'grid_{}_{}_km.shp'.format(side_length, side_length) path_output = os.path.join(directory, filename) if os.path.exists(path_output): return filename = 'national_outline.shp' path = os.path.join(DATA_INTERMEDIATE, iso3, filename) country_outline = gpd.read_file(path, crs="epsg:4326") country_outline.crs = "epsg:4326" country_outline = country_outline.to_crs("epsg:3857") xmin, ymin, xmax, ymax = country_outline.total_bounds polygons = manually_create_grid( xmin, ymin, xmax, ymax, side_length, side_length ) grid = gpd.GeoDataFrame({'geometry': polygons}, crs="epsg:3857")#[:100] intersection = gpd.overlay(grid, country_outline, how='intersection') intersection.crs = "epsg:3857" intersection['area_km2'] = intersection['geometry'].area / 1e6 intersection = intersection.to_crs("epsg:4326") intersection.to_file(path_output, crs="epsg:4326") return intersection def manually_create_grid(xmin, ymin, xmax, ymax, length, wide): """ """ cols = list(range(int(np.floor(xmin)), int(np.ceil(xmax - int(wide))), int(wide))) rows = list(range(int(np.floor(ymin)), int(np.ceil(ymax)), int(length))) polygons = [] for x in cols: for y in rows: polygons.append( Polygon([(x, y), (x+wide, y), (x+wide, y-length), (x, y-length)]) ) return polygons def find_tile(polygon, tile_lookup): """ Parameters ---------- polygon : tuple The bounds of the modeling region. tile_lookup : dict A lookup table containing raster tile boundary coordinates as the keys, and the file paths as the values. Return ------ output : list Contains the file path to the correct raster tile. Note: only the first element is returned and if there are more than one paths, an error is returned. 
""" output = [] poly_bbox = box(polygon[0], polygon[1], polygon[2], polygon[3]) for key, value in tile_lookup.items(): bbox = box(key[0], key[1], key[2], key[3]) if bbox.intersects(poly_bbox): output.append(value) if len(output) == 1: return output[0] elif len(output) > 1: print('Problem with find_tile returning more than 1 path') return output[0] else: print('Problem with find_tile: Unable to find raster path') def add_id_range_data_to_grid(iso3, tile_lookup, side_length): """ Query the Digital Elevation Model to get an estimated interdecile range for each grid square. """ directory = os.path.join(DATA_INTERMEDIATE, iso3, 'grid') filename = 'grid_final.shp' path_output = os.path.join(directory, filename) if os.path.exists(path_output): return gpd.read_file(path_output, crs='epsg:4328') filename = 'grid_{}_{}_km.shp'.format(side_length, side_length) path = os.path.join(directory, filename) grid = gpd.read_file(path, crs='epsg:4328') output = [] for idx, grid_tile in grid.iterrows(): path_input = find_tile( grid_tile['geometry'].bounds, tile_lookup ) stats = next(gen_zonal_stats( grid_tile['geometry'], path_input, add_stats={ 'interdecile_range': interdecile_range }, nodata=0 )) id_range_m = stats['interdecile_range'] output.append({ 'type': 'Feature', 'geometry': grid_tile['geometry'], 'properties': { 'id_range_m': id_range_m, 'area_km2': grid_tile['area_km2'], # 'pop_density_km2': grid_tile['pop_densit'], # 'population': grid_tile['population'], } }) output = gpd.GeoDataFrame.from_features(output, crs='epsg:4326') output = output.replace([np.inf, -np.inf], np.nan) output = output[output.geometry.notnull()] output.to_file(path_output, crs="epsg:4326") return output def interdecile_range(x): """ Get range between bottom 10% and top 10% of values. This is from the Longley-Rice Irregular Terrain Model. Code here: https://github.com/edwardoughton/itmlogic Paper here: https://joss.theoj.org/papers/10.21105/joss.02266.pdf Parameters ---------- x : list Terrain profile values. Returns ------- interdecile_range : int The terrain irregularity parameter. 
""" q90, q10 = np.percentile(x, [90, 10]) interdecile_range = int(round(q90 - q10, 0)) return interdecile_range def estimate_terrain_deciles(grid): """ """ # terrain_lookup = grid.loc[grid['area_km2'] > 1000].reset_index() terrain_lookup = grid terrain_lookup['decile'] = pd.qcut(terrain_lookup['id_range_m'], 10, labels=False) terrain_lookup = terrain_lookup[['decile', 'id_range_m']] terrain_lookup = terrain_lookup.groupby(['decile']).min() terrain_lookup = terrain_lookup['id_range_m'].to_list() return terrain_lookup def select_grid_sampling_areas(iso3, grid, lut): """ """ for i in range(1, 11): if i == 1: grid.loc[(grid['id_range_m'] < lut[1]), 'decile'] = str(i) value_name = '0-{}'.format(str(lut[1])) grid.loc[(grid['id_range_m'] < lut[1]), 'value'] = value_name elif i <= 9: grid.loc[( grid['id_range_m'] >= lut[i-1]) & (grid['id_range_m'] <= lut[i]), 'decile'] = str(i) value_name = '{}-{}'.format(str(lut[i-1]), str(lut[i])) grid.loc[( grid['id_range_m'] >= lut[i-1]) & (grid['id_range_m'] <= lut[i]), 'value'] = value_name elif i == 10: grid.loc[(grid['id_range_m'] > lut[i-1]), 'decile'] = str(i) value_name = '>{}'.format(str(lut[i-1])) grid.loc[(grid['id_range_m'] > lut[i-1]), 'value'] = value_name else: continue np.random.seed(2) grid = grid.loc[grid['area_km2'] > 2400].reset_index() sampling_areas = grid.groupby(['decile']).apply(lambda x: x.sample(1)).reset_index(drop=True) directory = os.path.join(DATA_INTERMEDIATE, iso3, 'sampling_area') if not os.path.exists(directory): os.makedirs(directory) sampling_areas.to_file(os.path.join(directory, 'sampling_areas.shp')) sampling_areas.crs = 'epsg:4326' return sampling_areas def get_points(iso3, sampling_areas, tile_lookup, point_spacing): """ """ directory = os.path.join(DATA_INTERMEDIATE, iso3, 'sampling_points') if not os.path.exists(directory): os.makedirs(directory) sampling_areas = sampling_areas.to_crs("epsg:3857") for idx, sampling_area in sampling_areas.iterrows(): lon = sampling_area['geometry'].representative_point().coords[0][0] lat = sampling_area['geometry'].representative_point().coords[0][1] filename = "{}-{}".format(lon, lat) xmin, ymin, xmax, ymax = sampling_area['geometry'].bounds polygons = manually_create_grid(xmin, ymin, xmax, ymax, point_spacing, point_spacing) #make geopandas dataframes grid_sample = gpd.GeoDataFrame({'geometry': polygons}, crs="epsg:3857") boundary = gpd.GeoDataFrame({'geometry': sampling_area['geometry']}, crs="epsg:3857", index=[0]) #only get points within the tile boundary grid_sample = gpd.overlay(grid_sample, boundary, how='intersection') grid_sample = grid_sample.to_crs("epsg:4326") #convert to lon lat ##get the highest points in each grid sample tile sampling_points = find_points(iso3, grid_sample, tile_lookup, filename)#[:1] ##convert to projected for viewsheding sampling_points = sampling_points.to_crs("epsg:4326") path_output = os.path.join(directory, filename + '.shp') sampling_points.to_file(path_output) return sampling_points def find_points(iso3, grid_sample, tile_lookup, filename): """ """ filename_2 = filename + '.shp' directory = os.path.join(DATA_INTERMEDIATE, iso3, 'sampling_points') path_output = os.path.join(directory, filename_2) if os.path.exists(path_output): return gpd.read_file(path_output, crs='epsg:4326') output = [] for idx, grid_tile in grid_sample.iterrows(): min_x, min_y, max_x, max_y = grid_tile['geometry'].bounds geom = Point(random.uniform(min_x, max_x), random.uniform(min_y, max_y)) output.append({ 'type': 'Feature', 'geometry': geom, 'properties': { } }) output = 
gpd.GeoDataFrame.from_features(output, crs='epsg:4326') return output def generate_viewsheds(iso3, sampling_areas, sampling_points): """ """ sampling_areas = sampling_areas.to_crs("epsg:3857") #set output folder folder_out_viewsheds = os.path.join(DATA_INTERMEDIATE, iso3, 'viewsheds') if not os.path.exists(folder_out_viewsheds): os.makedirs(folder_out_viewsheds) for idx, sampling_area in tqdm(sampling_areas.iterrows(), total=sampling_areas.shape[0]): output = [] lon = sampling_area['geometry'].representative_point().coords[0][0] lat = sampling_area['geometry'].representative_point().coords[0][1] area_filename = "{}-{}".format(lon, lat) print('--Working on {}'.format(area_filename)) ##load sampling points directory = os.path.join(DATA_INTERMEDIATE, iso3, 'sampling_points') points = gpd.read_file(os.path.join(directory, area_filename + '.shp'))#[:2] ##convert to lon lat to get correct raster tile sampling_area_df = gpd.GeoDataFrame({'geometry': sampling_area['geometry']}, crs="epsg:3857", index=[0]) sampling_area_df = sampling_area_df.to_crs("epsg:4326") for idx, item in sampling_area_df.iterrows(): #needs a loop because the data structure needs a series path_input = find_tile(item['geometry'].bounds, tile_lookup) for idx, point in tqdm(points.iterrows(), total=points.shape[0]): results = [] lon = point['geometry'].representative_point().coords[0][0] lat = point['geometry'].representative_point().coords[0][1] filename2 = "{}-{}".format(lon, lat) path_output = os.path.join(folder_out_viewsheds, filename2) file_path = os.path.join(path_output, 'location', 'PERMANENT', 'viewsheds', filename2 + '.tif') x = point['geometry'].coords[0][0] y = point['geometry'].coords[0][1] if not os.path.exists(file_path): try: viewshed((x, y), path_input, path_output, filename2, 45000, 'epsg:4326') except: print('--Viewshed already exists') seen = set() for idx, node in tqdm(points.iterrows(), total=points.shape[0]): x2 = node['geometry'].coords[0][0] y2 = node['geometry'].coords[0][1] link = '{}_{}_{}_{}'.format(x, y, x2, y2) if link in seen: continue dist = find_distance((x, y), (x2, y2)) if dist < 10: continue los = check_los(file_path, (x2, y2)) results.append({ 'sampling_area': area_filename, 'point_id': filename2, 'node_id': '{}_{}'.format(x2, y2), 'distance': dist, 'id_range_m': sampling_area['id_range_m'], 'decile': sampling_area['decile'], 'los': los, }) seen.add('{}_{}_{}_{}'.format(x, y, x2, y2)) seen.add('{}_{}_{}_{}'.format(x2, y2, x, y)) output = output + results output = pd.DataFrame(output) folder = os.path.join(DATA_INTERMEDIATE, iso3, 'los_results') if not os.path.exists(folder): os.makedirs(folder) output.to_csv(os.path.join(folder, area_filename + '.csv'), index=False) def viewshed(point, path_input, path_output, tile_name, max_distance, crs): """ Perform a viewshed using GRASS. Parameters --------- point : tuple The point being queried. tile_lookup : dict A lookup table containing raster tile boundary coordinates as the keys, and the file paths as the values. path_output : string The directory path for the output folder. tile_name : string The name allocated to the viewshed tile. max_distance : int The maximum distance a path can be. crs : string The coordinate reference system in use. Returns ------- grid : dataframe A geopandas dataframe containing the created grid. 
""" with Session(gisdb=path_output, location="location", create_opts=crs): # print('parse command') # print(gcore.parse_command("g.gisenv", flags="s"))#, set="DEBUG=3" # print('r.external') # now link a GDAL supported raster file to a binary raster map layer, # from any GDAL supported raster map format, with an optional title. # The file is not imported but just registered as GRASS raster map. gcore.run_command('r.external', input=path_input, output=tile_name, overwrite=True) # print('r.external.out') #write out as geotiff gcore.run_command('r.external.out', directory='viewsheds', format="GTiff") # print('r.region') #manage the settings of the current geographic region gcore.run_command('g.region', raster=tile_name) # print('r.viewshed') #for each point in the output that is NULL: No LOS gcore.run_command('r.viewshed', #flags='e', input=tile_name, output='{}.tif'.format(tile_name), coordinate= [point[0], point[1]], observer_elevation=30, target_elevation=30, memory=5000, overwrite=True, quiet=True, max_distance=max_distance, # verbose=True ) def check_los(path_input, point): """ Find potential LOS high points. Parameters ---------- path_input : string File path for the digital elevation raster tile. point : tuple Coordinate point being queried. Returns ------- los : string The Line of Sight (los) of the path queried. """ with rasterio.open(path_input) as src: x = point[0] y = point[1] for val in src.sample([(x, y)]): if np.isnan(val): # print('is nan: {} therefore nlos'.format(val)) los = 'nlos' return los else: # print('is not nan: {} therefore los'.format(val)) los ='clos' return los def find_distance(point1, point2): """ """ point1 = Point(point1) point1 = gpd.GeoDataFrame({'geometry': [point1]}, index=[0]) point1 = point1.set_crs('epsg:4326') point1 = point1.to_crs('epsg:3857') point2 = Point(point2) point2 = gpd.GeoDataFrame({'geometry': [point2]}, index=[0]) point2 = point2.set_crs('epsg:4326') point2 = point2.to_crs('epsg:3857') dist = LineString([ (point1['geometry'][0].coords[0][0], point1['geometry'][0].coords[0][1]), (point2['geometry'][0].coords[0][0], point2['geometry'][0].coords[0][1]) ]).length return dist def collect_results(iso3, sampling_areas): """ """ sampling_areas = sampling_areas.to_crs("epsg:3857")#[:1] output = [] #set output folder for idx, sampling_area in sampling_areas.iterrows(): lon = sampling_area['geometry'].representative_point().coords[0][0] lat = sampling_area['geometry'].representative_point().coords[0][1] filename = "{}-{}".format(lon, lat) directory = os.path.join(DATA_INTERMEDIATE, iso3, 'los_results') data = pd.read_csv(os.path.join(directory, filename + '.csv')) seen = set() interval_size = 2500 for distance_lower in range(0, 45000, interval_size): distance_upper = distance_lower + interval_size clos = 0 nlos = 0 for idx, item in data.iterrows(): path_id = '{}_{}_{}'.format( item['point_id'], item['node_id'], item['distance'] ) if not path_id in seen: if item['distance'] < distance_upper: if item['los'] == 'clos': clos += 1 elif item['los'] == 'nlos': nlos += 1 else: print('Did not recognize los') seen.add(path_id) if clos > 0: clos_probability = (clos / (clos + nlos)) else: clos_probability = 'no data' if nlos > 0: nlos_probability = (nlos / (clos + nlos)) else: nlos_probability = 'no data' output.append({ 'decile': item['decile'], 'id_range_m': item['id_range_m'], 'distance_lower': distance_lower, 'distance_upper': distance_upper, 'total_samples': clos + nlos, 'clos_probability': clos_probability, 'nlos_probability': nlos_probability, }) output = 
pd.DataFrame(output) folder = os.path.join(DATA_INTERMEDIATE, iso3) output.to_csv(os.path.join(folder, 'los_lookup.csv'), index=False) if __name__ == "__main__": countries = [ ("PER", 5e4, 25e2), ("IDN", 5e4, 25e2), ] for country in countries: iso3 = country[0] side_length = country[1] point_spacing = country[2] ##Load the raster tile lookup tile_lookup = load_raster_tile_lookup(iso3) ##Generate grids generate_grid(iso3, side_length) #1e5 # ##Add interdecile range to grid grid = add_id_range_data_to_grid(iso3, tile_lookup, side_length) ##Get the terrain deciles terrain_values = estimate_terrain_deciles(grid) ##Get the grid tile samples sampling_areas = select_grid_sampling_areas(iso3, grid, terrain_values)#[:1] ##Generate the terrain lookup sampling_points = get_points(iso3, sampling_areas, tile_lookup, point_spacing)#[:1] ##Process viewsheds generate_viewsheds(iso3, sampling_areas, sampling_points) ## Collect results collect_results(iso3, sampling_areas)
scripts/los.py
21,121
Query the Digital Elevation Model to get an estimated interdecile range for each grid square. Find potential LOS high points. Parameters ---------- path_input : string File path for the digital elevation raster tile. point : tuple Coordinate point being queried. Returns ------- los : string The Line of Sight (los) of the path queried. Parameters ---------- polygon : tuple The bounds of the modeling region. tile_lookup : dict A lookup table containing raster tile boundary coordinates as the keys, and the file paths as the values. Return ------ output : list Contains the file path to the correct raster tile. Note: only the first element is returned and if there are more than one paths, an error is returned. Generate a spatial grid for the chosen country. Get range between bottom 10% and top 10% of values. This is from the Longley-Rice Irregular Terrain Model. Code here: https://github.com/edwardoughton/itmlogic Paper here: https://joss.theoj.org/papers/10.21105/joss.02266.pdf Parameters ---------- x : list Terrain profile values. Returns ------- interdecile_range : int The terrain irregularity parameter. Load in the preprocessed raster tile lookup. Parameters ---------- iso3 : string Country iso3 code. Returns ------- lookup : dict A lookup table containing raster tile boundary coordinates as the keys, and the file paths as the values. Perform a viewshed using GRASS. Parameters --------- point : tuple The point being queried. tile_lookup : dict A lookup table containing raster tile boundary coordinates as the keys, and the file paths as the values. path_output : string The directory path for the output folder. tile_name : string The name allocated to the viewshed tile. max_distance : int The maximum distance a path can be. crs : string The coordinate reference system in use. Returns ------- grid : dataframe A geopandas dataframe containing the created grid. Extract CLOS / NLOS lookup. Written by Ed Oughton. March 2021 import networkx as nx[:100] 'pop_density_km2': grid_tile['pop_densit'], 'population': grid_tile['population'], terrain_lookup = grid.loc[grid['area_km2'] > 1000].reset_index()make geopandas dataframesonly get points within the tile boundaryconvert to lon latget the highest points in each grid sample tile[:1]convert to projected for viewshedingset output folderload sampling points[:2]convert to lon lat to get correct raster tileneeds a loop because the data structure needs a series print('parse command') print(gcore.parse_command("g.gisenv", flags="s")), set="DEBUG=3" print('r.external') now link a GDAL supported raster file to a binary raster map layer, from any GDAL supported raster map format, with an optional title. The file is not imported but just registered as GRASS raster map. print('r.external.out')write out as geotiff print('r.region')manage the settings of the current geographic region print('r.viewshed')for each point in the output that is NULL: No LOSflags='e', verbose=True print('is nan: {} therefore nlos'.format(val)) print('is not nan: {} therefore los'.format(val))[:1]set output folderLoad the raster tile lookupGenerate grids1e5 Add interdecile range to gridGet the terrain decilesGet the grid tile samples[:1]Generate the terrain lookup[:1]Process viewsheds Collect results
3,401
en
0.646248
# Generated by Django 3.1.4 on 2021-01-03 18:02

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        ('home', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Attribute',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200, null=True)),
                ('date_created', models.DateTimeField(auto_now_add=True)),
            ],
        ),
        migrations.CreateModel(
            name='Training',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200, null=True)),
                ('date_created', models.DateTimeField(auto_now_add=True)),
                ('result', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='home.lab')),
            ],
        ),
        migrations.CreateModel(
            name='TrainingValue',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('value', models.IntegerField(null=True)),
                ('date_created', models.DateTimeField(auto_now_add=True)),
                ('attribute_id', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='home.attribute')),
                ('training_id', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='home.training')),
            ],
        ),
    ]
home/migrations/0002_attribute_training_trainingvalue.py
1,732
Generated by Django 3.1.4 on 2021-01-03 18:02
45
en
0.658968
# This script is used to parse BOOST special function test data into something # we can easily import in numpy. import re import os # Where to put the data (directory will be created) DATA_DIR = 'scipy/special/tests/data/boost' # Where to pull out boost data BOOST_SRC = "boostmath/test" CXX_COMMENT = re.compile(r'^\s+//') DATA_REGEX = re.compile(r'^\s*/*\{*\s*SC_') ITEM_REGEX = re.compile(r'[+-]?\d*\.?\d+(?:[eE][+-]?\d+)?') HEADER_REGEX = re.compile( r'const boost::array\<boost::array\<.*, (\d+)\>, (\d+)\> ([a-zA-Z_\d]+)') IGNORE_PATTERNS = [ # Makes use of ldexp and casts "hypergeometric_1F1_big_double_limited.ipp", "hypergeometric_1F1_big_unsolved.ipp", # Makes use of numeric_limits and ternary operator "beta_small_data.ipp", # Doesn't contain any data "almost_equal.ipp", # Derivatives functions don't exist "bessel_y01_prime_data.ipp", "bessel_yn_prime_data.ipp", "sph_bessel_prime_data.ipp", "sph_neumann_prime_data.ipp", # Data files not needed by scipy special tests. "ibeta_derivative_", r"ellint_r[cdfjg]_[^d]", r"ellint_d2?_", "jacobi_", "heuman_lambda_", "hypergeometric_", "nct_", r".*gammap1m1_", "trig_", "powm1_data.ipp", ] def _raw_data(line): items = line.split(',') l = [] for item in items: m = ITEM_REGEX.search(item) if m: q = m.group(0) l.append(q) return l def parse_ipp_file(filename): print(filename) a = open(filename, 'r') lines = a.readlines() data = {} i = 0 while (i < len(lines)): line = lines[i] m = HEADER_REGEX.search(line) if m: d = int(m.group(1)) n = int(m.group(2)) print(f"d = {d}, n = {n}") cdata = [] i += 1 line = lines[i] # Skip comments while CXX_COMMENT.match(line): i += 1 line = lines[i] while DATA_REGEX.match(line): cdata.append(_raw_data(line)) i += 1 line = lines[i] # Skip comments while CXX_COMMENT.match(line): i += 1 line = lines[i] if not len(cdata) == n: raise ValueError("parsed data: %d, expected %d" % (len(cdata), n)) data[m.group(3)] = cdata else: i += 1 return data def dump_dataset(filename, data): fid = open(filename, 'w') try: for line in data: fid.write(f"{' '.join(line)}\n") finally: fid.close() def dump_datasets(filename): base, ext = os.path.splitext(os.path.basename(filename)) base += f'_{ext[1:]}' datadir = os.path.join(DATA_DIR, base) os.makedirs(datadir) datasets = parse_ipp_file(filename) for k, d in datasets.items(): print(k, len(d)) dfilename = os.path.join(datadir, k) + '.txt' dump_dataset(dfilename, d) if __name__ == '__main__': for filename in sorted(os.listdir(BOOST_SRC)): # Note: Misses data in hpp files (e.x. powm1_sqrtp1m1_test.hpp) if filename.endswith(".ipp"): if any(re.match(pattern, filename) for pattern in IGNORE_PATTERNS): continue path = os.path.join(BOOST_SRC, filename) print(f"================= {path} ===============") dump_datasets(path)
scipy/special/utils/convert.py
3,467
This script is used to parse BOOST special function test data into something we can easily import in numpy. Where to put the data (directory will be created) Where to pull out boost data Makes use of ldexp and casts Makes use of numeric_limits and ternary operator Doesn't contain any data Derivatives functions don't exist Data files not needed by scipy special tests. Skip comments Skip comments Note: Misses data in hpp files (e.x. powm1_sqrtp1m1_test.hpp)
459
en
0.868502
import json
import os
from time import sleep

import requests

import pyrominfo.pyrominfo.snes as snes
from shutil import copy

from pyrominfo.pyrominfo import nintendo64


def n64_info(filename):
    n64_parser = nintendo64.Nintendo64Parser()
    props = n64_parser.parse(filename)
    return props


def snes_info(filename):
    snes_parser = snes.SNESParser()
    props = snes_parser.parse(filename)
    return props


def get_console(argument):
    switcher = {
        'sfc': 'SNES',
        'smc': 'SNES',
        'md': '',
        'bin': '',
        'gb': 'GB',
        'gbc': 'GBC',
        'nes': 'NES',
        'z64': 'N64',
    }
    return switcher.get(argument)


def giant_bomb_request(title, api_key):
    headers = {'User-Agent': 'gripper'}
    params = {
        'resources': 'game',
        'query': title,
        'api_key': api_key,
        'format': 'json'
    }
    response = requests.get(url='http://www.giantbomb.com/api/search/', headers=headers, params=params)
    return json.loads(response.text)


def rip_game():
    while True:
        path = '/RETRODE'
        api_key = os.environ['api-key']
        files = os.listdir(path)
        files.remove('RETRODE.CFG')
        breakout = False
        console = get_console(files[0].split('.')[-1])
        filename = f'{path}/{files[0]}'
        if console == 'N64':
            rom_info = n64_info(filename)
        if console == 'SNES':
            rom_info = snes_info(filename)
        title = rom_info["title"]
        search_results = giant_bomb_request(title, api_key)
        for results in search_results['results']:
            if breakout is True:
                break
            aliases = str(results.get('aliases')).lower().splitlines()
            if title.lower() in aliases or title.lower() == results['name']:
                for platform in results['platforms']:
                    if platform['abbreviation'] == 'SNES':
                        # check for the same directory that gets created below
                        if not os.path.exists(f'./{title} - {rom_info["region"]}'):
                            os.mkdir(f'./{title} - {rom_info["region"]}')
                        for file in files:
                            destination_file = f'./{title} - {rom_info["region"]}/{title}.{file.split(".")[-1]}'
                            if not os.path.exists(destination_file):
                                copy(filename, destination_file)
                        breakout = True
                        break
        sleep(5)


# dont run code while testing container
if __name__ == '__main__':
    sleep(900)
    # rip_game()
main.py
2,598
dont run code while testing containerrip_game()
47
en
0.298762
import numpy as np import os import time import argparse import PruneAndSearch as algs def get_args(): parser = argparse.ArgumentParser ( prog='PruneAndSearch', description='Implementation of the Prune and Search Algorithm. ', usage='python main.py { --rand RAND | --file FILE | --list LIST | --test [--trial TRIAL] [--vals VALS] [--verb] } [--seed SEED]' ) parser.add_argument('-n', '--small', default=None, type=int, help='The N-th smallest element to find in the values. (default: {})'.format('MEDIAN')) parser.add_argument('-r', '--rand', default=None, type=int, help='Generate N random numbers in range 1 - 10,000. (default: {})'.format('DISABLED')) parser.add_argument('-f', '--file', default=None, help='Read in a list from a text file. (default: {})'.format('DISABLED')) parser.add_argument('-l', '--list', default=None, type=int, nargs='+', help='Provide input as a list from the command line. (default: {})'.format('DISABLED')) parser.add_argument('-x', '--seed', default=123, type=int, help='Seed for Numpy RNG. (default: {})'.format(123)) parser.add_argument('-t', '--test', default=False, action='store_true', help='Perform a timed test, random trials T times. (default: {})'.format('DISABLED')) parser.add_argument('-T', '--trial', default=1000, type=int, help='Number of timed trials to conduct. (default: {})'.format(1000)) parser.add_argument('-v', '--vals', default=100, type=int, help='Number of random values to during testing. (default: {})'.format(100)) parser.add_argument('-V', '--verb', default=False, action='store_true', help='Verbose output. (default: {})'.format('DISABLED')) args = parser.parse_args() count = 0 if args.rand != None: count += 1 if args.file != None: count += 1 if args.list != None: count += 1 if args.test: count += 1 if count > 1: print("\n[ERROR] Too many arguments provided!!\n") if count == 0: print("\n[ERROR] No arguments provided!!\n") if count != 1: parser.print_help() print("\n Please provide the program with an argument using one of the following:\n") print("\t python main.py --rand 20") print("\t python main.py --file a.data") print("\t python main.py --list 1 2 3 4 5 6 7 8") print("\t python main.py --test --trial 300 --vals 100 --verb --seed 123") print(" ") exit() return args def get_list(args): # Simple getter function to get some list # based on the arguments passed in. if args.rand != None: values = np.random.randint(1, 10000, size=args.rand) print("Generated {} random values between 1 - 10,000.".format(args.rand)) return values if args.file != None: if not os.path.exists(args.file): print("[ERROR] File ``{}`` does not exist!!".format(args.file)) print("\t Please provide the path to a file.") exit() values = np.loadtxt(args.file, dtype=np.int32) return values if args.list != None: values = np.asarray(args.list, dtype=np.int32) return values def test_algorithm(seed, numTrials=1000, numVals=100, maxVal=10000, verbose=True): # Run a series of trials on both algorithms. numVals = int(numVals) # 1e6 maxVal = int(maxVal) # 1e10 if verbose: print("\n") print(" -- Prune and Search Algorithm -- ") print(" ================================ ") print(" Random Numbers Seed = {} ".format(seed) ) print(" Number of Trials = {} ".format(numTrials)) print(" Number of Values in List = {} ".format(numVals) ) print(" Maximum Value in List = {} ".format(maxVal) ) print("\n") # Seed The first trial for consistency. np.random.seed( seed ) # Keep a buffer of the returned finds for later comparison. SortAndSearchAnsBuffer = [] SortAndSearchTimeBuffer = [] # Begin the trials! 
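    # Time the naive SortAndSearch baseline (the O(n log n) sort-based selection
    # referenced in main() below) over numTrials random lists, so its mean and
    # standard deviation can be compared against PruneAndSearch afterwards.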
print("Beginning {} Trial on {} elements for Sort And Search . . . ".format(numTrials, numVals), end='', flush=True) for _ in range(numTrials): randomList = np.random.randint(maxVal, size=numVals) findVal = np.random.randint(1, numVals+1) startTime = time.time() ansVal = algs.SortAndSearch(randomList, findVal) endTime = time.time() SortAndSearchAnsBuffer.append(ansVal) SortAndSearchTimeBuffer.append( endTime - startTime ) print("\u0394 : {:.4f}, \u03bc : {:.6f} \u00B1 {:.6f} ".format( np.sum( SortAndSearchTimeBuffer ), np.mean( SortAndSearchTimeBuffer ), np.std( SortAndSearchTimeBuffer ) )) # Seed The first trial for consistency. np.random.seed( seed ) # Keep a buffer of the returned finds for later comparison. PruneAndSearchAnsBuffer = [] PruneAndSearchTimeBuffer = [] # Begin the trials! print("Beginning {} Trial on {} elements for Prune And Search . . . ".format(numTrials, numVals), end='', flush=True) for _ in range(numTrials): randomList = np.random.randint(maxVal, size=numVals) findVal = np.random.randint(1, numVals+1) startTime = time.time() ansVal = algs.PruneAndSearch(randomList, findVal) endTime = time.time() PruneAndSearchAnsBuffer.append(ansVal) PruneAndSearchTimeBuffer.append( endTime - startTime ) print("\u0394 : {:.4f}, \u03bc : {:.6f} \u00B1 {:.6f} ".format( np.sum( PruneAndSearchTimeBuffer ), np.mean( PruneAndSearchTimeBuffer ), np.std( PruneAndSearchTimeBuffer ) )) #for a,b in zip(SortAndSearchAnsBuffer, PruneAndSearchAnsBuffer): # print(a, b, " " if a == b else "\t!!X!!") print("\nDid the Algorithms find the same solutions? ==> {}\n".format(PruneAndSearchAnsBuffer == SortAndSearchAnsBuffer)) return def main(): # Fetch Arguments. args = get_args() # Seed the RNG. np.random.seed(args.seed) # Perform a timed trial and return. if args.test: test_algorithm(args.seed, numTrials=args.trial, numVals=args.vals, verbose=args.verb) return # From the args get the list. values = get_list(args) # Sent the n-value to find, median if small was not set. findVal = args.small if args.small != None else len(values) // 2 print("\n") print(" -- Prune and Search Algorithm -- ") print(" ================================ ") print(" Find The {}-Smallest Value ".format(findVal)) print(" In The List = ") elPerRow = 5 for idx in range(0, len(values), elPerRow): print(" ", *values[ idx : idx+elPerRow ]) print("\n") # Naive solution in O( n log n ). print("Beginning Sort And Search . . . ", end='', flush=True) startTime = time.time() ansVal_A = algs.SortAndSearch(values, findVal) endTime = time.time() print("\u0394 : {:.6f}".format( endTime - startTime )) print("Beginning Prune And Search . . . ", end='', flush=True) startTime = time.time() ansVal_B = algs.PruneAndSearch(values, findVal) endTime = time.time() print("\u0394 : {:.6f}".format( endTime - startTime )) print("\nDid the Algorithms find the same solutions? ==> {}\n".format(ansVal_A == ansVal_B)) print("The {}-Smallest Value is {}".format(findVal, ansVal_A)) return if __name__ == '__main__': main()
main.py
7,915
Simple getter function to get some list based on the arguments passed in. Run a series of trials on both algorithms. 1e6 1e10 Seed The first trial for consistency. Keep a buffer of the returned finds for later comparison. Begin the trials! Seed The first trial for consistency. Keep a buffer of the returned finds for later comparison. Begin the trials!for a,b in zip(SortAndSearchAnsBuffer, PruneAndSearchAnsBuffer): print(a, b, " " if a == b else "\t!!X!!") Fetch Arguments. Seed the RNG. Perform a timed trial and return. From the args get the list. Sent the n-value to find, median if small was not set. Naive solution in O( n log n ).
642
en
0.878793
# Copyright 2017 Square, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import pylink.jlock as jlock import mock import errno import os import unittest class TestJLock(unittest.TestCase): """Tests the ``jlock`` submodule.""" def setUp(self): """Called before each test. Performs setup. Args: self (TestJLock): the ``TestJLock`` instance Returns: ``None`` """ assertRaisesRegexp = getattr(self, 'assertRaisesRegexp', None) self.assertRaisesRegexp = getattr(self, 'assertRaisesRegex', assertRaisesRegexp) def tearDown(self): """Called after each test. Performs teardown. Args: self (TestJLock): the ``TestJLock`` instance Returns: ``None`` """ pass @mock.patch('tempfile.tempdir', new='tmp') def test_jlock_init_and_delete(self): """Tests initialization and deleting a ``JLock``. Args: self (TestJLock): the ``TestJLock`` instance Returns: ``None`` """ serial_no = 0xdeadbeef lock = jlock.JLock(serial_no) lock.release = mock.Mock() del lock @mock.patch('tempfile.tempdir', new='tmp') @mock.patch('os.close') @mock.patch('os.path.exists') @mock.patch('os.open') @mock.patch('os.write') @mock.patch('os.remove') @mock.patch('pylink.jlock.psutil') @mock.patch('pylink.jlock.open') def test_jlock_acquire_exists(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close): """Tests trying to acquire when the lock exists for an active process. Args: self (TestJLock): the ``TestJLock`` instance mock_open (Mock): mocked built-in open method mock_util (Mock): mocked ``psutil`` module mock_rm (Mock): mocked os remove method mock_wr (Mock): mocked os write method mock_op (Mock): mocked os open method mock_exists (Mock): mocked path exist method mock_close (Mock): mocked os file close method Returns: ``None`` """ pid = 42 serial_no = 0xdeadbeef mock_open.side_effect = [ mock.mock_open(read_data='%s\n' % pid).return_value, ] mock_exists.side_effect = [True, True] mock_util.pid_exists.return_value = True mock_op.side_effect = [OSError(errno.EEXIST, '')] lock = jlock.JLock(serial_no) lock.release = mock.Mock() self.assertFalse(lock.acquired) self.assertFalse(lock.acquire()) self.assertFalse(lock.acquired) mock_open.assert_called_once() mock_util.pid_exists.assert_called_with(pid) mock_op.assert_called_once() mock_rm.assert_not_called() mock_wr.assert_not_called() @mock.patch('tempfile.tempdir', new='tmp') @mock.patch('os.close') @mock.patch('os.path.exists') @mock.patch('os.open') @mock.patch('os.write') @mock.patch('os.remove') @mock.patch('pylink.jlock.psutil') @mock.patch('pylink.jlock.open') def test_jlock_acquire_os_error(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close): """Tests trying to acquire the lock but generating an os-level error. 
Args: self (TestJLock): the ``TestJLock`` instance mock_open (Mock): mocked built-in open method mock_util (Mock): mocked ``psutil`` module mock_rm (Mock): mocked os remove method mock_wr (Mock): mocked os write method mock_op (Mock): mocked os open method mock_exists (Mock): mocked path exist method mock_close (Mock): mocked os file close method Returns: ``None`` """ serial_no = 0xdeadbeef mock_exists.side_effect = [False, False] mock_op.side_effect = [OSError(~errno.EEXIST, 'Message')] lock = jlock.JLock(serial_no) lock.release = mock.Mock() self.assertFalse(lock.acquired) with self.assertRaisesRegexp(OSError, 'Message'): lock.acquire() self.assertFalse(lock.acquired) mock_open.assert_not_called() mock_util.pid_exists.assert_not_called() mock_op.assert_called_once() mock_rm.assert_not_called() mock_wr.assert_not_called() @mock.patch('tempfile.tempdir', new='tmp') @mock.patch('os.close') @mock.patch('os.path.exists') @mock.patch('os.open') @mock.patch('os.write') @mock.patch('os.remove') @mock.patch('pylink.jlock.psutil') @mock.patch('pylink.jlock.open') def test_jlock_acquire_bad_file(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close): """Tests acquiring the lockfile when the current lockfile is invallid. Args: self (TestJLock): the ``TestJLock`` instance mock_open (Mock): mocked built-in open method mock_util (Mock): mocked ``psutil`` module mock_rm (Mock): mocked os remove method mock_wr (Mock): mocked os write method mock_op (Mock): mocked os open method mock_exists (Mock): mocked path exist method mock_close (Mock): mocked os file close method Returns: ``None`` """ pid = 42 fd = 1 serial_no = 0xdeadbeef mock_open.side_effect = [ IOError() ] mock_exists.return_value = True mock_op.return_value = fd lock = jlock.JLock(serial_no) lock.release = mock.Mock() self.assertFalse(lock.acquired) self.assertTrue(lock.acquire()) self.assertTrue(lock.acquired) mock_exists.assert_called_once() mock_open.assert_called_once() mock_util.pid_exists.assert_not_called() mock_rm.assert_not_called() mock_op.assert_called_once() mock_wr.assert_called_once() @mock.patch('tempfile.tempdir', new='tmp') @mock.patch('os.close') @mock.patch('os.path.exists') @mock.patch('os.open') @mock.patch('os.write') @mock.patch('os.remove') @mock.patch('pylink.jlock.psutil') @mock.patch('pylink.jlock.open') def test_jlock_acquire_invalid_pid(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close): """Tests acquiring the lockfile when the pid in the lockfile is invalid. 
Args: self (TestJLock): the ``TestJLock`` instance mock_open (Mock): mocked built-in open method mock_util (Mock): mocked ``psutil`` module mock_rm (Mock): mocked os remove method mock_wr (Mock): mocked os write method mock_op (Mock): mocked os open method mock_exists (Mock): mocked path exist method mock_close (Mock): mocked os file close method Returns: ``None`` """ fd = 1 serial_no = 0xdeadbeef mock_open.side_effect = [ mock.mock_open(read_data='dog\n').return_value, ] mock_op.return_value = fd lock = jlock.JLock(serial_no) lock.release = mock.Mock() self.assertFalse(lock.acquired) self.assertTrue(lock.acquire()) self.assertTrue(lock.acquired) mock_exists.assert_called_once() mock_open.assert_called_once() mock_util.pid_exists.assert_not_called() mock_rm.assert_called_once() mock_op.assert_called_once() mock_wr.assert_called_once() @mock.patch('tempfile.tempdir', new='tmp') @mock.patch('os.close') @mock.patch('os.path.exists') @mock.patch('os.open') @mock.patch('os.write') @mock.patch('os.remove') @mock.patch('pylink.jlock.psutil') @mock.patch('pylink.jlock.open') def test_jlock_acquire_old_pid(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close): """Tests acquiring when the PID in the lockfile does not exist. Args: self (TestJLock): the ``TestJLock`` instance mock_open (Mock): mocked built-in open method mock_util (Mock): mocked ``psutil`` module mock_rm (Mock): mocked os remove method mock_wr (Mock): mocked os write method mock_op (Mock): mocked os open method mock_exists (Mock): mocked path exist method mock_close (Mock): mocked os file close method Returns: ``None`` """ fd = 1 serial_no = 0xdeadbeef mock_open.side_effect = [ mock.mock_open(read_data='42\n').return_value, ] mock_op.return_value = fd mock_util.pid_exists.return_value = False lock = jlock.JLock(serial_no) lock.release = mock.Mock() self.assertFalse(lock.acquired) self.assertTrue(lock.acquire()) self.assertTrue(lock.acquired) mock_exists.assert_called_once() mock_open.assert_called_once() mock_util.pid_exists.assert_called_once_with(42) mock_rm.assert_called() mock_op.assert_called_once() mock_wr.assert_called_once() @mock.patch('tempfile.tempdir', new='tmp') @mock.patch('os.path.exists') @mock.patch('os.close') @mock.patch('os.remove') def test_jlock_release_acquired(self, mock_remove, mock_close, mock_exists): """Tests releasing a held lock. Args: self (TestJLock): the ``TestJLock`` instance mock_remove (Mock): mock file removal method mock_close (Mock): mocked close method mock_exists (Mock): mocked path exist method Returns: ``None`` """ lock = jlock.JLock(0xdeadbeef) lock.acquired = True lock.fd = 1 lock.path = os.sep self.assertTrue(lock.release()) mock_exists.return_value = True mock_remove.assert_called_once_with(os.sep) mock_close.assert_called_once_with(1) mock_exists.assert_called_once_with(os.sep) self.assertEqual(False, lock.acquired) @mock.patch('tempfile.tempdir', new='tmp') def test_jlock_release_not_held(self): """Tests calling release when lock not held. Args: self (TestJLock): the ``TestJLock`` instance Returns: ``None`` """ lock = jlock.JLock(0xdeadbeef) self.assertFalse(lock.release()) if __name__ == '__main__': unittest.main()
tests/unit/test_jlock.py
11,024
Tests the ``jlock`` submodule. Called before each test. Performs setup. Args: self (TestJLock): the ``TestJLock`` instance Returns: ``None`` Called after each test. Performs teardown. Args: self (TestJLock): the ``TestJLock`` instance Returns: ``None`` Tests acquiring the lockfile when the current lockfile is invallid. Args: self (TestJLock): the ``TestJLock`` instance mock_open (Mock): mocked built-in open method mock_util (Mock): mocked ``psutil`` module mock_rm (Mock): mocked os remove method mock_wr (Mock): mocked os write method mock_op (Mock): mocked os open method mock_exists (Mock): mocked path exist method mock_close (Mock): mocked os file close method Returns: ``None`` Tests trying to acquire when the lock exists for an active process. Args: self (TestJLock): the ``TestJLock`` instance mock_open (Mock): mocked built-in open method mock_util (Mock): mocked ``psutil`` module mock_rm (Mock): mocked os remove method mock_wr (Mock): mocked os write method mock_op (Mock): mocked os open method mock_exists (Mock): mocked path exist method mock_close (Mock): mocked os file close method Returns: ``None`` Tests acquiring the lockfile when the pid in the lockfile is invalid. Args: self (TestJLock): the ``TestJLock`` instance mock_open (Mock): mocked built-in open method mock_util (Mock): mocked ``psutil`` module mock_rm (Mock): mocked os remove method mock_wr (Mock): mocked os write method mock_op (Mock): mocked os open method mock_exists (Mock): mocked path exist method mock_close (Mock): mocked os file close method Returns: ``None`` Tests acquiring when the PID in the lockfile does not exist. Args: self (TestJLock): the ``TestJLock`` instance mock_open (Mock): mocked built-in open method mock_util (Mock): mocked ``psutil`` module mock_rm (Mock): mocked os remove method mock_wr (Mock): mocked os write method mock_op (Mock): mocked os open method mock_exists (Mock): mocked path exist method mock_close (Mock): mocked os file close method Returns: ``None`` Tests trying to acquire the lock but generating an os-level error. Args: self (TestJLock): the ``TestJLock`` instance mock_open (Mock): mocked built-in open method mock_util (Mock): mocked ``psutil`` module mock_rm (Mock): mocked os remove method mock_wr (Mock): mocked os write method mock_op (Mock): mocked os open method mock_exists (Mock): mocked path exist method mock_close (Mock): mocked os file close method Returns: ``None`` Tests initialization and deleting a ``JLock``. Args: self (TestJLock): the ``TestJLock`` instance Returns: ``None`` Tests releasing a held lock. Args: self (TestJLock): the ``TestJLock`` instance mock_remove (Mock): mock file removal method mock_close (Mock): mocked close method mock_exists (Mock): mocked path exist method Returns: ``None`` Tests calling release when lock not held. Args: self (TestJLock): the ``TestJLock`` instance Returns: ``None`` Copyright 2017 Square, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
3,565
en
0.818808
#!/usr/bin/env python # -*- coding: utf-8 -*- import json from alipay.aop.api.FileItem import FileItem from alipay.aop.api.constant.ParamConstants import * from alipay.aop.api.domain.AlipayUserMpointPreconsultModel import AlipayUserMpointPreconsultModel class AlipayUserMpointPreconsultRequest(object): def __init__(self, biz_model=None): self._biz_model = biz_model self._biz_content = None self._version = "1.0" self._terminal_type = None self._terminal_info = None self._prod_code = None self._notify_url = None self._return_url = None self._udf_params = None self._need_encrypt = False @property def biz_model(self): return self._biz_model @biz_model.setter def biz_model(self, value): self._biz_model = value @property def biz_content(self): return self._biz_content @biz_content.setter def biz_content(self, value): if isinstance(value, AlipayUserMpointPreconsultModel): self._biz_content = value else: self._biz_content = AlipayUserMpointPreconsultModel.from_alipay_dict(value) @property def version(self): return self._version @version.setter def version(self, value): self._version = value @property def terminal_type(self): return self._terminal_type @terminal_type.setter def terminal_type(self, value): self._terminal_type = value @property def terminal_info(self): return self._terminal_info @terminal_info.setter def terminal_info(self, value): self._terminal_info = value @property def prod_code(self): return self._prod_code @prod_code.setter def prod_code(self, value): self._prod_code = value @property def notify_url(self): return self._notify_url @notify_url.setter def notify_url(self, value): self._notify_url = value @property def return_url(self): return self._return_url @return_url.setter def return_url(self, value): self._return_url = value @property def udf_params(self): return self._udf_params @udf_params.setter def udf_params(self, value): if not isinstance(value, dict): return self._udf_params = value @property def need_encrypt(self): return self._need_encrypt @need_encrypt.setter def need_encrypt(self, value): self._need_encrypt = value def add_other_text_param(self, key, value): if not self.udf_params: self.udf_params = dict() self.udf_params[key] = value def get_params(self): params = dict() params[P_METHOD] = 'alipay.user.mpoint.preconsult' params[P_VERSION] = self.version if self.biz_model: params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':')) if self.biz_content: if hasattr(self.biz_content, 'to_alipay_dict'): params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':')) else: params['biz_content'] = self.biz_content if self.terminal_type: params['terminal_type'] = self.terminal_type if self.terminal_info: params['terminal_info'] = self.terminal_info if self.prod_code: params['prod_code'] = self.prod_code if self.notify_url: params['notify_url'] = self.notify_url if self.return_url: params['return_url'] = self.return_url if self.udf_params: params.update(self.udf_params) return params def get_multipart_params(self): multipart_params = dict() return multipart_params
alipay/aop/api/request/AlipayUserMpointPreconsultRequest.py
3,954
!/usr/bin/env python -*- coding: utf-8 -*-
42
en
0.34282
########################################################################## # # Copyright (c) 2007-2010, Image Engine Design Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # * Neither the name of Image Engine Design nor the names of any # other contributors to this software may be used to endorse or # promote products derived from this software without specific prior # written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ########################################################################## import os import unittest import IECore class TestTurbulence( unittest.TestCase ) : def testConstructors( self ) : t = IECore.TurbulenceV2ff() self.assertEqual( t.octaves, 4 ) self.assertEqual( t.gain, 0.5 ) self.assertEqual( t.lacunarity, 2 ) self.assertEqual( t.turbulent, True ) t = IECore.TurbulenceV2ff( 2, 1, 3, False ) self.assertEqual( t.octaves, 2 ) self.assertEqual( t.gain, 1 ) self.assertEqual( t.lacunarity, 3 ) self.assertEqual( t.turbulent, False ) t = IECore.TurbulenceV2ff( octaves = 3, gain = 1.4, lacunarity = 3, turbulent = False ) self.assertEqual( t.octaves, 3 ) self.assertAlmostEqual( t.gain, 1.4 ) self.assertEqual( t.lacunarity, 3 ) self.assertEqual( t.turbulent, False ) def test2d( self ) : t = IECore.TurbulenceV2ff( octaves = 4, gain = 0.35, lacunarity = 2, turbulent = False ) width = 400 height = 400 f = IECore.FloatVectorData( width * height ) o = 0 for i in range( 0, height ) : for j in range( 0, width ) : f[o] = 0.5 + t.turbulence( IECore.V2f( i/50.0, j/50.0 ) ) o += 1 b = IECore.Box2i( IECore.V2i( 0, 0 ), IECore.V2i( width-1, height-1 ) ) i = IECore.ImagePrimitive( b, b ) i["r"] = IECore.PrimitiveVariable( IECore.PrimitiveVariable.Interpolation.Vertex, f ) i["g"] = IECore.PrimitiveVariable( IECore.PrimitiveVariable.Interpolation.Vertex, f ) i["b"] = IECore.PrimitiveVariable( IECore.PrimitiveVariable.Interpolation.Vertex, f ) e = IECore.Reader.create( "test/IECore/data/expectedResults/turbulence2d.exr" ).read() op = IECore.ImageDiffOp() res = op( imageA = i, imageB = e, maxError = 0.0005 ) self.failIf( res.value ) def testNaN( self ) : t = IECore.TurbulenceV2ff( octaves = 28, gain = 0.35, lacunarity = 2, turbulent = True ) f = t.turbulence( IECore.V2f( 21.3, 51.2 ) ) self.assert_( f == f ) if __name__ == "__main__": unittest.main()
test/IECore/Turbulence.py
3,731
Copyright (c) 2007-2010, Image Engine Design Inc. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Image Engine Design nor the names of any other contributors to this software may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1,574
en
0.889544
# -*- coding: utf-8 -*-
from .deprecated_code import (chi2_bin, best_ks_bin, make_bin,
                              feature_analysis, calc_bin_cond)
__init__.py
121
-*- coding: utf-8 -*-
21
en
0.767281
#!/usr/bin/env python3 import ast from collections import namedtuple from functools import partial import itertools import logging import os from pathlib import Path import re from tempfile import NamedTemporaryFile, TemporaryDirectory import time import traceback from typing import ( Any, Iterator, List, Optional, Pattern, Tuple, Type, TYPE_CHECKING, Union, ) import attr import mypy.api if TYPE_CHECKING: import flake8.options.manager.OptionManager # noqa __version__ = '17.8.0' noqa = re.compile(r'# noqa\b', re.I).search Error = namedtuple('Error', 'lineno col message type vars') def make_arguments(**kwargs: Union[str, bool]) -> List[str]: result = [] for k, v in kwargs.items(): k = k.replace('_', '-') if v is True: result.append('--' + k) elif v is False: continue else: result.append('--{}={}'.format(k, v)) return result def calculate_mypypath() -> List[str]: """Return MYPYPATH so that stubs have precedence over local sources.""" typeshed_root = None count = 0 started = time.time() for parent in itertools.chain( # Look in current script's parents, useful for zipapps. Path(__file__).parents, # Look around site-packages, useful for virtualenvs. Path(mypy.api.__file__).parents, # Look in global paths, useful for globally installed. Path(os.__file__).parents, ): count += 1 candidate = parent / 'lib' / 'mypy' / 'typeshed' if candidate.is_dir(): typeshed_root = candidate break # Also check the non-installed path, useful for `setup.py develop`. candidate = parent / 'typeshed' if candidate.is_dir(): typeshed_root = candidate break LOG.debug( 'Checked %d paths in %.2fs looking for typeshed. Found %s', count, time.time() - started, typeshed_root, ) if not typeshed_root: return [] stdlib_dirs = ('3.7', '3.6', '3.5', '3.4', '3.3', '3.2', '3', '2and3') stdlib_stubs = [ typeshed_root / 'stdlib' / stdlib_dir for stdlib_dir in stdlib_dirs ] third_party_dirs = ('3.7', '3.6', '3', '2and3') third_party_stubs = [ typeshed_root / 'third_party' / tp_dir for tp_dir in third_party_dirs ] return [ str(p) for p in stdlib_stubs + third_party_stubs ] # invalid_types.py:5: error: Missing return statement MYPY_ERROR_TEMPLATE = r""" ^ .* # whatever at the beginning {filename}: # this needs to be provided in run() (?P<lineno>\d+) # necessary for the match (:(?P<column>\d+))? # optional but useful column info :[ ] # ends the preamble ((?P<class>error|warning|note):)? # optional class [ ](?P<message>.*) # the rest $""" LOG = logging.getLogger('flake8.mypy') DEFAULT_ARGUMENTS = make_arguments( platform='linux', # flake8-mypy expects the two following for sensible formatting show_column_numbers=True, show_error_context=False, # suppress error messages from unrelated files follow_imports='skip', # since we're ignoring imports, writing .mypy_cache doesn't make any sense cache_dir=os.devnull, # suppress errors about unsatisfied imports ignore_missing_imports=True, # allow untyped calls as a consequence of the options above disallow_untyped_calls=False, # allow returning Any as a consequence of the options above warn_return_any=False, # treat Optional per PEP 484 strict_optional=True, # ensure all execution paths are returning warn_no_return=True, # lint-style cleanliness for typing needs to be disabled; returns more errors # than the full run. warn_redundant_casts=False, warn_unused_ignores=False, # The following are off by default. Flip them on if you feel # adventurous. 
disallow_untyped_defs=False, check_untyped_defs=False, ) _Flake8Error = Tuple[int, int, str, Type['MypyChecker']] @attr.s(hash=False) class MypyChecker: name = 'flake8-mypy' version = __version__ tree = attr.ib(default=None) filename = attr.ib(default='(none)') lines = attr.ib(default=[]) # type: List[int] options = attr.ib(default=None) visitor = attr.ib(default=attr.Factory(lambda: TypingVisitor)) def run(self) -> Iterator[_Flake8Error]: if not self.lines: return # empty file, no need checking. visitor = self.visitor() visitor.visit(self.tree) if not visitor.should_type_check: return # typing not used in the module if not self.options.mypy_config and 'MYPYPATH' not in os.environ: os.environ['MYPYPATH'] = ':'.join(calculate_mypypath()) # Always put the file in a separate temporary directory to avoid # unexpected clashes with other .py and .pyi files in the same original # directory. with TemporaryDirectory(prefix='flake8mypy_') as d: file = NamedTemporaryFile( 'w', encoding='utf8', prefix='tmpmypy_', suffix='.py', dir=d, delete=False, ) try: self.filename = file.name for line in self.lines: file.write(line) file.close() yield from self._run() finally: os.remove(file.name) def _run(self) -> Iterator[_Flake8Error]: mypy_cmdline = self.build_mypy_cmdline(self.filename, self.options.mypy_config) mypy_re = self.build_mypy_re(self.filename) last_t499 = 0 try: stdout, stderr, returncode = mypy.api.run(mypy_cmdline) except Exception as exc: # Pokémon exception handling to guard against mypy's internal errors last_t499 += 1 yield self.adapt_error(T498(last_t499, 0, vars=(type(exc), str(exc)))) for line in traceback.format_exc().splitlines(): last_t499 += 1 yield self.adapt_error(T499(last_t499, 0, vars=(line,))) else: # FIXME: should we make any decision based on `returncode`? 
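            # Translate each line of mypy's stdout into a flake8-style error:
            # matched lines become T484/T400 entries, unmatched lines are
            # surfaced as T499 so no output is silently dropped.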
for line in stdout.splitlines(): try: e = self.make_error(line, mypy_re) except ValueError: # unmatched line last_t499 += 1 yield self.adapt_error(T499(last_t499, 0, vars=(line,))) continue if self.omit_error(e): continue yield self.adapt_error(e) for line in stderr.splitlines(): last_t499 += 1 yield self.adapt_error(T499(last_t499, 0, vars=(line,))) @classmethod def adapt_error(cls, e: Any) -> _Flake8Error: """Adapts the extended error namedtuple to be compatible with Flake8.""" return e._replace(message=e.message.format(*e.vars))[:4] def omit_error(self, e: Error) -> bool: """Returns True if error should be ignored.""" if ( e.vars and e.vars[0] == 'No parent module -- cannot perform relative import' ): return True return bool(noqa(self.lines[e.lineno - 1])) @classmethod def add_options(cls, parser: 'flake8.options.manager.OptionManager') -> None: parser.add_option( '--mypy-config', parse_from_config=True, help="path to a custom mypy configuration file", ) def make_error(self, line: str, regex: Pattern) -> Error: m = regex.match(line) if not m: raise ValueError("unmatched line") lineno = int(m.group('lineno')) column = int(m.group('column') or 0) message = m.group('message').strip() if m.group('class') == 'note': return T400(lineno, column, vars=(message,)) return T484(lineno, column, vars=(message,)) def build_mypy_cmdline( self, filename: str, mypy_config: Optional[str] ) -> List[str]: if mypy_config: return ['--config-file=' + mypy_config, filename] return DEFAULT_ARGUMENTS + [filename] def build_mypy_re(self, filename: Union[str, Path]) -> Pattern: filename = Path(filename) if filename.is_absolute(): prefix = Path('.').absolute() try: filename = filename.relative_to(prefix) except ValueError: pass # not relative to the cwd re_filename = re.escape(str(filename)) if re_filename.startswith(r'\./'): re_filename = re_filename[3:] return re.compile( MYPY_ERROR_TEMPLATE.format(filename=re_filename), re.VERBOSE, ) @attr.s class TypingVisitor(ast.NodeVisitor): """Used to determine if the file is using annotations at all.""" should_type_check = attr.ib(default=False) def visit_FunctionDef(self, node: ast.FunctionDef) -> None: if node.returns: self.should_type_check = True return for arg in itertools.chain(node.args.args, node.args.kwonlyargs): if arg.annotation: self.should_type_check = True return va = node.args.vararg kw = node.args.kwarg if (va and va.annotation) or (kw and kw.annotation): self.should_type_check = True def visit_Import(self, node: ast.Import) -> None: for name in node.names: if ( isinstance(name, ast.alias) and name.name == 'typing' or name.name.startswith('typing.') ): self.should_type_check = True break def visit_ImportFrom(self, node: ast.ImportFrom) -> None: if ( node.level == 0 and node.module == 'typing' or node.module and node.module.startswith('typing.') ): self.should_type_check = True def generic_visit(self, node: ast.AST) -> None: """Called if no explicit visitor function exists for a node.""" for _field, value in ast.iter_fields(node): if self.should_type_check: break if isinstance(value, list): for item in value: if self.should_type_check: break if isinstance(item, ast.AST): self.visit(item) elif isinstance(value, ast.AST): self.visit(value) # Generic mypy error T484 = partial( Error, message="T484 {}", type=MypyChecker, vars=(), ) # Generic mypy note T400 = partial( Error, message="T400 note: {}", type=MypyChecker, vars=(), ) # Internal mypy error (summary) T498 = partial( Error, message="T498 Internal mypy error '{}': {}", type=MypyChecker, vars=(), ) # 
Internal mypy error (traceback, stderr, unmatched line) T499 = partial( Error, message="T499 {}", type=MypyChecker, vars=(), )
flake8_mypy.py
11,426
Used to determine if the file is using annotations at all. Adapts the extended error namedtuple to be compatible with Flake8. Return MYPYPATH so that stubs have precedence over local sources. Called if no explicit visitor function exists for a node. Returns True if error should be ignored. !/usr/bin/env python3 noqa Look in current script's parents, useful for zipapps. Look around site-packages, useful for virtualenvs. Look in global paths, useful for globally installed. Also check the non-installed path, useful for `setup.py develop`. invalid_types.py:5: error: Missing return statement flake8-mypy expects the two following for sensible formatting suppress error messages from unrelated files since we're ignoring imports, writing .mypy_cache doesn't make any sense suppress errors about unsatisfied imports allow untyped calls as a consequence of the options above allow returning Any as a consequence of the options above treat Optional per PEP 484 ensure all execution paths are returning lint-style cleanliness for typing needs to be disabled; returns more errors than the full run. The following are off by default. Flip them on if you feel adventurous. type: List[int] empty file, no need checking. typing not used in the module Always put the file in a separate temporary directory to avoid unexpected clashes with other .py and .pyi files in the same original directory. Pokémon exception handling to guard against mypy's internal errors FIXME: should we make any decision based on `returncode`? unmatched line not relative to the cwd Generic mypy error Generic mypy note Internal mypy error (summary) Internal mypy error (traceback, stderr, unmatched line)
1,675
en
0.779559
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
class LineageBackend(object):
    def send_lineage(self, operator=None, inlets=None, outlets=None, context=None):
        """
        Sends lineage metadata to a backend

        :param operator: the operator executing a transformation on the inlets and outlets
        :param inlets: the inlets to this operator
        :param outlets: the outlets from this operator
        :param context: the current context of the task instance
        """
        raise NotImplementedError()
airflow/lineage/backend/__init__.py
1,292
Sends lineage metadata to a backend :param operator: the operator executing a transformation on the inlets and outlets :param inlets: the inlets to this operator :param outlets: the outlets from this operator :param context: the current context of the task instance Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
1,020
en
0.865842
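Because send_lineage above is the only abstract hook, a concrete lineage backend just overrides that one method. The class below is a minimal, hypothetical sketch that only logs what it receives; the name LoggingLineageBackend and its behaviour are illustrative assumptions, not part of Airflow.

import logging

from airflow.lineage.backend import LineageBackend

log = logging.getLogger(__name__)


class LoggingLineageBackend(LineageBackend):
    """Hypothetical backend that only logs the lineage metadata it is given."""

    def send_lineage(self, operator=None, inlets=None, outlets=None, context=None):
        # A real backend would push this to an external lineage service instead.
        log.info("lineage from %s: inlets=%s outlets=%s", operator, inlets, outlets)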
# THIS FILE IS AUTO-GENERATED. DO NOT EDIT from verta._swagger.base_type import BaseType class ModeldbAddProjectTags(BaseType): def __init__(self, id=None, tags=None): required = { "id": False, "tags": False, } self.id = id self.tags = tags for k, v in required.items(): if self[k] is None and v: raise ValueError('attribute {} is required'.format(k)) @staticmethod def from_json(d): tmp = d.get('id', None) if tmp is not None: d['id'] = tmp tmp = d.get('tags', None) if tmp is not None: d['tags'] = [tmp for tmp in tmp] return ModeldbAddProjectTags(**d)
client/verta/verta/_swagger/_public/modeldb/model/ModeldbAddProjectTags.py
654
THIS FILE IS AUTO-GENERATED. DO NOT EDIT
40
en
0.883199
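ModeldbAddProjectTags above is a thin generated wrapper, so it can be built directly or round-tripped through from_json. A short sketch with made-up values, assuming the verta client package (and its BaseType) is importable:

from verta._swagger._public.modeldb.model.ModeldbAddProjectTags import ModeldbAddProjectTags

# Made-up project id and tags.
msg = ModeldbAddProjectTags(id="proj-123", tags=["baseline", "v2"])

# Equivalent construction via the generated from_json helper.
same = ModeldbAddProjectTags.from_json({"id": "proj-123", "tags": ["baseline", "v2"]})
print(same.id, same.tags)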
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ] operations = [ migrations.CreateModel( name='AllFieldsModel', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('char_field', models.CharField(help_text=b'write something', max_length=500)), ('int_field', models.IntegerField(help_text=b'Put a number, magic number')), ('text_field', models.TextField(help_text=b'Put a large test here')), ('big_integer_field', models.BigIntegerField(help_text=b'An big integer field')), ('binary_field', models.BinaryField(help_text=b'A binary field')), ('date_field', models.DateField(help_text=b'A date field')), ('datetime_field', models.DateTimeField(help_text=b'A datetime field')), ('boolean_field', models.BooleanField(help_text=b'A boolean field')), ('comma_separated_integer_field', models.CommaSeparatedIntegerField(help_text=b'A comma sepparated integer field', max_length=200)), ('decimal_field', models.DecimalField(help_text=b'A decimal field', max_digits=100, decimal_places=10)), ('duration_field', models.DurationField(help_text=b'A duration field')), ('email_field', models.EmailField(help_text=b'A email field', max_length=254)), ('file_field', models.FileField(help_text=b'A file field', upload_to=b'')), ('file_path_field', models.FilePathField(help_text=b'A file path field')), ('float_field', models.FloatField(help_text=b'A float field')), ('generic_ip_addr_field', models.GenericIPAddressField(help_text=b'A generic ip addr field')), ('image_field', models.ImageField(help_text=b'A image field', upload_to=b'')), ('null_boolean_field', models.NullBooleanField(help_text=b'A null boolean field')), ('positive_integer_field', models.PositiveIntegerField(help_text=b'A positive integer')), ('positive_small_integer_field', models.PositiveSmallIntegerField(help_text=b'A positive small integer field')), ('slug_field', models.SlugField(help_text=b'A slug field')), ('small_integer_field', models.SmallIntegerField(help_text=b'A small integer field')), ('time_field', models.TimeField(help_text=b'A time field')), ('url_field', models.URLField(help_text=b'A url field')), ('uuid_field', models.UUIDField(help_text=b'A uuid field')), ], ), migrations.CreateModel( name='ForeingModel', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(help_text=b'write something', max_length=500)), ('age', models.PositiveSmallIntegerField()), ('birthday', models.DateField()), ('foreign_key_field', models.ForeignKey(help_text=b'A foreign_key field', to='second_app.AllFieldsModel')), ], ), migrations.CreateModel( name='ManyToManyModel', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(help_text=b'write something', max_length=500)), ], ), migrations.AddField( model_name='allfieldsmodel', name='many_to_many_field', field=models.ManyToManyField(help_text=b'A many to many field', to='second_app.ManyToManyModel'), ), ]
example/second_app/migrations/0001_initial.py
3,907
-*- coding: utf-8 -*-
21
en
0.767281
import time import random import numpy as np import gym from rlkit.scripted_experts.scripted_policy import ScriptedPolicy ACT_MAG = 0.275 ACT_NOISE_SCALE = 0.1 ACT_SLOW_NOISE_SCALE = 0.05 SLOW_DOWN_RADIUS = 0.01 def get_linear_pos_act(cur_pos, reach_pos): cur_pos = cur_pos.copy() reach_pos = reach_pos.copy() move_dir = reach_pos - cur_pos dist = np.linalg.norm(move_dir, axis=-1) # if dist > ACT_MAG: # if dist < ACT_MAG: # move_dir = move_dir # else: move_dir *= (ACT_MAG / dist) return move_dir class ScriptedLinearFewShotReachPolicy(ScriptedPolicy): def __init__(self): super().__init__() def reset(self, env): # # first make the gripper go slightly above the object self.correct_obj_idx = env.correct_obj_idx if self.correct_obj_idx == 0: self.correct_obj_abs_pos = env.sim.data.get_site_xpos('object0') else: self.correct_obj_abs_pos = env.sim.data.get_site_xpos('object1') self.init_grip_pos = env.sim.data.get_site_xpos('robot0:grip') X_Y_FRAC = np.random.uniform(0.7, 0.8) Z_FRAC = np.random.uniform(0.2, 0.3) self.waypoint = np.zeros(3) self.waypoint[:2] = (self.correct_obj_abs_pos[:2] - self.init_grip_pos[:2]) * X_Y_FRAC self.waypoint[2] = (self.correct_obj_abs_pos[2] - self.init_grip_pos[2]) * Z_FRAC self.waypoint += self.init_grip_pos self.waypoint += np.random.uniform(-0.01, 0.01, 3) # first go to a way-point def cond_0(obs): grip_pos = env.sim.data.get_site_xpos('robot0:grip') return 0.01 > np.linalg.norm(grip_pos - self.waypoint, axis=-1) self.milestone_0_cond = cond_0 # now actually go to the object def cond_1(obs): grip_pos = env.sim.data.get_site_xpos('robot0:grip') goal = env.goal return 0.01 > np.linalg.norm(grip_pos - goal) self.milestone_1_cond = cond_1 # reset the milestones self.milestone_0_complete = False self.milestone_1_complete = False self.first_time_all_complete = -1 def get_action(self, obs, env, timestep): # first find out what stage we are in and update milestone info cur_stage = -1 if not self.milestone_0_complete: # check if milestone 0 was completed by the last step action if self.milestone_0_cond(obs): self.milestone_0_complete = True cur_stage = 1 else: cur_stage = 0 else: if not self.milestone_1_complete: # check if milestone 1 was completed by the last step action if self.milestone_1_cond(obs): self.milestone_1_complete = True self.first_time_all_complete = timestep print('solved') cur_stage = 1 # now perform the action corresponding to the current stage if cur_stage == 0: grip_pos = env.sim.data.get_site_xpos('robot0:grip') action = [0, 0, 0, 0] pos_act = get_linear_pos_act(grip_pos, self.waypoint) pos_act += np.random.uniform(0.0, ACT_NOISE_SCALE, 3) for i in range(len(pos_act)): action[i] = pos_act[i] action[len(action)-1] = np.random.uniform(-0.005, -0.015) # close else: action = [0, 0, 0, 0] # print(np.linalg.norm(correct_obj_rel_target, axis=-1)) grip_pos = env.sim.data.get_site_xpos('robot0:grip') target_rel_pos = env.goal - grip_pos if np.linalg.norm(target_rel_pos, axis=-1) < SLOW_DOWN_RADIUS: # pos_act = ACT_MAG*target_rel_pos*10 pos_act = 0.25*get_linear_pos_act(np.zeros(3), target_rel_pos) pos_act += np.random.uniform(0.0, ACT_SLOW_NOISE_SCALE, 3) # print(pos_act) else: pos_act = get_linear_pos_act(np.zeros(3), target_rel_pos) pos_act += np.random.uniform(0.0, ACT_NOISE_SCALE, 3) # pos_act = get_linear_pos_act(np.zeros(3), correct_obj_rel_target) for i in range(len(pos_act)): action[i] = pos_act[i] action[len(action)-1] = np.random.uniform(-0.005, -0.015) # close action = np.clip(action, -1.0, 1.0) return action, {}
rlkit/scripted_experts/linear_few_shot_reach_env_expert.py
4,462
if dist > ACT_MAG: if dist < ACT_MAG: move_dir = move_dir else: first make the gripper go slightly above the object first go to a way-point now actually go to the object reset the milestones first find out what stage we are in and update milestone info check if milestone 0 was completed by the last step action check if milestone 1 was completed by the last step action now perform the action corresponding to the current stage close print(np.linalg.norm(correct_obj_rel_target, axis=-1)) pos_act = ACT_MAG*target_rel_pos*10 print(pos_act) pos_act = get_linear_pos_act(np.zeros(3), correct_obj_rel_target) close
617
en
0.785117
# -*- coding: utf-8 -*- def calculate_map(gt_path, my_path): id2videos = dict() with open(gt_path, 'r') as fin: lines = fin.readlines() for line in lines: terms = line.strip().split(' ') id2videos[terms[0]] = terms[1:] id_num = len(lines) my_id2videos = dict() with open(my_path, 'r') as fin: lines = fin.readlines() assert (len(lines) <= id_num) for line in lines: terms = line.strip().split(' ') tmp_list = [] for video in terms[1:]: if video not in tmp_list: tmp_list.append(video) my_id2videos[terms[0]] = tmp_list ap_total = 0. for cid in id2videos: videos = id2videos[cid] if cid not in my_id2videos: continue my_videos = my_id2videos[cid] # recall number upper bound assert (len(my_videos) <= 100) ap = 0. ind = 0. for ind_video, my_video in enumerate(my_videos): if my_video in videos: ind += 1 ap += ind / (ind_video + 1) ap_total += ap / len(videos) return ap_total / id_num if __name__ == '__main__': gt_val_path = '/data/materials/val_gt.txt' my_val_path = '/data/result/result.txt' print('mAP: {}'.format(calculate_map(gt_val_path, my_val_path)))
evaluation_map.py
1,385
-*- coding: utf-8 -*- recall number upper bound
47
en
0.700324
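calculate_map above expects both files to contain whitespace-separated lines of the form "<id> <video> <video> ...". A self-contained sketch with made-up ids written to a temp directory instead of the /data/... paths; the expected value is worked out in the comments:

import os
import tempfile

from evaluation_map import calculate_map  # assuming the module above is importable

tmpdir = tempfile.mkdtemp()
gt_path = os.path.join(tmpdir, "val_gt.txt")
my_path = os.path.join(tmpdir, "result.txt")
with open(gt_path, "w") as f:
    f.write("q1 v1 v2 v3\nq2 v9\n")   # ground truth: q1 -> {v1, v2, v3}, q2 -> {v9}
with open(my_path, "w") as f:
    f.write("q1 v1 v4 v2\nq2 v9\n")   # ranked retrieval results

# q1: hits at ranks 1 and 3 -> AP = (1/1 + 2/3) / 3 ~= 0.556; q2: AP = 1.0
# mAP = (0.556 + 1.0) / 2 ~= 0.778
print(calculate_map(gt_path, my_path))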
# Copyright 2021 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # pylint: disable=g-bad-import-order,redefined-builtin """APIs to train an image classification model. Task guide: https://www.tensorflow.org/lite/tutorials/model_maker_image_classification. """ from tensorflow_examples.lite.model_maker.core.data_util.image_dataloader import ImageClassifierDataLoader as DataLoader from tensorflow_examples.lite.model_maker.core.task.image_classifier import create from tensorflow_examples.lite.model_maker.core.task.image_classifier import ImageClassifier from tensorflow_examples.lite.model_maker.core.task.model_spec.image_spec import efficientnet_lite0_spec as EfficientNetLite0Spec from tensorflow_examples.lite.model_maker.core.task.model_spec.image_spec import efficientnet_lite1_spec as EfficientNetLite1Spec from tensorflow_examples.lite.model_maker.core.task.model_spec.image_spec import efficientnet_lite2_spec as EfficientNetLite2Spec from tensorflow_examples.lite.model_maker.core.task.model_spec.image_spec import efficientnet_lite3_spec as EfficientNetLite3Spec from tensorflow_examples.lite.model_maker.core.task.model_spec.image_spec import efficientnet_lite4_spec as EfficientNetLite4Spec from tensorflow_examples.lite.model_maker.core.task.model_spec.image_spec import ImageModelSpec as ModelSpec from tensorflow_examples.lite.model_maker.core.task.model_spec.image_spec import mobilenet_v2_spec as MobileNetV2Spec from tensorflow_examples.lite.model_maker.core.task.model_spec.image_spec import resnet_50_spec as Resnet50Spec
tensorflow_examples/lite/model_maker/public/image_classifier/__init__.py
2,090
APIs to train an image classification model. Task guide: https://www.tensorflow.org/lite/tutorials/model_maker_image_classification. Copyright 2021 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. pylint: disable=g-bad-import-order,redefined-builtin
770
en
0.829381
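The names re-exported above (DataLoader, create, the EfficientNet-Lite specs) are normally combined along the lines of the task guide referenced in the docstring. The sketch below uses a placeholder 'flower_photos/' folder of class-labelled subdirectories, and exact keyword arguments may differ between Model Maker versions:

from tensorflow_examples.lite.model_maker.public.image_classifier import (
    DataLoader,
    EfficientNetLite0Spec,
    create,
)

# Placeholder dataset path: one subdirectory per class label.
data = DataLoader.from_folder("flower_photos/")
train_data, test_data = data.split(0.9)

# Train an EfficientNet-Lite0 classifier and export a TFLite model.
model = create(train_data, model_spec=EfficientNetLite0Spec())
loss, accuracy = model.evaluate(test_data)
model.export(export_dir=".")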
import os import random import numpy as np from PIL import Image from torch.utils.data import Dataset from datasets.data_io import get_transform, read_all_lines, pfm_imread class PicoStereoDataset(Dataset): def __init__(self, datapath, list_filename, training): self.datapath = datapath self.left_filenames, self.right_filenames, self.disp_filenames = self.load_path(list_filename) self.training = training def load_path(self, list_filename): lines = read_all_lines(list_filename) splits = [line.split() for line in lines] left_images = [x[0] for x in splits] right_images = [x[1] for x in splits] if len(splits[0]) == 2: # ground truth not available return left_images, right_images, None else: disp_images = [x[2] for x in splits] return left_images, right_images, disp_images def load_image(self, filename): return Image.open(filename).convert('RGB') def load_disp(self, filename): data, scale = pfm_imread(filename) data = np.ascontiguousarray(data, dtype=np.float32) return data def __len__(self): return len(self.left_filenames) def __getitem__(self, index): left_img = self.load_image(os.path.join(self.datapath, self.left_filenames[index])) right_img = self.load_image(os.path.join(self.datapath, self.right_filenames[index])) if self.disp_filenames: # has disparity ground truth disparity = self.load_disp(os.path.join(self.datapath, self.disp_filenames[index])) else: disparity = None # to tensor, normalize processed = get_transform() left_img = processed(left_img) right_img = processed(right_img) return {"left": left_img, "right": right_img, # "disparity": disparity, "top_pad": 0, "right_pad": 0, "left_filename": self.left_filenames[index]} class SceneFlowDataset(Dataset): def __init__(self, datapath, list_filename, training): self.datapath = datapath self.left_filenames, self.right_filenames, self.disp_filenames = self.load_path(list_filename) self.training = training def load_path(self, list_filename): lines = read_all_lines(list_filename) splits = [line.split() for line in lines] left_images = [x[0] for x in splits] right_images = [x[1] for x in splits] disp_images = [x[2] for x in splits] return left_images, right_images, disp_images def load_image(self, filename): return Image.open(filename).convert('RGB') def load_disp(self, filename): data, scale = pfm_imread(filename) data = np.ascontiguousarray(data, dtype=np.float32) return data def __len__(self): return len(self.left_filenames) def __getitem__(self, index): left_img = self.load_image(os.path.join(self.datapath, self.left_filenames[index])) right_img = self.load_image(os.path.join(self.datapath, self.right_filenames[index])) disparity = self.load_disp(os.path.join(self.datapath, self.disp_filenames[index])) if self.training: w, h = left_img.size crop_w, crop_h = 512, 256 x1 = random.randint(0, w - crop_w) y1 = random.randint(0, h - crop_h) # random crop left_img = left_img.crop((x1, y1, x1 + crop_w, y1 + crop_h)) right_img = right_img.crop((x1, y1, x1 + crop_w, y1 + crop_h)) disparity = disparity[y1:y1 + crop_h, x1:x1 + crop_w] # to tensor, normalize processed = get_transform() left_img = processed(left_img) right_img = processed(right_img) return {"left": left_img, "right": right_img, "disparity": disparity} else: w, h = left_img.size crop_w, crop_h = 960, 512 left_img = left_img.crop((w - crop_w, h - crop_h, w, h)) right_img = right_img.crop((w - crop_w, h - crop_h, w, h)) disparity = disparity[h - crop_h:h, w - crop_w: w] processed = get_transform() left_img = processed(left_img) right_img = processed(right_img) return {"left": left_img, 
"right": right_img, "disparity": disparity, "top_pad": 0, "right_pad": 0, "left_filename": self.left_filenames[index]} class KITTIDataset(Dataset): def __init__(self, datapath, list_filename, training): self.datapath = datapath self.left_filenames, self.right_filenames, self.disp_filenames = self.load_path(list_filename) self.training = training if self.training: assert self.disp_filenames is not None def load_path(self, list_filename): lines = read_all_lines(list_filename) splits = [line.split() for line in lines] left_images = [x[0] for x in splits] right_images = [x[1] for x in splits] if len(splits[0]) == 2: # ground truth not available return left_images, right_images, None else: disp_images = [x[2] for x in splits] return left_images, right_images, disp_images def load_image(self, filename): return Image.open(filename).convert('RGB') def load_disp(self, filename): data = Image.open(filename) data = np.array(data, dtype=np.float32) / 256. return data def __len__(self): return len(self.left_filenames) def __getitem__(self, index): left_img = self.load_image(os.path.join(self.datapath, self.left_filenames[index])) right_img = self.load_image(os.path.join(self.datapath, self.right_filenames[index])) if self.disp_filenames: # has disparity ground truth disparity = self.load_disp(os.path.join(self.datapath, self.disp_filenames[index])) else: disparity = None if self.training: w, h = left_img.size crop_w, crop_h = 512, 256 x1 = random.randint(0, w - crop_w) y1 = random.randint(0, h - crop_h) # random crop left_img = left_img.crop((x1, y1, x1 + crop_w, y1 + crop_h)) right_img = right_img.crop((x1, y1, x1 + crop_w, y1 + crop_h)) disparity = disparity[y1:y1 + crop_h, x1:x1 + crop_w] # to tensor, normalize processed = get_transform() left_img = processed(left_img) right_img = processed(right_img) return {"left": left_img, "right": right_img, "disparity": disparity} else: w, h = left_img.size # normalize processed = get_transform() left_img = processed(left_img).numpy() right_img = processed(right_img).numpy() # pad to size 1248x384 top_pad = 384 - h right_pad = 1248 - w assert top_pad > 0 and right_pad > 0 # pad images left_img = np.lib.pad(left_img, ((0, 0), (top_pad, 0), (0, right_pad)), mode='constant', constant_values=0) right_img = np.lib.pad(right_img, ((0, 0), (top_pad, 0), (0, right_pad)), mode='constant', constant_values=0) # pad disparity gt if disparity is not None: assert len(disparity.shape) == 2 disparity = np.lib.pad(disparity, ((top_pad, 0), (0, right_pad)), mode='constant', constant_values=0) if disparity is not None: return {"left": left_img, "right": right_img, "disparity": disparity, "top_pad": top_pad, "right_pad": right_pad, "left_filename": self.left_filenames[index]} else: return {"left": left_img, "right": right_img, "top_pad": top_pad, "right_pad": right_pad, "left_filename": self.left_filenames[index], "right_filename": self.right_filenames[index]} class DrivingStereoDataset(Dataset): def __init__(self, datapath, list_filename, training): self.datapath = datapath self.left_filenames, self.right_filenames, self.disp_filenames = self.load_path(list_filename) self.training = training def load_path(self, list_filename): lines = read_all_lines(list_filename) splits = [line.split() for line in lines] left_images = [x[0] for x in splits] right_images = [x[1] for x in splits] disp_images = [x[2] for x in splits] return left_images, right_images, disp_images def load_image(self, filename): return Image.open(filename).convert('RGB') def load_disp(self, filename): data = 
Image.open(filename) data = np.array(data, dtype=np.float32) / 256. return data def __len__(self): return len(self.left_filenames) def __getitem__(self, index): left_img = self.load_image(os.path.join(self.datapath, self.left_filenames[index])) right_img = self.load_image(os.path.join(self.datapath, self.right_filenames[index])) disparity = self.load_disp(os.path.join(self.datapath, self.disp_filenames[index])) if self.training: w, h = left_img.size # (881, 400) crop_w, crop_h = 512, 256 x1 = random.randint(0, w - crop_w) y1 = random.randint(0, h - crop_h) # random crop left_img = left_img.crop((x1, y1, x1 + crop_w, y1 + crop_h)) right_img = right_img.crop((x1, y1, x1 + crop_w, y1 + crop_h)) disparity = disparity[y1:y1 + crop_h, x1:x1 + crop_w] # to tensor, normalize processed = get_transform() left_img = processed(left_img) right_img = processed(right_img) return {"left": left_img, "right": right_img, "disparity": disparity} else: w, h = left_img.size crop_w, crop_h = 880, 400 left_img = left_img.crop((w - crop_w, h - crop_h, w, h)) right_img = right_img.crop((w - crop_w, h - crop_h, w, h)) disparity = disparity[h - crop_h:h, w - crop_w: w] processed = get_transform() left_img = processed(left_img) right_img = processed(right_img) return {"left": left_img, "right": right_img, "disparity": disparity, "top_pad": 0, "right_pad": 0, "left_filename": self.left_filenames[index]}
datasets/dataset.py
11,058
ground truth not available has disparity ground truth to tensor, normalize "disparity": disparity, random crop to tensor, normalize ground truth not available has disparity ground truth random crop to tensor, normalize normalize pad to size 1248x384 pad images pad disparity gt (881, 400) random crop to tensor, normalize
321
en
0.828935
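All four dataset classes above share the same constructor arguments (datapath, list_filename, training) and return plain dictionaries, so they drop straight into a PyTorch DataLoader. A minimal sketch with placeholder paths for the KITTI root and the split list:

from torch.utils.data import DataLoader

from datasets.dataset import KITTIDataset

# Placeholder paths -- point these at a real KITTI root and filename list.
train_set = KITTIDataset("/path/to/kitti", "filenames/kitti_train.txt", training=True)
train_loader = DataLoader(train_set, batch_size=4, shuffle=True, num_workers=2)

for sample in train_loader:
    left, right, disp = sample["left"], sample["right"], sample["disparity"]
    break  # one batch is enough for the sketch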
from unittest.mock import MagicMock import pytest from click.testing import CliRunner from prefect.cli.register import register def test_register_init(): runner = CliRunner() result = runner.invoke(register) assert result.exit_code == 0 assert "Register flows" in result.output def test_register_help(): runner = CliRunner() result = runner.invoke(register, ["--help"]) assert result.exit_code == 0 assert "Register flows" in result.output def test_register_flow_help(): runner = CliRunner() result = runner.invoke(register, ["flow", "--help"]) assert result.exit_code == 0 assert "Register a flow" in result.output @pytest.mark.parametrize("labels", [[], ["b", "c"]]) @pytest.mark.parametrize("kind", ["run_config", "environment"]) def test_register_flow_call(monkeypatch, tmpdir, kind, labels): client = MagicMock() monkeypatch.setattr("prefect.Client", MagicMock(return_value=client)) if kind == "environment": contents = ( "from prefect import Flow\n" "from prefect.environments.execution import LocalEnvironment\n" "from prefect.environments.storage import Local\n" "f = Flow('test-flow', environment=LocalEnvironment(labels=['a']),\n" " storage=Local(add_default_labels=False))" ) else: contents = ( "from prefect import Flow\n" "from prefect.run_configs import KubernetesRun\n" "from prefect.environments.storage import Local\n" "f = Flow('test-flow', run_config=KubernetesRun(labels=['a']),\n" " storage=Local(add_default_labels=False))" ) full_path = str(tmpdir.join("flow.py")) with open(full_path, "w") as f: f.write(contents) args = ["flow", "--file", full_path, "--name", "test-flow", "--project", "project"] for l in labels: args.extend(["-l", l]) runner = CliRunner() result = runner.invoke(register, args) assert client.register.called assert client.register.call_args[1]["project_name"] == "project" # Check additional labels are set if specified flow = client.register.call_args[1]["flow"] if kind == "run_config": assert flow.run_config.labels == {"a", *labels} else: assert flow.environment.labels == {"a", *labels} assert result.exit_code == 0
tests/cli/test_register.py
2,385
Check additional labels are set if specified
44
en
0.462999
# Copyright (c) 2017-2021 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. # SPDX-License-Identifier: Apache-2.0 """ This module contains pretty-print/formatting utilities. """ from dataclasses import dataclass from typing import Optional @dataclass(frozen=True) class PrettyOptions: """ Display options for pretty-printing DAML ASTs. Instance attributes: .. attribute:: PrettyOptions.column_width The maximum number of columns to use when rendering text, or ``None`` if lines should not wrap. .. attribute:: PrettyOptions.show_hidden_types ``True`` to render built-in DAML types defined in ``DA.Internal`` or ``GHC`` and specially generated names. .. attribute:: PrettyOptions.format A string that identifies the target language to render. """ column_width: Optional[int] = None show_hidden_types: bool = False format: str = "daml"
python/dazl/pretty/options.py
953
Display options for pretty-printing DAML ASTs. Instance attributes: .. attribute:: PrettyOptions.column_width The maximum number of columns to use when rendering text, or ``None`` if lines should not wrap. .. attribute:: PrettyOptions.show_hidden_types ``True`` to render built-in DAML types defined in ``DA.Internal`` or ``GHC`` and specially generated names. .. attribute:: PrettyOptions.format A string that identifies the target language to render. This module contains pretty-print/formatting utilities. Copyright (c) 2017-2021 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. SPDX-License-Identifier: Apache-2.0
674
en
0.525521
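Since PrettyOptions above is a frozen dataclass, instances are immutable and options are fixed at construction time. A small usage sketch:

import dataclasses

from dazl.pretty.options import PrettyOptions

# Defaults: no wrapping, hidden types suppressed, DAML-style rendering.
default_opts = PrettyOptions()

# Wrap at 80 columns and also render the hidden/internal types.
wide_opts = PrettyOptions(column_width=80, show_hidden_types=True)

# Frozen dataclass: derive variants with dataclasses.replace() instead of mutating.
narrow_opts = dataclasses.replace(wide_opts, column_width=40)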
import logging import numpy as np import trimesh from src.common import compute_iou # from scipy.spatial import cKDTree from src.utils.libkdtree import KDTree from src.utils.libmesh import check_mesh_contains # Maximum values for bounding box [-0.5, 0.5]^3 EMPTY_PCL_DICT = { 'completeness': np.sqrt(3), 'accuracy': np.sqrt(3), 'completeness2': 3, 'accuracy2': 3, 'chamfer': 6, } EMPTY_PCL_DICT_NORMALS = { 'normals completeness': -1., 'normals accuracy': -1., 'normals': -1., } logger = logging.getLogger(__name__) class MeshEvaluator(object): """ Mesh evaluation class. It handles the mesh evaluation process. Args: n_points (int): number of points to be used for evaluation """ def __init__(self, n_points=100000): self.n_points = n_points def eval_mesh(self, mesh, pointcloud_tgt, normals_tgt, points_iou, occ_tgt, remove_wall=False): """ Evaluates a mesh. Args: mesh (trimesh): mesh which should be evaluated pointcloud_tgt (numpy array): target point cloud normals_tgt (numpy array): target normals points_iou (numpy_array): points tensor for IoU evaluation occ_tgt (numpy_array): GT occupancy values for IoU points """ if len(mesh.vertices) != 0 and len(mesh.faces) != 0: if remove_wall: # ! Remove walls and floors pointcloud, idx = mesh.sample(2 * self.n_points, return_index=True) eps = 0.007 x_max, x_min = pointcloud_tgt[:, 0].max(), pointcloud_tgt[:, 0].min() y_max, y_min = pointcloud_tgt[:, 1].max(), pointcloud_tgt[:, 1].min() z_max, z_min = pointcloud_tgt[:, 2].max(), pointcloud_tgt[:, 2].min() # add small offsets x_max, x_min = x_max + eps, x_min - eps y_max, y_min = y_max + eps, y_min - eps z_max, z_min = z_max + eps, z_min - eps mask_x = (pointcloud[:, 0] <= x_max) & (pointcloud[:, 0] >= x_min) mask_y = (pointcloud[:, 1] >= y_min) # floor mask_z = (pointcloud[:, 2] <= z_max) & (pointcloud[:, 2] >= z_min) mask = mask_x & mask_y & mask_z pointcloud_new = pointcloud[mask] # Subsample idx_new = np.random.randint(pointcloud_new.shape[0], size=self.n_points) pointcloud = pointcloud_new[idx_new] idx = idx[mask][idx_new] else: pointcloud, idx = mesh.sample(self.n_points, return_index=True) pointcloud = pointcloud.astype(np.float32) normals = mesh.face_normals[idx] else: pointcloud = np.empty((0, 3)) normals = np.empty((0, 3)) out_dict = self.eval_pointcloud(pointcloud, pointcloud_tgt, normals, normals_tgt) if len(mesh.vertices) != 0 and len(mesh.faces) != 0: occ = check_mesh_contains(mesh, points_iou) if occ_tgt.min() < 0: occ_tgt = (occ_tgt <= 0).astype(np.float32) out_dict['iou'] = compute_iou(occ, occ_tgt) else: out_dict['iou'] = 0. return out_dict @staticmethod def eval_pointcloud(pointcloud, pointcloud_tgt, normals=None, normals_tgt=None, thresholds=np.linspace(1. / 1000, 1, 1000)): """ Evaluates a point cloud. 
Args: pointcloud (numpy array): predicted point cloud pointcloud_tgt (numpy array): target point cloud normals (numpy array): predicted normals normals_tgt (numpy array): target normals thresholds (numpy array): threshold values for the F-score calculation """ # Return maximum losses if pointcloud is empty if pointcloud.shape[0] == 0: logger.warning('Empty pointcloud / mesh detected!') out_dict = EMPTY_PCL_DICT.copy() if normals is not None and normals_tgt is not None: out_dict.update(EMPTY_PCL_DICT_NORMALS) return out_dict pointcloud = np.asarray(pointcloud) pointcloud_tgt = np.asarray(pointcloud_tgt) # Completeness: how far are the points of the target point cloud from the predicted point cloud completeness, completeness_normals = distance_p2p(pointcloud_tgt, normals_tgt, pointcloud, normals) recall = get_threshold_percentage(completeness, thresholds) completeness2 = completeness ** 2 completeness = completeness.mean() completeness2 = completeness2.mean() completeness_normals = completeness_normals.mean() # Accuracy: how far are the points of the predicted pointcloud from the target pointcloud accuracy, accuracy_normals = distance_p2p(pointcloud, normals, pointcloud_tgt, normals_tgt) precision = get_threshold_percentage(accuracy, thresholds) accuracy2 = accuracy ** 2 accuracy = accuracy.mean() accuracy2 = accuracy2.mean() accuracy_normals = accuracy_normals.mean() # Chamfer distance chamferL2 = 0.5 * (completeness2 + accuracy2) normals_correctness = (0.5 * completeness_normals + 0.5 * accuracy_normals) chamferL1 = 0.5 * (completeness + accuracy) # F-Score F = [2 * precision[i] * recall[i] / (precision[i] + recall[i]) for i in range(len(precision))] out_dict = { 'completeness': completeness, 'accuracy': accuracy, 'normals completeness': completeness_normals, 'normals accuracy': accuracy_normals, 'normals': normals_correctness, 'completeness2': completeness2, 'accuracy2': accuracy2, 'chamfer-L2': chamferL2, 'chamfer-L1': chamferL1, 'f-score': F[9], # threshold = 1.0% 'f-score-15': F[14], # threshold = 1.5% 'f-score-20': F[19], # threshold = 2.0% } return out_dict def distance_p2p(points_src, normals_src, points_tgt, normals_tgt): """ Computes minimal distances of each point in points_src to points_tgt. Args: points_src (numpy array): source points normals_src (numpy array): source normals points_tgt (numpy array): target points normals_tgt (numpy array): target normals """ kdtree = KDTree(points_tgt) dist, idx = kdtree.query(points_src) if normals_src is not None and normals_tgt is not None: normals_src = normals_src / np.linalg.norm(normals_src, axis=-1, keepdims=True) normals_tgt = normals_tgt / np.linalg.norm(normals_tgt, axis=-1, keepdims=True) normals_dot_product = (normals_tgt[idx] * normals_src).sum(axis=-1) # Handle normals that point into wrong direction gracefully (mostly due to method not caring about this in generation) normals_dot_product = np.abs(normals_dot_product) else: normals_dot_product = np.array([np.nan] * points_src.shape[0], dtype=np.float32) return dist, normals_dot_product def distance_p2m(points, mesh): """ Compute minimal distances of each point in points to mesh. Args: points (numpy array): points array mesh (trimesh): mesh """ _, dist, _ = trimesh.proximity.closest_point(mesh, points) return dist def get_threshold_percentage(dist, thresholds): """ Evaluates a point cloud. 
Args: dist (numpy array): calculated distance thresholds (numpy array): threshold values for the F-score calculation """ in_threshold = [(dist <= t).mean() for t in thresholds] return in_threshold
src/eval.py
7,912
Mesh evaluation class. It handles the mesh evaluation process. Args: n_points (int): number of points to be used for evaluation Compute minimal distances of each point in points to mesh. Args: points (numpy array): points array mesh (trimesh): mesh Computes minimal distances of each point in points_src to points_tgt. Args: points_src (numpy array): source points normals_src (numpy array): source normals points_tgt (numpy array): target points normals_tgt (numpy array): target normals Evaluates a mesh. Args: mesh (trimesh): mesh which should be evaluated pointcloud_tgt (numpy array): target point cloud normals_tgt (numpy array): target normals points_iou (numpy_array): points tensor for IoU evaluation occ_tgt (numpy_array): GT occupancy values for IoU points Evaluates a point cloud. Args: pointcloud (numpy array): predicted point cloud pointcloud_tgt (numpy array): target point cloud normals (numpy array): predicted normals normals_tgt (numpy array): target normals thresholds (numpy array): threshold values for the F-score calculation Evaluates a point cloud. Args: dist (numpy array): calculated distance thresholds (numpy array): threshold values for the F-score calculation from scipy.spatial import cKDTree Maximum values for bounding box [-0.5, 0.5]^3 ! Remove walls and floors add small offsets floor Subsample Return maximum losses if pointcloud is empty Completeness: how far are the points of the target point cloud from the predicted point cloud Accuracy: how far are the points of the predicted pointcloud from the target pointcloud Chamfer distance F-Score threshold = 1.0% threshold = 1.5% threshold = 2.0% Handle normals that point into wrong direction gracefully (mostly due to method not caring about this in generation)
1,837
en
0.677193
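eval_pointcloud above is a staticmethod that only needs two point arrays (normals are optional), so it can be exercised without a mesh. A sketch assuming the repository's src package and its compiled KDTree/libmesh extensions are importable; the clouds are random stand-ins for a prediction and its ground truth:

import numpy as np

from src.eval import MeshEvaluator

rng = np.random.RandomState(0)
pred = rng.rand(2048, 3).astype(np.float32)   # predicted point cloud
gt = rng.rand(2048, 3).astype(np.float32)     # target point cloud

metrics = MeshEvaluator.eval_pointcloud(pred, gt)
print(metrics["chamfer-L1"], metrics["f-score"])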
# Copyright (c) 2018 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from ament_index_python.packages import get_package_share_directory from launch import LaunchDescription from launch.actions import DeclareLaunchArgument, SetEnvironmentVariable from launch.substitutions import LaunchConfiguration from launch_ros.actions import Node from nav2_common.launch import RewrittenYaml def generate_launch_description(): # Get the launch directory bringup_dir = get_package_share_directory('kohm_gazebo') namespace = LaunchConfiguration('namespace') use_sim_time = LaunchConfiguration('use_sim_time', default=True) autostart = LaunchConfiguration('autostart') params_file = LaunchConfiguration('config') default_bt_xml_filename = LaunchConfiguration('default_bt_xml_filename') map_subscribe_transient_local = LaunchConfiguration( 'map_subscribe_transient_local') lifecycle_nodes = [ 'controller_server', 'planner_server', 'recoveries_server', 'bt_navigator', 'waypoint_follower' ] remappings = [('/tf', 'tf'), ('/tf_static', 'tf_static'), ('/cmd_vel', '/nav_vel'), ('/odom', '/kohm/odom')] param_substitutions = { 'use_sim_time': use_sim_time, 'default_bt_xml_filename': default_bt_xml_filename, 'autostart': autostart, 'map_subscribe_transient_local': map_subscribe_transient_local } configured_params = RewrittenYaml(source_file=params_file, root_key=namespace, param_rewrites=param_substitutions, convert_types=True) return LaunchDescription([ # Set env var to print messages to stdout immediately SetEnvironmentVariable('RCUTILS_LOGGING_BUFFERED_STREAM', '1'), DeclareLaunchArgument('namespace', default_value='', description='Top-level namespace'), DeclareLaunchArgument( 'use_sim_time', default_value='false', description='Use simulation (Gazebo) clock if true'), DeclareLaunchArgument( 'autostart', default_value='true', description='Automatically startup the nav2 stack'), DeclareLaunchArgument( 'config', default_value=os.path.join(bringup_dir, 'config/navigation', 'nav2_params.yaml'), description='Full path to the ROS2 parameters file to use'), DeclareLaunchArgument( 'default_bt_xml_filename', default_value=os.path.join( get_package_share_directory('nav2_bt_navigator'), 'behavior_trees', 'navigate_w_replanning_and_recovery.xml'), description='Full path to the behavior tree xml file to use'), DeclareLaunchArgument( 'map_subscribe_transient_local', default_value='false', description='Whether to set the map subscriber QoS to transient local'), Node(package='nav2_controller', executable='controller_server', output='screen', parameters=[configured_params], remappings=remappings), Node(package='nav2_planner', executable='planner_server', name='planner_server', output='screen', parameters=[configured_params], remappings=remappings), Node(package='nav2_recoveries', executable='recoveries_server', name='recoveries_server', output='screen', parameters=[configured_params], remappings=remappings), Node(package='nav2_bt_navigator', executable='bt_navigator', name='bt_navigator', output='screen', 
parameters=[configured_params], remappings=remappings), Node(package='nav2_waypoint_follower', executable='waypoint_follower', name='waypoint_follower', output='screen', parameters=[configured_params], remappings=remappings), Node(package='nav2_lifecycle_manager', executable='lifecycle_manager', name='lifecycle_manager_navigation', output='screen', parameters=[{ 'use_sim_time': use_sim_time }, { 'autostart': autostart }, { 'node_names': lifecycle_nodes }]), ])
kohm_gazebo/launch/include/navigation/nav2/nav.launch.py
5,122
Copyright (c) 2018 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Get the launch directory Set env var to print messages to stdout immediately
635
en
0.859421
from setuptools import setup # def readme(): # with open('README.md') as f: # return f.read() setup( name = 'cypher', version = '0.2', author = 'shashi', author_email = 'skssunny30@gmail.com', description = 'Password Encryptor that suggests whether a password is strong or not', #long_description = readme(), long_description_content_type = 'text/markdown', url = "https://github.com/walkershashi/Cypher", classifiers = [ "Programming Language :: Python :: 3", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", ], license = 'MIT', packages = ['cypher'], )
setup.py
678
def readme(): with open('README.md') as f: return f.read() long_description = readme(),
97
en
0.643676
# Generated by Django 3.0.8 on 2020-07-01 19:16 from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Profile', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('image', models.ImageField(default='default.jpg', upload_to='profile_pics/')), ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), ]
users/migrations/0001_initial.py
778
Generated by Django 3.0.8 on 2020-07-01 19:16
45
en
0.637432
"""Source URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/3.0/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: path('', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.urls import include, path 2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) """ from django.contrib import admin from django.urls import path, include # from django.contrib.staticfiles.urls import staticfiles_urlpatterns urlpatterns = [ path('admin/', admin.site.urls), path("", include("ColorDetection.urls")), ] # urlpatterns += staticfiles_urlpatterns()
Source/urls.py
917
Source URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/3.0/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: path('', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.urls import include, path 2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) from django.contrib.staticfiles.urls import staticfiles_urlpatterns urlpatterns += staticfiles_urlpatterns()
733
en
0.586313
# Lab 2 Linear Regression import tensorflow as tf tf.set_random_seed(777) # set the seed # training data x_train = [1, 2, 3] y_train = [1, 2, 3] # the regression result should be W = 1, b = 0 # but let's train it with tensorflow and see!! # how will W and b turn out? # tf.Variable() : a variable that tensorflow trains (trainable variable) # tf.random_normal([1]) : draw 1 random number from a normal dist W = tf.Variable(tf.random_normal([1]), name='weight') b = tf.Variable(tf.random_normal([1]), name='bias') # Linear regression model hypothesis = x_train * W + b # cost/loss function (MSE) # tf.square() : tf function that squares its input # tf.reduce_mean() : tf function that computes the mean # hypothesis(y_hat), y_train(true value) cost = tf.reduce_mean(tf.square(hypothesis - y_train)) # GradientDescent # Minimize # train with learning rate=0.01 => gradient descent moves W and b a little closer to the true values each step optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01) train = optimizer.minimize(cost) # create a session object (create the tf graph object) sess = tf.Session() # initialize all tf variables sess.run(tf.global_variables_initializer()) # Fit # run the optimization 2001 times!!! for step in range(2001): sess.run(train) if step % 20 == 0: # printing every step is too much, so only print some of them # step (which iteration?), cost(mse), W(weight), b(bias) print(step, sess.run(cost), sess.run(W), sess.run(b)) # Learns best fit W:[ 1.], b:[ 0.] ''' 0 2.82329 [ 2.12867713] [-0.85235667] 20 0.190351 [ 1.53392804] [-1.05059612] 40 0.151357 [ 1.45725465] [-1.02391243] ... 1920 1.77484e-05 [ 1.00489295] [-0.01112291] 1940 1.61197e-05 [ 1.00466311] [-0.01060018] 1960 1.46397e-05 [ 1.004444] [-0.01010205] 1980 1.32962e-05 [ 1.00423515] [-0.00962736] 2000 1.20761e-05 [ 1.00403607] [-0.00917497] '''
Python/tensorflow/DeepLearningZeroToAll/ver.py/Lab02-1-linear_regression.py
1,879
Lab 2 Linear Regression set the seed training data the regression result should be W = 1, b = 0 but let's train it with tensorflow and see!! how will W and b turn out? tf.Variable() : a variable that tensorflow trains (trainable variable) tf.random_normal([1]) : draw 1 random number from a normal dist Linear regression model cost/loss function (MSE) tf.square() : tf function that squares its input tf.reduce_mean() : tf function that computes the mean hypothesis(y_hat), y_train(true value) GradientDescent Minimize train with learning rate=0.01 => gradient descent moves W and b a little closer to the true values each step create a session object (create the tf graph object) initialize all tf variables Fit run the optimization 2001 times!!! printing every step is too much, so only print some of them step (which iteration?), cost(mse), W(weight), b(bias) Learns best fit W:[ 1.], b:[ 0.]
660
ko
0.869091
""" """ import unittest from unittest.mock import Mock, patch from wheezy.core import __version__, httpclient from wheezy.core.gzip import compress class HTTPClientTestCase(unittest.TestCase): def setUp(self): self.patcher = patch.object(httpclient, "HTTPConnection") self.mock_c_class = self.patcher.start() self.headers = [("date", "Sat, 12 Oct 2013 18:29:13 GMT")] self.mock_response = Mock() self.mock_response.getheaders.return_value = self.headers self.mock_response.read.return_value = "".encode("utf-8") self.mock_c = Mock() self.mock_c.getresponse.return_value = self.mock_response self.mock_c_class.return_value = self.mock_c self.client = httpclient.HTTPClient( "http://localhost:8080/api/v1/", headers={"User-Agent": "wheezy/%s" % __version__}, ) def tearDown(self): self.patcher.stop() def test_init(self): self.mock_c_class.assert_called_once_with("localhost:8080") assert "/api/v1/" == self.client.path assert {} == self.client.cookies assert self.client.headers is None def test_get(self): self.mock_response.status = 200 assert 200 == self.client.get("auth/token") assert self.mock_c.connect.called assert self.mock_c.request.called method, path, body, headers = self.mock_c.request.call_args[0] assert "GET" == method assert "/api/v1/auth/token" == path assert "" == body assert self.client.default_headers == headers assert "gzip" == headers["Accept-Encoding"] assert "close" == headers["Connection"] assert 3 == len(headers) def test_ajax_get(self): self.client.ajax_get("auth/token") method, path, body, headers = self.mock_c.request.call_args[0] assert "XMLHttpRequest" == headers["X-Requested-With"] def test_get_query(self): self.client.get("auth/token", params={"a": ["1"]}) method, path, body, headers = self.mock_c.request.call_args[0] assert "/api/v1/auth/token?a=1" == path def test_head(self): self.client.head("auth/token") method, path, body, headers = self.mock_c.request.call_args[0] assert "HEAD" == method def test_post(self): self.client.post( "auth/token", params={ "a": ["1"], }, ) method, path, body, headers = self.mock_c.request.call_args[0] assert "POST" == method assert "/api/v1/auth/token" == path assert "a=1" == body assert "application/x-www-form-urlencoded" == headers["Content-Type"] def test_ajax_post(self): self.client.ajax_post("auth/token", params={"a": ["1"]}) assert self.mock_c.request.called method, path, body, headers = self.mock_c.request.call_args[0] assert "XMLHttpRequest" == headers["X-Requested-With"] def test_post_content(self): self.client.ajax_post( "auth/token", content_type="application/json", body='{"a":1}' ) assert self.mock_c.request.called method, path, body, headers = self.mock_c.request.call_args[0] assert "application/json" == headers["Content-Type"] assert '{"a":1}' == body def test_follow(self): self.mock_response.status = 303 self.headers.append(("location", "http://localhost:8080/error/401")) assert 303 == self.client.get("auth/token") self.client.follow() method, path, body, headers = self.mock_c.request.call_args[0] assert "GET" == method assert "/error/401" == path def test_cookies(self): self.headers.append(("set-cookie", "_x=1; path=/; httponly")) self.client.get("auth/token") assert self.client.cookies assert "1" == self.client.cookies["_x"] self.headers.append(("set-cookie", "_x=; path=/; httponly")) self.client.get("auth/token") assert not self.client.cookies def test_assert_json(self): """Expecting json response but content type is not valid.""" self.headers.append(("content-type", "text/html; charset=UTF-8")) self.client.get("auth/token") 
self.assertRaises(AssertionError, lambda: self.client.json) def test_json(self): """json response.""" patcher = patch.object(httpclient, "json_loads") mock_json_loads = patcher.start() mock_json_loads.return_value = {} self.headers.append( ("content-type", "application/json; charset=UTF-8") ) self.mock_response.read.return_value = "{}".encode("utf-8") self.client.get("auth/token") assert {} == self.client.json patcher.stop() def test_gzip(self): """Ensure gzip decompression.""" self.headers.append(("content-encoding", "gzip")) self.mock_response.read.return_value = compress("test".encode("utf-8")) self.client.get("auth/token") assert "test" == self.client.content def test_etag(self): """ETag processing.""" self.headers.append(("etag", '"ca231fbc"')) self.client.get("auth/token") method, path, body, headers = self.mock_c.request.call_args[0] assert "If-None-Match" not in headers assert '"ca231fbc"' == self.client.etags["/api/v1/auth/token"] self.client.get("auth/token") method, path, body, headers = self.mock_c.request.call_args[0] assert '"ca231fbc"' == headers["If-None-Match"]
src/wheezy/core/tests/test_httpclient.py
5,601
Expecting json response but content type is not valid. ETag processing. Ensure gzip decompression. json response.
113
en
0.674525
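The tests above pin down the HTTPClient surface: a base path plus default headers in the constructor, get/post/ajax_* helpers that return the status code, follow() for redirects, and cookies/json/content accessors. A usage sketch against a hypothetical local API:

from wheezy.core.httpclient import HTTPClient

# Hypothetical endpoint; the base path is prepended to every request path.
client = HTTPClient("http://localhost:8080/api/v1/",
                    headers={"User-Agent": "example/1.0"})

status = client.get("auth/token", params={"a": ["1"]})
if status == 200:
    print(client.json)     # parsed body, when the server answered with JSON
elif status in (301, 302, 303):
    client.follow()        # re-issue the request at the Location header

client.post("auth/token", params={"grant": ["password"]})
print(client.cookies)      # cookies captured from set-cookie response headers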