function
stringlengths
11
56k
repo_name
stringlengths
5
60
features
list
def remove_absolute_paths(line): """ Removes libraries from the line that are found under /usr """ if sys.platform == 'win32': return line elif sys.platform == 'darwin': return re.sub('/Applications/[-_a-zA-Z0-9/.]+.framework[;]?', '', line) else: return re.sub('/usr/[-_a-zA-Z0-9/]+.so[;]?', '', line)
DLR-SC/tigl
[ 186, 54, 186, 96, 1419243179 ]
def _IsDirectory(parent, item):
  """Helper that returns if parent/item is a directory."""
  candidate_path = os.path.join(parent, item)
  return tf.io.gfile.isdir(candidate_path)
tensorflow/tensorboard
[ 6136, 1581, 6136, 616, 1494878887 ]
def ListPlugins(logdir):
  """List all the plugins that have registered assets in logdir.

  If the plugins_dir does not exist, it returns an empty list. This
  maintains compatibility with old directories that have no plugins
  written.

  Args:
    logdir: A directory that was created by a TensorFlow events writer.

  Returns:
    a list of plugin names, as strings
  """
  plugins_dir = os.path.join(logdir, _PLUGINS_DIR)
  try:
    listing = tf.io.gfile.listdir(plugins_dir)
  except tf.errors.NotFoundError:
    # No plugins directory at all: nothing registered.
    return []
  # listdir() appends a trailing slash for subdirectories on some
  # filesystems; use it to skip the IsDirectory() round trip, then strip it.
  plugin_names = []
  for entry in listing:
    if entry.endswith("/") or _IsDirectory(plugins_dir, entry):
      plugin_names.append(entry.rstrip("/"))
  return plugin_names
tensorflow/tensorboard
[ 6136, 1581, 6136, 616, 1494878887 ]
def read_batch_from_dataset_tables(input_table_patterns,
                                   batch_sizes,
                                   num_instances_per_record,
                                   shuffle,
                                   num_epochs,
                                   keypoint_names_3d=None,
                                   keypoint_names_2d=None,
                                   min_keypoint_score_2d=-1.0,
                                   shuffle_buffer_size=4096,
                                   num_shards=1,
                                   shard_index=None,
                                   common_module=common,
                                   dataset_class=tf.data.TFRecordDataset,
                                   input_example_parser_creator=None,
                                   seed=None):
  """Reads data from dataset table.

  IMPORTANT: We assume that 2D keypoints from the input have been normalized
  by image size. This function reads image sizes from the input and
  denormalizes the 2D keypoints with them. No normalization is expected and
  no denormalization will be performed for 3D keypoints.

  Output tensors may include:
    keypoints: A tensor for standardized 2D keypoints. Shape = [batch_size,
      num_instances_per_record, num_keypoints_2d, 2].
    keypoint_scores: A tensor for 2D keypoint scores. Shape = [batch_size,
      num_instances_per_record, num_keypoints_2d].
    keypoints_3d: A tensor for standardized 3D keypoints. Shape = [batch_size,
      num_instances_per_record, num_keypoints_3d, 3].

  Args:
    input_table_patterns: A list of strings for the paths or pattern to input
      tables.
    batch_sizes: A list of integers for the batch sizes to read from each
      table.
    num_instances_per_record: An integer for the number of instances per
      tf.Example record.
    shuffle: A boolean for whether to shuffle batch.
    num_epochs: An integer for the number of epochs to read. Use None to read
      indefinitely, in which case remainder batch will be dropped.
    keypoint_names_3d: A list of strings for 3D keypoint names to read
      (coordinates). Use None to skip reading 3D keypoints.
    keypoint_names_2d: A list of strings for 2D keypoint names to read
      (coordinates and scores). Use None to skip reading 2D keypoints.
    min_keypoint_score_2d: A float for the minimum score below which a 2D
      keypoint is considered invalid. A negative value keeps all keypoints.
    shuffle_buffer_size: An integer for the buffer size used for shuffling. A
      large buffer size benefits shuffling quality.
    num_shards: An integer for the number of shards to divide the dataset.
      This is useful to distributed training. See `tf.data.Dataset.shard` for
      details.
    shard_index: An integer for the shard index to use. This is useful to
      distributed training, and should usually be set to the id of a
      synchronized worker. See `tf.data.Dataset.shard` for details. Note this
      must be specified if `num_shards` is greater than 1.
    common_module: A Python module that defines common constants.
    dataset_class: A dataset class to use. Must match input table type.
    input_example_parser_creator: A function handle for creating parser
      function. If None, uses the default parser creator.
    seed: An integer for random seed.

  Returns:
    outputs: A dictionary for output tensor inputs.
  """
  parser_kwargs = {
      'num_objects': num_instances_per_record,
  }
  if keypoint_names_3d:
    parser_kwargs.update({
        'keypoint_names_3d': keypoint_names_3d,
        'include_keypoint_scores_3d': False,
    })
  if keypoint_names_2d:
    parser_kwargs.update({
        'keypoint_names_2d': keypoint_names_2d,
        'include_keypoint_scores_2d': True,
    })
  if input_example_parser_creator is None:
    input_example_parser_creator = tfe_input_layer.create_tfe_parser
  parser_fn = input_example_parser_creator(
      common_module=common_module, **parser_kwargs)
  # TODO(lzyuan): consider to refactor read_batch_from_batches into other file.
  outputs = tfe_input_layer.read_batch_from_tables(
      input_table_patterns,
      batch_sizes=batch_sizes,
      drop_remainder=num_epochs is None,
      num_epochs=num_epochs,
      num_shards=num_shards,
      shard_index=shard_index,
      shuffle=shuffle,
      shuffle_buffer_size=shuffle_buffer_size,
      dataset_class=dataset_class,
      parser_fn=parser_fn,
      seed=seed)
  outputs = tf.data.make_one_shot_iterator(outputs).get_next()

  if keypoint_names_2d:
    # Since we assume 2D keypoints from the input have been normalized by image
    # size, so we need to denormalize them to restore correctly aspect ratio.
    keypoints_2d = keypoint_utils.denormalize_points_by_image_size(
        outputs[common_module.KEY_KEYPOINTS_2D],
        image_sizes=outputs[common_module.KEY_IMAGE_SIZES])
    keypoint_scores_2d = outputs[common_module.KEY_KEYPOINT_SCORES_2D]
    if min_keypoint_score_2d < 0.0:
      # Negative threshold: treat every keypoint as valid.
      keypoint_masks_2d = tf.ones_like(keypoint_scores_2d, dtype=tf.float32)
    else:
      # Mask is 1.0 where score >= threshold, else 0.0.
      keypoint_masks_2d = tf.cast(
          tf.math.greater_equal(keypoint_scores_2d, min_keypoint_score_2d),
          dtype=tf.float32)
    outputs.update({
        common_module.KEY_KEYPOINTS_2D: keypoints_2d,
        common_module.KEY_KEYPOINT_MASKS_2D: keypoint_masks_2d
    })

  return outputs
google-research/google-research
[ 27788, 6881, 27788, 944, 1538678568 ]
def get_optimizer(optimizer_type, learning_rate, **kwargs):
  """Creates optimizer with learning rate.

  Currently supported optimizers include: 'ADAGRAD', 'ADAM' and 'RMSPROP'.

  Args:
    optimizer_type: A string for the type of optimizer to choose.
    learning_rate: A float or tensor for the learning rate.
    **kwargs: A dictionary of assorted arguments used by optimizers, keyed in
      the format of '${optimizer_type}_${arg}'.

  Returns:
    optimizer: An optimizer class object.

  Raises:
    ValueError: If the optimizer type is not supported.
  """
  if optimizer_type == 'ADAGRAD':
    optimizer = tf.train.AdagradOptimizer(
        learning_rate,
        initial_accumulator_value=kwargs.get(
            'ADAGRAD_initial_accumulator_value', 0.1))
  elif optimizer_type == 'ADAM':
    optimizer = tf.train.AdamOptimizer(
        learning_rate,
        beta1=kwargs.get('ADAM_beta1', 0.9),
        beta2=kwargs.get('ADAM_beta2', 0.999),
        epsilon=kwargs.get('ADAM_epsilon', 1e-8))
  elif optimizer_type == 'RMSPROP':
    optimizer = tf.train.RMSPropOptimizer(
        learning_rate=learning_rate,
        decay=kwargs.get('RMSPROP_decay', 0.9),
        momentum=kwargs.get('RMSPROP_momentum', 0.9),
        epsilon=kwargs.get('RMSPROP_epsilon', 1e-10))
  else:
    raise ValueError('Unsupported optimizer type: `%s`.' % str(optimizer_type))
  return optimizer
google-research/google-research
[ 27788, 6881, 27788, 944, 1538678568 ]
def get_moving_average_variables_to_restore(global_step=None):
  """Gets variables to restore.

  Args:
    global_step: A tensor of global step to include. If None, do not restore
      global step variable, which is for exporting inference graph. For only
      evaluation, specifying a global step is needed.

  Returns:
    variables_to_restore: A dictionary of variables to restore.
  """
  # NOTE(review): the EMA object appears to be used only to build the
  # moving-average-name -> variable mapping; the 0.0 decay presumably does
  # not affect `variables_to_restore()` -- confirm against TF docs.
  variable_averages = tf.train.ExponentialMovingAverage(0.0, global_step)
  variables_to_restore = variable_averages.variables_to_restore()
  if global_step is not None:
    # Also restore the global step under its own op name.
    variables_to_restore[global_step.op.name] = global_step
  return variables_to_restore
google-research/google-research
[ 27788, 6881, 27788, 944, 1538678568 ]
def add_summary(scalars_to_summarize=None, histograms_to_summarize=None, images_to_summarize=None):
  """Adds summaries to the default summary collection.

  Args:
    scalars_to_summarize: A dictionary of (name, scalar tensor) tuples to
      summarize.
    histograms_to_summarize: A dictionary of (name, histogram tensor) tuples
      to summarize.
    images_to_summarize: A dictionary of (name, image tensor) tuples to
      summarize.
  """
  # Pair each optional dictionary with its summary writer and process them
  # uniformly.
  for tensors, write_summary in (
      (scalars_to_summarize, tf.summary.scalar),
      (histograms_to_summarize, tf.summary.histogram),
      (images_to_summarize, tf.summary.image),
  ):
    if tensors:
      for summary_name, tensor in tensors.items():
        write_summary(summary_name, tensor)
google-research/google-research
[ 27788, 6881, 27788, 944, 1538678568 ]
def create_dir_and_save_flags(flags_module, log_dir, json_filename): """Creates log directory and saves flags to a JSON file. Args: flags_module: An absl.flags module. log_dir: A string for log directory. json_filename: A string for output JSON file name. """ # Create log directory if necessary. if not tf.io.gfile.exists(log_dir): tf.io.gfile.makedirs(log_dir) # Save all key flags. key_flag_dict = { flag.name: flag.value for flag in flags_module.FLAGS.get_key_flags_for_module(sys.argv[0]) } json_path = os.path.join(log_dir, json_filename) with tf.io.gfile.GFile(json_path, 'w') as f: json.dump(key_flag_dict, f, indent=2, sort_keys=True)
google-research/google-research
[ 27788, 6881, 27788, 944, 1538678568 ]
def stack_embeddings(model_outputs, embedding_keys, common_module=common):
  """Selects embeddings by key and stacks them along the second-to-last axis.

  Args:
    model_outputs: A dictionary for model output tensors.
    embedding_keys: A list for enum strings for tensor keys to select.
    common_module: A Python module that defines common flags and constants.

  Returns:
    A tensor for stacked embeddings. Shape = [...,
      num_embeddings_per_instance, embedding_dim].

  Raises:
    ValueError: If an embedding key is not supported.
  """
  distribution_keys = (
      common_module.KEY_EMBEDDING_MEANS,
      common_module.KEY_EMBEDDING_STDDEVS,
  )
  selected = []
  for embedding_key in embedding_keys:
    if embedding_key in distribution_keys:
      # Means/stddevs lack the per-sample axis; add one before stacking.
      selected.append(tf.expand_dims(model_outputs[embedding_key], axis=-2))
    elif embedding_key == common_module.KEY_EMBEDDING_SAMPLES:
      selected.append(model_outputs[embedding_key])
    else:
      raise ValueError('Unsupported embedding key: `%s`.' % str(embedding_key))
  return tf.concat(selected, axis=-2)
google-research/google-research
[ 27788, 6881, 27788, 944, 1538678568 ]
def maybe_clamp(x, x_range, ignored_if_non_positive):
  """Clamps `x` to `x_range`.

  Either bound may be None to leave that side unclamped. When
  `ignored_if_non_positive` is true, a bound that is not positive is
  skipped as well.

  Raises:
    ValueError: If the lower bound exceeds the upper bound.
  """
  lower, upper = x_range
  if lower is not None and upper is not None and lower > upper:
    raise ValueError('Invalid range: %s.' % str(x_range))

  def _bound_active(bound):
    # A bound applies when present, unless non-positive bounds are ignored.
    return bound is not None and (not ignored_if_non_positive or bound > 0.0)

  if _bound_active(lower):
    x = tf.math.maximum(lower, x)
  if _bound_active(upper):
    x = tf.math.minimum(upper, x)
  return x
google-research/google-research
[ 27788, 6881, 27788, 944, 1538678568 ]
def __init__( self, tried, except_handler, break_handler, continue_handler, return_handler, source_ref,
kayhayen/Nuitka
[ 8411, 456, 8411, 240, 1366731633 ]
def computeStatement(self, trace_collection):
    """Optimize this try statement node and all of its attached handlers.

    Returns a (node, change_tags, change_description) tuple; the node may be
    the statement itself, the bare tried block, a new statements sequence, or
    None when everything was optimized away. The description may be a
    callable (see `explain` below) evaluated lazily.
    """
    # This node has many children to handle, pylint: disable=I0021,too-many-branches,too-many-locals,too-many-statements
    tried = self.subnode_tried

    except_handler = self.subnode_except_handler
    break_handler = self.subnode_break_handler
    continue_handler = self.subnode_continue_handler
    return_handler = self.subnode_return_handler

    # The tried block must be considered as a branch, if it is not empty
    # already.
    collection_start = TraceCollectionBranch(
        parent=trace_collection, name="try start"
    )

    abort_context = trace_collection.makeAbortStackContext(
        catch_breaks=break_handler is not None,
        catch_continues=continue_handler is not None,
        catch_returns=return_handler is not None,
        catch_exceptions=True,
    )

    with abort_context:
        # As a branch point for the many types of handlers.
        result = tried.computeStatementsSequence(trace_collection=trace_collection)

        # We might be done entirely already.
        if result is None:
            return None, "new_statements", "Removed now empty try statement."

        # Might be changed.
        if result is not tried:
            self.setChild("tried", result)
            tried = result

        # Collect the abort exits the tried block produced while the abort
        # stack context was active.
        break_collections = trace_collection.getLoopBreakCollections()
        continue_collections = trace_collection.getLoopContinueCollections()
        return_collections = trace_collection.getFunctionReturnCollections()
        exception_collections = trace_collection.getExceptionRaiseCollections()

    tried_may_raise = tried.mayRaiseException(BaseException)

    # Exception handling is useless if no exception is to be raised.
    if not tried_may_raise:
        if except_handler is not None:
            except_handler.finalize()
            self.clearChild("except_handler")

            trace_collection.signalChange(
                tags="new_statements",
                message="Removed useless exception handler.",
                source_ref=except_handler.source_ref,
            )

            except_handler = None

    # If tried may raise, even empty exception handler has a meaning to
    # ignore that exception.
    if tried_may_raise:
        collection_exception_handling = TraceCollectionBranch(
            parent=collection_start, name="except handler"
        )

        # When no exception exits are there, this is a problem, we just
        # found an inconsistency that is a bug.
        if not exception_collections:
            for statement in tried.subnode_statements:
                if statement.mayRaiseException(BaseException):
                    raise NuitkaOptimizationError(
                        "This statement does raise but didn't annotate an exception exit.",
                        statement,
                    )

            raise NuitkaOptimizationError(
                "Falsely assuming tried block may raise, but no statement says so.",
                tried,
            )

        collection_exception_handling.mergeMultipleBranches(exception_collections)

        if except_handler is not None:
            result = except_handler.computeStatementsSequence(
                trace_collection=collection_exception_handling
            )

            # Might be changed.
            if result is not except_handler:
                self.setChild("except_handler", result)
                except_handler = result

    # Drop the break handler when the tried block cannot break at all.
    if break_handler is not None:
        if not tried.mayBreak():
            break_handler.finalize()
            self.clearChild("break_handler")
            break_handler = None

    if break_handler is not None:
        collection_break = TraceCollectionBranch(
            parent=collection_start, name="break handler"
        )

        collection_break.mergeMultipleBranches(break_collections)

        result = break_handler.computeStatementsSequence(
            trace_collection=collection_break
        )

        # Might be changed.
        if result is not break_handler:
            self.setChild("break_handler", result)
            break_handler = result

    # Drop the continue handler when the tried block cannot continue.
    if continue_handler is not None:
        if not tried.mayContinue():
            continue_handler.finalize()
            self.clearChild("continue_handler")
            continue_handler = None

    if continue_handler is not None:
        collection_continue = TraceCollectionBranch(
            parent=collection_start, name="continue handler"
        )

        collection_continue.mergeMultipleBranches(continue_collections)

        result = continue_handler.computeStatementsSequence(
            trace_collection=collection_continue
        )

        # Might be changed.
        if result is not continue_handler:
            self.setChild("continue_handler", result)
            continue_handler = result

    # Drop the return handler when the tried block cannot return.
    if return_handler is not None:
        if not tried.mayReturn():
            return_handler.finalize()
            self.clearChild("return_handler")
            return_handler = None

    if return_handler is not None:
        collection_return = TraceCollectionBranch(
            parent=collection_start, name="return handler"
        )

        collection_return.mergeMultipleBranches(return_collections)

        result = return_handler.computeStatementsSequence(
            trace_collection=collection_return
        )

        # Might be changed.
        if result is not return_handler:
            self.setChild("return_handler", result)
            return_handler = result

    # Check for trivial return handlers that immediately return, they can
    # just be removed.
    if return_handler is not None:
        if return_handler.subnode_statements[0].isStatementReturnReturnedValue():
            return_handler.finalize()
            self.clearChild("return_handler")
            return_handler = None

    # Merge exception handler only if it is used. Empty means it is not
    # aborting, as it swallows the exception.
    if tried_may_raise and (
        except_handler is None or not except_handler.isStatementAborting()
    ):
        trace_collection.mergeBranches(
            collection_yes=collection_exception_handling, collection_no=None
        )

    # An empty exception handler means we have to swallow exception.
    if (
        (
            not tried_may_raise
            or (
                except_handler is not None
                and except_handler.subnode_statements[
                    0
                ].isStatementReraiseException()
            )
        )
        and break_handler is None
        and continue_handler is None
        and return_handler is None
    ):
        return tried, "new_statements", "Removed useless try, all handlers removed."

    tried_statements = tried.subnode_statements

    pre_statements = []

    # Peel leading statements that cannot trigger any of the remaining
    # handlers out of the tried block.
    while tried_statements:
        tried_statement = tried_statements[0]

        if tried_statement.mayRaiseException(BaseException):
            break

        if break_handler is not None and tried_statement.mayBreak():
            break

        if continue_handler is not None and tried_statement.mayContinue():
            break

        if return_handler is not None and tried_statement.mayReturn():
            break

        pre_statements.append(tried_statement)
        tried_statements = list(tried_statements)
        del tried_statements[0]

    post_statements = []

    # Trailing statements may only be peeled off when the exception handler
    # aborts, otherwise they would wrongly execute after a swallowed raise.
    if except_handler is not None and except_handler.isStatementAborting():
        while tried_statements:
            tried_statement = tried_statements[-1]

            if tried_statement.mayRaiseException(BaseException):
                break

            if break_handler is not None and tried_statement.mayBreak():
                break

            if continue_handler is not None and tried_statement.mayContinue():
                break

            if return_handler is not None and tried_statement.mayReturn():
                break

            post_statements.insert(0, tried_statement)
            tried_statements = list(tried_statements)
            del tried_statements[-1]

    if pre_statements or post_statements:
        assert tried_statements  # Should be dealt with already

        tried.setChild("statements", tried_statements)

        result = StatementsSequence(
            statements=pre_statements + [self] + post_statements,
            source_ref=self.source_ref,
        )

        def explain():
            # TODO: We probably don't want to say this for re-formulation ones.
            result = "Reduced scope of tried block."

            if pre_statements:
                result += " Leading statements at %s." % (
                    ",".join(
                        x.getSourceReference().getAsString() + "/" + str(x)
                        for x in pre_statements
                    )
                )

            if post_statements:
                result += " Trailing statements at %s." % (
                    ",".join(
                        x.getSourceReference().getAsString() + "/" + str(x)
                        for x in post_statements
                    )
                )

            return result

        return (result, "new_statements", explain)

    return self, None, None
kayhayen/Nuitka
[ 8411, 456, 8411, 240, 1366731633 ]
def mayBreak(self):
    """Whether this try construct may exit a loop via break."""
    # TODO: If we optimized return handler away, this would be not needed
    # or even non-optimal.
    if self.subnode_tried.mayBreak():
        return True

    optional_handlers = (
        self.subnode_except_handler,
        self.subnode_break_handler,
        self.subnode_continue_handler,
        self.subnode_return_handler,
    )
    return any(
        handler is not None and handler.mayBreak()
        for handler in optional_handlers
    )
kayhayen/Nuitka
[ 8411, 456, 8411, 240, 1366731633 ]
def isStatementAborting(self):
    """Aborting requires every present handler and the tried block to abort."""
    except_handler = self.subnode_except_handler
    # Without an aborting exception handler, execution can continue past us.
    if except_handler is None or not except_handler.isStatementAborting():
        return False

    for handler in (
        self.subnode_break_handler,
        self.subnode_continue_handler,
        self.subnode_return_handler,
    ):
        if handler is not None and not handler.isStatementAborting():
            return False

    return self.subnode_tried.isStatementAborting()
kayhayen/Nuitka
[ 8411, 456, 8411, 240, 1366731633 ]
def needsFrame(self):
    """Whether any handler or the tried block itself requires a frame."""
    for handler in (
        self.subnode_except_handler,
        self.subnode_break_handler,
        self.subnode_continue_handler,
        self.subnode_return_handler,
    ):
        if handler is not None and handler.needsFrame():
            return True

    return self.subnode_tried.needsFrame()
kayhayen/Nuitka
[ 8411, 456, 8411, 240, 1366731633 ]
def setUp(self):
    """Create a fresh temporary file for each test."""
    handle, filename = tempfile.mkstemp()
    self.fd = handle
    self.path = filename
rbuffat/pyidf
[ 20, 7, 20, 2, 1417292720 ]
def test_data_is_valid(self):
    """Limit Processor / Identifier Hint / Data Validation"""
    # Known-good sample data must validate cleanly.
    self.assertEqual(data_is_valid(DATA), (True, "OK"))
    # An empty dict is missing the required 'hint' property.
    self.assertEqual(data_is_valid({}),
                     (False, "At /: 'hint' is a required property"))
    # Non-dict input is rejected outright.
    self.assertRaises(ValueError, data_is_valid, 123)
perfsonar/pscheduler
[ 45, 31, 45, 115, 1452259533 ]
def upgrade():
    """Create the graph, graphcache, graph_comment and graph_label tables."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('graph',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('created_at', sa.DateTime(), nullable=True),
        sa.Column('updated_at', sa.DateTime(), nullable=True),
        sa.Column('user_id', sa.Integer(), nullable=True),
        sa.Column('sketch_id', sa.Integer(), nullable=True),
        sa.Column('name', sa.UnicodeText(), nullable=True),
        sa.Column('description', sa.UnicodeText(), nullable=True),
        sa.Column('graph_config', sa.UnicodeText(), nullable=True),
        sa.Column('graph_elements', sa.UnicodeText(), nullable=True),
        sa.Column('graph_thumbnail', sa.UnicodeText(), nullable=True),
        sa.Column('num_nodes', sa.Integer(), nullable=True),
        sa.Column('num_edges', sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(['sketch_id'], ['sketch.id'], ),
        sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_table('graphcache',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('created_at', sa.DateTime(), nullable=True),
        sa.Column('updated_at', sa.DateTime(), nullable=True),
        sa.Column('sketch_id', sa.Integer(), nullable=True),
        sa.Column('graph_plugin', sa.UnicodeText(), nullable=True),
        sa.Column('graph_config', sa.UnicodeText(), nullable=True),
        sa.Column('graph_elements', sa.UnicodeText(), nullable=True),
        sa.Column('num_nodes', sa.Integer(), nullable=True),
        sa.Column('num_edges', sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(['sketch_id'], ['sketch.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_table('graph_comment',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('created_at', sa.DateTime(), nullable=True),
        sa.Column('updated_at', sa.DateTime(), nullable=True),
        sa.Column('comment', sa.UnicodeText(), nullable=True),
        sa.Column('parent_id', sa.Integer(), nullable=True),
        sa.Column('user_id', sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(['parent_id'], ['graph.id'], ),
        sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_table('graph_label',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('created_at', sa.DateTime(), nullable=True),
        sa.Column('updated_at', sa.DateTime(), nullable=True),
        sa.Column('label', sa.Unicode(length=255), nullable=True),
        sa.Column('parent_id', sa.Integer(), nullable=True),
        sa.Column('user_id', sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(['parent_id'], ['graph.id'], ),
        sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###
google/timesketch
[ 2113, 486, 2113, 278, 1403200185 ]
def setUp(self):
    # Name of the workbook file used by this test case.
    self.set_filename('simple09.xlsx')
jmcnamara/XlsxWriter
[ 3172, 594, 3172, 18, 1357261626 ]
def upgrade():
    """Drop the team.domain column and its index."""
    # Remove the index first, then the column it covers.
    op.drop_index('ix_team_domain', table_name='team')
    op.drop_column('team', 'domain')
hasgeek/lastuser
[ 165, 31, 165, 47, 1297954711 ]
def webhook(request):
    """Acknowledge the incoming webhook request with an empty response body."""
    empty_response = HttpResponse('')
    return empty_response
rackerlabs/django-DefectDojo
[ 2681, 1254, 2681, 272, 1424368427 ]
def express_new_github(request):
    """Respond to the express-new-github endpoint with an empty body."""
    empty_response = HttpResponse('')
    return empty_response
rackerlabs/django-DefectDojo
[ 2681, 1254, 2681, 272, 1424368427 ]
def new_github(request):
    """Create a new Github configuration after verifying the API key works."""
    if request.method == 'POST':
        gform = GITHUBForm(request.POST, instance=GITHUB_Conf())
        if gform.is_valid():
            try:
                # Authenticate against Github before persisting the key.
                api_key = gform.cleaned_data.get('api_key')
                g = Github(api_key)
                user = g.get_user()
                logger.debug('Using user ' + user.login)
                new_j = gform.save(commit=False)
                new_j.api_key = api_key
                new_j.save()
                messages.add_message(request,
                                     messages.SUCCESS,
                                     'Github Configuration Successfully Created.',
                                     extra_tags='alert-success')
                return HttpResponseRedirect(reverse('github', ))
            except Exception as info:
                # Authentication (or save) failed: report and redirect back.
                logger.error(info)
                messages.add_message(request,
                                     messages.ERROR,
                                     'Unable to authenticate on github.',
                                     extra_tags='alert-danger')
                return HttpResponseRedirect(reverse('github', ))
    else:
        gform = GITHUBForm()
    # Invalid POST falls through with the bound form so errors are shown.
    add_breadcrumb(title="New Github Configuration", top_level=False, request=request)
    return render(request, 'dojo/new_github.html', {'gform': gform})
rackerlabs/django-DefectDojo
[ 2681, 1254, 2681, 272, 1424368427 ]
def github(request):
    """Render the list of configured Github connections."""
    configurations = GITHUB_Conf.objects.all()
    # Breadcrumb is top-level only when no query parameters are present.
    show_top_level = not len(request.GET)
    add_breadcrumb(title="Github List", top_level=show_top_level, request=request)
    return render(request, 'dojo/github.html', {'confs': configurations, })
rackerlabs/django-DefectDojo
[ 2681, 1254, 2681, 272, 1424368427 ]
def _insert_custom_filters(analyzer_name, filter_list, char=False):
    """
    Takes a list containing in-built filters (as strings), and the settings
    for custom filters (as dicts). Turns the dicts into instances of
    `token_filter` or `char_filter` depending on the value of the `char`
    argument.
    """
    kind = "char_filter" if char else "filter"
    default_filters = config.ES_DEFAULT_ANALYZER[kind]
    factory = char_filter if char else token_filter

    result = []
    for position, filter_def in enumerate(filter_list):
        if type(filter_def) is not dict:
            # Built-in filter referenced by name; pass through untouched.
            result.append(filter_def)
            continue
        prefix = analyzer_name
        if filter_def in default_filters:
            # detect if this filter exists in the default analyzer
            # if it does use the same name as the default
            # to avoid defining the same filter for each locale
            prefix = config.ES_DEFAULT_ANALYZER_NAME
            position = default_filters.index(filter_def)
        name = f'{prefix}_{position}_{filter_def["type"]}'
        result.append(factory(name, **filter_def))
    return result
mozilla/kitsune
[ 1110, 779, 1110, 26, 1264532037 ]
def es_analyzer_for_locale(locale, search_analyzer=False):
    """Pick an appropriate analyzer for a given locale.

    If no analyzer is defined for `locale` or the locale analyzer uses a
    plugin but using plugin is turned off from settings, return an analyzer
    named "default_sumo".
    """
    name = ""
    analyzer_config = config.ES_LOCALE_ANALYZERS.get(locale)

    # Fall back to the default analyzer when the locale has no configuration,
    # or its configuration needs an ES plugin that settings disallow.
    if not analyzer_config or (analyzer_config.get("plugin") and not settings.ES_USE_PLUGINS):
        name = config.ES_DEFAULT_ANALYZER_NAME
        analyzer_config = {}

    # use default values from ES_DEFAULT_ANALYZER if not overridden
    # using python 3.9's dict union operator
    analyzer_config = config.ES_DEFAULT_ANALYZER | analyzer_config

    # turn dictionaries into `char_filter` and `token_filter` instances
    filters = _insert_custom_filters(name or locale, analyzer_config["filter"])
    char_filters = _insert_custom_filters(
        name or locale, analyzer_config["char_filter"], char=True
    )

    if search_analyzer:
        # create a locale-specific search analyzer, even if the index-time analyzer is
        # `sumo_default`. we do this so that we can adjust the synonyms used in any locale,
        # even if it doesn't have a custom analysis chain set up, without having to re-index
        name = locale + "_search_analyzer"
        filters.append(_create_synonym_graph_filter(config.ES_ALL_SYNONYMS_NAME))
        filters.append(_create_synonym_graph_filter(locale))

    return analyzer(
        name or locale,
        tokenizer=analyzer_config["tokenizer"],
        filter=filters,
        char_filter=char_filters,
    )
mozilla/kitsune
[ 1110, 779, 1110, 26, 1264532037 ]
def get_doc_types(paths=["kitsune.search.documents"]):
    """Return all registered document types"""
    found = []
    for path in paths:
        module = importlib.import_module(path)
        for attribute_name in dir(module):
            candidate = getattr(module, attribute_name)
            # Only concrete Document subclasses count; skip the base classes.
            if not inspect.isclass(candidate):
                continue
            if not issubclass(candidate, Document) or candidate == Document:
                continue
            if candidate.__name__ == "SumoDocument":
                continue
            found.append(candidate)
    return found
mozilla/kitsune
[ 1110, 779, 1110, 26, 1264532037 ]
def index_object(doc_type_name, obj_id):
    """Index an ORM object given an object id and a document type name."""
    # Resolve the document class by its name; raises StopIteration if unknown.
    doc_type = next(cls for cls in get_doc_types() if cls.__name__ == doc_type_name)
    model = doc_type.get_model()

    try:
        obj = model.objects.get(pk=obj_id)
    except model.DoesNotExist:
        # if the row doesn't exist in DB, it may have been deleted while this job
        # was in the celery queue - this shouldn't be treated as a failure, so
        # just return
        return

    if doc_type.update_document:
        # Partial update; doc_as_upsert creates the document if missing.
        doc_type.prepare(obj).to_action("update", doc_as_upsert=True)
    else:
        doc_type.prepare(obj).to_action("index")
mozilla/kitsune
[ 1110, 779, 1110, 26, 1264532037 ]
def index_objects_bulk( doc_type_name, obj_ids, timeout=settings.ES_BULK_DEFAULT_TIMEOUT, elastic_chunk_size=settings.ES_DEFAULT_ELASTIC_CHUNK_SIZE,
mozilla/kitsune
[ 1110, 779, 1110, 26, 1264532037 ]
def remove_from_field(doc_type_name, field_name, field_value):
    """Remove a value from all documents in the doc_type's index."""
    doc_type = next(cls for cls in get_doc_types() if cls.__name__ == doc_type_name)

    # NOTE(review): field_name is interpolated directly into the painless
    # script source; callers must pass only trusted, internal field names.
    script = (
        f"if (ctx._source.{field_name}.contains(params.value)) {{"
        f"ctx._source.{field_name}.remove(ctx._source.{field_name}.indexOf(params.value))"
        f"}}"
    )

    update = UpdateByQuery(using=es7_client(), index=doc_type._index._name)
    update = update.filter("term", **{field_name: field_value})
    update = update.script(source=script, params={"value": field_value}, conflicts="proceed")

    # refresh index to ensure search fetches all matches
    doc_type._index.refresh()

    update.execute()
mozilla/kitsune
[ 1110, 779, 1110, 26, 1264532037 ]
def extractWwwTccedwardsCom(item):
    '''
    Parser for 'www.tccedwards.com'
    '''
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    # Ignore items without chapter/volume info and preview posts.
    if not (chp or vol) or 'preview' in item['title'].lower():
        return None

    tag_mappings = (
        ('PRC', 'PRC', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
    )
    for tag, series_name, tl_type in tag_mappings:
        if tag in item['tags']:
            return buildReleaseMessageWithType(
                item, series_name, vol, chp,
                frag=frag, postfix=postfix, tl_type=tl_type)
    return False
fake-name/ReadableWebProxy
[ 191, 16, 191, 3, 1437712243 ]
def balance(user):
    """Return the user's current sticker-currency balance as an int (0 if unset)."""
    stored_value = user.kv.stickers.currency.get()
    return int(stored_value) if stored_value else 0
canvasnetworks/canvas
[ 56, 15, 56, 3, 1447125133 ]
def publish_balance(user):
    """Push the user's current balance over their coin channel."""
    payload = {'balance': balance(user)}
    user.redis.coin_channel.publish(payload)
canvasnetworks/canvas
[ 56, 15, 56, 3, 1447125133 ]
def debit(user, amount):
    """Subtract `amount` from the user's balance via _adjust_balance."""
    delta = -amount
    _adjust_balance(user, delta)
canvasnetworks/canvas
[ 56, 15, 56, 3, 1447125133 ]
def credit_quest_of_the_day_completion(user):
    """Award the configured quest-of-the-day reward to the user."""
    reward_amount = knobs.REWARDS['quest_of_the_day']
    credit(user, reward_amount)
canvasnetworks/canvas
[ 56, 15, 56, 3, 1447125133 ]
def credit_personal_share(user):
    """Award the configured personal-share reward to the user."""
    reward_amount = knobs.REWARDS['personal_share']
    credit(user, reward_amount)
canvasnetworks/canvas
[ 56, 15, 56, 3, 1447125133 ]
def credit_star(user):
    """Record a received sticker and award the star reward."""
    user.kv.stickers_received.increment(1)
    reward_amount = knobs.REWARDS['star']
    credit(user, reward_amount)
canvasnetworks/canvas
[ 56, 15, 56, 3, 1447125133 ]
def hide_comment(request, comment_id):
    """Hide a comment for the requesting user and record the related metrics."""
    target = get_object_or_404(Comment, pk=comment_id)
    request.user.redis.hidden_comments.hide_comment(target)
    # Hiding counts as a downvote action for metrics purposes.
    Metrics.downvote_action.record(request, comment=target.id)
    Metrics.hide_comment.record(request)
canvasnetworks/canvas
[ 56, 15, 56, 3, 1447125133 ]
def get_scan_types(self):
    """Return the list of scan type names this parser handles."""
    supported = ["Meterian Scan"]
    return supported
rackerlabs/django-DefectDojo
[ 2681, 1254, 2681, 272, 1424368427 ]
def get_description_for_scan_types(self, scan_type):
    """Return the import description; identical for every supported scan type."""
    description = "Meterian JSON report output file can be imported."
    return description
rackerlabs/django-DefectDojo
[ 2681, 1254, 2681, 272, 1424368427 ]
def get_security_reports(self, report_json):
    """Extract the list of security reports from a Meterian report dict.

    Raises:
        ValueError: If the nested reports/security/reports path is missing.
    """
    if ("reports" in report_json
            and "security" in report_json["reports"]
            and "reports" in report_json["reports"]["security"]):
        return report_json["reports"]["security"]["reports"]
    raise ValueError("Malformed report: the security reports are missing.")
rackerlabs/django-DefectDojo
[ 2681, 1254, 2681, 272, 1424368427 ]
def get_severity(self, advisory):
    """Map a Meterian advisory to a DefectDojo severity string.

    When a CVSS score is present it is bucketed per the NVD v3 qualitative
    rating scale (https://nvd.nist.gov/vuln-metrics/cvss); otherwise the
    advisory's own severity label is used, with the placeholder labels
    SUGGEST/NA/NONE mapped to "Info".
    """
    if 'cvss' in advisory:
        cvss = advisory['cvss']
        # Open-ended upper-bound comparisons close the gaps the old paired
        # checks had: a score strictly between 3.9 and 4.0 (or 6.9/7.0,
        # 8.9/9.0) previously fell through to "Critical".
        if cvss < 4.0:
            severity = "Low"
        elif cvss < 7.0:
            severity = "Medium"
        elif cvss < 9.0:
            severity = "High"
        else:
            severity = "Critical"
    else:
        label = advisory["severity"]
        if label in ("SUGGEST", "NA", "NONE"):
            severity = "Info"
        else:
            severity = label.title()
    return severity
rackerlabs/django-DefectDojo
[ 2681, 1254, 2681, 272, 1424368427 ]
def __init__(self):
    super(MessageNode, self).__init__()
    # Set by EndParsing: the MessageClique holding the source message plus
    # any loaded translations of it.
    self.clique = None
    # Leading/trailing whitespace is kept out of the translation console;
    # it is re-attached when formatting into RC files and similar outputs.
    self.ws_at_start = ''  # Whitespace preceding the message text.
    self.ws_at_end = ''    # Whitespace following the message text.
    # Shortcut groups this message belongs to; shortcut keys (e.g. &J)
    # must be unique within each group.
    self.shortcut_groups_ = []
    # Per-string formatter controls: a space-separated list of C
    # preprocessor-style defines, e.g. "foo=5 bar baz=100" (bare names map
    # to the empty string).
    self.formatter_data = {}
    # Controls whether Translate() rewrites "..." into U+2026.
    self._replace_ellipsis = False
endlessm/chromium-browser
[ 21, 16, 21, 3, 1435959644 ]
def _IsValidAttribute(self, name, value):
    """Validate a <message> attribute name/value pair."""
    allowed = ('name', 'offset', 'translateable', 'desc', 'meaning',
               'internal_comment', 'shortcut_groups', 'custom_type',
               'validation_expr', 'use_name_for_id', 'sub_variable',
               'formatter_data')
    if name not in allowed:
        return False
    # Boolean-valued attributes accept only the literal strings
    # 'true' and 'false'.
    if name in ('translateable', 'sub_variable'):
        return value in ('true', 'false')
    return True
endlessm/chromium-browser
[ 21, 16, 21, 3, 1435959644 ]
def MandatoryAttributes(self):
    """A message node requires either a 'name' or an 'offset' attribute."""
    return ['name|offset']
endlessm/chromium-browser
[ 21, 16, 21, 3, 1435959644 ]
def HandleAttribute(self, attrib, value):
    """Record an attribute; 'formatter_data' is additionally parsed into a dict."""
    base.ContentNode.HandleAttribute(self, attrib, value)
    if attrib != 'formatter_data':
        return

    # Parse a space-separated define list, e.g. "foo=5 bar" -> {'foo': '5', 'bar': ''}.
    for define in value.split():
        key, _, val = define.partition('=')
        self.formatter_data[key] = val
endlessm/chromium-browser
[ 21, 16, 21, 3, 1435959644 ]
def IsTranslateable(self):
    """True iff the 'translateable' attribute is the literal string 'true'."""
    flag = self.attrs['translateable']
    return flag == 'true'
endlessm/chromium-browser
[ 21, 16, 21, 3, 1435959644 ]
def InstallMessage(self, message):
    """Set this node's clique from a tclib.Message instance.

    Args:
      message: A tclib.Message.
    """
    self.clique = self.UberClique().MakeClique(message, self.IsTranslateable())
    for group in self.shortcut_groups_:
        self.clique.AddToShortcutGroup(group)
    # A custom_type attribute wins over a validation_expr attribute.
    if self.attrs['custom_type'] != '':
        self.clique.SetCustomType(
            util.NewClassInstance(self.attrs['custom_type'], clique.CustomType))
    elif self.attrs['validation_expr'] != '':
        self.clique.SetCustomType(
            clique.OneOffCustomType(self.attrs['validation_expr']))
endlessm/chromium-browser
[ 21, 16, 21, 3, 1435959644 ]
def GetCliques(self):
    """Return this node's clique as a singleton list, or [] when unset."""
    if self.clique:
        return [self.clique]
    return []
endlessm/chromium-browser
[ 21, 16, 21, 3, 1435959644 ]
def NameOrOffset(self):
    """Return the 'name' attribute, falling back to 'offset'."""
    if 'name' in self.attrs:
        return self.attrs['name']
    return self.attrs['offset']
endlessm/chromium-browser
[ 21, 16, 21, 3, 1435959644 ]
def GetDataPackValue(self, lang, encoding):
    """Return a str representation for a data_pack entry."""
    # Re-attach the whitespace stripped off during parsing before encoding.
    message = self.ws_at_start + self.Translate(lang) + self.ws_at_end
    return util.Encode(message, encoding)
endlessm/chromium-browser
[ 21, 16, 21, 3, 1435959644 ]
def Construct(parent, message, name, desc='', meaning='', translateable=True): '''Constructs a new message node that is a child of 'parent', with the name, desc, meaning and translateable attributes set using the same-named parameters and the text of the message and any placeholders taken from 'message', which must be a tclib.Message() object.''' # Convert type to appropriate string translateable = 'true' if translateable else 'false' node = MessageNode() node.StartParsing('message', parent) node.HandleAttribute('name', name) node.HandleAttribute('desc', desc) node.HandleAttribute('meaning', meaning) node.HandleAttribute('translateable', translateable) items = message.GetContent() for ix, item in enumerate(items): if isinstance(item, six.string_types): # Ensure whitespace at front and back of message is correctly handled. if ix == 0: item = "'''" + item if ix == len(items) - 1: item = item + "'''" node.AppendContent(item) else: phnode = PhNode() phnode.StartParsing('ph', node) phnode.HandleAttribute('name', item.GetPresentation()) phnode.AppendContent(item.GetOriginal()) if len(item.GetExample()) and item.GetExample() != ' ': exnode = ExNode() exnode.StartParsing('ex', phnode) exnode.AppendContent(item.GetExample()) exnode.EndParsing() phnode.AddChild(exnode) phnode.EndParsing() node.AddChild(phnode) node.EndParsing() return node
endlessm/chromium-browser
[ 21, 16, 21, 3, 1435959644 ]
def _IsValidChild(self, child):
    """A placeholder may only contain <ex> example nodes."""
    return isinstance(child, ExNode)
endlessm/chromium-browser
[ 21, 16, 21, 3, 1435959644 ]
def EndParsing(self):
    """Finish parsing and enforce the single-example-per-placeholder rule."""
    super(PhNode, self).EndParsing()
    # We only allow a single example for each placeholder.
    if len(self.children) > 1:
        raise exception.TooManyExamples()
endlessm/chromium-browser
[ 21, 16, 21, 3, 1435959644 ]
def create(kernel):
    # Template for the MonCal IFS-32 starship engine component.
    # NOTE(review): sibling swganh templates typically end with
    # `return result`; the tail of this function may be truncated here —
    # confirm against the full file.
    result = Tangible()
    result.template = "object/tangible/ship/components/engine/shared_eng_moncal_ifs32.iff"
    result.attribute_template_id = 8
    result.stfName("space/space_item","eng_moncal_ifs32_n")
anhstudios/swganh
[ 62, 37, 62, 37, 1297996365 ]
def create(kernel):
    # Template for a Tatooine hanging planter static object.
    # NOTE(review): sibling templates usually end with `return result`;
    # confirm the tail against the full file.
    result = Static()
    result.template = "object/static/structure/tatooine/shared_planter_hanging_style_01.iff"
    result.attribute_template_id = -1
    result.stfName("obj_n","unknown_object")
anhstudios/swganh
[ 62, 37, 62, 37, 1297996365 ]
def create(kernel):
    # Template for the melee-speed quest weapon powerup.
    # NOTE(review): sibling templates usually end with `return result`;
    # confirm the tail against the full file.
    result = Tangible()
    result.template = "object/tangible/powerup/weapon/fs_quest_sad/shared_melee_speed_quest.iff"
    result.attribute_template_id = -1
    result.stfName("powerup_n","weapon_melee_speed_quest")
anhstudios/swganh
[ 62, 37, 62, 37, 1297996365 ]
def create(kernel):
    # Template for the NPC Sean Trenwell.
    # NOTE(review): sibling templates usually end with `return result`;
    # confirm the tail against the full file.
    result = Creature()
    result.template = "object/mobile/shared_dressed_sean_trenwell.iff"
    result.attribute_template_id = 9
    result.stfName("npc_name","sean_trenwell")
anhstudios/swganh
[ 62, 37, 62, 37, 1297996365 ]
def create(kernel):
    # Template for a generic fountain static object.
    # NOTE(review): sibling templates usually end with `return result`;
    # confirm the tail against the full file.
    result = Static()
    result.template = "object/static/structure/general/shared_fountain_generic_style_1.iff"
    result.attribute_template_id = -1
    result.stfName("obj_n","unknown_object")
anhstudios/swganh
[ 62, 37, 62, 37, 1297996365 ]
def collect(self):
    """This collector currently reports nothing."""
    return []
cloudera/hue
[ 804, 271, 804, 38, 1277149611 ]
def setUp(self):
    """Create a fresh Certificate ASN.1 spec for each test."""
    self.asn1Spec = rfc5280.Certificate()
etingof/pyasn1-modules
[ 38, 39, 38, 8, 1456353005 ]
def testOpenTypes(self):
    # Decode the PEM certificate with open types resolved, then verify it
    # round-trips and contains a permanentIdentifier otherName in its SAN.
    substrate = pem.readBase64fromText(self.cert_pem_text)
    asn1Object, rest = der_decoder(
        substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
    self.assertFalse(rest)
    self.assertTrue(asn1Object.prettyPrint())
    # Re-encoding must reproduce the original DER bytes exactly.
    self.assertEqual(substrate, der_encoder(asn1Object))

    perm_id_oid = rfc4043.id_on_permanentIdentifier
    # OID of the expected assigner authority inside the permanentIdentifier.
    assigner_oid = univ.ObjectIdentifier('1.3.6.1.4.1.22112.48')
    permanent_identifier_found = False

    for extn in asn1Object['tbsCertificate']['extensions']:
        if extn['extnID'] == rfc5280.id_ce_subjectAltName:
            extnValue, rest = der_decoder(
                extn['extnValue'],
                asn1Spec=rfc5280.SubjectAltName(),
                decodeOpenTypes=True)
            self.assertFalse(rest)
            self.assertTrue(extnValue.prettyPrint())
            self.assertEqual(extn['extnValue'], der_encoder(extnValue))
            # Scan the SAN GeneralNames for an otherName of the expected
            # type-id and assigner.
            for gn in extnValue:
                if gn['otherName'].hasValue():
                    on = gn['otherName']
                    self.assertEqual(perm_id_oid, on['type-id'])
                    self.assertEqual(assigner_oid, on['value']['assigner'])
                    permanent_identifier_found = True

    self.assertTrue(permanent_identifier_found)
etingof/pyasn1-modules
[ 38, 39, 38, 8, 1456353005 ]
def __init__(self, specification, other_id=None, other_valid=None,
             other_valid_pair=None, other_index=None):
    """Hold validity bookkeeping for an RSSI packet exchange.

    A `specification` of "rssi_broadcast" marks the packet as a measurement
    broadcast rather than one sent to the ground station.
    """
    self._is_broadcast = (specification == "rssi_broadcast")
    self._other_id = other_id
    self._other_index = other_index
    self._other_valid = other_valid
    self._other_valid_pair = other_valid_pair
timvandermeij/mobile-radio-tomography
[ 4, 1, 4, 4, 1442839964 ]
def is_broadcast(self):
    """
    True when the RSSI packet is a measurement broadcast packet rather
    than one destined for the ground station.
    """
    return self._is_broadcast
timvandermeij/mobile-radio-tomography
[ 4, 1, 4, 4, 1442839964 ]
def other_id(self):
    """
    The RF sensor ID of the other sensor in the exchange.
    """
    return self._other_id
timvandermeij/mobile-radio-tomography
[ 4, 1, 4, 4, 1442839964 ]
def other_valid(self):
    """
    Whether the other sensor's location is valid.
    """
    return self._other_valid
timvandermeij/mobile-radio-tomography
[ 4, 1, 4, 4, 1442839964 ]
def other_valid_pair(self):
    """
    Whether the other sensor received a valid measurement from the
    current sensor.
    """
    return self._other_valid_pair
timvandermeij/mobile-radio-tomography
[ 4, 1, 4, 4, 1442839964 ]
def other_index(self):
    """
    The waypoint index of the other sensor.
    """
    return self._other_index
timvandermeij/mobile-radio-tomography
[ 4, 1, 4, 4, 1442839964 ]
def __init__(self, arguments, thread_manager, location_callback, receive_callback, valid_callback):
    """
    Initialize the RF sensor.

    The `arguments` parameter is used to load settings for a specific RF
    sensor type.

    The sensor has a `thread_manager`, which is a `Thread_Manager` object
    for registering its own thread loop.

    Additionally, it requires certain callbacks. The `location_callback`
    is called whenever the sensor needs to know its own location for the
    "rssi_broadcast" and the "rssi_ground_station" private packets.
    The `receive_callback` is called whenever non-private packets are
    received and has the `Packet` object as an argument. Finally, the
    `valid_callback` is called shortly after the `location_callback` is
    called. It may be given a boolean argument indicating whether another
    RF sensor has a valid location, but only when creating the
    "rssi_ground_station" private packet. This is used by the callback
    to determine if measurements at a certain location are finished.

    Classes that inherit this base class may extend this method.
    """

    super(RF_Sensor, self).__init__("rf_sensor", thread_manager)

    # Make sure that the provided callbacks are callable.
    for callback in [location_callback, receive_callback, valid_callback]:
        if not hasattr(callback, "__call__"):
            raise TypeError("Provided RF sensor callback is not callable")

    # Load settings for a specific RF sensor type.
    if isinstance(arguments, Arguments):
        self._settings = arguments.get_settings(self.type)
    else:
        raise ValueError("'arguments' must be an instance of Arguments")

    # Initialize common member variables.
    # Identity and connection state.
    self._id = self._settings.get("rf_sensor_id")
    self._number_of_sensors = self._settings.get("number_of_sensors")
    self._address = None
    self._connection = None
    self._buffer = None
    # TDMA slot scheduling plus queues for measurement and custom packets.
    self._scheduler = TDMA_Scheduler(self._id, arguments)
    self._packets = Queue.Queue()
    self._custom_packets = Queue.Queue()
    # Lifecycle flags: joined the network, thread activated, measurements
    # started.
    self._joined = False
    self._activated = False
    self._started = False
    self._loop_delay = self._settings.get("loop_delay")
    # Callbacks described in the docstring above.
    self._location_callback = location_callback
    self._receive_callback = receive_callback
    self._valid_callback = valid_callback
timvandermeij/mobile-radio-tomography
[ 4, 1, 4, 4, 1442839964 ]
def id(self):
    """
    The ID of this RF sensor.
    """
    return self._id
timvandermeij/mobile-radio-tomography
[ 4, 1, 4, 4, 1442839964 ]
def number_of_sensors(self):
    """
    The total number of sensors in the network.
    """
    return self._number_of_sensors
timvandermeij/mobile-radio-tomography
[ 4, 1, 4, 4, 1442839964 ]
def buffer(self):
    """
    The packet buffer of this RF sensor.
    """
    return self._buffer
timvandermeij/mobile-radio-tomography
[ 4, 1, 4, 4, 1442839964 ]
def buffer(self, buffer):
    """
    Set the packet buffer; only `Buffer` objects are accepted.
    """
    if not isinstance(buffer, Buffer):
        raise ValueError("The `buffer` argument must be a `Buffer` object")

    self._buffer = buffer
timvandermeij/mobile-radio-tomography
[ 4, 1, 4, 4, 1442839964 ]
def type(self):
    """Abstract: subclasses provide the sensor type name used for settings."""
    raise NotImplementedError("Subclasses must implement the `type` property")
timvandermeij/mobile-radio-tomography
[ 4, 1, 4, 4, 1442839964 ]
def identity(self):
    """
    The identity of the RF sensor: its ID, address and network join
    status. Subclasses may extend this property.
    """
    return dict(id=self._id, address=self._address, joined=self._joined)
timvandermeij/mobile-radio-tomography
[ 4, 1, 4, 4, 1442839964 ]
def deactivate(self):
    """
    Deactivate the sensor so it stops sending and receiving packets.

    Classes that inherit this base class may extend this method.
    """
    super(RF_Sensor, self).deactivate()

    if not self._activated:
        return

    self._activated = False
    if self._connection is not None:
        # Closing the connection wakes the loop thread sooner and leaves
        # the sensor in a state where it can be reactivated cleanly.
        self._connection.close()
        self._connection = None
timvandermeij/mobile-radio-tomography
[ 4, 1, 4, 4, 1442839964 ]
def stop(self):
    """
    Stop the signal strength measurements (and start sending custom
    packets).
    """
    self._started = False
    # Clear the scheduler timestamp so a later restart resynchronizes
    # correctly.
    self._scheduler.timestamp = 0
timvandermeij/mobile-radio-tomography
[ 4, 1, 4, 4, 1442839964 ]
def discover(self, callback, required_sensors=None):
    """
    Discover RF sensors in the network.

    `callback` is invoked whenever an RF sensor reports its identity.
    `required_sensors` is an optional set restricting which sensors must
    be discovered; when omitted, all sensors are discovered. Discovery
    may still fail due to interference or disabled sensors.

    Classes that inherit this base class must extend this method.
    """
    if not hasattr(callback, "__call__"):
        raise TypeError("Provided discovery callback is not callable")

    if required_sensors is None:
        return
    if not isinstance(required_sensors, set):
        raise TypeError("Provided required sensors must be a `set`")
    # Only vehicle sensor IDs (1..number_of_sensors) are allowed.
    vehicle_ids = range(1, self._number_of_sensors + 1)
    if not required_sensors.issubset(vehicle_ids):
        raise ValueError("Provided required sensors may only contain vehicle sensors")
timvandermeij/mobile-radio-tomography
[ 4, 1, 4, 4, 1442839964 ]
def _loop(self):
    """
    Execute the sensor loop. This runs in a separate thread.
    """
    try:
        while self._activated:
            self._loop_body()
    except DisabledException:
        # Raised when the connection disappears mid-loop; exit quietly.
        return
    except:
        # Intentionally broad: any other failure interrupts the thread
        # manager so it is surfaced instead of silently killing the thread.
        super(RF_Sensor, self).interrupt()
timvandermeij/mobile-radio-tomography
[ 4, 1, 4, 4, 1442839964 ]
def _send(self): """ Send a broadcast packet to each other sensor in the network and send collected packets to the ground station. Classes that inherit this base class may extend this method. """ # Create and send the RSSI broadcast packets. for to_id in xrange(1, self._number_of_sensors + 1): if not self._scheduler.in_slot: return if to_id == self._id: continue packet = self._create_rssi_broadcast_packet(to_id) self._send_tx_frame(packet, to_id) # Send collected packets to the ground station. while not self._packets.empty() and self._scheduler.in_slot: packet = self._packets.get() self._send_tx_frame(packet, 0)
timvandermeij/mobile-radio-tomography
[ 4, 1, 4, 4, 1442839964 ]
def _send_tx_frame(self, packet, to=None):
    """
    Send a TX frame with `packet` as payload `to` another sensor.

    Classes that inherit this base class must extend this method.
    """
    if self._connection is None:
        raise DisabledException

    if not isinstance(packet, Packet):
        raise TypeError("Only `Packet` objects can be sent")
    if to is None:
        raise TypeError("Invalid destination '{}' has been provided".format(to))

    # Short delay so the hardware keeps up when this method is called in
    # quick succession.
    time.sleep(self._loop_delay)
timvandermeij/mobile-radio-tomography
[ 4, 1, 4, 4, 1442839964 ]
def _create_rssi_broadcast_packet(self, to_id):
    """
    Build a complete `Packet` per the "rssi_broadcast" specification,
    destined for the sensor with ID `to_id`.
    """
    location, waypoint_index = self._location_callback()
    request = RSSI_Validity_Request("rssi_broadcast", other_id=to_id)
    valid, valid_pair = self._valid_callback(request)

    latitude, longitude = location[0], location[1]

    packet = Packet()
    packet.set("specification", "rssi_broadcast")
    packet.set("latitude", latitude)
    packet.set("longitude", longitude)
    packet.set("valid", valid)
    packet.set("valid_pair", valid_pair)
    packet.set("waypoint_index", waypoint_index)
    packet.set("sensor_id", self._id)
    packet.set("timestamp", time.time())

    return packet
timvandermeij/mobile-radio-tomography
[ 4, 1, 4, 4, 1442839964 ]
def __init__(self, epw_file=None, output_wea_file=None):
    """Initialize the converter with the input epw and output wea paths."""
    RadianceCommand.__init__(self)

    # Path of the epw file that is to be converted to a wea file.
    self.epw_file = epw_file
    # Path of the output wea file; created from the epw path when the
    # user does not specify one.
    self.output_wea_file = output_wea_file
ladybug-analysis-tools/honeybee
[ 90, 26, 90, 38, 1451000618 ]
def epw_file(self):
    """Path of the epw file that is to be converted to a wea file."""
    return self._epw_file
ladybug-analysis-tools/honeybee
[ 90, 26, 90, 38, 1451000618 ]
def epw_file(self, value):
    """The path of the epw file that is to be converted to a wea file."""
    if value:
        self._epw_file = value
        # NOTE(review): peeks at the private `_value` of the
        # `output_wea_file` attribute to tell whether the user already
        # supplied an output path — presumably a path-descriptor object;
        # confirm against its class definition.
        if not self.output_wea_file._value:
            # Default the output next to the input, swapping the extension.
            self.output_wea_file = os.path.splitext(value)[0] + '.wea'
    else:
        self._epw_file = None
ladybug-analysis-tools/honeybee
[ 90, 26, 90, 38, 1451000618 ]
def main():
    """Archive swarming_xcode_install as an isolate and run it on the bots."""
    parser = argparse.ArgumentParser(
        description='Run swarming_xcode_install on the bots.')
    parser.add_argument('--luci_path', required=True, type=os.path.abspath)
    parser.add_argument('--swarming-server', required=True, type=str)
    parser.add_argument('--isolate-server', required=True, type=str)
    parser.add_argument('--batches', type=int, default=25,
                        help="Run xcode install in batches of size |batches|.")
    parser.add_argument('--dimension', nargs=2, action='append')
    args = parser.parse_args()

    args.dimension = args.dimension or []

    script_dir = os.path.dirname(os.path.abspath(__file__))
    # Stage the two scripts in a temp dir so they can be archived together
    # as a single isolate.
    tmp_dir = tempfile.mkdtemp(prefix='swarming_xcode')
    try:
        print('Making isolate.')
        shutil.copyfile(os.path.join(script_dir, 'swarming_xcode_install.py'),
                        os.path.join(tmp_dir, 'swarming_xcode_install.py'))
        shutil.copyfile(os.path.join(script_dir, 'mac_toolchain.py'),
                        os.path.join(tmp_dir, 'mac_toolchain.py'))

        luci_client = os.path.join(args.luci_path, 'client')
        cmd = [
            sys.executable,
            os.path.join(luci_client, 'isolateserver.py'),
            'archive',
            '-I', args.isolate_server,
            tmp_dir,
        ]
        # isolateserver prints "<hash> <path>"; the first token is the hash.
        isolate_hash = subprocess.check_output(cmd).split()[0]

        print('Running swarming_xcode_install.')
        # TODO(crbug.com/765361): The dimensions below should be updated once
        # swarming for iOS is fleshed out, likely removing xcode_version 9 and
        # adding different dimensions.
        luci_tools = os.path.join(luci_client, 'tools')
        dimensions = [['pool', 'Chrome'], ['xcode_version', '9.0']] + args.dimension
        dim_args = []
        for d in dimensions:
            dim_args += ['--dimension'] + d
        cmd = [
            sys.executable,
            os.path.join(luci_tools, 'run_on_bots.py'),
            '--swarming', args.swarming_server,
            '--isolate-server', args.isolate_server,
            '--priority', '20',
            '--batches', str(args.batches),
            '--tags', 'name:run_swarming_xcode_install',
        ] + dim_args + ['--name', 'run_swarming_xcode_install', '--',
                        isolate_hash, 'python', 'swarming_xcode_install.py',
                        ]
        subprocess.check_call(cmd)
        print('All tasks completed.')

    finally:
        # Always remove the staging directory, even when a subprocess fails.
        shutil.rmtree(tmp_dir)
    return 0
endlessm/chromium-browser
[ 21, 16, 21, 3, 1435959644 ]
def test_basic_parse(self):
    """The backend emits the expected low-level event stream."""
    stream = BytesIO(JSON)
    self.assertEqual(list(self.backend.basic_parse(stream)), JSON_EVENTS)
catapult-project/catapult
[ 1835, 570, 1835, 1039, 1429033745 ]
def test_scalar(self):
    """A bare scalar document parses as a single number event."""
    stream = BytesIO(SCALAR_JSON)
    self.assertEqual(list(self.backend.basic_parse(stream)), [('number', 0)])
catapult-project/catapult
[ 1835, 570, 1835, 1039, 1429033745 ]
def test_surrogate_pairs(self):
    """A UTF-16 surrogate pair decodes to one astral-plane character."""
    parser = self.backend.basic_parse(BytesIO(SURROGATE_PAIRS_JSON))
    event = next(parser)
    self.assertEqual(event[1], '💩')
catapult-project/catapult
[ 1835, 570, 1835, 1039, 1429033745 ]
def test_invalid(self):
    """Every malformed document raises a JSONError."""
    for json in INVALID_JSONS:
        # Yajl1 doesn't complain about additional data after the end
        # of a parsed object; skip that case for the yajl backend.
        if (self.__class__.__name__ == 'YajlParse'
                and json == YAJL1_PASSING_INVALID):
            continue
        with self.assertRaises(common.JSONError) as cm:
            list(self.backend.basic_parse(BytesIO(json)))
catapult-project/catapult
[ 1835, 570, 1835, 1039, 1429033745 ]
def test_utf8_split(self):
    """A buffer boundary inside a multi-byte char must not break decoding."""
    # Place the read-buffer boundary right after the first byte of a
    # two-byte UTF-8 sequence (0xD1 starts a Cyrillic character).
    buf_size = JSON.index(b'\xd1') + 1
    try:
        list(self.backend.basic_parse(BytesIO(JSON), buf_size=buf_size))
    except UnicodeDecodeError:
        self.fail('UnicodeDecodeError raised')
catapult-project/catapult
[ 1835, 570, 1835, 1039, 1429033745 ]
def test_boundary_lexeme(self):
    """A buffer boundary in the middle of a keyword must still parse."""
    # Split the stream one byte into the literal `false`.
    buf_size = JSON.index(b'false') + 1
    events = list(self.backend.basic_parse(BytesIO(JSON), buf_size=buf_size))
    self.assertEqual(events, JSON_EVENTS)
catapult-project/catapult
[ 1835, 570, 1835, 1039, 1429033745 ]
def test_api(self):
    """items() and parse() both yield results for a valid document."""
    self.assertTrue(list(self.backend.items(BytesIO(JSON), '')))
    self.assertTrue(list(self.backend.parse(BytesIO(JSON))))
catapult-project/catapult
[ 1835, 570, 1835, 1039, 1429033745 ]
def test_object_builder(self):
    # Feed the full event stream into ObjectBuilder and check the
    # reconstructed Python value matches the source document.
    builder = common.ObjectBuilder()
    for event, value in basic_parse(BytesIO(JSON)):
        builder.event(event, value)
    self.assertEqual(builder.value, {
        'docs': [
            {
                'string': 'строка - тест',
                'null': None,
                'boolean': False,
                'true': True,
                'integer': 0,
                'double': Decimal('0.5'),
                'exponent': 100,
                'long': 10000000000,
            },
            {
                'meta': [[1], {}],
            },
            {
                'meta': {'key': 'value'},
            },
            {
                'meta': None,
            },
        ],
    })
catapult-project/catapult
[ 1835, 570, 1835, 1039, 1429033745 ]
def test_parse(self):
    """Prefix filtering picks out exactly the nested array element."""
    wanted = 'docs.item.meta.item.item'
    values = [value
              for prefix, event, value
              in common.parse(basic_parse(BytesIO(JSON)))
              if prefix == wanted]
    self.assertEqual(values, [1])
catapult-project/catapult
[ 1835, 570, 1835, 1039, 1429033745 ]
def test_bytes(self):
    """The lexer accepts byte streams; the first token is the opening brace."""
    lexer = Lexer(BytesIO(JSON))
    token = next(lexer)[1]
    self.assertEqual(token, '{')
catapult-project/catapult
[ 1835, 570, 1835, 1039, 1429033745 ]
def _AsLong(array):
    """Cast array elements to Python ints (used to convert numpy values)."""
    return list(map(int, array))
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def _TestCase(self, shape, indices, scatter_op=state_ops.scatter_add):
    """Run a random test case with the given shape and indices.

    Args:
      shape: Shape of the parameters array.
      indices: One-dimensional array of ints, the indices of the last
        dimension of the parameters to update.
      scatter_op: ScatterAdd or ScatterSub.
    """
    super(ScatterAddSubTest, self).setUp()
    with self.cached_session(use_gpu=False):
        # Create a random parameter array of given shape
        p_init = np.random.rand(*shape).astype("f")
        # Create the shape of the update array. All dimensions except the last
        # match the parameter array, the last dimension equals the # of indices.
        vals_shape = [len(indices)] + shape[1:]
        vals_init = np.random.rand(*vals_shape).astype("f")
        v_i = [float(x) for x in vals_init.ravel()]
        p = variables.Variable(p_init)
        vals = constant_op.constant(v_i, shape=vals_shape, name="vals")
        ind = constant_op.constant(indices, dtype=dtypes.int32)
        p2 = scatter_op(p, ind, vals, name="updated_p")
        # p = init
        self.evaluate(variables.global_variables_initializer())
        # p += vals
        result = self.evaluate(p2)
    # Compute the expected 'p' using numpy operations.
    # NOTE: this mutates p_init in place via the reshaped view, building the
    # reference result to compare against TensorFlow's output.
    for i, ind in enumerate(indices):
        if scatter_op == state_ops.scatter_add:
            p_init.reshape(shape[0], -1)[ind, :] += (vals_init.reshape(
                vals_shape[0], -1)[i, :])
        else:
            p_init.reshape(shape[0], -1)[ind, :] -= (vals_init.reshape(
                vals_shape[0], -1)[i, :])
    self.assertTrue(all((p_init == result).ravel()))
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def testNoRepetitions(self):
    """Scatter updates where every index is distinct."""
    cases = [
        ([2, 2], [1]),
        ([4, 4, 4], [2, 0]),
        ([43, 20, 10, 10], [42, 5, 6, 1, 3, 5, 7, 9]),
    ]
    for shape, indices in cases:
        self._TestCase(shape, indices)
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def testWithRepetitions(self):
    """Scatter updates where some indices repeat (accumulation path)."""
    cases = [
        ([2, 2], [1, 1]),
        ([5, 3, 9, 5], [2, 0, 4, 1, 3, 1, 4, 0, 4, 3]),
        ([32, 4, 4], [31] * 8),
    ]
    for shape, indices in cases:
        self._TestCase(shape, indices)
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]