function
stringlengths
11
56k
repo_name
stringlengths
5
60
features
list
def context(self) -> metadata_context.MetadataContext:
    """Return a fresh metadata context for this experiment."""
    # Creator is resolved from the OS-level user running the process.
    current_user = getpass.getuser()
    empty_annotations = metadata_context.ContextAnnotations()
    return metadata_context.MetadataContext(
        creator=current_user, annotations=empty_annotations)
deepmind/xmanager
[ 669, 33, 669, 13, 1619456631 ]
def create_experiment(experiment_title: Optional[str] = None) -> Experiment:
    """Return a concrete Experiment instance.

    Args:
      experiment_title: Optional human-readable title for the experiment.

    Raises:
      NotImplementedError: always; a concrete backend must override this.
    """
    raise NotImplementedError
deepmind/xmanager
[ 669, 33, 669, 13, 1619456631 ]
def sort_by_system_instance_health(instances):
    """Order instances so fully healthy ones come first.

    An instance counts as healthy when both its SystemStatus and its
    InstanceStatus report "ok".  The sort is stable, so relative order
    within the healthy and unhealthy groups is preserved.
    """
    def _is_unhealthy(instance):
        status = instance.instance_status
        system_ok = status["SystemStatus"]["Status"] == "ok"
        instance_ok = status["InstanceStatus"]["Status"] == "ok"
        # False (healthy) sorts ahead of True (unhealthy).
        return not (system_ok and instance_ok)

    return sorted(instances, key=_is_unhealthy)
Yelp/paasta
[ 1644, 229, 1644, 129, 1445895353 ]
def sort_by_total_tasks(instances):
    """Order instances by their task count, busiest first."""
    def _task_count(instance):
        return instance.task_counts.count

    return sorted(instances, key=_task_count, reverse=True)
Yelp/paasta
[ 1644, 229, 1644, 129, 1445895353 ]
def _normalize_path(filename): """Normalizes a relative path to a command to spawn. Args: filename: String; relative or absolute path. Returns: The normalized path. This is necessary because in our use case, vexflow_generator_pipeline will live in a different directory from vexflow_generator, and there are symlinks to both directories in the same parent directory. Without normalization, `..` would reference the parent of the actual directory that was symlinked. With normalization, it references the directory that contains the symlink to the working directory. """ if filename.startswith('/'): return filename else: return os.path.normpath( os.path.join(os.path.dirname(sys.argv[0]), filename))
tensorflow/moonlight
[ 311, 68, 311, 25, 1523981102 ]
def __init__(self, num_pages_per_batch, vexflow_generator_command, svg_to_png_command): self.num_pages_per_batch = num_pages_per_batch self.vexflow_generator_command = vexflow_generator_command self.svg_to_png_command = svg_to_png_command
tensorflow/moonlight
[ 311, 68, 311, 25, 1523981102 ]
def get_pages_for_batch(self, batch_num, num_pages_per_batch):
  """Generates the music score pages in a single batch.

  Seeds for all batches are consecutive for determinism, starting from 0;
  each seed to the Mersenne Twister RNG should yield completely different
  output.

  Args:
    batch_num: The index of the batch to output.
    num_pages_per_batch: The number of pages to generate in each batch.

  Returns:
    A list of dicts holding `svg` (XML text) and `page` (text-format
    `tensorflow.moonlight.Staff` proto).
  """
  first_seed = batch_num * num_pages_per_batch
  seeds = range(first_seed, first_seed + num_pages_per_batch)
  return self.get_pages(seeds)
tensorflow/moonlight
[ 311, 68, 311, 25, 1523981102 ]
def _svg_to_png(self, svg):
  """Rasterizes SVG text to PNG bytes using the configured converter.

  Args:
    svg: The SVG document to convert, fed to the converter on stdin.

  Returns:
    The converter's stdout (the PNG contents).

  Raises:
    ValueError: if the converter subprocess exits with a nonzero status.
  """
  command = list(self.svg_to_png_command)
  # The executable path may be relative to the pipeline script's directory.
  command[0] = _normalize_path(command[0])
  converter = subprocess.Popen(
      command,
      stdin=subprocess.PIPE,
      stdout=subprocess.PIPE,
      stderr=subprocess.PIPE)
  stdout, stderr = converter.communicate(input=svg)
  if converter.returncode != 0:
    raise ValueError('convert failed with status %d\nstderr:\n%s' %
                     (converter.returncode, stderr))
  return stdout
tensorflow/moonlight
[ 311, 68, 311, 25, 1523981102 ]
def __init__(self, negative_example_distance, patch_width,
             negative_to_positive_example_ratio, noise_fn=lambda x: x):
  """Configures patch-example extraction.

  Args:
    negative_example_distance: Minimum x distance a negative example must
      keep from any labeled glyph (used to blacklist columns in process()).
    patch_width: Width of each extracted patch.
    negative_to_positive_example_ratio: Number of negative examples emitted
      per positive example.
    noise_fn: Callable applied to the image tensor before patch extraction;
      defaults to the identity (no noise).
  """
  self.negative_example_distance = negative_example_distance
  self.patch_width = patch_width
  self.negative_to_positive_example_ratio = negative_to_positive_example_ratio
  self.noise_fn = noise_fn
  # Beam metrics counter incremented once per emitted patch.
  self.patch_counter = Metrics.counter(self.__class__, 'num_patches')
tensorflow/moonlight
[ 311, 68, 311, 25, 1523981102 ]
def process(self, item):
  """Yields positive and negative glyph patch examples for one image.

  Args:
    item: Pair of (PNG bytes, serialized tensorflow.moonlight.Staff proto).

  Yields:
    Patch examples: sampled positives from labeled glyphs, then randomly
    chosen negatives (glyph type NONE) drawn from columns far enough from
    every labeled glyph.

  Raises:
    ValueError: if staffline extraction does not find exactly one staff.
  """
  png_contents, staff_message = item
  staff_message = musicscore_pb2.Staff.FromString(staff_message)
  with tf.Session(graph=self.omr.graph) as sess:
    # Load the image, then feed it in to apply noise.
    # Randomly rotate the image and apply noise, then dump it back out as a
    # PNG.
    # TODO(ringw): Expose a way to pass in the image contents to the main
    # OMR TF graph.
    img = tf.to_float(tf.image.decode_png(png_contents))
    # Collapse the RGB channels, if any. No-op for a monochrome PNG.
    img = tf.reduce_mean(img[:, :, :3], axis=2)[:, :, None]
    # Fix the stafflines being #999.
    img = tf.clip_by_value(img * 2. - 255., 0., 255.)
    img = self.noise_fn(img)
    # Get a 2D uint8 image array for OMR.
    noisy_image = sess.run(
        tf.cast(tf.clip_by_value(img, 0, 255)[:, :, 0], tf.uint8))
    # Run OMR staffline extraction and staffline distance estimation. The
    # stafflines are used to get patches from the generated image.
    stafflines, image_staffline_distance = sess.run(
        [
            self.omr.glyph_classifier.staffline_extractor.extract_staves(),
            self.omr.structure.staff_detector.staffline_distance[0]
        ],
        feed_dict={self.omr.image: noisy_image})
  if stafflines.shape[0] != 1:
    raise ValueError('Image should have one detected staff, got shape: ' +
                     str(stafflines.shape))
  positive_example_count = 0
  # Boolean mask of (staffline position, x) cells where a negative example
  # may still be taken; cells are cleared as glyphs/edges rule them out.
  negative_example_whitelist = np.ones(
      (stafflines.shape[staffline_extractor.Axes.POSITION],
       stafflines.shape[staffline_extractor.Axes.X]), np.bool)
  # Blacklist xs where the patch would overlap with either end.
  negative_example_overlap_from_end = max(self.negative_example_distance,
                                          self.patch_width // 2)
  negative_example_whitelist[:, :negative_example_overlap_from_end] = False
  negative_example_whitelist[:, -negative_example_overlap_from_end - 1:] = False
  all_positive_examples = []
  for glyph in staff_message.glyph:
    staffline = staffline_extractor.get_staffline(glyph.y_position,
                                                  stafflines[0])
    # Scale the glyph's x coordinate into extracted-staffline pixel space.
    glyph_x = int(
        round(glyph.x *
              self.omr.glyph_classifier.staffline_extractor.target_height /
              (image_staffline_distance * self.omr.glyph_classifier
               .staffline_extractor.staffline_distance_multiple)))
    example = self._create_example(staffline, glyph_x, glyph.type)
    if example:
      staffline_index = staffline_extractor.y_position_to_index(
          glyph.y_position,
          stafflines.shape[staffline_extractor.Axes.POSITION])
      # Blacklist the area adjacent to the glyph, even if it is not selected
      # as a positive example below.
      negative_example_whitelist[staffline_index,
                                 glyph_x - self.negative_example_distance +
                                 1:glyph_x +
                                 self.negative_example_distance] = False
      all_positive_examples.append(example)
      positive_example_count += 1
  for example in random.sample(all_positive_examples,
                               POSITIVE_EXAMPLES_PER_IMAGE):
    yield example
    self.patch_counter.inc()
  negative_example_staffline, negative_example_x = np.where(
      negative_example_whitelist)
  # Draw negatives in proportion to the positives found on this image.
  negative_example_inds = np.random.choice(
      len(negative_example_staffline),
      int(positive_example_count * self.negative_to_positive_example_ratio))
  negative_example_staffline = negative_example_staffline[
      negative_example_inds]
  negative_example_x = negative_example_x[negative_example_inds]
  for staffline, x in zip(negative_example_staffline, negative_example_x):
    example = self._create_example(stafflines[0, staffline], x,
                                   musicscore_pb2.Glyph.NONE)
    assert example, 'Negative example xs should always be in range'
    yield example
    self.patch_counter.inc()
tensorflow/moonlight
[ 311, 68, 311, 25, 1523981102 ]
def __init__( self, code: LiquidityPoolWithdrawResultCode,
StellarCN/py-stellar-base
[ 328, 158, 328, 6, 1443187561 ]
def pack(self, packer: Packer) -> None:
    """Serialize this result into the XDR packer.

    The result code is always written; the SUCCESS arm carries no
    additional payload, so nothing more follows it.
    """
    self.code.pack(packer)
    if self.code == LiquidityPoolWithdrawResultCode.LIQUIDITY_POOL_WITHDRAW_SUCCESS:
        return
StellarCN/py-stellar-base
[ 328, 158, 328, 6, 1443187561 ]
def unpack(cls, unpacker: Unpacker) -> "LiquidityPoolWithdrawResult":
    """Deserialize a LiquidityPoolWithdrawResult from the XDR unpacker."""
    code = LiquidityPoolWithdrawResultCode.unpack(unpacker)
    # Every result arm currently carries only the code, so the SUCCESS
    # special-case and the fallthrough construct the same object.
    return cls(code=code)
StellarCN/py-stellar-base
[ 328, 158, 328, 6, 1443187561 ]
def from_xdr_bytes(cls, xdr: bytes) -> "LiquidityPoolWithdrawResult":
    """Decode an instance from raw XDR bytes."""
    return cls.unpack(Unpacker(xdr))
StellarCN/py-stellar-base
[ 328, 158, 328, 6, 1443187561 ]
def from_xdr(cls, xdr: str) -> "LiquidityPoolWithdrawResult":
    """Decode an instance from a base64-encoded XDR string."""
    raw_bytes = base64.b64decode(xdr.encode())
    return cls.from_xdr_bytes(raw_bytes)
StellarCN/py-stellar-base
[ 328, 158, 328, 6, 1443187561 ]
def get_model_evaluation_text_classification_sample( project: str, model_id: str, evaluation_id: str, location: str = "us-central1", api_endpoint: str = "us-central1-aiplatform.googleapis.com",
googleapis/python-aiplatform
[ 306, 205, 306, 52, 1600875819 ]
def forwards(self, orm):
    """Create the cloudslave schema: Cloud, KeyPair, Reservation, Slave."""
    # Adding model 'Cloud'
    db.create_table(u'cloudslave_cloud', (
        ('name', self.gf('django.db.models.fields.CharField')(max_length=200, primary_key=True)),
        ('endpoint', self.gf('django.db.models.fields.URLField')(max_length=200)),
        ('user_name', self.gf('django.db.models.fields.CharField')(max_length=200)),
        ('tenant_name', self.gf('django.db.models.fields.CharField')(max_length=200)),
        ('password', self.gf('django.db.models.fields.CharField')(max_length=200)),
        ('region', self.gf('django.db.models.fields.CharField')(max_length=200, blank=True)),
        ('flavor_name', self.gf('django.db.models.fields.CharField')(max_length=200)),
        ('image_name', self.gf('django.db.models.fields.CharField')(max_length=200)),
        ('floating_ip_mode', self.gf('django.db.models.fields.SmallIntegerField')(default=0)),
    ))
    db.send_create_signal(u'cloudslave', ['Cloud'])

    # Adding model 'KeyPair'
    db.create_table(u'cloudslave_keypair', (
        (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('cloud', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cloudslave.Cloud'])),
        ('name', self.gf('django.db.models.fields.CharField')(max_length=200)),
        ('private_key', self.gf('django.db.models.fields.TextField')()),
        ('public_key', self.gf('django.db.models.fields.TextField')()),
    ))
    db.send_create_signal(u'cloudslave', ['KeyPair'])

    # Adding unique constraint on 'KeyPair', fields ['cloud', 'name']
    # (note: the DB column for the FK is cloud_id)
    db.create_unique(u'cloudslave_keypair', ['cloud_id', 'name'])

    # Adding model 'Reservation'
    db.create_table(u'cloudslave_reservation', (
        (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('cloud', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cloudslave.Cloud'])),
        ('number_of_slaves', self.gf('django.db.models.fields.IntegerField')()),
        ('state', self.gf('django.db.models.fields.SmallIntegerField')(default=0)),
        ('timeout', self.gf('django.db.models.fields.DateTimeField')()),
    ))
    db.send_create_signal(u'cloudslave', ['Reservation'])

    # Adding model 'Slave'
    db.create_table(u'cloudslave_slave', (
        ('name', self.gf('django.db.models.fields.CharField')(max_length=200, primary_key=True)),
        ('reservation', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cloudslave.Reservation'])),
        ('cloud_node_id', self.gf('django.db.models.fields.CharField')(max_length=200)),
        ('state', self.gf('django.db.models.fields.CharField')(max_length=15, null=True, blank=True)),
    ))
    db.send_create_signal(u'cloudslave', ['Slave'])
sorenh/python-django-cloudslave
[ 2, 3, 2, 1, 1371802788 ]
def index_json(request):
    """Return the full metric index as JSON(P).

    Any backend failure is logged and answered with an empty list and
    HTTP 500 rather than propagating the exception.
    """
    queryParams = request.GET.copy()
    queryParams.update(request.POST)

    # Resolve jsonp before the try block: in the original it was assigned
    # inside, so an exception raised before that assignment would make the
    # except path fail with a NameError instead of returning the 500.
    jsonp = queryParams.get('jsonp', False)
    try:
        requestContext = {
            'localOnly': int(queryParams.get('local', 0)),
            'forwardHeaders': extractForwardHeaders(request),
        }

        matches = STORE.get_index(requestContext)
    except Exception:
        log.exception()
        return json_response_for(request, [], jsonp=jsonp, status=500)

    return json_response_for(request, matches, jsonp=jsonp)
criteo-forks/graphite-web
[ 1, 4, 1, 2, 1372665938 ]
def find_view(request):
    """View for finding metrics matching a given pattern.

    Reads the pattern and rendering options from GET/POST parameters and
    returns the matches in one of several formats (treejson, nodelist,
    pickle, msgpack, json, completer).
    """
    queryParams = request.GET.copy()
    queryParams.update(request.POST)

    format = queryParams.get('format', 'treejson')
    leaves_only = queryParamAsInt(queryParams, 'leavesOnly', 0)
    local_only = queryParamAsInt(queryParams, 'local', 0)
    wildcards = queryParamAsInt(queryParams, 'wildcards', 0)

    # Time zone: default from settings, optionally overridden per request.
    # An unknown zone name is silently ignored (keeps the default).
    tzinfo = pytz.timezone(settings.TIME_ZONE)
    if 'tz' in queryParams:
        try:
            value = queryParams['tz']
            tzinfo = pytz.timezone(value)
        except pytz.UnknownTimeZoneError:
            pass
        except Exception as e:
            raise InputParameterError(
                'Invalid value {value} for param tz: {err}'
                .format(value=repr(value), err=str(e)))

    if 'now' in queryParams:
        try:
            value = queryParams['now']
            now = parseATTime(value, tzinfo)
        except Exception as e:
            raise InputParameterError(
                'Invalid value {value} for param now: {err}'
                .format(value=repr(value), err=str(e)))
    else:
        now = datetime.now(tzinfo)

    # from/until of -1 (or absent) mean "unbounded"; they are mapped to
    # None further down before being passed to STORE.find.
    if 'from' in queryParams and str(queryParams['from']) != '-1':
        try:
            value = queryParams['from']
            fromTime = int(epoch(parseATTime(value, tzinfo, now)))
        except Exception as e:
            raise InputParameterError(
                'Invalid value {value} for param from: {err}'
                .format(value=repr(value), err=str(e)))
    else:
        fromTime = -1

    if 'until' in queryParams and str(queryParams['until']) != '-1':
        try:
            value = queryParams['until']
            untilTime = int(epoch(parseATTime(value, tzinfo, now)))
        except Exception as e:
            raise InputParameterError(
                'Invalid value {value} for param until: {err}'
                .format(value=repr(value), err=str(e)))
    else:
        untilTime = -1

    nodePosition = queryParamAsInt(queryParams, 'position', -1)
    jsonp = queryParams.get('jsonp', False)
    forward_headers = extractForwardHeaders(request)

    if fromTime == -1:
        fromTime = None
    if untilTime == -1:
        untilTime = None

    automatic_variants = queryParamAsInt(queryParams, 'automatic_variants', 0)

    try:
        query = str(queryParams['query'])
    except KeyError:
        raise InputParameterError('Missing required parameter \'query\'')

    if query == '':
        raise InputParameterError('Required parameter \'query\' is empty')

    # base_path is the query's parent path, used as the id prefix in the
    # tree output.
    if '.' in query:
        base_path = query.rsplit('.', 1)[0] + '.'
    else:
        base_path = ''

    if format == 'completer':
        query = query.replace('..', '*.')
        if not query.endswith('*'):
            query += '*'

        if automatic_variants:
            # Turn comma-lists into brace-expansion groups, e.g. a,b -> {a,b}.
            query_parts = query.split('.')
            for i, part in enumerate(query_parts):
                if ',' in part and '{' not in part:
                    query_parts[i] = '{%s}' % part
            query = '.'.join(query_parts)

    try:
        matches = list(STORE.find(
            query, fromTime, untilTime,
            local=local_only,
            headers=forward_headers,
            leaves_only=leaves_only,
        ))
    except Exception:
        log.exception()
        raise

    log.info('find_view query=%s local_only=%s matches=%d' % (query, local_only, len(matches)))
    matches.sort(key=lambda node: node.name)
    log.info("received remote find request: pattern=%s from=%s until=%s local_only=%s format=%s matches=%d" % (query, fromTime, untilTime, local_only, format, len(matches)))

    if format == 'treejson':
        profile = getProfile(request)
        content = tree_json(matches, base_path, wildcards=profile.advancedUI or wildcards)
        response = json_response_for(request, content, jsonp=jsonp)

    elif format == 'nodelist':
        content = nodes_by_position(matches, nodePosition)
        response = json_response_for(request, content, jsonp=jsonp)

    elif format == 'pickle':
        content = pickle_nodes(matches)
        response = HttpResponse(content, content_type='application/pickle')

    elif format == 'msgpack':
        content = msgpack_nodes(matches)
        response = HttpResponse(content, content_type='application/x-msgpack')

    elif format == 'json':
        content = json_nodes(matches)
        response = json_response_for(request, content, jsonp=jsonp)

    elif format == 'completer':
        results = []
        for node in matches:
            node_info = dict(path=node.path, name=node.name, is_leaf=str(int(node.is_leaf)))
            if not node.is_leaf:
                # Branches get a trailing dot so the completer keeps drilling.
                node_info['path'] += '.'
            results.append(node_info)

        if len(results) > 1 and wildcards:
            wildcardNode = {'name': '*'}
            results.append(wildcardNode)

        response = json_response_for(request, {'metrics': results}, jsonp=jsonp)

    else:
        return HttpResponseBadRequest(
            content="Invalid value for 'format' parameter",
            content_type='text/plain')

    response['Pragma'] = 'no-cache'
    response['Cache-Control'] = 'no-cache'
    return response
criteo-forks/graphite-web
[ 1, 4, 1, 2, 1372665938 ]
def get_metadata_view(request):
    """Look up one metadata key for each requested metric via CarbonLink.

    Per-metric failures are logged and reported inline as an ``error``
    entry rather than failing the whole response.
    """
    queryParams = request.GET.copy()
    queryParams.update(request.POST)

    key = queryParams.get('key')
    jsonp = queryParams.get('jsonp', False)

    results = {}
    for metric in queryParams.getlist('metric'):
        try:
            results[metric] = CarbonLink.get_metadata(metric, key)
        except Exception:
            log.exception()
            results[metric] = dict(error="Unexpected error occurred in CarbonLink.get_metadata(%s, %s)" % (metric, key))

    return json_response_for(request, results, jsonp=jsonp)
criteo-forks/graphite-web
[ 1, 4, 1, 2, 1372665938 ]
def tree_json(nodes, base_path, wildcards=False):
    """Render find() results as the tree JSON structure used by the UI.

    Branch entries are listed before leaf entries; duplicate node names are
    emitted only once.  When ``wildcards`` is set and more than one node
    matched, a leading ``*`` entry covering all matches is prepended.
    """
    branch_template = {
        'allowChildren': 1,
        'expandable': 1,
        'leaf': 0,
    }
    leaf_template = {
        'allowChildren': 0,
        'expandable': 0,
        'leaf': 1,
    }

    results = []

    # Add a wildcard node if appropriate.
    if wildcards and len(nodes) > 1:
        wildcard_entry = {'text': '*', 'id': base_path + '*'}
        if any(not n.is_leaf for n in nodes):
            wildcard_entry.update(branch_template)
        else:
            wildcard_entry.update(leaf_template)
        results.append(wildcard_entry)

    seen_names = set()
    branch_entries = []
    leaf_entries = []
    for node in nodes:
        if node.name in seen_names:
            continue
        seen_names.add(node.name)

        entry = {
            'text': unquote_plus(str(node.name)),
            'id': base_path + str(node.name),
        }
        if node.is_leaf:
            entry.update(leaf_template)
            leaf_entries.append(entry)
        else:
            entry.update(branch_template)
            branch_entries.append(entry)

    results.extend(branch_entries)
    results.extend(leaf_entries)
    return results
criteo-forks/graphite-web
[ 1, 4, 1, 2, 1372665938 ]
def pickle_nodes(nodes):
    """Pickle node descriptors: path, leaf flag, and intervals for leaves."""
    descriptors = []
    for node in nodes:
        descriptor = {'path': node.path, 'is_leaf': node.is_leaf}
        if node.is_leaf:
            descriptor['intervals'] = node.intervals
        descriptors.append(descriptor)

    # protocol=-1: always use the highest pickle protocol available.
    return pickle.dumps(descriptors, protocol=-1)
criteo-forks/graphite-web
[ 1, 4, 1, 2, 1372665938 ]
def json_nodes(nodes):
    """Describe nodes as JSON-ready dicts, sorted by metric path."""
    descriptors = []
    for node in nodes:
        descriptor = {'path': node.path, 'is_leaf': node.is_leaf}
        if node.is_leaf:
            descriptor['intervals'] = [
                {'start': interval.start, 'end': interval.end}
                for interval in node.intervals
            ]
        descriptors.append(descriptor)

    descriptors.sort(key=lambda item: item['path'])
    return descriptors
criteo-forks/graphite-web
[ 1, 4, 1, 2, 1372665938 ]
def _update_user_info(self):
    # Normalize the floating-IPs flag from its string form in the user's
    # cloud configuration ('true'/'false', defaulting to 'false') into a
    # real boolean under the connector-qualified key.
    self.user_info[self.construct_key(FLOATING_IPS_KEY)] = \
        util.str2bool(self.user_info.get_cloud(FLOATING_IPS_KEY, 'false'))
slipstream/SlipStreamConnectors
[ 3, 2, 3, 6, 1409573258 ]
def tearDown(self):
    """Remove the exported environment variables and drop client refs.

    Uses pop() with a default so teardown never raises KeyError if setUp
    failed before exporting the variables (or teardown runs twice).
    """
    os.environ.pop('SLIPSTREAM_CONNECTOR_INSTANCE', None)
    os.environ.pop('SLIPSTREAM_BOOTSTRAP_BIN', None)
    self.client = None
    self.ch = None
slipstream/SlipStreamConnectors
[ 3, 2, 3, 6, 1409573258 ]
def xtest_2_buildImage(self):
    # Disabled integration test (xtest_ prefix keeps it out of the runner):
    # starts the machine node in image-build mode and verifies that a new
    # image id is produced.
    self.client.run_category = RUN_CATEGORY_IMAGE

    self.client.start_nodes_and_clients(self.user_info,
                                        {NodeDecorator.MACHINE_NAME: self.node_instance})
    instances_details = self.client.get_vms_details()
    assert instances_details
    assert instances_details[0][NodeDecorator.MACHINE_NAME]

    new_id = self.client.build_image(self.user_info, self.node_instance)
    assert new_id
slipstream/SlipStreamConnectors
[ 3, 2, 3, 6, 1409573258 ]
def xtest_4_start_image_with_extra_disk(self):
    # Disabled integration test: starts a node defined with an additional
    # disk and checks, via the provider's instance listing, that a volume
    # is actually attached before stopping the deployment.
    self.client.run_category = RUN_CATEGORY_IMAGE

    self.client.start_nodes_and_clients(self.user_info,
                                        {NodeDecorator.MACHINE_NAME: self.node_instance_with_additional_disk})
    vm_id = self.client.get_vms()[NodeDecorator.MACHINE_NAME]['id']
    nodes = self.client.list_instances()
    assert searchInObjectList(nodes, 'id', vm_id).extra['volumes_attached']
    self.client.stop_deployment()
slipstream/SlipStreamConnectors
[ 3, 2, 3, 6, 1409573258 ]
def compress(directory: str, destination: str) -> subprocess.CompletedProcess:
    """Compress the given directory into the tarfile at destination."""
    # Note: we don't use the stdlib's "tarfile" module for performance
    # reasons. While it can handle creating tarfiles, it's not as efficient
    # on large numbers of files like the tar command.
    tar_argv = [
        "tar",
        "--create",
        f"--directory={directory}",
        f"--file={destination}",
        # Treat a colon in the filename as part of the filename,
        # not an indication of a remote file. This is required in order to
        # handle canonical filenames on Windows.
        "--force-local",
        "--gzip",
        "--verbose",
        ".",
    ]
    return shell.run(tar_argv, hide_output=False)
googleapis/docuploader
[ 8, 8, 8, 11, 1551827709 ]
def __init__(self, api_client=None): if api_client is None: api_client = ApiClient() self.api_client = api_client
killbill/killbill-client-python
[ 10, 16, 10, 2, 1339972407 ]
def add_role_definition_with_http_info(self, body=None, created_by=None, **kwargs):  # noqa: E501
    """Add a new role definition.  # noqa: E501

    Synchronous by default; pass async=True to get a request thread back.
    >>> thread = api.add_role_definition_with_http_info(body, created_by, async=True)
    >>> result = thread.get()

    :param async bool
    :param RoleDefinition body: (required)
    :param Str created_by: (required)
    :param Str reason:
    :param Str comment:
    :return: RoleDefinition
             If the method is called asynchronously,
             returns the request thread.
    """
    all_params = ['body', 'created_by', 'reason', 'comment']  # noqa: E501
    all_params.append('async')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Validate kwargs against the accepted parameter names.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method add_role_definition" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'body' is set
    if ('body' not in params or
            params['body'] is None):
        raise ValueError("Missing the required parameter `body` when calling `add_role_definition`")  # noqa: E501
    # verify the required parameter 'created_by' is set
    if ('created_by' not in params or
            params['created_by'] is None):
        raise ValueError("Missing the required parameter `created_by` when calling `add_role_definition`")  # noqa: E501

    collection_formats = {}

    path_params = {}

    query_params = []

    # Audit headers are forwarded only when supplied.
    header_params = {}
    if 'created_by' in params:
        header_params['X-Killbill-CreatedBy'] = params['created_by']  # noqa: E501
    if 'reason' in params:
        header_params['X-Killbill-Reason'] = params['reason']  # noqa: E501
    if 'comment' in params:
        header_params['X-Killbill-Comment'] = params['comment']  # noqa: E501

    form_params = []
    local_var_files = {}

    body_params = None
    if 'body' in params:
        body_params = params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['basicAuth']  # noqa: E501

    # NOTE(review): 'async' became a reserved keyword in Python 3.7, so this
    # generated module only parses on Python 2 / <=3.6 — confirm the target
    # runtime (newer generators rename the parameter to 'async_req').
    return self.api_client.call_api(
        '/1.0/kb/security/roles', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='RoleDefinition',  # noqa: E501
        auth_settings=auth_settings,
        async=params.get('async'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
killbill/killbill-client-python
[ 10, 16, 10, 2, 1339972407 ]
def add_user_roles_with_http_info(self, body=None, created_by=None, **kwargs):  # noqa: E501
    """Add a new user with roles (to make api requests).  # noqa: E501

    Synchronous by default; pass async=True to get a request thread back.
    >>> thread = api.add_user_roles_with_http_info(body, created_by, async=True)
    >>> result = thread.get()

    :param async bool
    :param UserRoles body: (required)
    :param Str created_by: (required)
    :param Str reason:
    :param Str comment:
    :return: UserRoles
             If the method is called asynchronously,
             returns the request thread.
    """
    all_params = ['body', 'created_by', 'reason', 'comment']  # noqa: E501
    all_params.append('async')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Validate kwargs against the accepted parameter names.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method add_user_roles" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'body' is set
    if ('body' not in params or
            params['body'] is None):
        raise ValueError("Missing the required parameter `body` when calling `add_user_roles`")  # noqa: E501
    # verify the required parameter 'created_by' is set
    if ('created_by' not in params or
            params['created_by'] is None):
        raise ValueError("Missing the required parameter `created_by` when calling `add_user_roles`")  # noqa: E501

    collection_formats = {}

    path_params = {}

    query_params = []

    # Audit headers are forwarded only when supplied.
    header_params = {}
    if 'created_by' in params:
        header_params['X-Killbill-CreatedBy'] = params['created_by']  # noqa: E501
    if 'reason' in params:
        header_params['X-Killbill-Reason'] = params['reason']  # noqa: E501
    if 'comment' in params:
        header_params['X-Killbill-Comment'] = params['comment']  # noqa: E501

    form_params = []
    local_var_files = {}

    body_params = None
    if 'body' in params:
        body_params = params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['basicAuth']  # noqa: E501

    # NOTE(review): 'async' became a reserved keyword in Python 3.7, so this
    # generated module only parses on Python 2 / <=3.6 — confirm the target
    # runtime (newer generators rename the parameter to 'async_req').
    return self.api_client.call_api(
        '/1.0/kb/security/users', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='UserRoles',  # noqa: E501
        auth_settings=auth_settings,
        async=params.get('async'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
killbill/killbill-client-python
[ 10, 16, 10, 2, 1339972407 ]
def get_current_user_permissions_with_http_info(self, **kwargs):  # noqa: E501
    """List user permissions.  # noqa: E501

    Synchronous by default; pass async=True to get a request thread back.
    >>> thread = api.get_current_user_permissions_with_http_info(async=True)
    >>> result = thread.get()

    :param async bool
    :return: List[Str]
             If the method is called asynchronously,
             returns the request thread.
    """
    all_params = []  # noqa: E501
    all_params.append('async')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Validate kwargs against the accepted parameter names.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_current_user_permissions" % key
            )
        params[key] = val
    del params['kwargs']

    collection_formats = {}

    path_params = {}

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['basicAuth']  # noqa: E501

    # NOTE(review): 'async' became a reserved keyword in Python 3.7, so this
    # generated module only parses on Python 2 / <=3.6 — confirm the target
    # runtime (newer generators rename the parameter to 'async_req').
    return self.api_client.call_api(
        '/1.0/kb/security/permissions', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='List[Str]',  # noqa: E501
        auth_settings=auth_settings,
        async=params.get('async'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
killbill/killbill-client-python
[ 10, 16, 10, 2, 1339972407 ]
def get_current_user_subject_with_http_info(self, **kwargs):  # noqa: E501
    """Get user information.  # noqa: E501

    Synchronous by default; pass async=True to get a request thread back.
    >>> thread = api.get_current_user_subject_with_http_info(async=True)
    >>> result = thread.get()

    :param async bool
    :return: Subject
             If the method is called asynchronously,
             returns the request thread.
    """
    all_params = []  # noqa: E501
    all_params.append('async')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Validate kwargs against the accepted parameter names.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_current_user_subject" % key
            )
        params[key] = val
    del params['kwargs']

    collection_formats = {}

    path_params = {}

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['basicAuth']  # noqa: E501

    # NOTE(review): 'async' became a reserved keyword in Python 3.7, so this
    # generated module only parses on Python 2 / <=3.6 — confirm the target
    # runtime (newer generators rename the parameter to 'async_req').
    return self.api_client.call_api(
        '/1.0/kb/security/subject', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='Subject',  # noqa: E501
        auth_settings=auth_settings,
        async=params.get('async'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
killbill/killbill-client-python
[ 10, 16, 10, 2, 1339972407 ]
def get_role_definition_with_http_info(self, role=None, **kwargs):  # noqa: E501
    """Get role definition.  # noqa: E501

    Synchronous by default; pass async=True to get a request thread back.
    >>> thread = api.get_role_definition_with_http_info(role, async=True)
    >>> result = thread.get()

    :param async bool
    :param Str role: (required)
    :return: RoleDefinition
             If the method is called asynchronously,
             returns the request thread.
    """
    all_params = ['role']  # noqa: E501
    all_params.append('async')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Validate kwargs against the accepted parameter names.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_role_definition" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'role' is set
    if ('role' not in params or
            params['role'] is None):
        raise ValueError("Missing the required parameter `role` when calling `get_role_definition`")  # noqa: E501

    # Generated pattern check; `.*` matches any value.
    if 'role' in params and not re.search('.*', params['role']):  # noqa: E501
        raise ValueError("Invalid value for parameter `role` when calling `get_role_definition`, must conform to the pattern `/.*/`")  # noqa: E501
    collection_formats = {}

    path_params = {}
    if 'role' in params:
        path_params['role'] = params['role']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['basicAuth']  # noqa: E501

    # NOTE(review): 'async' became a reserved keyword in Python 3.7, so this
    # generated module only parses on Python 2 / <=3.6 — confirm the target
    # runtime (newer generators rename the parameter to 'async_req').
    return self.api_client.call_api(
        '/1.0/kb/security/roles/{role}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='RoleDefinition',  # noqa: E501
        auth_settings=auth_settings,
        async=params.get('async'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
killbill/killbill-client-python
[ 10, 16, 10, 2, 1339972407 ]
# NOTE(review): swagger-codegen generated client method; `async` keyword
# restricts this to Python 2 / <=3.6.
def get_user_roles_with_http_info(self, username=None, **kwargs):  # noqa: E501
    """Get roles associated to a user.

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.get_user_roles_with_http_info(username, async=True)
    >>> result = thread.get()

    :param async bool
    :param Str username: (required)
    :return: UserRoles
             If the method is called asynchronously,
             returns the request thread.
    """

    # Accepted keywords: declared parameters plus standard request options.
    all_params = ['username']  # noqa: E501
    all_params.append('async')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    # Reject unknown keyword arguments early.
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_user_roles" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'username' is set
    if ('username' not in params or
            params['username'] is None):
        raise ValueError("Missing the required parameter `username` when calling `get_user_roles`")  # noqa: E501

    # Pattern check generated from the OpenAPI spec (`.*` matches anything).
    if 'username' in params and not re.search('.*', params['username']):  # noqa: E501
        raise ValueError("Invalid value for parameter `username` when calling `get_user_roles`, must conform to the pattern `/.*/`")  # noqa: E501
    collection_formats = {}

    path_params = {}
    if 'username' in params:
        path_params['username'] = params['username']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['basicAuth']  # noqa: E501

    return self.api_client.call_api(
        '/1.0/kb/security/users/{username}/roles', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='UserRoles',  # noqa: E501
        auth_settings=auth_settings,
        async=params.get('async'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
killbill/killbill-client-python
[ 10, 16, 10, 2, 1339972407 ]
# NOTE(review): swagger-codegen generated client method; `async` keyword
# restricts this to Python 2 / <=3.6.
def invalidate_user_with_http_info(self, username=None, created_by=None, **kwargs):  # noqa: E501
    """Invalidate an existing user.

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.invalidate_user_with_http_info(username, created_by, async=True)
    >>> result = thread.get()

    :param async bool
    :param Str username: (required)
    :param Str created_by: (required)
    :param Str reason:
    :param Str comment:
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """

    # Accepted keywords: declared parameters plus standard request options.
    all_params = ['username', 'created_by', 'reason', 'comment']  # noqa: E501
    all_params.append('async')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    # Reject unknown keyword arguments early.
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method invalidate_user" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'username' is set
    if ('username' not in params or
            params['username'] is None):
        raise ValueError("Missing the required parameter `username` when calling `invalidate_user`")  # noqa: E501
    # verify the required parameter 'created_by' is set
    if ('created_by' not in params or
            params['created_by'] is None):
        raise ValueError("Missing the required parameter `created_by` when calling `invalidate_user`")  # noqa: E501

    # Pattern check generated from the OpenAPI spec (`.*` matches anything).
    if 'username' in params and not re.search('.*', params['username']):  # noqa: E501
        raise ValueError("Invalid value for parameter `username` when calling `invalidate_user`, must conform to the pattern `/.*/`")  # noqa: E501
    collection_formats = {}

    path_params = {}
    if 'username' in params:
        path_params['username'] = params['username']  # noqa: E501

    query_params = []

    # Kill Bill audit headers: who made the change, and why.
    header_params = {}
    if 'created_by' in params:
        header_params['X-Killbill-CreatedBy'] = params['created_by']  # noqa: E501
    if 'reason' in params:
        header_params['X-Killbill-Reason'] = params['reason']  # noqa: E501
    if 'comment' in params:
        header_params['X-Killbill-Comment'] = params['comment']  # noqa: E501

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['basicAuth']  # noqa: E501

    return self.api_client.call_api(
        '/1.0/kb/security/users/{username}', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type=None,  # noqa: E501
        auth_settings=auth_settings,
        async=params.get('async'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
killbill/killbill-client-python
[ 10, 16, 10, 2, 1339972407 ]
# NOTE(review): swagger-codegen generated client method; `async` keyword
# restricts this to Python 2 / <=3.6.
def update_role_definition_with_http_info(self, body=None, created_by=None, **kwargs):  # noqa: E501
    """Update a role definition.

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.update_role_definition_with_http_info(body, created_by, async=True)
    >>> result = thread.get()

    :param async bool
    :param RoleDefinition body: (required)
    :param Str created_by: (required)
    :param Str reason:
    :param Str comment:
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """

    # Accepted keywords: declared parameters plus standard request options.
    all_params = ['body', 'created_by', 'reason', 'comment']  # noqa: E501
    all_params.append('async')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    # Reject unknown keyword arguments early.
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method update_role_definition" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'body' is set
    if ('body' not in params or
            params['body'] is None):
        raise ValueError("Missing the required parameter `body` when calling `update_role_definition`")  # noqa: E501
    # verify the required parameter 'created_by' is set
    if ('created_by' not in params or
            params['created_by'] is None):
        raise ValueError("Missing the required parameter `created_by` when calling `update_role_definition`")  # noqa: E501

    collection_formats = {}

    path_params = {}

    query_params = []

    # Kill Bill audit headers: who made the change, and why.
    header_params = {}
    if 'created_by' in params:
        header_params['X-Killbill-CreatedBy'] = params['created_by']  # noqa: E501
    if 'reason' in params:
        header_params['X-Killbill-Reason'] = params['reason']  # noqa: E501
    if 'comment' in params:
        header_params['X-Killbill-Comment'] = params['comment']  # noqa: E501

    form_params = []
    local_var_files = {}

    body_params = None
    if 'body' in params:
        body_params = params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['basicAuth']  # noqa: E501

    return self.api_client.call_api(
        '/1.0/kb/security/roles', 'PUT',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type=None,  # noqa: E501
        auth_settings=auth_settings,
        async=params.get('async'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
killbill/killbill-client-python
[ 10, 16, 10, 2, 1339972407 ]
# NOTE(review): swagger-codegen generated client method; `async` keyword
# restricts this to Python 2 / <=3.6.
def update_user_password_with_http_info(self, username=None, body=None, created_by=None, **kwargs):  # noqa: E501
    """Update a user password.

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.update_user_password_with_http_info(username, body, created_by, async=True)
    >>> result = thread.get()

    :param async bool
    :param Str username: (required)
    :param UserRoles body: (required)
    :param Str created_by: (required)
    :param Str reason:
    :param Str comment:
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """

    # Accepted keywords: declared parameters plus standard request options.
    all_params = ['username', 'body', 'created_by', 'reason', 'comment']  # noqa: E501
    all_params.append('async')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    # Reject unknown keyword arguments early.
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method update_user_password" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'username' is set
    if ('username' not in params or
            params['username'] is None):
        raise ValueError("Missing the required parameter `username` when calling `update_user_password`")  # noqa: E501
    # verify the required parameter 'body' is set
    if ('body' not in params or
            params['body'] is None):
        raise ValueError("Missing the required parameter `body` when calling `update_user_password`")  # noqa: E501
    # verify the required parameter 'created_by' is set
    if ('created_by' not in params or
            params['created_by'] is None):
        raise ValueError("Missing the required parameter `created_by` when calling `update_user_password`")  # noqa: E501

    # Pattern check generated from the OpenAPI spec (`.*` matches anything).
    if 'username' in params and not re.search('.*', params['username']):  # noqa: E501
        raise ValueError("Invalid value for parameter `username` when calling `update_user_password`, must conform to the pattern `/.*/`")  # noqa: E501
    collection_formats = {}

    path_params = {}
    if 'username' in params:
        path_params['username'] = params['username']  # noqa: E501

    query_params = []

    # Kill Bill audit headers: who made the change, and why.
    header_params = {}
    if 'created_by' in params:
        header_params['X-Killbill-CreatedBy'] = params['created_by']  # noqa: E501
    if 'reason' in params:
        header_params['X-Killbill-Reason'] = params['reason']  # noqa: E501
    if 'comment' in params:
        header_params['X-Killbill-Comment'] = params['comment']  # noqa: E501

    form_params = []
    local_var_files = {}

    body_params = None
    if 'body' in params:
        body_params = params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['basicAuth']  # noqa: E501

    return self.api_client.call_api(
        '/1.0/kb/security/users/{username}/password', 'PUT',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type=None,  # noqa: E501
        auth_settings=auth_settings,
        async=params.get('async'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
killbill/killbill-client-python
[ 10, 16, 10, 2, 1339972407 ]
def parse_record(parent_field, record): field_names = [] field_values = [] for name in record: if isinstance(record[name], dict): new_parent_field = parent_field.copy() new_parent_field.append(name) names = " ".join(new_parent_field) if "converted" in record[name]: field_names.append(names) field_values.append(record[name]["converted"]) elif "raw" in record[name]: field_names.append(names) field_values.append(record[name]["raw"]) else: # Must have subgroups: sub_names, sub_values = parse_record(new_parent_field, record[name]) field_names.extend(sub_names) field_values.extend(sub_values) else: raise Exception("Unhandled parsing") return field_names, field_values
aerospike/aerospike-admin
[ 37, 20, 37, 9, 1409778886 ]
def get_separate_output(in_str=""): _regex = re.compile(r"((?<=^{).*?(?=^}))", re.MULTILINE | re.DOTALL) out = re.findall(_regex, in_str) ls = [] for item in out: item = remove_escape_sequence(item) item = "{" + item + "}" ls.append(json.loads(item)) return ls
aerospike/aerospike-admin
[ 37, 20, 37, 9, 1409778886 ]
def get_merged_header(*lines): h = [[_f for _f in _h.split(" ") if _f] for _h in lines] header = [] if len(h) == 0 or any(len(h[i]) != len(h[i + 1]) for i in range(len(h) - 1)): return header for idx in range(len(h[0])): header_i = h[0][idx] for jdx in range(len(h) - 1): if h[jdx + 1][idx] == ".": break header_i += " " + h[jdx + 1][idx] header.append(header_i) return header
aerospike/aerospike-admin
[ 37, 20, 37, 9, 1409778886 ]
def check_for_subset_in_list_of_lists(actual_list, list_of_expected_sub_lists): for expected_list in list_of_expected_sub_lists: if check_for_subset(actual_list, expected_list): return True return False
aerospike/aerospike-admin
[ 37, 20, 37, 9, 1409778886 ]
def setUp(self): super(CorruptTest, self).setUp() self.signal_lt = lt.select(self.input_lt, {'mask': util.slice_1(False)}) rc = lt.ReshapeCoder(['z', 'channel', 'mask'], ['channel']) self.corrupt_coded_lt = augment.corrupt(0.1, 0.05, 0.1, rc.encode(self.signal_lt)) self.corrupt_lt = rc.decode(self.corrupt_coded_lt)
google/in-silico-labeling
[ 248, 83, 248, 4, 1513032868 ]
def test(self): self.assertEqual(self.corrupt_lt.axes, self.signal_lt.axes) self.save_images('corrupt', [self.get_images('', self.corrupt_lt)]) self.assert_images_near('corrupt', True)
google/in-silico-labeling
[ 248, 83, 248, 4, 1513032868 ]
def setUp(self): super(AugmentTest, self).setUp() ap = augment.AugmentParameters(0.1, 0.05, 0.1) self.input_augment_lt, self.target_augment_lt = augment.augment( ap, self.input_lt, self.target_lt)
google/in-silico-labeling
[ 248, 83, 248, 4, 1513032868 ]
def test(self): self.assertEqual(self.input_augment_lt.axes, self.input_lt.axes) self.assertEqual(self.target_augment_lt.axes, self.target_lt.axes) self.save_images('augment', [ self.get_images('input_', self.input_augment_lt), self.get_images('target_', self.target_augment_lt) ]) self.assert_images_near('augment', True)
google/in-silico-labeling
[ 248, 83, 248, 4, 1513032868 ]
def load_data(): """Loads the data files needed for the module. Could be used by processes that care about controlling when the data is loaded. Otherwise, data will be loaded the first time it's needed. """ global _data_is_loaded if not _data_is_loaded: _load_property_value_aliases_txt() _load_unicode_data_txt() _load_scripts_txt() _load_script_extensions_txt() _load_blocks_txt() _load_derived_age_txt() _load_derived_core_properties_txt() _load_bidi_mirroring_txt() _load_indic_data() _load_emoji_data() _load_emoji_sequence_data() _load_unicode_emoji_variants() _load_variant_data() _load_proposed_emoji_data() _load_nameslist_data() _load_namealiases_data() _data_is_loaded = True
googlefonts/nototools
[ 247, 96, 247, 108, 1433465447 ]
def _char_to_int(char): """Converts a potential character to its scalar value.""" if type(char) in [str, type(u"")]: return ord(char) else: return char
googlefonts/nototools
[ 247, 96, 247, 108, 1433465447 ]
def chars_with_property(propname): load_data() return frozenset(_core_properties_data[propname])
googlefonts/nototools
[ 247, 96, 247, 108, 1433465447 ]
def combining(char): """Returns the canonical combining class of a character.""" load_data() char = _char_to_int(char) try: return _combining_class_data[char] except KeyError: return 0
googlefonts/nototools
[ 247, 96, 247, 108, 1433465447 ]
def canonical_decomposition(char): """Returns the canonical decomposition of a character as a Unicode string.""" load_data() char = _char_to_int(char) try: return _decomposition_data[char] except KeyError: return u""
googlefonts/nototools
[ 247, 96, 247, 108, 1433465447 ]
def script_extensions(char): """Returns the script extensions property of a character. The return value is a frozenset of four-letter script codes. """ load_data() char = _char_to_int(char) try: return _script_extensions_data[char] except KeyError: return frozenset([script(char)])
googlefonts/nototools
[ 247, 96, 247, 108, 1433465447 ]
def block_range(block): """Returns a range (first, last) of the named block.""" load_data() return _block_range[block]
googlefonts/nototools
[ 247, 96, 247, 108, 1433465447 ]
def block_names(): """Returns the names of the blocks in block order.""" load_data() return _block_names[:]
googlefonts/nototools
[ 247, 96, 247, 108, 1433465447 ]
def is_default_ignorable(char): """Returns true if the character has the Default_Ignorable property.""" load_data() if isinstance(char, (str, unicode)): char = ord(char) return char in _core_properties_data["Default_Ignorable_Code_Point"]
googlefonts/nototools
[ 247, 96, 247, 108, 1433465447 ]
def is_defined(char): """Returns true if the character is defined in the Unicode Standard.""" load_data() if isinstance(char, (str, unicode)): char = ord(char) return char in _defined_characters
googlefonts/nototools
[ 247, 96, 247, 108, 1433465447 ]
def mirrored(char): """Returns 1 if the characters is bidi mirroring, 0 otherwise.""" load_data() if isinstance(char, (str, unicode)): char = ord(char) return int(char in _bidi_mirroring_characters)
googlefonts/nototools
[ 247, 96, 247, 108, 1433465447 ]
def mirrored_chars(): return frozenset(_bidi_mirroring_glyph_data.keys())
googlefonts/nototools
[ 247, 96, 247, 108, 1433465447 ]
def indic_syllabic_category(char): """Returns the Indic syllabic category of a character.""" load_data() if isinstance(char, (str, unicode)): char = ord(char) try: return _bidi_syllabic_data[char] except KeyError: return "Other"
googlefonts/nototools
[ 247, 96, 247, 108, 1433465447 ]
def defined_characters(version=None, scr=None): """Returns the set of all defined characters in the Unicode Standard.""" load_data() # handle common error where version is passed as string, the age test # will always pass if version is not None: version = float(version) try: return _DEFINED_CHARACTERS_CACHE[(version, scr)] except KeyError: pass characters = _defined_characters if version is not None: characters = { char for char in characters if age(char) is not None and float(age(char)) <= version } if scr is not None: characters = { char for char in characters if script(char) == scr or scr in script_extensions(char) } characters = frozenset(characters) _DEFINED_CHARACTERS_CACHE[(version, scr)] = characters return characters
googlefonts/nototools
[ 247, 96, 247, 108, 1433465447 ]
def _folded_script_name(script_name): """Folds a script name to its bare bones for comparison.""" # string.translate is changed by codecs, the method no longer takes two # parameters and so script_name.translate(None, "'-_ ") fails to compile return _strip_re.sub("", script_name).lower()
googlefonts/nototools
[ 247, 96, 247, 108, 1433465447 ]
def human_readable_script_name(code): """Returns a human-readable name for the script code.""" try: return _HARD_CODED_HUMAN_READABLE_SCRIPT_NAMES[code] except KeyError: load_data() return _script_code_to_long_name[code]
googlefonts/nototools
[ 247, 96, 247, 108, 1433465447 ]
def open_unicode_data_file(data_file_name): """Opens a Unicode data file. Args: data_file_name: A string containing the filename of the data file. Returns: A file handle to the data file. """ filename = path.join(_DATA_DIR_PATH, data_file_name) return codecs.open(filename, "r", "utf-8")
googlefonts/nototools
[ 247, 96, 247, 108, 1433465447 ]
def _parse_semicolon_separated_data(input_data): """Reads semicolon-separated Unicode data from an input string. Reads a Unicode data file already imported into a string. The format is the Unicode data file format with a list of values separated by semicolons. The number of the values on different lines may be different from another. Example source data file: http://www.unicode.org/Public/UNIDATA/PropertyValueAliases.txt Example data: sc; Cher ; Cherokee sc; Copt ; Coptic ; Qaac Args: input_data: An input string, containing the data. Returns: A list of lists corresponding to the input data, with each individual list containing the values as strings. For example: [['sc', 'Cher', 'Cherokee'], ['sc', 'Copt', 'Coptic', 'Qaac']] """ all_data = [] for line in input_data.split("\n"): line = line.split("#", 1)[0].strip() # remove the comment if not line: continue fields = line.split(";") fields = [field.strip() for field in fields] all_data.append(fields) return all_data
googlefonts/nototools
[ 247, 96, 247, 108, 1433465447 ]
def _load_scripts_txt(): """Load script property from Scripts.txt.""" with open_unicode_data_file("Scripts.txt") as scripts_txt: script_ranges = _parse_code_ranges(scripts_txt.read()) for first, last, script_name in script_ranges: folded_script_name = _folded_script_name(script_name) script = _folded_script_name_to_code[folded_script_name] for char_code in range(first, last + 1): _script_data[char_code] = script
googlefonts/nototools
[ 247, 96, 247, 108, 1433465447 ]
def _load_blocks_txt(): """Load block name from Blocks.txt.""" with open_unicode_data_file("Blocks.txt") as blocks_txt: block_ranges = _parse_code_ranges(blocks_txt.read()) for first, last, block_name in block_ranges: _block_names.append(block_name) _block_range[block_name] = (first, last) for character_code in range(first, last + 1): _block_data[character_code] = block_name
googlefonts/nototools
[ 247, 96, 247, 108, 1433465447 ]
def _load_derived_core_properties_txt(): """Load derived core properties from Blocks.txt.""" with open_unicode_data_file("DerivedCoreProperties.txt") as dcp_txt: dcp_ranges = _parse_code_ranges(dcp_txt.read()) for first, last, property_name in dcp_ranges: for character_code in range(first, last + 1): try: _core_properties_data[property_name].add(character_code) except KeyError: _core_properties_data[property_name] = {character_code}
googlefonts/nototools
[ 247, 96, 247, 108, 1433465447 ]
def _load_bidi_mirroring_txt(): """Load bidi mirroring glyphs from BidiMirroring.txt.""" with open_unicode_data_file("BidiMirroring.txt") as bidi_mirroring_txt: bmg_pairs = _parse_semicolon_separated_data(bidi_mirroring_txt.read()) for char, bmg in bmg_pairs: char = int(char, 16) bmg = int(bmg, 16) _bidi_mirroring_glyph_data[char] = bmg
googlefonts/nototools
[ 247, 96, 247, 108, 1433465447 ]
def _load_emoji_data(): """Parse the new draft format of emoji-data.txt""" global _presentation_default_emoji, _presentation_default_text global _emoji, _emoji_modifier_base if _presentation_default_emoji: return emoji_sets = { "Emoji": set(), "Emoji_Presentation": set(), "Emoji_Modifier": set(), "Emoji_Modifier_Base": set(), "Extended_Pictographic": set(), "Emoji_Component": set(), } set_names = "|".join(sorted(emoji_sets.keys())) line_re = re.compile( r"([0-9A-F]{4,6})(?:\.\.([0-9A-F]{4,6}))?\s*;\s*" r"(%s)\s*#.*$" % set_names ) with open_unicode_data_file("emoji-data.txt") as f: for line in f: line = line.strip() if not line or line[0] == "#": continue m = line_re.match(line) if not m: raise ValueError('Did not match "%s"' % line) start = int(m.group(1), 16) end = start if not m.group(2) else int(m.group(2), 16) emoji_set = emoji_sets.get(m.group(3)) emoji_set.update(range(start, end + 1)) # allow our legacy use of handshake and wrestlers with skin tone modifiers emoji_sets["Emoji_Modifier_Base"] |= {0x1F91D, 0x1F93C} _presentation_default_emoji = frozenset(emoji_sets["Emoji_Presentation"]) _presentation_default_text = frozenset( emoji_sets["Emoji"] - emoji_sets["Emoji_Presentation"] ) _emoji_modifier_base = frozenset(emoji_sets["Emoji_Modifier_Base"]) _emoji = frozenset(emoji_sets["Emoji"]) # we have no real use for the 'Emoji_Regional_Indicator' and # 'Emoji_Component' sets, and they're not documented, so ignore them. # The regional indicator set is just the 26 regional indicator # symbols, and the component set is number sign, asterisk, ASCII digits, # the regional indicators, and the skin tone modifiers.
googlefonts/nototools
[ 247, 96, 247, 108, 1433465447 ]
def _read_emoji_data(lines): """Parse lines of emoji data and return a map from sequence to tuples of name, age, type.""" line_re = re.compile( r"(?:([0-9A-F ]+)|([0-9A-F]+\.\.[0-9A-F]+)\s*);\s*(%s)\s*;\s*([^#]*)\s*#\s*E?(\d+\.\d+).*" % "|".join(EMOJI_SEQUENCE_TYPES) ) result = {} for line in lines: line = line.strip() if not line or line[0] == "#": continue m = line_re.match(line) if not m: raise ValueError('"%s" Did not match "%s"' % (line_re.pattern, line)) # group 1 is a sequence, group 2 is a range of single character sequences. # we can't process the range because we don't have a name for each character # in the range, so skip it and get these emoji and their names from # UnicodeData if m.group(2): continue seq_type = m.group(3).strip().encode("ascii") seq = tuple(int(s, 16) for s in m.group(1).split()) name = m.group(4).strip() age = float(m.group(5)) result[seq] = (name, age, seq_type) return result
googlefonts/nototools
[ 247, 96, 247, 108, 1433465447 ]
def _read_emoji_test_data(data_string): """Parse the emoji-test.txt data. This has names of proposed emoji that are not yet in the full Unicode data file. Returns a list of tuples of sequence, group, subgroup, name. The data is a string.""" line_re = re.compile( r"([0-9a-fA-F ]+)\s*;\s*(%s)\s*#\s*(?:[^\s]+)\s+(.*)\s*" % "|".join(_EMOJI_QUAL_TYPES) ) result = [] GROUP_PREFIX = "# group: " SUBGROUP_PREFIX = "# subgroup: " group = None subgroup = None for line in data_string.splitlines(): line = line.strip() if not line: continue if line[0] == "#": if line.startswith(GROUP_PREFIX): group = line[len(GROUP_PREFIX) :].strip().encode("ascii") subgroup = None elif line.startswith(SUBGROUP_PREFIX): subgroup = line[len(SUBGROUP_PREFIX) :].strip().encode("ascii") continue m = line_re.match(line) if not m: raise ValueError('Did not match "%s" in emoji-test.txt' % line) if m.group(2) not in ["component", "fully-qualified"]: # We only want component and fully-qualified sequences, as those are # 'canonical'. 'minimally-qualified' apparently just leave off the # FEOF emoji presentation tag, we already assume these. # Information for the unqualified sequences should be # redundant. At the moment we don't verify this so if the file # changes we won't catch that. continue seq = tuple(int(s, 16) for s in m.group(1).split()) name = m.group(3).strip() if not (group and subgroup): raise Exception( "sequence %s missing group or subgroup" % seq_to_string(seq) ) result.append((seq, group, subgroup, name)) return result
googlefonts/nototools
[ 247, 96, 247, 108, 1433465447 ]
def _get_order_patch(order_text, seq_to_name): """Create a mapping from a key sequence to a list of sequence, name tuples. This will be used to insert additional sequences after the key sequence in the order data. seq_to_name is a mapping from new sequence to name, so the names don't have to be duplicated in the order data.""" patch_map = {} patch_key = None patch_list = None def get_sequence(seqtext): return tuple([int(s, 16) for s in seqtext.split()]) for line in order_text.splitlines(): ix = line.find("#") if ix >= 0: line = line[:ix] line = line.strip() if not line: continue if line.startswith("-"): if patch_list and patch_key: patch_map[patch_key] = patch_list patch_key = get_sequence(line[1:]) patch_list = [] else: seq = get_sequence(line) name = seq_to_name[seq] # exception if seq is not in sequence_text patch_list.append((seq, name)) if patch_list and patch_key: patch_map[patch_key] = patch_list return patch_map
googlefonts/nototools
[ 247, 96, 247, 108, 1433465447 ]
def _apply_order_patch(patch, group_list): """patch is a map from a key sequence to list of sequence, name pairs, and group_list is an ordered list of sequence, group, subgroup, name tuples. Iterate through the group list appending each item to a new list, and after appending an item matching a key sequence, also append all of its associated sequences in order using the same group and subgroup. Return the new list. If there are any unused patches, raise an exception.""" result = [] patched = set() for t in group_list: result.append(t) if t[0] in patch: patched.add(t[0]) _, group, subgroup, _ = t for seq, name in patch[t[0]]: result.append((seq, group, subgroup, name)) unused = set(patch.keys()) - patched if unused: raise Exception( "%d unused patch%s\n %s: " % ( len(unused), "" if len(unused) == 1 else "es", "\n ".join(seq_to_string(seq) for seq in sorted(unused)), ) ) return result
googlefonts/nototools
[ 247, 96, 247, 108, 1433465447 ]
def get_emoji_group_data(seq): """Return group data for the canonical sequence seq, or None. Group data is a tuple of index, group, subgroup, and name. The index is a unique global sort index for the sequence among all sequences in the group data.""" _load_emoji_group_data() return _emoji_group_data.get(seq, None)
googlefonts/nototools
[ 247, 96, 247, 108, 1433465447 ]
def get_emoji_subgroups(group): """Return the subgroups of this group, in order, or None if the group is not recognized.""" _load_emoji_group_data() subgroups = [] subgroup = None for _, g, sg, _ in sorted(_emoji_group_data.values()): if g == group: if sg != subgroup: subgroup = sg subgroups.append(subgroup) return subgroups if subgroups else None
googlefonts/nototools
[ 247, 96, 247, 108, 1433465447 ]
def get_sorted_emoji_sequences(seqs): """Seqs is a collection of canonical emoji sequences. Returns a list of these sequences in the canonical emoji group order. Sequences that are not canonical are placed at the end, in unicode code point order. """ _load_emoji_group_data() return sorted(seqs, key=lambda s: (_emoji_group_data.get(s, 100000), s))
googlefonts/nototools
[ 247, 96, 247, 108, 1433465447 ]
def add_data(data): for k, t in data.items(): if k in _emoji_sequence_data: print("already have data for sequence:", seq_to_string(k), t) _emoji_sequence_data[k] = t if EMOJI_VS in k: _emoji_non_vs_to_canonical[strip_emoji_vs(k)] = k
googlefonts/nototools
[ 247, 96, 247, 108, 1433465447 ]
def get_emoji_sequences(age=None, types=None): """Return the set of canonical emoji sequences, filtering to those <= age if age is not None, and those with type in types (if not a string) or type == types (if type is a string) if types is not None. By default all sequences are returned, including those for single emoji.""" _load_emoji_sequence_data() result = _emoji_sequence_data.keys() if types is not None: if isinstance(types, basestring): types = frozenset([types]) result = [k for k in result if _emoji_sequence_data[k][1] in types] if age is not None: age = float(age) result = [k for k in result if _emoji_sequence_data[k][0] <= age] return result
googlefonts/nototools
[ 247, 96, 247, 108, 1433465447 ]
def get_emoji_sequence_name(seq): """Return the name of the (possibly non-canonical) sequence, or None if not recognized as a sequence.""" data = get_emoji_sequence_data(seq) return None if not data else data[0]
googlefonts/nototools
[ 247, 96, 247, 108, 1433465447 ]
def get_emoji_sequence_type(seq): """Return the type of the (possibly non-canonical) sequence, or None if not recognized as a sequence. Types are in EMOJI_SEQUENCE_TYPES.""" data = get_emoji_sequence_data(seq) return None if not data else data[2]
googlefonts/nototools
[ 247, 96, 247, 108, 1433465447 ]
def get_canonical_emoji_sequence(seq): """Return the canonical version of this emoji sequence if the sequence is known, or None.""" if is_canonical_emoji_sequence(seq): return seq seq = strip_emoji_vs(seq) return _emoji_non_vs_to_canonical.get(seq, None)
googlefonts/nototools
[ 247, 96, 247, 108, 1433465447 ]
def seq_to_string(seq):
    """Return a string representation of the codepoint sequence:
    lowercase 4+-digit hex values joined by underscores."""
    return "_".join(format(cp, "04x") for cp in seq)
googlefonts/nototools
[ 247, 96, 247, 108, 1433465447 ]
def is_cp_seq(seq):
    """True if every element of seq is a valid Unicode code point
    (0..0x10FFFF inclusive); vacuously true for an empty sequence."""
    for cp in seq:
        if cp < 0 or cp > 0x10FFFF:
            return False
    return True
googlefonts/nototools
[ 247, 96, 247, 108, 1433465447 ]
def is_regional_indicator(cp):
    """True if cp lies in the regional indicator range."""
    return cp >= _REGIONAL_INDICATOR_START and cp <= _REGIONAL_INDICATOR_END
googlefonts/nototools
[ 247, 96, 247, 108, 1433465447 ]
def regional_indicator_to_ascii(cp):
    """Map a regional indicator code point to its uppercase ASCII letter."""
    assert is_regional_indicator(cp)
    offset = cp - _REGIONAL_INDICATOR_START
    return chr(ord("A") + offset)
googlefonts/nototools
[ 247, 96, 247, 108, 1433465447 ]
def string_to_regional_indicator_seq(s):
    """Convert a two-character ASCII string to a pair of regional
    indicator code points."""
    assert len(s) == 2
    first, second = s
    return ascii_to_regional_indicator(first), ascii_to_regional_indicator(second)
googlefonts/nototools
[ 247, 96, 247, 108, 1433465447 ]
def is_tag(cp):
    """True if cp is a tag character: the language tag U+E0001 or a cp
    strictly between U+E0020 and U+E0080.

    NOTE(review): U+E0020 itself is excluded by the strict inequality —
    preserved from the original; confirm this exclusion is intentional.
    """
    if cp == 0xE0001:
        return True
    return 0xE0020 < cp < 0xE0080
googlefonts/nototools
[ 247, 96, 247, 108, 1433465447 ]
def is_regional_tag_seq(seq):
    """True if seq starts with U+1F3F4, ends with the cancel tag U+E007F,
    and every interior cp lies strictly between U+E0020 and U+E007E."""
    if seq[0] != 0x1F3F4:
        return False
    if seq[-1] != 0xE007F:
        return False
    for cp in seq[1:-1]:
        if not (0xE0020 < cp < 0xE007E):
            return False
    return True
googlefonts/nototools
[ 247, 96, 247, 108, 1433465447 ]
def is_skintone_modifier(cp):
    """True if cp lies in the Fitzpatrick skin-tone modifier range."""
    return cp >= _FITZ_START and cp <= _FITZ_END
googlefonts/nototools
[ 247, 96, 247, 108, 1433465447 ]
def get_presentation_default_text():
    """Return the default-text presentation data, loading the emoji data
    on first use."""
    _load_emoji_data()
    return _presentation_default_text
googlefonts/nototools
[ 247, 96, 247, 108, 1433465447 ]
def is_emoji(cp):
    """True if cp is in the loaded emoji set (loads lazily)."""
    _load_emoji_data()
    return cp in _emoji
googlefonts/nototools
[ 247, 96, 247, 108, 1433465447 ]
def _load_unicode_emoji_variants():
    """Parse StandardizedVariants.txt and initialize a set of characters
    that have a defined emoji variant presentation.  All such characters
    also have a text variant presentation so a single set works for both.

    Populates module globals `_emoji_variants` (standard variants) and
    `_emoji_variants_proposed` (standard plus proposed); no-op if already
    loaded.  proposed-variants.txt is optional; its absence is tolerated.
    """
    global _emoji_variants, _emoji_variants_proposed
    if _emoji_variants:
        # Already loaded and cached.
        return

    emoji_variants = set()
    # prior to Unicode 11 emoji variants were part of the standard data.
    # as of Unicode 11 however they're only in a separate emoji data file.
    # Matches lines like "1F321 FE0F ; emoji style ;" — group 1 is the
    # base code point preceding the FE0F variation selector.
    line_re = re.compile(r"([0-9A-F]{4,6})\s+FE0F\s*;\s*emoji style\s*;")
    with open_unicode_data_file("emoji-variation-sequences.txt") as f:
        for line in f:
            m = line_re.match(line)
            if m:
                emoji_variants.add(int(m.group(1), 16))
    _emoji_variants = frozenset(emoji_variants)

    try:
        read = 0
        skipped = 0
        with open_unicode_data_file("proposed-variants.txt") as f:
            for line in f:
                m = line_re.match(line)
                if m:
                    read += 1
                    cp = int(m.group(1), 16)
                    if cp in emoji_variants:
                        # Proposed variant already standardized; count it
                        # so the summary print below is informative.
                        skipped += 1
                    else:
                        emoji_variants.add(cp)
        print(
            "skipped %s %d proposed variants"
            % ("all of" if skipped == read else skipped, read)
        )
    except IOError as e:
        # errno 2 is ENOENT: the proposed-variants file is optional, so
        # only re-raise other I/O failures.
        if e.errno != 2:
            raise

    _emoji_variants_proposed = frozenset(emoji_variants)
googlefonts/nototools
[ 247, 96, 247, 108, 1433465447 ]
def _load_variant_data():
    """Parse StandardizedVariants.txt and initialize all non-emoji variant
    data.  The data is a mapping from codepoint to a list of tuples of:
    - variant selector
    - compatibility character (-1 if there is none)
    - shaping context (bitmask, 1 2 4 8 for isolate initial medial final)
    The compatibility character is for cjk mappings that map to 'the same'
    glyph as another CJK character.

    Populates module globals `_variant_data` (the mapping) and
    `_variant_data_cps` (frozenset of its keys); no-op if already loaded.
    """
    global _variant_data, _variant_data_cps
    if _variant_data:
        # Already loaded and cached.
        return

    compatibility_re = re.compile(r"\s*CJK COMPATIBILITY IDEOGRAPH-([0-9A-Fa-f]+)")
    variants = collections.defaultdict(list)
    with open_unicode_data_file("StandardizedVariants.txt") as f:
        for line in f:
            # Strip trailing comment and surrounding whitespace; skip
            # blank/comment-only lines.
            x = line.find("#")
            if x >= 0:
                line = line[:x]
            line = line.strip()
            if not line:
                continue

            # Fields: "<cp> <variant selector>; <description>; <context>"
            tokens = line.split(";")
            cp, var = tokens[0].split(" ")
            cp = int(cp, 16)
            varval = int(var, 16)
            if varval in [0xFE0E, 0xFE0F]:
                continue  # ignore emoji variants
            m = compatibility_re.match(tokens[1].strip())
            compat = int(m.group(1), 16) if m else -1
            context = 0
            if tokens[2]:
                # Accumulate the shaping-context bitmask from keywords.
                ctx = tokens[2]
                if ctx.find("isolate") != -1:
                    context += 1
                if ctx.find("initial") != -1:
                    context += 2
                if ctx.find("medial") != -1:
                    context += 4
                if ctx.find("final") != -1:
                    context += 8
            variants[cp].append((varval, compat, context))

    _variant_data_cps = frozenset(variants.keys())
    _variant_data = variants
googlefonts/nototools
[ 247, 96, 247, 108, 1433465447 ]
def get_variant_data(cp):
    """Return a copy of the variant tuples for cp, or None if cp has no
    variant data."""
    _load_variant_data()
    data = _variant_data.get(cp)
    # Copy so callers cannot mutate the cached list.
    return list(data) if data is not None else None
googlefonts/nototools
[ 247, 96, 247, 108, 1433465447 ]
def _load_proposed_emoji_data():
    """Parse proposed-emoji.txt if it exists to get cps/names of proposed
    emoji (but not approved) for this version of Unicode.

    Populates module globals `_proposed_emoji_data` (cp -> name) and
    `_proposed_emoji_data_cps`; no-op if already loaded.  A missing file
    is tolerated; any other I/O error is re-raised.
    """
    global _proposed_emoji_data, _proposed_emoji_data_cps
    if _proposed_emoji_data:
        return

    _proposed_emoji_data = {}
    # Fix: the code point field is hexadecimal; the previous class
    # [a-zA-z0-9] accidentally spanned A-z (which includes [ \ ] ^ _ `)
    # and accepted non-hex letters/digits that int(..., 16) could then
    # mis-handle.  Restrict to hex digits.
    line_re = re.compile(r"^U\+([0-9A-Fa-f]{4,5})\s.*\s\d{4}Q\d\s+(.*)$")
    try:
        with open_unicode_data_file("proposed-emoji.txt") as f:
            for line in f:
                line = line.strip()
                # Skip blanks, comments, and bullet-list lines.
                if not line or line[0] == "#" or line.startswith(u"\u2022"):
                    continue

                m = line_re.match(line)
                if not m:
                    raise ValueError('did not match "%s"' % line)
                cp = int(m.group(1), 16)
                name = m.group(2)
                if cp in _proposed_emoji_data:
                    raise ValueError(
                        "duplicate emoji %x, old name: %s, new name: %s"
                        % (cp, _proposed_emoji_data[cp], name)
                    )

                _proposed_emoji_data[cp] = name
    except IOError as e:
        if e.errno != 2:  # not file not found, rethrow
            raise

    _proposed_emoji_data_cps = frozenset(_proposed_emoji_data.keys())
googlefonts/nototools
[ 247, 96, 247, 108, 1433465447 ]
def proposed_emoji_cps():
    """Return the code points of proposed (not yet approved) emoji,
    loading the data on first use."""
    _load_proposed_emoji_data()
    return _proposed_emoji_data_cps
googlefonts/nototools
[ 247, 96, 247, 108, 1433465447 ]
def read_codeset(text):
    """Return the set of code points listed in codeset text.

    Each data line looks like "0x<byte(s)> 0x<codepoint> ..."; the second
    hex field is the Unicode code point.  Non-matching lines are ignored.
    """
    pattern = re.compile(r"^0x([0-9a-fA-F]{2,6})\s+0x([0-9a-fA-F]{4,6})\s+.*")
    result = set()
    for line in text.splitlines():
        match = pattern.match(line)
        if match is not None:
            result.add(int(match.group(2), 16))
    return result
googlefonts/nototools
[ 247, 96, 247, 108, 1433465447 ]
def _dump_emoji_presentation():
    """Dump presentation info, for testing."""
    text_count = 0
    emoji_count = 0
    for cp in sorted(get_emoji()):
        cp_name = name(cp, "<error>")
        if cp in get_presentation_default_emoji():
            presentation = "emoji"
            emoji_count += 1
        elif cp in get_presentation_default_text():
            presentation = "text"
            text_count += 1
        else:
            # cp is emoji but in neither presentation set.
            presentation = "<error>"
        print(
            "%s%04x %5s %s"
            % (" " if cp < 0x10000 else "", cp, presentation, cp_name)
        )
    print(
        "%d total emoji, %d text presentation, %d emoji presentation"
        % (len(get_emoji()), text_count, emoji_count)
    )
googlefonts/nototools
[ 247, 96, 247, 108, 1433465447 ]
def see_also(cp):
    """Return the frozenset of 'see also' cps for cp from the NamesList
    data, or None if cp has none."""
    _load_nameslist_data()
    related = _nameslist_see_also.get(cp)
    # Fix: .get returns None for unknown cps, and frozenset(None) raised
    # TypeError; propagate None instead.
    return None if related is None else frozenset(related)
googlefonts/nototools
[ 247, 96, 247, 108, 1433465447 ]
def alt_names(cp):
    """Return list of name, nametype tuples for cp, or None."""
    _load_namealiases_data()
    names = _namealiases_alt_names.get(cp)
    # Fix: .get returns None for cps with no aliases, and tuple(None)
    # raised TypeError instead of returning None as documented.
    return None if names is None else tuple(names)
googlefonts/nototools
[ 247, 96, 247, 108, 1433465447 ]