code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def rename(self, names, inplace=False):
    """Rename the columns using the 'names' dict.

    Changes the names of the columns given as the keys, replacing them
    with the names given as the values.

    Parameters
    ----------
    names : dict[string, string]
        Dictionary of old_name -> new_name.
    inplace : bool, optional. Defaults to False.
        Whether the SFrame is modified in place; if False a new SFrame
        is returned, if True self is modified and returned.
    """
    # isinstance is the idiomatic type check and also accepts dict
    # subclasses (e.g. OrderedDict), which `type(...) is not dict` rejected.
    if not isinstance(names, dict):
        raise TypeError('names must be a dictionary: oldname -> newname')
    if inplace:
        self.__is_dirty__ = True
        with cython_context():
            if self._is_vertex_frame():
                graph_proxy = self.__graph__.__proxy__.rename_vertex_fields(
                    names.keys(), names.values())
                self.__graph__.__proxy__ = graph_proxy
            elif self._is_edge_frame():
                graph_proxy = self.__graph__.__proxy__.rename_edge_fields(
                    names.keys(), names.values())
                self.__graph__.__proxy__ = graph_proxy
        return self
    else:
        return super(GFrame, self).rename(names, inplace=inplace)
def cancel_link_unlink_mode(self):
    """Cancel linking or unlinking mode.

    Sends scene command '08' to the device and returns the hub's
    buffer status.
    """
    self.logger.info("cancel_link_unlink_mode")
    self.scene_command('08')
    return self.hub.get_buffer_status()
def send(self):
    """Send the draft.

    Submits self.data through the session's "method:queue" request,
    stores the server response back onto self.data, and returns self
    so calls can be chained.
    """
    response = self.session.request("method:queue", [self.data])
    self.data = response
    return self
def create_redis_client(redis_address, password=None):
    """Create a Redis client.

    Args:
        redis_address (str): The "<host>:<port>" address of the Redis server.
        password (str): Optional password of the Redis server.

    Returns:
        A redis.StrictRedis client.
    """
    # Split on the LAST ':' so host strings that themselves contain
    # colons (e.g. IPv6 literals) still parse; split(":") raised a
    # ValueError on unpack for those.
    redis_ip_address, redis_port = redis_address.rsplit(":", 1)
    return redis.StrictRedis(
        host=redis_ip_address, port=int(redis_port), password=password)
def oindex(a, selection):
    """Implementation of orthogonal indexing with slices and ints.

    Normalizes `selection` (expanding any Ellipsis against a.shape),
    applies an outer (ix_-style) index, then squeezes out the singleton
    dimensions introduced where the caller supplied scalar integers.
    """
    selection = replace_ellipsis(selection, a.shape)
    # remember which selectors were plain ints: those axes become
    # length-1 after ix_ indexing and must be squeezed away afterwards
    drop_axes = tuple([i for i, s in enumerate(selection) if is_integer(s)])
    selection = ix_(selection, a.shape)
    result = a[selection]
    if drop_axes:
        result = result.squeeze(axis=drop_axes)
    return result
def swap_word_order(source):
    """Swap the order of the 32-bit words in the 'source' byte string.

    The length of `source` must be a multiple of 4 bytes.
    """
    assert len(source) % 4 == 0
    fmt = "I" * (len(source) // 4)
    unpacked = struct.unpack(fmt, source)
    return struct.pack(fmt, *unpacked[::-1])
def add_vxlan_port(self, name, remote_ip, local_ip=None, key=None, ofport=None):
    """Creates a VxLAN tunnel port.

    See the description of ``add_tunnel_port()``; this simply forwards
    with tunnel_type fixed to 'vxlan'.
    """
    self.add_tunnel_port(name, 'vxlan', remote_ip,
                         local_ip=local_ip, key=key, ofport=ofport)
def _run_expiration(self, conn):
    """Return any items that have expired back to their queues.

    NOTE(review): the Lua script bodies passed to ``register_script()``
    appear to have been stripped from this source (multi-line string
    literals lost) — as written, ``register_script()`` with no argument
    cannot work; recover the original scripts before relying on this.
    NOTE(review): ``now`` is computed but unused; ``time.time()`` is
    called again for the script argument.
    """
    now = time.time()
    # script 1: pop expired members from the expiration zset
    script = conn.register_script( )
    expiring = script(keys=[self._key_expiration()], args=[time.time()])
    # script 2: requeue each expired item against its bookkeeping keys
    script = conn.register_script( )
    for item in expiring:
        script(keys=[self._key_available(), self._key_priorities(),
                     self._key_workers(), self._key_reservations(item)],
               args=[item])
def import_modules(names, src, dst):
    """Import submodules of package `src` and attach them to module `dst`.

    For each name, imports ``src.name`` and binds it as attribute
    ``name`` on ``sys.modules[dst]``.
    """
    for name in names:
        submodule = importlib.import_module('{0}.{1}'.format(src, name))
        setattr(sys.modules[dst], name, submodule)
def float_entry(self, prompt, message=None, min=None, max=None, rofi_args=None, **kwargs):
    """Prompt the user to enter a floating point number.

    Parameters
    ----------
    prompt : string
        Prompt to display to the user.
    message : string, optional
        Message to display under the entry line.
    min, max : float, optional
        Minimum and maximum values to allow. If None, no limit is imposed.

    Returns
    -------
    float, or None if the dialog is cancelled.
    """
    if (min is not None) and (max is not None) and not (max > min):
        raise ValueError("Maximum limit has to be more than the minimum limit.")

    def validate(text):
        # returns (value, error-message) — exactly one of the two is None
        try:
            value = float(text)
        except ValueError:
            return None, "Please enter a floating point value."
        if (min is not None) and (value < min):
            return None, "The minimum allowable value is {0}.".format(min)
        if (max is not None) and (value > max):
            return None, "The maximum allowable value is {0}.".format(max)
        return value, None

    return self.generic_entry(prompt, validate, message, rofi_args, **kwargs)
def read_grid_from_file(filename):
    """Read the results of a full set of calculations from file.

    The file is expected to contain a Python literal (dict). On parse or
    I/O failure a default ``{'grid': 0, 'all_done': False}`` is returned;
    a SyntaxError additionally prints a diagnostic.
    """
    failed = {'grid': 0, 'all_done': False}
    try:
        # `with` guarantees the handle is closed even when literal_eval
        # raises — the original leaked the file object in that case.
        with open(filename, mode='r') as f:
            full_res = ast.literal_eval(f.read())
    except SyntaxError:
        print('Problems reading ', filename)
        full_res = failed
    except (OSError, IOError):
        full_res = failed
    return full_res
def alias_grade_entry(self, grade_entry_id, alias_id):
    """Adds an ``Id`` to a ``GradeEntry`` for the purpose of creating compatibility.

    The primary ``Id`` of the ``GradeEntry`` is determined by the provider.
    The new ``Id`` performs as an alias to the primary ``Id``. If the alias
    is a pointer to another grade entry, it is reassigned to the given
    grade entry ``Id``.

    arg:    grade_entry_id (osid.id.Id): the ``Id`` of a ``GradeEntry``
    arg:    alias_id (osid.id.Id): the alias ``Id``
    raise:  AlreadyExists - ``alias_id`` is already assigned
    raise:  NotFound - ``grade_entry_id`` not found
    raise:  NullArgument - ``grade_entry_id`` or ``alias_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    """
    self._alias_id(primary_id=grade_entry_id, equivalent_id=alias_id)
def CreateClass(cls, data_type_definition):
    """Creates a new structure values class.

    Args:
        data_type_definition (DataTypeDefinition): data type definition.

    Returns:
        class: structure values class, generated by exec'ing a class
        template in a restricted namespace.
    """
    cls._ValidateDataTypeDefinition(data_type_definition)
    class_definition = cls._CreateClassTemplate(data_type_definition)
    # exec with a minimal __builtins__ so the generated code only sees
    # object/super (and __build_class__ on Python 3, which `class`
    # statements require).
    namespace = {
        '__builtins__' : {
            'object': builtins.object,
            'super': builtins.super},
        '__name__': '{0:s}'.format(data_type_definition.name)}
    if sys.version_info[0] >= 3:
        namespace['__builtins__']['__build_class__'] = builtins.__build_class__
    exec(class_definition, namespace)
    return namespace[data_type_definition.name]
async def getLiftRows(self, lops):
    """Yield row tuples from a series of lift operations.

    Row tuples' only requirement is that the first element be the binary
    id of a node.

    Args:
        lops (list): A list of lift operations.

    Yields:
        (tuple): (layer_indx, (buid, ...)) rows, produced layer by layer
        in self.layers order.
    """
    for layeridx, layr in enumerate(self.layers):
        async for x in layr.getLiftRows(lops):
            yield layeridx, x
def _add_lines(specification, module):
    """Return autodoc commands for a basemodel's docstring.

    Note that `collection classes` (e.g. `Model`, `ControlParameters`,
    `InputSequences`) are placed on top of the respective section and the
    `contained classes` (model methods, parameter/sequence instances) at
    the bottom. This differs from the definition order in the modules but
    results in a better documentation structure.
    """
    caption = _all_spec2capt.get(specification, 'dummy')
    # a collection class exists for sections whose caption ends in one of
    # these words; its name is the title-cased caption without spaces
    if caption.split()[-1] in ('parameters', 'sequences', 'Masks'):
        exists_collectionclass = True
        name_collectionclass = caption.title().replace(' ', '')
    else:
        exists_collectionclass = False
    lines = []
    # the collection class (or Model) is documented first, on its own
    if specification == 'model':
        lines += [f'',
                  f'.. autoclass:: {module.__name__}.Model',
                  f' :members:',
                  f' :show-inheritance:',
                  f' :exclude-members: {", ".join(EXCLUDE_MEMBERS)}']
    elif exists_collectionclass:
        lines += [f'',
                  f'.. autoclass:: {module.__name__}.{name_collectionclass}',
                  f' :members:',
                  f' :show-inheritance:',
                  f' :exclude-members: {", ".join(EXCLUDE_MEMBERS)}']
    # then the whole module, excluding the class documented above
    lines += ['',
              '.. automodule:: ' + module.__name__,
              ' :members:',
              ' :show-inheritance:']
    if specification == 'model':
        lines += [' :exclude-members: Model']
    elif exists_collectionclass:
        lines += [' :exclude-members: ' + name_collectionclass]
    return lines
def edit(self, config=None, events=None, add_events=None, rm_events=None,
         active=True):
    """Edit this hook.

    :param dict config: (optional), key-value pairs of settings for this hook
    :param list events: (optional), which events should this be triggered for
    :param list add_events: (optional), events to be added to the list of
        events that this hook triggers for
    :param list rm_events: (optional), events to be removed from the list
        of events that this hook triggers for
    :param bool active: (optional), should this event be active
    :returns: bool
    """
    # Mutable default arguments ({} / []) are shared across all calls;
    # use None sentinels and substitute the empty dict here instead.
    data = {'config': config if config is not None else {}, 'active': active}
    if events:
        data['events'] = events
    if add_events:
        data['add_events'] = add_events
    if rm_events:
        data['remove_events'] = rm_events
    json = self._json(self._patch(self._api, data=dumps(data)), 200)
    if json:
        self._update_(json)
        return True
    return False
def _get_description(arg): desc = [] otherwise = False if arg.can_be_inferred: desc.append('If left unspecified, it will be inferred automatically.') otherwise = True elif arg.is_flag: desc.append('This argument defaults to ' '<code>None</code> and can be omitted.') otherwise = True if arg.type in {'InputPeer', 'InputUser', 'InputChannel', 'InputNotifyPeer', 'InputDialogPeer'}: desc.append( 'Anything entity-like will work if the library can find its ' '<code>Input</code> version (e.g., usernames, <code>Peer</code>, ' '<code>User</code> or <code>Channel</code> objects, etc.).' ) if arg.is_vector: if arg.is_generic: desc.append('A list of other Requests must be supplied.') else: desc.append('A list must be supplied.') elif arg.is_generic: desc.append('A different Request must be supplied for this argument.') else: otherwise = False if otherwise: desc.insert(1, 'Otherwise,') desc[-1] = desc[-1][:1].lower() + desc[-1][1:] return ' '.join(desc).replace( 'list', '<span class="tooltip" title="Any iterable that supports len() ' 'will work too">list</span>' )
Generates a proper description for the given argument.
def _replace_series_name(seriesname, replacements): for pat, replacement in six.iteritems(replacements): if re.match(pat, seriesname, re.IGNORECASE | re.UNICODE): return replacement return seriesname
Performs replacement of series name. Allow specified replacements of series names in cases where default filenames match the wrong series, e.g. missing year gives wrong answer, or vice versa. This helps the TVDB query get the right match.
def add_input_arg(self, inp):
    """Add an input as a command-line argument.

    Appends the input's DAX representation to the argument list and
    registers it as an input dependency.
    """
    self.add_arg(inp._dax_repr())
    self._add_input(inp)
def construct(self, response_args, request, **kwargs):
    """Construct the response.

    :param response_args: response arguments
    :param request: The parsed request, a self.request_cls class instance
    :param kwargs: Extra keyword arguments
    :return: An instance of the self.response_cls class
    """
    pre_args = self.do_pre_construct(response_args, request, **kwargs)
    response = self.response_cls(**pre_args)
    return self.do_post_construct(response, request, **kwargs)
def itermonthdays2(cls, year, month):
    """Similar to itermonthdays2 but returns tuples of day and weekday.

    Iterates the full calendar weeks covering (year, month); days that
    belong to adjacent months are reported with day number 0.
    """
    for day in NepCal.itermonthdates(year, month):
        if day.month == month:
            yield (day.day, day.weekday())
        else:
            # padding day from the previous/next month
            yield (0, day.weekday())
def set_instrumentation_callback(self, callback):
    """Assign a method to invoke when a request has completed gathering
    measurements.

    :param method callback: The method to invoke
    """
    self.logger.debug('Setting instrumentation callback: %r', callback)
    self._instrumentation_callback = callback
def get_activities_by_objective_banks(self, objective_bank_ids):
    """Gets the list of ``Activities`` corresponding to a list of
    ``ObjectiveBanks``.

    arg:    objective_bank_ids (osid.id.IdList): list of objective bank ``Ids``
    return: (osid.learning.ActivityList) - list of activities
    raise:  NullArgument - ``objective_bank_ids`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    """
    activity_list = []
    for objective_bank_id in objective_bank_ids:
        activity_list += list(
            self.get_activities_by_objective_bank(objective_bank_id))
    return objects.ActivityList(activity_list)
def avail_images(conn=None, call=None):
    """Return a list of the server appliances that are on the provider.

    Must be invoked as a salt-cloud function (-f/--function or
    --list-images), never as an action.
    """
    if call == 'action':
        raise SaltCloudSystemExit(
            'The avail_images function must be called with '
            '-f or --function, or with the --list-images option'
        )
    if not conn:
        conn = get_conn()
    ret = {}
    for appliance in conn.list_appliances():
        ret[appliance['name']] = appliance
    return ret
def political_views(self) -> str:
    """Get a random political views.

    :return: Political views.

    :Example:
        Liberal.
    """
    return self.random.choice(self._data['political_views'])
def is_cdl(filename):
    """Quick check for a .cdl ascii (netCDF CDL) file.

    Returns False unless the filename has a .cdl extension AND the first
    32 bytes start with b'netcdf' or contain b'dimensions'.

    :param str filename: Absolute path of file to check
    """
    if os.path.splitext(filename)[-1] != '.cdl':
        return False
    with open(filename, 'rb') as handle:
        head = handle.read(32)
    return head.startswith(b'netcdf') or b'dimensions' in head
def file_chunks(self, fp):
    """Yields chunks of a file.

    Parameters
    ----------
    fp : io.RawIOBase
        The file to break into chunks (must be an open file or have the
        ``readinto`` method).
    """
    fsize = utils.file_size(fp)
    offset = 0
    if hasattr(fp, 'readinto'):
        # fast path: fill the preallocated internal buffer in place and
        # yield a slice view of it (presumably self.buf aliases
        # self._internal — TODO confirm against the class definition)
        while offset < fsize:
            nb = fp.readinto(self._internal)
            yield self.buf[:nb]
            offset += nb
    else:
        # fallback: plain read() of at most chunk_size bytes at a time
        while offset < fsize:
            nb = min(self.chunk_size, fsize - offset)
            yield fp.read(nb)
            offset += nb
def onPollVoted(
    self,
    mid=None,
    poll=None,
    added_options=None,
    removed_options=None,
    author_id=None,
    thread_id=None,
    thread_type=None,
    ts=None,
    metadata=None,
    msg=None,
):
    """Called when the client is listening, and somebody votes in a group poll.

    :param mid: The action ID
    :param poll: Poll, that user voted in
    :param author_id: The ID of the person who voted in the poll
    :param thread_id: Thread ID that the action was sent to
    :param thread_type: Type of thread that the action was sent to
    :param ts: A timestamp of the action
    :param metadata: Extra metadata about the action
    :param msg: A full set of the data received
    :type poll: models.Poll
    :type thread_type: models.ThreadType
    """
    log.info(
        "{} voted in poll {} in {} ({})".format(
            author_id, poll, thread_id, thread_type.name
        )
    )
def getColorName(c):
    """Find the name of the named color closest to `c` (Euclidean RGB distance).

    .. hint:: |colorpalette| |colorpalette.py|_
    """
    c = np.array(getColor(c))
    # NOTE(review): 99.0 acts as a "far enough" sentinel; if every named
    # color were farther than 99 units, the empty string is returned.
    mdist = 99.0
    kclosest = ""
    for key in colors.keys():
        ci = np.array(getColor(key))
        d = np.linalg.norm(c - ci)
        if d < mdist:
            mdist = d
            kclosest = str(key)
    return kclosest
def prior_names(self):
    """Get the prior information names.

    Returns
    -------
    prior_names : list
        A list of prior information names (unique index labels of
        self.prior_information).
    """
    groups = self.prior_information.groupby(self.prior_information.index).groups
    return list(groups.keys())
def manifest_history(self, synchronous=True, **kwargs):
    """Obtain manifest history for subscriptions.

    :param synchronous: What should happen if the server returns an HTTP
        202 (accepted) status code? Wait for the task to complete if
        ``True``. Immediately return the server's response otherwise.
    :param kwargs: Arguments to pass to requests.
    :returns: The server's response, with all JSON decoded.
    :raises: ``requests.exceptions.HTTPError`` if the server responds
        with an HTTP 4XX or 5XX message.
    """
    # copy before mutating so the caller's kwargs dict is untouched
    kwargs = kwargs.copy()
    kwargs.update(self._server_config.get_client_kwargs())
    response = client.get(
        self._org_path('manifest_history', kwargs['data']),
        **kwargs
    )
    return _handle_response(response, self._server_config, synchronous)
def _report_profile(self, command, lock_name, elapsed_time, memory):
    """Writes a tab-separated profile record (command, lock, elapsed,
    memory) as one line appended to self.pipeline_profile_file.
    """
    # elapsed_time is rendered as H:MM:SS via timedelta; field spacing
    # (tab vs tab+space) is part of the file format — do not change
    message_raw = str(command) + "\t " + \
        str(lock_name) + "\t" + \
        str(datetime.timedelta(seconds = round(elapsed_time, 2))) + "\t " + \
        str(memory)
    with open(self.pipeline_profile_file, "a") as myfile:
        myfile.write(message_raw + "\n")
def _dict_func(self, func, axis, *args, **kwargs):
    """Apply a dict of functions to selected labels along the given axis.

    Args:
        func: dict mapping axis labels to the function to apply there.
        axis: Target axis to apply the functions along (0 = columns index).

    Returns:
        A new PandasQueryCompiler.
    """
    if "axis" not in kwargs:
        kwargs["axis"] = axis
    if axis == 0:
        index = self.columns
    else:
        index = self.index
    # expand each label key into all matching positional indices
    func = {idx: func[key] for key in func for idx in index.get_indexer_for([key])}

    def dict_apply_builder(df, func_dict={}):
        # NOTE(review): mutable default — presumably always overridden by
        # the caller below; confirm apply_func_to_select_indices... passes it
        return pandas.DataFrame(df.apply(func_dict, *args, **kwargs))

    result_data = self.data.apply_func_to_select_indices_along_full_axis(
        axis, dict_apply_builder, func, keep_remaining=False
    )
    full_result = self._post_process_apply(result_data, axis)
    return full_result
def set_key(self, section, key, value):
    """Stores given key in settings file.

    :param section: Current section to save the key into.
    :type section: unicode
    :param key: Current key to save.
    :type key: unicode
    :param value: Current key value to save.
    :type value: object
    """
    LOGGER.debug("> Saving '{0}' in '{1}' section with value: '{2}' in settings file.".format(
        key, section, foundations.strings.to_string(value)))
    # Qt settings require begin/endGroup bracketing around the write
    self.__settings.beginGroup(section)
    self.__settings.setValue(key, QVariant(value))
    self.__settings.endGroup()
def isinstance(self, instance, class_name):
    """Check if a BaseNode is an instance of a registered dynamic class.

    NOTE(review): this method shadows the builtin ``isinstance`` name on
    the class; inside the body the bare ``isinstance(...)`` calls still
    resolve to the builtin (class attributes are not in function scope).
    """
    if isinstance(instance, BaseNode):
        klass = self.dynamic_node_classes.get(class_name, None)
        if klass:
            return isinstance(instance, klass)
        # unknown class name -> not an instance of anything registered
        return False
    else:
        raise TypeError("This function can only be used for BaseNode objects")
def fetch(self):
    """Load the index from the associated Git repository.

    Clones the remote into the local cache if absent, pulls when the
    cached HEAD differs from the remote, then loads INDEX_FILE (JSON)
    into self.contents.
    """
    os.makedirs(os.path.dirname(self.cached_repo), exist_ok=True)
    if not os.path.exists(self.cached_repo):
        self._log.warning("Index not found, caching %s in %s",
                          self.repo, self.cached_repo)
        git.clone(self.remote_url, self.cached_repo, checkout=True)
    else:
        self._log.debug("Index is cached")
        if self._are_local_and_remote_heads_different():
            self._log.info("Cached index is not up to date, pulling %s",
                           self.repo)
            git.pull(self.cached_repo, self.remote_url)
    with open(os.path.join(self.cached_repo, self.INDEX_FILE),
              encoding="utf-8") as _in:
        self.contents = json.load(_in)
def point_on_screen(self, pos):
    """Is the point still on the screen?

    :param pos: Point (x, y)
    :type pos: tuple
    :return: Is it?
    :rtype: bool
    """
    x, y = pos[0], pos[1]
    return 0 <= x < self.width and 0 <= y < self.height
def echo_heading(text, marker='=', marker_color='blue'):
    """Print a text formatted to look like a heading.

    The output looks like:  ===> text  (with marker='=').

    :param str text: the text to echo
    :param str marker: the marker to mark the heading
    :param str marker_color: one of ('black' | 'red' | 'green' | 'yellow' |
        'blue' | 'magenta' | 'cyan' | 'white')
    """
    click.secho(marker * 3 + '>', fg=marker_color, nl=False)
    click.echo(' ' + text)
def wait_for_instance_deletion(self, credentials, name, **kwargs):
    """Wait for deletion of an instance, based on the configuration data.

    Delegates to the module-level ``wait_for_instance_deletion`` with this
    object's project and zone; presumably returns the operation name —
    TODO confirm against the module-level function's contract.
    """
    op_name = wait_for_instance_deletion(
        credentials, self.project, self.zone, name, **kwargs)
    return op_name
def _get_bin(self, key):
    """Returns a binned dictionary based on redis zscore.

    Scans the sorted set at `key`, JSON-decodes each member, and groups
    members into lists keyed by the negated zscore.

    @return: The sorted dict
    """
    sortedDict = {}
    for item in self.redis_conn.zscan_iter(key):
        my_item = ujson.loads(item[0])
        # negate so that higher redis scores sort first when keys are
        # iterated in ascending order — TODO confirm intent with callers
        my_score = -item[1]
        if my_score not in sortedDict:
            sortedDict[my_score] = []
        sortedDict[my_score].append(my_item)
    return sortedDict
def hash_answer(self, answer, timestamp):
    """Cryptographically hash the answer with the provided timestamp.

    Generates a time-sensitive signature so the field can validate a
    submitted answer against the signature supplied in the form,
    preventing tampering and limiting replay.

    NOTE(review): the loop recomputes the identical digest ITERATIONS
    times — ``hashed`` is never fed back into the next round, so this is
    equivalent to a single salted_hmac call. Either the loop is dead code
    or the intent was iterative strengthening; confirm before changing,
    as changing it would invalidate existing signatures.
    """
    timestamp = str(timestamp)
    answer = str(answer)
    hashed = ''
    for _ in range(ITERATIONS):
        hashed = salted_hmac(timestamp, answer).hexdigest()
    return hashed
def _replace_none(self, aDict): for k, v in aDict.items(): if v is None: aDict[k] = 'none'
Replace all None values in a dict with 'none'
def description(self, value):
    """Update description of the zone.

    :type value: str
    :param value: (Optional) new description
    :raises: ValueError for invalid value types.
    """
    # `str` replaces six.string_types — Python 3 has a single string
    # type, so the six shim (and dependency) is unnecessary.
    if not isinstance(value, str) and value is not None:
        raise ValueError("Pass a string, or None")
    self._properties["description"] = value
def add_positional_embedding(x, max_length, name=None, positions=None):
    """Adds positional embedding.

    Args:
        x: Tensor with shape [batch, length, depth].
        max_length: int representing static maximum size of any dimension.
        name: str representing name of the embedding tf.Variable.
        positions: Tensor with shape [batch, length].

    Returns:
        Tensor of same shape as x.
    """
    with tf.name_scope("add_positional_embedding"):
        _, length, depth = common_layers.shape_list(x)
        var = tf.cast(tf.get_variable(name, [max_length, depth]), x.dtype)
        if positions is None:
            # no explicit positions: take the first `length` rows of the
            # embedding table (padding when length exceeds max_length)
            pad_length = tf.maximum(0, length - max_length)
            sliced = tf.cond(
                tf.less(length, max_length),
                lambda: tf.slice(var, [0, 0], [length, -1]),
                lambda: tf.pad(var, [[0, pad_length], [0, 0]]))
            return x + tf.expand_dims(sliced, 0)
        else:
            # explicit positions: gather the corresponding embedding rows
            return x + tf.gather(var, tf.to_int32(positions))
def warn(msg, *args):
    """Print a warning on stderr.

    When extra args are given, `msg` is %-formatted with them first.
    (No trailing newline is appended.)
    """
    text = msg % args if args else msg
    sys.stderr.write('WARNING: ' + text)
def bridge_param(_param, _method, *args, **kwargs):
    """Used as a callback to keep the result from the previous callback.

    Runs `_method` for its side effects but overrides its result with
    `_param`, so the next callback in the fiber chain sees `_param`.
    """
    assert callable(_method), "method %r is not callable" % (_method, )
    f = Fiber(debug_depth=1, debug_call=_method)
    # drop the incoming result, call _method, then restore _param
    f.add_callback(drop_param, _method, *args, **kwargs)
    f.add_callback(override_result, _param)
    return f.succeed()
def add_new_vehicle(self, hb):
    """Add a new vehicle from a heartbeat message.

    Ground stations are ignored; otherwise the source system id is
    registered with a human-readable type name and the menu refreshed.
    """
    if hb.type == mavutil.mavlink.MAV_TYPE_GCS:
        return
    sysid = hb.get_srcSystem()
    self.vehicle_list.append(sysid)
    self.vehicle_name_by_sysid[sysid] = self.vehicle_type_string(hb)
    self.update_vehicle_menu()
def set(self, value):
    """Set the value of the bar.

    Out-of-bound values are clamped to [self.min, self.max]; the
    observer self.func is then invoked with the new value on a fresh
    thread.
    """
    clamped = min(self.max, max(self.min, value))
    self._value = clamped
    start_new_thread(self.func, (self.get(),))
def cli(yamlfile, inline, format):
    """Generate JSON Schema representation of a biolink model."""
    print(JsonSchemaGenerator(yamlfile, format).serialize(inline=inline))
def select_next(self):
    """Move to the next status in the timeline.

    Fetches more statuses when the selection would run past the end,
    then redraws the status pane and footer around the new selection.
    """
    self.footer.clear_message()
    old_index = self.selected
    new_index = self.selected + 1
    if self.selected + 1 >= len(self.statuses):
        self.fetch_next()
        # NOTE(review): redraw targets new_index - 1 here, presumably
        # because fetch_next appended items — confirm intended offset
        self.left.draw_statuses(self.statuses, self.selected, new_index - 1)
        self.draw_footer_status()
    self.selected = new_index
    self.redraw_after_selection_change(old_index, new_index)
def _process_methods(self, req, resp, resource):
    """Adds the Access-Control-Allow-Methods header to the response.

    Uses the cors settings to determine which methods are allowed;
    returns True iff the requested method is among them.
    """
    requested_method = self._get_requested_method(req)
    if not requested_method:
        return False
    if self._cors_config['allow_all_methods']:
        # everything the resource actually implements is allowed
        allowed_methods = self._get_resource_methods(resource)
        self._set_allowed_methods(resp, allowed_methods)
        if requested_method in allowed_methods:
            return True
    elif requested_method in self._cors_config['allow_methods_list']:
        # intersect the resource's methods with the configured whitelist
        resource_methods = self._get_resource_methods(resource)
        allowed_methods = [
            method for method in resource_methods
            if method in self._cors_config['allow_methods_list']
        ]
        self._set_allowed_methods(resp, allowed_methods)
        if requested_method in allowed_methods:
            return True
    return False
def get_unavailable_brokers(zk, partition_metadata):
    """Returns the set of unavailable brokers: the difference between the
    expected replica set of the given partition (from zookeeper) and the
    set of currently available replicas.
    """
    topic = partition_metadata.topic
    partition = partition_metadata.partition
    topic_data = zk.get_topics(topic)
    expected = set(topic_data[topic]['partitions'][str(partition)]['replicas'])
    return expected - set(partition_metadata.replicas)
def merge_upwards_if_smaller_than(self, small_size, a_or_u):
    """Merge remaining too-small nodes into their grandparent's tail node.

    After prune_if_smaller_than is run, excess nodes may remain; this
    folds each small node's sizes into the preceding sibling-tail and
    removes it, leaving a flatter tree. Run only when scanning is done.
    Total app/use sizes are asserted unchanged.
    """
    prev_app_size = self.app_size()
    prev_use_size = self.use_size()
    small_nodes = self._find_small_nodes(small_size, (), a_or_u)
    for node, parents in small_nodes:
        if len(parents) >= 2:
            # tail = last node under the grandparent, the merge target
            tail = parents[-2]._nodes[-1]
            if tail._isdir is None:
                assert tail._app_size is not None, tail
                tail._add_size(node.app_size(), node.use_size())
                parents[-1]._nodes.remove(node)
                assert len(parents[-1]._nodes)
    # merging must redistribute sizes, never change the totals
    assert prev_app_size == self.app_size(), (
        prev_app_size, self.app_size())
    assert prev_use_size == self.use_size(), (
        prev_use_size, self.use_size())
def task_id(self):
    """Hack to return a task ID for a build, including container CG builds.

    We have something for this in Brewweb, but not yet for upstream Koji:
    https://pagure.io/koji/issue/215

    Returns None implicitly when neither source is present.
    """
    if self['task_id']:
        return self['task_id']
    # fall back to the task id recorded by the container content generator
    if self.extra and 'container_koji_task_id' in self.extra:
        return self.extra['container_koji_task_id']
def prettytable(self):
    """Returns a new prettytable instance built from this formatted table.

    Applies sort column (validated against self.columns), per-column
    alignment, optional title, and all rows.
    """
    table = prettytable.PrettyTable(self.columns)
    if self.sortby:
        if self.sortby in self.columns:
            table.sortby = self.sortby
        else:
            msg = "Column (%s) doesn't exist to sort by" % self.sortby
            raise exceptions.CLIAbort(msg)
    for a_col, alignment in self.align.items():
        table.align[a_col] = alignment
    if self.title:
        table.title = self.title
    for row in self.rows:
        table.add_row(row)
    return table
def _stream_annotation(file_name, pb_dir):
    """Stream an entire remote annotation file from physiobank.

    Parameters
    ----------
    file_name : str
        The name of the annotation file to be read.
    pb_dir : str
        The physiobank directory where the annotation file is located.
    """
    url = posixpath.join(config.db_index_url, pb_dir, file_name)
    response = requests.get(url)
    response.raise_for_status()
    # np.frombuffer replaces the deprecated/removed np.fromstring for
    # binary input; .copy() keeps the returned array writable, matching
    # fromstring's behavior (frombuffer alone yields a read-only view).
    ann_data = np.frombuffer(response.content, dtype=np.dtype('<u1')).copy()
    return ann_data
def _verify_field_spec(self, spec, path):
    """Verifies a given field specification is valid, recursing into
    nested schemas if required.

    Raises SchemaFormatException for any malformed item; `path` is the
    dotted location used in the error messages.
    """
    if 'required' in spec and not isinstance(spec['required'], bool):
        raise SchemaFormatException("{} required declaration should be True or False", path)
    if 'nullable' in spec and not isinstance(spec['nullable'], bool):
        raise SchemaFormatException("{} nullable declaration should be True or False", path)
    if 'type' not in spec:
        raise SchemaFormatException("{} has no type declared.", path)
    # delegated checks (may recurse into nested schemas)
    self._verify_type(spec, path)
    if 'validates' in spec:
        self._verify_validates(spec, path)
    if 'default' in spec:
        self._verify_default(spec, path)
    # no keys other than the five supported ones may appear
    if not set(spec.keys()).issubset(set(['type', 'required', 'validates', 'default', 'nullable'])):
        raise SchemaFormatException("Unsupported field spec item at {}. Items: "+repr(spec.keys()), path)
def stop(self):
    """Stop the config change monitoring thread and wait for it to exit."""
    self.observer_thread.stop()
    self.observer_thread.join()
    logging.info("Configfile watcher plugin: Stopped")
def submit_statement_request(meth, end_point, query_str='', data=None,
                             tries=2, **params):
    """Even lower level function to make the request.

    Prefixes the endpoint with 'statements/' and delegates to
    make_db_rest_request.
    """
    full_end_point = 'statements/' + end_point.lstrip('/')
    return make_db_rest_request(meth, full_end_point, query_str, data,
                                params, tries)
def decodeCommandLine(self, cmdline):
    """Turn a byte string from the command line into a unicode string.

    Uses stdin's encoding when available, else the default encoding.
    NOTE(review): relies on the Python-2-only builtin ``unicode``; this
    raises NameError on Python 3 — confirm the intended interpreter.
    """
    codec = getattr(sys.stdin, 'encoding', None) or sys.getdefaultencoding()
    return unicode(cmdline, codec)
def _parse_numbered_syllable(unparsed_syllable): tone_number = unparsed_syllable[-1] if not tone_number.isdigit(): syllable, tone = unparsed_syllable, '5' elif tone_number == '0': syllable, tone = unparsed_syllable[:-1], '5' elif tone_number in '12345': syllable, tone = unparsed_syllable[:-1], tone_number else: raise ValueError("Invalid syllable: %s" % unparsed_syllable) return syllable, tone
Return the syllable and tone of a numbered Pinyin syllable.
def robust_mean(log_values):
    """Mean of values falling within the 25-75 percentiles.

    Parameters
    ----------
    log_values : 2-d numpy.array
        Center is computed along the second axis (i.e. per row).

    Returns
    -------
    center : numpy.array with one entry per row.
    """
    # too few columns for a meaningful percentile trim: plain nanmean
    if log_values.shape[1] <= 3:
        return numpy.nanmean(log_values, axis=1)
    filled = numpy.nan_to_num(log_values)
    upper = numpy.nanpercentile(log_values, 75, axis=1).reshape((-1, 1))
    lower = numpy.nanpercentile(log_values, 25, axis=1).reshape((-1, 1))
    mask = (~numpy.isnan(log_values)) & (filled <= upper) & (filled >= lower)
    return (filled * mask.astype(float)).sum(1) / mask.sum(1)
def coerce(cls, key, value):
    """Convert a plain list to a MutationList, coercing each element."""
    self = MutationList((MutationObj.coerce(key, v) for v in value))
    self._key = key
    return self
def highlightByAlternate(self):
    """Sets the palette highlighting for this tree widget to use a darker
    version of the alternate color vs. the standard highlighting.
    """
    palette = QtGui.QApplication.palette()
    palette.setColor(palette.HighlightedText, palette.color(palette.Text))
    # highlight = alternate base, darkened 10%
    clr = palette.color(palette.AlternateBase)
    palette.setColor(palette.Highlight, clr.darker(110))
    self.setPalette(palette)
def to_pandas(self):
    """Convert to pandas MultiIndex.

    Returns
    -------
    pandas.base.MultiIndex

    Raises
    ------
    ValueError when any constituent index has not been evaluated yet.
    """
    if not all(ind.is_raw() for ind in self.values):
        raise ValueError('Cannot convert to pandas MultiIndex if not evaluated.')
    from pandas import MultiIndex as PandasMultiIndex
    columns = [ind.values for ind in self.values]
    return PandasMultiIndex.from_arrays(columns, names=self.names)
def flush(self, n=4096):
    """Flush `n` bytes of data from the reader Stream to the writer Stream.

    Returns the number of bytes actually flushed (zero is not an error).
    Returns None once EOF has been reached; a broken pipe (EPIPE) is
    swallowed, any other OSError propagates.
    """
    try:
        chunk = self.from_stream.read(n)
        if not chunk:
            # EOF: mark it and optionally close the downstream writer
            self.eof = True
            if self.propagate_close:
                self.to_stream.close()
            return None
        return self.to_stream.write(chunk)
    except OSError as e:
        if e.errno != errno.EPIPE:
            raise e
def get_data(package, resource):
    """Get a resource from a package.

    This is a wrapper round the PEP 302 loader get_data API. `package`
    is a package name in standard module format (foo.bar); `resource` is
    a relative filename using '/' as the path separator ('..' and rooted
    names are not allowed).

    Returns the resource contents as a binary string, or None if the
    package cannot be located/loaded or its loader lacks get_data().
    Roughly equivalent, for filesystem packages, to reading
    os.path.join(os.path.dirname(module.__file__), resource) in 'rb' mode.
    """
    loader = get_loader(package)
    if loader is None or not hasattr(loader, 'get_data'):
        return None
    # import the package if needed so __file__ is available
    mod = sys.modules.get(package) or loader.load_module(package)
    if mod is None or not hasattr(mod, '__file__'):
        return None
    # build an OS-specific path rooted at the package directory
    parts = resource.split('/')
    parts.insert(0, os.path.dirname(mod.__file__))
    resource_name = os.path.join(*parts)
    return loader.get_data(resource_name)
def console_type(self, console_type):
    """Sets the console type for this node.

    Changing the type releases the current console TCP port and acquires
    a new one (VNC consoles are allocated from the 5900-6000 range).

    :param console_type: console type (string)
    """
    if console_type != self._console_type:
        # a new console type means a new port allocation
        self._manager.port_manager.release_tcp_port(self._console, self._project)
        if console_type == "vnc":
            self._console = self._manager.port_manager.get_free_tcp_port(self._project, 5900, 6000)
        else:
            self._console = self._manager.port_manager.get_free_tcp_port(self._project)
        self._console_type = console_type
        log.info("{module}: '{name}' [{id}]: console type set to {console_type}".format(module=self.manager.module_name,
                                                                                        name=self.name,
                                                                                        id=self.id,
                                                                                        console_type=console_type))
def get_site_symmetries(wyckoff):
    """Collect quoted site-symmetry symbols and dump them.

    Walks ``wyckoff[i]['wyckoff'][j]['site_symmetry']`` and formats each
    symbol as a double-quoted, 6-character left-justified field (maximum
    symbol length is 6) before handing the full list to
    ``damp_array_site_symmetries``.
    """
    symbols = []
    for entry in wyckoff:
        for site in entry['wyckoff']:
            symbols.append("\"%-6s\"" % site['site_symmetry'])
    damp_array_site_symmetries(symbols)
List site symmetries

The data structure is as follows:
wyckoff[0]['wyckoff'][0]['site_symmetry']

Note
----
Maximum length of a site-symmetry string is 6 characters.
def free(self, connection):
    """Free the connection from use by the session that was using it.

    :param connection: The connection to free
    :type connection: psycopg2.extensions.connection
    :raises: ConnectionNotFoundError
    """
    LOGGER.debug('Pool %s freeing connection %s', self.id, id(connection))
    try:
        self.connection_handle(connection).free()
    except KeyError:
        # The connection was never registered with this pool; surface it
        # as a pool-level error instead of a raw KeyError.
        raise ConnectionNotFoundError(self.id, id(connection))
    # When every connection in the pool is now idle, record the moment the
    # pool went fully idle (presumably used for idle-TTL cleanup elsewhere
    # — not visible from this block).
    if self.idle_connections == list(self.connections.values()):
        with self._lock:
            self.idle_start = self.time_method()
    LOGGER.debug('Pool %s freed connection %s', self.id, id(connection))
Free the connection from use by the session that was using it. :param connection: The connection to free :type connection: psycopg2.extensions.connection :raises: ConnectionNotFoundError
def second_order_diff(arr, x):
    """Differentiate ``arr`` with respect to ``x`` to second-order accuracy.

    Uses a one-sided 2nd-order forward difference at the first point, a
    2nd-order central difference in the interior, and a one-sided
    2nd-order backward difference at the last point. The result has the
    same length as the input array.
    """
    values = np.array(arr)
    # Denominators: span of two grid intervals at each edge and around
    # each interior point (equals 2*h on a uniform grid of spacing h).
    span_front = x[2] - x[0]
    span_back = x[-1] - x[-3]
    span_mid = x[2:] - x[:-2]
    front = (-3 * values[0] + 4 * values[1] - values[2]) / span_front
    back = (3 * values[-1] - 4 * values[-2] + values[-3]) / span_back
    middle = (values[2:] - values[:-2]) / span_mid
    return np.concatenate(([front], middle, [back]))
Compute second order difference of an array. A 2nd order forward difference is used for the first point, 2nd order central difference for interior, and 2nd order backward difference for last point, returning an array the same length as the input array.
def plot_and_save(self, data, w=800, h=420, filename='chart', overwrite=True):
    """Render the chart to ``<filename>.html`` and return an IFrame for it.

    The chart is written via :meth:`save`; the returned IFrame embeds the
    generated HTML file at the requested width/height so the plot can be
    displayed inline in a notebook.
    """
    self.save(data, filename, overwrite)
    html_path = filename + '.html'
    return IFrame(html_path, w, h)
Save the rendered html to a file and returns an IFrame to display the plot in the notebook.
def exists(self, table_name, timeout=None):
    """Returns a boolean indicating whether the table exists.

    :param str table_name:
        The name of table to check for existence.
    :param int timeout:
        The server timeout, expressed in seconds.
    :return: A boolean indicating whether the table exists.
    :rtype: bool
    """
    _validate_not_none('table_name', table_name)
    # Probe the specific table resource: success means it exists, a
    # "not found" HTTP error (translated below) means it does not.
    request = HTTPRequest()
    request.method = 'GET'
    request.host = self._get_host()
    request.path = '/Tables' + "('" + table_name + "')"
    # No-metadata JSON keeps the existence-probe payload minimal.
    request.headers = [('Accept', TablePayloadFormat.JSON_NO_METADATA)]
    request.query = [('timeout', _int_to_str(timeout))]

    try:
        self._perform_request(request)
        return True
    except AzureHttpError as ex:
        # Re-raises unless the error is "does not exist".
        _dont_fail_not_exist(ex)
        return False
Returns a boolean indicating whether the table exists. :param str table_name: The name of table to check for existence. :param int timeout: The server timeout, expressed in seconds. :return: A boolean indicating whether the table exists. :rtype: bool
def events(self):
    """All events in calendar within specified time range."""
    if self._events is None:
        # Lazily fetch once and memoize; subsequent accesses reuse the
        # cached result.
        query = self.parent.calendar.events(
            calendarId="primary",
            singleEvents=True,
            orderBy="startTime",
            timeMin=self.since,
            timeMax=self.until)
        self._events = query
    return self._events
All events in calendar within specified time range
def set_active_vectors(self, name, preference='cell'):
    """Finds the vectors by name and appropriately sets it as active.

    :param name: name of the array to activate as the vectors array
    :param preference: which association to prefer when the name exists in
        both point and cell data ('cell' or 'point')
    :raises RuntimeError: when the named array is in neither point nor
        cell data
    """
    # Locate which data field (point vs cell) holds the named array.
    _, field = get_scalar(self, name, preference=preference, info=True)
    if field == POINT_DATA_FIELD:
        self.GetPointData().SetActiveVectors(name)
    elif field == CELL_DATA_FIELD:
        self.GetCellData().SetActiveVectors(name)
    else:
        raise RuntimeError('Data field ({}) not useable'.format(field))
    # Remember which field/name is active so accessors can find it later.
    self._active_vectors_info = [field, name]
Finds the vectors by name and appropriately sets it as active
def double( self ):
    """Return a new point that is twice the old.

    Uses the affine doubling formula for a short Weierstrass curve
    y^2 = x^3 + a*x + b over GF(p). Doubling the point at infinity, or a
    point with y == 0 (its tangent line is vertical, so 2P is the
    identity), yields INFINITY.
    """
    if self == INFINITY:
        return INFINITY

    p = self.__curve.p()
    a = self.__curve.a()

    # A point with y == 0 is its own inverse, so doubling it gives the
    # point at infinity. Without this guard, inverse_mod(0, p) below
    # would fail instead of returning the correct group element.
    if self.__y % p == 0:
        return INFINITY

    # Slope of the tangent line at this point.
    l = ( ( 3 * self.__x * self.__x + a ) * \
          numbertheory.inverse_mod( 2 * self.__y, p ) ) % p

    x3 = ( l * l - 2 * self.__x ) % p
    y3 = ( l * ( self.__x - x3 ) - self.__y ) % p

    return Point( self.__curve, x3, y3 )
Return a new point that is twice the old.
def import_datasources(path, sync, recursive):
    """Import datasources from YAML"""
    sync_array = sync.split(',')
    target = Path(path)

    # Collect the YAML files to import: either the single file given, or
    # every *.yaml / *.yml under the directory (recursively if requested).
    files = []
    if target.is_file():
        files = [target]
    elif target.exists():
        globber = target.rglob if recursive else target.glob
        for pattern in ('*.yaml', '*.yml'):
            files.extend(globber(pattern))

    for f in files:
        logging.info('Importing datasources from file %s', f)
        try:
            with f.open() as data_stream:
                dict_import_export.import_from_dict(
                    db.session,
                    yaml.safe_load(data_stream),
                    sync=sync_array)
        except Exception as e:
            # Best-effort CLI import: log the failure and move on to the
            # next file rather than aborting the whole run.
            logging.error('Error when importing datasources from file %s', f)
            logging.error(e)
Import datasources from YAML
def plot(self, file_type):
    """Call file_type plotting function."""
    samples = self.mod_data[file_type]
    # Each file type carries its own title, plotting callable and params.
    config = file_types[file_type]
    return config['plot_func'](samples,
                               file_type,
                               plot_title=config['title'],
                               plot_params=config['plot_params'])
Call file_type plotting function.
def stop_dag(self, name=None):
    """Send a stop signal to the specified dag or the dag hosting this task.

    Args:
        name (str): The name of the dag that should be stopped. If no name
            is given the dag that hosts this task is stopped. Upon
            receiving the stop signal, the dag will not queue any new
            tasks and wait for running tasks to terminate.

    Returns:
        bool: True if the signal was sent successfully.
    """
    target = self._dag_name if name is None else name
    request = Request(action='stop_dag', payload={'name': target})
    return self._client.send(request).success
Send a stop signal to the specified dag or the dag that hosts this task. Args: name str: The name of the dag that should be stopped. If no name is given the dag that hosts this task is stopped. Upon receiving the stop signal, the dag will not queue any new tasks and wait for running tasks to terminate. Returns: bool: True if the signal was sent successfully.
def getReadAlignmentId(self, gaAlignment):
    """Returns a string ID suitable for use in the specified GA
    ReadAlignment object in this ReadGroupSet.
    """
    parent_id = self.getCompoundId()
    compound_id = datamodel.ReadAlignmentCompoundId(
        parent_id, gaAlignment.fragment_name)
    return str(compound_id)
Returns a string ID suitable for use in the specified GA ReadAlignment object in this ReadGroupSet.
def make_router():
    """Return a WSGI application that searches requests to controllers"""
    global router
    # (HTTP method, path regex, controller) triples; named groups in a
    # regex become parameters for the controller.
    routings = [
        ('GET', '^/$', index),
        ('GET', '^/api/?$', index),
        ('POST', '^/api/1/calculate/?$', calculate.api1_calculate),
        ('GET', '^/api/2/entities/?$', entities.api2_entities),
        ('GET', '^/api/1/field/?$', field.api1_field),
        ('GET', '^/api/1/formula/(?P<name>[^/]+)/?$', formula.api1_formula),
        ('GET', '^/api/2/formula/(?:(?P<period>[A-Za-z0-9:-]*)/)?(?P<names>[A-Za-z0-9_+-]+)/?$', formula.api2_formula),
        ('GET', '^/api/1/parameters/?$', parameters.api1_parameters),
        ('GET', '^/api/1/reforms/?$', reforms.api1_reforms),
        ('POST', '^/api/1/simulate/?$', simulate.api1_simulate),
        ('GET', '^/api/1/swagger$', swagger.api1_swagger),
        ('GET', '^/api/1/variables/?$', variables.api1_variables),
    ]
    # Also stored in the module-level `router` so other code can reuse it.
    router = urls.make_router(*routings)
    return router
Return a WSGI application that searches requests to controllers
def aggregate_gradients_using_copy_with_device_selection(
        tower_grads, avail_devices, use_mean=True, check_inf_nan=False):
    """Aggregate gradients, controlling device for the aggregation.

    Args:
      tower_grads: List of lists of (gradient, variable) tuples. The outer
        list is over towers. The inner list is over individual gradients.
      avail_devices: List of device name strings; the aggregation of
        gradient i is placed on avail_devices[i % len(avail_devices)].
      use_mean: if True, mean is taken, else sum of gradients is taken.
      check_inf_nan: If true, check grads for nans and infs.

    Returns:
      The list [(aggregated_gradient, variable), ...] where each gradient
      has been aggregated across all towers; the variable is chosen from
      the first tower.
    """
    agg_grads = []
    has_nan_or_inf_list = []

    for i, single_grads in enumerate(zip(*tower_grads)):
        # Round-robin the per-variable aggregation work across devices.
        with tf.device(avail_devices[i % len(avail_devices)]):
            grad_and_var, has_nan_or_inf = aggregate_single_gradient(
                single_grads, use_mean, check_inf_nan)
            agg_grads.append(grad_and_var)
            # NOTE(review): has_nan_or_inf_list is accumulated but never
            # returned or read in this function — confirm whether callers
            # expect a (grads, has_nan_or_inf) tuple or this is dead code.
            has_nan_or_inf_list.append(has_nan_or_inf)

    return agg_grads
Aggregate gradients, controlling device for the aggregation.

Args:
tower_grads: List of lists of (gradient, variable) tuples. The outer list is over towers. The inner list is over individual gradients.
avail_devices: List of device names; aggregation of gradient i is placed on avail_devices[i % len(avail_devices)].
use_mean: if True, mean is taken, else sum of gradients is taken.
check_inf_nan: If true, check grads for nans and infs.

Returns:
The list [(aggregated_gradient, variable), ...] where each gradient has been aggregated across all towers. The variable is chosen from the first tower.
def set_dtreat_interp_indch(self, indch=None):
    """Set the indices of the channels for which to interpolate data

    The index can be provided as:
        - A 1d np.ndarray of boolean or int indices of channels
            => interpolate data at these channels for all times
        - A dict with:
            * keys = int indices of times
            * values = array of int indices of chan. for which to interpolate

    Time indices refer to self.ddataRef['t']
    Channel indices refer to self.ddataRef['X']
    """
    # Accept exactly one of: None, an array/list of channel indices, or a
    # time->channels dict.
    lC = [indch is None, type(indch) in [np.ndarray, list], type(indch) is dict]
    assert any(lC)
    if lC[2]:
        # Dict form: each key must be a valid time index (int < nt); each
        # value is normalised in place to an index array over channels.
        lc = [type(k) is int and k < self._ddataRef['nt'] for k in indch.keys()]
        assert all(lc)
        for k in indch.keys():
            assert hasattr(indch[k], '__iter__')
            indch[k] = _format_ind(indch[k], n=self._ddataRef['nch'])
    elif lC[1]:
        # Array/list form: a single 1d index set applied to all times.
        indch = np.asarray(indch)
        assert indch.ndim == 1
        indch = _format_ind(indch, n=self._ddataRef['nch'])
    self._dtreat['interp-indch'] = indch
    # Invalidate cached treated data so it is recomputed with new indices.
    self._ddata['uptodate'] = False
Set the indices of the channels for which to interpolate data The index can be provided as: - A 1d np.ndarray of boolean or int indices of channels => interpolate data at these channels for all times - A dict with: * keys = int indices of times * values = array of int indices of chan. for which to interpolate Time indices refer to self.ddataRef['t'] Channel indices refer to self.ddataRef['X']
def delete_authoring_nodes(self, editor):
    """Deletes the Model authoring Nodes associated with given editor.

    :param editor: Editor.
    :type editor: Editor
    :return: Method success.
    :rtype: bool
    """
    node_for_editor = foundations.common.get_first_item(self.get_editor_nodes(editor))
    parent_file_node = node_for_editor.parent
    # Drop the editor node first, then its parent file node; a missing
    # file registration is tolerated.
    self.unregister_editor(node_for_editor)
    self.unregister_file(parent_file_node, raise_exception=False)
    return True
Deletes the Model authoring Nodes associated with given editor. :param editor: Editor. :type editor: Editor :return: Method success. :rtype: bool
def pop(self):
    """Pop a reading off of this stream and return it.

    Scans forward through the underlying storage from the current offset
    until an entry matching this walker's selector is found; entries for
    other streams are skipped permanently.

    :raises StreamEmptyError: if this walker has no buffered readings left
    """
    if self._count == 0:
        raise StreamEmptyError("Pop called on buffered stream walker without any data", selector=self.selector)

    while True:
        curr = self.engine.get(self.storage_type, self.offset)
        # Advance past this entry whether or not it matches.
        self.offset += 1

        stream = DataStream.FromEncoded(curr.stream)
        if self.matches(stream):
            # Found the next reading for our stream; account for it and
            # return it to the caller.
            self._count -= 1
            return curr
Pop a reading off of this stream and return it.
def compute_covariance(L_aug, Y, k, p):
    """Given label matrix L_aug and labels Y, compute the covariance.

    Args:
        L_aug: (np.array {0,1}) [n, d] The augmented (indicator) label matrix
        Y: (np.array int) [n] The true labels in {1,...,k}
        k: (int) Cardinality
        p: (np.array float) [k] The class balance
    """
    n, d = L_aug.shape
    assert Y.shape[0] == n
    mu = compute_mu(L_aug, Y, k, p)
    # Empirical second moment of L, minus the class-balance-weighted
    # outer-product correction term.
    second_moment = (L_aug.T @ L_aug) / n
    correction = mu @ np.diag(p) @ mu.T
    return second_moment - correction
Given label matrix L_aug and labels Y, compute the covariance. Args: L: (np.array {0,1}) [n, d] The augmented (indicator) label matrix Y: (np.array int) [n] The true labels in {1,...,k} k: (int) Cardinality p: (np.array float) [k] The class balance
def compress(bytes, target):
    """Compress a list of byte values to a fixed target length.

    >>> bytes = [96, 173, 141, 13, 135, 27, 96, 149, 128, 130, 151]
    >>> HumanHasher.compress(bytes, 4)
    [205, 128, 156, 96]

    Attempting to compress a smaller number of bytes to a larger number is
    an error:

    >>> HumanHasher.compress(bytes, 15) # doctest: +ELLIPSIS
    Traceback (most recent call last):
    ...
    ValueError: Fewer input bytes than requested output
    """
    length = len(bytes)
    if target > length:
        raise ValueError("Fewer input bytes than requested output")

    # Split the input into `target` equal-sized segments; any remainder
    # bytes are folded into the final segment.
    seg_size = length // target
    segments = [bytes[i * seg_size:(i + 1) * seg_size]
                for i in range(target)]
    segments[-1].extend(bytes[target * seg_size:])

    # Each output byte is the XOR of one segment.
    checksums = []
    for segment in segments:
        acc = 0
        for byte in segment:
            acc ^= byte
        checksums.append(acc)
    return checksums
Compress a list of byte values to a fixed target length. >>> bytes = [96, 173, 141, 13, 135, 27, 96, 149, 128, 130, 151] >>> HumanHasher.compress(bytes, 4) [205, 128, 156, 96] Attempting to compress a smaller number of bytes to a larger number is an error: >>> HumanHasher.compress(bytes, 15) # doctest: +ELLIPSIS Traceback (most recent call last): ... ValueError: Fewer input bytes than requested output
def glsl_type(self):
    """GLSL declaration strings required for a variable to hold this data."""
    if self.dtype is None:
        return None
    field = self.dtype[0]
    subshape = field.shape
    # A sub-array of length > 1 maps onto a GLSL vector type; a scalar
    # maps onto float/int depending on the numpy kind.
    count = subshape[0] if subshape else 1
    if count > 1:
        glsl = 'vec%d' % count
    elif 'f' in field.base.kind:
        glsl = 'float'
    else:
        glsl = 'int'
    return 'attribute', glsl
GLSL declaration strings required for a variable to hold this data.
def bed(args):
    """
    %prog bed map.out

    Convert MSTMAP output into bed format.
    """
    p = OptionParser(bed.__doc__)
    p.add_option("--switch", default=False, action="store_true",
                 help="Switch reference and aligned map elements [default: %default]")
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    mapout, = args
    # Output name is derived from the input: <prefix>.bed
    pf = mapout.split(".")[0]
    mapbed = pf + ".bed"
    bm = BinMap(mapout)
    bm.print_to_bed(mapbed, switch=opts.switch)

    return mapbed
%prog bed map.out

Convert MSTMAP output into bed format.
def download(url, file_handle, chunk_size=1024):
    """Downloads a given URL to a specific file.

    Parameters
    ----------
    url : str
        URL to download.
    file_handle : file
        Where to save the downloaded URL.
    chunk_size : int, optional
        Number of bytes fetched per iteration. Defaults to 1024.
    """
    r = requests.get(url, stream=True)
    try:
        total_length = r.headers.get('content-length')
        if total_length is None:
            # Server did not report a size; use an indeterminate bar.
            maxval = UnknownLength
        else:
            maxval = int(total_length)
        name = file_handle.name
        with progress_bar(name=name, maxval=maxval) as bar:
            for i, chunk in enumerate(r.iter_content(chunk_size)):
                if total_length:
                    # Report bytes completed *after* this chunk, capped at
                    # maxval so a final partial chunk cannot overshoot the
                    # bar (the old `i * chunk_size` lagged one chunk and
                    # never reached 100%).
                    bar.update(min((i + 1) * chunk_size, maxval))
                file_handle.write(chunk)
    finally:
        # Return the connection to the pool even if an error occurs.
        r.close()
Downloads a given URL to a specific file. Parameters ---------- url : str URL to download. file_handle : file Where to save the downloaded URL.
def expand(self, expression):
    """Expands logical constructions.

    Substitutes every placeholder captured by ``self._pattern`` with the
    corresponding value from ``self._variables``, strips the result, and
    coerces a purely numeric result to ``int`` or ``float``.

    :param expression: value to expand; returned unchanged when it is not
        a string.
    :return: the expanded string, or an int/float when the result is
        numeric.
    """
    self.logger.debug("expand : expression %s", str(expression))
    # Non-string values (ints, lists, ...) pass through untouched.
    if not is_string(expression):
        return expression
    # Replace each placeholder with the string form of its variable.
    result = self._pattern.sub(lambda var: str(self._variables[var.group(1)]), expression)
    result = result.strip()
    self.logger.debug('expand : %s - result : %s', expression, result)
    # A numeric-looking result is converted: all-digits -> int, else float.
    if is_number(result):
        if result.isdigit():
            self.logger.debug(' expand is integer !!!')
            return int(result)
        else:
            self.logger.debug(' expand is float !!!')
            return float(result)
    return result
Expands logical constructions.
def _next_lowest_integer(group_keys):
    """Return one more than the largest integer-valued key.

    Scans *group_keys* for values that look like integers (per ``_is_int``)
    and returns ``max + 1``; when there are none, numbering starts at 1.
    The returned integer is therefore guaranteed to be unused as a key.

    :param group_keys: iterable of dict keys (typically strings)
    :return: int
    """
    int_values = [int(val) for val in group_keys if _is_int(val)]
    # max() of an empty sequence is undefined; the old bare `except:` here
    # silently swallowed *every* error — only the genuinely-empty case
    # should fall back to 0.
    largest_int = max(int_values) if int_values else 0
    return largest_int + 1
returns the lowest available integer in a set of dict keys
def step(self)->Number:
    "Return next value along annealed schedule."
    # Advance the iteration counter, then evaluate the annealing function
    # at the new fractional position n / n_iter.
    self.n += 1
    position = self.n / self.n_iter
    return self.func(self.start, self.end, position)
Return next value along annealed schedule.
def string_tag(name, value):
    """Create a DMAP tag with string data.

    Tag layout: 4-byte tag name, 4-byte big-endian payload length, then
    the UTF-8 encoded payload.

    :param name: tag name, e.g. 'mnam'
    :param value: string payload
    :return: the encoded tag as bytes
    """
    # Encode first so the length field counts UTF-8 *bytes*; the previous
    # len(value) counted characters and undercounted non-ASCII payloads.
    payload = value.encode('utf-8')
    return name.encode('utf-8') + \
        len(payload).to_bytes(4, byteorder='big') + \
        payload
Create a DMAP tag with string data.
def usermacro_delete(macroids, **kwargs):
    """
    Delete host usermacros.

    :param macroids: macroids of the host usermacros

    :param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring)
    :param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring)
    :param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring)

    return: IDs of the deleted host usermacro.

    CLI Example:

    .. code-block:: bash

        salt '*' zabbix.usermacro_delete 21
    """
    conn_args = _login(**kwargs)
    ret = {}
    try:
        if conn_args:
            method = 'usermacro.delete'
            # The API expects a list of macro IDs; wrap a scalar argument.
            if isinstance(macroids, list):
                params = macroids
            else:
                params = [macroids]
            ret = _query(method, params, conn_args['url'], conn_args['auth'])
            return ret['result']['hostmacroids']
        else:
            # Login failed: raising KeyError routes execution to the
            # common empty-dict return below (pattern used in this module).
            raise KeyError
    except KeyError:
        return ret
Delete host usermacros. :param macroids: macroids of the host usermacros :param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring) :param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring) :param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring) return: IDs of the deleted host usermacro. CLI Example: .. code-block:: bash salt '*' zabbix.usermacro_delete 21
def apicalCheck(self, apicalInput):
    """Return cells whose basal prediction is also apically supported.

    Computes the active apical segments for *apicalInput* and intersects
    the cells those segments support with the cells predicted by the
    currently active basal segments.

    @param apicalInput (numpy array)
    List of active input bits for the apical dendrite segments

    @return (numpy array) sorted cell indices predicted by both basal and
    apical input
    """
    # Segment activity for this apical input; only the active-segment
    # result is used below (matching segments / overlaps are discarded).
    (activeApicalSegments,
     matchingApicalSegments,
     apicalPotentialOverlaps) = self._calculateSegmentActivity(
        self.apicalConnections, apicalInput, self.connectedPermanence,
        self.activationThreshold, self.minThreshold, self.reducedBasalThreshold)

    apicallySupportedCells = self.apicalConnections.mapSegmentsToCells(
        activeApicalSegments)

    # Keep only cells that the active basal segments already predict.
    predictedCells = np.intersect1d(
        self.basalConnections.mapSegmentsToCells(self.activeBasalSegments),
        apicallySupportedCells)

    return predictedCells
Return the cells that are both basally predicted and apically supported: finds the active apical segments for the given input and intersects the cells they support with the cells predicted by the active basal segments.

@param apicalInput (numpy array) List of active input bits for the apical dendrite segments
def get_contrib_names(self, contrib):
    """Return an appropriate (proper_name, file_as_name) pair for a contrib.

    Handles three contributor kinds: a <collab> (group) contributor, an
    <anonymous> contributor, and a personal <name> with a <surname> and
    optional <given-names>. Shared helper so nav and package contributor
    handling agree on name formatting.
    """
    collab = contrib.find('collab')
    if collab is not None:
        # Group author: the serialized collab text serves as both names.
        collab_text = serialize(collab, strip=True)
        return collab_text, collab_text

    if contrib.find('anonymous') is not None:
        return 'Anonymous', 'Anonymous'

    # Personal name: surname plus optional given names.
    name = contrib.find('name')
    surname = name.find('surname').text
    given = name.find('given-names')
    if given is None or not given.text:
        return surname, surname
    proper_name = ' '.join([surname, given.text])
    # File-as form: "Surname, G" (first initial only).
    file_as_name = ', '.join([surname, given.text[0]])
    return proper_name, file_as_name
Returns an appropriate Name and File-As-Name for a contrib element. This code was refactored out of nav_contributors and package_contributors to provide a single definition point for a common job. This is a useful utility that may be well-employed for other publishers as well.
def __schema_descriptor(self, services):
    """Descriptor for the all the JSON Schema used.

    Args:
      services: List of protorpc.remote.Service instances implementing an
        api/version.

    Returns:
      Dictionary containing all the JSON Schema used in the service.
    """
    methods_desc = {}

    for service in services:
        protorpc_methods = service.all_remote_methods()
        # NOTE: iterkeys() — this module targets Python 2.
        for protorpc_method_name in protorpc_methods.iterkeys():
            # Rosy method names take the form '<ServiceName>.<method>'.
            rosy_method = '%s.%s' % (service.__name__, protorpc_method_name)
            method_id = self.__id_from_name[rosy_method]

            request_response = {}

            # Only emit $ref entries for schemas that were actually
            # registered for this method's request and/or response.
            request_schema_id = self.__request_schema.get(method_id)
            if request_schema_id:
                request_response['request'] = {
                    '$ref': request_schema_id
                }

            response_schema_id = self.__response_schema.get(method_id)
            if response_schema_id:
                request_response['response'] = {
                    '$ref': response_schema_id
                }

            methods_desc[rosy_method] = request_response

    descriptor = {
        'methods': methods_desc,
        'schemas': self.__parser.schemas(),
    }

    return descriptor
Descriptor for the all the JSON Schema used. Args: services: List of protorpc.remote.Service instances implementing an api/version. Returns: Dictionary containing all the JSON Schema used in the service.
def cleanup(graph, subgraphs):
    """Clean up the metadata in the subgraphs.

    :type graph: pybel.BELGraph
    :type subgraphs: dict[Any,pybel.BELGraph]
    """
    # Refresh node data and graph-level metadata of every subgraph from
    # the parent graph.
    for member in subgraphs.values():
        update_node_helper(graph, member)
        update_metadata(graph, member)
Clean up the metadata in the subgraphs. :type graph: pybel.BELGraph :type subgraphs: dict[Any,pybel.BELGraph]
def _check_patch_type_mismatch(self, patched_item, existing_item):
    """Enforces that each patch has a corresponding, already-defined data type.

    :param patched_item: AST patch node (struct or union patch).
    :param existing_item: previously-defined AST node the patch targets.
    :raises InvalidSpec: when the patch kind does not match the existing
        definition (struct vs union, or closed vs open union).
    """
    def raise_mismatch_error(patched_item, existing_item, data_type_name):
        # Shared error formatter that points at both the patch site and
        # the original definition site.
        error_msg = ('Type mismatch. Patch {} corresponds to pre-existing '
                     'data_type {} ({}:{}) that has type other than {}.')
        raise InvalidSpec(error_msg.format(
            quote(patched_item.name),
            quote(existing_item.name),
            existing_item.path,
            existing_item.lineno,
            quote(data_type_name)),
            patched_item.lineno, patched_item.path)

    if isinstance(patched_item, AstStructPatch):
        if not isinstance(existing_item, AstStructDef):
            raise_mismatch_error(patched_item, existing_item, 'struct')
    elif isinstance(patched_item, AstUnionPatch):
        if not isinstance(existing_item, AstUnionDef):
            raise_mismatch_error(patched_item, existing_item, 'union')
        else:
            # Both sides are unions: their open/closed-ness must agree too.
            if existing_item.closed != patched_item.closed:
                raise_mismatch_error(
                    patched_item, existing_item,
                    'union_closed' if existing_item.closed else 'union')
    else:
        # Any other AST node here indicates a programming error upstream.
        raise AssertionError(
            'Unknown Patch Object Type {}'.format(patched_item.__class__.__name__))
Enforces that each patch has a corresponding, already-defined data type.