code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def additional_cleanup(self):
    """Tidy up various PyMOL representations after loading."""
    # Keep only blank/A alternate-location atoms.
    cmd.remove('not alt ""+A')
    cmd.hide('labels', 'Interactions')
    cmd.disable('%sCartoon' % self.protname)
    cmd.hide('everything', 'hydrogens')
Cleanup of various representations
def getHidden(self):
    """Return whether this analysis is hidden from results reports.

    If HiddenManually is set, the stored Hidden field wins; otherwise the
    decision is delegated to the Analysis Request's per-service settings.

    :return: true or false
    :rtype: bool
    """
    if self.getHiddenManually():
        return self.getField('Hidden').get(self)
    request = self.getRequest()
    if not request:
        return False
    settings = request.getAnalysisServiceSettings(self.getServiceUID())
    return settings.get('hidden', False)
Returns whether the analysis must be displayed in results reports or not, as well as in analyses view when the user logged in is a Client Contact. If the value for the field HiddenManually is set to False, this function will delegate the action to the method getAnalysisServiceSettings() from the Analysis Request. If the value for the field HiddenManually is set to True, this function will return the value of the field Hidden. :return: true or false :rtype: bool
def asrgb(self, *args, **kwargs):
    """Read image data from file and return an RGB image as a numpy array.

    Delegates to TiffPage.asrgb with validation disabled; a keyframe must
    have been assigned first.
    """
    if self._keyframe is None:
        raise RuntimeError('keyframe not set')
    # Pass a copy with validate forced off (kwargs is already per-call).
    return TiffPage.asrgb(self, *args, **dict(kwargs, validate=False))
Read image data from file and return RGB image as numpy array.
def _new_list(self, size, name):
    """Start a new list template of ``size`` named ``name``.

    The list type must be given afterwards by defining one field, and the
    definition closed with `End List`. Size '*' means free length
    (decoding only).
    """
    template = ListTemplate(size, name, self._current_container)
    self._message_stack.append(template)
Defines a new list to template of `size` and with `name`. List type must be given after this keyword by defining one field. Then the list definition has to be closed using `End List`. Special value '*' in size means that list will decode values as long as data is available. This free length value is not supported on encoding. Examples: | New list | 5 | myIntList | | u16 | | End List | | u8 | listLength | | New list | listLength | myIntList | | u16 | | End List | | New list | * | myIntList | | u16 | | End List |
def get_sections_2dnt(self, sec2d_go):
    """Return a sections list of (name, sorted namedtuple list) pairs."""
    sections = []
    for section_name, gos in sec2d_go:
        sections.append((section_name, self.get_ntgos_sorted(gos)))
    return sections
Return a sections list containing sorted lists of namedtuples.
def get_instance(self, payload):
    """Build a YearlyInstance from an API payload.

    :param dict payload: payload response from the API
    :rtype: twilio.rest.api.v2010.account.usage.record.yearly.YearlyInstance
    """
    account_sid = self._solution['account_sid']
    return YearlyInstance(self._version, payload, account_sid=account_sid)
Build an instance of YearlyInstance :param dict payload: Payload response from the API :returns: twilio.rest.api.v2010.account.usage.record.yearly.YearlyInstance :rtype: twilio.rest.api.v2010.account.usage.record.yearly.YearlyInstance
def switch(self, gen_mode:bool=None):
    "Switch the model to `gen_mode` if provided, otherwise toggle the current mode."
    if gen_mode is None:
        self.gen_mode = not self.gen_mode
    else:
        self.gen_mode = gen_mode
    active_opt = self.opt_gen if self.gen_mode else self.opt_critic
    self.opt.opt = active_opt.opt
    self._set_trainable()
    self.model.switch(gen_mode)
    self.loss_func.switch(gen_mode)
Switch the model, if `gen_mode` is provided, in the desired mode.
def is_pg_at_least_nine_two(self):
    """Return True if the PostgreSQL server is newer than 9.2.0.

    Some queries have different syntax depending on the server version, so
    the answer is computed once and cached on the instance.

    :returns: boolean
    """
    if self._is_pg_at_least_nine_two is None:
        results = self.version()
        # Raw string avoids invalid-escape warnings in the pattern.
        regex = re.compile(r"PostgreSQL (\d+\.\d+\.\d+) on")
        matches = regex.match(results[0].version)
        version = matches.groups()[0]
        # Compare numerically: the previous string comparison
        # (version > '9.2.0') was lexicographic and wrongly classified
        # PostgreSQL 10+ ("10.x.y" < "9.2.0") as older than 9.2.
        version_tuple = tuple(int(part) for part in version.split('.'))
        self._is_pg_at_least_nine_two = version_tuple > (9, 2, 0)
    return self._is_pg_at_least_nine_two
Some queries have different syntax depending what version of postgres we are querying against. :returns: boolean
def statistics(self):
    """Return the thread-local dictionary of runtime statistics.

    Created lazily (empty) on first access in each thread; each thread
    sees only its own statistics.
    """
    if not hasattr(self._local, 'statistics'):
        self._local.statistics = {}
    return self._local.statistics
Return a dictionary of runtime statistics. This dictionary will be empty when the controller has never been run. When it is running or has run previously it should (but may not) have useful and/or informational keys and values when running is underway and/or completed. .. warning:: The keys in this dictionary **should** be somewhat stable (not changing), but their existence **may** change between major releases as new statistics are gathered or removed so before accessing keys ensure that they actually exist and handle when they do not. .. note:: The values in this dictionary are local to the thread running call (so if multiple threads share the same retrying object - either directly or indirectly) they will each have their own view of statistics they have collected (in the future we may provide a way to aggregate the various statistics from each thread).
def _load_cwr_defaults(self): if self._cwr_defaults is None: self._cwr_defaults = self._reader.read_yaml_file( self._file_defaults) return self._cwr_defaults
Loads the CWR default values file, creating a matrix from it, and then returns this data. The file will only be loaded once. :return: the CWR default values matrix
def initialize(self, configfile=None):
    """Initialize the remote module by sending an 'initialize' request."""
    metadata = {"initialize": configfile}
    send_array(self.socket, None, metadata)
    # Reply payload is not used; the call only waits for completion.
    recv_array(
        self.socket, poll=self.poll, poll_timeout=self.poll_timeout,
        flags=self.zmq_flags)
Initialize the module
def write_block_data(self, cmd, block):
    """Write a block of bytes to the given I2C command register."""
    self.bus.write_i2c_block_data(self.address, cmd, block)
    formatted = ', '.join('0x%02X' % byte for byte in block)
    self.log.debug(
        "write_block_data: Wrote [%s] to command register 0x%02X" % (
            formatted, cmd))
Writes a block of bytes to the bus using I2C format to the specified command register
async def terminate(self) -> None:
    """Terminate the server if no connection remains after a grace period."""
    await asyncio.sleep(config.shutdown_wait)
    if is_connected():
        return
    stop_server(self.application.server)
    self.application.loop.stop()
Terminate server if no more connection exists.
def copy_rec(source, dest):
    """Recursively copy ``source`` into the existing directory ``dest``.

    Subdirectories of a source directory are created in the destination;
    existing files in the destination are overwritten without warning.
    Anything that is neither a file nor a directory is ignored with a log
    message.

    Args:
        source (str): File or directory name.
        dest (str): Directory name.

    Raises:
        FileNotFoundError: Destination directory doesn't exist.
    """
    if os.path.isdir(source):
        for entry in os.listdir(source):
            src_child = os.path.join(source, entry)
            if os.path.isdir(src_child):
                # Only directories are mirrored; the previous code also
                # created a directory per *file*, so files ended up nested
                # at dest/<name>/<name> instead of dest/<name>.
                dst_child = os.path.join(dest, entry)
                os.makedirs(dst_child, exist_ok=True)
                copy_rec(src_child, dst_child)
            else:
                copy_rec(src_child, dest)
    elif os.path.isfile(source):
        logging.info(' Copy "{}" to "{}"'.format(source, dest))
        shutil.copy(source, dest)
    else:
        logging.info(' Ignoring "{}"'.format(source))
Copy files between different directories. Copy one or more files to an existing directory. This function is recursive, if the source is a directory, all its subdirectories are created in the destination. Existing files in destination are overwritten without any warning. Args: source (str): File or directory name. dest (str): Directory name. Raises: FileNotFoundError: Destination directory doesn't exist.
def connect_db(Repo, database=":memory:"):
    """Attach a sqlite3 connection to ``Repo`` and return it.

    The connection is stored on ``Repo.db`` so all instances can interact
    with the database; declared-type detection is enabled.
    """
    connection = sqlite3.connect(
        database, detect_types=sqlite3.PARSE_DECLTYPES)
    Repo.db = connection
    return connection
Connect Repo to a database with path +database+ so all instances can interact with the database.
def reading_order(e1, e2):
    """Comparator sorting bboxes top to bottom, then left to right.

    Boxes whose top or bottom edges round to the same value are treated as
    one line and ordered by their left edge.
    """
    box1, box2 = e1.bbox, e2.bbox
    same_row = (round(box1[y0]) == round(box2[y0])
                or round(box1[y1]) == round(box2[y1]))
    if same_row:
        return float_cmp(box1[x0], box2[x0])
    return float_cmp(box1[y0], box2[y0])
A comparator to sort bboxes from top to bottom, left to right
def update(self, data):
    """Update metadata, handling the virtual hierarchy.

    Keys starting with '/' are routed to child nodes (splitting off the
    first path component); everything else is stored in this node's data.
    """
    if data is None:
        return
    for key, value in sorted(data.items()):
        if not key.startswith('/'):
            self.data[key] = value
            continue
        name = key.lstrip('/')
        match = re.search("([^/]+)(/.*)", name)
        if match:
            name, remainder = match.groups()
            value = {remainder: value}
        self.child(name, value)
    log.debug("Data for '{0}' updated.".format(self))
    log.data(pretty(self.data))
Update metadata, handle virtual hierarchy
def maybe_get_fig_ax(*args, **kwargs):
    """Resolve (fig, ax) from args/kwargs for prettyplotlib wrappers.

    Historically the first positional argument had to be the Axes; this
    accepts fig/ax via keywords, via two leading positionals, or falls
    back to the current (or a new) figure.

    @return: (fig, ax, remaining_args, kwargs_copy)
    """
    if 'ax' in kwargs:
        ax = kwargs.pop('ax')
        fig = kwargs.pop('fig') if 'fig' in kwargs else plt.gcf()
    elif len(args) == 0:
        fig, ax = plt.gcf(), plt.gca()
    elif isinstance(args[0], mpl.figure.Figure) and \
            isinstance(args[1], mpl.axes.Axes):
        fig, ax, args = args[0], args[1], args[2:]
    else:
        fig, ax = plt.subplots(1)
    return fig, ax, args, dict(kwargs)
It used to be that the first argument of prettyplotlib had to be the 'ax' object, but that's not the case anymore. This is specially made for pcolormesh. @param args: @type args: @param kwargs: @type kwargs: @return: @rtype:
def add_sched_block_instance(self, config_dict):
    """Add a Scheduling Block Instance to the database.

    Args:
        config_dict (dict): SBI configuration
    """
    schema = self._get_schema()
    LOG.debug('Adding SBI with config: %s', config_dict)
    validate(config_dict, schema)
    sbi = self._add_status(config_dict)
    sched_data, processing_blocks = self._split_sched_block_instance(sbi)
    sbi_key = "scheduling_block:" + sbi["id"]
    self._db.set_specified_values(sbi_key, sched_data)
    self._db.push_event(self.scheduling_event_name, sbi["status"],
                        sbi["id"])
    for block in processing_blocks:
        block_key = sbi_key + ":processing_block:" + block['id']
        self._db.set_specified_values(block_key, block)
        self._db.push_event(self.processing_event_name, block["status"],
                            block["id"])
Add Scheduling Block to the database. Args: config_dict (dict): SBI configuration
def lookup_cc_partner(nu_pid):
    """Look up the charged-current partner of a neutrino PDG code.

    The charged lepton partner is the code one below the neutrino's, with
    the sign preserved: 12 (nu_e) -> 11 (e-), -14 -> -13.

    :param int nu_pid: PDG code of a (anti)neutrino: +/-12, +/-14 or +/-16.
    :return: PDG code of the charged lepton partner (sign kept).
    :rtype: int
    :raises ValueError: if ``nu_pid`` is not a neutrino PDG code.
    """
    # Pure integer arithmetic; the previous math.fabs/copysign round trip
    # through floats was unnecessary, and assert-based validation is
    # stripped when Python runs with -O.
    if abs(nu_pid) not in (12, 14, 16):
        raise ValueError("not a neutrino PDG code: %r" % (nu_pid,))
    magnitude = abs(nu_pid) - 1
    return magnitude if nu_pid > 0 else -magnitude
Lookup the charge current partner Takes as an input neutrino nu_pid is a PDG code, then returns the charged lepton partner. So 12 (nu_e) returns 11. Keeps sign
def instructions(self):
    """The number of instructions in this block (computed once, cached)."""
    if self._instructions is None:
        if self.statements is None:
            self._instructions = 0
        else:
            self._instructions = sum(
                1 for s in self.statements if type(s) is stmt.IMark)
    return self._instructions
The number of instructions in this block
def convert_time_stamp_to_date(content):
    """Convert millisecond timestamps in ``content`` to date strings.

    :param dict content: record that may contain 'startTime'/'endTime'
        millisecond epoch timestamps.
    :return: the same dict, mutated in place, with any truthy timestamps
        replaced by 'YYYY/MM/DD HH:MM:SS' UTC strings.
    """
    for key in ('startTime', 'endTime'):
        time_stamp = content.get(key)
        if time_stamp:
            # datetime.utcfromtimestamp() is deprecated since Python 3.12;
            # use an explicitly UTC-aware conversion (same rendered text).
            moment = datetime.datetime.fromtimestamp(
                time_stamp // 1000, tz=datetime.timezone.utc)
            content[key] = moment.strftime("%Y/%m/%d %H:%M:%S")
    return content
Convert time stamp to date time format
def exit_full_screen(self):
    """Switch from full screen to windowed mode and drop the key binding."""
    self.tk.attributes("-fullscreen", False)
    self._full_screen = False
    self.events.remove_event("<FullScreen.Escape>")
Change from full screen to windowed mode and remove key binding
def OnItemSelected(self, event):
    """Handle list item selection by switching to the chosen table."""
    table = event.m_itemIndex
    self.startIndex = table
    self.switching = True
    post_command_event(self, self.GridActionTableSwitchMsg, newtable=table)
    self.switching = False
    event.Skip()
Item selection event handler
def reduce(x, op='sum'):
    """Reduction function with a given operation.

    Args:
        x (Variable): An input.
        op (str): 'sum' or 'mean'.

    Raises:
        ValueError: If ``op`` is neither 'sum' nor 'mean'.

    Note:
        This is deprecated. Use ``mean`` or ``sum`` instead.
    """
    import warnings
    warnings.warn(
        "Deprecated API. Use ``sum`` or ``mean`` instead.",
        DeprecationWarning)
    from .function_bases import reduce_sum, reduce_mean
    if op == 'sum':
        return reduce_sum(x)
    if op == 'mean':
        return reduce_mean(x)
    # Previously raised a bare ValueError() with no message.
    raise ValueError(
        "unsupported reduction op: %r (expected 'sum' or 'mean')" % (op,))
Reduction function with given operation. Args: x (Variable): An input. op (str): 'sum' or 'mean'. Note: This is deprecated. Use ``mean`` or ``sum`` instead.
def unset_role(username, role, **kwargs):
    """Remove ``role`` from ``username`` on an NX-OS device.

    CLI Example:

    .. code-block:: bash

        salt '*' nxos.cmd unset_role username=daniel role=vdc-admin
    """
    return config('no username {0} role {1}'.format(username, role),
                  **kwargs)
Remove role from username. username Username for role removal role Role to remove no_save_config If True, don't save configuration commands to startup configuration. If False, save configuration to startup configuration. Default: False .. code-block:: bash salt '*' nxos.cmd unset_role username=daniel role=vdc-admin
def get_version(root):
    """Load and return the contents of ``version.json`` under ``root``.

    :param root: directory expected to contain ``version.json``
    :type root: str
    :returns: parsed content of ``version.json``, or None if absent
    :rtype: dict or None
    """
    path = os.path.join(root, 'version.json')
    if not os.path.exists(path):
        return None
    with open(path, 'r') as handle:
        return json.load(handle)
Load and return the contents of version.json. :param root: The root path that the ``version.json`` file will be opened :type root: str :returns: Content of ``version.json`` or None :rtype: dict or None
def in_labelset(xmrs, nodeids, label=None):
    """Test whether all ``nodeids`` share a label.

    Args:
        nodeids: iterable of nodeids
        label (str, optional): label they must share; defaults to the
            label of the first nodeid.
    Returns:
        bool: `True` if all nodeids share the label.
    """
    nodeid_set = set(nodeids)
    if label is None:
        first = next(iter(nodeid_set))
        label = xmrs.ep(first).label
    return nodeid_set.issubset(xmrs._vars[label]['refs']['LBL'])
Test if all nodeids share a label. Args: nodeids: iterable of nodeids label (str, optional): the label that all nodeids must share Returns: bool: `True` if all nodeids share a label, otherwise `False`
def _insert(self, namespace, stream, events, configuration):
    """Bulk-insert ``events`` of ``stream`` into the ``namespace`` index.

    Each event id encodes its kronos timestamp, which provides the
    logstash timestamp field and the daily aliases to register.
    """
    index = self.index_manager.get_index(namespace)
    start_dts_to_add = set()

    def prepared_events():
        for _id, event in events:
            dt = kronos_time_to_datetime(uuid_to_kronos_time(_id))
            start_dts_to_add.add(_round_datetime_down(dt))
            event['_index'] = index
            event['_type'] = stream
            event[LOGSTASH_TIMESTAMP_FIELD] = dt.isoformat()
            yield event

    # Drain the streaming iterator to execute all bulk requests.
    list(es_helpers.streaming_bulk(self.es, prepared_events(),
                                   chunk_size=1000,
                                   refresh=self.force_refresh))
    self.index_manager.add_aliases(namespace, index, start_dts_to_add)
`namespace` acts as db for different streams `stream` is the name of a stream and `events` is a list of events to insert.
def _Open(self, path_spec=None, mode='rb'):
    """Opens the file-like object.

    Args:
        path_spec (Optional[PathSpec]): path specification.
        mode (Optional[str]): file access mode.

    Raises:
        PathSpecError: if the path specification is incorrect.
        ValueError: if the path specification is invalid.
    """
    if not self._file_object_set_in_init and not path_spec:
        raise ValueError('Missing path specification.')
    if not self._file_object_set_in_init:
        if not path_spec.HasParent():
            raise errors.PathSpecError(
                'Unsupported path specification without parent.')
        self._encryption_method = getattr(
            path_spec, 'encryption_method', None)
        if self._encryption_method is None:
            raise errors.PathSpecError(
                'Path specification missing encryption method.')
        self._file_object = resolver.Resolver.OpenFileObject(
            path_spec.parent, resolver_context=self._resolver_context)
        self._path_spec = path_spec
Opens the file-like object. Args: path_spec (Optional[PathSpec]): path specification. mode (Optional[str]): file access mode. Raises: AccessError: if the access to open the file was denied. IOError: if the file-like object could not be opened. OSError: if the file-like object could not be opened. PathSpecError: if the path specification is incorrect. ValueError: if the path specification is invalid.
def _get_ericscript_path(self):
    """Return the directory of the isolated ericscript anaconda env."""
    executable = utils.which(
        os.path.join(utils.get_bcbio_bin(), self.EXECUTABLE))
    return os.path.dirname(os.path.realpath(executable))
Retrieve PATH to the isolated ericscript anaconda environment.
async def findArtifactFromTask(self, *args, **kwargs):
    """Get Artifact From Indexed Task.

    Find a task by index path and redirect to the named artifact on its
    most recent run. This method is ``stable``.
    """
    func = self.funcinfo["findArtifactFromTask"]
    return await self._makeApiCall(func, *args, **kwargs)
Get Artifact From Indexed Task Find a task by index path and redirect to the artifact on the most recent run with the given `name`. Note that multiple calls to this endpoint may return artifacts from different tasks if a new task is inserted into the index between calls. Avoid using this method as a stable link to multiple, connected files if the index path does not contain a unique identifier. For example, the following two links may return unrelated files: * https://tc.example.com/api/index/v1/task/some-app.win64.latest.installer/artifacts/public/installer.exe` * https://tc.example.com/api/index/v1/task/some-app.win64.latest.installer/artifacts/public/debug-symbols.zip` This problem can be remedied by including the revision in the index path or by bundling both installer and debug symbols into a single artifact. If no task exists for the given index path, this API end-point responds with 404. This method is ``stable``
def genetic(problem, population_size=100, mutation_chance=0.1,
            iterations_limit=0, viewer=None):
    """Genetic search.

    population_size: size of the population.
    mutation_chance: probability of mutating a child (0 to 1).
    iterations_limit: stop after this many iterations; 0 means continue
        until no better node is found.
    Requires: SearchProblem.generate_random_state, crossover, mutate
    and value.
    """
    expander = _create_genetic_expander(problem, mutation_chance)
    return _local_search(problem,
                         expander,
                         iterations_limit=iterations_limit,
                         fringe_size=population_size,
                         random_initial_states=True,
                         stop_when_no_better=iterations_limit == 0,
                         viewer=viewer)
Genetic search. population_size specifies the size of the population (ORLY). mutation_chance specifies the probability of a mutation on a child, varying from 0 to 1. If iterations_limit is specified, the algorithm will end after that number of iterations. Else, it will continue until it can't find a better node than the current one. Requires: SearchProblem.generate_random_state, SearchProblem.crossover, SearchProblem.mutate and SearchProblem.value.
def peak_generation(self):
    """Cumulative peak generation capacity of this grid's generators.

    Returns
    -------
    float
        Ad-hoc calculated or cached peak generation capacity.
    """
    if self._peak_generation is None:
        self._peak_generation = sum(
            gen.nominal_capacity for gen in self.generators)
    return self._peak_generation
Cumulative peak generation capacity of generators of this grid Returns ------- float Ad-hoc calculated or cached peak generation capacity
def prepend_note(self, player, text):
    """Prepend ``text`` to the player's already existing note."""
    note = self._find_note(player)
    note.text = ''.join((text, note.text))
Prepend text to an already existing note.
def get_service(self, factory, svc_registration):
    """Return the service instance required by the bundle.

    A plain service factory is called only when necessary; a prototype
    service factory is called on each request.

    :param factory: The service factory
    :param svc_registration: The ServiceRegistration object
    :return: The requested service instance (created if necessary)
    """
    reference = svc_registration.get_reference()
    handler = (self._get_from_prototype if reference.is_prototype()
               else self._get_from_factory)
    return handler(factory, svc_registration)
Returns the service required by the bundle. The Service Factory is called only when necessary while the Prototype Service Factory is called each time :param factory: The service factory :param svc_registration: The ServiceRegistration object :return: The requested service instance (created if necessary)
def load_transcript_fpkm_dict_from_gtf(
        gtf_path,
        transcript_id_column_name="reference_id",
        fpkm_column_name="FPKM",
        feature_column_name="feature"):
    """Load StringTie transcript-level abundance from a GTF file.

    Returns a dictionary mapping transcript reference IDs to FPKM values,
    keeping only rows whose feature is 'transcript' and whose ID is
    non-empty.
    """
    df = gtfparse.read_gtf(
        gtf_path, column_converters={fpkm_column_name: float})
    transcript_ids = _get_gtf_column(transcript_id_column_name, gtf_path, df)
    fpkm_values = _get_gtf_column(fpkm_column_name, gtf_path, df)
    features = _get_gtf_column(feature_column_name, gtf_path, df)
    logging.info("Loaded %d rows from %s" % (len(transcript_ids), gtf_path))
    n_transcripts = sum(feature == "transcript" for feature in features)
    logging.info("Found %s transcript entries" % n_transcripts)
    result = {}
    for transcript_id, fpkm, feature in zip(
            transcript_ids, fpkm_values, features):
        if feature != "transcript":
            continue
        if transcript_id:  # skip None and empty-string IDs
            result[transcript_id] = float(fpkm)
    logging.info("Keeping %d transcript rows with reference IDs" % (
        len(result),))
    return result
Load a GTF file generated by StringTie which contains transcript-level quantification of abundance. Returns a dictionary mapping Ensembl IDs of transcripts to FPKM values.
def _is_gs_folder(cls, result): return (cls.is_key(result) and result.size == 0 and result.name.endswith(cls._gs_folder_suffix))
Return ``True`` if GS standalone folder object. GS will create a 0 byte ``<FOLDER NAME>_$folder$`` key as a pseudo-directory place holder if there are no files present.
def with_class(self, cls):
    """Return the names of functions classified under ``cls``."""
    return [name for name, info in self._classification.items()
            if info[0] == cls]
Return functions with the class
def search(self, user, since, until, target_type, action_name):
    """Perform a GitLab events query filtered by type, action and dates."""
    if not self.user:
        self.user = self.get_user(user)
    if not self.events:
        self.events = self.user_events(self.user['id'], since, until)
    matches = []
    for event in self.events:
        created_at = dateutil.parser.parse(event['created_at']).date()
        type_ok = (event['target_type'] == target_type
                   and event['action_name'] == action_name)
        if type_ok and since.date <= created_at <= until.date:
            matches.append(event)
    log.debug("Result: {0} fetched".format(listed(len(matches), "item")))
    return matches
Perform GitLab query
def check_resource(resource):
    """Check a resource's availability against a linkchecker backend.

    The checker can be configured per resource via
    ``resource.extras['check:checker']`` (a ``udata.linkcheckers``
    entrypoint key); otherwise the configured default linkchecker is used.

    Returns
    -------
    dict or (dict, int)
        Check results dict and status code (if error).
    """
    checker_type = resource.extras.get('check:checker')
    LinkChecker = get_linkchecker(checker_type)
    if not LinkChecker:
        return {'error': 'No linkchecker configured.'}, 503
    if is_ignored(resource):
        return dummy_check_response()
    result = LinkChecker().check(resource)
    if not result:
        return {'error': 'No response from linkchecker'}, 503
    if result.get('check:error'):
        return {'error': result['check:error']}, 500
    if not result.get('check:status'):
        return {'error': 'No status in response from linkchecker'}, 503
    previous_status = resource.extras.get('check:available')
    resource.extras.update(_get_check_keys(result, resource, previous_status))
    resource.save(signal_kwargs={'ignores': ['post_save']})
    return result
Check a resource availability against a linkchecker backend The linkchecker used can be configured on a resource basis by setting the `resource.extras['check:checker']` attribute with a key that points to a valid `udata.linkcheckers` entrypoint. If not set, it will fallback on the default linkchecker defined by the configuration variable `LINKCHECKING_DEFAULT_LINKCHECKER`. Returns ------- dict or (dict, int) Check results dict and status code (if error).
def putcolslice(self, columnname, value, blc, trc, inc=None,
                startrow=0, nrow=-1, rowincr=1):
    """Put into a slice in a table column holding arrays.

    Its arguments are the same as for getcolslice and putcellslice.

    ``inc`` previously defaulted to a shared mutable list (``[]``); it now
    defaults to None and is normalised to a fresh empty list per call,
    which is backward compatible for all callers.
    """
    if inc is None:
        inc = []
    self._putcolslice(columnname, value, blc, trc, inc,
                      startrow, nrow, rowincr)
Put into a slice in a table column holding arrays. Its arguments are the same as for getcolslice and putcellslice.
def get(self, uid):
    """Get a user's information by their id.

    :param uid str: User ID
    :return: the user's information dict, or None
    :rtype: Dictionary or None
    :raises ServerError: on a non-200 response
    """
    response = requests.get(self.apiurl + "/users/{}".format(uid),
                            headers=self.header)
    if response.status_code != 200:
        raise ServerError
    payload = response.json()
    return payload['data'] or None
Get a user's information by their id. :param uid str: User ID :return: The user's information or None :rtype: Dictionary or None
def handle_cluster_request(self, tsn, command_id, args):
    """Handle the cluster command; command 0 (re)arms a 30 s off timer."""
    if command_id != 0:
        return
    if self._timer_handle:
        self._timer_handle.cancel()
    loop = asyncio.get_event_loop()
    self._timer_handle = loop.call_later(30, self._turn_off)
Handle the cluster command.
def _norm_cmap(values, cmap, normalize, cm, vmin=None, vmax=None): mn = min(values) if vmin is None else vmin mx = max(values) if vmax is None else vmax norm = normalize(vmin=mn, vmax=mx) n_cmap = cm.ScalarMappable(norm=norm, cmap=cmap) return n_cmap
Normalize and set colormap. Taken from geopandas@0.2.1 codebase, removed in geopandas@0.3.0.
def pipe_uniq(context=None, _INPUT=None, conf=None, **kwargs):
    """An operator that filters out non-unique items by the given field.

    Not loopable.

    conf : {'field': {'type': 'text', 'value': <field to be unique>}}

    Returns
    -------
    _OUTPUT : generator of unique items (or _INPUT untouched when the
        pass-through test is true)
    """
    funcs = get_splits(None, conf, **cdicts(opts, kwargs))
    pieces, _pass = funcs[0](), funcs[2]()
    if _pass:
        return _INPUT
    return unique_items(_INPUT, pieces.field)
An operator that filters out non unique items according to the specified field. Not loopable. Parameters ---------- context : pipe2py.Context object _INPUT : pipe2py.modules pipe like object (iterable of items) kwargs -- other inputs, e.g. to feed terminals for rule values conf : {'field': {'type': 'text', 'value': <field to be unique>}} Returns ------- _OUTPUT : generator of unique items
def category(self, category):
    """Change the category of the current search, reset to page 1, return self."""
    self.url.category = category
    self.url.set_page(1)
    return self
Change category of current search and return self
def assemble_concatenated_meta(concated_meta_dfs, remove_all_metadata_fields):
    """Assemble the concatenated metadata dfs into one df, sorted on both axes.

    Args:
        concated_meta_dfs (list of pandas dfs)
        remove_all_metadata_fields (bool): drop all columns first (in place)

    Returns:
        all_concated_meta_df_sorted (pandas df)
    """
    if remove_all_metadata_fields:
        for df in concated_meta_dfs:
            df.drop(df.columns, axis=1, inplace=True)
    combined = pd.concat(concated_meta_dfs, axis=0)
    n_rows = combined.shape[0]
    logger.debug("all_concated_meta_df.shape[0]: {}".format(n_rows))
    # Sanity check: concat must not drop or duplicate rows.
    assert n_rows == sum(df.shape[0] for df in concated_meta_dfs)
    return combined.sort_index(axis=0).sort_index(axis=1)
Assemble the concatenated metadata dfs together. For example, if horizontally concatenating, the concatenated metadata dfs are the column metadata dfs. Both indices are sorted. Args: concated_meta_dfs (list of pandas dfs) Returns: all_concated_meta_df_sorted (pandas df)
def _pwr_optfcn(df, loc):
    """Find power from ``i_from_v`` (current times voltage at ``loc``)."""
    current = _lambertw_i_from_v(df['r_sh'], df['r_s'], df['nNsVth'],
                                 df[loc], df['i_0'], df['i_l'])
    return current * df[loc]
Function to find power from ``i_from_v``.
def channels(self):
    """Read-only mapping of non-pattern channels keyed by name."""
    concrete = {ch.name: ch for ch in self._refs.values()
                if not ch.is_pattern}
    return types.MappingProxyType(concrete)
Read-only channels dict.
def get_all_patches(self, dont_use_cache=False):
    """Compute and cache the list of all patches matching this query.

    self.start_position and self.end_position are ignored for this
    computation.

    @param dont_use_cache If True, recompute even if a list computed by a
        previous call is cached.
    """
    if not dont_use_cache and self._all_patches_cache is not None:
        return self._all_patches_cache
    # The original ended this print with a stray trailing comma (a Python 2
    # "no newline" leftover) which in Python 3 built and discarded a tuple.
    print('Computing full change list (since you specified a percentage)...')
    sys.stdout.flush()
    endless_query = self.clone()
    endless_query.start_position = endless_query.end_position = None
    self._all_patches_cache = list(endless_query.generate_patches())
    return self._all_patches_cache
Computes a list of all patches matching this query, though ignoreing self.start_position and self.end_position. @param dont_use_cache If False, and get_all_patches has been called before, compute the list computed last time.
def send_task(self, request, response):
    """Send off a celery task for the current page, then recache.

    Task submission is best-effort: failures are logged and must never
    break the response cycle.
    """
    from bettercache.tasks import GeneratePage
    try:
        GeneratePage.apply_async((strip_wsgi(request),))
    except Exception:
        # A bare ``except:`` also swallowed SystemExit/KeyboardInterrupt;
        # catching Exception keeps the best-effort intent without that.
        logger.error("failed to send celery task")
    self.set_cache(request, response)
send off a celery task for the current page and recache
def out(*output, **kwargs):
    """Write ``output`` to stdout, textwrapped unless ``wrap=False``.

    :arg wrap: set ``wrap=False`` to skip textwrapping.
    :arg indent: prefix applied to each line.
    """
    text = ' '.join(str(piece) for piece in output)
    if kwargs.get('wrap') is not False:
        text = '\n'.join(wrap(text, kwargs.get('indent', '')))
    elif kwargs.get('indent'):
        prefix = kwargs['indent']
        text = prefix + ('\n' + prefix).join(text.splitlines())
    sys.stdout.write(text + '\n')
Writes output to stdout. :arg wrap: If you set ``wrap=False``, then ``out`` won't textwrap the output.
def default_subsystem_for_plugin(plugin_type):
    """Create a singleton PluginSubsystemBase subclass for ``plugin_type``.

    The singleton avoids creating duplicate types (and option-scope
    collisions) when dependent Tasks are installed multiple times.

    :param plugin_type: A CheckstylePlugin subclass.
    :rtype: subclass of PluginSubsystemBase
    """
    if not issubclass(plugin_type, CheckstylePlugin):
        raise ValueError('Can only create a default plugin subsystem for subclasses of {}, given: {}'
                         .format(CheckstylePlugin, plugin_type))
    members = {
        str('options_scope'): 'pycheck-{}'.format(plugin_type.name()),
        str('plugin_type'): classmethod(lambda cls: plugin_type),
        str('register_plugin_options'): classmethod(lambda cls, register: None),
    }
    return type(str('{}Subsystem'.format(plugin_type.__name__)),
                (PluginSubsystemBase,), members)
Create a singleton PluginSubsystemBase subclass for the given plugin type. The singleton enforcement is useful in cases where dependent Tasks are installed multiple times, to avoid creating duplicate types which would have option scope collisions. :param plugin_type: A CheckstylePlugin subclass. :type: :class:`pants.contrib.python.checks.checker.common.CheckstylePlugin` :rtype: :class:`pants.contrib.python.checks.tasks.checkstyle.plugin_subsystem_base.PluginSubsystemBase`
def schedule_messages(messages, recipients=None, sender=None, priority=None):
    """Schedule a message or messages.

    :param messages: str, MessageBase subclass instance, or list thereof;
        plain strings become PlainTextMessage.
    :param recipients: recipient addresses or Django User instances, or
        None (dispatches created later via `prepare_dispatches()`).
    :param sender: User instance or None.
    :param priority: overrides the message type's own priority when set.
    :return: list of (message_model, dispatches_models) tuples
    :rtype: list
    """
    if not is_iterable(messages):
        messages = (messages,)
    results = []
    for message in messages:
        if isinstance(message, six.string_types):
            message = PlainTextMessage(message)
        effective_priority = message.priority if priority is None else priority
        results.append(message.schedule(sender=sender,
                                        recipients=recipients,
                                        priority=effective_priority))
    return results
Schedules a message or messages. :param MessageBase|str|list messages: str or MessageBase heir or list - use str to create PlainTextMessage. :param list|None recipients: recipients addresses or Django User model heir instances If `None` Dispatches should be created before send using `prepare_dispatches()`. :param User|None sender: User model heir instance :param int priority: number describing message priority. If set overrides priority provided with message type. :return: list of tuples - (message_model, dispatches_models) :rtype: list
def is_field_method(node):
    """Check whether a call to a field instance method is valid.

    Valid means the method exists on the field's underlying type: str
    methods on a StringField, list methods on a ListField, and so on.
    """
    name = node.attrname
    inferred = safe_infer(node.last_child())
    if not inferred:
        return False
    return any(
        node_is_instance(inferred, cls_name) and hasattr(inst, name)
        for cls_name, inst in FIELD_TYPES.items())
Checks if a call to a field instance method is valid. A call is valid if the call is a method of the underlying type. So, in a StringField the methods from str are valid, in a ListField the methods from list are valid and so on...
def variableMissingValue(ncVar):
    """Return the missing-data value of a NetCDF variable, or None.

    Looks for the first present of: missing_value, MissingValue,
    missingValue, FillValue, _FillValue.
    """
    attributes = ncVarAttributes(ncVar)
    if not attributes:
        return None
    candidate_keys = ('missing_value', 'MissingValue', 'missingValue',
                      'FillValue', '_FillValue')
    for key in candidate_keys:
        if key in attributes:
            return attributes[key]
    return None
Returns the missingData given a NetCDF variable Looks for one of the following attributes: _FillValue, missing_value, MissingValue, missingValue. Returns None if these attributes are not found.
def storage(self):
    """Return the counter storage, creating it on first access."""
    annotation = get_portal_annotation()
    store = annotation.get(NUMBER_STORAGE)
    if store is None:
        store = OIBTree()
        annotation[NUMBER_STORAGE] = store
    return store
get the counter storage
def __run_delta_py(self, delta):
    """Execute the delta's python file, then record it in the upgrades table."""
    self.__run_py_file(delta.get_file(), delta.get_name())
    self.__update_upgrades_table(delta)
Execute the delta py file
def select_point(action, action_space, select_point_act, screen):
    """Select a unit at a point on the screen."""
    selector = spatial(action, action_space).unit_selection_point
    screen.assign_to(selector.selection_screen_coord)
    selector.type = select_point_act
Select a unit at a point.
def create_data(step: 'projects.ProjectStep') -> STEP_DATA:
    """Create the "blank" STEP_DATA scaffold for ``step``.

    :param step: Project step for which to create the data.
    :return: Step data tuple reflecting the step's current name/status;
        it must then be populated with run data to fully reflect the
        state of the step.
    """
    return STEP_DATA(
        name=step.definition.name,
        status=step.status(),
        has_error=False,
        body=None,
        data={},
        includes=[],
        cauldron_version=list(environ.version_info),
        file_writes=[],
    )
Creates the data object that stores the step information in the notebook results JavaScript file. :param step: Project step for which to create the data :return: Step data tuple containing scaffold data structure for the step output. The dictionary must then be populated with data from the step to correctly reflect the current state of the step. This is essentially a "blank" step dictionary, which is what the step would look like if it had not yet run
def image(self, data, cmap='cubehelix', clim='auto', fg_color=None):
    """Show an image.

    Parameters
    ----------
    data : ndarray
        Should have shape (N, M), (N, M, 3) or (N, M, 4); the colormap
        only applies to scalar pixels.
    cmap : str
        Colormap name.
    clim : str | tuple
        ``'auto'`` or a (min, max) tuple.
    fg_color : Color or None
        Sets the plot foreground color if specified.

    Returns
    -------
    image : instance of Image
    """
    self._configure_2d(fg_color)
    img = scene.Image(data, cmap=cmap, clim=clim)
    self.view.add(img)
    self.view.camera.aspect = 1
    self.view.camera.set_range()
    return img
Show an image Parameters ---------- data : ndarray Should have shape (N, M), (N, M, 3) or (N, M, 4). cmap : str Colormap name. clim : str | tuple Colormap limits. Should be ``'auto'`` or a two-element tuple of min and max values. fg_color : Color or None Sets the plot foreground color if specified. Returns ------- image : instance of Image The image. Notes ----- The colormap is only used if the image pixels are scalars.
def draw_help(self, surf):
    """Draw the help dialog listing the keyboard shortcuts."""
    if not self._help:
        return

    def write(loc, text):
        surf.write_screen(self._font_large, colors.black, loc, text)

    surf.surf.fill(colors.white * 0.8)
    write((1, 1), "Shortcuts:")
    widest = max(len(hotkey) for hotkey, _ in self.shortcuts)
    for row, (hotkey, description) in enumerate(self.shortcuts, start=2):
        write((2, row), hotkey)
        write((3 + widest * 0.7, row), description)
Draw the help dialog.
def rename(self, container, name):
    """Rename a container, similar to the ``docker rename`` command.

    Args:
        container (str): ID of the container to rename
        name (str): New name for the container

    Raises:
        :py:class:`docker.errors.APIError`
            If the server returns an error.
    """
    url = self._url("/containers/{0}/rename", container)
    response = self._post(url, params={'name': name})
    self._raise_for_status(response)
Rename a container. Similar to the ``docker rename`` command. Args: container (str): ID of the container to rename name (str): New name for the container Raises: :py:class:`docker.errors.APIError` If the server returns an error.
def check_children(self):
    """Check the children once, restarting any dead processes if enabled."""
    if self._restart_processes is not True:
        return
    for pid, mapping in six.iteritems(self._process_map):
        if not mapping['Process'].is_alive():
            log.trace('Process restart of %s', pid)
            self.restart_process(pid)
Check the children once
def split(self, verbose=None, end_in_new_line=None):
    """Record the current split's elapsed time and restart the stopwatch.

    The elapsed time is appended to :attr:`split_elapsed_time`. A paused
    stopwatch remains paused; a running one keeps running.

    Parameters
    ----------
    verbose : Optional[bool]
        Whether to log. If `None`, use `verbose_end` set during
        initialization.
    end_in_new_line : Optional[bool]
        Whether to log the `description`. If `None`, use
        `end_in_new_line` set during initialization.
    """
    elapsed = self.get_elapsed_time()
    self.split_elapsed_time.append(elapsed)
    self._cumulative_elapsed_time += elapsed
    self._elapsed_time = datetime.timedelta()
    if verbose is None:
        verbose = self.verbose_end
    if verbose:
        if end_in_new_line is None:
            end_in_new_line = self.end_in_new_line
        if end_in_new_line:
            self.log("{} done in {}".format(self.description, elapsed))
        else:
            self.log(" done in {}".format(elapsed))
    self._start_time = datetime.datetime.now()
Save the elapsed time of the current split and restart the stopwatch. The current elapsed time will be appended to :attr:`split_elapsed_time`. If the stopwatch is paused, then it will remain paused. Otherwise, it will continue running. Parameters ---------- verbose : Optional[bool] Whether to log. If `None`, use `verbose_end` set during initialization. end_in_new_line : Optional[bool] Whether to log the `description`. If `None`, use `end_in_new_line` set during initialization.
def get_node(self, path):
    """Get this node or a descendant, addressed by a path of names.

    @param path A list of path elements; the first element must be this
        node's name, e.g. ['p', 'c1'].
    @return The node pointed to by @ref path, or None if the path does
        not point to a node in the tree below this node.
    """
    with self._mutex:
        if path[0] != self._name:
            return None
        if len(path) == 1:
            return self
        child_name = path[1]
        if child_name in self._children:
            return self._children[child_name].get_node(path[1:])
        return None
Get a child node of this node, or this node, based on a path. @param path A list of path elements pointing to a node in the tree. For example, ['/', 'localhost', 'dir.host']. The first element in this path should be this node's name. @return The node pointed to by @ref path, or None if the path does not point to a node in the tree below this node. Example: >>> c1 = TreeNode(name='c1') >>> c2 = TreeNode(name='c2') >>> p = TreeNode(name='p', children={'c1':c1, 'c2':c2}) >>> c1._parent = p >>> c2._parent = p >>> p.get_node(['p', 'c1']) == c1 True >>> p.get_node(['p', 'c2']) == c2 True
def rotate_root_iam_credentials(self, mount_point=DEFAULT_MOUNT_POINT):
    """Rotate static root IAM credentials.

    When Vault is configured with static credentials, this endpoint has
    Vault rotate the access key it uses. Vault's AWS access key MUST be
    the only access key on the IAM user, otherwise generation of a new
    access key will fail. After this call, Vault is the only entity that
    knows the AWS secret key used to access AWS.

    Supported methods:
        POST: /{mount_point}/config/rotate-root. Produces: 200 application/json

    :return: The JSON response of the request.
    :rtype: dict
    """
    api_path = '/v1/{mount_point}/config/rotate-root'.format(mount_point=mount_point)
    return self._adapter.post(url=api_path).json()
def validate(self, data=None):
    """Validate the data.

    Check also that no extra properties are present.

    :raises: ValidationError if the data is not valid.
    """
    errors = {}
    data = self._getData(data)
    for name, field in self.fields.items():
        try:
            field.clean(data.get(name))
        except ValidationError as e:
            # Modernized from Python-2-only "except X, e" syntax.
            errors[name] = e.messages
        except AttributeError:
            # data has no .get() -- it is not a mapping at all.
            raise ValidationError('data should be of type dict but is %s' % (type(data),))
    # Reject any keys that do not correspond to a declared field.
    extras = set(data.keys()) - set(self.fields.keys())
    if extras:
        errors[', '.join(extras)] = ['field(s) not allowed']
    if errors:
        raise ValidationError(errors)
def init_static_field(state, field_class_name, field_name, field_type):
    """Initialize a static field with an allocated, but not initialized,
    object of the given type.

    :param state: State associated to the field.
    :param field_class_name: Class containing the field.
    :param field_name: Name of the field.
    :param field_type: Type of the field and the new object.
    """
    static_ref = SimSootValue_StaticFieldRef.get_ref(
        state, field_class_name, field_name, field_type)
    fresh_object = SimSootValue_ThisRef.new_object(state, field_type)
    state.memory.store(static_ref, fresh_object)
def remove_watcher(self, issue, watcher):
    """Remove a user from an issue's watch list.

    :param issue: ID or key of the issue affected
    :param watcher: username of the user to remove from the watchers list
    :rtype: Response
    """
    watchers_url = self._get_url('issue/' + str(issue) + '/watchers')
    return self._session.delete(watchers_url, params={'username': watcher})
def get_milestone(self, title):
    """Given the title as str, look for an existing milestone or create
    a new one, and return the object.
    """
    if not title:
        return GithubObject.NotSet
    if not hasattr(self, '_milestones'):
        # Lazily build a title -> milestone cache on first use.
        self._milestones = {m.title: m for m in self.repo.get_milestones()}
    existing = self._milestones.get(title)
    if existing:
        return existing
    return self.repo.create_milestone(title=title)
def scale_degree_to_bitmap(scale_degree, modulo=False, length=BITMAP_LENGTH):
    """Create a bitmap representation of a scale degree.

    Note that values in the bitmap may be negative, indicating that the
    semitone is to be removed.

    Parameters
    ----------
    scale_degree : str
        Spelling of a relative scale degree, e.g. 'b3', '7', '#5'.
        A leading '*' marks the semitone for removal (-1 in the bitmap).
    modulo : bool, default=False
        If a scale degree exceeds the length of the bit-vector, modulo
        the scale degree back into the bit-vector; otherwise it is
        discarded.
    length : int, default=BITMAP_LENGTH
        Length of the bit-vector to produce.

    Returns
    -------
    bitmap : np.ndarray, in [-1, 0, 1], len=`length`
        Bitmap representation of this scale degree.
    """
    sign = -1 if scale_degree.startswith("*") else 1
    degree = scale_degree.strip("*")
    bitmap = [0] * length
    semitone = scale_degree_to_semitone(degree)
    if modulo or semitone < length:
        bitmap[semitone % length] = sign
    return np.array(bitmap)
def rename(self, old_task_name, new_task_name):
    """Renames an existing task directory.

    `old_task_name`
        Current task name.
    `new_task_name`
        New task name.

    Returns ``True`` if rename successful, ``False`` on an OS-level
    failure.

    :raises ValueError: if either name is empty/option-like or the
        names are identical.
    :raises errors.TaskNotFound: if the old task does not exist.
    :raises errors.TaskExists: if the new task already exists.
    """
    if not old_task_name or old_task_name.startswith('-'):
        raise ValueError('Old task name is invalid')
    if not new_task_name or new_task_name.startswith('-'):
        # Fixed doubled-word typo ("New new task name is invalid").
        raise ValueError('New task name is invalid')
    if old_task_name == new_task_name:
        raise ValueError('Cannot rename task to itself')

    try:
        old_task_dir = self._get_task_dir(old_task_name)
        if not self.exists(old_task_dir):
            raise errors.TaskNotFound(old_task_name)

        new_task_dir = self._get_task_dir(new_task_name)
        if self.exists(new_task_dir):
            raise errors.TaskExists(new_task_name)

        os.rename(old_task_dir, new_task_dir)
        return True
    except OSError:
        # Filesystem-level failure (permissions, cross-device, ...).
        return False
def get(self, request, *args, **kwargs):
    """Handler for HTTP GET requests.

    Builds the workflow context, records any step errors on it, and
    renders the response.
    """
    try:
        context = self.get_context_data(**kwargs)
    except exceptions.NotAvailable:
        # NOTE(review): if handle() returns instead of raising,
        # 'context' is unbound below -- presumably handle() always
        # raises/redirects; confirm against the exceptions module.
        exceptions.handle(request)
    self.set_workflow_step_errors(context)
    return self.render_to_response(context)
def refresh(path=None):
    """Convenience method for setting the git executable path.

    Sets the module-level GIT_OK flag: True only when both Git and
    FetchInfo refresh successfully.
    """
    global GIT_OK
    GIT_OK = False
    # Both refreshes must succeed; short-circuit on the first failure.
    if Git.refresh(path=path) and FetchInfo.refresh():
        GIT_OK = True
def removeRnaQuantificationSet(self, rnaQuantificationSet):
    """Removes the specified rnaQuantificationSet from this repository.

    This performs a cascading removal of all items within this
    rnaQuantificationSet.
    """
    target_id = rnaQuantificationSet.getId()
    query = models.Rnaquantificationset.delete().where(
        models.Rnaquantificationset.id == target_id)
    query.execute()
def _roots_to_targets(self, build_graph, target_roots):
    """Populate the BuildGraph and target list from a set of input
    TargetRoots.
    """
    with self._run_tracker.new_workunit(name='parse',
                                        labels=[WorkUnitLabel.SETUP]):
        addresses = build_graph.inject_roots_closure(target_roots,
                                                     self._fail_fast)
        return [build_graph.get_target(address) for address in addresses]
def package_version(self, prefix=None, name=None, pkg=None, build=False):
    """Get installed package version in a given env.

    Exactly one of `name` (environment name) or `prefix` (environment
    path) must be supplied.

    :param pkg: package name to look up.
    :param build: when True, return ``'<version>=<build>'`` instead of
        just the version string.
    :return: the version string, or None when the environment or the
        package cannot be found.
    :raises TypeError: unless exactly one of name/prefix is given.
    """
    if name and prefix:
        raise TypeError("Exactly one of 'name' or 'prefix' is required.")
    if not name and not prefix:
        # The original silently returned None here; enforce the
        # documented "exactly one" contract instead.
        raise TypeError("Exactly one of 'name' or 'prefix' is required.")
    if name:
        prefix = self.get_prefix_envname(name)

    package_versions = {}
    if self.environment_exists(prefix=prefix):
        for package in self.linked(prefix):
            # Substring pre-filter; the exact lookup happens below via
            # the canonical name key.
            if pkg in package:
                n, v, b = self.split_canonical_name(package)
                package_versions[n] = '{0}={1}'.format(v, b) if build else v
    return package_versions.get(pkg)
def handle_document(self, line: str, position: int, tokens: ParseResults) -> ParseResults:
    """Handle statements like ``SET DOCUMENT X = "Y"``.

    :raises: InvalidMetadataException
    :raises: VersionFormatWarning
    """
    key, value = tokens['key'], tokens['value']

    if key not in DOCUMENT_KEYS:
        raise InvalidMetadataException(self.get_line_number(), line, position, key, value)

    norm_key = DOCUMENT_KEYS[key]
    if norm_key in self.document_metadata:
        # Refuse to silently overwrite earlier metadata.
        log.warning('Tried to overwrite metadata: %s', key)
        return tokens

    self.document_metadata[norm_key] = value
    if norm_key == METADATA_VERSION:
        self.raise_for_version(line, position, value)
    return tokens
def run(self, batch: Batch, train: bool=False, stream: StreamWrapper=None) -> Batch:
    """Run all the models in-order and return accumulated outputs.

    The N-th model is fed with the original inputs plus the outputs of
    every model that ran before it.

    .. warning:: this ensemble can not be trained.

    :param batch: batch to be processed
    :param train: must be ``False``; training is unsupported
    :param stream: stream wrapper (useful for precise buffer management)
    :return: accumulated model outputs, restricted to ``output_names``
    :raise ValueError: if ``train`` is ``True``
    """
    if train:
        raise ValueError('Ensemble model cannot be trained.')
    self._load_models()
    # Deep-copy so no model can mutate the caller's batch.
    accumulated = dict(copy.deepcopy(batch))
    for model in self._models:
        accumulated.update(model.run(accumulated, False, None))
    return {name: accumulated[name] for name in self.output_names}
def release(self, subnets):
    """Free the lease of the given subnets.

    Args:
        subnets (list of str or netaddr.IPNetwork): dotted ipv4 subnet
            in CIDR notation (e.g. ``192.168.200.0/24``) or IPNetwork
            object; a single value is also accepted.

    Raises:
        LagoSubnetLeaseLockException: If the lock to self.path can't be
            acquired.
    """
    # Accept a single subnet as well as a collection.
    if isinstance(subnets, (str, IPNetwork)):
        subnets = [subnets]
    normalized = (
        str(subnet) if isinstance(subnet, IPNetwork) else subnet
        for subnet in subnets
    )
    try:
        with self._create_lock():
            for subnet in normalized:
                self._release(self.create_lease_object_from_subnet(subnet))
    except (utils.TimerException, IOError):
        raise LagoSubnetLeaseLockException(self.path)
def split_n(string, seps, reg=False):
    r"""Split strings into an n-dimensional list, one nesting level per
    separator in `seps`.

    ::

        split_n('a b;c d', (';', ' '))  # [['a', 'b'], ['c', 'd']]

    :param seps: sequence of separators, outermost first.
    :param reg: treat the first separator as a regular expression
        (nested levels always use plain splitting).
    """
    if not seps:
        return string
    head, rest = seps[0], seps[1:]
    return [split_n(piece, rest)
            for piece in _re_split_mixin(string, head, reg=reg)]
def update_form_labels(self, request=None, obj=None, form=None):
    """Returns a form obj after modifying form labels referred to
    in custom_form_labels.
    """
    for custom in self.custom_form_labels:
        if custom.field not in form.base_fields:
            continue
        label = custom.get_form_label(
            request=request, obj=obj, model=self.model, form=form
        )
        if label:
            form.base_fields[custom.field].label = mark_safe(label)
    return form
def encode(self, encoding=None):
    r"""Encode into a multihash-encoded digest.

    With `encoding` set to `None`, a binary digest is produced:
    ``<func-code><digest-length><digest>``.  If the name of an
    `encoding` is given, the binary digest is passed through the
    corresponding `CodecReg` encoder before being returned.
    """
    # self.func may be an enum member (use .value) or a plain int.
    func_code = getattr(self.func, 'value', self.func)
    mhash = bytes([func_code, len(self.digest)]) + self.digest
    if encoding:
        return CodecReg.get_encoder(encoding)(mhash)
    return mhash
def read_headers(rfile, hdict=None):
    """Read headers from the given stream into the given header dict.

    If hdict is None, a new header dict is created. Returns the
    populated header dict.

    Headers which are repeated are folded together using a comma if
    their specification so dictates.

    This function raises ValueError when the read bytes violate the
    HTTP spec.
    You should probably return "400 Bad Request" if this happens.
    """
    if hdict is None:
        hdict = {}

    while True:
        line = rfile.readline()
        if not line:
            # Stream ended before the blank line terminating the headers.
            raise ValueError("Illegal end of headers.")

        if line == CRLF:
            # Blank line: end of the header section.
            break
        if not line.endswith(CRLF):
            raise ValueError("HTTP requires CRLF terminators")

        if line[0] in ' \t':
            # Leading whitespace marks a continuation of the previous
            # header's value; 'k' keeps its value from the prior pass.
            # NOTE(review): a continuation as the very first header line
            # would hit an unbound 'k' -- presumably callers never send
            # that; confirm.
            v = line.strip()
        else:
            try:
                k, v = line.split(":", 1)
            except ValueError:
                raise ValueError("Illegal header line.")
            # Normalize header names to Title-Case for lookups.
            k = k.strip().title()
            v = v.strip()
        hname = k

        if k in comma_separated_headers:
            # Fold repeated headers into one comma-separated value.
            existing = hdict.get(hname)
            if existing:
                v = ", ".join((existing, v))
        hdict[hname] = v

    return hdict
def _clone_reverses(self, old_reverses):
    """Clones all the objects that were previously gathered.

    `old_reverses` maps a relation category (e.g. 'm2m') to a dict whose
    values are (field_name, related_objects) pairs -- presumably
    gathered by an earlier pass; confirm against the gathering code.
    """
    for ctype, reverses in old_reverses.items():
        for parts in reverses.values():
            sub_objs = parts[1]
            field_name = parts[0]
            attrs = {}
            for sub_obj in sub_objs:
                # For non-m2m reverses, point the clone's FK column at
                # this (already cloned) instance. Computed only once
                # ('not attrs') since the field is identical for every
                # sub_obj in the list.
                if ctype != 'm2m' and not attrs:
                    field = sub_obj._meta.get_field(field_name)
                    attrs = {
                        field.column: getattr(self, field.rel.field_name)
                    }
                sub_obj._clone(**attrs)
            if ctype == 'm2m':
                # Re-attach the m2m collection on this instance.
                setattr(self, field_name, sub_objs)
def get_all_tables(self, dataset_id, project_id=None):
    """Retrieve a list of tables for the dataset.

    Parameters
    ----------
    dataset_id : str
        The dataset to retrieve table data for.
    project_id: str
        Unique ``str`` identifying the BigQuery project containing the
        dataset.

    Returns
    -------
    A ``list`` with all table names
    """
    tables_data = self._get_all_tables_for_dataset(dataset_id, project_id)
    # Entries without a tableId are skipped.
    candidates = (
        entry.get('tableReference', {}).get('tableId')
        for entry in tables_data.get('tables', [])
    )
    return [name for name in candidates if name]
def _emit_error(cls, message): sys.stderr.write('ERROR: {message}\n'.format(message=message)) sys.stderr.flush()
Print an error message to STDERR.
def club(self, cid):
    """Get info by real team using a ID.

    Scrapes clubInfo.phtml for the club with id `cid` and collects the
    first five columns of each row of the first cellpadding=2 table.

    @param cid: club id string placed in the query string.
    @return: (page title, list of tab-separated player rows)
    """
    headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain", 'Referer': 'http://' + self.domain + '/', "User-Agent": user_agent}
    req = self.session.get('http://' + self.domain + '/clubInfo.phtml?cid=' + cid, headers=headers).content
    soup = BeautifulSoup(req)
    plist = []
    # Skip the first <tr>: presumably the table header row -- confirm.
    for i in soup.find('table', cellpadding=2).find_all('tr')[1:]:
        plist.append('%s\t%s\t%s\t%s\t%s' % (i.find_all('td')[0].text, i.find_all('td')[1].text, i.find_all('td')[2].text, i.find_all('td')[3].text, i.find_all('td')[4].text))
    return soup.title.text, plist
def is_available(self):
    """Return True if the connection to the host and port is successful.

    For schemes in NOOP_PROTOCOLS no probe is attempted and True is
    returned immediately.

    @return: bool
    @raise RuntimeError: if no port is configured.
    """
    if self.scheme in NOOP_PROTOCOLS:
        return True
    if not self.port:
        raise RuntimeError('port is required')
    s = socket.socket()
    try:
        s.connect((self.host, self.port))
    except Exception:
        return False
    else:
        return True
    finally:
        # The original leaked the socket fd on every call; always close.
        s.close()
def serve(handler, sock_path, timeout=UNIX_SOCKET_TIMEOUT):
    """Start the ssh-agent server on a UNIX-domain socket.

    If no connection is made during the specified timeout, retry until
    the context is over.

    Yields the environment variables (SSH_AUTH_SOCK / SSH_AGENT_PID)
    that child processes need in order to talk to this agent.
    """
    # Log the local ssh version for diagnostics; `ssh -V` writes to stderr.
    ssh_version = subprocess.check_output(['ssh', '-V'],
                                          stderr=subprocess.STDOUT)
    log.debug('local SSH version: %r', ssh_version)
    environ = {'SSH_AUTH_SOCK': sock_path, 'SSH_AGENT_PID': str(os.getpid())}
    # Serialize device access across concurrent agent connections.
    device_mutex = threading.Lock()
    with server.unix_domain_socket_server(sock_path) as sock:
        sock.settimeout(timeout)
        quit_event = threading.Event()
        handle_conn = functools.partial(server.handle_connection,
                                        handler=handler,
                                        mutex=device_mutex)
        kwargs = dict(sock=sock, handle_conn=handle_conn,
                      quit_event=quit_event)
        with server.spawn(server.server_thread, kwargs):
            try:
                # Hand control to the caller while the server thread runs.
                yield environ
            finally:
                log.debug('closing server')
                # Tell the server thread to stop accepting connections.
                quit_event.set()
def InternalSend(self, cmd, payload):
    """Sends a message to the device, including fragmenting it.

    The message is split into one init packet followed by continuation
    packets.  Note that `payload` is consumed (emptied) in place.
    """
    total_length = len(payload)
    # The init packet header occupies 7 bytes of the HID report.
    init_capacity = self.packet_size - 7
    first_frame = payload[0:init_capacity]
    # The init packet carries the FULL payload length, so it must be
    # built before the payload is consumed.
    first_packet = UsbHidTransport.InitPacket(
        self.packet_size, self.cid, cmd, total_length, first_frame)
    del payload[0:init_capacity]
    remaining = total_length - len(first_frame)
    self.InternalSendPacket(first_packet)

    # Continuation packet headers occupy 5 bytes of the HID report.
    cont_capacity = self.packet_size - 5
    seq = 0
    while remaining > 0:
        frame = payload[0:cont_capacity]
        del payload[0:cont_capacity]
        remaining -= len(frame)
        self.InternalSendPacket(
            UsbHidTransport.ContPacket(self.packet_size, self.cid, seq, frame))
        seq += 1
def class_details(self, title=None):
    """Generates the class details.

    :param title: optional title
    :type title: str
    :return: the details
    :rtype: str
    """
    if title is None:
        return javabridge.call(self.jobject, "toClassDetailsString",
                               "()Ljava/lang/String;")
    return javabridge.call(self.jobject, "toClassDetailsString",
                           "(Ljava/lang/String;)Ljava/lang/String;", title)
def debugDumpAttr(self, output, depth):
    """Dumps debug information for the attribute.

    :param output: file object the libxml2 dump is written to
    :param depth: indentation depth for the dump
    """
    libxml2mod.xmlDebugDumpAttr(output, self._o, depth)
def create_reference_server_flask_app(cfg):
    """Create reference server Flask application with one or more IIIF
    handlers.

    One 'pil' handler with no auth is registered per API version listed
    in ``cfg.api_versions``.

    :param cfg: configuration object with at least ``debug`` and
        ``api_versions`` attributes.
    :return: the configured Flask app.
    """
    app = Flask(__name__)
    Flask.secret_key = "SECRET_HERE"
    app.debug = cfg.debug
    # Removed unused local `client_prefixes` from the original.
    for api_version in cfg.api_versions:
        handler_config = Config(cfg)
        handler_config.api_version = api_version
        handler_config.klass_name = 'pil'
        handler_config.auth_type = 'none'
        handler_config.prefix = "api/image/%s/example/reference" % (api_version)
        handler_config.client_prefix = handler_config.prefix
        add_handler(app, handler_config)
    return app
def _exact_match(response, matches, insensitive, fuzzy): for match in matches: if response == match: return match elif insensitive and response.lower() == match.lower(): return match elif fuzzy and _exact_fuzzy_match(response, match, insensitive): return match else: return None
returns an exact match, if it exists, given parameters for the match
def GetGtfsFactory(self):
    """Return the object's GTFS Factory.

    Returns:
        The GTFS Factory that was set for this object. If none was
        explicitly set, it first sets the object's factory to
        transitfeed's GtfsFactory and returns it.
    """
    if self._gtfs_factory is None:
        # Imported lazily; at module load this import is not needed
        # (and importing here mirrors the original behaviour).
        from . import gtfsfactory
        self._gtfs_factory = gtfsfactory.GetGtfsFactory()
    return self._gtfs_factory
def from_message_and_data(cls, message: str, data: Dict[str, Any]
                          ) -> 'BugZooException':
    """Reproduces an exception from the message and data contained in
    its dictionary-based description.

    :param message: human-readable message for the exception.
    :param data: extra fields from the description; ignored by this
        base implementation -- presumably subclasses override to
        consume it.
    """
    return cls(message)
def get_current_term():
    """Returns a uw_sws.models.Term object, for the current term.

    If the current term's grade submission deadline has already passed,
    the next term is returned instead.
    """
    url = "{}/current.json".format(term_res_url_prefix)
    term = _json_to_term_model(get_resource(url))
    if datetime.now() <= term.grade_submission_deadline:
        return term
    # Grades are already in for this term; roll over to the next one.
    return get_next_term()