code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def merge_values(values1, values2):
    """Merge two numpy arrays by building all combinations of their rows.

    Every row of the first array is horizontally stacked with every row of
    the second. If either side is empty, the other is returned unchanged.
    """
    array1 = values_to_array(values1)
    array2 = values_to_array(values2)
    # An empty side contributes nothing to the cross product.
    if array1.size == 0:
        return array2
    if array2.size == 0:
        return array1
    combined = [np.hstack((left, right))
                for left in array1
                for right in array2]
    return np.atleast_2d(combined)
Merges two numpy arrays by calculating all possible combinations of rows
def read_mac(self):
    """Read the MAC address from OTP ROM.

    The OUI comes from MAC3 when it is non-zero; otherwise it is selected
    by a marker byte in MAC1. Raises FatalError for an unknown marker.
    """
    mac0 = self.read_reg(self.ESP_OTP_MAC0)
    mac1 = self.read_reg(self.ESP_OTP_MAC1)
    mac3 = self.read_reg(self.ESP_OTP_MAC3)
    marker = (mac1 >> 16) & 0xff
    if mac3 != 0:
        oui = ((mac3 >> 16) & 0xff, (mac3 >> 8) & 0xff, mac3 & 0xff)
    elif marker == 0:
        oui = (0x18, 0xfe, 0x34)
    elif marker == 1:
        oui = (0xac, 0xd0, 0x74)
    else:
        raise FatalError("Unknown OUI")
    return oui + ((mac1 >> 8) & 0xff, mac1 & 0xff, (mac0 >> 24) & 0xff)
Read MAC from OTP ROM
def assign_literal(element, value):
    """Assign a literal value to an XML element.

    :param etree.Element element: element to which we assign.
    :param value: the value to assign; converted via helpers.CAST_DICT,
        falling back to str for unregistered types.
    """
    # NOTE(review): the original line contained a stray `u` token (a
    # docstring prefix merged into the code) which made it a SyntaxError.
    helper = helpers.CAST_DICT.get(type(value), str)
    element.clear()
    element.text = helper(value)
u"""Assigns a literal. If a given node doesn't exist, it will be created. :param etree.Element element: element to which we assign. :param value: the value to assign
def return_on_initial_capital(capital, period_pl, leverage=None):
    """Return the daily return series based on the initial capital.

    :param capital: positive starting capital.
    :param period_pl: pandas Series of per-period P&L values.
    :param leverage: optional leverage multiplier (defaults to 1).
    :raises ValueError: if capital is not positive.
    """
    if capital <= 0:
        # Fixed message: the original referred to 'cost', but the
        # parameter is 'capital'.
        raise ValueError('capital must be a positive number not %s' % capital)
    leverage = leverage or 1.
    # End-of-day equity after applying cumulative (levered) P&L.
    eod = capital + (leverage * period_pl.cumsum())
    ltd_rets = (eod / capital) - 1.
    # First day keeps the life-to-date return; subsequent days become
    # period-over-period changes of the equity curve.
    dly_rets = ltd_rets
    dly_rets.iloc[1:] = (1. + ltd_rets).pct_change().iloc[1:]
    return dly_rets
Return the daily return series based on the capital
def types_(self, col: str) -> pd.DataFrame:
    """Display types of values found in the dataframe's columns.

    :param col: column name (note: the parameter is shadowed by the loop
        and all columns are scanned; kept for interface compatibility).
    :return: a dataframe with a "type" row (list of type names seen) and
        a "num" row (last index inspected), one column per input column.
    :rtype: pd.DataFrame
    :example: ``ds.types_("Col 1")``
    """
    cols = self.df.columns.values
    all_types = {}
    for col in cols:
        local_types = []
        # Series.items() replaces Series.iteritems(), which was removed
        # in pandas 2.0.
        for i, val in self.df[col].items():
            t = type(val).__name__
            if t not in local_types:
                local_types.append(t)
        all_types[col] = (local_types, i)
    df = pd.DataFrame(all_types, index=["type", "num"])
    return df
Display types of values in a column :param col: column name :type col: str :return: a pandas dataframe :rtype: pd.DataFrame :example: ``ds.types_("Col 1")``
def transform(self, data):
    """Transform the data.

    Applies the fitted transform function to each configured feature
    column of a copy of ``data``, writing results to prefixed columns.

    :raises RuntimeError: if called before ``fit``/``fit_transform``.
    """
    if not self._get("fitted"):
        raise RuntimeError("`transform` called before `fit` or `fit_transform`.")
    data = data.copy()
    output_column_prefix = self._get("output_column_prefix")
    prefix = "" if output_column_prefix is None else output_column_prefix + '.'
    transform_function = self._get("transform_function")
    features = _internal_utils.select_feature_subset(
        data, self._get("features"))
    for column in features:
        data[prefix + column] = transform_function(data[column])
    return data
Transforms the data.
def create_tag(self, tag_name):
    """Create a new tag based on the working tree's revision.

    :param tag_name: The name of the tag to create (a string).
    """
    self.create()
    self.ensure_working_tree()
    logger.info("Creating tag '%s' in %s ..", tag_name, format_path(self.local))
    command = self.get_create_tag_command(tag_name)
    self.context.execute(*command)
Create a new tag based on the working tree's revision. :param tag_name: The name of the tag to create (a string).
def get_race(self, row, division):
    """Get the Race object for the given row of election results.

    The office is looked up first, since it is required to find the race.
    A race counts as "special" for senate seat number 2, a "Class II"
    senate description, or when the AP ``racetype`` starts with "Special".
    """
    office = self.get_office(row, division)
    cycle_year = row["electiondate"].split("-")[0]
    is_senate = office.body.slug == "senate"
    special = (
        (is_senate and row["seatnum"] == "2")
        or (is_senate and "Class II" in row.get("description", ""))
        or row["racetype"].startswith("Special")
    )
    try:
        return election.Race.objects.get(
            office=office,
            cycle__name=cycle_year,
            special=special,
        )
    except ObjectDoesNotExist:
        print(
            "Could not find race for {} {}".format(cycle_year, office.label)
        )
Gets the Race object for the given row of election results. In order to get the race, we must know the office. This function will get the office as well. The only way to know if a Race is a special is based on the string of the `racetype` field from the AP data.
def is_vert_aligned(c):
    """Return true if all the components of c are vertically aligned.

    Vertical alignment means that the bounding boxes of each Mention of c
    share a similar x-axis value in the visual rendering of the document.

    :param c: The candidate to evaluate
    :rtype: boolean
    """
    return all(
        _to_span(c[i]).sentence.is_visual()
        and bbox_vert_aligned(
            bbox_from_span(_to_span(c[i])),
            bbox_from_span(_to_span(c[0])),
        )
        for i in range(len(c))
    )
Return true if all the components of c are vertically aligned. Vertical alignment means that the bounding boxes of each Mention of c shares a similar x-axis value in the visual rendering of the document. :param c: The candidate to evaluate :rtype: boolean
def _send_pub(self, load):
    """Take a load and send it across the network to connected minions."""
    for transport, opts in iter_transport_opts(self.opts):
        channel = salt.transport.server.PubServerChannel.factory(opts)
        channel.publish(load)
Take a load and send it across the network to connected minions
def render(self):
    """Render the summary template with this object's attributes."""
    template = Engine().from_string(SUMMARY_TEMPLATE)
    return template.render(Context(self.__dict__))
Render the summary.
def describe_table(self, tablename):
    """Get the details about a table.

    Parameters
    ----------
    tablename : str
        Name of the table

    Returns
    -------
    table : :class:`~dynamo3.fields.Table`
        The table description, or None if the table does not exist.
    """
    try:
        response = self.call('describe_table', TableName=tablename)['Table']
        return Table.from_response(response)
    except DynamoDBError as e:
        # Missing table is an expected condition, not an error.
        if e.kwargs['Code'] != 'ResourceNotFoundException':
            raise
        return None
Get the details about a table Parameters ---------- tablename : str Name of the table Returns ------- table : :class:`~dynamo3.fields.Table`
def groupby(self, by=None, axis=0, level=None, as_index=True, sort=True,
            group_keys=True, squeeze=False, observed=False, **kwargs):
    """Group DataFrame or Series using a mapper or by a Series of columns.

    All arguments are validated and forwarded to the internal
    ``pandas.core.groupby.groupby.groupby`` factory; see the pandas
    groupby user guide for full parameter semantics.
    """
    from pandas.core.groupby.groupby import groupby

    if by is None and level is None:
        raise TypeError("You have to supply one of 'by' and 'level'")
    axis = self._get_axis_number(axis)
    return groupby(self, by=by, axis=axis, level=level, as_index=as_index,
                   sort=sort, group_keys=group_keys, squeeze=squeeze,
                   observed=observed, **kwargs)
Group DataFrame or Series using a mapper or by a Series of columns. A groupby operation involves some combination of splitting the object, applying a function, and combining the results. This can be used to group large amounts of data and compute operations on these groups. Parameters ---------- by : mapping, function, label, or list of labels Used to determine the groups for the groupby. If ``by`` is a function, it's called on each value of the object's index. If a dict or Series is passed, the Series or dict VALUES will be used to determine the groups (the Series' values are first aligned; see ``.align()`` method). If an ndarray is passed, the values are used as-is determine the groups. A label or list of labels may be passed to group by the columns in ``self``. Notice that a tuple is interpreted a (single) key. axis : {0 or 'index', 1 or 'columns'}, default 0 Split along rows (0) or columns (1). level : int, level name, or sequence of such, default None If the axis is a MultiIndex (hierarchical), group by a particular level or levels. as_index : bool, default True For aggregated output, return object with group labels as the index. Only relevant for DataFrame input. as_index=False is effectively "SQL-style" grouped output. sort : bool, default True Sort group keys. Get better performance by turning this off. Note this does not influence the order of observations within each group. Groupby preserves the order of rows within each group. group_keys : bool, default True When calling apply, add group keys to index to identify pieces. squeeze : bool, default False Reduce the dimensionality of the return type if possible, otherwise return a consistent type. observed : bool, default False This only applies if any of the groupers are Categoricals. If True: only show observed values for categorical groupers. If False: show all values for categorical groupers. .. versionadded:: 0.23.0 **kwargs Optional, only accepts keyword argument 'mutated' and is passed to groupby. 
Returns ------- DataFrameGroupBy or SeriesGroupBy Depends on the calling object and returns groupby object that contains information about the groups. See Also -------- resample : Convenience method for frequency conversion and resampling of time series. Notes ----- See the `user guide <http://pandas.pydata.org/pandas-docs/stable/groupby.html>`_ for more. Examples -------- >>> df = pd.DataFrame({'Animal': ['Falcon', 'Falcon', ... 'Parrot', 'Parrot'], ... 'Max Speed': [380., 370., 24., 26.]}) >>> df Animal Max Speed 0 Falcon 380.0 1 Falcon 370.0 2 Parrot 24.0 3 Parrot 26.0 >>> df.groupby(['Animal']).mean() Max Speed Animal Falcon 375.0 Parrot 25.0 **Hierarchical Indexes** We can groupby different levels of a hierarchical index using the `level` parameter: >>> arrays = [['Falcon', 'Falcon', 'Parrot', 'Parrot'], ... ['Captive', 'Wild', 'Captive', 'Wild']] >>> index = pd.MultiIndex.from_arrays(arrays, names=('Animal', 'Type')) >>> df = pd.DataFrame({'Max Speed': [390., 350., 30., 20.]}, ... index=index) >>> df Max Speed Animal Type Falcon Captive 390.0 Wild 350.0 Parrot Captive 30.0 Wild 20.0 >>> df.groupby(level=0).mean() Max Speed Animal Falcon 370.0 Parrot 25.0 >>> df.groupby(level=1).mean() Max Speed Type Captive 210.0 Wild 185.0
def update(self, docs=None, split=0, parallelism=None, progress_bar=True):
    """Update the features of the specified candidates.

    :param docs: if provided, featurize candidates in these documents.
    :param split: if docs is None, featurize candidates in this split.
    :type split: int
    :param parallelism: thread count for extraction; overrides the value
        the Featurizer was initialized with.
    :type parallelism: int
    :param progress_bar: whether to display a per-document progress bar.
    :type progress_bar: bool
    """
    self.apply(docs=docs, split=split, train=True, clear=False,
               parallelism=parallelism, progress_bar=progress_bar)
Update the features of the specified candidates. :param docs: If provided, apply features to all the candidates in these documents. :param split: If docs is None, apply features to the candidates in this particular split. :type split: int :param parallelism: How many threads to use for extraction. This will override the parallelism value used to initialize the Featurizer if it is provided. :type parallelism: int :param progress_bar: Whether or not to display a progress bar. The progress bar is measured per document. :type progress_bar: bool
def start_in_oneshot_processes(obj, nb_process):
    """Start nb_process worker processes and wait for all of them.

    Each process runs ``oneshot_in_process`` on ``obj`` and dies when the
    job is finished.
    """
    workers = []
    for _ in range(nb_process):
        worker = Process(target=oneshot_in_process, args=(obj,))
        worker.start()
        workers.append(worker)
    for worker in workers:
        worker.join()
Start nb_process processes to do the job. When the processes finish the job, they die.
def change_state_id(self, state_id=None):
    """Change the id of the state to a new id.

    Replaces the old state_id with the new state_id in all data flows and
    transitions that reference it.

    :param state_id: The new state id of the state
    """
    old_state_id = self.state_id
    super(ContainerState, self).change_state_id(state_id)
    # Re-point every internal connection that referenced the old id;
    # transitions first, then data flows (as before).
    for connection in list(self.transitions.values()) + list(self.data_flows.values()):
        if connection.from_state == old_state_id:
            connection._from_state = self.state_id
        if connection.to_state == old_state_id:
            connection._to_state = self.state_id
Changes the id of the state to a new id. This functions replaces the old state_id with the new state_id in all data flows and transitions. :param state_id: The new state if of the state
def _convert_status(self, msg):
    """Raise EOFError or IOError on error status; otherwise do nothing."""
    code = msg.get_int()
    text = msg.get_text()
    if code == SFTP_OK:
        return
    if code == SFTP_EOF:
        raise EOFError(text)
    if code == SFTP_NO_SUCH_FILE:
        raise IOError(errno.ENOENT, text)
    if code == SFTP_PERMISSION_DENIED:
        raise IOError(errno.EACCES, text)
    # Any other SFTP status becomes a generic IOError.
    raise IOError(text)
Raises EOFError or IOError on error status; otherwise does nothing.
def wait(self):
    """Block until the pool is fully stopped (greenlet watch cleared)."""
    while self.greenlet_watch:
        # Poll faster while a stop is in progress.
        gevent.sleep(0.1 if self.stopping else 1)
Waits for the pool to be fully stopped
def add_usb_device_source(self, backend, id_p, address, property_names, property_values):
    """Add a new USB device source.

    :param backend: backend to use as the new device source (str).
    :param id_p: unique ID to identify the source (str).
    :param address: backend-dependent address, e.g. host[:port] (str).
    :param property_names: property names for detailed configuration
        (not used at the moment).
    :param property_values: matching property values (not used at the moment).
    """
    for name, value in (("backend", backend), ("id_p", id_p), ("address", address)):
        if not isinstance(value, basestring):
            raise TypeError("%s can only be an instance of type basestring" % name)
    for name, array in (("property_names", property_names),
                        ("property_values", property_values)):
        if not isinstance(array, list):
            raise TypeError("%s can only be an instance of type list" % name)
        # Only the first ten entries are type-checked, mirroring the
        # generated-bindings pattern of the original.
        for element in array[:10]:
            if not isinstance(element, basestring):
                raise TypeError(
                    "array can only contain objects of type basestring")
    self._call("addUSBDeviceSource",
               in_p=[backend, id_p, address, property_names, property_values])
Adds a new USB device source. in backend of type str The backend to use as the new device source. in id_p of type str Unique ID to identify the source. in address of type str Address to use, the format is dependent on the backend. For USB/IP backends for example the notation is host[:port]. in property_names of type str Array of property names for more detailed configuration. Not used at the moment. in property_values of type str Array of property values for more detailed configuration. Not used at the moment.
def groupBy(self, f, numPartitions=None, partitionFunc=portable_hash):
    """Return an RDD of grouped items.

    >>> rdd = sc.parallelize([1, 1, 2, 3, 5, 8])
    >>> result = rdd.groupBy(lambda x: x % 2).collect()
    >>> sorted([(x, sorted(y)) for (x, y) in result])
    [(0, [2, 8]), (1, [1, 1, 3, 5])]
    """
    keyed = self.map(lambda x: (f(x), x))
    return keyed.groupByKey(numPartitions, partitionFunc)
Return an RDD of grouped items. >>> rdd = sc.parallelize([1, 1, 2, 3, 5, 8]) >>> result = rdd.groupBy(lambda x: x % 2).collect() >>> sorted([(x, sorted(y)) for (x, y) in result]) [(0, [2, 8]), (1, [1, 1, 3, 5])]
def get_route(self):
    """Create an OAuth1 session and fetch the loci and schemes URLs."""
    session = OAuth1Session(self.consumer_key,
                            self.consumer_secret,
                            access_token=self.session_token,
                            access_token_secret=self.session_secret)
    response = session.get(self.test_rest_url)
    if response.status_code in (200, 201):
        # Prefer JSON when the server declares it; fall back to raw text.
        if re.search('json', response.headers['content-type'], flags=0):
            decoded = response.json()
        else:
            decoded = response.text
        self.loci = decoded['loci']
        self.profile = decoded['schemes']
Creates a session to find the URL for the loci and schemes
def put_file(self, name, filename, to_local_store=True, to_remote_store=True,
             compress_hint=True):
    """Add file ``filename`` to the filetracker under the name ``name``.

    If the file already exists, a new version is created (or the file is
    overwritten when the store does not support versioning). The file goes
    to the local and/or remote store depending on the flags; when only one
    store is configured the flags are effectively ignored. ``compress_hint``
    set to False makes the server compress instead of the client.
    """
    if not to_local_store and not to_remote_store:
        raise ValueError("Neither to_local_store nor to_remote_store set "
                         "in a call to filetracker.Client.put_file")
    check_name(name)
    # Only the local store requires an exclusive lock.
    lock = self.lock_manager.lock_for(name) if self.local_store else None
    if lock is not None:
        lock.lock_exclusive()
    try:
        if self.local_store and (to_local_store or not self.remote_store):
            versioned_name = self.local_store.add_file(name, filename)
        if self.remote_store and (to_remote_store or not self.local_store):
            versioned_name = self.remote_store.add_file(
                name, filename, compress_hint=compress_hint)
    finally:
        if lock is not None:
            lock.close()
    return versioned_name
Adds file ``filename`` to the filetracker under the name ``name``. If the file already exists, a new version is created. In practice if the store does not support versioning, the file is overwritten. The file may be added to local store only (if ``to_remote_store`` is ``False``), to remote store only (if ``to_local_store`` is ``False``) or both. If only one store is configured, the values of ``to_local_store`` and ``to_remote_store`` are ignored. Local data store implemented in :class:`LocalDataStore` tries to not directly copy the data to the final cache destination, but uses hardlinking. Therefore you should not modify the file in-place later as this would be disastrous. If ``compress_hint`` is set to False, file is compressed on the server, instead of the client. This is generally not recommended, unless you know what you're doing.
def data_chunk(data, chunk, with_overlap=False):
    """Get a data chunk.

    ``chunk`` is either ``(i, j)`` or a 4-tuple; for 4-tuples the first
    pair (with overlap) or the second pair (without) selects the rows.
    """
    assert isinstance(chunk, tuple)
    if len(chunk) == 2:
        start, stop = chunk
    elif len(chunk) == 4:
        start, stop = chunk[:2] if with_overlap else chunk[2:]
    else:
        raise ValueError("'chunk' should have 2 or 4 elements, "
                         "not {0:d}".format(len(chunk)))
    return data[start:stop, ...]
Get a data chunk.
def _numbered_vowel_to_accented(vowel, tone):
    """Convert a numbered Pinyin vowel to an accented Pinyin vowel."""
    key = vowel + (str(tone) if isinstance(tone, int) else tone)
    return _PINYIN_TONES[key]
Convert a numbered Pinyin vowel to an accented Pinyin vowel.
def data_size(self, live_data=None):
    """Use `nodetool info` to get the size of a node's data in KB.

    :param live_data: deprecated and ignored (a warning is emitted).
    """
    if live_data is not None:
        warnings.warn("The 'live_data' keyword argument is deprecated.",
                      DeprecationWarning)
    info_output = self.nodetool('info')[0]
    return _get_load_from_info_output(info_output)
Uses `nodetool info` to get the size of a node's data in KB.
def parse_href(href):
    """Parse an Analyze Re href into collection name and ID."""
    path_parts = urlparse(href).path.split('/')
    # path is '/<collection>/<id>/...' so index 0 is the empty prefix.
    return path_parts[1], path_parts[2]
Parses an Analyze Re href into collection name and ID
def pre_dispatch(self):
    """List of pre-dispatch methods from registered middleware."""
    ordered = sort_by_priority(self)
    return tuple(middleware.pre_dispatch
                 for middleware in ordered
                 if hasattr(middleware, 'pre_dispatch'))
List of pre-dispatch methods from registered middleware.
def is_output(self, stream):
    """Check if a stream is a sensor graph output.

    Return:
        bool
    """
    return any(streamer.selector.matches(stream)
               for streamer in self.streamers)
Check if a stream is a sensor graph output. Return: bool
def _get_queue_types(fed_arrays, data_sources): try: return [data_sources[n].dtype for n in fed_arrays] except KeyError as e: raise ValueError("Array '{k}' has no data source!" .format(k=e.message)), None, sys.exc_info()[2]
Given a list of arrays to feed in fed_arrays, return a list of associated queue types, obtained from tuples in the data_sources dictionary
def find_results_gen(search_term, field='title'):
    """Yield the requested field from each result of a PDB search.

    Parameters
    ----------
    search_term : str
        The search keyword
    field : str
        The type of information to record about each entry

    Yields
    ------
    The value of ``field`` for each matching PDB entry that has it.
    """
    scan_params = make_query(search_term, querytype='AdvancedKeywordQuery')
    search_result_ids = do_search(scan_params)
    # The original kept an unused `all_titles` accumulator here; removed.
    for pdb_id in search_result_ids:
        result = describe_pdb(pdb_id)
        if field in result:
            yield result[field]
Return a generator of the results returned by a search of the protein data bank. This generator is used internally. Parameters ---------- search_term : str The search keyword field : str The type of information to record about each entry Examples -------- >>> result_gen = find_results_gen('bleb') >>> pprint.pprint([item for item in result_gen][:5]) ['MYOSIN II DICTYOSTELIUM DISCOIDEUM MOTOR DOMAIN S456Y BOUND WITH MGADP-BEFX', 'MYOSIN II DICTYOSTELIUM DISCOIDEUM MOTOR DOMAIN S456Y BOUND WITH MGADP-ALF4', 'DICTYOSTELIUM DISCOIDEUM MYOSIN II MOTOR DOMAIN S456E WITH BOUND MGADP-BEFX', 'MYOSIN II DICTYOSTELIUM DISCOIDEUM MOTOR DOMAIN S456E BOUND WITH MGADP-ALF4', 'The structural basis of blebbistatin inhibition and specificity for myosin ' 'II']
def read_galaxy_amqp_config(galaxy_config, base_dir):
    """Read RabbitMQ connection information from the Galaxy config file."""
    config_path = add_full_path(galaxy_config, base_dir)
    parser = six.moves.configparser.ConfigParser()
    parser.read(config_path)
    return {option: parser.get("galaxy_amqp", option)
            for option in parser.options("galaxy_amqp")}
Read connection information on the RabbitMQ server from Galaxy config.
def minutes(start, end=None):
    """Iterate over the minutes between the given datetime_tzs.

    Args:
      start: datetime_tz to start from.
      end: (Optional) date to end at; if not given the iterator never
        terminates.

    Returns:
      An iterator which generates datetime_tz objects a minute apart.
    """
    step = datetime.timedelta(minutes=1)
    return iterate.between(start, step, end)
Iterate over the minutes between the given datetime_tzs. Args: start: datetime_tz to start from. end: (Optional) Date to end at, if not given the iterator will never terminate. Returns: An iterator which generates datetime_tz objects a minute apart.
def tob(data, enc='utf8'):
    """Convert anything to bytes: text is encoded with ``enc``, anything
    else is passed to ``bytes``."""
    if isinstance(data, six.text_type):
        return data.encode(enc)
    return bytes(data)
Convert anything to bytes
def wind_bft(ms):
    """Convert wind from metres per second to Beaufort scale."""
    if ms is None:
        return None
    for force, threshold in enumerate(_bft_threshold):
        if ms < threshold:
            return force
    # Off the top of the table: maximum force.
    return len(_bft_threshold)
Convert wind from metres per second to Beaufort scale
def save_csv(self, fd):
    """Save the case as a series of Comma-Separated Values."""
    from pylon.io.excel import CSVWriter
    writer = CSVWriter(self)
    writer.write(fd)
Saves the case as a series of Comma-Separated Values.
def update_progress(self, progress, prefix=''):
    """Print a progress bar for longer-running scripts.

    ``progress`` is a value between 0.0 and 1.0; if ``prefix`` is present,
    it is printed before the bar. At exactly 1.0 the bar line is blanked
    and a newline emitted.
    """
    total_length = 40
    if progress == 1.:
        # Wipe the bar plus generous slack for the percentage text.
        sys.stderr.write('\r' + ' ' * (total_length + len(prefix) + 50))
        sys.stderr.write('\n')
        sys.stderr.flush()
    else:
        filled = int(round(total_length * progress))
        sys.stderr.write('\r%s [%s%s] %.1f %% '
                         % (prefix, '=' * filled,
                            ' ' * (total_length - filled), progress * 100))
        sys.stderr.flush()
Print a progress bar for longer-running scripts. The progress value is a value between 0.0 and 1.0. If a prefix is present, it will be printed before the progress bar.
def on_success(self, inv_plugin, emit_set_slot):
    """Called when the click was successful; apply it to the inventory.

    Args:
        inv_plugin (InventoryPlugin): inventory plugin instance
        emit_set_slot (func): function to signal a slot change, should be
            InventoryPlugin().emit_set_slot
    """
    self.dirty = set()
    self.apply(inv_plugin)
    for slot in self.dirty:
        emit_set_slot(slot)
Called when the click was successful and should be applied to the inventory. Args: inv_plugin (InventoryPlugin): inventory plugin instance emit_set_slot (func): function to signal a slot change, should be InventoryPlugin().emit_set_slot
def create_cities_csv(filename="places2k.txt", output="cities.csv"):
    """Take the places2k.txt from USPS and create a simple file of all
    cities (one name per line), skipping Puerto Rico entries."""
    with open(filename, 'r') as city_file, open(output, 'w') as out:
        for line in city_file:
            # Puerto Rico rows are prefixed with "PR"; skip them.
            if line.startswith("PR"):
                continue
            # Columns 9-72 hold the place name; the final token is a
            # place-type suffix that gets dropped.
            out.write(" ".join(line[9:72].split()[:-1]) + '\n')
Takes the places2k.txt from USPS and creates a simple file of all cities.
def _add_filters(self, **filters):
    """Extended _add_filters that also accepts RedisField values and
    model instances on the right side of a filter.

    Such values are resolved lazily (from redis, or the instance's PK)
    when the collection is evaluated; plain values are delegated to the
    parent implementation.
    """
    plain_filters = filters.copy()
    for key, value in filters.items():
        if isinstance(value, RedisField):
            # Only simple value fields attached to an instance can be
            # resolved later.
            if (not isinstance(value, SingleValueField)
                    or getattr(value, '_instance', None) is None):
                raise ValueError('If a field is used as a filter value, it '
                                 'must be a simple value field attached to '
                                 'an instance')
            is_extended = True
        elif isinstance(value, RedisModel):
            is_extended = True
        else:
            is_extended = False
        if not is_extended:
            continue
        if self._field_is_pk(key):
            self._lazy_collection['pks'].add(RawFilter(key, value))
        else:
            index, suffix, extra_field_parts = self._parse_filter_key(key)
            parsed = ParsedFilter(index, suffix, extra_field_parts, value)
            self._lazy_collection['sets'].append(parsed)
        plain_filters.pop(key)
    super(ExtendedCollectionManager, self)._add_filters(**plain_filters)
    return self
In addition to the normal _add_filters, this one accept RedisField objects on the right part of a filter. The value will be fetched from redis when calling the collection. The filter value can also be a model instance, in which case its PK will be fetched when calling the collection, too.
def descend(self, remote, force=False):
    """Descend into ``remote``, possibly creating directories as needed
    (only when ``force`` is True)."""
    for directory in remote.split('/'):
        try:
            self.conn.cwd(directory)
        except Exception:
            if force:
                self.conn.mkd(directory)
                self.conn.cwd(directory)
    return self.conn.pwd()
Descend, possibly creating directories as needed
def cleanup_images(remove_old=False, **kwargs):
    """Remove images that have no name and are not referenced as a
    dependency by any named image (similar to newer Docker's ``prune``,
    but supports more filters).

    :param remove_old: also remove named images lacking a `latest` tag.
    :type remove_old: bool
    """
    keep_tags = env.get('docker_keep_tags')
    if keep_tags is not None:
        kwargs.setdefault('keep_tags', keep_tags)
    removed = docker_fabric().cleanup_images(remove_old=remove_old, **kwargs)
    if kwargs.get('list_only'):
        puts('Unused images:')
        for image_name in removed:
            fastprint(image_name, end='\n')
Removes all images that have no name, and that are not references as dependency by any other named image. Similar to the ``prune`` functionality in newer Docker versions, but supports more filters. :param remove_old: Also remove images that do have a name, but no `latest` tag. :type remove_old: bool
def channel_is_opened(
        self,
        participant1: Address,
        participant2: Address,
        block_identifier: BlockSpecification,
        channel_identifier: ChannelID,
) -> bool:
    """Return true if the channel is in an open state, false otherwise."""
    try:
        state = self._get_channel_state(
            participant1=participant1,
            participant2=participant2,
            block_identifier=block_identifier,
            channel_identifier=channel_identifier,
        )
    except RaidenRecoverableError:
        # A recoverable lookup failure means the channel is not open.
        return False
    else:
        return state == ChannelState.OPENED
Returns true if the channel is in an open state, false otherwise.
def __list_uniques(self, date_range, field_name):
    """Retrieve a list of unique values of ``field_name`` within a date
    range.

    :param date_range: range filter passed to the query.
    :param field_name: field whose unique values are aggregated.
    :return: list of unique values.
    """
    search = Search(using=self._es_conn, index=self._es_index)
    search = search.filter('range', **date_range)
    # We only need the aggregation, not the hits themselves.
    search = search[0:0]
    search.aggs.bucket('uniques', 'terms', field=field_name, size=1000)
    response = search.execute()
    return [bucket.key for bucket in response.aggregations.uniques.buckets]
Retrieve a list of unique values in a given field within a date range. :param date_range: :param field_name: :return: list of unique values.
def prepare_hmet_lsm(self, lsm_data_var_map_array,
                     hmet_ascii_output_folder=None,
                     netcdf_file_path=None):
    """Prepare HMET data for a GSSHA simulation from land surface model
    data.

    Parameters:
        lsm_data_var_map_array(str): connections between LSM output and
            GSSHA input. See: :func:`~gsshapy.grid.GRIDtoGSSHA.`
        hmet_ascii_output_folder(Optional[str]): directory for HMET ASCII
            output. Mutually exclusive with netcdf_file_path.
        netcdf_file_path(Optional[str]): NetCDF4 output path for GSSHA
            input. Mutually exclusive with hmet_ascii_output_folder.
    """
    if self.l2g is None:
        raise ValueError("LSM converter not loaded ...")
    with tmp_chdir(self.project_manager.project_directory):
        self._update_simulation_end_from_lsm()
        if netcdf_file_path is not None:
            # NetCDF output: subset to netCDF and switch project cards.
            self.l2g.lsm_data_to_subset_netcdf(netcdf_file_path,
                                               lsm_data_var_map_array)
            self._update_card("HMET_NETCDF", netcdf_file_path, True)
            self.project_manager.deleteCard('HMET_ASCII', self.db_session)
        else:
            # ASCII output: the folder name may embed the simulation
            # start/end timestamps via {0}/{1} placeholders.
            if "{0}" in hmet_ascii_output_folder and "{1}" in hmet_ascii_output_folder:
                hmet_ascii_output_folder = hmet_ascii_output_folder.format(
                    self.simulation_start.strftime("%Y%m%d%H%M"),
                    self.simulation_end.strftime("%Y%m%d%H%M"))
            self.l2g.lsm_data_to_arc_ascii(
                lsm_data_var_map_array,
                main_output_folder=os.path.join(self.gssha_directory,
                                                hmet_ascii_output_folder))
            self._update_card("HMET_ASCII",
                              os.path.join(hmet_ascii_output_folder,
                                           'hmet_file_list.txt'),
                              True)
            self.project_manager.deleteCard('HMET_NETCDF', self.db_session)
        self._update_gmt()
Prepares HMET data for GSSHA simulation from land surface model data. Parameters: lsm_data_var_map_array(str): Array with connections for LSM output and GSSHA input. See: :func:`~gsshapy.grid.GRIDtoGSSHA.` hmet_ascii_output_folder(Optional[str]): Path to directory to output HMET ASCII files. Mutually exclusive with netcdf_file_path. Default is None. netcdf_file_path(Optional[str]): If you want the HMET data output as a NetCDF4 file for input to GSSHA. Mutually exclusive with hmet_ascii_output_folder. Default is None.
def write_alias_config_hash(alias_config_hash='', empty_hash=False):
    """Write the alias config hash to the alias hash file.

    Args:
        alias_config_hash: hash string to persist.
        empty_hash: True to write an empty string into the file; an empty
            hash forces a full command-table load on the next run.
    """
    content = '' if empty_hash else alias_config_hash
    with open(GLOBAL_ALIAS_HASH_PATH, 'w') as hash_file:
        hash_file.write(content)
Write self.alias_config_hash to the alias hash file. Args: empty_hash: True if we want to write an empty string into the file. Empty string in the alias hash file means that we have to perform a full load of the command table in the next run.
def decode(self, codes):
    """Given PQ-codes, reconstruct approximate D-dimensional vectors by
    fetching the codewords.

    Args:
        codes (np.ndarray): PQ-codes with shape=(N, M) and
            dtype=self.code_dtype; each row is one PQ-code.

    Returns:
        np.ndarray: reconstructed vectors, shape=(N, D), dtype=np.float32.
    """
    assert codes.ndim == 2
    N, M = codes.shape
    assert M == self.M
    assert codes.dtype == self.code_dtype
    vecs = np.empty((N, self.Ds * self.M), dtype=np.float32)
    for m in range(self.M):
        lo, hi = m * self.Ds, (m + 1) * self.Ds
        vecs[:, lo:hi] = self.codewords[m][codes[:, m], :]
    return vecs
Given PQ-codes, reconstruct original D-dimensional vectors approximately by fetching the codewords. Args: codes (np.ndarray): PQ-cdoes with shape=(N, M) and dtype=self.code_dtype. Each row is a PQ-code Returns: np.ndarray: Reconstructed vectors with shape=(N, D) and dtype=np.float32
def remove_pending_work_units(self, work_spec_name, work_unit_names):
    """Remove some work units from the pending list.

    If `work_unit_names` is None (which must be passed explicitly), all
    pending work units in `work_spec_name` are removed; otherwise only
    the named units. This can confuse workers actively holding the units.

    :param str work_spec_name: name of the work spec
    :param list work_unit_names: names of the work units, or None for all
    :return: number of work units removed
    """
    cutoff = time.time()
    return self._remove_some_work_units(
        work_spec_name, work_unit_names, priority_min=cutoff)
Remove some work units in the pending list. If `work_unit_names` is :const:`None` (which must be passed explicitly), all pending work units in `work_spec_name` are removed; otherwise only the specific named work units will be. Note that this function has the potential to confuse workers if they are actually working on the work units in question. If you have ensured that the workers are dead and you would be otherwise waiting for the leases to expire before calling :meth:`remove_available_work_units`, then this is a useful shortcut. :param str work_spec_name: name of the work spec :param list work_unit_names: names of the work units, or :const:`None` for all in `work_spec_name` :return: number of work units removed
def do_unmute(self, sender, body, args):
    """Unmute the chatroom for a user and flush their queued messages."""
    if not sender.get('MUTED'):
        self.send_message('you were not muted', sender)
        return
    sender['MUTED'] = False
    self.broadcast('%s has unmuted this chatroom' % (sender['NICK'],))
    for queued in sender.get('QUEUED_MESSAGES', []):
        self.send_message(queued, sender)
    sender['QUEUED_MESSAGES'] = []
Unmutes the chatroom for a user
def sparsify(dirname, output_every):
    """Remove files from an output directory at a regular interval, so as
    to make it as if there had been more iterations between outputs.

    Parameters
    ----------
    dirname: str
        A path to a directory.
    output_every: int
        Desired new number of iterations between outputs; must be an
        integer multiple of the current value.

    Raises
    ------
    ValueError
        The directory cannot be coerced into representing `output_every`.
    """
    fnames = get_filenames(dirname)
    current_every = get_output_every(dirname)
    if output_every % current_every != 0:
        raise ValueError('Directory with output_every={} cannot be coerced to'
                         'desired new value.'.format(current_every))
    stride = output_every // current_every
    keep = set(fnames[::stride])
    for fname in set(fnames) - keep:
        os.remove(fname)
Remove files from an output directory at regular interval, so as to make it as if there had been more iterations between outputs. Can be used to reduce the storage size of a directory. If the new number of iterations between outputs is not an integer multiple of the old number, then raise an exception. Parameters ---------- dirname: str A path to a directory. output_every: int Desired new number of iterations between outputs. Raises ------ ValueError The directory cannot be coerced into representing `output_every`.
def _add_trustee(self, device):
    """Add a single trusted device to the trust domain.

    :param device: ManagementRoot object -- device to add to trust domain
    :raises DeviceAlreadyInTrustDomain: if the device is already present.
    """
    device_name = get_device_info(device).name
    if device_name in self.domain:
        raise DeviceAlreadyInTrustDomain(
            'Device: %r is already in this trust domain.' % device_name)
    self._modify_trust(self.devices[0], self._get_add_trustee_cmd, device)
Add a single trusted device to the trust domain. :param device: ManagementRoot object -- device to add to trust domain
def _setGroupNames(classes, classRename): groups = {} for groupName, glyphList in classes.items(): groupName = classRename.get(groupName, groupName) if len(glyphList) == 1: continue groups[groupName] = glyphList return groups
Set the final names into the groups.
def _visible_in_diff(merge_result, context_lines=3): i = old_line = new_line = 0 while i < len(merge_result): line_or_conflict = merge_result[i] if isinstance(line_or_conflict, tuple): yield old_line, new_line, line_or_conflict old_line += len(line_or_conflict[0]) new_line += len(line_or_conflict[1]) else: for j in (list(range(max(0, i-context_lines), i )) + list(range(i+1 , min(len(merge_result), i+1+context_lines)))): if isinstance(merge_result[j], tuple): yield old_line, new_line, line_or_conflict break else: yield None old_line += 1 new_line += 1 i += 1 yield None
Collects the set of lines that should be visible in a diff with a certain number of context lines
def datetimes(self):
    """A sorted tuple of datetimes in this analysis period."""
    if self._timestamps_data is None:
        self._calculate_timestamps()
    leap = self.is_leap_year
    return tuple(DateTime.from_moy(moy, leap)
                 for moy in self._timestamps_data)
A sorted list of datetimes in this analysis period.
def on_close(self, ws):
    """Called when the websocket connection is closed; stops keepalive."""
    log.debug("Closing WebSocket connection with {}".format(self.url))
    keepalive = self.keepalive
    if keepalive and keepalive.is_alive():
        keepalive.do_run = False
        keepalive.join()
Called when websocket connection is closed
def parsyfiles_global_config(multiple_errors_tb_limit: int = None,
                             full_paths_in_logs: bool = None,
                             dict_to_object_subclass_limit: int = None):
    """Configure the parsyfiles library.

    :param multiple_errors_tb_limit: traceback size (default is 3) of
        individual parser exceptions shown when all parsing chains fail.
    :param full_paths_in_logs: if True, log full file paths; otherwise
        only the parent path, with children indented (default is False).
    :param dict_to_object_subclass_limit: number of subclasses the
        <dict_to_object> converter will try (default is 50).
    :return:
    """
    # Only explicitly provided settings overwrite the global config.
    if multiple_errors_tb_limit is not None:
        GLOBAL_CONFIG.multiple_errors_tb_limit = multiple_errors_tb_limit
    if full_paths_in_logs is not None:
        GLOBAL_CONFIG.full_paths_in_logs = full_paths_in_logs
    if dict_to_object_subclass_limit is not None:
        GLOBAL_CONFIG.dict_to_object_subclass_limit = dict_to_object_subclass_limit
This is the method you should use to configure the parsyfiles library :param multiple_errors_tb_limit: the traceback size (default is 3) of individual parsers exceptions displayed when parsyfiles tries several parsing chains and all of them fail. :param full_paths_in_logs: if True, full file paths will be displayed in logs. Otherwise only the parent path will be displayed and children paths will be indented (default is False) :param dict_to_object_subclass_limit: the number of subclasses that the <dict_to_object> converter will try, when instantiating an object from a dictionary. Default is 50 :return:
def pop_stream(cache, user_id):
    """Pop an item off the user's stack in the cache; delete the stack if
    it becomes empty after the pop.

    :param cache: werkzeug BasicCache-like object
    :param user_id: id of user, used as key in cache
    :return: top item from stack, otherwise None
    """
    stack = cache.get(user_id)
    if stack is None:
        return None
    top = stack.pop()
    if stack:
        cache.set(user_id, stack)
    else:
        cache.delete(user_id)
    return top
Pop an item off the stack in the cache. If stack is empty after pop, it deletes the stack. :param cache: werkzeug BasicCache-like object :param user_id: id of user, used as key in cache :return: top item from stack, otherwise None
def remove_tenant_user(request, project=None, user=None, domain=None):
    """Remove all roles from a user on a tenant, removing them from it."""
    client = keystoneclient(request, admin=True)
    for role in client.roles.roles_for_user(user, project):
        remove_tenant_user_role(request, user=user, role=role.id,
                                project=project, domain=domain)
Removes all roles from a user on a tenant, removing them from it.
def is_dynamo_value(value):
    """Return True if the value is a Dynamo-formatted value.

    A Dynamo value is a single-key dict whose key is a known type tag
    (present in TYPES_REV).
    """
    if not isinstance(value, dict) or len(value) != 1:
        return False
    # next(iter(...)) replaces six.next(six.iterkeys(...)) — it works on
    # both Python 2 and 3 without needing six for this expression.
    subkey = next(iter(value))
    return subkey in TYPES_REV
Returns True if the value is a Dynamo-formatted value
def get_message_plain_text(msg: Message):
    """Return the message body, falling back to the HTML stripped to plain text.

    :param msg: A :class:`~flask_mail.Message`
    :return: the plain text body (the raw HTML when BeautifulSoup is unavailable)
    """
    if msg.body:
        return msg.body
    if BeautifulSoup is None or not msg.html:
        return msg.html
    stripped_lines = (line.strip()
                      for line in BeautifulSoup(msg.html, 'lxml').text.splitlines())
    text = '\n'.join(stripped_lines)
    # Collapse runs of blank lines into a single blank line.
    return re.sub(r'\n\n+', '\n\n', text).strip()
Converts an HTML message to plain text. :param msg: A :class:`~flask_mail.Message` :return: The plain text message.
def add_to_package_package_tree(self, root, node_path, pkgnode):
    """Graft *pkgnode* into *root* at *node_path*, creating intermediate groups.

    An empty *node_path* replaces the root's children, which is only legal
    when the package is still empty.
    """
    if not node_path:
        if root.children:
            raise PackageException("Attempting to overwrite root node of a non-empty package.")
        root.children = pkgnode.children.copy()
        return
    cursor = root
    for segment in node_path[:-1]:
        # Create missing intermediate group nodes on the way down.
        cursor = cursor.children.setdefault(segment, GroupNode(dict()))
    cursor.children[node_path[-1]] = pkgnode
Adds a package or sub-package tree from an existing package to this package's contents.
def My_TreeTable(self, table, heads, heads2=None):
    """Define a tree-table layout from the headers, then render *table* into it."""
    self.Define_TreeTable(heads, heads2)
    self.Display_TreeTable(table)
Define and display a table in which the values in first column form one or more trees.
def exec_module(self, module):
    """Load the module's source, optionally transform it, then execute it."""
    global MAIN_MODULE_NAME
    # The designated "main" module is renamed so that the usual
    # `if __name__ == "__main__"` guard fires inside it; the sentinel is
    # cleared so only the first such module gets the treatment.
    if module.__name__ == MAIN_MODULE_NAME:
        module.__name__ = "__main__"
        MAIN_MODULE_NAME = None
    with open(self.filename) as f:
        source = f.read()
    if transforms.transformers:
        # Transformers already registered: always transform.
        source = transforms.transform(source)
    else:
        # Otherwise transform only if the file opts in via an
        # experimental-import marker line.
        for line in source.split('\n'):
            if transforms.FROM_EXPERIMENTAL.match(line):
                source = transforms.transform(source)
                break
    # Execute the (possibly transformed) source in the module's namespace.
    exec(source, vars(module))
Import the source code and transform it before executing it, so that it is known to Python.
def modify_karma(self, words):
    """Tally karma deltas from regex-match tuples.

    Each tuple's first element is a token; a trailing '-' means -1 karma,
    anything else means +1. Trailing '+'/'-' runs and a single pair of
    surrounding parentheses are stripped before counting.

    :return: defaultdict mapping word -> karma delta
    """
    karma = defaultdict(int)
    for word_tuple in (words or ()):
        token = word_tuple[0]
        last_char = token[-1]
        delta = -1 if last_char == '-' else 1
        if '-' in last_char:
            token = token.rstrip('-')
        elif '+' in last_char:
            token = token.rstrip('+')
        if token.startswith('(') and token.endswith(')'):
            token = token[1:-1]
        token = token.strip()
        if token:
            karma[token] += delta
    return karma
Given a regex object, look through the groups and modify karma as necessary
def _convert_units(self):
    """Convert masses and initial conditions to geometrized (G=c=1) units."""
    # Masses: solar masses -> geometrized length. Expression order kept
    # left-to-right to preserve exact floating-point results.
    self.m1 = self.m1 * M_sun * ct.G / ct.c ** 2
    self.m2 = self.m2 * M_sun * ct.G / ct.c ** 2
    # Scale factor depends on how the initial condition was specified.
    scale_for_cond_type = {
        'time': ct.c * ct.Julian_year,
        'frequency': 1. / ct.c,
        'separation': ct.parsec,
    }
    self.initial_point = self.initial_point * scale_for_cond_type[self.initial_cond_type]
    self.t_obs = self.t_obs * ct.c * ct.Julian_year
    return
Convert units to geometrized units. Change to G=c=1 (geometrized) units for ease in calculations.
def childFactory(self, ctx, name):
    """Resolve *name* as a webID and return the wrapped item model, or None."""
    try:
        item = self.webapp.fromWebID(name)
    except _WebIDFormatException:
        return None
    if item is None:
        return None
    return self.webViewer.wrapModel(item)
Return a shell page wrapped around the Item model described by the webID, or return None if no such item can be found.
def get_proc_name(cmd):
    """Return the base command name from a command string or argument list.

    :param cmd: a command, either a string or a list of tokens
    :return: the first token with any parentheses removed
    """
    if isinstance(cmd, Iterable) and not isinstance(cmd, str):
        cmd = " ".join(cmd)
    first_token = cmd.split()[0]
    return first_token.replace('(', '').replace(')', '')
Get the representative process name from complex command :param str | list[str] cmd: a command to be processed :return str: the basename representative command
def cudaMemcpy_htod(dst, src, count):
    """Copy *count* bytes from host memory *src* to device memory *dst*.

    Raises on a non-success CUDA status via cudaCheckStatus.
    """
    status = _libcudart.cudaMemcpy(dst, src, ctypes.c_size_t(count),
                                   cudaMemcpyHostToDevice)
    cudaCheckStatus(status)
Copy memory from host to device. Copy data from host memory to device memory. Parameters ---------- dst : ctypes pointer Device memory pointer. src : ctypes pointer Host memory pointer. count : int Number of bytes to copy.
def _setup_phantomjs(self, capabilities):
    """Create a local PhantomJS webdriver from the configured executable path.

    :param capabilities: desired capabilities object
    :returns: a new local phantomjs driver
    """
    driver_path = self.config.get('Driver', 'phantomjs_driver_path')
    self.logger.debug("Phantom driver path given in properties: %s", driver_path)
    return webdriver.PhantomJS(executable_path=driver_path,
                               desired_capabilities=capabilities)
Setup phantomjs webdriver :param capabilities: capabilities object :returns: a new local phantomjs driver
def from_py_func(cls, func):
    """Create a ``CustomJSFilter`` from a Python function translated to JS via PScript.

    Deprecated; scheduled for removal in an eventual 2.0 release.

    The function may only declare keyword arguments, and at least one default
    must be a Bokeh ``Model``; the defaults become the JS callback's args.
    """
    from bokeh.util.deprecation import deprecated
    deprecated("'from_py_func' is deprecated and will be removed in an eventual 2.0 release. "
               "Use CustomJSFilter directly instead.")
    if not isinstance(func, FunctionType):
        raise ValueError('CustomJSFilter.from_py_func only accepts function objects.')
    pscript = import_required(
        'pscript',
        dedent(
            # NOTE(review): dedent() is called with no argument here — the
            # error-message string appears to have been lost; confirm against
            # upstream and restore it.
        )
    )
    argspec = inspect.getargspec(func)
    default_names = argspec.args
    default_values = argspec.defaults or []
    # Every argument must carry a default (i.e. be keyword-style).
    if len(default_names) - len(default_values) != 0:
        raise ValueError("Function may only contain keyword arguments.")
    if default_values and not any(isinstance(value, Model) for value in default_values):
        raise ValueError("Default value must be a plot object.")
    func_kwargs = dict(zip(default_names, default_values))
    # Translate the function body and append a call forwarding the kwargs.
    code = pscript.py2js(func, 'filter') + 'return filter(%s);\n' % ', '.join(default_names)
    return cls(code=code, args=func_kwargs)
Create a ``CustomJSFilter`` instance from a Python function. The function is translated to JavaScript using PScript. The ``func`` function namespace will contain the variable ``source`` at render time. This will be the data source associated with the ``CDSView`` that this filter is added to.
def write(self, data):
    """Write a single record, splitting it across fixed-size blocks.

    Records that do not fit in the current block are emitted as a
    FIRST chunk, zero or more MIDDLE chunks, and a LAST chunk; records
    that fit are written as a single FULL record.

    Args:
        data: record data to write as string, byte array or byte sequence.
    """
    block_remaining = _BLOCK_SIZE - self.__position % _BLOCK_SIZE
    # Not even room for a record header: zero-pad to the block boundary
    # and start fresh in the next block.
    if block_remaining < _HEADER_LENGTH:
        self.__writer.write('\x00' * block_remaining)
        self.__position += block_remaining
        block_remaining = _BLOCK_SIZE
    if block_remaining < len(data) + _HEADER_LENGTH:
        # Record spans blocks: fill the current block with a FIRST chunk.
        first_chunk = data[:block_remaining - _HEADER_LENGTH]
        self.__write_record(_RECORD_TYPE_FIRST, first_chunk)
        data = data[len(first_chunk):]
        # Emit MIDDLE chunks until the remainder fits, then a LAST chunk.
        while True:
            block_remaining = _BLOCK_SIZE - self.__position % _BLOCK_SIZE
            if block_remaining >= len(data) + _HEADER_LENGTH:
                self.__write_record(_RECORD_TYPE_LAST, data)
                break
            else:
                chunk = data[:block_remaining - _HEADER_LENGTH]
                self.__write_record(_RECORD_TYPE_MIDDLE, chunk)
                data = data[len(chunk):]
    else:
        # Whole record fits in the current block.
        self.__write_record(_RECORD_TYPE_FULL, data)
Write single record. Args: data: record data to write as string, byte array or byte sequence.
def update(self, back=None):
    """Run the ``update`` function of every enabled backend that provides one."""
    for backend in self.backends(back):
        update_fn = '{0}.update'.format(backend)
        if update_fn in self.servers:
            log.debug('Updating %s fileserver cache', backend)
            self.servers[update_fn]()
Update all of the enabled fileserver backends which support the update function.
def raise_thread_exception(thread_id, exception):
    """Asynchronously raise *exception* in thread *thread_id* (CPython only).

    On other platforms the request is logged at critical level and ignored.
    """
    if current_platform == "CPython":
        _raise_thread_exception_cpython(thread_id, exception)
        return
    # Unsupported platform: log and bail out.
    exc_class = exception if inspect.isclass(exception) else type(exception)
    logger.critical(
        "Setting thread exceptions (%s) is not supported for your current platform (%r).",
        exc_class.__name__, current_platform,
    )
Raise an exception in a thread. Currently, this is only available on CPython. Note: This works by setting an async exception in the thread. This means that the exception will only get called the next time that thread acquires the GIL. Concretely, this means that this middleware can't cancel system calls.
def get_schema_descendant(
        self, route: SchemaRoute) -> Optional[SchemaNode]:
    """Walk *route* down from the receiver; return the descendant or ``None``.

    Args:
        route: Schema route to the descendant node (relative to the receiver).
    """
    node = self
    for step in route:
        node = node.get_child(*step)
        if node is None:
            return None
    return node
Return descendant schema node or ``None`` if not found. Args: route: Schema route to the descendant node (relative to the receiver).
def _create_scsi_devices(scsi_devices):
    """Build VirtualDeviceSpec 'add' operations for the given SCSI controllers.

    scsi_devices: list of SCSI controller property dicts
    :return: list of vim.vm.device.VirtualDeviceSpec objects
    """
    # Temporary negative device keys, one per controller.
    controller_keys = range(-1000, -1050, -1)
    specs = []
    if scsi_devices:
        log.trace('Creating SCSI devices %s',
                  [dev['adapter'] for dev in scsi_devices])
        for key, controller in zip(controller_keys, scsi_devices):
            specs.append(_apply_scsi_controller(controller['adapter'],
                                                controller['type'],
                                                controller['bus_sharing'],
                                                key,
                                                controller['bus_number'],
                                                'add'))
    return specs
Returns a list of vim.vm.device.VirtualDeviceSpec objects representing SCSI controllers scsi_devices: List of SCSI device properties
def pct_negative(self, threshold=0.0):
    """Fraction of periods in which ``self`` is strictly less than ``threshold``.

    Parameters
    ----------
    threshold : float, default 0.0

    Returns
    -------
    float
    """
    # Count the True entries of the boolean mask directly.  The previous
    # implementation counted the *nonzero values* below the threshold
    # (np.count_nonzero(self[self < threshold])), which silently excluded
    # zero-valued entries whenever threshold > 0.
    return np.count_nonzero(self < threshold) / self.count()
Pct. of periods in which `self` is less than `threshold.` Parameters ---------- threshold : {float, TSeries, pd.Series}, default 0. Returns ------- float
def astype(value, types=None):
    """Coerce *value* with the first converter in *types* that accepts it.

    Falls back to the default converter chain (int, float, asbool,
    bytes2str) and finally returns *value* unchanged.
    """
    candidates = (int, float, asbool, bytes2str) if types is None else types
    for converter in candidates:
        try:
            return converter(value)
        except (ValueError, AttributeError, TypeError, UnicodeEncodeError):
            pass
    return value
Return argument as one of types if possible. >>> astype('42') 42 >>> astype('3.14') 3.14 >>> astype('True') True >>> astype(b'Neee-Wom') 'Neee-Wom'
def strip_tags(cls, html):
    """Remove HTML tags from *html* using a fresh parser instance."""
    parser = cls()
    parser.feed(html)
    return parser.get_data()
This function may be used to remove HTML tags from data.
def cache_dest(self, url, saltenv='base', cachedir=None):
    """Return the expected local cache location for *url* in *saltenv*."""
    proto = urlparse(url).scheme
    if proto == '':
        # Bare path: already local, nothing to cache.
        return url
    if proto == 'salt':
        url, senv = salt.utils.url.parse(url)
        # A saltenv embedded in the URL overrides the argument.
        if senv:
            saltenv = senv
        return salt.utils.path.join(self.opts['cachedir'],
                                    'files',
                                    saltenv,
                                    url.lstrip('|/'))
    # Any other scheme is an external URL.
    return self._extrn_path(url, saltenv, cachedir=cachedir)
Return the expected cache location for the specified URL and environment.
def _package_exists(path): while path: if os.path.exists(path): return True else: path = os.path.dirname(path) return False
Checks if the given Python path matches a valid file or a valid container file :param path: A Python path :return: True if the module or its container exists
def index_template_absent(name):
    """Ensure the named Elasticsearch index template is absent.

    name
        Name of the index template to remove
    """
    ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''}
    try:
        existing = __salt__['elasticsearch.index_template_get'](name=name)
        if not (existing and name in existing):
            ret['comment'] = 'Index template {0} is already absent'.format(name)
        elif __opts__['test']:
            # Dry run: report what would happen.
            ret['comment'] = 'Index template {0} will be removed'.format(name)
            ret['changes']['old'] = existing[name]
            ret['result'] = None
        else:
            ret['result'] = __salt__['elasticsearch.index_template_delete'](name=name)
            if ret['result']:
                ret['comment'] = 'Successfully removed index template {0}'.format(name)
                ret['changes']['old'] = existing[name]
            else:
                ret['comment'] = 'Failed to remove index template {0} for unknown reasons'.format(name)
    except Exception as err:
        ret['result'] = False
        ret['comment'] = six.text_type(err)
    return ret
Ensure that the named index template is absent. name Name of the index to remove
def reverse_transform(self, tables, table_metas=None, missing=None):
    """Transform each table in *tables* back to its original format. (Deprecated.)

    Args:
        tables(dict): table name -> (pandas.DataFrame, dict) pairs.
        table_metas(dict): optional full metadata; falls back to self.table_dict.
        missing(bool): whether to use NullTransformer for missing values.

    Returns:
        dict: table name -> reverse-transformed pandas.DataFrame.
    """
    if missing is None:
        missing = self.missing
    else:
        self.missing = missing
    warnings.warn(DEPRECATION_MESSAGE.format('reverse_transform'), DeprecationWarning)
    reversed_tables = {}
    for table_name in tables:
        table = tables[table_name]
        # Prefer explicitly supplied metadata over the stored table_dict.
        meta = (self.table_dict[table_name][1] if table_metas is None
                else table_metas[table_name])
        reversed_tables[table_name] = self.reverse_transform_table(table, meta)
    return reversed_tables
Transform data back to its original format. Args: tables(dict): mapping of table names to `tuple` where each tuple is on the form (`pandas.DataFrame`, `dict`). The `DataFrame` contains the transformed data and the `dict` the corresponding meta information. If not specified, the tables will be retrieved using the meta_file. table_metas(dict): Full metadata file for the dataset. missing(bool): Wheter or not use NullTransformer to handle missing values. Returns: dict: Map from `str` (table_names) to `pandas.DataFrame` (transformed data).
def draw(self):
    """Draw the elbow curve, optionally marking the elbow and fit-time series."""
    self.ax.plot(self.k_values_, self.k_scores_, marker="D")
    # `is not None` instead of `!= None` (PEP 8); the label is a raw string
    # so the `\ ` mathtext spacing escapes are not treated as (invalid)
    # Python escape sequences.
    if self.locate_elbow and self.elbow_value_ is not None:
        elbow_label = r"$elbow\ at\ k={}, score={:0.3f}$".format(
            self.elbow_value_, self.elbow_score_)
        self.ax.axvline(self.elbow_value_, c=LINE_COLOR, linestyle="--",
                        label=elbow_label)
    if self.timings:
        # Fit times go on a secondary y-axis sharing the same x-axis.
        self.axes = [self.ax, self.ax.twinx()]
        self.axes[1].plot(
            self.k_values_,
            self.k_timers_,
            label="fit time",
            c='g',
            marker="o",
            linestyle="--",
            alpha=0.75,
        )
    return self.ax
Draw the elbow curve for the specified scores and values of K.
def exec_cmd(self, cmdstr):
    """Parse a line from the CLI read loop and dispatch the command."""
    parts = cmdstr.split()
    if not parts:
        # Blank line: nothing to do.
        return
    self._dispatch(parts[0], parts[1:])
Parse line from CLI read loop and execute provided command
def _unpack(self, data):
    """Strip the protocol header from *data* and return the payload as text.

    Undecodable bytes are replaced rather than raising.
    """
    msg_magic, msg_length, msg_type = self._unpack_header(data)
    end = self._struct_header_size + msg_length
    payload = data[self._struct_header_size:end]
    return payload.decode('utf-8', 'replace')
Unpacks the given byte string and parses the result from JSON. Returns None on failure and saves data into "self.buffer".
def ParseMultiple(self, result_dicts):
    """Parse WMI Win32_UserAccount results, yielding one User per usable row.

    Rows that yield neither a sid nor a username are dropped.
    """
    for result_dict in result_dicts:
        kb_user = rdf_client.User()
        for wmi_key, kb_key in iteritems(self.account_mapping):
            try:
                kb_user.Set(kb_key, result_dict[wmi_key])
            except KeyError:
                # Field absent from this WMI row: skip it.
                continue
        if kb_user.sid or kb_user.username:
            yield kb_user
Parse the WMI Win32_UserAccount output.
def get_subscriptions(self, fetch=False):
    """Return this Wallet's subscriptions, populated when *fetch* is True."""
    return Subscriptions(self.resource.subscriptions,
                         self.client,
                         populate=fetch)
Return this Wallet's subscriptions object, populating it if fetch is True.
def create_symlink_job(directory, checksums, filetype, symlink_path):
    """Build a DownloadJob that only symlinks an already-downloaded file."""
    file_ending = NgdConfig.get_fileending(filetype)
    filename, _ = get_name_and_checksum(checksums, file_ending)
    return DownloadJob(None,
                       os.path.join(directory, filename),
                       None,
                       os.path.join(symlink_path, filename))
Create a symlink-creating DownloadJob for an already downloaded file.
def _check_pcre_minions(self, expr, greedy):
    """Return the known minion ids matching the regular expression *expr*."""
    pattern = re.compile(expr)
    matched = [minion for minion in self._pki_minions() if pattern.match(minion)]
    return {'minions': matched, 'missing': []}
Return the minions found by looking via regular expressions
def _handle_response_error(self, response, retries, **kwargs):
    r"""Retry failed requests with capped exponential backoff.

    Parameters:
        response(Response): the :class:`requests.Response` to inspect.
        retries(int): how many times the request has been attempted so far.
        \**kwargs: the original parameters of :meth:`.request`.

    Returns:
        requests.Response
    """
    error = self._convert_response_to_error(response)
    if error is None:
        return response
    retry_limit = self._max_retries_for_error(error)
    if retry_limit is None or retries >= retry_limit:
        # Non-retryable, or budget exhausted: hand the response back.
        return response
    # Exponential backoff starting at 62.5ms, capped at one second.
    backoff = min(0.0625 * 2 ** retries, 1.0)
    self.logger.warning("Sleeping for %r before retrying failed request...", backoff)
    time.sleep(backoff)
    retries += 1
    self.logger.warning("Retrying failed request. Attempt %d/%d.", retries, retry_limit)
    return self.request(retries=retries, **kwargs)
r"""Provides a way for each connection wrapper to handle error responses. Parameters: response(Response): An instance of :class:`.requests.Response`. retries(int): The number of times :meth:`.request` has been called so far. \**kwargs: The parameters with which :meth:`.request` was called. The `retries` parameter is excluded from `kwargs` intentionally. Returns: requests.Response
def random_uniform(mesh, shape, **kwargs):
    """Return a uniform-random Tensor of *shape* on *mesh*.

    Args:
        mesh: a Mesh
        shape: a Shape (or anything convert_to_shape accepts)
        **kwargs: keyword args for tf.random.uniform, except seed
    """
    op = RandomOperation(mesh, convert_to_shape(shape), tf.random.uniform, **kwargs)
    return op.outputs[0]
Random uniform. Args: mesh: a Mesh shape: a Shape **kwargs: keyword args for tf.random.uniform, except seed Returns: a Tensor
def omit_wells(self, uwis=None):
    """Return a new project without the wells whose UWIs are listed.

    Args:
        uwis (list): list or tuple of UWI strings.

    Returns:
        project
    """
    if uwis is None:
        raise ValueError('Must specify at least one uwi')
    kept = [well for well in self if well.uwi not in uwis]
    return Project(kept)
Returns a new project where wells with specified uwis have been omitted Args: uwis (list): list or tuple of UWI strings. Returns: project
def buscar(self, id_vlan):
    """Fetch a VLAN by its identifier via ``GET vlan/<id>/``.

    :param id_vlan: VLAN identifier (must be a valid integer).
    :raise InvalidParameterError: when the id is missing or invalid.
    :return: the parsed API response dictionary.
    """
    if not is_valid_int_param(id_vlan):
        raise InvalidParameterError(
            u'Vlan id is invalid or was not informed.')
    url = 'vlan/' + str(id_vlan) + '/'
    code, xml = self.submit(None, 'GET', url)
    return self.response(code, xml)
Get VLAN by its identifier. :param id_vlan: VLAN identifier. :return: Following dictionary: :: {'vlan': {'id': < id_vlan >, 'nome': < nome_vlan >, 'num_vlan': < num_vlan >, 'id_ambiente': < id_ambiente >, 'id_tipo_rede': < id_tipo_rede >, 'rede_oct1': < rede_oct1 >, 'rede_oct2': < rede_oct2 >, 'rede_oct3': < rede_oct3 >, 'rede_oct4': < rede_oct4 >, 'bloco': < bloco >, 'mascara_oct1': < mascara_oct1 >, 'mascara_oct2': < mascara_oct2 >, 'mascara_oct3': < mascara_oct3 >, 'mascara_oct4': < mascara_oct4 >, 'broadcast': < broadcast >, 'descricao': < descricao >, 'acl_file_name': < acl_file_name >, 'acl_valida': < acl_valida >, 'ativada': < ativada >} OR {'id': < id_vlan >, 'nome': < nome_vlan >, 'num_vlan': < num_vlan >, 'id_tipo_rede': < id_tipo_rede >, 'id_ambiente': < id_ambiente >, 'bloco1': < bloco1 >, 'bloco2': < bloco2 >, 'bloco3': < bloco3 >, 'bloco4': < bloco4 >, 'bloco5': < bloco5 >, 'bloco6': < bloco6 >, 'bloco7': < bloco7 >, 'bloco8': < bloco8 >, 'bloco': < bloco >, 'mask_bloco1': < mask_bloco1 >, 'mask_bloco2': < mask_bloco2 >, 'mask_bloco3': < mask_bloco3 >, 'mask_bloco4': < mask_bloco4 >, 'mask_bloco5': < mask_bloco5 >, 'mask_bloco6': < mask_bloco6 >, 'mask_bloco7': < mask_bloco7 >, 'mask_bloco8': < mask_bloco8 >, 'broadcast': < broadcast >, 'descricao': < descricao >, 'acl_file_name': < acl_file_name >, 'acl_valida': < acl_valida >, 'acl_file_name_v6': < acl_file_name_v6 >, 'acl_valida_v6': < acl_valida_v6 >, 'ativada': < ativada >}} :raise VlanNaoExisteError: VLAN does not exist. :raise InvalidParameterError: VLAN id is none or invalid. :raise DataBaseError: Networkapi failed to access the database. :raise XMLError: Networkapi failed to generate the XML response.
def serialize(v, known_modules=[]):
    """Return ``(text, type_name)`` for *v* using the registered serializer.

    NOTE: the mutable default is only forwarded, never mutated here.
    """
    type_name = name(v, known_modules=known_modules)
    serialize_fn = serializer(type_name)
    return serialize_fn(v), type_name
Get a text representation of an object.
def dispatch_hook(cls, _pkt, _underlayer=None, *args, **kargs):
    """Choose among the registered payload classes the one able to handle the packet.

    Falls back to Raw (with a console warning) when none can.
    """
    for candidate in cls._payload_class:
        if hasattr(candidate, "can_handle") and \
                candidate.can_handle(_pkt, _underlayer):
            return candidate
    print("DCE/RPC payload class not found or undefined (using Raw)")
    return Raw
dispatch_hook to choose among different registered payloads
def config_args(self, section='default'):
    """Read *section* of the config file into a dict, coercing values.

    'True'/'true' and 'False'/'false' become booleans; everything else is
    run through utils.is_int. A missing or unreadable section yields {}.

    :param section: ``str``
    :return: ``dict``
    """
    if sys.version_info >= (2, 7, 0):
        parser = ConfigParser.SafeConfigParser(allow_no_value=True)
    else:
        parser = ConfigParser.SafeConfigParser()
    # Keep option names case-sensitive.
    parser.optionxform = str
    args = {}
    try:
        parser.read(self.config_file)
        for name, value in parser.items(section):
            if value in ('False', 'false'):
                args[name] = False
            elif value in ('True', 'true'):
                args[name] = True
            else:
                args[name] = utils.is_int(value=value)
    except Exception as exp:
        self.log.warn('Section: [ %s ] Message: "%s"', section, exp)
        return {}
    else:
        return args
Loop through the configuration file and set all of our values. Note: that anything can be set as a "section" in the argument file. If a section does not exist an empty dict will be returned. :param section: ``str`` :return: ``dict``
def nmltostring(nml):
    """Render a dict-of-dicts as a Fortran namelist string.

    Strings are quoted, lists/tuples are comma-joined; each group is
    wrapped in "&name" ... "/".
    """
    if not isinstance(nml, dict):
        raise ValueError("nml should be a dict !")
    chunks = []
    for group_name, group in nml.items():
        lines = ["&" + group_name]
        for key, val in group.items():
            if isinstance(val, list) or isinstance(val, tuple):
                lines.append(key + " = " + ",".join(map(str, val)) + ",")
            elif is_string(val):
                lines.append(key + " = '" + str(val) + "',")
            else:
                lines.append(key + " = " + str(val) + ",")
        lines.append("/")
        chunks.append("\n".join(lines) + "\n")
    return "".join(chunks)
Convert a dictionary representing a Fortran namelist into a string.
def _rollback_handle(cls, connection):
    """Roll back the handle, tolerating already-aborted Snowflake sessions.

    Snowflake raises ProgrammingError('Session no longer exists') when
    rolling back an aborted session; that specific error is swallowed.
    """
    try:
        connection.handle.rollback()
    except snowflake.connector.errors.ProgrammingError as e:
        if 'Session no longer exists' not in dbt.compat.to_string(e):
            raise
On snowflake, rolling back the handle of an aborted session raises an exception.
def OnLinkBitmap(self, event):
    """Ask the user for a bitmap file and link it into the current cell."""
    wildcard = "*"
    message = _("Select bitmap for current cell")
    style = wx.OPEN | wx.CHANGE_DIR
    filepath, __ = self.grid.interfaces.get_filepath_findex_from_user(
        wildcard, message, style)
    try:
        bmp = wx.Bitmap(filepath)
    except TypeError:
        return
    if bmp.Size == (-1, -1):
        # Not a loadable bitmap: silently ignore.
        return
    code = "wx.Bitmap(r'{filepath}')".format(filepath=filepath)
    key = self.grid.actions.cursor
    self.grid.actions.set_code(key, code)
Link bitmap event handler
def _wait_and_except_if_failed(self, event, timeout=None):
    """Wait for *event*, then raise if it recorded a failure.

    Falls back to the configured sync timeout when *timeout* is not given.
    """
    effective_timeout = timeout or self.__sync_timeout
    event.wait(effective_timeout)
    self._except_if_failed(event)
Combines waiting for event and call to `_except_if_failed`. If timeout is not specified the configured sync_timeout is used.
def create_group(self, group):
    """Create and start a group via ``POST /v2/groups``.

    :param group: the :class:`marathon.models.group.MarathonGroup` to create
    :returns: dict containing the version ID
    """
    payload = group.to_json()
    response = self._do_request('POST', '/v2/groups', data=payload)
    return response.json()
Create and start a group. :param :class:`marathon.models.group.MarathonGroup` group: the group to create :returns: success :rtype: dict containing the version ID