def on_service_arrival(self, svc_ref): """ Called when a service has been registered in the framework :param svc_ref: A service reference """ with self._lock: if svc_ref not in self.services: # Get the key property prop_value = svc_ref.get_property(self._key) if prop_value not in self._future_value and ( prop_value is not None or self._allow_none ): # Matching new property value service = self._context.get_service(svc_ref) # Store the information self._future_value[prop_value] = service self.services[svc_ref] = service # Call back iPOPO self._ipopo_instance.bind(self, service, svc_ref) return True return None
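The fix above assumes the author intended the grouping A and (B or C); as previously written, the chain A and B or C parses as (A and B) or C, which would re-bind a service whose property value is already tracked whenever allow_none is enabled. A minimal sketch of the difference:

# Hypothetical flags: value already tracked (A is False), value is non-None
# (B is True), allow_none enabled (C is True).
A, B, C = False, True, True
print(A and B or C)    # True  -- ungrouped parse would bind the service again
print(A and (B or C))  # False -- grouped parse correctly skips it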
def _validate_allof(self, definitions, field, value): """ {'type': 'list', 'logical': 'allof'} """ valids, _errors = \ self.__validate_logical('allof', definitions, field, value) if valids < len(definitions): self._error(field, errors.ALLOF, _errors, valids, len(definitions))
def update_ipsec_site_connection(self, ipsecsite_conn, body=None): """Updates an IPsecSiteConnection.""" return self.put( self.ipsec_site_connection_path % (ipsecsite_conn), body=body )
def set_select(self, select_or_deselect='select', value=None, text=None, index=None): """ Private method used by select methods @type select_or_deselect: str @param select_or_deselect: Should I select or deselect the element @type value: str @param value: Value to be selected @type text: str @param text: Text to be selected @type index: int @param index: Index to be selected @rtype: WebElementWrapper @return: Self """ # TODO: raise exception if element is not select element if select_or_deselect == 'select': if value is not None: Select(self.element).select_by_value(value) elif text is not None: Select(self.element).select_by_visible_text(text) elif index is not None: Select(self.element).select_by_index(index) elif select_or_deselect == 'deselect': if value is not None: Select(self.element).deselect_by_value(value) elif text is not None: Select(self.element).deselect_by_visible_text(text) elif index is not None: Select(self.element).deselect_by_index(index) elif select_or_deselect == 'deselect all': Select(self.element).deselect_all() return self
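A usage sketch, assuming element is a WebElementWrapper bound to a <select> element; the variable name and option values are illustrative, not part of the API:

element.set_select('select', value='us')            # match the value attribute
element.set_select('select', text='United States')  # match the visible text
element.set_select('select', index=0)               # match by position
element.set_select('deselect all')                  # clear a multi-select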
def to_user(user): """Serializes user to id string :param user: object to serialize :return: string id """ from sevenbridges.models.user import User if not user: raise SbgError('User is required!') elif isinstance(user, User): return user.username elif isinstance(user, six.string_types): return user else: raise SbgError('Invalid user parameter!')
def get(self, name, section=None, fallback=False): """ Returns a previously registered preference :param section: The section name under which the preference is registered :type section: str. :param name: The name of the preference. You can use dotted notation 'section.name' if you want to avoid providing section param :type name: str. :param fallback: Should we return a dummy preference object instead of raising an error if no preference is found? :type name: bool. :return: a :py:class:`prefs.BasePreference` instance """ # try dotted notation try: _section, name = name.split( preferences_settings.SECTION_KEY_SEPARATOR) return self[_section][name] except ValueError: pass # use standard params try: return self[section][name] except KeyError: if fallback: return self._fallback(section_name=section, pref_name=name) raise NotFoundInRegistry("No such preference in {0} with section={1} and name={2}".format( self.__class__.__name__, section, name))
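Both lookup styles side by side; the registry instance and the section/preference names here are made up for illustration:

registry.get('user.theme')             # dotted 'section.name' shortcut
registry.get('theme', section='user')  # equivalent explicit form
registry.get('ghost', section='user', fallback=True)  # dummy pref, no raise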
def _area(sma, eps, phi, r): """ Compute elliptical sector area. """ aux = r * math.cos(phi) / sma signal = aux / abs(aux) if abs(aux) >= 1.: aux = signal return abs(sma**2 * (1.-eps) / 2. * math.acos(aux))
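A quick sanity check of the formula, assuming _area is importable: with eps = 0 and r = sma the ellipse degenerates to a circle, so the result should equal the circular sector area sma**2 * phi / 2.

import math

sma, phi = 2.0, 0.5
# Circular limit: acos(cos(phi)) recovers phi, giving sma**2 * phi / 2 = 1.0
assert math.isclose(_area(sma, 0.0, phi, sma), sma ** 2 * phi / 2.0)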
def create_pax_header(self, info, encoding): """Return the object as a ustar header block. If it cannot be represented this way, prepend a pax extended header sequence with supplement information. """ info["magic"] = POSIX_MAGIC pax_headers = self.pax_headers.copy() # Test string fields for values that exceed the field length or cannot # be represented in ASCII encoding. for name, hname, length in ( ("name", "path", LENGTH_NAME), ("linkname", "linkpath", LENGTH_LINK), ("uname", "uname", 32), ("gname", "gname", 32)): if hname in pax_headers: # The pax header has priority. continue # Try to encode the string as ASCII. try: info[name].encode("ascii", "strict") except UnicodeEncodeError: pax_headers[hname] = info[name] continue if len(info[name]) > length: pax_headers[hname] = info[name] # Test number fields for values that exceed the field limit or values # that like to be stored as float. for name, digits in (("uid", 8), ("gid", 8), ("size", 12), ("mtime", 12)): if name in pax_headers: # The pax header has priority. Avoid overflow. info[name] = 0 continue val = info[name] if not 0 <= val < 8 ** (digits - 1) or isinstance(val, float): pax_headers[name] = str(val) info[name] = 0 # Create a pax extended header if necessary. if pax_headers: buf = self._create_pax_generic_header(pax_headers, XHDTYPE, encoding) else: buf = b"" return buf + self._create_header(info, USTAR_FORMAT, "ascii", "replace")
def join(self, right, on=None, how='inner'): """ Merge two SFrames. Merges the current (left) SFrame with the given (right) SFrame using a SQL-style equi-join operation by columns. Parameters ---------- right : SFrame The SFrame to join. on : None | str | list | dict, optional The column name(s) representing the set of join keys. Each row that has the same value in this set of columns will be merged together. * If 'None' is given, join will use all columns that have the same name as the set of join keys. * If a str is given, this is interpreted as a join using one column, where both SFrames have the same column name. * If a list is given, this is interpreted as a join using one or more column names, where each column name given exists in both SFrames. * If a dict is given, each dict key is taken as a column name in the left SFrame, and each dict value is taken as the column name in right SFrame that will be joined together. e.g. {'left_col_name':'right_col_name'}. how : {'left', 'right', 'outer', 'inner'}, optional The type of join to perform. 'inner' is default. * inner: Equivalent to a SQL inner join. Result consists of the rows from the two frames whose join key values match exactly, merged together into one SFrame. * left: Equivalent to a SQL left outer join. Result is the union between the result of an inner join and the rest of the rows from the left SFrame, merged with missing values. * right: Equivalent to a SQL right outer join. Result is the union between the result of an inner join and the rest of the rows from the right SFrame, merged with missing values. * outer: Equivalent to a SQL full outer join. Result is the union between the result of a left outer join and a right outer join. Returns ------- out : SFrame Examples -------- >>> animals = turicreate.SFrame({'id': [1, 2, 3, 4], ... 'name': ['dog', 'cat', 'sheep', 'cow']}) >>> sounds = turicreate.SFrame({'id': [1, 3, 4, 5], ... 'sound': ['woof', 'baa', 'moo', 'oink']}) >>> animals.join(sounds, how='inner') +----+-------+-------+ | id | name | sound | +----+-------+-------+ | 1 | dog | woof | | 3 | sheep | baa | | 4 | cow | moo | +----+-------+-------+ [3 rows x 3 columns] >>> animals.join(sounds, on='id', how='left') +----+-------+-------+ | id | name | sound | +----+-------+-------+ | 1 | dog | woof | | 3 | sheep | baa | | 4 | cow | moo | | 2 | cat | None | +----+-------+-------+ [4 rows x 3 columns] >>> animals.join(sounds, on=['id'], how='right') +----+-------+-------+ | id | name | sound | +----+-------+-------+ | 1 | dog | woof | | 3 | sheep | baa | | 4 | cow | moo | | 5 | None | oink | +----+-------+-------+ [4 rows x 3 columns] >>> animals.join(sounds, on={'id':'id'}, how='outer') +----+-------+-------+ | id | name | sound | +----+-------+-------+ | 1 | dog | woof | | 3 | sheep | baa | | 4 | cow | moo | | 5 | None | oink | | 2 | cat | None | +----+-------+-------+ [5 rows x 3 columns] """ available_join_types = ['left','right','outer','inner'] if not isinstance(right, SFrame): raise TypeError("Can only join two SFrames") if how not in available_join_types: raise ValueError("Invalid join type") if (self.num_columns() <= 0) or (right.num_columns() <= 0): raise ValueError("Cannot join an SFrame with no columns.") join_keys = dict() if on is None: left_names = self.column_names() right_names = right.column_names() common_columns = [name for name in left_names if name in right_names] for name in common_columns: join_keys[name] = name elif type(on) is str: join_keys[on] = on elif type(on) is list: for name in on: if type(name) is not str: raise TypeError("Join keys must each be a str.") join_keys[name] = name elif type(on) is dict: join_keys = on else: raise TypeError("Must pass a str, list, or dict of join keys") with cython_context(): return SFrame(_proxy=self.__proxy__.join(right.__proxy__, how, join_keys))
def requires_list(self): """ It is important that this property is calculated lazily. Getting the 'requires' attribute may trigger a package load, which may be avoided if this variant is reduced away before that happens. """ requires = self.variant.get_requires(build_requires=self.building) reqlist = RequirementList(requires) if reqlist.conflict: raise ResolveError( "The package %s has an internal requirements conflict: %s" % (str(self), str(reqlist))) return reqlist
def save_history(self): """Save history to a text file in user home directory""" open(self.LOG_PATH, 'w').write("\n".join( \ [to_text_string(self.pydocbrowser.url_combo.itemText(index)) for index in range(self.pydocbrowser.url_combo.count())]))
def from_dict(cls, d): """ Construct a MSONable AdfKey object from the JSON dict. Parameters ---------- d : dict A dict of saved attributes. Returns ------- adfkey : AdfKey An AdfKey object recovered from the JSON dict ``d``. """ key = d.get("name") options = d.get("options", None) subkey_list = d.get("subkeys", []) if len(subkey_list) > 0: subkeys = list(map(lambda k: AdfKey.from_dict(k), subkey_list)) else: subkeys = None return cls(key, options, subkeys)
def connect(self, protocol=None, mode=None, disposition=None): """Connect to the card. If protocol is not specified, connect with the default connection protocol. If mode is not specified, connect with SCARD_SHARE_SHARED.""" CardConnection.connect(self, protocol) pcscprotocol = translateprotocolmask(protocol) if 0 == pcscprotocol: pcscprotocol = self.getProtocol() if mode is None: mode = SCARD_SHARE_SHARED # store the way to dispose the card if disposition is None: disposition = SCARD_UNPOWER_CARD self.disposition = disposition hresult, self.hcard, dwActiveProtocol = SCardConnect( self.hcontext, str(self.reader), mode, pcscprotocol) if hresult != 0: self.hcard = None if hresult in (SCARD_W_REMOVED_CARD, SCARD_E_NO_SMARTCARD): raise NoCardException('Unable to connect', hresult=hresult) else: raise CardConnectionException( 'Unable to connect with protocol: ' + \ dictProtocol[pcscprotocol] + '. ' + \ SCardGetErrorMessage(hresult)) protocol = 0 if dwActiveProtocol == SCARD_PROTOCOL_T0 | SCARD_PROTOCOL_T1: # special case for T0 | T1 # this happens when mode=SCARD_SHARE_DIRECT and no protocol is # then negotiated with the card protocol = CardConnection.T0_protocol | CardConnection.T1_protocol else: for p in dictProtocol: if p == dwActiveProtocol: protocol = eval("CardConnection.%s_protocol" % dictProtocol[p]) PCSCCardConnection.setProtocol(self, protocol)
def add_chapter(self, title): ''' Adds a new chapter to the report. :param str title: Title of the chapter. ''' chap_id = 'chap%s' % self.chap_counter self.chap_counter += 1 self.sidebar += '<a href="#%s" class="list-group-item">%s</a>\n' % ( chap_id, title) self.body += '<h1 id="%s">%s</h1>\n' % (chap_id, title)
def rlmb_tiny_recurrent(): """Tiny setting with a recurrent next-frame model.""" hparams = rlmb_ppo_tiny() hparams.epochs = 1 # Too slow with 2 for regular runs. hparams.generative_model = "next_frame_basic_recurrent" hparams.generative_model_params = "next_frame_basic_recurrent" return hparams
def commit_account_vesting(self, block_height): """ vest any tokens at this block height """ # save all state log.debug("Commit all database state before vesting") self.db.commit() if block_height in self.vesting: traceback.print_stack() log.fatal("Tried to vest tokens twice at {}".format(block_height)) os.abort() # commit all vesting in one transaction cur = self.db.cursor() namedb_query_execute(cur, 'BEGIN', ()) res = namedb_accounts_vest(cur, block_height) namedb_query_execute(cur, 'END', ()) self.vesting[block_height] = True return True
def _initialize_global_state(self, redis_address, redis_password=None, timeout=20): """Initialize the GlobalState object by connecting to Redis. It's possible that certain keys in Redis may not have been fully populated yet. In this case, we will retry this method until they have been populated or we exceed a timeout. Args: redis_address: The Redis address to connect. redis_password: The password of the redis server. """ self.redis_client = services.create_redis_client( redis_address, redis_password) start_time = time.time() num_redis_shards = None redis_shard_addresses = [] while time.time() - start_time < timeout: # Attempt to get the number of Redis shards. num_redis_shards = self.redis_client.get("NumRedisShards") if num_redis_shards is None: print("Waiting longer for NumRedisShards to be populated.") time.sleep(1) continue num_redis_shards = int(num_redis_shards) if num_redis_shards < 1: raise Exception("Expected at least one Redis shard, found " "{}.".format(num_redis_shards)) # Attempt to get all of the Redis shards. redis_shard_addresses = self.redis_client.lrange( "RedisShards", start=0, end=-1) if len(redis_shard_addresses) != num_redis_shards: print("Waiting longer for RedisShards to be populated.") time.sleep(1) continue # If we got here then we successfully got all of the information. break # Check to see if we timed out. if time.time() - start_time >= timeout: raise Exception("Timed out while attempting to initialize the " "global state. num_redis_shards = {}, " "redis_shard_addresses = {}".format( num_redis_shards, redis_shard_addresses)) # Get the rest of the information. self.redis_clients = [] for shard_address in redis_shard_addresses: self.redis_clients.append( services.create_redis_client(shard_address.decode(), redis_password))
def call_method(self, service, path, interface, method, signature=None, args=None, no_reply=False, auto_start=False, timeout=-1): """Call a D-BUS method and wait for its reply. This method calls the D-BUS method with name *method* that resides on the object at bus address *service*, at path *path*, on interface *interface*. The *signature* and *args* are optional arguments that can be used to add parameters to the method call. The signature is a D-BUS signature string, while *args* must be a sequence of python types that can be converted into the types specified by the signature. See the `D-BUS specification <http://dbus.freedesktop.org/doc/dbus-specification.html>`_ for a reference on signature strings. The flags *no_reply* and *auto_start* control the NO_REPLY_EXPECTED and NO_AUTO_START flags on the D-BUS message. The return value is the result of the D-BUS method call. This will be a possibly empty sequence of values. """ message = txdbus.MethodCallMessage(path, method, interface=interface, destination=service, signature=signature, body=args, expectReply=not no_reply, autoStart=auto_start) serial = message.serial if timeout == -1: timeout = self._timeout try: with switch_back(timeout) as switcher: self._method_calls[serial] = switcher self.send_message(message) args, _ = self._hub.switch() finally: self._method_calls.pop(serial, None) response = args[0] assert response.reply_serial == serial if isinstance(response, txdbus.ErrorMessage): raise DbusMethodCallError(method, response) args = tuple(response.body) if response.body else () return args
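A call sketch, assuming client is a connected instance of this class; the service, path, and interface below are the standard freedesktop bus-daemon names, and ListNames needs no signature or arguments:

(names,) = client.call_method(
    'org.freedesktop.DBus',    # service (bus address)
    '/org/freedesktop/DBus',   # object path
    'org.freedesktop.DBus',    # interface
    'ListNames')               # returns one value: an array of strings
print(names)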
def mark_locations(h,section,locs,markspec='or',**kwargs): """ Marks one or more locations along a section. Could be used to mark the location of a recording or electrical stimulation. Args: h = hocObject to interface with neuron section = reference to section locs = float between 0 and 1, or array of floats optional arguments specify details of marker Returns: line = reference to plotted markers """ # get list of cartesian coordinates specifying section path xyz = get_section_path(h,section) (r,theta,phi) = sequential_spherical(xyz) rcum = np.append(0,np.cumsum(r)) # convert locs into lengths from the beginning of the path if type(locs) is float or type(locs) is np.float64: locs = np.array([locs]) if type(locs) is list: locs = np.array(locs) lengths = locs*rcum[-1] # find cartesian coordinates for markers xyz_marks = [] for targ_length in lengths: xyz_marks.append(find_coord(targ_length,xyz,rcum,theta,phi)) xyz_marks = np.array(xyz_marks) # plot markers line, = plt.plot(xyz_marks[:,0], xyz_marks[:,1], \ xyz_marks[:,2], markspec, **kwargs) return line
def get_trips(self, timestamp, start, via, destination, departure=True, prev_advices=1, next_advices=1): """ Fetch trip possibilities for these parameters http://webservices.ns.nl/ns-api-treinplanner?<parameters> fromStation toStation dateTime: 2012-02-21T15:50 departure: true for starting at timestamp, false for arriving at timestamp previousAdvices nextAdvices """ timezonestring = '+0100' if is_dst('Europe/Amsterdam'): timezonestring = '+0200' url = 'http://webservices.ns.nl/ns-api-treinplanner?' url = url + 'fromStation=' + start url = url + '&toStation=' + destination if via: url = url + '&via=' + via if len(timestamp) == 5: # Format of HH:MM - api needs yyyy-mm-ddThh:mm timestamp = time.strftime("%Y-%m-%d") + 'T' + timestamp #requested_time = datetime.strptime(timestamp, "%Y-%m-%dT%H:%M") # TODO: DST/normal time requested_time = load_datetime(timestamp + timezonestring, "%Y-%m-%dT%H:%M%z") else: #requested_time = datetime.strptime(timestamp, "%d-%m-%Y %H:%M") requested_time = load_datetime(timestamp + timezonestring, "%d-%m-%Y %H:%M%z") timestamp = datetime.strptime(timestamp, "%d-%m-%Y %H:%M").strftime("%Y-%m-%dT%H:%M") url = url + '&previousAdvices=' + str(prev_advices) url = url + '&nextAdvices=' + str(next_advices) url = url + '&dateTime=' + timestamp raw_trips = self._request('GET', url) return self.parse_trips(raw_trips, requested_time)
def get_state(self, force_update=False): """ Returns 0 if off and 1 if on. """ if force_update or self._state is None: return int(self.basicevent.GetBinaryState()['BinaryState']) return self._state
def get_conversation(self, conversation, **kwargs): """ Return single Conversation :calls: `GET /api/v1/conversations/:id \ <https://canvas.instructure.com/doc/api/conversations.html#method.conversations.show>`_ :param conversation: The object or ID of the conversation. :type conversation: :class:`canvasapi.conversation.Conversation` or int :rtype: :class:`canvasapi.conversation.Conversation` """ from canvasapi.conversation import Conversation conversation_id = obj_or_id(conversation, "conversation", (Conversation,)) response = self.__requester.request( 'GET', 'conversations/{}'.format(conversation_id), _kwargs=combine_kwargs(**kwargs) ) return Conversation(self.__requester, response.json())
def reloaded(name, jboss_config, timeout=60, interval=5): ''' Reloads configuration of jboss server. jboss_config: Dict with connection properties (see state description) timeout: Time to wait until jboss is back in running state. Default timeout is 60s. interval: Interval between state checks. Default interval is 5s. Decreasing the interval may slightly decrease waiting time but be aware that every status check is a call to jboss-cli which is a java process. If interval is smaller than process cleanup time it may easily lead to excessive resource consumption. This step performs the following operations: * Ensures that server is in running or reload-required state (by reading server-state attribute) * Reloads configuration * Waits for server to reload and be in running state Example: .. code-block:: yaml configuration_reloaded: jboss7.reloaded: - jboss_config: {{ pillar['jboss'] }} ''' log.debug(" ======================== STATE: jboss7.reloaded (name: %s) ", name) ret = {'name': name, 'result': True, 'changes': {}, 'comment': ''} status = __salt__['jboss7.status'](jboss_config) if not status['success'] or status['result'] not in ('running', 'reload-required'): ret['result'] = False ret['comment'] = "Cannot reload server configuration, it should be up and in 'running' or 'reload-required' state." return ret result = __salt__['jboss7.reload'](jboss_config) if result['success'] or \ 'Operation failed: Channel closed' in result['stdout'] or \ 'Communication error: java.util.concurrent.ExecutionException: Operation failed' in result['stdout']: wait_time = 0 status = None while (status is None or not status['success'] or status['result'] != 'running') and wait_time < timeout: time.sleep(interval) wait_time += interval status = __salt__['jboss7.status'](jboss_config) if status['success'] and status['result'] == 'running': ret['result'] = True ret['comment'] = 'Configuration reloaded' ret['changes']['reloaded'] = 'configuration' else: ret['result'] = False ret['comment'] = 'Could not reload the configuration. Timeout ({0} s) exceeded. '.format(timeout) if not status['success']: ret['comment'] = __append_comment('Could not connect to JBoss controller.', ret['comment']) else: ret['comment'] = __append_comment(('Server is in {0} state'.format(status['result'])), ret['comment']) else: ret['result'] = False ret['comment'] = 'Could not reload the configuration, stdout:'+result['stdout'] return ret
def __set_variable_watch(self, tid, address, size, action): """ Used by L{watch_variable} and L{stalk_variable}. @type tid: int @param tid: Thread global ID. @type address: int @param address: Memory address of variable to watch. @type size: int @param size: Size of variable to watch. The only supported sizes are: byte (1), word (2), dword (4) and qword (8). @type action: function @param action: (Optional) Action callback function. See L{define_hardware_breakpoint} for more details. @rtype: L{HardwareBreakpoint} @return: Hardware breakpoint at the requested address. """ # TODO # We should merge the breakpoints instead of overwriting them. # We'll have the same problem as watch_buffer and we'll need to change # the API again. if size == 1: sizeFlag = self.BP_WATCH_BYTE elif size == 2: sizeFlag = self.BP_WATCH_WORD elif size == 4: sizeFlag = self.BP_WATCH_DWORD elif size == 8: sizeFlag = self.BP_WATCH_QWORD else: raise ValueError("Bad size for variable watch: %r" % size) if self.has_hardware_breakpoint(tid, address): warnings.warn( "Hardware breakpoint in thread %d at address %s was overwritten!" \ % (tid, HexDump.address(address, self.system.get_thread(tid).get_bits())), BreakpointWarning) bp = self.get_hardware_breakpoint(tid, address) if bp.get_trigger() != self.BP_BREAK_ON_ACCESS or \ bp.get_watch() != sizeFlag: self.erase_hardware_breakpoint(tid, address) self.define_hardware_breakpoint(tid, address, self.BP_BREAK_ON_ACCESS, sizeFlag, True, action) bp = self.get_hardware_breakpoint(tid, address) else: self.define_hardware_breakpoint(tid, address, self.BP_BREAK_ON_ACCESS, sizeFlag, True, action) bp = self.get_hardware_breakpoint(tid, address) return bp
def _check_exclude(self, val): """ Validate the excluded metrics. Returns the set of excluded params. """ if val is None: exclude = frozenset() elif isinstance(val, str): exclude = frozenset([val.lower()]) else: exclude = frozenset(map(lambda s: s.lower(), val)) if len(exclude - frozenset(METRICS)) > 0: raise YellowbrickValueError( "'{}' is not a valid metric to exclude".format(repr(val)) ) return exclude
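Illustrative inputs and the normalized sets they produce, assuming viz is an instance of the visualizer and the lowercased names appear in METRICS:

viz._check_exclude(None)              # -> frozenset()
viz._check_exclude('Precision')       # -> frozenset({'precision'})
viz._check_exclude(['Recall', 'F1'])  # -> frozenset({'recall', 'f1'})
viz._check_exclude('bogus')           # -> raises YellowbrickValueError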
async def _dump_variant(self, writer, elem, elem_type=None, params=None): """ Dumps variant type to the writer. Supports both wrapped and raw variant. :param writer: :param elem: :param elem_type: :param params: :return: """ if isinstance(elem, VariantType) or elem_type.WRAPS_VALUE: await dump_uint(writer, elem.variant_elem_type.VARIANT_CODE, 1) await self.dump_field( writer, getattr(elem, elem.variant_elem), elem.variant_elem_type ) else: fdef = find_variant_fdef(elem_type, elem) await dump_uint(writer, fdef[1].VARIANT_CODE, 1) await self.dump_field(writer, elem, fdef[1])
def gaussian_polygons(points, n=10): """ Returns an array of approximately `n` `shapely.geometry.Polygon` objects for an array of `shapely.geometry.Point` objects. """ gdf = gpd.GeoDataFrame(data={'cluster_number': classify_clusters(points, n=n)}, geometry=points) polygons = [] for i in range(n): sel_points = gdf[gdf['cluster_number'] == i].geometry polygons.append(shapely.geometry.MultiPoint([(p.x, p.y) for p in sel_points]).convex_hull) polygons = [p for p in polygons if (not isinstance(p, shapely.geometry.Point)) and (not isinstance(p, shapely.geometry.LineString))] return gpd.GeoSeries(polygons)
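A small usage sketch with two well-separated clusters; it assumes gaussian_polygons and its classify_clusters helper are importable from the same module:

import shapely.geometry
import geopandas as gpd

coords = [(0, 0), (1, 0), (0, 1), (9, 9), (10, 9), (9, 10)]
points = gpd.GeoSeries([shapely.geometry.Point(x, y) for x, y in coords])
hulls = gaussian_polygons(points, n=2)  # roughly one convex hull per cluster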
def ShowInfo(self): """Shows information about available hashers, parsers, plugins, etc.""" self._output_writer.Write( '{0:=^80s}\n'.format(' log2timeline/plaso information ')) plugin_list = self._GetPluginData() for header, data in plugin_list.items(): table_view = views.ViewsFactory.GetTableView( self._views_format_type, column_names=['Name', 'Description'], title=header) for entry_header, entry_data in sorted(data): table_view.AddRow([entry_header, entry_data]) table_view.Write(self._output_writer)
def resolveWithMib(self, mibViewController): """Perform MIB variable ID and associated value conversion. Parameters ---------- mibViewController : :py:class:`~pysnmp.smi.view.MibViewController` class instance representing MIB browsing functionality. Returns ------- : :py:class:`~pysnmp.smi.rfc1902.ObjectType` reference to itself Raises ------ SmiError In case of a fatal MIB handling error Notes ----- Calling this method involves :py:meth:`~pysnmp.smi.rfc1902.ObjectIdentity.resolveWithMib` method invocation. Examples -------- >>> from pysmi.hlapi import varbinds >>> mibViewController = varbinds.AbstractVarBinds.getMibViewController( engine ) >>> objectType = ObjectType(ObjectIdentity('SNMPv2-MIB', 'sysDescr'), 'Linux i386') >>> objectType.resolveWithMib(mibViewController) ObjectType(ObjectIdentity('SNMPv2-MIB', 'sysDescr'), DisplayString('Linux i386')) >>> str(objectType) 'SNMPv2-MIB::sysDescr."0" = Linux i386' >>> """ if self._state & self.ST_CLEAM: return self self._args[0].resolveWithMib(mibViewController) MibScalar, MibTableColumn = mibViewController.mibBuilder.importSymbols( 'SNMPv2-SMI', 'MibScalar', 'MibTableColumn') if not isinstance(self._args[0].getMibNode(), (MibScalar, MibTableColumn)): if not isinstance(self._args[1], AbstractSimpleAsn1Item): raise SmiError('MIB object %r is not OBJECT-TYPE ' '(MIB not loaded?)' % (self._args[0],)) self._state |= self.ST_CLEAM return self if isinstance(self._args[1], (rfc1905.UnSpecified, rfc1905.NoSuchObject, rfc1905.NoSuchInstance, rfc1905.EndOfMibView)): self._state |= self.ST_CLEAM return self syntax = self._args[0].getMibNode().getSyntax() try: self._args[1] = syntax.clone(self._args[1]) except PyAsn1Error as exc: raise SmiError( 'MIB object %r having type %r failed to cast value ' '%r: %s' % (self._args[0].prettyPrint(), syntax.__class__.__name__, self._args[1], exc)) if rfc1902.ObjectIdentifier().isSuperTypeOf( self._args[1], matchConstraints=False): self._args[1] = ObjectIdentity( self._args[1]).resolveWithMib(mibViewController) self._state |= self.ST_CLEAM debug.logger & debug.FLAG_MIB and debug.logger( 'resolved %r syntax is %r' % (self._args[0], self._args[1])) return self
def new_app(self, App, prefix=None, callable=None, **params): """Invoke this method in the :meth:`build` method as many times as the number of :class:`Application` required by this :class:`MultiApp`. :param App: an :class:`Application` class. :param prefix: The prefix to use for the application, the prefix is appended to the application :ref:`config parameters <settings>` and to the application name. Each call to this method must use a different value for this parameter. It can be ``None``. :param callable: optional callable (function or object) used during initialisation of *App* (the :class:`Application.callable`). :param params: additional key-valued parameters used when creating an instance of *App*. :return: a tuple used by the :meth:`apps` method. """ params.update(self.cfg.params) params.pop('name', None) # remove the name prefix = prefix or '' if not prefix and '' in self._apps: prefix = App.name or App.__name__.lower() if not prefix: name = self.name cfg = App.create_config(params, name=name) else: name = '%s_%s' % (prefix, self.name) cfg = App.create_config(params, prefix=prefix, name=name) # Add the config entry to the multi app config if not available for k in cfg.settings: if k not in self.cfg.settings: self.cfg.settings[k] = cfg.settings[k] return new_app(prefix, (App, name, callable, cfg))
def main(): """Writes out newsfile if significant version bump""" last_known = '0' if os.path.isfile(metafile): with open(metafile) as fh: last_known = fh.read() import mbed_cloud current = mbed_cloud.__version__ # how significant a change in version scheme should trigger a new changelog entry # (api major, api minor, sdk major, sdk minor, sdk patch) sigfigs = 4 current_version = LooseVersion(current).version last_known_version = LooseVersion(last_known).version should_towncrier = current_version[:sigfigs] != last_known_version[:sigfigs] print('%s -- %s :: current vs previous changelog build' % (current, last_known)) if should_towncrier: print('%s >> %s :: running changelog build' % (current, last_known)) subprocess.check_call( ['towncrier', '--yes'], cwd=os.path.join(PROJECT_ROOT, 'docs', 'changelog') ) with open(metafile, 'w') as fh: fh.write(current)
def _remove_unicode_keys(dictobj): """Convert keys from 'unicode' to 'str' type. workaround for <http://bugs.python.org/issue2646> """ if sys.version_info[:2] >= (3, 0): return dictobj assert isinstance(dictobj, dict) newdict = {} for key, value in dictobj.items(): if type(key) is unicode: key = key.encode('utf-8') newdict[key] = value return newdict
def post_license_request(request): """Submission to create a license acceptance request.""" uuid_ = request.matchdict['uuid'] posted_data = request.json license_url = posted_data.get('license_url') licensors = posted_data.get('licensors', []) with db_connect() as db_conn: with db_conn.cursor() as cursor: cursor.execute("""\ SELECT l.url FROM document_controls AS dc LEFT JOIN licenses AS l ON (dc.licenseid = l.licenseid) WHERE uuid = %s::UUID""", (uuid_,)) try: # Check that the license exists existing_license_url = cursor.fetchone()[0] except TypeError: # NoneType if request.has_permission('publish.create-identifier'): cursor.execute("""\ INSERT INTO document_controls (uuid) VALUES (%s)""", (uuid_,)) existing_license_url = None else: raise httpexceptions.HTTPNotFound() if existing_license_url is None and license_url is None: raise httpexceptions.HTTPBadRequest("license_url is required") elif (license_url != existing_license_url or existing_license_url is None): cursor.execute("""\ UPDATE document_controls AS dc SET licenseid = l.licenseid FROM licenses AS l WHERE url = %s and is_valid_for_publication = 't' RETURNING dc.licenseid""", (license_url,)) try: # Check that it is a valid license id cursor.fetchone()[0] except TypeError: # None returned raise httpexceptions.HTTPBadRequest("invalid license_url") upsert_license_requests(cursor, uuid_, licensors) resp = request.response resp.status_int = 202 return resp
def begin_data_item_live(self, data_item): """Begins a live state for the data item. The live state is propagated to dependent data items. This method is thread safe. See slow_test_dependent_data_item_removed_while_live_data_item_becomes_unlive. """ with self.__live_data_items_lock: old_live_count = self.__live_data_items.get(data_item.uuid, 0) self.__live_data_items[data_item.uuid] = old_live_count + 1 if old_live_count == 0: data_item._enter_live_state() for dependent_data_item in self.get_dependent_data_items(data_item): self.begin_data_item_live(dependent_data_item)
def clean_all(self, config_file, region=None, profile_name=None): """ Clean all provisioned artifacts from both the local file and the AWS Greengrass service. :param config_file: config file containing the group to clean :param region: the region in which the group should be cleaned. [default: us-west-2] :param profile_name: the name of the `awscli` profile to use. [default: None] """ logging.info('[begin] Cleaning all provisioned artifacts') config = GroupConfigFile(config_file=config_file) if config.is_fresh() is True: raise ValueError("Config is already clean.") if region is None: region = self._region self._delete_group( config_file, region=region, profile_name=profile_name) self.clean_core(config_file, region=region) self.clean_devices(config_file, region=region) self.clean_file(config_file) logging.info('[end] Cleaned all provisioned artifacts')
def prep_for_deserialize(model, record, using, init_list=None): # pylint:disable=unused-argument """ Convert a record from SFDC (decoded JSON) to dict(model string, pk, fields) It fixes fields of some types. If names of required fields `init_list` are specified, then only these fields are processed. """ # TODO the parameter 'using' is not currently important. attribs = record.pop('attributes') # NOQA pylint:disable=unused-variable mod = model.__module__.split('.') if hasattr(model._meta, 'app_label'): app_label = getattr(model._meta, 'app_label') elif mod[-1] == 'models': app_label = mod[-2] else: raise ImproperlyConfigured("Can't discover the app_label for %s, you must specify it via model meta options." % model.__name__) if len(record.keys()) == 1 and model._meta.db_table in record: # this is for objects with ManyToManyField and OneToOneField while len(record) == 1: record = list(record.values())[0] if record is None: return None fields = prep_for_deserialize_inner(model, record, init_list=init_list) if init_list and set(init_list).difference(fields).difference([SF_PK]): raise DatabaseError("Not found some expected fields") return dict( model='.'.join([app_label, model.__name__]), pk=record.pop('Id'), fields=fields, )
def annotate_snv(adapter, variant): """Annotate an SNV/INDEL variant Args: adapter(loqusdb.plugin.adapter) variant(cyvcf2.Variant) """ variant_id = get_variant_id(variant) variant_obj = adapter.get_variant(variant={'_id':variant_id}) annotated_variant = annotate_variant(variant, variant_obj) return annotated_variant
def _is_child_wikicode(self, obj, recursive=True): """Return whether the given :class:`.Wikicode` is a descendant.""" def deref(nodes): if isinstance(nodes, _ListProxy): return nodes._parent # pylint: disable=protected-access return nodes target = deref(obj.nodes) if target is deref(self.nodes): return True if recursive: todo = [self] while todo: code = todo.pop() if target is deref(code.nodes): return True for node in code.nodes: todo += list(node.__children__()) return False
def do_commander(self): """! @brief Handle 'commander' subcommand.""" # Flatten commands list then extract primary command and its arguments. if self._args.commands is not None: cmds = [] for cmd in self._args.commands: cmds.append(flatten_args(split_command_line(arg) for arg in cmd)) else: cmds = None # Enter REPL. PyOCDCommander(self._args, cmds).run()
def _match_tags(repex_tags, path_tags): """Check for matching tags between what the user provided and the tags set in the config. If `any` is chosen, match. If no tags are chosen and none are configured, match. If the user provided tags match any of the configured tags, match. """ if 'any' in repex_tags or (not repex_tags and not path_tags): return True elif set(repex_tags) & set(path_tags): return True return False
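The three branches in concrete cases, assuming _match_tags is in scope (note the intersection branch returns a truthy set rather than a bool):

assert _match_tags(['any'], ['x'])         # 'any' short-circuits to True
assert _match_tags([], [])                 # nothing requested, nothing configured
assert _match_tags(['db'], ['db', 'web'])  # shared tag -> truthy intersection
assert not _match_tags(['db'], ['web'])    # disjoint tags -> False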
def send_message(self, *args, **kwargs): """See :func:`send_message`""" return send_message(*args, **self._merge_overrides(**kwargs)).run()
def header(self): ''' Displays the scan header, as defined by self.HEADER and self.HEADER_FORMAT. Returns None. ''' self.config.display.format_strings(self.HEADER_FORMAT, self.RESULT_FORMAT) self.config.display.add_custom_header(self.VERBOSE_FORMAT, self.VERBOSE) if isinstance(self.HEADER, list): self.config.display.header(*self.HEADER, file_name=self.current_target_file_name) elif self.HEADER: self.config.display.header(self.HEADER, file_name=self.current_target_file_name)
def set_window_option(self, option, value): """ Wrapper for ``$ tmux set-window-option <option> <value>``. Parameters ---------- option : str option to set, e.g. 'aggressive-resize' value : str window option value. True/False will turn in 'on' and 'off', also accepts string of 'on' or 'off' directly. Raises ------ :exc:`exc.OptionError`, :exc:`exc.UnknownOption`, :exc:`exc.InvalidOption`, :exc:`exc.AmbiguousOption` """ self.server._update_windows() if isinstance(value, bool) and value: value = 'on' elif isinstance(value, bool) and not value: value = 'off' cmd = self.cmd( 'set-window-option', '-t%s:%s' % (self.get('session_id'), self.index), # '-t%s' % self.id, option, value, ) if isinstance(cmd.stderr, list) and len(cmd.stderr): handle_option_error(cmd.stderr[0])
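Typical calls, assuming window is an instance of this Window class:

window.set_window_option('aggressive-resize', True)  # booleans map to 'on'/'off'
window.set_window_option('automatic-rename', 'off')  # 'on'/'off' pass through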
def save(self, exclude_scopes: tuple = ('Optimizer',)) -> None: """Save model parameters to self.save_path""" if not hasattr(self, 'sess'): raise RuntimeError('Your TensorFlow model {} must' ' have sess attribute!'.format(self.__class__.__name__)) path = str(self.save_path.resolve()) log.info('[saving model to {}]'.format(path)) var_list = self._get_saveable_variables(exclude_scopes) saver = tf.train.Saver(var_list) saver.save(self.sess, path)
def gen_colors(img): """Format the output from imagemagick into a list of hex colors.""" magick_command = has_im() for i in range(0, 20, 1): raw_colors = imagemagick(16 + i, img, magick_command) if len(raw_colors) > 16: break elif i == 19: logging.error("Imagemagick couldn't generate a suitable palette.") sys.exit(1) else: logging.warning("Imagemagick couldn't generate a palette.") logging.warning("Trying a larger palette size %s", 16 + i) return [re.search("#.{6}", str(col)).group(0) for col in raw_colors[1:]]
def sanity_check_execution_spec(execution_spec): """ Sanity checks an execution_spec dict, used to define execution logic (distributed vs single, shared memories, etc..) and distributed learning behavior of agents/models. Throws an error or warns if mismatches are found. Args: execution_spec (Union[None,dict]): The spec-dict to check (or None). Dict needs to have the following keys: - type: "single", "distributed" - distributed_spec: The distributed_spec dict with the following fields: - cluster_spec: TensorFlow ClusterSpec object (required). - job: The tf-job name. - task_index: integer (required). - protocol: communication protocol (default: none, i.e. 'grpc'). - session_config: dict with options for a TensorFlow ConfigProto object (default: None). Returns: A cleaned-up (in-place) version of the given execution-spec. """ # default spec: single mode def_ = dict(type="single", distributed_spec=None, session_config=None) if execution_spec is None: return def_ assert isinstance(execution_spec, dict), "ERROR: execution-spec needs to be of type dict (but is of type {})!".format(type(execution_spec).__name__) type_ = execution_spec.get("type") # TODO: Figure out what exactly we need for options and what types we should support. if type_ == "distributed": def_ = dict(job="ps", task_index=0, cluster_spec={ "ps": ["localhost:22222"], "worker": ["localhost:22223"] }) def_.update(execution_spec.get("distributed_spec", {})) execution_spec["distributed_spec"] = def_ execution_spec["session_config"] = execution_spec.get("session_config") return execution_spec elif type_ == "multi-threaded": return execution_spec elif type_ == "single": return execution_spec if execution_spec.get('num_parallel') is not None: assert type(execution_spec['num_parallel']) is int, "ERROR: num_parallel needs to be of type int but is of type {}!".format(type(execution_spec['num_parallel']).__name__) assert execution_spec['num_parallel'] > 0, "ERROR: num_parallel needs to be > 0 but is equal to {}".format(execution_spec['num_parallel']) return execution_spec raise TensorForceError("Unsupported execution type specified ({})!".format(type_))
def within_miles(self, key, point, max_distance, min_distance=None): """ Add a query constraint requiring that the named field's location lie within a given distance, in miles, of a point. :param key: field name to constrain :param point: the geo point to measure from :param max_distance: maximum distance in miles :param min_distance: minimum distance in miles :rtype: Query """ if min_distance is not None: min_distance = min_distance / 3958.8 return self.within_radians(key, point, max_distance / 3958.8, min_distance)
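The constant 3958.8 is the Earth's mean radius in miles, so dividing a surface distance in miles by it yields the central angle in radians that within_radians expects:

EARTH_RADIUS_MILES = 3958.8
print(10 / EARTH_RADIUS_MILES)  # a 10-mile cap -> ~0.002526 rad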
def get_distributed_seismicity_source_nodes(source): """ Returns list of nodes of attributes common to all distributed seismicity source classes :param source: Seismic source as instance of :class: `openquake.hazardlib.source.area.AreaSource` or :class: `openquake.hazardlib.source.point.PointSource` :returns: List of instances of :class:`openquake.baselib.node.Node` """ source_nodes = [] # parse msr source_nodes.append( Node("magScaleRel", text=source.magnitude_scaling_relationship.__class__.__name__)) # Parse aspect ratio source_nodes.append( Node("ruptAspectRatio", text=source.rupture_aspect_ratio)) # Parse MFD source_nodes.append(obj_to_node(source.mfd)) # Parse nodal plane distribution source_nodes.append( build_nodal_plane_dist(source.nodal_plane_distribution)) # Parse hypocentral depth distribution source_nodes.append( build_hypo_depth_dist(source.hypocenter_distribution)) return source_nodes
def print_update(self): """ print some status information in between. """ print("\r\n") now = datetime.datetime.now() print("Update info: (from: %s)" % now.strftime("%c")) current_total_size = self.total_stined_bytes + self.total_new_bytes if self.total_errored_items: print(" * WARNING: %i omitted files!" % self.total_errored_items) print(" * fast backup: %i files" % self.total_fast_backup) print( " * new content saved: %i files (%s %.1f%%)" % ( self.total_new_file_count, human_filesize(self.total_new_bytes), to_percent(self.total_new_bytes, current_total_size), ) ) print( " * stint space via hardlinks: %i files (%s %.1f%%)" % ( self.total_file_link_count, human_filesize(self.total_stined_bytes), to_percent(self.total_stined_bytes, current_total_size), ) ) duration = default_timer() - self.start_time performance = current_total_size / duration / 1024.0 / 1024.0 print(" * present performance: %.1fMB/s\n" % performance)
def get_id(self, grp): """ Return a hash of the tuple of indices that specify the group """ thehash = hex(hash(grp)) if ISPY3: # use default encoding to get bytes thehash = thehash.encode() return self.cache.get(grp, hashlib.sha1(thehash).hexdigest())
def get_versioning_status(self, headers=None): """ Returns the current status of versioning on the bucket. :rtype: dict :returns: A dictionary containing a key named 'Versioning' that can have a value of either Enabled, Disabled, or Suspended. Also, if MFADelete has ever been enabled on the bucket, the dictionary will contain a key named 'MFADelete' which will have a value of either Enabled or Suspended. """ response = self.connection.make_request('GET', self.name, query_args='versioning', headers=headers) body = response.read() boto.log.debug(body) if response.status == 200: d = {} ver = re.search(self.VersionRE, body) if ver: d['Versioning'] = ver.group(1) mfa = re.search(self.MFADeleteRE, body) if mfa: d['MfaDelete'] = mfa.group(1) return d else: raise self.connection.provider.storage_response_error( response.status, response.reason, body)
Returns the current status of versioning on the bucket. :rtype: dict :returns: A dictionary containing a key named 'Versioning' that can have a value of either Enabled, Disabled, or Suspended. Also, if MFADelete has ever been enabled on the bucket, the dictionary will contain a key named 'MfaDelete' which will have a value of either Enabled or Suspended.
def _co_moving2angle(self, x, y, idex): """ transforms co-moving distances Mpc into angles on the sky (radian) :param x: co-moving distance :param y: co-moving distance :param idex: index of the lens plane in the transverse-distance list :return: angles on the sky """ T_z = self._T_z_list[idex] #T_z = self._cosmo_bkg.T_xy(0, z_lens) theta_x = x / T_z theta_y = y / T_z return theta_x, theta_y
transforms co-moving distances Mpc into angles on the sky (radian) :param x: co-moving distance :param y: co-moving distance :param idex: index of the lens plane in the transverse-distance list :return: angles on the sky
def install_builtin (translator, do_unicode): """Install _() and _n() gettext methods into default namespace.""" try: import __builtin__ as builtins except ImportError: # Python 3 import builtins # Python 3 has no ugettext has_unicode = hasattr(translator, 'ugettext') if do_unicode and has_unicode: builtins.__dict__['_'] = translator.ugettext # also install ngettext builtins.__dict__['_n'] = translator.ungettext else: builtins.__dict__['_'] = translator.gettext # also install ngettext builtins.__dict__['_n'] = translator.ngettext
Install _() and _n() gettext methods into default namespace.
async def bluetooth(dev: Device, target, value): """Get or set bluetooth settings.""" if target and value: await dev.set_bluetooth_settings(target, value) print_settings(await dev.get_bluetooth_settings())
Get or set bluetooth settings.
def write(self, output_stream, kmip_version=enums.KMIPVersion.KMIP_1_0): """ Write the data encoding the KeyWrappingSpecification struct to a stream. Args: output_stream (stream): A data stream in which to encode object data, supporting a write method; usually a BytearrayStream object. kmip_version (KMIPVersion): An enumeration defining the KMIP version with which the object will be encoded. Optional, defaults to KMIP 1.0. """ local_stream = BytearrayStream() if self._wrapping_method: self._wrapping_method.write( local_stream, kmip_version=kmip_version ) else: raise ValueError( "Invalid struct missing the wrapping method attribute." ) if self._encryption_key_information: self._encryption_key_information.write( local_stream, kmip_version=kmip_version ) if self._mac_signature_key_information: self._mac_signature_key_information.write( local_stream, kmip_version=kmip_version ) if self._attribute_names: for unique_identifier in self._attribute_names: unique_identifier.write( local_stream, kmip_version=kmip_version ) if self._encoding_option: self._encoding_option.write( local_stream, kmip_version=kmip_version ) self.length = local_stream.length() super(KeyWrappingSpecification, self).write( output_stream, kmip_version=kmip_version ) output_stream.write(local_stream.buffer)
Write the data encoding the KeyWrappingSpecification struct to a stream. Args: output_stream (stream): A data stream in which to encode object data, supporting a write method; usually a BytearrayStream object. kmip_version (KMIPVersion): An enumeration defining the KMIP version with which the object will be encoded. Optional, defaults to KMIP 1.0.
def add_external_path(self, path): """ Adds an external path to the combobox if it exists on the file system. If the path is already listed in the combobox, it is removed from its current position and added back at the end. If the maximum number of paths is reached, the oldest external path is removed from the list. """ if not osp.exists(path): return self.removeItem(self.findText(path)) self.addItem(path) self.setItemData(self.count() - 1, path, Qt.ToolTipRole) while self.count() > MAX_PATH_HISTORY + EXTERNAL_PATHS: self.removeItem(EXTERNAL_PATHS)
Adds an external path to the combobox if it exists on the file system. If the path is already listed in the combobox, it is removed from its current position and added back at the end. If the maximum number of paths is reached, the oldest external path is removed from the list.
def difference_of_pandas_dfs(df_self, df_other, col_names=None): """ Returns a dataframe with all rows of df_other that are not in df_self, when considering the columns specified in col_names :param df_self: pandas Dataframe :param df_other: pandas Dataframe :param col_names: list of column names :return: pandas Dataframe of the rows unique to df_other """ df = pd.concat([df_self, df_other]) df = df.reset_index(drop=True) df_gpby = df.groupby(col_names) idx = [x[0] for x in list(df_gpby.groups.values()) if len(x) == 1] df_sym_diff = df.reindex(idx) df_diff = pd.concat([df_other, df_sym_diff]) df_diff = df_diff.reset_index(drop=True) df_gpby = df_diff.groupby(col_names) idx = [x[0] for x in list(df_gpby.groups.values()) if len(x) == 2] df_diff = df_diff.reindex(idx) return df_diff
Returns a dataframe with all rows of df_other that are not in df_self, when considering the columns specified in col_names :param df_self: pandas Dataframe :param df_other: pandas Dataframe :param col_names: list of column names :return: pandas Dataframe of the rows unique to df_other
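A usage sketch under the assumption that the function above is importable as-is; the expected result follows from the groupby bookkeeping (rows appearing twice in the second concat are exactly the rows of df_other absent from df_self):

import pandas as pd

df_a = pd.DataFrame({'id': [1, 2, 3], 'val': ['x', 'y', 'z']})
df_b = pd.DataFrame({'id': [2, 3, 4], 'val': ['y', 'z', 'w']})

# Rows of df_b that do not occur in df_a (compared on both columns):
diff = difference_of_pandas_dfs(df_a, df_b, col_names=['id', 'val'])
print(diff)  # a single row: id=4, val='w'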
def _format_date(self, obj) -> str: """ Short date format. :param obj: date or datetime or None :return: str """ if obj is None: return '' if isinstance(obj, datetime): obj = obj.date() return date_format(obj, 'SHORT_DATE_FORMAT')
Short date format. :param obj: date or datetime or None :return: str
def _reprJSON(self): """Returns a JSON serializable representation of a ``Ci`` class instance. Use :func:`maspy.core.Ci._fromJSON()` to generate a new ``Ci`` instance from the return value. :returns: a JSON serializable python object """ return {'__Ci__': (self.id, self.specfile, self.dataProcessingRef, self.precursor, self.product, self.params, self.attrib, self.arrayInfo ) }
Returns a JSON serializable representation of a ``Ci`` class instance. Use :func:`maspy.core.Ci._fromJSON()` to generate a new ``Ci`` instance from the return value. :returns: a JSON serializable python object
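The '__Ci__' marker key above follows the standard json round-trip pattern: a custom object_hook checks for the marker and rebuilds the instance. A minimal sketch of the mechanism with a placeholder rebuild step (the real decoder is maspy.core.Ci._fromJSON):

import json

def decode_ci(dct):
    # dispatch on the marker key inserted by _reprJSON()
    if '__Ci__' in dct:
        return ('Ci rebuilt from', dct['__Ci__'])  # placeholder for Ci._fromJSON
    return dct

payload = json.dumps({'__Ci__': ['ci_1', 'run01.mzML', None, {}, {}, [], {}, {}]})
print(json.loads(payload, object_hook=decode_ci))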
def getAverageBuildDuration(self, package, **kwargs): """ Return a timedelta that Koji considers to be average for this package. Calls "getAverageBuildDuration" XML-RPC. :param package: ``str``, for example "ceph" :returns: deferred that when fired returns a timedelta object for the estimated duration, or None if we could find no estimate for this package. """ seconds = yield self.call('getAverageBuildDuration', package, **kwargs) if seconds is None: defer.returnValue(None) defer.returnValue(timedelta(seconds=seconds))
Return a timedelta that Koji considers to be average for this package. Calls "getAverageBuildDuration" XML-RPC. :param package: ``str``, for example "ceph" :returns: deferred that when fired returns a timedelta object for the estimated duration, or None if we could find no estimate for this package.
def add_layer_to_canvas(layer, name): """Helper method to add layer to QGIS. :param layer: The layer. :type layer: QgsMapLayer :param name: Layer name. :type name: str """ if qgis_version() >= 21800: layer.setName(name) else: layer.setLayerName(name) QgsProject.instance().addMapLayer(layer, False)
Helper method to add layer to QGIS. :param layer: The layer. :type layer: QgsMapLayer :param name: Layer name. :type name: str
def CROSS(A, B): """Detect where A crosses above B (equivalently, where B crosses below A): A < B on the previous bar and A >= B on the current bar Arguments: A {pd.Series} -- first series B {pd.Series} -- second series Returns: pd.Series -- 1 at positions where A crosses above B, else 0 """ var = np.where(A < B, 1, 0) return (pd.Series(var, index=A.index).diff() < 0).apply(int)
Detect where A crosses above B (equivalently, where B crosses below A): A < B on the previous bar and A >= B on the current bar Arguments: A {pd.Series} -- first series B {pd.Series} -- second series Returns: pd.Series -- 1 at positions where A crosses above B, else 0
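A self-contained check of the crossover logic, assuming only numpy and pandas: the diff of the (A < B) indicator goes negative exactly where A first reaches or exceeds B.

import numpy as np
import pandas as pd

A = pd.Series([1, 2, 3, 4, 5])
B = pd.Series([3, 3, 3, 3, 3])

var = np.where(A < B, 1, 0)          # indicator: 1 while A is below B
signal = (pd.Series(var, index=A.index).diff() < 0).apply(int)
print(signal.tolist())               # [0, 0, 1, 0, 0] -- A crosses above B at index 2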
def as_xml(self,parent): """Create vcard-tmp XML representation of the field. :Parameters: - `parent`: parent node for the element :Types: - `parent`: `libxml2.xmlNode` :return: xml node with the field data. :returntype: `libxml2.xmlNode`""" n=parent.newChild(None,self.name.upper(),None) if self.type: n.newTextChild(None,"TYPE",self.type) n.newTextChild(None,"CRED",binascii.b2a_base64(self.cred)) return n
Create vcard-tmp XML representation of the field. :Parameters: - `parent`: parent node for the element :Types: - `parent`: `libxml2.xmlNode` :return: xml node with the field data. :returntype: `libxml2.xmlNode`
def scale(s, dtype=None): """Non-uniform scaling along the x, y, and z axes Parameters ---------- s : array-like, shape (3,) Scaling in x, y, z. dtype : dtype | None Output type (if None, don't cast). Returns ------- M : ndarray Transformation matrix describing the scaling. """ assert len(s) == 3 return np.array(np.diag(np.concatenate([s, (1.,)])), dtype)
Non-uniform scaling along the x, y, and z axes Parameters ---------- s : array-like, shape (3,) Scaling in x, y, z. dtype : dtype | None Output type (if None, don't cast). Returns ------- M : ndarray Transformation matrix describing the scaling.
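A quick check of the matrix produced above, assuming the scale function is in scope. Whether points are multiplied as rows (point @ M) or columns (M @ point) depends on the surrounding library's convention, though for a pure diagonal scaling the result is the same:

import numpy as np

M = scale([2., 3., 4.])
point = np.array([1., 1., 1., 1.])   # homogeneous coordinates
print(M @ point)                     # [2. 3. 4. 1.]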
def convert(self, request, response, data): """ Performs the desired conversion. :param request: The webob Request object describing the request. :param response: The webob Response object describing the response. :param data: The data dictionary returned by the prepare() method. :returns: A string, the results of which are the desired conversion. """ # Notes are in bark.notes dictionary return self.escape(request.environ.get('bark.notes', {}).get( self.modifier.param, '-'))
Performs the desired conversion. :param request: The webob Request object describing the request. :param response: The webob Response object describing the response. :param data: The data dictionary returned by the prepare() method. :returns: A string, the results of which are the desired conversion.
def u_base(self, theta, phi, lam, q): """Apply U to q.""" return self.append(UBase(theta, phi, lam), [q], [])
Apply U to q.
def build_phenotype(phenotype_id, adapter): """Build a small phenotype object Build a dictionary with phenotype_id and description Args: phenotype_id (str): The phenotype id adapter (scout.adapter.MongoAdapter) Returns: phenotype_obj (dict): dict( phenotype_id = str, feature = str, # description of phenotype ) """ phenotype_obj = {} phenotype = adapter.hpo_term(phenotype_id) if phenotype: phenotype_obj['phenotype_id'] = phenotype['hpo_id'] phenotype_obj['feature'] = phenotype['description'] # return the small object just built, not the raw adapter document return phenotype_obj
Build a small phenotype object Build a dictionary with phenotype_id and description Args: phenotype_id (str): The phenotype id adapter (scout.adapter.MongoAdapter) Returns: phenotype_obj (dict): dict( phenotype_id = str, feature = str, # description of phenotype )
def set(self, property_dict): """Attempts to set the given properties of the object. An example of this is setting the nickname of the object:: cdb.set({"nickname": "My new nickname"}) note that there is a convenience property `cdb.nickname` that allows you to get/set the nickname directly. """ self.metadata = self.db.update(self.path, property_dict).json()
Attempts to set the given properties of the object. An example of this is setting the nickname of the object:: cdb.set({"nickname": "My new nickname"}) note that there is a convenience property `cdb.nickname` that allows you to get/set the nickname directly.
def group_pairs(blocks, layout_blocks_list): """Sort a list of layout blocks into pairs Arguments: List:blocks -- List of block objects List:layout_blocks -- List of layout block indexes Returns: List -- Layout block pair indexes grouped in a list """ image_dict={} for block_id in layout_blocks_list: image_seq=blocks[block_id].ec_hdr.image_seq if image_seq not in image_dict: image_dict[image_seq]=[block_id] else: image_dict[image_seq].append(block_id) log(group_pairs, 'Layout blocks found at PEBs: %s' % list(image_dict.values())) return list(image_dict.values())
Sort a list of layout blocks into pairs Arguments: List:blocks -- List of block objects List:layout_blocks -- List of layout block indexes Returns: List -- Layout block pair indexes grouped in a list
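The grouping step above is a dict-of-lists accumulation keyed on image_seq; a stub version with SimpleNamespace objects standing in for the ubireader block objects:

from types import SimpleNamespace

blocks = {
    0: SimpleNamespace(ec_hdr=SimpleNamespace(image_seq=7)),
    1: SimpleNamespace(ec_hdr=SimpleNamespace(image_seq=9)),
    2: SimpleNamespace(ec_hdr=SimpleNamespace(image_seq=7)),
}
image_dict = {}
for block_id in [0, 1, 2]:
    seq = blocks[block_id].ec_hdr.image_seq
    image_dict.setdefault(seq, []).append(block_id)
print(list(image_dict.values()))  # [[0, 2], [1]] -- layout block pairs by image_seq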
def _adjust_to_origin(arg, origin, unit): """ Helper function for to_datetime. Adjust input argument to the specified origin Parameters ---------- arg : list, tuple, ndarray, Series, Index date to be adjusted origin : 'julian' or Timestamp origin offset for the arg unit : string passed unit from to_datetime, must be 'D' Returns ------- ndarray or scalar of adjusted date(s) """ if origin == 'julian': original = arg j0 = Timestamp(0).to_julian_date() if unit != 'D': raise ValueError("unit must be 'D' for origin='julian'") try: arg = arg - j0 except TypeError: raise ValueError("incompatible 'arg' type for given " "'origin'='julian'") # preemptively check this for a nice range j_max = Timestamp.max.to_julian_date() - j0 j_min = Timestamp.min.to_julian_date() - j0 if np.any(arg > j_max) or np.any(arg < j_min): raise tslibs.OutOfBoundsDatetime( "{original} is Out of Bounds for " "origin='julian'".format(original=original)) else: # arg must be numeric if not ((is_scalar(arg) and (is_integer(arg) or is_float(arg))) or is_numeric_dtype(np.asarray(arg))): raise ValueError( "'{arg}' is not compatible with origin='{origin}'; " "it must be numeric with a unit specified ".format( arg=arg, origin=origin)) # we are going to offset back to unix / epoch time try: offset = Timestamp(origin) except tslibs.OutOfBoundsDatetime: raise tslibs.OutOfBoundsDatetime( "origin {origin} is Out of Bounds".format(origin=origin)) except ValueError: raise ValueError("origin {origin} cannot be converted " "to a Timestamp".format(origin=origin)) if offset.tz is not None: raise ValueError( "origin offset {} must be tz-naive".format(offset)) offset -= Timestamp(0) # convert the offset to the unit of the arg # this should be lossless in terms of precision offset = offset // tslibs.Timedelta(1, unit=unit) # scalars & ndarray-like can handle the addition if is_list_like(arg) and not isinstance( arg, (ABCSeries, ABCIndexClass, np.ndarray)): arg = np.asarray(arg) arg = arg + offset return arg
Helper function for to_datetime. Adjust input argument to the specified origin Parameters ---------- arg : list, tuple, ndarray, Series, Index date to be adjusted origin : 'julian' or Timestamp origin offset for the arg unit : string passed unit from to_datetime, must be 'D' Returns ------- ndarray or scalar of adjusted date(s)
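In pandas this helper is reached through to_datetime; a hedged usage sketch (Julian date 2451544.5 is midnight on 2000-01-01, and unit must be 'D'):

import pandas as pd

ts = pd.to_datetime(2451544.5, unit='D', origin='julian')
print(ts)  # Timestamp('2000-01-01 00:00:00')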
from contextlib import contextmanager from functools import reduce @contextmanager def resolved_row(objs, geomatcher): """Temporarily insert ``RoW`` into ``geomatcher.topology``, defined by the topo faces not used in ``objs``. Will overwrite any existing ``RoW``. On exiting the context manager, ``RoW`` is deleted.""" def get_locations(lst): for elem in lst: try: yield elem['location'] except TypeError: yield elem geomatcher['RoW'] = geomatcher.faces.difference( reduce( set.union, [geomatcher[obj] for obj in get_locations(objs)] ) ) yield geomatcher del geomatcher['RoW']
Temporarily insert ``RoW`` into ``geomatcher.topology``, defined by the topo faces not used in ``objs``. Will overwrite any existing ``RoW``. On exiting the context manager, ``RoW`` is deleted.
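The insert-then-delete pattern above is contextlib's generator-based context manager idiom; a minimal standalone sketch with a plain dict in place of the geomatcher object:

from contextlib import contextmanager

@contextmanager
def temporary_key(mapping, key, value):
    # insert on entry, always remove on exit
    mapping[key] = value
    try:
        yield mapping
    finally:
        del mapping[key]

topology = {'DE': {1, 2}}
with temporary_key(topology, 'RoW', {3, 4}) as topo:
    print('RoW' in topo)       # True
print('RoW' in topology)       # False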
def _quadrature_expectation(p, obj1, feature1, obj2, feature2, num_gauss_hermite_points): """ Handling of quadrature expectations for Markov Gaussians (useful for time series) Fallback method for missing analytic expectations wrt Markov Gaussians Nota Bene: obj1 is always associated with x_n, whereas obj2 always with x_{n+1} if one requires e.g. <x_{n+1} K_{x_n, Z}>_p(x_{n:n+1}), compute the transpose and then transpose the result of the expectation """ num_gauss_hermite_points = 40 if num_gauss_hermite_points is None else num_gauss_hermite_points if obj2 is None: eval_func = lambda x: get_eval_func(obj1, feature1)(x) mu, cov = p.mu[:-1], p.cov[0, :-1] # cross covariances are not needed elif obj1 is None: eval_func = lambda x: get_eval_func(obj2, feature2)(x) mu, cov = p.mu[1:], p.cov[0, 1:] # cross covariances are not needed else: eval_func = lambda x: (get_eval_func(obj1, feature1, np.s_[:, :, None])(tf.split(x, 2, 1)[0]) * get_eval_func(obj2, feature2, np.s_[:, None, :])(tf.split(x, 2, 1)[1])) mu = tf.concat((p.mu[:-1, :], p.mu[1:, :]), 1) # Nx2D cov_top = tf.concat((p.cov[0, :-1, :, :], p.cov[1, :-1, :, :]), 2) # NxDx2D cov_bottom = tf.concat((tf.matrix_transpose(p.cov[1, :-1, :, :]), p.cov[0, 1:, :, :]), 2) cov = tf.concat((cov_top, cov_bottom), 1) # Nx2Dx2D return mvnquad(eval_func, mu, cov, num_gauss_hermite_points)
Handling of quadrature expectations for Markov Gaussians (useful for time series) Fallback method for missing analytic expectations wrt Markov Gaussians Nota Bene: obj1 is always associated with x_n, whereas obj2 always with x_{n+1} if one requires e.g. <x_{n+1} K_{x_n, Z}>_p(x_{n:n+1}), compute the transpose and then transpose the result of the expectation
def ml_acr(tree, character, prediction_method, model, states, avg_br_len, num_nodes, num_tips, freqs=None, sf=None, kappa=None, force_joint=True): """ Calculates ML states on the tree and stores them in the corresponding feature. :param states: numpy array of possible states :param prediction_method: str, MPPA (marginal approximation), MAP (max a posteriori) or JOINT :param tree: ete3.Tree, the tree of interest :param character: str, character for which the ML states are reconstructed :param model: str, evolutionary model, F81 (Felsenstein 81-like), JC (Jukes-Cantor-like) or EFT (estimate from tips) :param avg_br_len: float, average non-zero branch length of the tree. :param freqs: numpy array of predefined frequencies (or None if they are to be estimated) :param sf: float, predefined scaling factor (or None if it is to be estimated) :return: dict, mapping between reconstruction parameters and values """ n = len(states) state2index = dict(zip(states, range(n))) missing_data = 0. observed_frequencies = np.zeros(n, np.float64) for _ in tree: state = getattr(_, character, set()) if state: num_node_states = len(state) for _ in state: observed_frequencies[state2index[_]] += 1. / num_node_states else: missing_data += 1 total_count = observed_frequencies.sum() + missing_data observed_frequencies /= observed_frequencies.sum() missing_data /= total_count logger = logging.getLogger('pastml') logger.debug('Observed frequencies for {}:{}{}.' .format(character, ''.join('\n\tfrequency of {}:\t{:.3f}'.format(state, observed_frequencies[ state2index[state]]) for state in states), '\n\tfraction of missing data:\t{:.3f}'.format( missing_data) if missing_data else '')) if freqs is not None and model not in {F81, HKY}: logging.warning('Some frequencies were specified in the parameter file, ' 'but the selected model ({}) ignores them. ' 'Use F81 (or HKY for nucleotide characters only) ' 'for taking user-specified frequencies into account.'.format(model)) optimise_frequencies = model in {F81, HKY} and freqs is None if JTT == model: frequencies = JTT_FREQUENCIES elif EFT == model: frequencies = observed_frequencies elif model in {F81, HKY} and freqs is not None: frequencies = freqs else: frequencies = np.ones(n, dtype=np.float64) / n initialize_allowed_states(tree, character, states) alter_zero_tip_allowed_states(tree, character) if sf: optimise_sf = False else: sf = 1. / avg_br_len optimise_sf = True if HKY == model: if kappa: optimise_kappa = False else: optimise_kappa = True kappa = 4. else: optimise_kappa = False likelihood = get_bottom_up_likelihood(tree=tree, character=character, frequencies=frequencies, sf=sf, kappa=kappa, is_marginal=True, model=model) if not optimise_sf and not optimise_frequencies and not optimise_kappa: logger.debug('All the parameters are fixed for {}:{}{}{}{}.' .format(character, ''.join('\n\tfrequency of {}:\t{:.3f}'.format(state, frequencies[ state2index[state]]) for state in states), '\n\tSF:\t{:.3f}, i.e. {:.3f} changes per avg branch' .format(sf, sf * avg_br_len), '\n\tkappa:\t{:.3f}'.format(kappa) if HKY == model else '', '\n\tlog likelihood:\t{:.3f}'.format(likelihood))) else: logger.debug('Initial values for {} parameter optimisation:{}{}{}{}.' .format(character, ''.join('\n\tfrequency of {}:\t{:.3f}'.format(state, frequencies[ state2index[state]]) for state in states), '\n\tSF:\t{:.3f}, i.e. 
{:.3f} changes per avg branch' .format(sf, sf * avg_br_len), '\n\tkappa:\t{:.3f}'.format(kappa) if HKY == model else '', '\n\tlog likelihood:\t{:.3f}'.format(likelihood))) if optimise_sf: (_, sf, _), likelihood = optimize_likelihood_params(tree=tree, character=character, frequencies=frequencies, sf=sf, kappa=kappa, optimise_frequencies=False, optimise_sf=optimise_sf, optimise_kappa=False, avg_br_len=avg_br_len, model=model) if optimise_frequencies or optimise_kappa: logger.debug('Pre-optimised SF for {}:{}{}.' .format(character, '\n\tSF:\t{:.3f}, i.e. {:.3f} changes per avg branch' .format(sf, sf * avg_br_len), '\n\tlog likelihood:\t{:.3f}'.format(likelihood))) if optimise_frequencies or optimise_kappa: (frequencies, sf, kappa), likelihood = \ optimize_likelihood_params(tree=tree, character=character, frequencies=frequencies, sf=sf, kappa=kappa, optimise_frequencies=optimise_frequencies, optimise_sf=optimise_sf, optimise_kappa=optimise_kappa, avg_br_len=avg_br_len, model=model) logger.debug('Optimised {} values:{}{}{}{}' .format(character, ''.join('\n\tfrequency of {}:\t{:.3f}'.format(state, frequencies[ state2index[state]]) for state in states) if optimise_frequencies else '', '\n\tSF:\t{:.3f}, i.e. {:.3f} changes per avg branch' .format(sf, sf * avg_br_len), '\n\tkappa:\t{:.3f}'.format(kappa) if HKY == model else '', '\n\tlog likelihood:\t{:.3f}'.format(likelihood))) result = {LOG_LIKELIHOOD: likelihood, CHARACTER: character, METHOD: prediction_method, MODEL: model, FREQUENCIES: frequencies, SCALING_FACTOR: sf, CHANGES_PER_AVG_BRANCH: sf * avg_br_len, STATES: states, NUM_NODES: num_nodes, NUM_TIPS: num_tips} if HKY == model: result[KAPPA] = kappa results = [] def process_reconstructed_states(method): if method == prediction_method or is_meta_ml(prediction_method): method_character = get_personalized_feature_name(character, method) \ if prediction_method != method else character convert_allowed_states2feature(tree, character, states, method_character) res = result.copy() res[CHARACTER] = method_character res[METHOD] = method results.append(res) def process_restricted_likelihood_and_states(method): alter_zero_tip_allowed_states(tree, character) restricted_likelihood = get_bottom_up_likelihood(tree=tree, character=character, frequencies=frequencies, sf=sf, kappa=kappa, is_marginal=True, model=model) unalter_zero_tip_allowed_states(tree, character, state2index) note_restricted_likelihood(method, restricted_likelihood) process_reconstructed_states(method) def note_restricted_likelihood(method, restricted_likelihood): logger.debug('Log likelihood for {} after {} state selection:\t{:.3f}' .format(character, method, restricted_likelihood)) result[RESTRICTED_LOG_LIKELIHOOD_FORMAT_STR.format(method)] = restricted_likelihood if prediction_method != MAP: # Calculate joint restricted likelihood restricted_likelihood = get_bottom_up_likelihood(tree=tree, character=character, frequencies=frequencies, sf=sf, kappa=kappa, is_marginal=False, model=model) note_restricted_likelihood(JOINT, restricted_likelihood) unalter_zero_tip_joint_states(tree, character, state2index) choose_ancestral_states_joint(tree, character, states, frequencies) process_reconstructed_states(JOINT) if is_marginal(prediction_method): initialize_allowed_states(tree, character, states) alter_zero_tip_allowed_states(tree, character) get_bottom_up_likelihood(tree=tree, character=character, frequencies=frequencies, sf=sf, kappa=kappa, is_marginal=True, model=model) calculate_top_down_likelihood(tree, character, frequencies, sf, 
kappa=kappa, model=model) unalter_zero_tip_allowed_states(tree, character, state2index) calculate_marginal_likelihoods(tree, character, frequencies) # check_marginal_likelihoods(tree, feature) result[MARGINAL_PROBABILITIES] = convert_likelihoods_to_probabilities(tree, character, states) choose_ancestral_states_map(tree, character, states) process_restricted_likelihood_and_states(MAP) if MPPA == prediction_method or is_meta_ml(prediction_method): if ALL == prediction_method: pars_acr_results = parsimonious_acr(tree, character, MP, states, num_nodes, num_tips) results.extend(pars_acr_results) for pars_acr_res in pars_acr_results: _parsimonious_states2allowed_states(tree, pars_acr_res[CHARACTER], character, state2index) alter_zero_tip_allowed_states(tree, character) restricted_likelihood = get_bottom_up_likelihood(tree=tree, character=character, frequencies=frequencies, sf=sf, kappa=kappa, is_marginal=True, model=model) note_restricted_likelihood(pars_acr_res[METHOD], restricted_likelihood) result[NUM_SCENARIOS], result[NUM_UNRESOLVED_NODES], result[NUM_STATES_PER_NODE] = \ choose_ancestral_states_mppa(tree, character, states, force_joint=force_joint) result[NUM_STATES_PER_NODE] /= num_nodes result[PERC_UNRESOLVED] = result[NUM_UNRESOLVED_NODES] * 100 / num_nodes logger.debug('{} node{} unresolved ({:.2f}%) for {} by {}, ' 'i.e. {:.4f} state{} per node in average.' .format(result[NUM_UNRESOLVED_NODES], 's are' if result[NUM_UNRESOLVED_NODES] != 1 else ' is', result[PERC_UNRESOLVED], character, MPPA, result[NUM_STATES_PER_NODE], 's' if result[NUM_STATES_PER_NODE] > 1 else '')) process_restricted_likelihood_and_states(MPPA) return results
Calculates ML states on the tree and stores them in the corresponding feature. :param states: numpy array of possible states :param prediction_method: str, MPPA (marginal approximation), MAP (max a posteriori) or JOINT :param tree: ete3.Tree, the tree of interest :param character: str, character for which the ML states are reconstructed :param model: str, evolutionary model, F81 (Felsenstein 81-like), JC (Jukes-Cantor-like) or EFT (estimate from tips) :param avg_br_len: float, average non-zero branch length of the tree. :param freqs: numpy array of predefined frequencies (or None if they are to be estimated) :param sf: float, predefined scaling factor (or None if it is to be estimated) :return: dict, mapping between reconstruction parameters and values
def configure_root(): """Configure the root logger.""" root_logger = logging.getLogger() # clear any existing handles to streams because we don't want duplicate logs # NOTE: we assume that any stream handles we find are to ROOT_LOG_STREAM, which is usually the case(because it is stdout). This is fine because we will be re-creating that handle. Otherwise we might be deleting a handle that won't be re-created, which could result in dropped logs. for hdlr in root_logger.handlers: if isinstance(hdlr, logging.StreamHandler): root_logger.removeHandler(hdlr) # configure the root logger root_logger.setLevel(ROOT_LOG_LEVEL) hdlr = logging.StreamHandler(ROOT_LOG_STREAM) formatter = colorlog.ColoredFormatter( '%(purple)s%(name)-10s %(log_color)s%(levelname)-8s%(reset)s %(white)s%(message)s', reset=True, log_colors={ 'DEBUG': 'cyan', 'INFO': 'green', 'WARNING': 'yellow', 'ERROR': 'red', 'CRITICAL': 'red,bg_white', } ) hdlr.setFormatter(formatter) root_logger.addHandler(hdlr)
Configure the root logger.
def video_l1_loss(top_out, targets, model_hparams, vocab_size, weights_fn): """Compute loss numerator and denominator for one shard of output.""" del vocab_size # unused arg logits = top_out logits = tf.reshape(logits, [-1] + common_layers.shape_list(logits)[2:-1]) targets = tf.reshape(targets, [-1] + common_layers.shape_list(targets)[2:]) weights = weights_fn(targets) # Shift targets by 0.5 so later just casting to int gives the prediction. # So for int targets, say 0 and 7, we actually train to predict 0.5 and 7.5. # Later (in metrics or infer) this is cast to int anyway. Also, we have no # loss beyond cutoff = 0.2 as these are already correct predictions. targets = tf.to_float(targets) + 0.5 loss = video_l1_internal_loss(logits, targets, model_hparams) return tf.reduce_sum(loss * weights), tf.reduce_sum(weights)
Compute loss numerator and denominator for one shard of output.
def luminosity_within_ellipse_in_units(self, major_axis : dim.Length, unit_luminosity='eps', kpc_per_arcsec=None, exposure_time=None): """Compute the total luminosity of the galaxy's light profiles, within an ellipse of specified major axis. This is performed via integration of each light profile and is centred, oriented and aligned with each light model's individual geometry. See *light_profiles.luminosity_within_ellipse* for details of how this is performed. Parameters ---------- major_axis : float The major-axis radius of the ellipse. unit_luminosity : str The units the luminosity is returned in (eps | counts). kpc_per_arcsec : float The conversion factor between arc-seconds and kpc, required for unit conversions. exposure_time : float The exposure time of the observation, which converts luminosity from electrons per second units to counts. """ if self.has_light_profile: return sum(map(lambda p: p.luminosity_within_ellipse_in_units(major_axis=major_axis, unit_luminosity=unit_luminosity, kpc_per_arcsec=kpc_per_arcsec, exposure_time=exposure_time), self.light_profiles)) else: return None
Compute the total luminosity of the galaxy's light profiles, within an ellipse of specified major axis. This is performed via integration of each light profile and is centred, oriented and aligned with each light model's individual geometry. See *light_profiles.luminosity_within_ellipse* for details of how this is performed. Parameters ---------- major_axis : float The major-axis radius of the ellipse. unit_luminosity : str The units the luminosity is returned in (eps | counts). kpc_per_arcsec : float The conversion factor between arc-seconds and kpc, required for unit conversions. exposure_time : float The exposure time of the observation, which converts luminosity from electrons per second units to counts.
def _build_epsf_step(self, stars, epsf=None): """ A single iteration of improving an ePSF. Parameters ---------- stars : `EPSFStars` object The stars used to build the ePSF. epsf : `EPSFModel` object, optional The initial ePSF model. If not input, then the ePSF will be built from scratch. Returns ------- epsf : `EPSFModel` object The updated ePSF. """ if len(stars) < 1: raise ValueError('stars must contain at least one EPSFStar or ' 'LinkedEPSFStar object.') if epsf is None: # create an initial ePSF (array of zeros) epsf = self._create_initial_epsf(stars) else: # improve the input ePSF epsf = copy.deepcopy(epsf) # compute a 3D stack of 2D residual images residuals = self._resample_residuals(stars, epsf) self._residuals.append(residuals) # compute the sigma-clipped median along the 3D stack with warnings.catch_warnings(): warnings.simplefilter('ignore', category=RuntimeWarning) warnings.simplefilter('ignore', category=AstropyUserWarning) residuals = self.sigclip(residuals, axis=0, masked=False, return_bounds=False) if HAS_BOTTLENECK: residuals = bottleneck.nanmedian(residuals, axis=0) else: residuals = np.nanmedian(residuals, axis=0) self._residuals_sigclip.append(residuals) # interpolate any missing data (np.nan) mask = ~np.isfinite(residuals) if np.any(mask): residuals = _interpolate_missing_data(residuals, mask, method='cubic') # fill any remaining nans (outer points) with zeros residuals[~np.isfinite(residuals)] = 0. self._residuals_interp.append(residuals) # add the residuals to the previous ePSF image new_epsf = epsf.normalized_data + residuals # smooth the ePSF new_epsf = self._smooth_epsf(new_epsf) # recenter the ePSF new_epsf = self._recenter_epsf(new_epsf, epsf, centroid_func=self.recentering_func, box_size=self.recentering_boxsize, maxiters=self.recentering_maxiters, center_accuracy=1.0e-4) # normalize the ePSF data new_epsf /= np.sum(new_epsf, dtype=np.float64) # return the new ePSF object xcenter = (new_epsf.shape[1] - 1) / 2. ycenter = (new_epsf.shape[0] - 1) / 2. epsf_new = EPSFModel(data=new_epsf, origin=(xcenter, ycenter), normalize=False, oversampling=epsf.oversampling) return epsf_new
A single iteration of improving an ePSF. Parameters ---------- stars : `EPSFStars` object The stars used to build the ePSF. epsf : `EPSFModel` object, optional The initial ePSF model. If not input, then the ePSF will be built from scratch. Returns ------- epsf : `EPSFModel` object The updated ePSF.
def connect(self, protocol_factory): """ Connect the given C{protocol_factory} to the AMQP broker specified by the URI of this endpoint. @param protocol_factory: An L{AMQFactory} building L{AMQClient} objects. @return: A L{Deferred} that results in an L{AMQClient} upon successful connection otherwise a L{Failure} wrapping L{ConnectError} or L{NoProtocol <twisted.internet.error.NoProtocol>}. """ # XXX Since AMQClient requires these parameters at __init__ time, we # need to override them in the provided factory. protocol_factory.set_vhost(self._vhost) protocol_factory.set_heartbeat(self._heartbeat) description = "tcp:{}:{}:timeout={}".format( self._host, self._port, self._timeout) endpoint = clientFromString(self._reactor, description) deferred = endpoint.connect(protocol_factory) return deferred.addCallback(self._authenticate)
Connect the given C{protocol_factory} to the AMQP broker specified by the URI of this endpoint. @param protocol_factory: An L{AMQFactory} building L{AMQClient} objects. @return: A L{Deferred} that results in an L{AMQClient} upon successful connection otherwise a L{Failure} wrapping L{ConnectError} or L{NoProtocol <twisted.internet.error.NoProtocol>}.
def step_until_intersect(pos, field_line, sign, time, direction=None, step_size_goal=5., field_step_size=None): """Starting at pos, method steps along magnetic unit vector direction towards the supplied field line trace. Determines the distance of closest approach to field line. Routine is used when calculating the mapping of electric fields along magnetic field lines. Voltage remains constant along the field but the distance between field lines does not. This routine may be used to form the last leg when trying to trace out a closed field line loop. Routine will create a high resolution field line trace (.01 km step size) near the location of closest approach to better determine where the intersection occurs. Parameters ---------- pos : array-like X, Y, and Z ECEF locations to start from field_line : array-like (:,3) X, Y, and Z ECEF locations of field line trace, produced by the field_line_trace method. sign : int if 1, move along positive unit vector. Negative direction for -1. time : datetime or float Date to perform tracing on (year + day/365 + hours/24. + etc.) Accounts for leap year if datetime provided. direction : string ('meridional', 'zonal', or 'aligned') Which unit vector direction to move along when trying to intersect with supplied field line trace. See step_along_mag_unit_vector method for more. step_size_goal : float step size goal that method will try to match when stepping towards field line. Returns ------- (float, array, float) Total distance taken along vector direction; the position after taking the step [x, y, z] in ECEF; distance of closest approach from input pos towards the input field line trace. """ # work on a copy, probably not needed field_copy = field_line # set a high last minimum distance to ensure first loop does better than this last_min_dist = 2500000. # scalar is the distance along unit vector line that we are taking scalar = 0.
# repeat boolean repeat=True # first run boolean first=True # factor is a divisor applied to the remaining distance between point and field line # I slowly take steps towards the field line and I don't want to overshoot # each time my minimum distance increases, I step back, increase factor, reducing # my next step size, then I try again factor = 1 while repeat: # take a total step along magnetic unit vector # try to take steps near user provided step_size_goal unit_steps = np.abs(scalar//step_size_goal) if unit_steps == 0: unit_steps = 1 # print (unit_steps, scalar/unit_steps) pos_step = step_along_mag_unit_vector(pos[0], pos[1], pos[2], time, direction=direction, num_steps=unit_steps, step_size=np.abs(scalar)/unit_steps, scalar=sign) # find closest point along field line trace diff = field_copy - pos_step diff_mag = np.sqrt((diff ** 2).sum(axis=1)) min_idx = np.argmin(diff_mag) if first: # first time in while loop, create some information # make a high resolution field line trace around closest distance # want to take a field step size in each direction # maintain accuracy of high res trace below to be .01 km init = field_copy[min_idx,:] field_copy = full_field_line(init, time, 0., step_size=0.01, max_steps=int(field_step_size/.01), recurse=False) # difference with position diff = field_copy - pos_step diff_mag = np.sqrt((diff ** 2).sum(axis=1)) # find closest one min_idx = np.argmin(diff_mag) # # reduce number of elements we really need to check # field_copy = field_copy[min_idx-100:min_idx+100] # # difference with position # diff = field_copy - pos_step # diff_mag = np.sqrt((diff ** 2).sum(axis=1)) # # find closest one # min_idx = np.argmin(diff_mag) first = False # pull out distance of closest point min_dist = diff_mag[min_idx] # check how the solution is doing # if well, add more distance to the total step and recheck if closer # if worse, step back and try a smaller step if min_dist > last_min_dist: # last step we took made the solution worse if factor > 4: # we've tried enough, stop looping repeat = False # undo increment to last total distance scalar = scalar - last_min_dist/(2*factor) # calculate latest position pos_step = step_along_mag_unit_vector(pos[0], pos[1], pos[2], time, direction=direction, num_steps=unit_steps, step_size=np.abs(scalar)/unit_steps, scalar=sign) else: # undo increment to last total distance scalar = scalar - last_min_dist/(2*factor) # increase the divisor used to reduce the distance # actually stepped per increment factor = factor + 1. # try a new increment to total distance scalar = scalar + last_min_dist/(2*factor) else: # we did better, move even closer, a fraction of remaining distance # increment scalar, but only by a fraction scalar = scalar + min_dist/(2*factor) # we have a new standard to judge against, set it last_min_dist = min_dist.copy() # return magnitude of step return scalar, pos_step, min_dist
Starting at pos, method steps along magnetic unit vector direction towards the supplied field line trace. Determines the distance of closest approach to field line. Routine is used when calculating the mapping of electric fields along magnetic field lines. Voltage remains constant along the field but the distance between field lines does not. This routine may be used to form the last leg when trying to trace out a closed field line loop. Routine will create a high resolution field line trace (.01 km step size) near the location of closest approach to better determine where the intersection occurs. Parameters ---------- pos : array-like X, Y, and Z ECEF locations to start from field_line : array-like (:,3) X, Y, and Z ECEF locations of field line trace, produced by the field_line_trace method. sign : int if 1, move along positive unit vector. Negative direction for -1. time : datetime or float Date to perform tracing on (year + day/365 + hours/24. + etc.) Accounts for leap year if datetime provided. direction : string ('meridional', 'zonal', or 'aligned') Which unit vector direction to move along when trying to intersect with supplied field line trace. See step_along_mag_unit_vector method for more. step_size_goal : float step size goal that method will try to match when stepping towards field line. Returns ------- (float, array, float) Total distance taken along vector direction; the position after taking the step [x, y, z] in ECEF; distance of closest approach from input pos towards the input field line trace.
def register_metric(self, name, metric, time_bucket_in_sec): """Registers a given metric :param name: name of the metric :param metric: IMetric object to be registered :param time_bucket_in_sec: time interval for update to the metrics manager """ if name in self.metrics_map: raise RuntimeError("Another metric has already been registered with name: %s" % name) Log.debug("Register metric: %s, with interval: %s", name, str(time_bucket_in_sec)) self.metrics_map[name] = metric if time_bucket_in_sec in self.time_bucket_in_sec_to_metrics_name: self.time_bucket_in_sec_to_metrics_name[time_bucket_in_sec].append(name) else: self.time_bucket_in_sec_to_metrics_name[time_bucket_in_sec] = [name] self._register_timer_task(time_bucket_in_sec)
Registers a given metric :param name: name of the metric :param metric: IMetric object to be registered :param time_bucket_in_sec: time interval for update to the metrics manager
def find_num_contigs(contig_lengths_dict): """ Count the total number of contigs for each strain :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths :return: num_contigs_dict: dictionary of strain name: total number of contigs """ # Initialise the dictionary num_contigs_dict = dict() for file_name, contig_lengths in contig_lengths_dict.items(): # Use the len() method to count the number of entries in the list num_contigs_dict[file_name] = len(contig_lengths) return num_contigs_dict
Count the total number of contigs for each strain :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths :return: num_contigs_dict: dictionary of strain name: total number of contigs
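The same counting step as a dict comprehension, using a toy strain mapping:

contig_lengths_dict = {'strainA': [900, 450, 100], 'strainB': [1200]}
num_contigs_dict = {name: len(lengths)
                    for name, lengths in contig_lengths_dict.items()}
print(num_contigs_dict)  # {'strainA': 3, 'strainB': 1}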
def prettyMatcherList(things): """Try to construct a nicely-formatted string for a list of matcher objects. Those may be compiled regular expressions or strings...""" norm = [] for x in makeSequence(things): if hasattr(x, 'pattern'): norm.append(x.pattern) else: norm.append(x) return "('%s')" % "', '".join(norm)
Try to construct a nicely-formatted string for a list of matcher objects. Those may be compiled regular expressions or strings...
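The pattern/str duck-typing check in isolation; the real function also normalises scalars through makeSequence, which is omitted here:

import re

things = [re.compile(r'\.mp3$'), 'cover.jpg']
# compiled patterns expose .pattern, plain strings pass through unchanged
norm = [x.pattern if hasattr(x, 'pattern') else x for x in things]
print("('%s')" % "', '".join(norm))  # ('\.mp3$', 'cover.jpg')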
def has_pubmed(edge_data: EdgeData) -> bool: """Check if the edge has a PubMed citation.""" return CITATION in edge_data and CITATION_TYPE_PUBMED == edge_data[CITATION][CITATION_TYPE]
Check if the edge has a PubMed citation.
def sigint_handler(self, signum: int, frame) -> None: """Signal handler for SIGINTs which typically come from Ctrl-C events. If you need custom SIGINT behavior, then override this function. :param signum: signal number :param frame """ if self.cur_pipe_proc_reader is not None: # Pass the SIGINT to the current pipe process self.cur_pipe_proc_reader.send_sigint() # Check if we are allowed to re-raise the KeyboardInterrupt if not self.sigint_protection: raise KeyboardInterrupt("Got a keyboard interrupt")
Signal handler for SIGINTs which typically come from Ctrl-C events. If you need custom SIGINT behavior, then override this function. :param signum: signal number :param frame
def __encode_items(self, items): """Encodes the InvoiceItems into a JSON serializable format items = [('item_1',InvoiceItem(name='VIP Ticket', quantity=2, unit_price='3500', total_price='7000', description='VIP Tickets for party')),...] """ xs = [item._asdict() for (_key, item) in items.items()] return list(map(lambda x: dict(zip(x.keys(), x.values())), xs))
Encodes the InvoiceItems into a JSON serializable format items = [('item_1',InvoiceItem(name='VIP Ticket', quantity=2, unit_price='3500', total_price='7000', description='VIP Tickets for party')),...]
def hide_arp_holder_arp_entry_mac_address_value(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") hide_arp_holder = ET.SubElement(config, "hide-arp-holder", xmlns="urn:brocade.com:mgmt:brocade-arp") arp_entry = ET.SubElement(hide_arp_holder, "arp-entry") arp_ip_address_key = ET.SubElement(arp_entry, "arp-ip-address") arp_ip_address_key.text = kwargs.pop('arp_ip_address') mac_address_value = ET.SubElement(arp_entry, "mac-address-value") mac_address_value.text = kwargs.pop('mac_address_value') callback = kwargs.pop('callback', self._callback) return callback(config)
Auto Generated Code
def alexnet(pretrained=False, ctx=cpu(), root=os.path.join(base.data_dir(), 'models'), **kwargs): r"""AlexNet model from the `"One weird trick..." <https://arxiv.org/abs/1404.5997>`_ paper. Parameters ---------- pretrained : bool, default False Whether to load the pretrained weights for model. ctx : Context, default CPU The context in which to load the pretrained weights. root : str, default $MXNET_HOME/models Location for keeping the model parameters. """ net = AlexNet(**kwargs) if pretrained: from ..model_store import get_model_file net.load_parameters(get_model_file('alexnet', root=root), ctx=ctx) return net
r"""AlexNet model from the `"One weird trick..." <https://arxiv.org/abs/1404.5997>`_ paper. Parameters ---------- pretrained : bool, default False Whether to load the pretrained weights for model. ctx : Context, default CPU The context in which to load the pretrained weights. root : str, default $MXNET_HOME/models Location for keeping the model parameters.
def plot_series(self, xres, varied_data, varied_idx, **kwargs): """ Plots the results from :meth:`solve_series`. Parameters ---------- xres : array Of shape ``(varied_data.size, self.nx)``. varied_data : array See :meth:`solve_series`. varied_idx : int or str See :meth:`solve_series`. \\*\\*kwargs : Keyword arguments passed to :func:`pyneqsys.plotting.plot_series`. """ for attr in 'names latex_names'.split(): if kwargs.get(attr, None) is None: kwargs[attr] = getattr(self, attr) ax = plot_series(xres, varied_data, **kwargs) if self.par_by_name and isinstance(varied_idx, str): varied_idx = self.param_names.index(varied_idx) if self.latex_param_names: ax.set_xlabel('$%s$' % self.latex_param_names[varied_idx]) elif self.param_names: ax.set_xlabel(self.param_names[varied_idx]) return ax
Plots the results from :meth:`solve_series`. Parameters ---------- xres : array Of shape ``(varied_data.size, self.nx)``. varied_data : array See :meth:`solve_series`. varied_idx : int or str See :meth:`solve_series`. \\*\\*kwargs : Keyword arguments passed to :func:`pyneqsys.plotting.plot_series`.
def ScanFiles(theFile, target, paths, file_tests, file_tests_search, env, graphics_extensions, targetdir, aux_files): """ For theFile (a Node) update any file_tests and search for graphics files then find all included files and call ScanFiles recursively for each of them""" content = theFile.get_text_contents() if Verbose: print(" scanning ",str(theFile)) for i in range(len(file_tests_search)): if file_tests[i][0] is None: if Verbose: print("scan i ",i," files_tests[i] ",file_tests[i], file_tests[i][1]) file_tests[i][0] = file_tests_search[i].search(content) if Verbose and file_tests[i][0]: print(" found match for ",file_tests[i][1][-1]) # for newglossary insert the suffixes in file_tests[i] if file_tests[i][0] and file_tests[i][1][-1] == 'newglossary': findresult = file_tests_search[i].findall(content) for l in range(len(findresult)) : (file_tests[i][1]).insert(0,'.'+findresult[l][3]) (file_tests[i][1]).insert(0,'.'+findresult[l][2]) (file_tests[i][1]).insert(0,'.'+findresult[l][0]) suffix_list = ['.'+findresult[l][0],'.'+findresult[l][2],'.'+findresult[l][3] ] newglossary_suffix.append(suffix_list) if Verbose: print(" new suffixes for newglossary ",newglossary_suffix) incResult = includeOnly_re.search(content) if incResult: aux_files.append(os.path.join(targetdir, incResult.group(1))) if Verbose: print("\include file names : ", aux_files) # recursively call this on each of the included files inc_files = [ ] inc_files.extend( include_re.findall(content) ) if Verbose: print("files included by '%s': "%str(theFile),inc_files) # inc_files is list of file names as given. need to find them # using TEXINPUTS paths. for src in inc_files: srcNode = FindFile(src,['.tex','.ltx','.latex'],paths,env,requireExt=False) if srcNode is not None: file_tests = ScanFiles(srcNode, target, paths, file_tests, file_tests_search, env, graphics_extensions, targetdir, aux_files) if Verbose: print(" done scanning ",str(theFile)) return file_tests
For theFile (a Node) update any file_tests and search for graphics files then find all included files and call ScanFiles recursively for each of them
def clear(self): """Removes all SSH keys from a user's system.""" r = self._h._http_resource( method='DELETE', resource=('user', 'keys'), ) return r.ok
Removes all SSH keys from a user's system.
def sbar_(self, stack_index=None, label=None, style=None, opts=None, options={}): """ Get a stacked bar chart """ self.opts(dict(stack_index=stack_index, color_index=stack_index)) try: if stack_index is None: self.err(self.sbar_, "Please provide a stack index parameter") options["stack_index"] = stack_index return self._get_chart("bar", style=style, opts=opts, label=label, options=options) except Exception as e: self.err(e, self.sbar_, "Can not draw stacked bar chart")
Get a stacked bar chart
def attached_partition(self): """ :class:`~zhmcclient.Partition`: The partition to which this virtual storage resource is attached. The returned partition object has only a minimal set of properties set ('object-id', 'object-uri', 'class', 'parent'). Note that a virtual storage resource is always attached to a partition, as long as it exists. Authorization requirements: * Object-access permission to the storage group owning this virtual storage resource. Raises: :exc:`~zhmcclient.HTTPError` :exc:`~zhmcclient.ParseError` :exc:`~zhmcclient.AuthError` :exc:`~zhmcclient.ConnectionError` """ if self._attached_partition is None: part_mgr = self.manager.storage_group.manager.cpc.partitions part = part_mgr.resource_object(self.get_property('partition-uri')) self._attached_partition = part return self._attached_partition
:class:`~zhmcclient.Partition`: The partition to which this virtual storage resource is attached. The returned partition object has only a minimal set of properties set ('object-id', 'object-uri', 'class', 'parent'). Note that a virtual storage resource is always attached to a partition, as long as it exists. Authorization requirements: * Object-access permission to the storage group owning this virtual storage resource. Raises: :exc:`~zhmcclient.HTTPError` :exc:`~zhmcclient.ParseError` :exc:`~zhmcclient.AuthError` :exc:`~zhmcclient.ConnectionError`
def prettify_xml(elem): """Return a pretty-printed XML string for the Element. """ from xml.dom import minidom import xml.etree.cElementTree as et rough_string = et.tostring(elem, 'utf-8') reparsed = minidom.parseString(rough_string) return reparsed.toprettyxml(indent=" ")
Return a pretty-printed XML string for the Element.
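A round-trip check using the plain ElementTree module (cElementTree is an alias of it on Python 3 and was removed in 3.9), assuming prettify_xml is in scope:

import xml.etree.ElementTree as et

root = et.Element('catalog')
et.SubElement(root, 'book', id='1').text = 'Dive Into Python'
print(prettify_xml(root))
# <?xml version="1.0" ?>
# <catalog>
#   <book id="1">Dive Into Python</book>
# </catalog>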
def validate_unique(self, *args, **kwargs): """Check whether more than one EighthSignup exists for a User on a given EighthBlock.""" super(EighthSignup, self).validate_unique(*args, **kwargs) if self.has_conflict(): raise ValidationError({NON_FIELD_ERRORS: ("EighthSignup already exists for the User and the EighthScheduledActivity's block",)})
Check whether more than one EighthSignup exists for a User on a given EighthBlock.
def DEFINE_multi_enum_class( # pylint: disable=invalid-name,redefined-builtin name, default, enum_class, help, flag_values=_flagvalues.FLAGS, module_name=None, **args): """Registers a flag whose value can be a list of enum members. Use the flag on the command line multiple times to place multiple enum values into the list. Args: name: str, the flag name. default: Union[Iterable[Enum], Iterable[Text], Enum, Text, None], the default value of the flag; see `DEFINE_multi`; only differences are documented here. If the value is a single Enum, it is treated as a single-item list of that Enum value. If it is an iterable, text values within the iterable will be converted to the equivalent Enum objects. enum_class: class, the Enum class with all the possible values for the flag. help: str, the help message. flag_values: FlagValues, the FlagValues instance with which the flag will be registered. This should almost never need to be overridden. module_name: A string, the name of the Python module declaring this flag. If not provided, it will be computed using the stack trace of this call. **args: Dictionary with extra keyword args that are passed to the Flag __init__. """ DEFINE_flag( _flag.MultiEnumClassFlag(name, default, help, enum_class), flag_values, module_name, **args)
Registers a flag whose value can be a list of enum members. Use the flag on the command line multiple times to place multiple enum values into the list. Args: name: str, the flag name. default: Union[Iterable[Enum], Iterable[Text], Enum, Text, None], the default value of the flag; see `DEFINE_multi`; only differences are documented here. If the value is a single Enum, it is treated as a single-item list of that Enum value. If it is an iterable, text values within the iterable will be converted to the equivalent Enum objects. enum_class: class, the Enum class with all the possible values for the flag. help: str, the help message. flag_values: FlagValues, the FlagValues instance with which the flag will be registered. This should almost never need to be overridden. module_name: A string, the name of the Python module declaring this flag. If not provided, it will be computed using the stack trace of this call. **args: Dictionary with extra keyword args that are passed to the Flag __init__.
def _fix_path(self, path): """ Paths are stored without trailing slash so we need to get rid of it if needed. Also mercurial keeps filenodes as str so we need to convert from unicode to str """ if path.endswith('/'): path = path.rstrip('/') return safe_str(path)
Paths are stored without trailing slash so we need to get rid of it if needed. Also mercurial keeps filenodes as str so we need to convert from unicode to str
def next_event(self, delete=False): """Go to next event.""" if delete: msg = "Delete this event? This cannot be undone." msgbox = QMessageBox(QMessageBox.Question, 'Delete event', msg) msgbox.setStandardButtons(QMessageBox.Yes | QMessageBox.No) msgbox.setDefaultButton(QMessageBox.Yes) response = msgbox.exec_() if response == QMessageBox.No: return event_sel = self.event_sel if event_sel is None: return notes = self.parent.notes if not self.current_event_row: row = notes.find_row(event_sel.marker.x(), event_sel.marker.x() + event_sel.marker.width()) else: row = self.current_event_row same_type = self.action['next_of_same_type'].isChecked() if same_type: target = notes.idx_annot_list.item(row, 2).text() if delete: notes.delete_row() msg = 'Deleted event from {} to {}.'.format(event_sel.marker.x(), event_sel.marker.x() + event_sel.marker.width()) self.parent.statusBar().showMessage(msg) row -= 1 if row + 1 == notes.idx_annot_list.rowCount(): return if not same_type: next_row = row + 1 else: next_row = None types = notes.idx_annot_list.property('name')[row + 1:] for i, ty in enumerate(types): if ty == target: next_row = row + 1 + i break if next_row is None: return self.current_event_row = next_row notes.go_to_marker(next_row, 0, 'annot') notes.idx_annot_list.setCurrentCell(next_row, 0)
Go to next event.
def save(self): """ Saves the settings contents """ content = self.dumps() fileutils.save_text_to_file(content, self.file_path)
Saves the settings contents
def prepend_name_scope(name, import_scope): """Prepends name scope to a name.""" # Based on tensorflow/python/framework/ops.py implementation. if import_scope: try: str_to_replace = r"([\^]|loc:@|^)(.*)" return re.sub(str_to_replace, r"\1" + import_scope + r"/\2", tf.compat.as_str_any(name)) except TypeError as e: # If the name is not of a type we can process, simply return it. logging.warning(e) return name else: return name
Prepends name scope to a name.
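The substitution in isolation, with plain str in place of tf.compat.as_str_any: the first group preserves control-dependency markers ('^...') and colocation prefixes ('loc:@') outside the inserted scope. The scope name here is a hypothetical example:

import re

pattern = r"([\^]|loc:@|^)(.*)"
import_scope = "import"  # hypothetical scope name
print(re.sub(pattern, r"\1" + import_scope + r"/\2", "^dense/kernel"))
# ^import/dense/kernel
print(re.sub(pattern, r"\1" + import_scope + r"/\2", "dense/bias"))
# import/dense/bias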
def to_model(self): """Return a bravado-core Error instance""" e = ApiPool().current_server_api.model.Error( status=self.status, error=self.code.upper(), error_description=str(self), ) if self.error_id: e.error_id = self.error_id if self.user_message: e.user_message = self.user_message if self.error_caught: e.error_caught = pformat(self.error_caught) return e
Return a bravado-core Error instance