code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
def one_version(self, index=0):
    '''
    Leaves only one version for each object.

    :param index: List-like index of the version. 0 == first; -1 == last
    '''
    def _select_version(group):
        # Pick the row(s) whose _start timestamp sits at `index` in sorted order.
        chosen_start = sorted(group._start.tolist())[index]
        return group[group._start == chosen_start]

    per_object = (grp for _, grp in self.groupby(self._oid))
    return pd.concat([_select_version(grp) for grp in per_object])
Leaves only one version for each object. :param index: List-like index of the version. 0 == first; -1 == last
def items(self):
    """
    A generator yielding ``(key, value)`` attribute pairs, sorted by key name.
    """
    attrs = self.attrs
    for name in sorted(attrs):
        yield name, attrs[name]
A generator yielding ``(key, value)`` attribute pairs, sorted by key name.
def expandPath(self, path):
    """ Follows the path and expand all nodes along the way.

        Returns (item, index) tuple of the last node in the path (the leaf
        node). This can be reused e.g. to select it.
    """
    itemIndexPairs = self.model().findItemAndIndexPath(path)
    # Position 0 is the invisible root; expand every remaining node in order.
    for item, index in itemIndexPairs[1:]:
        assert index.isValid(), \
            "Sanity check: invalid index in path for item: {}".format(item)
        self.expand(index)
    return itemIndexPairs[-1]
Follows the path and expand all nodes along the way. Returns (item, index) tuple of the last node in the path (the leaf node). This can be reused e.g. to select it.
def __init_object(self):
    """Create a new object for the pool and enqueue it."""
    # The pool can only be filled when the user supplied an initialization
    # function; without one there is no way to construct pool members.
    if self.init_function is None:
        raise TypeError("The Pool must have a non None function to fill the pool.")
    self.__enqueue(self.init_function())
Create a new object for the pool.
def manage_subscription():
    """Shows how to interact with a parameter subscription."""
    subscription = processor.create_parameter_subscription(
        ['/YSS/SIMULATOR/BatteryVoltage1'])
    sleep(5)

    print('Adding extra items to the existing subscription...')
    subscription.add([
        '/YSS/SIMULATOR/Alpha',
        '/YSS/SIMULATOR/BatteryVoltage2',
        'MDB:OPS Name/SIMULATOR_PrimBusVoltage1',
    ])
    sleep(5)

    print('Shrinking subscription...')
    subscription.remove('/YSS/SIMULATOR/Alpha')

    print('Cancelling the subscription...')
    subscription.cancel()

    print('Last values from cache:')
    for parameter in ('/YSS/SIMULATOR/BatteryVoltage1',
                      '/YSS/SIMULATOR/BatteryVoltage2',
                      '/YSS/SIMULATOR/Alpha',
                      'MDB:OPS Name/SIMULATOR_PrimBusVoltage1'):
        print(subscription.get_value(parameter))
Shows how to interact with a parameter subscription.
def main():
    """Entry point for stand-alone execution.

    Initializes configuration and the database, wires stdin lines into the
    input queue, echoes output to stdout unless ``--quiet`` is given, queues
    start commands for the enabled listeners, then hands control to
    ``start()``.
    """
    # Run the two init calls as proper statements; the original combined
    # them in a throwaway tuple expression `conf.init(), db.init(...)`.
    conf.init()
    db.init(conf.DbPath)
    inqueue = LineQueue(sys.stdin).queue
    # Minimal queue-like object that echoes anything put on it to stdout.
    outqueue = type("", (), {"put": lambda self, x: print("\r%s" % x, end=" ")})()
    if "--quiet" in sys.argv:
        outqueue = None
    if conf.MouseEnabled:
        inqueue.put("mouse_start")
    if conf.KeyboardEnabled:
        inqueue.put("keyboard_start")
    start(inqueue, outqueue)
Entry point for stand-alone execution.
def cycle_complexity(self, cycle='V'):
    """Cycle complexity of V, W, AMLI, and F(1,1) cycle with simple relaxation.

    Cycle complexity is an approximate measure of the number of floating point
    operations (FLOPs) required to perform a single multigrid cycle relative to
    the cost a single smoothing operation.

    Parameters
    ----------
    cycle : {'V','W','F','AMLI'}
        Type of multigrid cycle to perform in each iteration.

    Returns
    -------
    cc : float
        Defined as F_sum / F_0, where F_sum is the total number of nonzeros in
        the matrix on all levels encountered during a cycle and F_0 is the
        number of nonzeros in the matrix on the finest level.

    Notes
    -----
    This is only a rough estimate of the true cycle complexity. The estimate
    assumes that the cost of pre and post-smoothing are (each) equal to the
    number of nonzeros in the matrix on that level. This assumption holds for
    smoothers like Jacobi and Gauss-Seidel. However, the true cycle complexity
    of cycle using more expensive methods, like block Gauss-Seidel will be
    underestimated.

    Additionally, if the cycle used in practice isn't a (1,1)-cycle, then this
    cost estimate will be off.
    """
    cycle = str(cycle).upper()
    nnz = [level.A.nnz for level in self.levels]
    # Index of the level just above the coarsest grid.
    next_to_coarsest = len(self.levels) - 2

    def v_flops(level):
        # V-cycle: one recursive descent per level.
        if len(self.levels) == 1:
            return nnz[0]
        if level == next_to_coarsest:
            return 2 * nnz[level] + nnz[level + 1]
        return 2 * nnz[level] + v_flops(level + 1)

    def w_flops(level):
        # W-cycle: two recursive descents per level.
        if len(self.levels) == 1:
            return nnz[0]
        if level == next_to_coarsest:
            return 2 * nnz[level] + nnz[level + 1]
        return 2 * nnz[level] + 2 * w_flops(level + 1)

    def f_flops(level):
        # F-cycle: one F-descent followed by one V-descent per level.
        if len(self.levels) == 1:
            return nnz[0]
        if level == next_to_coarsest:
            return 2 * nnz[level] + nnz[level + 1]
        return 2 * nnz[level] + f_flops(level + 1) + v_flops(level + 1)

    # AMLI is costed like a W-cycle.
    estimators = {'V': v_flops, 'W': w_flops, 'AMLI': w_flops, 'F': f_flops}
    if cycle not in estimators:
        raise TypeError('Unrecognized cycle type (%s)' % cycle)
    return float(estimators[cycle](0)) / float(nnz[0])
Cycle complexity of V, W, AMLI, and F(1,1) cycle with simple relaxation. Cycle complexity is an approximate measure of the number of floating point operations (FLOPs) required to perform a single multigrid cycle relative to the cost a single smoothing operation. Parameters ---------- cycle : {'V','W','F','AMLI'} Type of multigrid cycle to perform in each iteration. Returns ------- cc : float Defined as F_sum / F_0, where F_sum is the total number of nonzeros in the matrix on all levels encountered during a cycle and F_0 is the number of nonzeros in the matrix on the finest level. Notes ----- This is only a rough estimate of the true cycle complexity. The estimate assumes that the cost of pre and post-smoothing are (each) equal to the number of nonzeros in the matrix on that level. This assumption holds for smoothers like Jacobi and Gauss-Seidel. However, the true cycle complexity of cycle using more expensive methods, like block Gauss-Seidel will be underestimated. Additionally, if the cycle used in practice isn't a (1,1)-cycle, then this cost estimate will be off.
def read(self, length=-1):
    """
    Read ``length`` bytes from the file.  If ``length`` is negative or
    omitted, read all data until EOF.

    :type length: int
    :param length: the number of bytes to read
    :rtype: string
    :return: the chunk of data read from the file
    """
    _complain_ifclosed(self.closed)
    # NOTE: libhdfs read stops at block boundaries: it is *essential*
    # to ensure that we actually read the required number of bytes.
    remaining = self.size if length < 0 else length
    pieces = []
    while remaining > 0:
        piece = self.f.read(min(self.buff_size, remaining))
        if piece == b"":
            break  # EOF reached before the requested length
        pieces.append(piece)
        remaining -= len(piece)
    data = b"".join(pieces)
    if self.__encoding:
        return data.decode(self.__encoding, self.__errors)
    return data
Read ``length`` bytes from the file. If ``length`` is negative or omitted, read all data until EOF. :type length: int :param length: the number of bytes to read :rtype: string :return: the chunk of data read from the file
def get_normalization_min_max(array, norm_min, norm_max):
    """Get the minimum and maximum of the normalization of the array, which sets the lower and upper limits of the \
    colormap.

    If norm_min / norm_max are not supplied, the minimum / maximum values of the array of data are used.

    Parameters
    -----------
    array : data.array.scaled_array.ScaledArray
        The 2D array of data which is plotted.
    norm_min : float or None
        The minimum array value the colormap map spans (all values below this value are plotted the same color).
    norm_max : float or None
        The maximum array value the colormap map spans (all values above this value are plotted the same color).
    """
    lower = array.min() if norm_min is None else norm_min
    upper = array.max() if norm_max is None else norm_max
    return lower, upper
Get the minimum and maximum of the normalization of the array, which sets the lower and upper limits of the \ colormap. If norm_min / norm_max are not supplied, the minimum / maximum values of the array of data are used. Parameters ----------- array : data.array.scaled_array.ScaledArray The 2D array of data which is plotted. norm_min : float or None The minimum array value the colormap map spans (all values below this value are plotted the same color). norm_max : float or None The maximum array value the colormap map spans (all values above this value are plotted the same color).
def work_request(self, worker_name, md5, subkeys=None): """ Make a work request for an existing stored sample. Args: worker_name: 'strings', 'pe_features', whatever md5: the md5 of the sample (or sample_set!) subkeys: just get a subkey of the output: 'foo' or 'foo.bar' (None for all) Returns: The output of the worker. """ # Pull the worker output work_results = self._recursive_work_resolver(worker_name, md5) # Subkeys (Fixme this is super klutzy) if subkeys: if isinstance(subkeys, str): subkeys = [subkeys] try: sub_results = {} for subkey in subkeys: tmp = work_results[worker_name] # Traverse any subkeys for key in subkey.split('.')[:-1]: tmp = tmp[key] # Last subkey key = subkey.split('.')[-1] if key == '*': for key in tmp.keys(): sub_results[key] = tmp[key] else: sub_results[key] = tmp[key] # Set the output work_results = sub_results except (KeyError, TypeError): raise RuntimeError('Could not get one or more subkeys for: %s' % (work_results)) # Clean it and ship it return self.data_store.clean_for_serialization(work_results)
Make a work request for an existing stored sample. Args: worker_name: 'strings', 'pe_features', whatever md5: the md5 of the sample (or sample_set!) subkeys: just get a subkey of the output: 'foo' or 'foo.bar' (None for all) Returns: The output of the worker.
def find_err_pattern(self, pattern):
    """ Read the program's standard error and return all lines matching
    the given pattern.
    EG. prog_obj.FindErrPattern("Update of mySQL failed")
    """
    # Resolve the stderr path relative to the working directory, if set.
    if self.wdir != '':
        stderr_path = "%s/%s"%(self.wdir, self.stderr)
    else:
        stderr_path = self.stderr
    matches = []
    # First we check if the file we want to print does exist
    if os.path.exists(stderr_path):
        with open_(stderr_path, 'r') as handle:
            matches = [line.strip() for line in handle if pattern in line]
    else:
        # FILE DOESN'T EXIST
        debug.log("Error: The stderr file %s does not exist!"%(stderr_path))
    return matches
This function will read the standard error of the program and return a matching pattern if found. E.g. prog_obj.FindErrPattern("Update of mySQL failed")
def delete_one(self, filter, collation=None):
    """Delete a single document matching the filter.

      >>> db.test.count({'x': 1})
      3
      >>> result = db.test.delete_one({'x': 1})
      >>> result.deleted_count
      1
      >>> db.test.count({'x': 1})
      2

    :Parameters:
      - `filter`: A query that matches the document to delete.
      - `collation` (optional): An instance of
        :class:`~pymongo.collation.Collation`. This option is only supported
        on MongoDB 3.4 and above.

    :Returns:
      - An instance of :class:`~pymongo.results.DeleteResult`.

    .. versionchanged:: 3.4
       Added the `collation` option.

    .. versionadded:: 3.0
    """
    with self._socket_for_writes() as sock_info:
        # Delete at most one document (multi=False) and wrap the raw reply.
        raw_result = self._delete(sock_info, filter, False,
                                  collation=collation)
        return DeleteResult(raw_result, self.write_concern.acknowledged)
Delete a single document matching the filter. >>> db.test.count({'x': 1}) 3 >>> result = db.test.delete_one({'x': 1}) >>> result.deleted_count 1 >>> db.test.count({'x': 1}) 2 :Parameters: - `filter`: A query that matches the document to delete. - `collation` (optional): An instance of :class:`~pymongo.collation.Collation`. This option is only supported on MongoDB 3.4 and above. :Returns: - An instance of :class:`~pymongo.results.DeleteResult`. .. versionchanged:: 3.4 Added the `collation` option. .. versionadded:: 3.0
def set_debug(self, debug):
    """
    Set the debug settings for this node. This should be a modified
    :class:`~Debug` instance. This will take effect immediately on the
    specified node.

    :param Debug debug: debug object with specified settings
    :raises NodeCommandFailed: fail to communicate with node
    :return: None

    .. seealso:: :class:`~Debug` for example usage
    """
    payload = debug.serialize()
    self.make_request(
        NodeCommandFailed,
        method='create',
        resource='send_diagnostic',
        json=payload)
Set the debug settings for this node. This should be a modified :class:`~Debug` instance. This will take effect immediately on the specified node. :param Debug debug: debug object with specified settings :raises NodeCommandFailed: fail to communicate with node :return: None .. seealso:: :class:`~Debug` for example usage
async def AddToUnit(self, storages):
    '''
    storages : typing.Sequence[~StorageAddParams]
    Returns -> typing.Sequence[~AddStorageResult]
    '''
    # Build the RPC request for the Storage facade (version 4) and send it.
    request = dict(type='Storage',
                   request='AddToUnit',
                   version=4,
                   params={'storages': storages})
    return await self.rpc(request)
storages : typing.Sequence[~StorageAddParams] Returns -> typing.Sequence[~AddStorageResult]
def get_all_pattern_variables(self, patternnumber):
    """Get all variables for a given pattern at one time.

    Args:
        patternnumber (integer): 0-7

    Returns:
        A descriptive multiline string.
    """
    _checkPatternNumber(patternnumber)

    output = ''
    # One line per step: setpoint and dwell time.
    for stepnumber in range(8):
        setpoint = self.get_pattern_step_setpoint(patternnumber, stepnumber)
        steptime = self.get_pattern_step_time(patternnumber, stepnumber)
        output += 'SP{0}: {1} Time{0}: {2}\n'.format(stepnumber, setpoint, steptime)
    # Pattern-level settings.
    output += 'Actual step: {0}\n'.format(
        self.get_pattern_actual_step(patternnumber))
    output += 'Additional cycles: {0}\n'.format(
        self.get_pattern_additional_cycles(patternnumber))
    output += 'Linked pattern: {0}\n'.format(
        self.get_pattern_link_topattern(patternnumber))
    return output
Get all variables for a given pattern at one time. Args: patternnumber (integer): 0-7 Returns: A descriptive multiline string.
def parse_code(url):
    """
    Parse the code parameter from a URL

    :param str url: URL to parse
    :return: code query parameter
    :rtype: str
    :raises KeyError: if the URL has no ``code`` query parameter
    """
    result = urlparse(url)
    query = parse_qs(result.query)
    # parse_qs maps each parameter name to a *list* of values; the documented
    # return type is str, so return the first (normally only) value instead of
    # the list the original code returned.
    return query['code'][0]
Parse the code parameter from a URL :param str url: URL to parse :return: code query parameter :rtype: str
def list_snapshots(connection, volume):
    """ List all snapshots for the volume

    :type connection: boto.ec2.connection.EC2Connection
    :param connection: EC2 connection object
    :type volume: str
    :param volume: Volume ID or Volume Name
    :returns: None
    """
    # Table layout shared by the header, the rows, and the rules.
    separator = (
        '+----------------'
        '+----------------------'
        '+---------------------------+')
    row_format = (
        '| {snapshot:<14} '
        '| {snapshot_name:<20.20} '
        '| {created:<25} |')

    logger.info(separator)
    logger.info(row_format.format(
        snapshot='Snapshot ID',
        snapshot_name='Snapshot name',
        created='Created'))
    logger.info(separator)

    vid = get_volume_id(connection, volume)
    if vid:
        vol = connection.get_all_volumes(volume_ids=[vid])[0]
        for snap in vol.snapshots():
            logger.info(row_format.format(
                snapshot=snap.id,
                snapshot_name=snap.tags.get('Name', ''),
                created=snap.start_time))
        logger.info(separator)
List all snapshots for the volume :type connection: boto.ec2.connection.EC2Connection :param connection: EC2 connection object :type volume: str :param volume: Volume ID or Volume Name :returns: None
def stream_to_packet(data):
    """ Chop a stream of data into MODBUS packets.

    :param data: stream of data
    :returns: a tuple of the data that is a packet with the remaining
        data, or ``None``
    """
    HEADER_LEN = 6
    if len(data) < HEADER_LEN:
        # Not even a full MBAP header yet.
        return None
    # Bytes 4-5 of the MBAP header hold the length of the rest of the packet.
    (body_len,) = struct.unpack(">H", data[4:6])
    total_len = body_len + HEADER_LEN
    if len(data) < total_len:
        # Packet not fully received yet.
        return None
    return (data[:total_len], data[total_len:])
Chop a stream of data into MODBUS packets. :param data: stream of data :returns: a tuple of the data that is a packet with the remaining data, or ``None``
def on_message(self, message):
    """ When enaml.js sends a message """
    #: Decode the JSON payload
    change = tornado.escape.json_decode(message)

    #: Get the owner ID; ignore messages without one
    ref = change.get('ref')
    if not ref:
        return

    #: Get the server side representation of the node
    #: If found will return the View declaration node
    node = self.view.xpath('//*[@ref="{}"]'.format(ref), first=True)
    if node is None:
        return

    #: Handle the event
    if change.get('type') and change.get('name'):
        if change['type'] == 'event':
            #: Trigger the event
            getattr(node, change['name'])()
        if change['type'] == 'update':
            #: Push the new value onto the declaration node
            setattr(node, change['name'], change['value'])
When enaml.js sends a message
def _function_contents(func):
    """
    The signature is as follows (should be byte/chars):
    < _code_contents (see above) from func.__code__ >
    ,( comma separated _object_contents for function argument defaults)
    ,( comma separated _object_contents for any closure contents )

    See also: https://docs.python.org/3/reference/datamodel.html
    - func.__code__ - The code object representing the compiled function body.
    - func.__defaults__ - A tuple containing default argument values for those
      arguments that have defaults, or None if no arguments have a default value
    - func.__closure__ - None or a tuple of cells that contain bindings for the
      function's free variables.

    :Returns:
        Signature contents of a function. (in bytes)
    """
    # Start from the compiled body (plus docstring) of the function.
    contents = [_code_contents(func.__code__, func.__doc__)]

    # The function contents depends on the value of defaults arguments
    if func.__defaults__:
        function_defaults_contents = [_object_contents(cc) for cc in func.__defaults__]
        # Serialize defaults as ",(d1,d2,...)" appended to the signature.
        defaults = bytearray(b',(')
        defaults.extend(bytearray(b',').join(function_defaults_contents))
        defaults.extend(b')')
        contents.append(defaults)
    else:
        # No defaults: emit an empty tuple marker so signatures stay aligned.
        contents.append(b',()')

    # The function contents depends on the closure captured cell values.
    closure = func.__closure__ or []
    try:
        closure_contents = [_object_contents(x.cell_contents) for x in closure]
    except AttributeError:
        # A cell without contents (e.g. not yet populated) — treat as empty.
        closure_contents = []

    # Serialize closure cells as ",(c1,c2,...)" appended to the signature.
    contents.append(b',(')
    contents.append(bytearray(b',').join(closure_contents))
    contents.append(b')')

    retval = bytearray(b'').join(contents)
    return retval
The signature is as follows (should be byte/chars): < _code_contents (see above) from func.__code__ > ,( comma separated _object_contents for function argument defaults) ,( comma separated _object_contents for any closure contents ) See also: https://docs.python.org/3/reference/datamodel.html - func.__code__ - The code object representing the compiled function body. - func.__defaults__ - A tuple containing default argument values for those arguments that have defaults, or None if no arguments have a default value - func.__closure__ - None or a tuple of cells that contain bindings for the function's free variables. :Returns: Signature contents of a function. (in bytes)
def getchar(self):
    u'''Get next character from queue.'''
    Cevent = INPUT_RECORD()
    count = DWORD(0)
    while 1:
        # Block until one console input record has been read into Cevent.
        status = self.ReadConsoleInputW(self.hin,
                                        byref(Cevent), 1, byref(count))
        # Only act on a successful read of exactly one KEY_EVENT
        # (EventType == 1) where the key is being pressed, not released.
        if (status and
            (count.value == 1) and
            (Cevent.EventType == 1) and
            Cevent.Event.KeyEvent.bKeyDown):
            # Map the virtual key code to a symbolic name, if known.
            sym = keysym(Cevent.Event.KeyEvent.wVirtualKeyCode)
            if len(sym) == 0:
                # Not a recognized virtual key: fall back to the raw character.
                sym = Cevent.Event.KeyEvent.uChar.AsciiChar
            return sym
u'''Get next character from queue.
def add_event(self, event):
    """
    Adds an IEvent event to this command set.

    :param event: an event instance to be added
    """
    self._events.append(event)
    # BUG FIX: key the lookup table by the event's *name*. The original
    # stored the bound method object ``event.get_name`` itself as the key,
    # so lookups by name string could never succeed.
    self._events_by_name[event.get_name()] = event
Adds an IEvent event to this command set. :param event: an event instance to be added
def calc_nearest_point(bus1, network):
    """
    Function that finds the geographical nearest point in a network from a
    given bus.

    Parameters
    -----
    bus1: float
        id of bus
    network: Pypsa network container
        network including the comparable buses

    Returns
    ------
    bus0 : float
        bus_id of nearest point
    """
    bus1_index = network.buses.index[network.buses.index == bus1]

    # Exclude the bus itself and every bus already connected to it by a
    # line or a link (in either direction) from the candidate set.
    forbidden_buses = np.append(
        bus1_index.values, network.lines.bus1[
            network.lines.bus0 == bus1].values)

    forbidden_buses = np.append(
        forbidden_buses, network.lines.bus0[network.lines.bus1 == bus1].values)

    forbidden_buses = np.append(
        forbidden_buses, network.links.bus0[network.links.bus1 == bus1].values)

    forbidden_buses = np.append(
        forbidden_buses, network.links.bus1[network.links.bus0 == bus1].values)

    # Coordinates of the reference bus.
    x0 = network.buses.x[network.buses.index.isin(bus1_index)]

    y0 = network.buses.y[network.buses.index.isin(bus1_index)]

    comparable_buses = network.buses[~network.buses.index.isin(
        forbidden_buses)]

    x1 = comparable_buses.x

    y1 = comparable_buses.y

    # Squared euclidean distance to every candidate (no sqrt needed, since
    # only the argmin matters).
    distance = (x1.values - x0.values)*(x1.values - x0.values) + \
        (y1.values - y0.values)*(y1.values - y0.values)

    min_distance = distance.min()

    # Select the candidate row(s) at the minimum distance.
    bus0 = comparable_buses[(((x1.values - x0.values)*(x1.values - x0.values
                                                       ) + (y1.values - y0.values)*(y1.values - y0.values)) == min_distance)]

    # If several buses tie, keep the one with the largest index, then
    # collapse the single-element index to a plain string id.
    bus0 = bus0.index[bus0.index == bus0.index.max()]
    bus0 = ''.join(bus0.values)

    return bus0
Function that finds the geographical nearest point in a network from a given bus. Parameters ----- bus1: float id of bus network: Pypsa network container network including the comparable buses Returns ------ bus0 : float bus_id of nearest point
def coarsegrain(P, n):
    """ Coarse-grains transition matrix P to n sets using PCCA

    Coarse-grains transition matrix P such that the dominant eigenvalues are
    preserved, using:

    ..math:
        \tilde{P} = M^T P M (M^T M)^{-1}

    See [2]_ for the derivation of this form from the coarse-graining method
    first derived in [1]_.

    References
    ----------
    [1] S. Kube and M. Weber
        A coarse graining method for the identification of transition rates
        between molecular conformations.
        J. Chem. Phys. 126, 024103 (2007)
    [2] F. Noe, H. Wu, J.-H. Prinz and N. Plattner:
        Projected and hidden Markov models for calculating kinetics and
        metastable states of complex molecules
        J. Chem. Phys. 139, 184114 (2013)
    """
    memberships = pcca(P, n)
    # Galerkin projection: P_coarse = (M^T M)^{-1} (M^T P M)
    MT = memberships.T
    gram_inv = np.linalg.inv(np.dot(MT, memberships))
    projected = np.dot(np.dot(MT, P), memberships)
    P_coarse = np.dot(gram_inv, projected)

    # symmetrize and renormalize to eliminate numerical errors
    from msmtools.analysis import stationary_distribution
    pi_coarse = np.dot(MT, stationary_distribution(P))
    flux = np.dot(np.diag(pi_coarse), P_coarse)
    return flux / flux.sum(axis=1)[:, None]
Coarse-grains transition matrix P to n sets using PCCA Coarse-grains transition matrix P such that the dominant eigenvalues are preserved, using: ..math: \tilde{P} = M^T P M (M^T M)^{-1} See [2]_ for the derivation of this form from the coarse-graining method first derived in [1]_. References ---------- [1] S. Kube and M. Weber A coarse graining method for the identification of transition rates between molecular conformations. J. Chem. Phys. 126, 024103 (2007) [2] F. Noe, H. Wu, J.-H. Prinz and N. Plattner: Projected and hidden Markov models for calculating kinetics and metastable states of complex molecules J. Chem. Phys. 139, 184114 (2013)
def delete_replication(Bucket, region=None, key=None, keyid=None, profile=None):
    '''
    Delete the replication config from the given bucket

    Returns {deleted: true} if replication configuration was deleted and
    returns {deleted: False} if replication configuration was not deleted.

    CLI Example:

    .. code-block:: bash

        salt myminion boto_s3_bucket.delete_replication my_bucket

    '''
    try:
        conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
        conn.delete_bucket_replication(Bucket=Bucket)
    except ClientError as e:
        # Report the AWS error instead of raising.
        return {'deleted': False, 'error': __utils__['boto3.get_error'](e)}
    else:
        return {'deleted': True, 'name': Bucket}
Delete the replication config from the given bucket Returns {deleted: true} if replication configuration was deleted and returns {deleted: False} if replication configuration was not deleted. CLI Example: .. code-block:: bash salt myminion boto_s3_bucket.delete_replication my_bucket
def isfinite(data: mx.nd.NDArray) -> mx.nd.NDArray:
    """Performs an element-wise check to determine if the NDArray contains an infinite element or not.
    TODO: remove this function after upgrade to MXNet 1.4.* in favor of mx.ndarray.contrib.isfinite()
    """
    # NaN is the only value that does not compare equal to itself.
    not_nan = data == data
    # |x| equals inf exactly for +inf / -inf.
    not_inf = data.abs() != np.inf
    return mx.nd.logical_and(not_inf, not_nan)
Performs an element-wise check to determine if the NDArray contains an infinite element or not. TODO: remove this function after upgrade to MXNet 1.4.* in favor of mx.ndarray.contrib.isfinite()
def _socket_readlines(self, blocking=False):
    """
    Generator for complete lines, received from the server
    """
    try:
        # Switch to non-blocking mode so recv() never stalls this generator.
        self.sock.setblocking(0)
    except socket.error as e:
        self.logger.error("socket error when setblocking(0): %s" % str(e))
        raise ConnectionDrop("connection dropped")
    while True:
        short_buf = b''
        newline = b'\r\n'

        # Wait for readability: forever when blocking, a single poll otherwise.
        select.select([self.sock], [], [], None if blocking else 0)
        try:
            short_buf = self.sock.recv(4096)

            # sock.recv returns empty if the connection drops
            if not short_buf:
                self.logger.error("socket.recv(): returned empty")
                raise ConnectionDrop("connection dropped")
        except socket.error as e:
            self.logger.error("socket error on recv(): %s" % str(e))
            # EAGAIN/EWOULDBLOCK: no data available right now. In
            # non-blocking mode, stop once the line buffer is drained.
            if "Resource temporarily unavailable" in str(e):
                if not blocking:
                    if len(self.buf) == 0:
                        break
        self.buf += short_buf
        # Yield every complete CRLF-terminated line accumulated so far.
        while newline in self.buf:
            line, self.buf = self.buf.split(newline, 1)
            yield line
Generator for complete lines, received from the server
def modify_parameters(self, modifier_function):
    """Make modifications on the parameters of the legislation.

    Call this function in `apply()` if the reform asks for legislation
    parameter modifications.

    :param modifier_function: A function that takes an object of type
        :any:`ParameterNode` and should return an object of the same type.
    :raises ValueError: if `modifier_function` does not return a
        :any:`ParameterNode`.
    """
    baseline_parameters = self.baseline.parameters
    # Deep-copy so the modifier cannot mutate the baseline in place.
    baseline_parameters_copy = copy.deepcopy(baseline_parameters)
    reform_parameters = modifier_function(baseline_parameters_copy)
    if not isinstance(reform_parameters, ParameterNode):
        # BUG FIX: the original *returned* the ValueError instead of raising
        # it, so a misbehaving modifier_function failed silently and left
        # self.parameters unset.
        raise ValueError(
            'modifier_function {} in module {} must return a ParameterNode'
            .format(modifier_function.__name__, modifier_function.__module__,)
            )
    self.parameters = reform_parameters
    self._parameters_at_instant_cache = {}
Make modifications on the parameters of the legislation Call this function in `apply()` if the reform asks for legislation parameter modifications. :param modifier_function: A function that takes an object of type :any:`ParameterNode` and should return an object of the same type.
def to_dict(self):
    """
    A wrapper for to_dict the makes sure that all the private information
    as well as extra arguments are included. This method should *not* be
    used for exporting information about the key.

    :return: A dictionary representation of the JSON Web key
    """
    representation = self.serialize(private=True)
    # Fold in any extra constructor arguments on top of the serialized form.
    for key, value in self.extra_args.items():
        representation[key] = value
    return representation
A wrapper for to_dict the makes sure that all the private information as well as extra arguments are included. This method should *not* be used for exporting information about the key. :return: A dictionary representation of the JSON Web key
def database_to_intermediary(database_uri, schema=None):
    """ Introspect from the database (given the database_uri) to create the
    intermediary representation. """
    from sqlalchemy.ext.automap import automap_base
    from sqlalchemy import create_engine

    Base = automap_base()
    engine = create_engine(database_uri)
    if schema is not None:
        Base.metadata.schema = schema

    # Reflect the tables into automap classes before handing the declarative
    # base to the generic converter.
    Base.prepare(engine, reflect=True,
                 name_for_scalar_relationship=name_for_scalar_relationship)
    return declarative_to_intermediary(Base)
Introspect from the database (given the database_uri) to create the intermediary representation.
def atc(jobid):
    '''
    Print the at(1) script that will run for the passed job id.
    This is mostly for debugging so the output will just be text.

    CLI Example:

    .. code-block:: bash

        salt '*' at.atc <jobid>
    '''
    # Shim to produce output similar to what __virtual__() should do
    # but __salt__ isn't available in __virtual__()
    script = _cmd('at', '-c', six.text_type(jobid))

    if script is None:
        return '\'at.atc\' is not available.'
    if script == '':
        return {'error': 'invalid job id \'{0}\''.format(jobid)}
    return script
Print the at(1) script that will run for the passed job id. This is mostly for debugging so the output will just be text. CLI Example: .. code-block:: bash salt '*' at.atc <jobid>
def ssh_authorized_key_exists(public_key, application_name, user=None):
    """Check if given key is in the authorized_key file.

    :param public_key: Public key.
    :type public_key: str
    :param application_name: Name of application eg nova-compute-something
    :type application_name: str
    :param user: The user that the ssh asserts are for.
    :type user: str
    :returns: Whether given key is in the authorized_key file.
    :rtype: boolean
    """
    keys_path = authorized_keys(application_name, user)
    with open(keys_path) as keys_file:
        contents = keys_file.read()
    # Substring match against the whole file, coercing the key to str.
    return ('%s' % public_key) in contents
Check if given key is in the authorized_key file. :param public_key: Public key. :type public_key: str :param application_name: Name of application eg nova-compute-something :type application_name: str :param user: The user that the ssh asserts are for. :type user: str :returns: Whether given key is in the authorized_key file. :rtype: boolean
def alphas(shape, alpha_value, name=None):
    """Creates a tensor with all elements set to `alpha_value`.
    This operation returns a tensor of type `dtype` with shape `shape` and all
    elements set to alpha.

    Parameters
    ----------
    shape: A list of integers, a tuple of integers, or a 1-D `Tensor` of type `int32`.
        The shape of the desired tensor
    alpha_value: `float32`, `float64`, `int8`, `uint8`, `int16`, `uint16`, int32`, `int64`
        The value used to fill the resulting `Tensor`.
    name: str
        A name for the operation (optional).

    Returns
    -------
    A `Tensor` with all elements set to alpha.

    Examples
    --------
    >>> tl.alphas([2, 3], tf.int32)  # [[alpha, alpha, alpha], [alpha, alpha, alpha]]

    """
    with ops.name_scope(name, "alphas", [shape]) as name:
        # Derive the output dtype from the fill value itself.
        alpha_tensor = convert_to_tensor(alpha_value)
        alpha_dtype = dtypes.as_dtype(alpha_tensor.dtype).base_dtype

        if not isinstance(shape, ops.Tensor):
            try:
                # Fast path: encode the static shape as a constant tensor.
                shape = constant_op._tensor_shape_tensor_conversion_function(tensor_shape.TensorShape(shape))
            except (TypeError, ValueError):
                # Shape is not fully static: convert generically.
                shape = ops.convert_to_tensor(shape, dtype=dtypes.int32)

            if not shape._shape_tuple():
                shape = reshape(shape, [-1])  # Ensure it's a vector

        try:
            # Prefer embedding the value directly as a constant op.
            output = constant(alpha_value, shape=shape, dtype=alpha_dtype, name=name)

        except (TypeError, ValueError):
            # Fall back to a dynamic fill op when the shape is not static.
            output = fill(shape, constant(alpha_value, dtype=alpha_dtype), name=name)

        if output.dtype.base_dtype != alpha_dtype:
            raise AssertionError("Dtypes do not corresponds: %s and %s" % (output.dtype.base_dtype, alpha_dtype))

        return output
Creates a tensor with all elements set to `alpha_value`. This operation returns a tensor of type `dtype` with shape `shape` and all elements set to alpha. Parameters ---------- shape: A list of integers, a tuple of integers, or a 1-D `Tensor` of type `int32`. The shape of the desired tensor alpha_value: `float32`, `float64`, `int8`, `uint8`, `int16`, `uint16`, int32`, `int64` The value used to fill the resulting `Tensor`. name: str A name for the operation (optional). Returns ------- A `Tensor` with all elements set to alpha. Examples -------- >>> tl.alphas([2, 3], tf.int32) # [[alpha, alpha, alpha], [alpha, alpha, alpha]]
def set_log_level(self):
    """
    Set log level according to command-line options

    @returns: logger object
    """
    if self.options.debug:
        self.logger.setLevel(logging.DEBUG)
    elif self.options.quiet:
        self.logger.setLevel(logging.ERROR)
    else:
        self.logger.setLevel(logging.INFO)
    # BUG FIX: only attach a StreamHandler when none is present yet; the
    # original added a fresh handler on every call, producing duplicated
    # log lines when set_log_level() ran more than once.
    if not self.logger.handlers:
        self.logger.addHandler(logging.StreamHandler())
    return self.logger
Set log level according to command-line options @returns: logger object
def get_next_step(self):
    """Find the proper step when user clicks the Next button.

    :returns: The step to be switched to
    :rtype: WizardStep instance or None
    """
    # Raster layers go on to band selection; everything else picks a
    # layer mode.
    if is_raster_layer(self.parent.layer):
        return self.parent.step_kw_band_selector
    return self.parent.step_kw_layermode
Find the proper step when user clicks the Next button. :returns: The step to be switched to :rtype: WizardStep instance or None
def dict_of_sets_add(dictionary, key, value):
    # type: (DictUpperBound, Any, Any) -> None
    """Add value to a set in a dictionary by key

    Args:
        dictionary (DictUpperBound): Dictionary to which to add values
        key (Any): Key within dictionary
        value (Any): Value to add to set in dictionary

    Returns:
        None

    """
    # setdefault inserts (and returns) an empty set only when the key is
    # missing, replacing the previous get/add/reassign three-step with a
    # single idiomatic lookup.
    dictionary.setdefault(key, set()).add(value)
Add value to a set in a dictionary by key Args: dictionary (DictUpperBound): Dictionary to which to add values key (Any): Key within dictionary value (Any): Value to add to set in dictionary Returns: None
def start_ray_process(command,
                      process_type,
                      env_updates=None,
                      cwd=None,
                      use_valgrind=False,
                      use_gdb=False,
                      use_valgrind_profiler=False,
                      use_perftools_profiler=False,
                      use_tmux=False,
                      stdout_file=None,
                      stderr_file=None):
    """Start one of the Ray processes.

    TODO(rkn): We need to figure out how these commands interact. For
    example, it may only make sense to start a process in gdb if we also start
    it in tmux. Similarly, certain combinations probably don't make sense,
    like simultaneously running the process in valgrind and the profiler.

    Args:
        command (List[str]): The command to use to start the Ray process.
        process_type (str): The type of the process that is being started
            (e.g., "raylet").
        env_updates (dict): A dictionary of additional environment variables to
            run the command with (in addition to the caller's environment
            variables).
        cwd (str): The directory to run the process in.
        use_valgrind (bool): True if we should start the process in valgrind.
        use_gdb (bool): True if we should start the process in gdb.
        use_valgrind_profiler (bool): True if we should start the process in
            the valgrind profiler.
        use_perftools_profiler (bool): True if we should profile the process
            using perftools.
        use_tmux (bool): True if we should start the process in tmux.
        stdout_file: A file handle opened for writing to redirect stdout to. If
            no redirection should happen, then this should be None.
        stderr_file: A file handle opened for writing to redirect stderr to. If
            no redirection should happen, then this should be None.

    Returns:
        Information about the process that was started including a handle to
            the process that was started.
    """
    # Detect which flags are set through environment variables.
    # RAY_<PROCESS_TYPE>_<TOOL>=1 lets a user opt a process into a debugging
    # tool without changing any calling code.
    valgrind_env_var = "RAY_{}_VALGRIND".format(process_type.upper())
    if os.environ.get(valgrind_env_var) == "1":
        logger.info("Detected environment variable '%s'.", valgrind_env_var)
        use_valgrind = True
    valgrind_profiler_env_var = "RAY_{}_VALGRIND_PROFILER".format(
        process_type.upper())
    if os.environ.get(valgrind_profiler_env_var) == "1":
        logger.info("Detected environment variable '%s'.",
                    valgrind_profiler_env_var)
        use_valgrind_profiler = True
    perftools_profiler_env_var = "RAY_{}_PERFTOOLS_PROFILER".format(
        process_type.upper())
    if os.environ.get(perftools_profiler_env_var) == "1":
        logger.info("Detected environment variable '%s'.",
                    perftools_profiler_env_var)
        use_perftools_profiler = True
    tmux_env_var = "RAY_{}_TMUX".format(process_type.upper())
    if os.environ.get(tmux_env_var) == "1":
        logger.info("Detected environment variable '%s'.", tmux_env_var)
        use_tmux = True
    gdb_env_var = "RAY_{}_GDB".format(process_type.upper())
    if os.environ.get(gdb_env_var) == "1":
        logger.info("Detected environment variable '%s'.", gdb_env_var)
        use_gdb = True

    # The debugging/profiling tools are mutually exclusive.
    if sum(
        [use_gdb, use_valgrind, use_valgrind_profiler, use_perftools_profiler
         ]) > 1:
        raise ValueError(
            "At most one of the 'use_gdb', 'use_valgrind', "
            "'use_valgrind_profiler', and 'use_perftools_profiler' flags can "
            "be used at a time.")
    if env_updates is None:
        env_updates = {}
    if not isinstance(env_updates, dict):
        raise ValueError("The 'env_updates' argument must be a dictionary.")

    # Start from the caller's environment so only the requested variables
    # are overridden.
    modified_env = os.environ.copy()
    modified_env.update(env_updates)

    if use_gdb:
        if not use_tmux:
            raise ValueError(
                "If 'use_gdb' is true, then 'use_tmux' must be true as well.")

        # TODO(suquark): Any better temp file creation here?
        # gdb only takes the executable path on its command line, so the
        # original arguments are passed through a one-off gdb init script
        # containing a "run ..." command.
        gdb_init_path = "/tmp/ray/gdb_init_{}_{}".format(
            process_type, time.time())
        ray_process_path = command[0]
        ray_process_args = command[1:]
        run_args = " ".join(["'{}'".format(arg) for arg in ray_process_args])
        with open(gdb_init_path, "w") as gdb_init_file:
            gdb_init_file.write("run {}".format(run_args))
        command = ["gdb", ray_process_path, "-x", gdb_init_path]

    if use_valgrind:
        command = [
            "valgrind", "--track-origins=yes", "--leak-check=full",
            "--show-leak-kinds=all", "--leak-check-heuristics=stdstring",
            "--error-exitcode=1"
        ] + command

    if use_valgrind_profiler:
        command = ["valgrind", "--tool=callgrind"] + command

    if use_perftools_profiler:
        # perftools is injected via LD_PRELOAD; PERFTOOLS_PATH and
        # PERFTOOLS_LOGFILE must already be set in the caller's environment
        # (a KeyError here means they are not).
        modified_env["LD_PRELOAD"] = os.environ["PERFTOOLS_PATH"]
        modified_env["CPUPROFILE"] = os.environ["PERFTOOLS_LOGFILE"]

    if use_tmux:
        # The command has to be created exactly as below to ensure that it
        # works on all versions of tmux. (Tested with tmux 1.8-5, travis'
        # version, and tmux 2.1)
        command = ["tmux", "new-session", "-d", "{}".format(" ".join(command))]

    process = subprocess.Popen(
        command,
        env=modified_env,
        cwd=cwd,
        stdout=stdout_file,
        stderr=stderr_file)

    return ProcessInfo(
        process=process,
        stdout_file=stdout_file.name if stdout_file is not None else None,
        stderr_file=stderr_file.name if stderr_file is not None else None,
        use_valgrind=use_valgrind,
        use_gdb=use_gdb,
        use_valgrind_profiler=use_valgrind_profiler,
        use_perftools_profiler=use_perftools_profiler,
        use_tmux=use_tmux)
Start one of the Ray processes. TODO(rkn): We need to figure out how these commands interact. For example, it may only make sense to start a process in gdb if we also start it in tmux. Similarly, certain combinations probably don't make sense, like simultaneously running the process in valgrind and the profiler. Args: command (List[str]): The command to use to start the Ray process. process_type (str): The type of the process that is being started (e.g., "raylet"). env_updates (dict): A dictionary of additional environment variables to run the command with (in addition to the caller's environment variables). cwd (str): The directory to run the process in. use_valgrind (bool): True if we should start the process in valgrind. use_gdb (bool): True if we should start the process in gdb. use_valgrind_profiler (bool): True if we should start the process in the valgrind profiler. use_perftools_profiler (bool): True if we should profile the process using perftools. use_tmux (bool): True if we should start the process in tmux. stdout_file: A file handle opened for writing to redirect stdout to. If no redirection should happen, then this should be None. stderr_file: A file handle opened for writing to redirect stderr to. If no redirection should happen, then this should be None. Returns: Information about the process that was started including a handle to the process that was started.
def BHI(self, params):
    """
    BHI label

    Branch to the instruction at label if the C flag is set and the Z flag
    is not set
    """
    target = self.get_one_parameter(self.ONE_PARAMETER, params)
    self.check_arguments(label_exists=(target,))

    # BHI label
    def BHI_func():
        # Unsigned "higher": carry set and zero clear.
        taken = self.is_C_set() and not self.is_Z_set()
        if taken:
            self.register['PC'] = self.labels[target]

    return BHI_func
BHI label Branch to the instruction at label if the C flag is set and the Z flag is not set
def modify_environment(self, environment_id, **kwargs):
    '''
    modify_environment(self, environment_id, **kwargs)

    Modifies an existing environment

    :Parameters:
    * *environment_id* (`string`) -- The environment identifier

    Keywords args:
    The variables to change in the environment

    :return: id of the created environment
    '''
    # Keyword arguments are merged on top of the id so callers can supply
    # arbitrary environment fields.
    payload = dict({'id': environment_id}, **kwargs)
    return self._call_rest_api('post', '/environments', data=payload,
                               error='Failed to modify environment')
modify_environment(self, environment_id, **kwargs) Modifies an existing environment :Parameters: * *environment_id* (`string`) -- The environment identifier Keywords args: The variables to change in the environment :return: id of the created environment
def delete_external_feed_courses(self, course_id, external_feed_id):
    """
    Delete an external feed.

    Deletes the external feed.
    """
    # Both values are required PATH parameters.
    path = {
        "course_id": course_id,
        "external_feed_id": external_feed_id,
    }
    data = {}
    params = {}

    self.logger.debug("DELETE /api/v1/courses/{course_id}/external_feeds/{external_feed_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    url = "/api/v1/courses/{course_id}/external_feeds/{external_feed_id}".format(**path)
    return self.generic_request("DELETE", url, data=data, params=params, single_item=True)
Delete an external feed. Deletes the external feed.
def static2dplot(var, time):
    """ If the static option is set in tplot, and is supplied with a time, then the spectrogram plot(s)
    for which it is set will have another window pop up, with y and z values plotted at the specified time. """

    # Grab names of data loaded in as tplot variables.
    names = list(pytplot.data_quants.keys())
    # Get data we'll actually work with here.
    valid_variables = tplot_utilities.get_data(names)

    # Don't plot anything unless we have spectrograms with which to work.
    if valid_variables:
        # Get z label
        labels = tplot_utilities.get_labels_axis_types(names)

        # Put together data in easy-to-access format for plots.
        # data[name] = [bins, z_values, time_values]
        data = {}
        for name in valid_variables:
            bins = tplot_utilities.get_bins(name)
            time_values, z_values = tplot_utilities.get_z_t_values(name)
            data[name] = [bins, z_values, time_values]
        # NOTE(review): `name`, `time_values` and `z_values` leak out of the
        # loop above and are reused below, so everything past this point
        # operates on the *last* iterated variable rather than `var` —
        # confirm whether `var` was intended instead.

        # Set up the 2D static plot
        pytplot.static_window = pg.GraphicsWindow()
        pytplot.static_window.resize(1000, 600)
        pytplot.static_window.setWindowTitle('Static Window')
        plot = pytplot.static_window.addPlot(title='2D Static Plot', row=0, col=0)
        # Make it so that whenever this first starts up, you just have an empty plot
        plot_data = plot.plot([], [])

        if var in valid_variables:
            # Get min/max values of data's time range (in both datetime and seconds since epoch)
            t_min = np.nanmin(time_values)
            t_min_str = tplot_utilities.int_to_str(np.nanmin(time_values))
            t_min_conv_back = tplot_utilities.str_to_int(t_min_str)
            t_max = np.nanmax(time_values)
            t_max_str = tplot_utilities.int_to_str(np.nanmax(time_values))
            t_max_conv_back = tplot_utilities.str_to_int(t_max_str)

            # Convert user input to seconds since epoch
            user_time = tplot_utilities.str_to_int(time)

            # Covering situation where user entered a time not in the dataset!
            # As long as they used a time in the dataset, this will not trigger.
            if user_time not in range(t_min_conv_back, t_max_conv_back+1):
                while True:
                    try:
                        # Prompt interactively until a parseable, in-range
                        # time is supplied.
                        user_time = tplot_utilities.str_to_int(input(
                            'Chosen time not in range of data [{} to {}]. Input new time (%Y-%m-%d %H:%M:%S). '.format(
                                t_min_str, t_max_str)))
                    # NOTE(review): bare except silently swallows all errors
                    # (including KeyboardInterrupt) — consider narrowing.
                    except:
                        continue
                    else:
                        if user_time not in range(int(t_min), int(t_max)):
                            continue
                        else:
                            break

            # Get time closest to the user's time choice
            time_array = np.array(data[var][2])
            array = np.asarray(time_array)
            idx = (np.abs(array - user_time)).argmin()

            # If user indicated they wanted the interactive plot's axes to be logged, log 'em.
            # But first make sure that values in x and y are loggable!
            x_axis = False
            y_axis = False
            # Checking x axis
            if np.nanmin(data[name][0][:]) < 0:
                print('Negative data is incompatible with log plotting.')
            elif np.nanmin(data[name][0][:]) >= 0 and labels[name][2] == 'log':
                x_axis = True
            # Checking y axis
            if np.nanmin(list(data[name][1][idx])) < 0:
                print('Negative data is incompatible with log plotting')
            elif np.nanmin(list(data[name][1][idx])) >= 0 and labels[name][3] == 'log':
                y_axis = True

            # Set plot labels
            plot.setLabel('bottom', '{}'.format(labels[name][0]))
            plot.setLabel('left', '{}'.format(labels[name][1]))
            plot.setLogMode(x=x_axis, y=y_axis)
            # Update x and y range if user modified it
            tplot_utilities.set_x_range(name, x_axis, plot)
            tplot_utilities.set_y_range(name, y_axis, plot)
            # Plot data based on time we're hovering over
            plot_data.setData(data[name][0][:], list(data[name][1][idx]))
If the static option is set in tplot, and is supplied with a time, then the spectrogram plot(s) for which it is set will have another window pop up, with y and z values plotted at the specified time.
def optimization_at(self,
                    circuit: Circuit,
                    index: int,
                    op: ops.Operation
                    ) -> Optional[PointOptimizationSummary]:
    """Describes how to change operations near the given location.

    For example, this method could realize that the given operation is an
    X gate and that in the very next moment there is a Z gate. It would
    indicate that they should be combined into a Y gate by returning
    PointOptimizationSummary(clear_span=2,
                             clear_qubits=op.qubits,
                             new_operations=cirq.Y(op.qubits[0]))

    This base implementation is a stub whose body is empty, so it returns
    None (i.e. "make no change"); subclasses override it to perform real
    optimizations.

    Args:
        circuit: The circuit to improve.
        index: The index of the moment with the operation to focus on.
        op: The operation to focus improvements upon.

    Returns:
        A description of the optimization to perform, or else None if no
        change should be made.
    """
Describes how to change operations near the given location. For example, this method could realize that the given operation is an X gate and that in the very next moment there is a Z gate. It would indicate that they should be combined into a Y gate by returning PointOptimizationSummary(clear_span=2, clear_qubits=op.qubits, new_operations=cirq.Y(op.qubits[0])) Args: circuit: The circuit to improve. index: The index of the moment with the operation to focus on. op: The operation to focus improvements upon. Returns: A description of the optimization to perform, or else None if no change should be made.
async def send_api(container, targetname, name, params=None):
    """
    Send API and discard the result

    :param container: routine container used to post the event
    :param targetname: name of the target module
    :param name: API method name
    :param params: optional dict of API parameters (defaults to empty)
    """
    # Avoid the shared mutable-default pitfall: the old `params = {}`
    # default was a single dict shared by every call.
    if params is None:
        params = {}
    handle = object()
    apiEvent = ModuleAPICall(handle, targetname, name, params=params)
    await container.wait_for_send(apiEvent)
Send API and discard the result
def output(self, filename):
    """
    Output the graph in filename

    Args:
        filename(string)
    """
    # Normalize the target path: default name first, then enforce the
    # .dot suffix.
    if filename == '':
        filename = 'contracts.dot'
    if not filename.endswith('.dot'):
        filename += ".dot"

    self.info('Inheritance Graph: ' + filename)

    with open(filename, 'w', encoding='utf8') as out:
        sections = [self._summary(contract) for contract in self.contracts]
        out.write('digraph "" {\n')
        out.writelines(sections)
        out.write('}')
Output the graph in filename Args: filename(string)
def add_edge_end_unused(intersection, duplicates, intersections):
    """Add intersection that is ``COINCIDENT_UNUSED`` but on an edge end.

    This is a helper for :func:`~._surface_intersection.add_intersection`.
    It assumes that

    * ``intersection`` will have at least one of ``s == 0.0`` or ``t == 0.0``
    * A "misclassified" intersection in ``intersections`` that matches
      ``intersection`` will be the "same" if it matches both ``index_first``
      and ``index_second`` and if it matches the start index exactly

    Args:
        intersection (.Intersection): An intersection to be added.
        duplicates (List[.Intersection]): List of duplicate intersections.
        intersections (List[.Intersection]): List of "accepted" (i.e.
            non-duplicate) intersections.
    """
    matched = None
    for candidate in intersections:
        same_edges = (
            intersection.index_first == candidate.index_first
            and intersection.index_second == candidate.index_second
        )
        if not same_edges:
            continue
        # A match must agree on the start index (s or t exactly zero).
        if intersection.s == 0.0 and candidate.s == 0.0:
            matched = candidate
            break
        if intersection.t == 0.0 and candidate.t == 0.0:
            matched = candidate
            break

    if matched is not None:
        # Demote the previously-accepted intersection to a duplicate.
        intersections.remove(matched)
        duplicates.append(matched)
    intersections.append(intersection)
Add intersection that is ``COINCIDENT_UNUSED`` but on an edge end. This is a helper for :func:`~._surface_intersection.add_intersection`. It assumes that * ``intersection`` will have at least one of ``s == 0.0`` or ``t == 0.0`` * A "misclassified" intersection in ``intersections`` that matches ``intersection`` will be the "same" if it matches both ``index_first`` and ``index_second`` and if it matches the start index exactly Args: intersection (.Intersection): An intersection to be added. duplicates (List[.Intersection]): List of duplicate intersections. intersections (List[.Intersection]): List of "accepted" (i.e. non-duplicate) intersections.
def if_running(meth):
    """Decorator for service methods that must be ran only if service is in
    running state."""
    @wraps(meth)
    def check_running(self, *args, **kwargs):
        # Short-circuit (returning None) whenever the service is stopped.
        if self.running:
            return meth(self, *args, **kwargs)
        return None
    return check_running
Decorator for service methods that must be ran only if service is in running state.
def session_id(self):
    """
    Return the session id of the current connection.

    The session id is issued (through an API request) the first time it is requested, but no sooner. This is
    because generating a session id puts it into the DKV on the server, which effectively locks the cluster. Once
    issued, the session id will stay the same until the connection is closed.
    """
    # Lazily request a session id exactly once, then cache it for the
    # lifetime of the connection.
    if self._session_id is None:
        response = self.request("POST /4/sessions")
        self._session_id = response.get("session_key") or response.get("session_id")
    return CallableString(self._session_id)
Return the session id of the current connection. The session id is issued (through an API request) the first time it is requested, but no sooner. This is because generating a session id puts it into the DKV on the server, which effectively locks the cluster. Once issued, the session id will stay the same until the connection is closed.
def get_file_path_validator(default_file_param=None):
    """
    Creates a namespace validator that splits out 'path' into 'directory_name' and 'file_name'.
    Allows another path-type parameter to be named which can supply a default filename.
    """
    def validator(namespace):
        # Nothing to do for namespaces without a 'path' attribute.
        if not hasattr(namespace, 'path'):
            return
        path = namespace.path
        if path:
            dir_name, file_name = os.path.split(path)
        else:
            dir_name, file_name = None, ''

        if default_file_param and '.' not in file_name:
            # Treat the whole path as a directory and borrow the filename
            # from the fallback parameter.
            dir_name = path
            file_name = os.path.split(getattr(namespace, default_file_param))[1]

        namespace.directory_name = dir_name
        namespace.file_name = file_name
        del namespace.path

    return validator
Creates a namespace validator that splits out 'path' into 'directory_name' and 'file_name'. Allows another path-type parameter to be named which can supply a default filename.
def qos_map_dscp_cos_mark_dscp_in_values(self, **kwargs):
    """Auto Generated Code
    """
    # Build the NETCONF config tree:
    # config/qos/map/dscp-cos/{dscp-cos-map-name, mark/dscp-in-values}
    config = ET.Element("config")
    qos = ET.SubElement(config, "qos", xmlns="urn:brocade.com:mgmt:brocade-qos")
    dscp_cos = ET.SubElement(ET.SubElement(qos, "map"), "dscp-cos")
    ET.SubElement(dscp_cos, "dscp-cos-map-name").text = kwargs.pop('dscp_cos_map_name')
    mark = ET.SubElement(dscp_cos, "mark")
    ET.SubElement(mark, "dscp-in-values").text = kwargs.pop('dscp_in_values')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
def prevention():
    """The |Transition| for the prevention example from Actual Causation
    Figure 5D.
    """
    # Only F's next-state column varies across the 8 input states; A and B
    # stay at 0.5 throughout.
    f_column = [1, 0, 1, 1, 1, 0, 1, 1]
    tpm = np.array([[0.5, 0.5, f] for f in f_column])
    cm = np.array([
        [0, 0, 1],
        [0, 0, 1],
        [0, 0, 0],
    ])
    network = Network(tpm, cm, node_labels=['A', 'B', 'F'])
    before_state = (1, 1, 1)
    after_state = (1, 1, 1)
    return Transition(network, before_state, after_state, (0, 1), (2,))
The |Transition| for the prevention example from Actual Causation Figure 5D.
def ngrok_url():
    """
    If ngrok is running, it exposes an API on port 4040.  We can use that
    to figure out what URL it has assigned, and suggest that to the user.
    https://ngrok.com/docs#list-tunnels

    Returns the first https public URL, or None if ngrok is not running or
    has no https tunnel yet.
    """
    try:
        ngrok_resp = requests.get("http://localhost:4040/api/tunnels")
    except requests.ConnectionError:
        # I guess ngrok isn't running.
        return None
    ngrok_data = ngrok_resp.json()
    secure_urls = [
        tunnel["public_url"]
        for tunnel in ngrok_data["tunnels"]
        if tunnel["proto"] == "https"
    ]
    # ngrok can be up with no https tunnel established; previously this
    # raised IndexError on secure_urls[0].
    if not secure_urls:
        return None
    return secure_urls[0]
If ngrok is running, it exposes an API on port 4040. We can use that to figure out what URL it has assigned, and suggest that to the user. https://ngrok.com/docs#list-tunnels
def search(self, query, options):
    """ Perform Bugzilla search """
    # NOTE(review): the `options` parameter is never used in this method.
    query["query_format"] = "advanced"
    log.debug("Search query:")
    log.debug(pretty(query))
    # Fetch bug info
    try:
        result = self.server.query(query)
    except xmlrpclib.Fault as error:
        # Ignore non-existent users (this is necessary for users with
        # several email aliases to allow them using --merge/--total)
        if "not a valid username" in unicode(error):
            log.debug(error)
            return []
        # Otherwise suggest to bake bugzilla cookies
        log.error("An error encountered, while searching for bugs.")
        log.debug(error)
        raise ReportError(
            "Have you baked cookies using the 'bugzilla login' command?")
    log.debug("Search result:")
    log.debug(pretty(result))
    # Index bugs by id for joining with history/comments below.
    bugs = dict((bug.id, bug) for bug in result)
    # Fetch bug history
    # Uses the XMLRPC proxy directly (private `_proxy` attribute) to batch
    # the history request for all found bug ids at once.
    log.debug("Fetching bug history")
    result = self.server._proxy.Bug.history({'ids': bugs.keys()})
    log.debug(pretty(result))
    history = dict((bug["id"], bug["history"]) for bug in result["bugs"])
    # Fetch bug comments
    log.debug("Fetching bug comments")
    result = self.server._proxy.Bug.comments({'ids': bugs.keys()})
    log.debug(pretty(result))
    # The comments payload keys bugs by stringified id, hence int(bug).
    comments = dict(
        (int(bug), data["comments"])
        for bug, data in result["bugs"].items())
    # Create bug objects
    return [
        self.parent.bug(
            bugs[id], history[id], comments[id], parent=self.parent)
        for id in bugs]
Perform Bugzilla search
def set_until(self, frame, lineno=None):
    """Stop on the next line number."""
    # NOTE(review): `lineno` is accepted for interface compatibility but is
    # ignored — the Until state is always anchored at the frame's *current*
    # line (frame.f_lineno); confirm whether a caller-supplied target line
    # was intended here.
    self.state = Until(frame, frame.f_lineno)
Stop on the next line number.
def set_led_brightness(self, brightness):
    """Set the LED brightness for the current group/button."""
    # 0x07 is the command byte used for the LED brightness property.
    message = self._create_set_property_msg("_led_brightness", 0x07,
                                            brightness)
    self._send_method(message, self._property_set)
Set the LED brightness for the current group/button.
def randomToggle(self, randomize):
    """Sets the reorder function on this StimulusModel to a randomizer or
    none, alternately"""
    if not randomize:
        # Clearing the reorder function restores the default ordering.
        self._stim.reorder = None
    else:
        self._stim.setReorderFunc(order_function('random'), 'random')
Sets the reorder function on this StimulusModel to a randomizer or none, alternately
def fetch(self):
    """
    Fetch a StepInstance

    :returns: Fetched StepInstance
    :rtype: twilio.rest.studio.v1.flow.engagement.step.StepInstance
    """
    payload = self._version.fetch('GET', self._uri, params=values.of({}))
    solution = self._solution
    return StepInstance(
        self._version,
        payload,
        flow_sid=solution['flow_sid'],
        engagement_sid=solution['engagement_sid'],
        sid=solution['sid'],
    )
Fetch a StepInstance :returns: Fetched StepInstance :rtype: twilio.rest.studio.v1.flow.engagement.step.StepInstance
def NewFile(self, filename, encoding, options):
    """parse an XML file from the filesystem or the network. The parsing
    flags @options are a combination of xmlParserOption. This reuses the
    existing @reader xmlTextReader. """
    # Thin wrapper over the C binding; the return code is passed through
    # unchanged.
    return libxml2mod.xmlReaderNewFile(self._o, filename, encoding, options)
parse an XML file from the filesystem or the network. The parsing flags @options are a combination of xmlParserOption. This reuses the existing @reader xmlTextReader.
def intern_atom(self, name, only_if_exists = 0):
    """Intern the string name, returning its atom number.  If
    only_if_exists is true and the atom does not already exist, it
    will not be created and X.NONE is returned."""
    reply = request.InternAtom(
        display=self.display,
        name=name,
        only_if_exists=only_if_exists,
    )
    return reply.atom
Intern the string name, returning its atom number. If only_if_exists is true and the atom does not already exist, it will not be created and X.NONE is returned.
def memo_Y(f):
    """
    Memoized Y combinator.

    .. testsetup::

        from proso.func import memo_Y

    .. testcode::

        @memo_Y
        def fib(f):
            def inner_fib(n):
                if n > 1:
                    return f(n - 1) + f(n - 2)
                else:
                    return n
            return inner_fib

        print(fib(100))

    .. testoutput::

        354224848179261915075
    """
    cache = {}

    def Yf(*args):
        # Keys are built from reprs so unhashable arguments still work.
        key = tuple(repr(arg) for arg in args)
        if not args:
            # Zero-argument calls are not memoized (matches original).
            return f(Yf)()
        if key not in cache:
            cache[key] = f(Yf)(*args)
        return cache[key]

    return f(Yf)
Memoized Y combinator. .. testsetup:: from proso.func import memo_Y .. testcode:: @memo_Y def fib(f): def inner_fib(n): if n > 1: return f(n - 1) + f(n - 2) else: return n return inner_fib print(fib(100)) .. testoutput:: 354224848179261915075
def pluralize(word):
    """Pluralize an English noun.

    Checks uncountable words first (returned unchanged), then irregular
    nouns (first letter's case preserved), then a cascade of regex suffix
    rules where the first matching rule wins.
    """
    rules = [
        ['(?i)(quiz)$', '\\1zes'],
        ['^(?i)(ox)$', '\\1en'],
        ['(?i)([m|l])ouse$', '\\1ice'],
        ['(?i)(matr|vert|ind)ix|ex$', '\\1ices'],
        ['(?i)(x|ch|ss|sh)$', '\\1es'],
        # NOTE(review): this rule maps words already ending in -ies back to
        # -y (e.g. 'cherries' -> 'cherry'); kept as-is for backward
        # compatibility with the original rule table.
        ['(?i)([^aeiouy]|qu)ies$', '\\1y'],
        ['(?i)([^aeiouy]|qu)y$', '\\1ies'],
        ['(?i)(hive)$', '\\1s'],
        ['(?i)(?:([^f])fe|([lr])f)$', '\\1\\2ves'],
        ['(?i)sis$', 'ses'],
        ['(?i)([ti])um$', '\\1a'],
        ['(?i)(buffal|tomat)o$', '\\1oes'],
        ['(?i)(bu)s$', '\\1ses'],
        ['(?i)(alias|status)', '\\1es'],
        ['(?i)(octop|vir)us$', '\\1i'],
        ['(?i)(ax|test)is$', '\\1es'],
        ['(?i)s$', 's'],
        ['(?i)$', 's'],
    ]

    uncountable_words = ['equipment', 'information', 'rice', 'money',
                         'species', 'series', 'fish', 'sheep']

    irregular_words = {
        'person': 'people',
        'man': 'men',
        'child': 'children',
        'sex': 'sexes',
        'move': 'moves',
    }

    lower_cased_word = word.lower()

    # Uncountable nouns pass through untouched.
    for uncountable_word in uncountable_words:
        if lower_cased_word.endswith(uncountable_word):
            return word

    # Irregular nouns: keep the original first letter so capitalization
    # survives (e.g. 'Person' -> 'People').
    for singular, plural in irregular_words.items():
        match = re.search('(' + singular + ')$', word, re.IGNORECASE)
        if match:
            return re.sub('(?i)' + singular + '$',
                          match.expand('\\1')[0] + plural[1:], word)

    # First matching suffix rule wins.  Groups that did not participate in
    # the match are stripped from a *local* copy of the replacement; the
    # old code mutated the shared `rules` list in place while iterating.
    for pattern, replacement in rules:
        match = re.search(pattern, word, re.IGNORECASE)
        if match:
            for index, group in enumerate(match.groups(), start=1):
                if group is None:
                    replacement = replacement.replace('\\' + str(index), '')
            return re.sub(pattern, replacement, word)

    return word
Pluralize an English noun.
def cli(env, storage_type, size, iops, tier,
        location, snapshot_size, service_offering, billing):
    """Order a file storage volume.

    Valid size and iops options can be found here:
    https://console.bluemix.net/docs/infrastructure/FileStorage/index.html#provisioning
    """
    file_manager = SoftLayer.FileStorageManager(env.client)
    storage_type = storage_type.lower()

    hourly_billing_flag = False
    if billing.lower() == "hourly":
        hourly_billing_flag = True

    # Legacy (non storage_as_a_service) offerings only support monthly billing.
    if service_offering != 'storage_as_a_service':
        click.secho('{} is a legacy storage offering'.format(service_offering), fg='red')
        if hourly_billing_flag:
            raise exceptions.CLIAbort(
                'Hourly billing is only available for the storage_as_a_service service offering'
            )

    # NOTE(review): if storage_type is neither 'performance' nor 'endurance',
    # `order` below is referenced before assignment — presumably the click
    # option restricts the accepted values; confirm.
    if storage_type == 'performance':
        if iops is None:
            raise exceptions.CLIAbort('Option --iops required with Performance')

        if service_offering == 'performance' and snapshot_size is not None:
            raise exceptions.CLIAbort(
                '--snapshot-size is not available for performance service offerings. '
                'Use --service-offering storage_as_a_service'
            )

        try:
            order = file_manager.order_file_volume(
                storage_type=storage_type,
                location=location,
                size=size,
                iops=iops,
                snapshot_size=snapshot_size,
                service_offering=service_offering,
                hourly_billing_flag=hourly_billing_flag
            )
        except ValueError as ex:
            raise exceptions.ArgumentError(str(ex))

    if storage_type == 'endurance':
        if tier is None:
            raise exceptions.CLIAbort(
                'Option --tier required with Endurance in IOPS/GB [0.25,2,4,10]'
            )

        try:
            order = file_manager.order_file_volume(
                storage_type=storage_type,
                location=location,
                size=size,
                tier_level=float(tier),
                snapshot_size=snapshot_size,
                service_offering=service_offering,
                hourly_billing_flag=hourly_billing_flag
            )
        except ValueError as ex:
            raise exceptions.ArgumentError(str(ex))

    # Echo the placed order's items, or a failure hint if no order came back.
    if 'placedOrder' in order.keys():
        click.echo("Order #{0} placed successfully!".format(
            order['placedOrder']['id']))
        for item in order['placedOrder']['items']:
            click.echo(" > %s" % item['description'])
    else:
        click.echo("Order could not be placed! Please verify your options and try again.")
Order a file storage volume. Valid size and iops options can be found here: https://console.bluemix.net/docs/infrastructure/FileStorage/index.html#provisioning
def _parse_weights(weight_args, default_weight=0.6): """Parse list of weight assignments.""" weights_dict = {} r_group_weight = default_weight for weight_arg in weight_args: for weight_assignment in weight_arg.split(','): if '=' not in weight_assignment: raise ValueError( 'Invalid weight assignment: {}'.format(weight_assignment)) key, value = weight_assignment.split('=', 1) value = float(value) if key == 'R': r_group_weight = value elif key == '*': default_weight = value elif hasattr(Atom, key): weights_dict[Atom(key)] = value else: raise ValueError('Invalid element: {}'.format(key)) return weights_dict, r_group_weight, default_weight
Parse list of weight assignments.
def ReadCronJobRun(self, job_id, run_id):
    """Reads a single cron job run from the db."""
    # Linear scan over the in-memory run table for the matching pair.
    matches = (
        run for run in itervalues(self.cronjob_runs)
        if run.cron_job_id == job_id and run.run_id == run_id)
    for run in matches:
        return run
    raise db.UnknownCronJobRunError(
        "Run with job id %s and run id %s not found." % (job_id, run_id))
Reads a single cron job run from the db.
def search(self, **kwargs):
    """
    Method to search pool's based on extends search.

    :param search: Dict containing QuerySets to find pool's.
    :param include: Array containing fields to include on response.
    :param exclude: Array containing fields to exclude on response.
    :param fields: Array containing fields to override default fields.
    :param kind: Determine if result will be detailed ('detail') or basic ('basic').
    :return: Dict containing pool's
    """
    url = self.prepare_url('api/v3/pool/', kwargs)
    return super(ApiPool, self).get(url)
Method to search pool's based on extends search. :param search: Dict containing QuerySets to find pool's. :param include: Array containing fields to include on response. :param exclude: Array containing fields to exclude on response. :param fields: Array containing fields to override default fields. :param kind: Determine if result will be detailed ('detail') or basic ('basic'). :return: Dict containing pool's
def is_choked_turbulent_g(x, Fgamma, xT=None, xTP=None):
    r'''Calculates if a gas flow in IEC 60534 calculations is critical or
    not, for use in IEC 60534 gas valve sizing calculations.
    Either xT or xTP must be provided, depending on the calculation process.

    .. math::
        x \ge F_\gamma x_T

    .. math::
        x \ge F_\gamma x_{TP}

    Parameters
    ----------
    x : float
        Differential pressure over inlet pressure, [-]
    Fgamma : float
        Specific heat ratio factor [-]
    xT : float, optional
        Pressure difference ratio factor of a valve without fittings at
        choked flow [-]
    xTP : float
        Pressure difference ratio factor of a valve with fittings at choked
        flow [-]

    Returns
    -------
    choked : bool
        Whether or not the flow is choked [-]

    Examples
    --------
    Example 3, compressible flow, non-choked with attached fittings:

    >>> is_choked_turbulent_g(0.544, 0.929, 0.6)
    False
    >>> is_choked_turbulent_g(0.544, 0.929, xTP=0.625)
    False

    References
    ----------
    .. [1] IEC 60534-2-1 / ISA-75.01.01-2007
    '''
    # xT (no fittings) takes precedence over xTP (with fittings).
    if xT:
        threshold = xT
    elif xTP:
        threshold = xTP
    else:
        raise Exception('Either xT or xTP is needed')
    return x >= Fgamma*threshold
r'''Calculates if a gas flow in IEC 60534 calculations is critical or not, for use in IEC 60534 gas valve sizing calculations. Either xT or xTP must be provided, depending on the calculation process. .. math:: x \ge F_\gamma x_T .. math:: x \ge F_\gamma x_{TP} Parameters ---------- x : float Differential pressure over inlet pressure, [-] Fgamma : float Specific heat ratio factor [-] xT : float, optional Pressure difference ratio factor of a valve without fittings at choked flow [-] xTP : float Pressure difference ratio factor of a valve with fittings at choked flow [-] Returns ------- choked : bool Whether or not the flow is choked [-] Examples -------- Example 3, compressible flow, non-choked with attached fittings: >>> is_choked_turbulent_g(0.544, 0.929, 0.6) False >>> is_choked_turbulent_g(0.544, 0.929, xTP=0.625) False References ---------- .. [1] IEC 60534-2-1 / ISA-75.01.01-2007
def get_template_names(self):
    """Default template list for the datagrid delete view.

    Appends the easyui confirmation template after the inherited defaults.
    """
    templates = super(EasyUIDeleteView, self).get_template_names()
    templates.append('easyui/confirm_delete.html')
    return templates
datagrid的默认模板
def draw(self, mode='triangles', indices=None, check_error=True):
    """ Draw the attribute arrays in the specified mode.

    Parameters
    ----------
    mode : str | GL_ENUM
        'points', 'lines', 'line_strip', 'line_loop', 'triangles',
        'triangle_strip', or 'triangle_fan'.
    indices : array
        Array of indices to draw.
    check_error:
        Check error after draw.

    Notes
    -----
    ``check_error`` is not consulted inside this method; error checking
    presumably happens when the GLIR queue is flushed (TODO: confirm).
    """
    # Invalidate buffer (data has already been sent)
    self._buffer = None

    # Normalize and validate the draw mode up front.
    mode = check_enum(mode)
    if mode not in ['points', 'lines', 'line_strip', 'line_loop',
                    'triangles', 'triangle_strip', 'triangle_fan']:
        raise ValueError('Invalid draw mode: %r' % mode)

    # Check leftover variables, warn, discard them
    # In GLIR we check whether all attributes are indeed set
    for name in self._pending_variables:
        # logger.warn is a deprecated alias; use warning.
        logger.warning('Variable %r is given but not known.' % name)
    self._pending_variables = {}

    # Check attribute sizes: every vertex attribute must have the same
    # element count or the vertices would not line up.
    attributes = [vbo for vbo in self._user_variables.values()
                  if isinstance(vbo, DataBuffer)]
    sizes = [a.size for a in attributes]
    if len(attributes) < 1:
        raise RuntimeError('Must have at least one attribute')
    if not all(s == sizes[0] for s in sizes[1:]):
        msg = '\n'.join(['%s: %s' % (str(a), a.size)
                         for a in attributes])
        raise RuntimeError('All attributes must have the same size, got:\n'
                           '%s' % msg)

    # Get the glir queue that we need now
    canvas = get_current_canvas()
    assert canvas is not None
    # Associate canvas
    canvas.context.glir.associate(self.glir)

    # Indexbuffer
    if isinstance(indices, IndexBuffer):
        # Indexed draw: the selection carries (id, GL index type, count).
        canvas.context.glir.associate(indices.glir)
        logger.debug("Program drawing %r with index buffer" % mode)
        gltypes = {np.dtype(np.uint8): 'UNSIGNED_BYTE',
                   np.dtype(np.uint16): 'UNSIGNED_SHORT',
                   np.dtype(np.uint32): 'UNSIGNED_INT'}
        selection = indices.id, gltypes[indices.dtype], indices.size
        canvas.context.glir.command('DRAW', self._id, mode, selection)
    elif indices is None:
        # Non-indexed draw: draw all vertices of the size-checked attributes.
        selection = 0, attributes[0].size
        logger.debug("Program drawing %r with %r" % (mode, selection))
        canvas.context.glir.command('DRAW', self._id, mode,
                                    selection)
    else:
        raise TypeError("Invalid index: %r (must be IndexBuffer)" %
                        indices)

    # Process GLIR commands
    canvas.context.flush_commands()
Draw the attribute arrays in the specified mode. Parameters ---------- mode : str | GL_ENUM 'points', 'lines', 'line_strip', 'line_loop', 'triangles', 'triangle_strip', or 'triangle_fan'. indices : array Array of indices to draw. check_error: Check error after draw.
def cdr(ol,**kwargs):
    '''Return everything after the first element of *ol*.

    By default a deep copy of the tail is returned; with ``mode="original"``
    the head is popped off *ol* in place and the same list is returned.

        from elist.elist import *
        ol=[1,2,3,4]
        id(ol)
        new = cdr(ol)
        new
        id(new)
        ####
        ol=[1,2,3,4]
        id(ol)
        rslt = cdr(ol,mode="original")
        rslt
        id(rslt)
    '''
    mode = kwargs.get('mode', "new")
    if mode == "new":
        # Deep-copy first so nested elements are independent of the input.
        return copy.deepcopy(ol)[1:]
    # In-place variant: drop the head and hand back the same list object.
    ol.pop(0)
    return ol
from elist.elist import * ol=[1,2,3,4] id(ol) new = cdr(ol) new id(new) #### ol=[1,2,3,4] id(ol) rslt = cdr(ol,mode="original") rslt id(rslt)
def get_encapsulated_payload_class(self):
    """
    get the class that holds the encapsulated payload of the TZSP packet
    :return: class representing the payload, Raw() on error
    """
    known_classes = TZSP.ENCAPSULATED_PROTOCOL_CLASSES
    if self.encapsulated_protocol in known_classes:
        return known_classes[self.encapsulated_protocol]
    # Unknown protocol id: warn and fall back to an opaque Raw payload.
    warning(
        'unknown or invalid encapsulation type (%i) - returning payload as raw()' % self.encapsulated_protocol)  # noqa: E501
    return Raw
get the class that holds the encapsulated payload of the TZSP packet :return: class representing the payload, Raw() on error
def create_wordpress(self, service_id, version_number, name, path, comment=None):
    """Create a wordpress for the specified service and version."""
    form_fields = {
        "name": name,
        "path": path,
        "comment": comment,
    }
    body = self._formdata(form_fields, FastlyWordpress.FIELDS)
    endpoint = "/service/%s/version/%d/wordpress" % (service_id, version_number)
    content = self._fetch(endpoint, method="POST", body=body)
    return FastlyWordpress(self, content)
Create a wordpress for the specified service and version.
def get_unique_ajps( benchmark_runs ):
    """ Determines which join parameters are unique """
    # Collect, per parameter, the set of short names observed across runs.
    short_names_by_param = {}
    for run in benchmark_runs:
        for param, info in run.additional_join_parameters.items():
            short_names_by_param.setdefault(param, set()).add(info['short_name'])
    # A parameter is "unique" when more than one short name was seen for it.
    return [param for param, names in short_names_by_param.items()
            if len(names) > 1]
Determines which join parameters are unique
def roundClosestValid(val, res, decimals=None):
    """ round to closest resolution """
    res_text = str(res)
    if decimals is None and "." in res_text:
        # Infer precision from the resolution's fractional digits.
        decimals = len(res_text.split('.')[1])
    snapped = round(val / res) * res
    return round(snapped, decimals)
round to closest resolution
def revoke_cert(
        ca_name,
        CN,
        cacert_path=None,
        ca_filename=None,
        cert_path=None,
        cert_filename=None,
        crl_file=None,
        digest='sha256',
):
    '''
    Revoke a certificate.

    .. versionadded:: 2015.8.0

    ca_name
        Name of the CA.

    CN
        Common name matching the certificate signing request.

    cacert_path
        Absolute path to ca certificates root directory.

    ca_filename
        Alternative filename for the CA.

    cert_path
        Path to the cert file.

    cert_filename
        Alternative filename for the certificate, useful when using special
        characters in the CN.

    crl_file
        Full path to the CRL file.

    digest
        The message digest algorithm. Must be a string describing a digest
        algorithm supported by OpenSSL (by EVP_get_digestbyname, specifically).
        For example, "md5" or "sha1". Default: 'sha256'

    CLI Example:

    .. code-block:: bash

        salt '*' tls.revoke_cert ca_name='koji' \
                ca_filename='ca' \
                crl_file='/etc/openvpn/team1/crl.pem'

    '''
    set_ca_path(cacert_path)
    ca_dir = '{0}/{1}'.format(cert_base_path(), ca_name)
    # Fill in the conventional defaults for anything the caller omitted.
    if ca_filename is None:
        ca_filename = '{0}_ca_cert'.format(ca_name)
    if cert_path is None:
        cert_path = '{0}/{1}/certs'.format(_cert_base_path(), ca_name)
    if cert_filename is None:
        cert_filename = '{0}'.format(CN)
    # Load the CA certificate and private key; both are required to sign
    # the CRL at the end.
    try:
        with salt.utils.files.fopen('{0}/{1}/{2}.crt'.format(
                cert_base_path(), ca_name, ca_filename)) as fp_:
            ca_cert = OpenSSL.crypto.load_certificate(
                OpenSSL.crypto.FILETYPE_PEM, fp_.read()
            )
        with salt.utils.files.fopen('{0}/{1}/{2}.key'.format(
                cert_base_path(), ca_name, ca_filename)) as fp_:
            ca_key = OpenSSL.crypto.load_privatekey(
                OpenSSL.crypto.FILETYPE_PEM, fp_.read()
            )
    except IOError:
        return 'There is no CA named "{0}"'.format(ca_name)
    client_cert = _read_cert('{0}/{1}.crt'.format(cert_path, cert_filename))
    if client_cert is None:
        return 'There is no client certificate named "{0}"'.format(CN)
    index_file, expire_date, serial_number, subject = _get_basic_info(
        ca_name,
        client_cert,
        ca_dir)
    # Build the index-file records: 'V' marks a valid entry, 'R' a revoked
    # one (expiry, revocation date, serial, subject are tab-separated).
    index_serial_subject = '{0}\tunknown\t{1}'.format(
        serial_number,
        subject)
    index_v_data = 'V\t{0}\t\t{1}'.format(
        expire_date,
        index_serial_subject)
    index_r_data_pattern = re.compile(
        r"R\t" +
        expire_date +
        r"\t\d{12}Z\t" +
        re.escape(index_serial_subject))
    index_r_data = 'R\t{0}\t{1}\t{2}'.format(
        expire_date,
        _four_digit_year_to_two_digit(datetime.utcnow()),
        index_serial_subject)
    ret = {}
    # Scan the CA index: bail out if the certificate is already revoked,
    # otherwise flip its 'V' entry to the 'R' form built above.
    with salt.utils.files.fopen(index_file) as fp_:
        for line in fp_:
            line = salt.utils.stringutils.to_unicode(line)
            if index_r_data_pattern.match(line):
                revoke_date = line.split('\t')[2]
                try:
                    datetime.strptime(revoke_date, two_digit_year_fmt)
                    return ('"{0}/{1}.crt" was already revoked, '
                            'serial number: {2}').format(
                        cert_path,
                        cert_filename,
                        serial_number
                    )
                except ValueError:
                    ret['retcode'] = 1
                    # NOTE(review): these two adjacent string literals
                    # concatenate without a space ("...does not matchformat...").
                    ret['comment'] = ("Revocation date '{0}' does not match"
                                      "format '{1}'".format(
                                          revoke_date,
                                          two_digit_year_fmt))
                    return ret
            elif index_serial_subject in line:
                __salt__['file.replace'](
                    index_file,
                    index_v_data,
                    index_r_data,
                    backup=False)
                break
    # Rebuild the CRL from every 'R' (revoked) line in the index; fields[3]
    # is the serial number, fields[2] the two-digit-year revocation date.
    crl = OpenSSL.crypto.CRL()
    with salt.utils.files.fopen(index_file) as fp_:
        for line in fp_:
            line = salt.utils.stringutils.to_unicode(line)
            if line.startswith('R'):
                fields = line.split('\t')
                revoked = OpenSSL.crypto.Revoked()
                revoked.set_serial(salt.utils.stringutils.to_bytes(fields[3]))
                revoke_date_2_digit = datetime.strptime(fields[2],
                                                        two_digit_year_fmt)
                revoked.set_rev_date(salt.utils.stringutils.to_bytes(
                    revoke_date_2_digit.strftime(four_digit_year_fmt)
                ))
                crl.add_revoked(revoked)
    # Sign the CRL with the CA key using the requested digest.
    crl_text = crl.export(ca_cert, ca_key,
                          digest=salt.utils.stringutils.to_bytes(digest))
    if crl_file is None:
        crl_file = '{0}/{1}/crl.pem'.format(
            _cert_base_path(),
            ca_name
        )
    if os.path.isdir(crl_file):
        ret['retcode'] = 1
        ret['comment'] = 'crl_file "{0}" is an existing directory'.format(
            crl_file)
        return ret
    with salt.utils.files.fopen(crl_file, 'w') as fp_:
        fp_.write(salt.utils.stringutils.to_str(crl_text))
    return ('Revoked Certificate: "{0}/{1}.crt", '
            'serial number: {2}').format(
        cert_path,
        cert_filename,
        serial_number
    )
Revoke a certificate. .. versionadded:: 2015.8.0 ca_name Name of the CA. CN Common name matching the certificate signing request. cacert_path Absolute path to ca certificates root directory. ca_filename Alternative filename for the CA. cert_path Path to the cert file. cert_filename Alternative filename for the certificate, useful when using special characters in the CN. crl_file Full path to the CRL file. digest The message digest algorithm. Must be a string describing a digest algorithm supported by OpenSSL (by EVP_get_digestbyname, specifically). For example, "md5" or "sha1". Default: 'sha256' CLI Example: .. code-block:: bash salt '*' tls.revoke_cert ca_name='koji' \ ca_filename='ca' \ crl_file='/etc/openvpn/team1/crl.pem'
def set_save_directory(base, source):
    """Sets the root save directory for saving screenshots.

    Screenshots will be saved in subdirectories under this directory by
    browser window size.
    """
    directory = os.path.join(base, source)
    if not os.path.isdir(directory):
        os.makedirs(directory)
    world.screenshot_root = directory
Sets the root save directory for saving screenshots. Screenshots will be saved in subdirectories under this directory by browser window size.
def getDateFields(fc):
    """
       Returns a list of fields that are of type DATE
       Input:
          fc - feature class or table path
       Output:
          List of date field names as strings
    """
    # Guard clause: arcpy is an optional dependency.
    if not arcpyFound:
        raise Exception("ArcPy is required to use this function")
    date_fields = arcpy.ListFields(fc, field_type="Date")
    return [date_field.name for date_field in date_fields]
Returns a list of fields that are of type DATE Input: fc - feature class or table path Output: List of date field names as strings
def natural_neighbor_point(xp, yp, variable, grid_loc, tri, neighbors, triangle_info):
    r"""Generate a natural neighbor interpolation of the observations to the given point.

    This uses the Liang and Hale approach [Liang2010]_. The interpolation will fail if
    the grid point has no natural neighbors.

    Parameters
    ----------
    xp: (N, ) ndarray
        x-coordinates of observations
    yp: (N, ) ndarray
        y-coordinates of observations
    variable: (N, ) ndarray
        observation values associated with (xp, yp) pairs.
        IE, variable[i] is a unique observation at (xp[i], yp[i])
    grid_loc: (float, float)
        Coordinates of the grid point at which to calculate the
        interpolation.
    tri: object
        Delaunay triangulation of the observations.
    neighbors: (N, ) ndarray
        Simplex codes of the grid point's natural neighbors. The codes
        will correspond to codes in the triangulation.
    triangle_info: dictionary
        Pre-calculated triangle attributes for quick look ups. Requires
        items 'cc' (circumcenters) and 'r' (radii) to be associated with
        each simplex code key from the delaunay triangulation.

    Returns
    -------
    value: float
       Interpolated value for the grid location
    """
    # Walk the boundary of the grid point's natural-neighbor region,
    # one edge vertex at a time.
    edges = geometry.find_local_boundary(tri, neighbors)
    edge_vertices = [segment[0] for segment in geometry.order_edges(edges)]
    num_vertices = len(edge_vertices)
    p1 = edge_vertices[0]
    p2 = edge_vertices[1]
    c1 = geometry.circumcenter(grid_loc, tri.points[p1], tri.points[p2])
    polygon = [c1]
    area_list = []
    total_area = 0.0
    for i in range(num_vertices):
        # Advance the sliding vertex pair (p2, p3) around the boundary.
        p3 = edge_vertices[(i + 2) % num_vertices]
        try:
            c2 = geometry.circumcenter(grid_loc, tri.points[p3], tri.points[p2])
            polygon.append(c2)
            # Add circumcenters of every neighbor triangle containing p2 to
            # form p2's contribution polygon.
            for check_tri in neighbors:
                if p2 in tri.simplices[check_tri]:
                    polygon.append(triangle_info[check_tri]['cc'])
            # Hull of the collected circumcenters gives the polygon whose
            # area weights p2's observation value.
            pts = [polygon[i] for i in ConvexHull(polygon).vertices]
            value = variable[(tri.points[p2][0] == xp) & (tri.points[p2][1] == yp)]
            cur_area = geometry.area(pts)
            total_area += cur_area
            area_list.append(cur_area * value[0])
        except (ZeroDivisionError, qhull.QhullError) as e:
            # Degenerate geometry: log and give up on this grid point.
            message = ('Error during processing of a grid. '
                       'Interpolation will continue but be mindful '
                       'of errors in output. ') + str(e)
            log.warning(message)
            return np.nan
        # Start the next polygon from the shared circumcenter and shift the
        # vertex window forward.
        polygon = [c2]
        p2 = p3
    # Area-weighted average of the neighbor values.
    return sum(x / total_area for x in area_list)
r"""Generate a natural neighbor interpolation of the observations to the given point. This uses the Liang and Hale approach [Liang2010]_. The interpolation will fail if the grid point has no natural neighbors. Parameters ---------- xp: (N, ) ndarray x-coordinates of observations yp: (N, ) ndarray y-coordinates of observations variable: (N, ) ndarray observation values associated with (xp, yp) pairs. IE, variable[i] is a unique observation at (xp[i], yp[i]) grid_loc: (float, float) Coordinates of the grid point at which to calculate the interpolation. tri: object Delaunay triangulation of the observations. neighbors: (N, ) ndarray Simplex codes of the grid point's natural neighbors. The codes will correspond to codes in the triangulation. triangle_info: dictionary Pre-calculated triangle attributes for quick look ups. Requires items 'cc' (circumcenters) and 'r' (radii) to be associated with each simplex code key from the delaunay triangulation. Returns ------- value: float Interpolated value for the grid location
def dumps(obj, **kwargs):
    ''' Serialize `obj` to a JSON formatted `str`. Accepts the same arguments
    as `json` module in stdlib.

    :param obj: a JSON serializable Python object.
    :param kwargs: all the arguments that `json.dumps <http://docs.python.org/
                   2/library/json.html#json.dumps>`_ accepts.
    :raises: commentjson.JSONLibraryException
    :returns str: serialized string.
    '''
    try:
        serialized = json.dumps(obj, **kwargs)
    except Exception as exc:
        # Wrap any failure from the underlying json library.
        raise JSONLibraryException(exc)
    return serialized
Serialize `obj` to a JSON formatted `str`. Accepts the same arguments as `json` module in stdlib. :param obj: a JSON serializable Python object. :param kwargs: all the arguments that `json.dumps <http://docs.python.org/ 2/library/json.html#json.dumps>`_ accepts. :raises: commentjson.JSONLibraryException :returns str: serialized string.
def insert_column(self, name, data, colnum=None):
    """
    Insert a new column.

    parameters
    ----------
    name: string
        The column name
    data:
        The data to write into the new column.
    colnum: int, optional
        The column number for the new column, zero-offset.  Default
        is to add the new column after the existing ones.

    Notes
    -----
    This method is used un-modified by ascii tables as well.
    """
    if name in self._colnames:
        raise ValueError("column '%s' already exists" % name)
    if IS_PY3 and data.dtype.char == 'U':
        # fast dtype conversion using an empty array
        # we could hack at the actual text description, but using
        # the numpy API is probably safer
        # this also avoids doing a dtype conversion on every array
        # element which could be expensive
        descr = numpy.empty(1).astype(data.dtype).astype('S').dtype.descr
    else:
        descr = data.dtype.descr
    if len(descr) > 1:
        raise ValueError("you can only insert a single column, "
                         "requested: %s" % descr)
    # Rebuild the descriptor with the requested column name, carrying the
    # trailing array shape through for multidimensional columns.
    this_descr = descr[0]
    this_descr = [name, this_descr[1]]
    if len(data.shape) > 1:
        this_descr += [data.shape[1:]]
    this_descr = tuple(this_descr)
    name, fmt, dims = _npy2fits(
        this_descr,
        table_type=self._table_type_str)
    if dims is not None:
        dims = [dims]
    # The +1 offsets convert our zero-offset numbers to the 1-offset
    # numbering the underlying FITS layer expects.
    if colnum is None:
        new_colnum = len(self._info['colinfo']) + 1
    else:
        new_colnum = colnum+1
    self._FITS.insert_col(self._ext+1, new_colnum, name, fmt, tdim=dims)
    # Refresh cached column info before writing the actual data.
    self._update_info()
    self.write_column(name, data)
Insert a new column. parameters ---------- name: string The column name data: The data to write into the new column. colnum: int, optional The column number for the new column, zero-offset. Default is to add the new column after the existing ones. Notes ----- This method is used un-modified by ascii tables as well.
def process_pem(self, data, name):
    """
    PEM processing - splitting further by the type of the records
    :param data: raw PEM contents (converted with to_string before parsing)
    :param name: source file name, used for logging
    :return:
    """
    try:
        # NOTE(review): `ret` is only ever returned empty -- the loop below
        # returns on the first recognised record, so additional records in
        # the same PEM file are ignored. Confirm whether that is intended.
        ret = []
        data = to_string(data)
        # Split on BEGIN markers; each part becomes one candidate record.
        parts = re.split(r'-----BEGIN', data)
        if len(parts) == 0:
            return None
        if len(parts[0]) == 0:
            parts.pop(0)
        # Re-attach the marker that re.split consumed.
        crt_arr = ['-----BEGIN' + x for x in parts]
        for idx, pem_rec in enumerate(crt_arr):
            pem_rec = pem_rec.strip()
            if len(pem_rec) == 0:
                continue
            # Dispatch on the record header; the CSR check must come before
            # the more general CERTIF prefix check.
            if startswith(pem_rec, '-----BEGIN CERTIFICATE REQUEST'):
                return self.process_pem_csr(pem_rec, name, idx)
            elif startswith(pem_rec, '-----BEGIN CERTIF'):
                return self.process_pem_cert(pem_rec, name, idx)
            elif startswith(pem_rec, '-----BEGIN '):  # fallback
                return self.process_pem_rsakey(pem_rec, name, idx)
        return ret
    except Exception as e:
        # Best-effort parser: log the failure and report "nothing parsed".
        logger.debug('Exception processing PEM file %s : %s' % (name, e))
        self.trace_logger.log(e)
    return None
PEM processing - splitting further by the type of the records :param data: :param name: :return:
def __substitute_replace_pairs(self):
    """
    Substitutes all replace pairs in the source of the stored routine.
    """
    self._set_magic_constants()

    rewritten_lines = []
    for lineno, line in enumerate(self._routine_source_code_lines, start=1):
        # __LINE__ expands to the (1-based) current source line number.
        self._replace['__LINE__'] = "'%d'" % lineno
        for search, replace in self._replace.items():
            matches = re.findall(search, line, re.IGNORECASE)
            if matches:
                # Substitute the first match found for this pattern.
                line = line.replace(matches[0], replace)
        rewritten_lines.append(line)

    self._routine_source_code = "\n".join(rewritten_lines)
Substitutes all replace pairs in the source of the stored routine.
def get_sorted_structure(self, key=None, reverse=False):
    """
    Get a sorted copy of the structure. The parameters have the same
    meaning as in list.sort. By default, sites are sorted by the
    electronegativity of the species.

    Args:
        key: Specifies a function of one argument that is used to extract
            a comparison key from each list element: key=str.lower. The
            default value is None (compare the elements directly).
        reverse (bool): If set to True, then the list elements are sorted
            as if each comparison were reversed.
    """
    # Sort the sites, then rebuild a structure of the same class with the
    # charge carried over.
    ordered_sites = sorted(self, key=key, reverse=reverse)
    return self.__class__.from_sites(ordered_sites, charge=self._charge)
Get a sorted copy of the structure. The parameters have the same meaning as in list.sort. By default, sites are sorted by the electronegativity of the species. Args: key: Specifies a function of one argument that is used to extract a comparison key from each list element: key=str.lower. The default value is None (compare the elements directly). reverse (bool): If set to True, then the list elements are sorted as if each comparison were reversed.
def Parse(self, cmd, args, stdout, stderr, return_val, time_taken,
          knowledge_base):
    """Parse the sysctl output."""
    _ = stderr, time_taken, args, knowledge_base  # Unused.
    self.CheckReturn(cmd, return_val)
    result = rdf_protodict.AttributedDict()
    # The KeyValueParser generates an ordered dict by default. The sysctl vals
    # aren't ordering dependent, but there's no need to un-order it.
    for raw_key, values in iteritems(self.lexer.ParseToOrderedDict(stdout)):
        # Dots are not valid in attribute names; use underscores instead.
        attr_name = raw_key.replace(".", "_")
        result[attr_name] = values[0] if len(values) == 1 else values
    return [result]
Parse the sysctl output.
def get_orderbook_ticker(self, **params):
    """Best bid/ask price and quantity for a symbol or symbols.

    https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#symbol-order-book-ticker

    :param symbol:
    :type symbol: str

    :returns: API response

    .. code-block:: python

        {
            "symbol": "LTCBTC",
            "bidPrice": "4.00000000",
            "bidQty": "431.00000000",
            "askPrice": "4.00000200",
            "askQty": "9.00000000"
        }

    OR

    .. code-block:: python

        [
            {
                "symbol": "LTCBTC",
                "bidPrice": "4.00000000",
                "bidQty": "431.00000000",
                "askPrice": "4.00000200",
                "askQty": "9.00000000"
            },
            {
                "symbol": "ETHBTC",
                "bidPrice": "0.07946700",
                "bidQty": "9.00000000",
                "askPrice": "100000.00000000",
                "askQty": "1000.00000000"
            }
        ]

    :raises: BinanceRequestException, BinanceAPIException
    """
    endpoint = 'ticker/bookTicker'
    return self._get(endpoint, data=params,
                     version=self.PRIVATE_API_VERSION)
Latest price for a symbol or symbols. https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#symbol-order-book-ticker :param symbol: :type symbol: str :returns: API response .. code-block:: python { "symbol": "LTCBTC", "bidPrice": "4.00000000", "bidQty": "431.00000000", "askPrice": "4.00000200", "askQty": "9.00000000" } OR .. code-block:: python [ { "symbol": "LTCBTC", "bidPrice": "4.00000000", "bidQty": "431.00000000", "askPrice": "4.00000200", "askQty": "9.00000000" }, { "symbol": "ETHBTC", "bidPrice": "0.07946700", "bidQty": "9.00000000", "askPrice": "100000.00000000", "askQty": "1000.00000000" } ] :raises: BinanceRequestException, BinanceAPIException
def align(expnums, ccd, version='s', dry_run=False):
    """Create a 'shifts' file that transforms the space/flux/time scale of all images to the first image.

    This function relies on the .fwhm, .trans.jmp, .phot and .zeropoint.used files for inputs.
    The scaling we are computing here is for use in planting sources into the image at the same sky/flux locations
    while accounting for motions of sources with time.

    :param expnums: list of MegaPrime exposure numbers to add artificial KBOs to,
                    the first frame in the list is the reference.
    :param ccd: which ccd to work on.
    :param version: Add sources to the 'o', 'p' or 's' images
    :param dry_run: don't push results to VOSpace.
    """
    # Get the images and supporting files that we need from the VOSpace area
    # get_image and get_file check if the image/file is already on disk.
    # re-computed fluxes from the PSF stars and then recompute x/y/flux scaling.

    # some dictionaries to hold the various scale
    pos = {}
    apcor = {}
    mags = {}
    zmag = {}
    mjdates = {}

    # Note: the loop includes the reference frame expnums[0] itself; its
    # shifts relative to itself come out as identity/zero offsets.
    for expnum in expnums:
        filename = storage.get_image(expnum, ccd=ccd, version=version)
        zmag[expnum] = storage.get_zeropoint(expnum, ccd, prefix=None, version=version)
        mjdates[expnum] = float(fits.open(filename)[0].header.get('MJD-OBS'))
        # .apcor file holds aperture-correction values: small/large aperture
        # radii and the correction terms used below.
        apcor[expnum] = [float(x) for x in
                         open(storage.get_file(expnum,
                                               ccd=ccd,
                                               version=version,
                                               ext=storage.APCOR_EXT)).read().split()]
        keys = ['crval1', 'cd1_1', 'cd1_2', 'crval2', 'cd2_1', 'cd2_2']
        # load the .trans.jmp values into a 'wcs' like dictionary.
        # .trans.jmp maps current frame to reference frame in pixel coordinates.
        # the reference frame of all the frames supplied must be the same.
        shifts = dict(zip(keys, [float(x) for x in open(storage.get_file(expnum,
                                                                         ccd=ccd,
                                                                         version=version,
                                                                         ext='trans.jmp')).read().split()]))
        shifts['crpix1'] = 0.0
        shifts['crpix2'] = 0.0
        # now create a wcs object based on those transforms, this wcs links the current frame's
        # pixel coordinates to the reference frame's pixel coordinates.
        w = get_wcs(shifts)

        # get the PHOT file that was produced by the mkpsf routine
        logging.debug("Reading .phot file {}".format(expnum))
        phot = ascii.read(storage.get_file(expnum, ccd=ccd, version=version, ext='phot'), format='daophot')

        # compute the small-aperture magnitudes of the stars used in the PSF
        import daophot
        logging.debug("Running phot on {}".format(filename))
        mags[expnum] = daophot.phot(filename,
                                    phot['XCENTER'],
                                    phot['YCENTER'],
                                    aperture=apcor[expnum][0],
                                    sky=apcor[expnum][1] + 1,
                                    swidth=apcor[expnum][0],
                                    zmag=zmag[expnum])

        # covert the x/y positions to positions in Frame 1 based on the trans.jmp values.
        logging.debug("Doing the XY translation to refrence frame: {}".format(w))
        (x, y) = w.wcs_pix2world(mags[expnum]["XCENTER"], mags[expnum]["YCENTER"], 1)
        pos[expnum] = numpy.transpose([x, y])
        # match this exposures PSF stars position against those in the first image of the set.
        logging.debug("Matching lists")
        idx1, idx2 = util.match_lists(pos[expnums[0]], pos[expnum])

        # compute the magnitdue offset between the current frame and the reference.
        # Both sides are aperture-corrected before differencing; masked where
        # no positional match was found.
        dmags = numpy.ma.array(mags[expnums[0]]["MAG"] - apcor[expnums[0]][2] -
                               (mags[expnum]["MAG"][idx1] - apcor[expnum][2]),
                               mask=idx1.mask)
        dmags.sort()
        logging.debug("Computed dmags between input and reference: {}".format(dmags))
        # error_count is only a breadcrumb counter for debug logging.
        error_count = 0
        error_count += 1
        logging.debug("{}".format(error_count))

        # compute the median and determine if that shift is small compared to the scatter.
        try:
            midx = int(numpy.sum(numpy.any([~dmags.mask], axis=0)) / 2.0)
            dmag = float(dmags[midx])
            logging.debug("Computed a mag delta of: {}".format(dmag))
        except Exception as e:
            logging.error(str(e))
            logging.error("Failed to compute mag offset between plant and found using: {}".format(dmags))
            # Sentinel value recorded in the shifts file on failure.
            dmag = 99.99
        error_count += 1
        logging.debug("{}".format(error_count))

        try:
            if math.fabs(dmag) > 3 * (dmags.std() + 0.01):
                logging.warning("Magnitude shift {} between {} and {} is large: {}".format(dmag,
                                                                                           expnums[0],
                                                                                           expnum,
                                                                                           shifts))
        except Exception as e:
            logging.error(str(e))
        error_count += 1
        logging.debug("{}".format(error_count))

        # Record the flux/time offsets alongside the spatial transform.
        shifts['dmag'] = dmag
        shifts['emag'] = dmags.std()
        shifts['nmag'] = len(dmags.mask) - dmags.mask.sum()
        shifts['dmjd'] = mjdates[expnums[0]] - mjdates[expnum]
        shift_file = os.path.basename(storage.get_uri(expnum, ccd, version, '.shifts'))
        error_count += 1
        logging.debug("{}".format(error_count))
        try:
            fh = open(shift_file, 'w')
            fh.write(json.dumps(shifts, sort_keys=True, indent=4, separators=(',', ': ')))
            fh.write('\n')
            fh.close()
        except Exception as e:
            logging.error("Creation of SHIFTS file failed while trying to write: {}".format(shifts))
            raise e
        error_count += 1
        logging.debug("{}".format(error_count))

        if not dry_run:
            # Push the per-exposure shifts file back to VOSpace.
            storage.copy(shift_file, storage.get_uri(expnum, ccd, version, '.shifts'))
Create a 'shifts' file that transforms the space/flux/time scale of all images to the first image. This function relies on the .fwhm, .trans.jmp, .phot and .zeropoint.used files for inputs. The scaling we are computing here is for use in planting sources into the image at the same sky/flux locations while accounting for motions of sources with time. :param expnums: list of MegaPrime exposure numbers to add artificial KBOs to, the first frame in the list is the reference. :param ccd: which ccd to work on. :param version: Add sources to the 'o', 'p' or 's' images :param dry_run: don't push results to VOSpace.
def mkdummy(name, **attrs):
    """Make a placeholder object that uses its own name for its repr"""
    def _repr(self):
        return "<%s>" % name
    # Build a throwaway class carrying the repr plus any extra attributes,
    # then return a single instance of it.
    placeholder_cls = type(name, (), dict(__repr__=_repr, **attrs))
    return placeholder_cls()
Make a placeholder object that uses its own name for its repr
def average(numbers, averagetype='mean'):
    """
    Find the average of a list of numbers

    :type numbers: list
    :param numbers: The list of numbers to find the average of.

    :type averagetype: string
    :param averagetype: The type of average to find
        ('mean', 'mode', 'median', 'min', 'max' or 'range').

    :raises ValueError: If the list cannot be averaged or the average
        type is not recognised.

    >>> average([1, 2, 3, 4, 5], 'median')
    3
    """
    try:
        # Probe the list: statistics.mean raises StatisticsError for an
        # empty list and TypeError for non-numeric elements. The previous
        # code caught RuntimeError, which statistics.mean never raises, so
        # bad input escaped with the wrong exception type.
        statistics.mean(numbers)
    except (statistics.StatisticsError, TypeError):
        raise ValueError('Unable to parse the list.')

    # Dispatch table is clearer than a long if/elif chain.
    dispatch = {
        'mean': statistics.mean,
        'mode': statistics.mode,
        'median': statistics.median,
        'min': min,
        'max': max,
        'range': lambda nums: max(nums) - min(nums),
    }
    try:
        func = dispatch[averagetype.lower()]
    except KeyError:
        raise ValueError('Invalid average type provided.')
    return func(numbers)
Find the average of a list of numbers :type numbers: list :param numbers: The list of numbers to find the average of. :type averagetype: string :param averagetype: The type of average to find. >>> average([1, 2, 3, 4, 5], 'median') 3
def add(self, *args):
    """Add constraints to the model.

    Forwards ``*args`` to the underlying LP problem's
    ``add_linear_constraints`` and records the constraints it creates on
    this object so they remain accessible later.
    """
    self._constrs.extend(self._moma._prob.add_linear_constraints(*args))
Add constraints to the model.
def number_of_changes(slots, events, original_schedule, X, **kwargs):
    """
    A function that counts the number of changes between a given schedule
    and an array (either numpy array of lp array).
    """
    baseline = schedule_to_array(original_schedule, events=events,
                                 slots=slots)
    total = 0
    for row, event_row in enumerate(baseline):
        for col, assigned in enumerate(event_row):
            # A previously-empty cell counts when it becomes occupied;
            # a previously-occupied cell counts when it is vacated.
            total += X[row, col] if assigned == 0 else 1 - X[row, col]
    return total
A function that counts the number of changes between a given schedule and an array (either numpy array of lp array).
def set_attrs(self):
    """ set our object attributes """
    # Copy the codec settings onto the attrs container so they travel with
    # the stored object.
    self.attrs.encoding = self.encoding
    self.attrs.errors = self.errors
set our object attributes
def _call_java(sc, java_obj, name, *args):
    """
    Method copied from pyspark.ml.wrapper.  Uses private Spark APIs.
    """
    java_method = getattr(java_obj, name)
    # Convert each Python argument to its Java counterpart, invoke, and
    # convert the result back.
    converted_args = [_py2java(sc, value) for value in args]
    return _java2py(sc, java_method(*converted_args))
Method copied from pyspark.ml.wrapper. Uses private Spark APIs.
def get_param_doc(doc, param):
    """Get the documentation and datatype for a parameter

    This function returns the documentation and the argument for a
    napoleon like structured docstring `doc`

    Parameters
    ----------
    doc: str
        The base docstring to use
    param: str
        The argument to use

    Returns
    -------
    str
        The documentation of the given `param`
    str
        The datatype of the given `param`"""
    dtype = None
    arg_doc = docstrings.keep_params_s(doc, [param]) or \
        docstrings.keep_types_s(doc, [param])
    if arg_doc:
        header, *body = arg_doc.splitlines()
        arg_doc = dedents('\n' + '\n'.join(body))
        # The header line is "<name>: <type>"; anything after the first
        # colon is the datatype.
        _, sep, type_part = header.partition(':')
        if sep:
            dtype = type_part.strip()
    return arg_doc, dtype
Get the documentation and datatype for a parameter This function returns the documentation and the argument for a napoleon like structured docstring `doc` Parameters ---------- doc: str The base docstring to use param: str The argument to use Returns ------- str The documentation of the given `param` str The datatype of the given `param`
def get_vcenter_data_model(self, api, vcenter_name):
    """
    :param api:
    :param str vcenter_name:
    :rtype: VMwarevCenterResourceModel
    """
    # Guard clause: an empty/None name cannot be resolved.
    if not vcenter_name:
        raise ValueError('VMWare vCenter name is empty')

    resource_details = api.GetResourceDetails(vcenter_name)
    return self.resource_model_parser.convert_to_vcenter_model(resource_details)
:param api: :param str vcenter_name: :rtype: VMwarevCenterResourceModel
def postChunked(host, selector, fields, files):
    """
    Attempt to replace postMultipart() with nearly-identical interface.
    (The files tuple no longer requires the filename, and we only return
    the response body.)
    Uses the urllib2_file.py originally from
    http://fabien.seisen.org which was also drawn heavily from
    http://code.activestate.com/recipes/146306/ .

    This urllib2_file.py is more desirable because of the chunked uploading
    from a file pointer (no need to read entire file into memory) and the
    ability to work from behind a proxy (due to its basis on urllib2).

    :param host: host name (no scheme)
    :param selector: path on the host
    :param fields: (name, value) pairs encoded into the query string
    :param files: sequence of (key, file_pointer) tuples uploaded as the body
    :return: the response body
    """
    params = urllib.urlencode(fields)
    url = 'http://%s%s?%s' % (host, selector, params)
    try:
        u = urllib2.urlopen(url, files)
        result = u.read()
    finally:
        # Close the supplied file pointers even when the request fails;
        # the old code leaked them on error and abused a list
        # comprehension for its side effects on success.
        for key, fp in files:
            fp.close()
    return result
Attempt to replace postMultipart() with nearly-identical interface. (The files tuple no longer requires the filename, and we only return the response body.) Uses the urllib2_file.py originally from http://fabien.seisen.org which was also drawn heavily from http://code.activestate.com/recipes/146306/ . This urllib2_file.py is more desirable because of the chunked uploading from a file pointer (no need to read entire file into memory) and the ability to work from behind a proxy (due to its basis on urllib2).
def customer_discount_webhook_handler(event):
    """Handle updates to customer discount objects.

    Docs: https://stripe.com/docs/api#discounts

    Because there is no concept of a "Discount" model in dj-stripe (due to the
    lack of a stripe id on them), this is a little different to the other handlers.
    """
    crud_type = CrudType.determine(event=event)
    discount = event.data.get("object", {})
    raw_coupon = discount.get("coupon", {})
    customer = event.customer

    if crud_type.created or crud_type.updated:
        # Sync the embedded coupon object and pick up the discount window.
        coupon, _ = _handle_crud_like_event(
            target_cls=models.Coupon,
            event=event,
            data=raw_coupon,
            id=raw_coupon.get("id"),
        )
        start = discount.get("start")
        end = discount.get("end")
    else:
        # Deletion: drop the coupon and its window from the customer.
        coupon = start = end = None

    customer.coupon = coupon
    customer.coupon_start = convert_tstamp(start)
    customer.coupon_end = convert_tstamp(end)
    customer.save()
Handle updates to customer discount objects. Docs: https://stripe.com/docs/api#discounts Because there is no concept of a "Discount" model in dj-stripe (due to the lack of a stripe id on them), this is a little different to the other handlers.
def executemany(self, command, params=None, max_attempts=5):
    """Execute a SQL statement against all parameter sequences.

    Reconnects and retries when execution fails.

    :param command: SQL statement to execute
    :param params: iterable of parameter sequences (may be None)
    :param max_attempts: maximum number of execution attempts
    :return: True on success, False if every attempt failed
    """
    for _attempt in range(max_attempts):
        try:
            self._cursor.executemany(command, params)
            self._commit()
            return True
        except Exception:
            # Best-effort retry: refresh the connection and try again.
            self.reconnect()
    # BUGFIX: the original fell off the loop and implicitly returned None;
    # return an explicit False so callers get a documented boolean.
    return False
Execute multiple SQL queries without returning a result.
def handle_oauth2_response(self, args):
    """Handles an oauth2 authorization response.

    Exchanges the authorization ``code`` received in *args* for an access
    token at ``self.access_token_url`` and returns the parsed payload.

    :param args: mapping of callback query parameters; ``code`` is read
    :return: parsed token response data
    :raises OAuthException: if ``access_token_method`` is not POST/GET, or
        the provider responds with a status other than 200/201
    """
    client = self.make_client()
    # Standard OAuth2 token-exchange parameters. The redirect_uri must match
    # the one stored in the session during the earlier authorize step.
    remote_args = {
        'code': args.get('code'),
        'client_secret': self.consumer_secret,
        'redirect_uri': session.get('%s_oauthredir' % self.name)
    }
    log.debug('Prepare oauth2 remote args %r', remote_args)
    remote_args.update(self.access_token_params)
    # Copy so per-request header mutations don't leak into the shared defaults.
    headers = copy(self._access_token_headers)
    if self.access_token_method == 'POST':
        headers.update({'Content-Type': 'application/x-www-form-urlencoded'})
        body = client.prepare_request_body(**remote_args)
        resp, content = self.http_request(
            self.expand_url(self.access_token_url),
            headers=headers,
            data=to_bytes(body, self.encoding),
            method=self.access_token_method,
        )
    elif self.access_token_method == 'GET':
        # For GET the token parameters are appended to the query string,
        # reusing whichever separator ('?' or '&') the URL still needs.
        qs = client.prepare_request_body(**remote_args)
        url = self.expand_url(self.access_token_url)
        url += ('?' in url and '&' or '?') + qs
        resp, content = self.http_request(
            url,
            headers=headers,
            method=self.access_token_method,
        )
    else:
        raise OAuthException(
            'Unsupported access_token_method: %s' % self.access_token_method
        )

    data = parse_response(resp, content, content_type=self.content_type)
    if resp.code not in (200, 201):
        raise OAuthException(
            'Invalid response from %s' % self.name,
            type='invalid_response', data=data
        )
    return data
Handles an oauth2 authorization response.
def change_generated_target_suffix (type, properties, suffix):
    """ Change the suffix previously registered for this type/properties
        combination. If suffix is not yet specified, sets it.

        type: the target type whose suffix registration is changed
        properties: list of property strings qualifying the registration
        suffix: the new suffix to register
    """
    # Module convention: validate argument types with asserts up front.
    assert isinstance(type, basestring)
    assert is_iterable_typed(properties, basestring)
    assert isinstance(suffix, basestring)
    # The leading 1 presumably selects the suffix (vs. prefix) slot in the
    # type/properties registry -- confirm against change_generated_target_ps.
    change_generated_target_ps(1, type, properties, suffix)
Change the suffix previously registered for this type/properties combination. If suffix is not yet specified, sets it.
def email_message(
    self,
    recipient,  # type: Text
    subject_template,  # type: Text
    body_template,  # type: Text
    sender=None,  # type: Optional[AbstractUser]
    message_class=EmailMessage,
    **kwargs
):
    """
    Returns an invitation email message. This can be easily overridden.
    For instance, to send an HTML message, use the EmailMultiAlternatives
    message_class and attach the additional content.

    :param recipient: email address the invitation is sent to
    :param subject_template: template name used to render the subject
    :param body_template: template name used to render the body
    :param sender: user on whose behalf the invitation is sent (required)
    :param message_class: message type to build
    :raises ValueError: if sender is None
    """
    # BUGFIX: sender defaults to None but was dereferenced unconditionally,
    # producing an opaque AttributeError. Fail fast with a clear message.
    if sender is None:
        raise ValueError("email_message() requires a sender user")

    display_name = "%s %s" % (sender.first_name, sender.last_name)
    # From: sender's display name paired with the site-wide sending address.
    from_email = "%s <%s>" % (
        display_name,
        email.utils.parseaddr(settings.DEFAULT_FROM_EMAIL)[1],
    )
    # Replies go to the actual sender, not the site-wide address.
    reply_to = "%s <%s>" % (display_name, sender.email)
    headers = {"Reply-To": reply_to}

    kwargs.update({"sender": sender, "recipient": recipient})

    subject_template = loader.get_template(subject_template)
    body_template = loader.get_template(body_template)
    # strip() removes stray newline characters from the rendered subject
    subject = subject_template.render(kwargs).strip()
    body = body_template.render(kwargs)

    return message_class(subject, body, from_email, [recipient], headers=headers)
Returns an invitation email message. This can be easily overridden.
For instance, to send an HTML message, use the EmailMultiAlternatives
message_class and attach the additional content.
def framework_find(fn, executable_path=None, env=None):
    """
    Find a framework using dyld semantics in a very loose manner.

    Will take input such as:
        Python
        Python.framework
        Python.framework/Versions/Current
    """
    # Fast path: the name may already resolve as given.
    try:
        return dyld_find(fn, executable_path=executable_path, env=env)
    except ValueError:
        pass
    # Otherwise derive the framework binary path "<name>.framework/<name>",
    # appending the ".framework" extension when it is missing.
    stem_end = fn.rfind('.framework')
    if stem_end < 0:
        stem_end = len(fn)
        fn = fn + '.framework'
    leaf = os.path.basename(fn[:stem_end])
    return dyld_find(os.path.join(fn, leaf),
                     executable_path=executable_path, env=env)
Find a framework using dyld semantics in a very loose manner. Will take input such as: Python Python.framework Python.framework/Versions/Current
def get_job_model(self):
    """
    Build a JobModel from the currently loaded job data.

    :return: JobModel
    :raises Exception: if no job has been loaded via load(id) yet
    """
    # Fail fast when the caller skipped load(id).
    if not self.job:
        raise Exception('Job not loaded yet. Use load(id) first.')

    return JobModel(self.job_id, self.job, self.home_config['storage_dir'])
Returns a new JobModel instance with current loaded job data attached. :return: JobModel