def categorize_by_attr(self, attribute):
    '''
    Function to categorize a FileList by a File object attribute
    (eg. 'segment', 'ifo', 'description').

    Parameters
    ----------
    attribute : string
        File object attribute to categorize FileList

    Returns
    -------
    keys : list
        A list of values for an attribute
    groups : list
        A list of FileLists
    '''
    # need to sort FileList otherwise using groupby without sorting does
    # 'AAABBBCCDDAABB' -> ['AAA','BBB','CC','DD','AA','BB']
    # and using groupby with sorting does
    # 'AAABBBCCDDAABB' -> ['AAAAA','BBBBB','CC','DD']
    flist = sorted(self, key=attrgetter(attribute), reverse=True)

    # use groupby to create lists
    groups = []
    keys = []
    for k, g in groupby(flist, attrgetter(attribute)):
        groups.append(FileList(g))
        keys.append(k)

    return keys, groups
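# A minimal, self-contained sketch (not from the original source) showing the
# sort-then-groupby pattern that categorize_by_attr relies on. The 'Entry'
# namedtuple below is a hypothetical stand-in for the real File objects.
from collections import namedtuple
from itertools import groupby
from operator import attrgetter

Entry = namedtuple('Entry', ['ifo', 'description'])
entries = [Entry('H1', 'a'), Entry('L1', 'b'), Entry('H1', 'c')]

# groupby only merges adjacent equal keys, hence the sort first
ordered = sorted(entries, key=attrgetter('ifo'), reverse=True)
keys, groups = [], []
for key, group in groupby(ordered, attrgetter('ifo')):
    keys.append(key)
    groups.append(list(group))
# keys   -> ['L1', 'H1']
# groups -> [[Entry('L1', 'b')], [Entry('H1', 'a'), Entry('H1', 'c')]]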
def new_account(self, label=None):
    """
    Creates new account, appends it to the :class:`Wallet`'s account list
    and returns it.

    :param label: account label as `str`
    :rtype: :class:`Account`
    """
    acc, addr = self._backend.new_account(label=label)
    assert acc.index == len(self.accounts)
    self.accounts.append(acc)
    return acc
def any(self, cond):
    """
    Check if a condition is met by any document in a list,
    where a condition can also be a sequence (e.g. list).

    >>> Query().f1.any(Query().f2 == 1)

    Matches::

        {'f1': [{'f2': 1}, {'f2': 0}]}

    >>> Query().f1.any([1, 2, 3])

    Matches::

        {'f1': [1, 2]}
        {'f1': [3, 4, 5]}

    :param cond: Either a query that at least one document has to match or
                 a list of which at least one document has to be contained
                 in the tested document.
    """
    if callable(cond):
        def _cmp(value):
            return is_sequence(value) and any(cond(e) for e in value)
    else:
        def _cmp(value):
            return is_sequence(value) and any(e in cond for e in value)

    return self._generate_test(
        lambda value: _cmp(value),
        ('any', self._path, freeze(cond))
    )
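# Hedged usage sketch assuming the public TinyDB API this Query method belongs
# to (the database path and field names below are made up for illustration):
from tinydb import TinyDB, Query

db = TinyDB('demo_db.json')
db.insert({'f1': [{'f2': 1}, {'f2': 0}]})
db.insert({'f1': [3, 4, 5]})

User = Query()
print(db.search(User.f1.any(User.f2 == 1)))   # first document matches
print(db.search(User.f1.any([1, 2, 3])))      # second document matches (contains 3)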
def get_attached_pipettes(self):
    """
    Gets model names of attached pipettes

    :return: :dict with keys 'left' and 'right' and a model string for each
        mount, or 'uncommissioned' if no model string available
    """
    left_data = {
        'mount_axis': 'z',
        'plunger_axis': 'b',
        'model': self.model_by_mount['left']['model'],
        'id': self.model_by_mount['left']['id']
    }
    left_model = left_data.get('model')
    if left_model:
        tip_length = pipette_config.load(
            left_model, left_data['id']).tip_length
        left_data.update({'tip_length': tip_length})

    right_data = {
        'mount_axis': 'a',
        'plunger_axis': 'c',
        'model': self.model_by_mount['right']['model'],
        'id': self.model_by_mount['right']['id']
    }
    right_model = right_data.get('model')
    if right_model:
        tip_length = pipette_config.load(
            right_model, right_data['id']).tip_length
        right_data.update({'tip_length': tip_length})

    return {
        'left': left_data,
        'right': right_data
    }
def get_exception():
    """Return full formatted traceback as a string."""
    trace = ""
    exception = ""
    exc_list = traceback.format_exception_only(
        sys.exc_info()[0], sys.exc_info()[1]
    )
    for entry in exc_list:
        exception += entry
    tb_list = traceback.format_tb(sys.exc_info()[2])
    for entry in tb_list:
        trace += entry
    return "%s\n%s" % (exception, trace)
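# Hedged usage sketch (not part of the original module): get_exception() is
# meant to be called from inside an except block, where sys.exc_info() is
# populated, e.g.:
import sys
import traceback

try:
    1 / 0
except ZeroDivisionError:
    # prints "ZeroDivisionError: division by zero" followed by the traceback
    print(get_exception())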
def element_info(cls_or_slf, node, siblings, level, value_dims):
    """
    Return the information summary for an Element. This consists
    of the dotted name followed by the value dimension names.
    """
    info = cls_or_slf.component_type(node)
    if len(node.kdims) >= 1:
        info += cls_or_slf.tab + '[%s]' % ','.join(d.name for d in node.kdims)
    if value_dims and len(node.vdims) >= 1:
        info += cls_or_slf.tab + '(%s)' % ','.join(d.name for d in node.vdims)
    return level, [(level, info)]
def load_cyassimp(file_obj, file_type=None, resolver=None, **kwargs):
    """
    Load a file using the cyassimp bindings.

    The easiest way to install these is with conda:
    conda install -c menpo/label/master cyassimp

    Parameters
    ----------
    file_obj : str, or file object
        File path or object containing mesh data
    file_type : str
        File extension, aka 'stl'
    resolver : trimesh.visual.resolvers.Resolver
        Used to load referenced data (like texture files)
    kwargs : dict
        Passed through to mesh constructor

    Returns
    -------
    meshes : (n,) list of dict
        Contain kwargs for Trimesh constructor
    """
    if hasattr(file_obj, 'read'):
        # if it has a read attribute it is probably a file object
        with tempfile.NamedTemporaryFile(
                suffix=str(file_type)) as file_temp:
            file_temp.write(file_obj.read())
            # file name should be bytes
            scene = cyassimp.AIImporter(
                file_temp.name.encode('utf-8'))
            scene.build_scene()
    else:
        scene = cyassimp.AIImporter(file_obj.encode('utf-8'))
        scene.build_scene()

    meshes = []
    for m in scene.meshes:
        mesh_kwargs = kwargs.copy()
        mesh_kwargs.update({'vertices': m.points,
                            'faces': m.trilist})
        meshes.append(mesh_kwargs)

    if len(meshes) == 1:
        return meshes[0]

    return meshes
def rotate_lv(*, device, size, debug, forward): """Rotate a logical volume by a single PE. If forward: Move the first physical extent of an LV to the end else: Move the last physical extent of a LV to the start then poke LVM to refresh the mapping. """ import augeas class Augeas(augeas.Augeas): def get_int(self, key): return int(self.get(key + '/int')) def set_int(self, key, val): return self.set(key + '/int', '%d' % val) def incr(self, key, by=1): orig = self.get_int(key) self.set_int(key, orig + by) def decr(self, key): self.incr(key, by=-1) lv_info = subprocess.check_output( 'lvm lvs --noheadings --rows --units=b --nosuffix ' '-o vg_name,vg_uuid,lv_name,lv_uuid,lv_attr --'.split() + [device.devpath], universal_newlines=True).splitlines() vgname, vg_uuid, lvname, lv_uuid, lv_attr = (fi.lstrip() for fi in lv_info) active = lv_attr[4] == 'a' # Make sure the volume isn't in use by unmapping it quiet_call( ['lvm', 'lvchange', '-an', '--', '{}/{}'.format(vgname, lvname)]) with tempfile.TemporaryDirectory(suffix='.blocks') as tdname: vgcfgname = tdname + '/vg.cfg' print('Loading LVM metadata... ', end='', flush=True) quiet_call( ['lvm', 'vgcfgbackup', '--file', vgcfgname, '--', vgname]) aug = Augeas( loadpath=pkg_resources.resource_filename('blocks', 'augeas'), root='/dev/null', flags=augeas.Augeas.NO_MODL_AUTOLOAD | augeas.Augeas.SAVE_NEWFILE) vgcfg = open(vgcfgname) vgcfg_orig = vgcfg.read() aug.set('/raw/vgcfg', vgcfg_orig) aug.text_store('LVM.lns', '/raw/vgcfg', '/vg') print('ok') # There is no easy way to quote for XPath, so whitelist assert all(ch in ASCII_ALNUM_WHITELIST for ch in vgname), vgname assert all(ch in ASCII_ALNUM_WHITELIST for ch in lvname), lvname aug.defvar('vg', '/vg/{}/dict'.format(vgname)) assert aug.get('$vg/id/str') == vg_uuid aug.defvar('lv', '$vg/logical_volumes/dict/{}/dict'.format(lvname)) assert aug.get('$lv/id/str') == lv_uuid rotate_aug(aug, forward, size) aug.text_retrieve('LVM.lns', '/raw/vgcfg', '/vg', '/raw/vgcfg.new') open(vgcfgname + '.new', 'w').write(aug.get('/raw/vgcfg.new')) rotate_aug(aug, not forward, size) aug.text_retrieve('LVM.lns', '/raw/vgcfg', '/vg', '/raw/vgcfg.backagain') open(vgcfgname + '.backagain', 'w').write(aug.get('/raw/vgcfg.backagain')) if debug: print('CHECK STABILITY') subprocess.call( ['git', '--no-pager', 'diff', '--no-index', '--patience', '--color-words', '--', vgcfgname, vgcfgname + '.backagain']) if forward: print('CHECK CORRECTNESS (forward)') else: print('CHECK CORRECTNESS (backward)') subprocess.call( ['git', '--no-pager', 'diff', '--no-index', '--patience', '--color-words', '--', vgcfgname, vgcfgname + '.new']) if forward: print( 'Rotating the second extent to be the first... ', end='', flush=True) else: print( 'Rotating the last extent to be the first... ', end='', flush=True) quiet_call( ['lvm', 'vgcfgrestore', '--file', vgcfgname + '.new', '--', vgname]) # Make sure LVM updates the mapping, this is pretty critical quiet_call( ['lvm', 'lvchange', '--refresh', '--', '{}/{}'.format(vgname, lvname)]) if active: quiet_call( ['lvm', 'lvchange', '-ay', '--', '{}/{}'.format(vgname, lvname)]) print('ok')
def is_stationarity(self, tolerance=0.2, sample=None):
    """
    Checks if the given markov chain is stationary and checks the steady state
    probability values for the state are consistent.

    Parameters
    ----------
    tolerance: float
        represents the diff between actual steady state value and the computed value
    sample: [State(i,j)]
        represents the list of state which the markov chain has sampled

    Return Type
    -----------
    Boolean
        True, if the markov chain converges to steady state distribution within the tolerance
        False, if the markov chain does not converge to steady state distribution within tolerance

    Examples
    --------
    >>> from pgmpy.models.MarkovChain import MarkovChain
    >>> from pgmpy.factors.discrete import State
    >>> model = MarkovChain()
    >>> model.add_variables_from(['intel', 'diff'], [3, 2])
    >>> intel_tm = {0: {0: 0.2, 1: 0.4, 2: 0.4}, 1: {0: 0, 1: 0.5, 2: 0.5}, 2: {0: 0.3, 1: 0.3, 2: 0.4}}
    >>> model.add_transition_model('intel', intel_tm)
    >>> diff_tm = {0: {0: 0.5, 1: 0.5}, 1: {0: 0.25, 1: 0.75}}
    >>> model.add_transition_model('diff', diff_tm)
    >>> model.is_stationarity()
    True
    """
    keys = self.transition_models.keys()
    return_val = True
    for k in keys:
        # convert dict to numpy matrix
        # (np.float was removed from NumPy; the builtin float is equivalent here)
        transition_mat = np.array([np.array(list(self.transition_models[k][i].values()))
                                   for i in self.transition_models[k].keys()], dtype=float)
        S, U = eig(transition_mat.T)
        stationary = np.array(U[:, np.where(np.abs(S - 1.) < 1e-8)[0][0]].flat)
        stationary = (stationary / np.sum(stationary)).real

        probabilities = []
        window_size = 10000 if sample is None else len(sample)
        for i in range(0, transition_mat.shape[0]):
            probabilities.extend(self.prob_from_sample([State(k, i)], window_size=window_size))
        # stationary only if every sampled probability is within tolerance of the eigenvector
        return_val = return_val and not any(
            np.abs(i) > tolerance for i in np.subtract(probabilities, stationary))

    return return_val
def create_proxy_model(self, model, parent, name, multiplicity='ZERO_MANY', **kwargs):
    """Add this model as a proxy to another parent model.

    This will add a model as a proxy model to another parent model. It ensures that it
    will copy the whole sub-assembly to the 'parent' model.

    In order to prevent the backend from updating the frontend you may add
    `suppress_kevents=True` as additional keyword=value argument to this method. This
    will improve performance of the backend against a trade-off that someone looking
    at the frontend won't notice any changes unless the page is refreshed.

    :param model: the catalog proxy model the new proxied model should be based upon
    :type model: :class:`models.Part`
    :param parent: parent part instance
    :type parent: :class:`models.Part`
    :param name: new part name
    :type name: basestring
    :param multiplicity: choose between ZERO_ONE, ONE, ZERO_MANY, ONE_MANY or M_N,
                         default is `ZERO_MANY`
    :type multiplicity: basestring
    :param kwargs: (optional) additional keyword=value arguments
    :type kwargs: dict
    :return: the new proxy :class:`models.Part` with category `MODEL`
    :raises IllegalArgumentError: When the provided arguments are incorrect
    :raises APIError: if the `Part` could not be created
    """
    if model.category != Category.MODEL:
        raise IllegalArgumentError("The model should be of category MODEL")
    if parent.category != Category.MODEL:
        raise IllegalArgumentError("The parent should be of category MODEL")

    data = {
        "name": name,
        "model": model.id,
        "parent": parent.id,
        "multiplicity": multiplicity
    }
    return self._create_part(action='create_proxy_model', data=data, **kwargs)
def show_state_usage(queue=False, **kwargs):
    '''
    Retrieve the highstate data from the salt master to analyse used and unused states

    Custom Pillar data can be passed with the ``pillar`` kwarg.

    CLI Example:

    .. code-block:: bash

        salt '*' state.show_state_usage
    '''
    conflict = _check_queue(queue, kwargs)
    if conflict is not None:
        return conflict
    pillar = kwargs.get('pillar')
    pillar_enc = kwargs.get('pillar_enc')
    if pillar_enc is None \
            and pillar is not None \
            and not isinstance(pillar, dict):
        raise SaltInvocationError(
            'Pillar data must be formatted as a dictionary, unless pillar_enc '
            'is specified.'
        )

    st_ = salt.state.HighState(__opts__, pillar, pillar_enc=pillar_enc)
    st_.push_active()

    try:
        ret = st_.compile_state_usage()
    finally:
        st_.pop_active()
    _set_retcode(ret)
    return ret
def unitize(vectors, check_valid=False, threshold=None):
    """
    Unitize a vector or an array of row-vectors.

    Parameters
    ----------
    vectors : (n, m) or (j) float
        Vector or vectors to be unitized
    check_valid : bool
        If set, will return mask of nonzero vectors
    threshold : float
        Cutoff for a value to be considered zero.

    Returns
    -------
    unit : (n, m) or (j) float
        Input vectors but unitized
    valid : (n,) bool or bool
        Mask of nonzero vectors returned if `check_valid`
    """
    # make sure we have a numpy array
    vectors = np.asanyarray(vectors)

    # allow user to set zero threshold
    if threshold is None:
        threshold = TOL_ZERO

    if len(vectors.shape) == 2:
        # for (m, d) arrays take the per-row unit vector
        # using sqrt and avoiding exponents is slightly faster
        # also dot with ones is faster than .sum(axis=1)
        norm = np.sqrt(np.dot(vectors * vectors,
                              [1.0] * vectors.shape[1]))
        # non-zero norms
        valid = norm > threshold
        # in-place reciprocal of nonzero norms
        norm[valid] **= -1
        # tile reciprocal of norm
        tiled = np.tile(norm, (vectors.shape[1], 1)).T
        # multiply by reciprocal of norm
        unit = vectors * tiled

    elif len(vectors.shape) == 1:
        # treat 1D arrays as a single vector
        norm = np.sqrt((vectors * vectors).sum())
        valid = norm > threshold
        if valid:
            unit = vectors / norm
        else:
            unit = vectors.copy()
    else:
        raise ValueError('vectors must be (n, ) or (n, d)!')

    if check_valid:
        return unit[valid], valid

    return unit
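# Hedged usage sketch (not from the original module): unitize() normalizes
# rows of an array; passing check_valid=True drops zero rows and also returns
# a mask. The explicit threshold avoids relying on the module-level TOL_ZERO
# constant, which is not shown here.
import numpy as np

rows = np.array([[3.0, 4.0],
                 [0.0, 0.0],
                 [1.0, 0.0]])
unit, valid = unitize(rows, check_valid=True, threshold=1e-12)
# unit  -> [[0.6, 0.8], [1.0, 0.0]]
# valid -> [True, False, True]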
def _touch_dir(self, path):
    """
    A helper function to create a directory if it doesn't exist.

    path: A string containing a full path to the directory to be created.
    """
    try:
        os.makedirs(path)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise
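# Side note (not from the original source): on Python 3.2+ the same
# "create if missing" behaviour is available directly via the exist_ok flag,
# which avoids the manual errno check:
import os

os.makedirs('/tmp/some/nested/dir', exist_ok=True)  # no error if it already exists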
def _compute_error(comp_cov, covariance_, precision_, score_metric="frobenius"):
    """Computes the covariance error vs. comp_cov.

    Parameters
    ----------
    comp_cov : array-like, shape = (n_features, n_features)
        The precision to compare with.
        This should normally be the test sample covariance/precision.

    scaling : bool
        If True, the squared error norm is divided by n_features.
        If False (default), the squared error norm is not rescaled.

    score_metric : str
        The type of norm used to compute the error between the estimated
        self.precision, self.covariance and the reference `comp_cov`.
        Available error types:
        - 'frobenius' (default): sqrt(tr(A^t.A))
        - 'spectral': sqrt(max(eigenvalues(A^t.A)))
        - 'kl': kl-divergence
        - 'quadratic': quadratic loss
        - 'log_likelihood': negative log likelihood

    squared : bool
        Whether to compute the squared error norm or the error norm.
        If True (default), the squared error norm is returned.
        If False, the error norm is returned.
    """
    if score_metric == "frobenius":
        return np.linalg.norm(np.triu(comp_cov - covariance_, 1), ord="fro")
    elif score_metric == "spectral":
        error = comp_cov - covariance_
        # note: np.linalg.svdvals only exists in newer NumPy releases;
        # scipy.linalg.svdvals is the long-standing equivalent
        return np.amax(np.linalg.svdvals(np.dot(error.T, error)))
    elif score_metric == "kl":
        return metrics.kl_loss(comp_cov, precision_)
    elif score_metric == "quadratic":
        return metrics.quadratic_loss(comp_cov, precision_)
    elif score_metric == "log_likelihood":
        return -metrics.log_likelihood(comp_cov, precision_)
    else:
        raise NotImplementedError(
            ("Must be frobenius, spectral, kl, "
             "quadratic, or log_likelihood")
        )
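# Minimal sketch (assumptions: only the 'frobenius' branch is exercised, so no
# metrics module or precision matrix is needed; the precision argument is
# unused here).
import numpy as np

comp_cov = np.array([[1.0, 0.2], [0.2, 1.0]])
est_cov = np.array([[1.0, 0.1], [0.1, 1.0]])
err = _compute_error(comp_cov, est_cov, precision_=None, score_metric="frobenius")
# only the upper off-diagonal difference (0.1) contributes, so err == 0.1
print(err)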
def create_widget(self):
    """ Create the underlying widget.
    """
    d = self.declaration
    self.widget = TextView(self.get_context(), None,
                           d.style or '@attr/textViewStyle')
def greet(self, name, sleep=0):
    # type: (AName, ASleep) -> AGreeting
    """Optionally sleep <sleep> seconds, then return a greeting to <name>"""
    print("Manufacturing greeting...")
    sleep_for(sleep)
    greeting = "Hello %s" % name
    return greeting
def Convert(self, metadata, process, token=None):
    """Converts Process to ExportedNetworkConnection."""
    conn_converter = NetworkConnectionToExportedNetworkConnectionConverter(
        options=self.options)
    return conn_converter.BatchConvert(
        [(metadata, conn) for conn in process.connections], token=token)
def get_cgi_parameter_int(form: cgi.FieldStorage, key: str) -> Optional[int]:
    """
    Extracts an integer parameter from a CGI form, or ``None`` if the key
    is absent or the string value is not convertible to ``int``.
    """
    return get_int_or_none(get_cgi_parameter_str(form, key))
def subscribe(self, peer_jid):
    """
    Asks for subscription

    Args:
        peer_jid (str): the JID you ask for subscription
    """
    self.roster.subscribe(aioxmpp.JID.fromstr(peer_jid).bare())
def find_service(self, uuid):
    """Return the first child service found that has the specified UUID.

    Will return None if no service that matches is found.
    """
    for service in self.list_services():
        if service.uuid == uuid:
            return service
    return None
def request_instance(vm_=None, call=None): ''' Put together all of the information necessary to request an instance on EC2, and then fire off the request the instance. Returns data about the instance ''' if call == 'function': # Technically this function may be called other ways too, but it # definitely cannot be called with --function. raise SaltCloudSystemExit( 'The request_instance action must be called with -a or --action.' ) location = vm_.get('location', get_location(vm_)) # do we launch a regular vm or a spot instance? # see http://goo.gl/hYZ13f for more information on EC2 API spot_config = get_spot_config(vm_) if spot_config is not None: if 'spot_price' not in spot_config: raise SaltCloudSystemExit( 'Spot instance config for {0} requires a spot_price ' 'attribute.'.format(vm_['name']) ) params = {'Action': 'RequestSpotInstances', 'InstanceCount': '1', 'Type': spot_config['type'] if 'type' in spot_config else 'one-time', 'SpotPrice': spot_config['spot_price']} # All of the necessary launch parameters for a VM when using # spot instances are the same except for the prefix below # being tacked on. spot_prefix = 'LaunchSpecification.' # regular EC2 instance else: # WARNING! EXPERIMENTAL! # This allows more than one instance to be spun up in a single call. # The first instance will be called by the name provided, but all other # instances will be nameless (or more specifically, they will use the # InstanceId as the name). This interface is expected to change, so # use at your own risk. min_instance = config.get_cloud_config_value( 'min_instance', vm_, __opts__, search_global=False, default=1 ) max_instance = config.get_cloud_config_value( 'max_instance', vm_, __opts__, search_global=False, default=1 ) params = {'Action': 'RunInstances', 'MinCount': min_instance, 'MaxCount': max_instance} # Normal instances should have no prefix. spot_prefix = '' image_id = get_imageid(vm_) params[spot_prefix + 'ImageId'] = image_id userdata = None userdata_file = config.get_cloud_config_value( 'userdata_file', vm_, __opts__, search_global=False, default=None ) if userdata_file is None: userdata = config.get_cloud_config_value( 'userdata', vm_, __opts__, search_global=False, default=None ) else: log.trace('userdata_file: %s', userdata_file) if os.path.exists(userdata_file): with salt.utils.files.fopen(userdata_file, 'r') as fh_: userdata = salt.utils.stringutils.to_unicode(fh_.read()) userdata = salt.utils.cloud.userdata_template(__opts__, vm_, userdata) if userdata is not None: try: params[spot_prefix + 'UserData'] = base64.b64encode( salt.utils.stringutils.to_bytes(userdata) ) except Exception as exc: log.exception('Failed to encode userdata: %s', exc) vm_size = config.get_cloud_config_value( 'size', vm_, __opts__, search_global=False ) params[spot_prefix + 'InstanceType'] = vm_size ex_keyname = keyname(vm_) if ex_keyname: params[spot_prefix + 'KeyName'] = ex_keyname ex_securitygroup = securitygroup(vm_) if ex_securitygroup: if not isinstance(ex_securitygroup, list): params[spot_prefix + 'SecurityGroup.1'] = ex_securitygroup else: for counter, sg_ in enumerate(ex_securitygroup): params[spot_prefix + 'SecurityGroup.{0}'.format(counter)] = sg_ ex_iam_profile = iam_profile(vm_) if ex_iam_profile: try: if ex_iam_profile.startswith('arn:aws:iam:'): params[ spot_prefix + 'IamInstanceProfile.Arn' ] = ex_iam_profile else: params[ spot_prefix + 'IamInstanceProfile.Name' ] = ex_iam_profile except AttributeError: raise SaltCloudConfigError( '\'iam_profile\' should be a string value.' 
) az_ = get_availability_zone(vm_) if az_ is not None: params[spot_prefix + 'Placement.AvailabilityZone'] = az_ tenancy_ = get_tenancy(vm_) if tenancy_ is not None: if spot_config is not None: raise SaltCloudConfigError( 'Spot instance config for {0} does not support ' 'specifying tenancy.'.format(vm_['name']) ) params['Placement.Tenancy'] = tenancy_ subnetid_ = get_subnetid(vm_) if subnetid_ is not None: params[spot_prefix + 'SubnetId'] = subnetid_ ex_securitygroupid = securitygroupid(vm_) if ex_securitygroupid: if not isinstance(ex_securitygroupid, list): params[spot_prefix + 'SecurityGroupId.1'] = ex_securitygroupid else: for counter, sg_ in enumerate(ex_securitygroupid): params[ spot_prefix + 'SecurityGroupId.{0}'.format(counter) ] = sg_ placementgroup_ = get_placementgroup(vm_) if placementgroup_ is not None: params[spot_prefix + 'Placement.GroupName'] = placementgroup_ blockdevicemappings_holder = block_device_mappings(vm_) if blockdevicemappings_holder: for _bd in blockdevicemappings_holder: if 'tag' in _bd: _bd.pop('tag') ex_blockdevicemappings = blockdevicemappings_holder if ex_blockdevicemappings: params.update(_param_from_config(spot_prefix + 'BlockDeviceMapping', ex_blockdevicemappings)) network_interfaces = config.get_cloud_config_value( 'network_interfaces', vm_, __opts__, search_global=False ) if network_interfaces: eni_devices = [] for interface in network_interfaces: log.debug('Create network interface: %s', interface) _new_eni = _create_eni_if_necessary(interface, vm_) eni_devices.append(_new_eni) params.update(_param_from_config(spot_prefix + 'NetworkInterface', eni_devices)) set_ebs_optimized = config.get_cloud_config_value( 'ebs_optimized', vm_, __opts__, search_global=False ) if set_ebs_optimized is not None: if not isinstance(set_ebs_optimized, bool): raise SaltCloudConfigError( '\'ebs_optimized\' should be a boolean value.' ) params[spot_prefix + 'EbsOptimized'] = set_ebs_optimized set_del_root_vol_on_destroy = config.get_cloud_config_value( 'del_root_vol_on_destroy', vm_, __opts__, search_global=False ) set_termination_protection = config.get_cloud_config_value( 'termination_protection', vm_, __opts__, search_global=False ) if set_termination_protection is not None: if not isinstance(set_termination_protection, bool): raise SaltCloudConfigError( '\'termination_protection\' should be a boolean value.' ) params.update(_param_from_config(spot_prefix + 'DisableApiTermination', set_termination_protection)) if set_del_root_vol_on_destroy and not isinstance(set_del_root_vol_on_destroy, bool): raise SaltCloudConfigError( '\'del_root_vol_on_destroy\' should be a boolean value.' 
) vm_['set_del_root_vol_on_destroy'] = set_del_root_vol_on_destroy if set_del_root_vol_on_destroy: # first make sure to look up the root device name # as Ubuntu and CentOS (and most likely other OSs) # use different device identifiers log.info('Attempting to look up root device name for image id %s on ' 'VM %s', image_id, vm_['name']) rd_params = { 'Action': 'DescribeImages', 'ImageId.1': image_id } try: rd_data = aws.query(rd_params, location=get_location(vm_), provider=get_provider(), opts=__opts__, sigver='4') if 'error' in rd_data: return rd_data['error'] log.debug('EC2 Response: \'%s\'', rd_data) except Exception as exc: log.error( 'Error getting root device name for image id %s for ' 'VM %s: \n%s', image_id, vm_['name'], exc, # Show the traceback if the debug logging level is enabled exc_info_on_loglevel=logging.DEBUG ) raise # make sure we have a response if not rd_data: err_msg = 'There was an error querying EC2 for the root device ' \ 'of image id {0}. Empty response.'.format(image_id) raise SaltCloudSystemExit(err_msg) # pull the root device name from the result and use it when # launching the new VM rd_name = None rd_type = None if 'blockDeviceMapping' in rd_data[0]: # Some ami instances do not have a root volume. Ignore such cases if rd_data[0]['blockDeviceMapping'] is not None: item = rd_data[0]['blockDeviceMapping']['item'] if isinstance(item, list): item = item[0] rd_name = item['deviceName'] # Grab the volume type rd_type = item['ebs'].get('volumeType', None) log.info('Found root device name: %s', rd_name) if rd_name is not None: if ex_blockdevicemappings: dev_list = [ dev['DeviceName'] for dev in ex_blockdevicemappings ] else: dev_list = [] if rd_name in dev_list: # Device already listed, just grab the index dev_index = dev_list.index(rd_name) else: dev_index = len(dev_list) # Add the device name in since it wasn't already there params[ '{0}BlockDeviceMapping.{1}.DeviceName'.format( spot_prefix, dev_index ) ] = rd_name # Set the termination value termination_key = '{0}BlockDeviceMapping.{1}.Ebs.DeleteOnTermination'.format(spot_prefix, dev_index) params[termination_key] = six.text_type(set_del_root_vol_on_destroy).lower() # Use default volume type if not specified if ex_blockdevicemappings and dev_index < len(ex_blockdevicemappings) and \ 'Ebs.VolumeType' not in ex_blockdevicemappings[dev_index]: type_key = '{0}BlockDeviceMapping.{1}.Ebs.VolumeType'.format(spot_prefix, dev_index) params[type_key] = rd_type set_del_all_vols_on_destroy = config.get_cloud_config_value( 'del_all_vols_on_destroy', vm_, __opts__, search_global=False, default=False ) if set_del_all_vols_on_destroy and not isinstance(set_del_all_vols_on_destroy, bool): raise SaltCloudConfigError( '\'del_all_vols_on_destroy\' should be a boolean value.' 
) __utils__['cloud.fire_event']( 'event', 'requesting instance', 'salt/cloud/{0}/requesting'.format(vm_['name']), args={ 'kwargs': __utils__['cloud.filter_event']( 'requesting', params, list(params) ), 'location': location, }, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) provider = get_provider(vm_) try: data = aws.query(params, 'instancesSet', location=location, provider=provider, opts=__opts__, sigver='4') if 'error' in data: return data['error'] except Exception as exc: log.error( 'Error creating %s on EC2 when trying to run the initial ' 'deployment: \n%s', vm_['name'], exc, # Show the traceback if the debug logging level is enabled exc_info_on_loglevel=logging.DEBUG ) raise # if we're using spot instances, we need to wait for the spot request # to become active before we continue if spot_config: sir_id = data[0]['spotInstanceRequestId'] vm_['spotRequestId'] = sir_id def __query_spot_instance_request(sir_id, location): params = {'Action': 'DescribeSpotInstanceRequests', 'SpotInstanceRequestId.1': sir_id} data = aws.query(params, location=location, provider=provider, opts=__opts__, sigver='4') if not data: log.error( 'There was an error while querying EC2. Empty response' ) # Trigger a failure in the wait for spot instance method return False if isinstance(data, dict) and 'error' in data: log.warning('There was an error in the query. %s', data['error']) # Trigger a failure in the wait for spot instance method return False log.debug('Returned query data: %s', data) state = data[0].get('state') if state == 'active': return data if state == 'open': # Still waiting for an active state log.info('Spot instance status: %s', data[0]['status']['message']) return None if state in ['cancelled', 'failed', 'closed']: # Request will never be active, fail log.error('Spot instance request resulted in state \'{0}\'. ' 'Nothing else we can do here.') return False __utils__['cloud.fire_event']( 'event', 'waiting for spot instance', 'salt/cloud/{0}/waiting_for_spot'.format(vm_['name']), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) try: data = _wait_for_spot_instance( __query_spot_instance_request, update_args=(sir_id, location), timeout=config.get_cloud_config_value( 'wait_for_spot_timeout', vm_, __opts__, default=10 * 60), interval=config.get_cloud_config_value( 'wait_for_spot_interval', vm_, __opts__, default=30), interval_multiplier=config.get_cloud_config_value( 'wait_for_spot_interval_multiplier', vm_, __opts__, default=1), max_failures=config.get_cloud_config_value( 'wait_for_spot_max_failures', vm_, __opts__, default=10), ) log.debug('wait_for_spot_instance data %s', data) except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc: try: # Cancel the existing spot instance request params = {'Action': 'CancelSpotInstanceRequests', 'SpotInstanceRequestId.1': sir_id} data = aws.query(params, location=location, provider=provider, opts=__opts__, sigver='4') log.debug('Canceled spot instance request %s. Data ' 'returned: %s', sir_id, data) except SaltCloudSystemExit: pass finally: raise SaltCloudSystemExit(six.text_type(exc)) return data, vm_
def read_firmware(self):
    """Read the firmware version of the OPC-N2. Firmware v18+ only.

    :rtype: dict

    :Example:

    >>> alpha.read_firmware()
    {
        'major': 18,
        'minor': 2,
        'version': 18.2
    }
    """
    # Send the command byte and sleep for 9 ms
    self.cnxn.xfer([0x12])
    sleep(10e-3)

    self.firmware['major'] = self.cnxn.xfer([0x00])[0]
    self.firmware['minor'] = self.cnxn.xfer([0x00])[0]

    # Build the firmware version
    self.firmware['version'] = float('{}.{}'.format(self.firmware['major'],
                                                    self.firmware['minor']))

    sleep(0.1)

    return self.firmware
def topDownCompute(self, encoded):
    """ See the function description in base.py
    """
    scaledResult = self.encoder.topDownCompute(encoded)[0]
    scaledValue = scaledResult.value
    value = math.pow(10, scaledValue)

    return EncoderResult(value=value,
                         scalar=value,
                         encoding=scaledResult.encoding)
def load_tabular_file(fname, return_meta=False, header=True, index_col=True):
    """
    Given a file name, loads it as a pandas data frame.

    Parameters
    ----------
    fname : str
        file name and path. Must be tsv.
    return_meta : bool (default False)
        if True, also load the accompanying json metadata file and return it.
    header : bool (default True)
        if there is a header in the tsv file, true will use first row in file.
    index_col : bool (default True)
        if there is an index column in the csv or tsv file, true will use first
        column in file.

    Returns
    -------
    df : pandas
        The loaded file
    info : pandas, if return_meta=True
        Meta information in json file (if specified)
    """
    if index_col:
        index_col = 0
    else:
        index_col = None

    if header:
        header = 0
    else:
        header = None

    df = pd.read_csv(fname, header=header, index_col=index_col, sep='\t')

    if return_meta:
        json_fname = fname.replace('tsv', 'json')
        meta = pd.read_json(json_fname)
        return df, meta
    else:
        return df
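# Hedged usage sketch (file names are made up): write a small TSV, then load it
# back with load_tabular_file. Only pandas is assumed, matching the function above.
import pandas as pd

pd.DataFrame({'a': [1, 2], 'b': [3, 4]}).to_csv('demo.tsv', sep='\t')
df = load_tabular_file('demo.tsv')   # header and index column are used by default
print(df.shape)                      # (2, 2)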
def signing_base(self, request, consumer, token):
    """Concatenates the consumer key and secret with the token's secret."""
    sig = '%s&' % escape(consumer.secret)

    if token:
        sig = sig + escape(token.secret)

    return sig, sig
def _on_receive(self, msg):
    """
    Callback registered for the handle with :class:`Router`; appends data to
    the internal queue.
    """
    _vv and IOLOG.debug('%r._on_receive(%r)', self, msg)
    self._latch.put(msg)
    if self.notify:
        self.notify(self)
def get_assessment_section(self, assessment_section_id):
    """Gets an assessment section by ``Id``.

    arg:    assessment_section_id (osid.id.Id): ``Id`` of the
            ``AssessmentSection``
    return: (osid.assessment.AssessmentSection) - the assessment
            section
    raise:  IllegalState - ``has_assessment_begun()`` is ``false``
    raise:  NotFound - ``assessment_section_id`` is not found
    raise:  NullArgument - ``assessment_section_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure occurred
    *compliance: mandatory -- This method must be implemented.*

    """
    return get_section_util(assessment_section_id, runtime=self._runtime, proxy=self._proxy)
def build(self, **kw):
    """Actually build the node.

    This is called by the Taskmaster after it's decided that the
    Node is out-of-date and must be rebuilt, and after the prepare()
    method has gotten everything, uh, prepared.

    This method is called from multiple threads in a parallel build,
    so only do thread safe stuff here. Do thread unsafe stuff
    in built().
    """
    try:
        self.get_executor()(self, **kw)
    except SCons.Errors.BuildError as e:
        e.node = self
        raise
def __set_unit_price(self, value):
    '''
    Sets the unit price

    @param value:str
    '''
    try:
        if value < 0:
            raise ValueError()
        self.__unit_price = Decimal(str(value))
    except ValueError:
        raise ValueError("Unit Price must be a positive number")
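# Side note (not from the original class): routing numeric input through
# Decimal(str(value)) avoids the binary floating-point artifacts that show up
# when constructing Decimal directly from a float, e.g.:
from decimal import Decimal

print(Decimal(0.1))        # 0.1000000000000000055511151231257827021181583404541015625
print(Decimal(str(0.1)))   # 0.1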
def summary(args): """ %prog summary input.bed scaffolds.fasta Print out summary statistics per map, followed by consensus summary of scaffold anchoring based on multiple maps. """ p = OptionParser(summary.__doc__) p.set_table(sep="|", align=True) p.set_outfile() opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) inputbed, scaffolds = args pf = inputbed.rsplit(".", 1)[0] mapbed = pf + ".bed" chr_agp = pf + ".chr.agp" sep = opts.sep align = opts.align cc = Map(mapbed) mapnames = cc.mapnames s = Sizes(scaffolds) total, l50, n50 = s.summary r = {} maps = [] fw = must_open(opts.outfile, "w") print("*** Summary for each individual map ***", file=fw) for mapname in mapnames: markers = [x for x in cc if x.mapname == mapname] ms = MapSummary(markers, l50, s) r["Linkage Groups", mapname] = ms.num_lgs ms.export_table(r, mapname, total) maps.append(ms) print(tabulate(r, sep=sep, align=align), file=fw) r = {} agp = AGP(chr_agp) print("*** Summary for consensus map ***", file=fw) consensus_scaffolds = set(x.component_id for x in agp if not x.is_gap) oriented_scaffolds = set(x.component_id for x in agp \ if (not x.is_gap) and x.orientation != '?') unplaced_scaffolds = set(s.mapping.keys()) - consensus_scaffolds for mapname, sc in (("Anchored", consensus_scaffolds), ("Oriented", oriented_scaffolds), ("Unplaced", unplaced_scaffolds)): markers = [x for x in cc if x.seqid in sc] ms = MapSummary(markers, l50, s, scaffolds=sc) ms.export_table(r, mapname, total) print(tabulate(r, sep=sep, align=align), file=fw)
def firmware_download_input_rbridge_id(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    firmware_download = ET.Element("firmware_download")
    config = firmware_download
    input = ET.SubElement(firmware_download, "input")
    rbridge_id = ET.SubElement(input, "rbridge-id")
    rbridge_id.text = kwargs.pop('rbridge_id')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def get_for(self, dynamic_part):
    """
    Return a variation of the current dynamic field based on the given
    dynamic part. Use the "format" attribute to create the final name.
    """
    if not hasattr(self, '_instance'):
        raise ImplementationError('"get_for" can be used only on a bound field')
    name = self.get_name_for(dynamic_part)
    return self._instance.get_field(name)
def repertoire(self, direction, mechanism, purview):
    """Return the cause or effect repertoire function based on a direction.

    Args:
        direction (str): The temporal direction, specifying the cause or
            effect repertoire.
    """
    system = self.system[direction]
    node_labels = system.node_labels

    if not set(purview).issubset(self.purview_indices(direction)):
        raise ValueError('{} is not a {} purview in {}'.format(
            fmt.fmt_mechanism(purview, node_labels), direction, self))

    if not set(mechanism).issubset(self.mechanism_indices(direction)):
        raise ValueError('{} is not a {} mechanism in {}'.format(
            fmt.fmt_mechanism(mechanism, node_labels), direction, self))

    return system.repertoire(direction, mechanism, purview)
def pad(img, padding, fill=0, padding_mode='constant'): r"""Pad the given PIL Image on all sides with specified padding mode and fill value. Args: img (PIL Image): Image to be padded. padding (int or tuple): Padding on each border. If a single int is provided this is used to pad all borders. If tuple of length 2 is provided this is the padding on left/right and top/bottom respectively. If a tuple of length 4 is provided this is the padding for the left, top, right and bottom borders respectively. fill: Pixel fill value for constant fill. Default is 0. If a tuple of length 3, it is used to fill R, G, B channels respectively. This value is only used when the padding_mode is constant padding_mode: Type of padding. Should be: constant, edge, reflect or symmetric. Default is constant. - constant: pads with a constant value, this value is specified with fill - edge: pads with the last value on the edge of the image - reflect: pads with reflection of image (without repeating the last value on the edge) padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode will result in [3, 2, 1, 2, 3, 4, 3, 2] - symmetric: pads with reflection of image (repeating the last value on the edge) padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode will result in [2, 1, 1, 2, 3, 4, 4, 3] Returns: PIL Image: Padded image. """ if not _is_pil_image(img): raise TypeError('img should be PIL Image. Got {}'.format(type(img))) if not isinstance(padding, (numbers.Number, tuple)): raise TypeError('Got inappropriate padding arg') if not isinstance(fill, (numbers.Number, str, tuple)): raise TypeError('Got inappropriate fill arg') if not isinstance(padding_mode, str): raise TypeError('Got inappropriate padding_mode arg') if isinstance(padding, Sequence) and len(padding) not in [2, 4]: raise ValueError("Padding must be an int or a 2, or 4 element tuple, not a " + "{} element tuple".format(len(padding))) assert padding_mode in ['constant', 'edge', 'reflect', 'symmetric'], \ 'Padding mode should be either constant, edge, reflect or symmetric' if padding_mode == 'constant': if img.mode == 'P': palette = img.getpalette() image = ImageOps.expand(img, border=padding, fill=fill) image.putpalette(palette) return image return ImageOps.expand(img, border=padding, fill=fill) else: if isinstance(padding, int): pad_left = pad_right = pad_top = pad_bottom = padding if isinstance(padding, Sequence) and len(padding) == 2: pad_left = pad_right = padding[0] pad_top = pad_bottom = padding[1] if isinstance(padding, Sequence) and len(padding) == 4: pad_left = padding[0] pad_top = padding[1] pad_right = padding[2] pad_bottom = padding[3] if img.mode == 'P': palette = img.getpalette() img = np.asarray(img) img = np.pad(img, ((pad_top, pad_bottom), (pad_left, pad_right)), padding_mode) img = Image.fromarray(img) img.putpalette(palette) return img img = np.asarray(img) # RGB image if len(img.shape) == 3: img = np.pad(img, ((pad_top, pad_bottom), (pad_left, pad_right), (0, 0)), padding_mode) # Grayscale image if len(img.shape) == 2: img = np.pad(img, ((pad_top, pad_bottom), (pad_left, pad_right)), padding_mode) return Image.fromarray(img)
r"""Pad the given PIL Image on all sides with specified padding mode and fill value. Args: img (PIL Image): Image to be padded. padding (int or tuple): Padding on each border. If a single int is provided this is used to pad all borders. If tuple of length 2 is provided this is the padding on left/right and top/bottom respectively. If a tuple of length 4 is provided this is the padding for the left, top, right and bottom borders respectively. fill: Pixel fill value for constant fill. Default is 0. If a tuple of length 3, it is used to fill R, G, B channels respectively. This value is only used when the padding_mode is constant padding_mode: Type of padding. Should be: constant, edge, reflect or symmetric. Default is constant. - constant: pads with a constant value, this value is specified with fill - edge: pads with the last value on the edge of the image - reflect: pads with reflection of image (without repeating the last value on the edge) padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode will result in [3, 2, 1, 2, 3, 4, 3, 2] - symmetric: pads with reflection of image (repeating the last value on the edge) padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode will result in [2, 1, 1, 2, 3, 4, 4, 3] Returns: PIL Image: Padded image.
def disaggregate_radiation(data_daily, sun_times=None, pot_rad=None, method='pot_rad', angstr_a=0.25, angstr_b=0.5, bristcamp_a=0.75, bristcamp_c=2.4, mean_course=None): """general function for radiation disaggregation Args: daily_data: daily values sun_times: daily dataframe including results of the util.sun_times function pot_rad: hourly dataframe including potential radiation method: keyword specifying the disaggregation method to be used angstr_a: parameter a of the Angstrom model (intercept) angstr_b: parameter b of the Angstrom model (slope) mean_course: monthly values of the mean hourly radiation course Returns: Disaggregated hourly values of shortwave radiation. """ # check if disaggregation method has a valid value if method not in ('pot_rad', 'pot_rad_via_ssd', 'pot_rad_via_bc', 'mean_course'): raise ValueError('Invalid option') glob_disagg = pd.Series(index=melodist.util.hourly_index(data_daily.index)) if method == 'mean_course': assert mean_course is not None pot_rad = pd.Series(index=glob_disagg.index) pot_rad[:] = mean_course.unstack().loc[list(zip(pot_rad.index.month, pot_rad.index.hour))].values else: assert pot_rad is not None pot_rad_daily = pot_rad.resample('D').mean() if method in ('pot_rad', 'mean_course'): globalrad = data_daily.glob elif method == 'pot_rad_via_ssd': # in this case use the Angstrom model globalrad = pd.Series(index=data_daily.index, data=0.) dates = sun_times.index[sun_times.daylength > 0] # account for polar nights globalrad[dates] = angstroem(data_daily.ssd[dates], sun_times.daylength[dates], pot_rad_daily[dates], angstr_a, angstr_b) elif method == 'pot_rad_via_bc': # using data from Bristow-Campbell model globalrad = bristow_campbell(data_daily.tmin, data_daily.tmax, pot_rad_daily, bristcamp_a, bristcamp_c) globalrad_equal = globalrad.reindex(pot_rad.index, method='ffill') # hourly values (replicate daily mean value for each hour) pot_rad_daily_equal = pot_rad_daily.reindex(pot_rad.index, method='ffill') glob_disagg = pot_rad / pot_rad_daily_equal * globalrad_equal glob_disagg[glob_disagg < 1e-2] = 0. return glob_disagg
def StructField(name, field_type):  # pylint: disable=invalid-name
    """Construct a field description protobuf.

    :type name: str
    :param name: the name of the field

    :type field_type: :class:`type_pb2.Type`
    :param field_type: the type of the field

    :rtype: :class:`type_pb2.StructType.Field`
    :returns: the appropriate struct-field-type protobuf
    """
    return type_pb2.StructType.Field(name=name, type=field_type)
def encode_ipmb_msg(header, data):
    """Encode an IPMB message.

    header: IPMB header object
    data: IPMI message data as bytestring

    Returns the message as bytestring.
    """
    msg = array('B')
    # note: array.fromstring/tostring were removed in Python 3.9;
    # frombytes/tobytes are the drop-in replacements there
    msg.fromstring(header.encode())
    if data is not None:
        a = array('B')
        a.fromstring(data)
        msg.extend(a)
    msg.append(checksum(msg[3:]))

    return msg.tostring()
def load_job(self, job):
    """
    Load the job from the given ``Job`` object.

    :param job: the job to load
    :type  job: :class:`~aeneas.job.Job`
    :raises: :class:`~aeneas.executejob.ExecuteJobInputError`: if ``job`` is not
             an instance of :class:`~aeneas.job.Job`
    """
    if not isinstance(job, Job):
        self.log_exc(u"job is not an instance of Job", None, True, ExecuteJobInputError)
    self.job = job
def get_delivery_notes_per_page(self, per_page=1000, page=1, params=None):
    """
    Get delivery notes per page

    :param per_page: How many objects per page. Default: 1000
    :param page: Which page. Default: 1
    :param params: Search parameters. Default: {}
    :return: list
    """
    return self._get_resource_per_page(resource=DELIVERY_NOTES, per_page=per_page,
                                       page=page, params=params)
def Matches(self, file_entry, search_depth):
    """Determines if the file entry matches the find specification.

    Args:
        file_entry (FileEntry): file entry.
        search_depth (int): number of location path segments to compare.

    Returns:
        tuple: contains:

            bool: True if the file entry matches the find specification,
                False otherwise.
            bool: True if the location matches, False if not or None if no
                location specified.
    """
    if self._location_segments is None:
        location_match = None
    else:
        location_match = self._CheckLocation(file_entry, search_depth)
        if not location_match:
            return False, location_match

        if search_depth != self._number_of_location_segments:
            return False, location_match

    match = self._CheckFileEntryType(file_entry)
    if match is not None and not match:
        return False, location_match

    match = self._CheckIsAllocated(file_entry)
    if match is not None and not match:
        return False, location_match

    return True, location_match
def is_terminated(self, retry=False):
    """
    If this instance has finished or not.

    :return: True if finished else False
    :rtype: bool
    """
    retry_num = options.retry_times
    while retry_num > 0:
        try:
            return self.status == Instance.Status.TERMINATED
        except (errors.InternalServerError, errors.RequestTimeTooSkewed):
            retry_num -= 1
            if not retry or retry_num <= 0:
                raise
def init_app(self, app):
    """Setup scoped session creation and teardown for the passed ``app``.

    :param app: a :class:`~flask.Flask` application
    """
    app.scoped_session = self

    @app.teardown_appcontext
    def remove_scoped_session(*args, **kwargs):  # pylint: disable=missing-docstring,unused-argument,unused-variable
        app.scoped_session.remove()
def assert_inequivalent(o1, o2):
    '''Asserts that o1 and o2 are distinct and inequivalent objects
    '''
    if not (isinstance(o1, type) and isinstance(o2, type)):
        assert o1 is not o2
    assert not o1 == o2 and o1 != o2
    assert not o2 == o1 and o2 != o1
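# Hedged usage sketch (the toy class is made up): the helper should pass for
# two objects that compare unequal, and fail for ones that compare equal.
class Point:
    def __init__(self, x):
        self.x = x
    def __eq__(self, other):
        return isinstance(other, Point) and self.x == other.x
    def __ne__(self, other):
        return not self.__eq__(other)

assert_inequivalent(Point(1), Point(2))       # passes: distinct and unequal

try:
    assert_inequivalent(Point(1), Point(1))   # equal objects -> AssertionError
except AssertionError:
    print("equivalent objects rejected, as expected")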
def faz(input_file, variables=None):
    """ FAZ entry point. """
    logging.debug("input file:\n {0}\n".format(input_file))

    tasks = parse_input_file(input_file, variables=variables)
    print("Found {0} tasks.".format(len(tasks)))

    graph = DependencyGraph(tasks)
    graph.show_tasks()
    graph.execute()
def collect_snmp(self, device, host, port, community): """ Collect stats from device """ # Log self.log.info("Collecting ServerTech PDU statistics from: %s" % device) # Set timestamp timestamp = time.time() inputFeeds = {} # Collect PDU input gauge values for gaugeName, gaugeOid in self.PDU_SYSTEM_GAUGES.items(): systemGauges = self.walk(gaugeOid, host, port, community) for o, gaugeValue in systemGauges.items(): # Get Metric Name metricName = gaugeName # Get Metric Value metricValue = float(gaugeValue) # Get Metric Path metricPath = '.'.join( ['devices', device, 'system', metricName]) # Create Metric metric = Metric(metricPath, metricValue, timestamp, 2) # Publish Metric self.publish_metric(metric) # Collect PDU input feed names inputFeedNames = self.walk( self.PDU_INFEED_NAMES, host, port, community) for o, inputFeedName in inputFeedNames.items(): # Extract input feed name inputFeed = ".".join(o.split(".")[-2:]) inputFeeds[inputFeed] = inputFeedName # Collect PDU input gauge values for gaugeName, gaugeOid in self.PDU_INFEED_GAUGES.items(): inputFeedGauges = self.walk(gaugeOid, host, port, community) for o, gaugeValue in inputFeedGauges.items(): # Extract input feed name inputFeed = ".".join(o.split(".")[-2:]) # Get Metric Name metricName = '.'.join([re.sub(r'\.|\\', '_', inputFeeds[inputFeed]), gaugeName]) # Get Metric Value if gaugeName == "infeedVolts": # Note: Voltage is in "tenth volts", so divide by 10 metricValue = float(gaugeValue) / 10.0 elif gaugeName == "infeedAmps": # Note: Amps is in "hundredth amps", so divide by 100 metricValue = float(gaugeValue) / 100.0 else: metricValue = float(gaugeValue) # Get Metric Path metricPath = '.'.join(['devices', device, 'input', metricName]) # Create Metric metric = Metric(metricPath, metricValue, timestamp, 2) # Publish Metric self.publish_metric(metric)
Collect stats from device
def encode(self, inputs, states=None, valid_length=None): """Encode the input sequence. Parameters ---------- inputs : NDArray states : list of NDArrays or None, default None valid_length : NDArray or None, default None Returns ------- outputs : list Outputs of the encoder. """ return self.encoder(self.src_embed(inputs), states, valid_length)
Encode the input sequence. Parameters ---------- inputs : NDArray states : list of NDArrays or None, default None valid_length : NDArray or None, default None Returns ------- outputs : list Outputs of the encoder.
def get_info(self, security_symbols, info_field_codes): """ Queries data from a /<security_type>/info endpoint. Args: security_symbols (list): List of string symbols info_field_codes (list): List of string info field codes Returns: dict of the decoded json from server response. Notes: The max length of any list arg is 100 """ security_symbols = self._str_or_list(security_symbols) info_field_codes = self._str_or_list(info_field_codes) url_path = self._build_url_path(security_symbols, 'info', info_field_codes) return self._get_data(url_path, None)
Queries data from a /<security_type>/info endpoint. Args: security_symbols (list): List of string symbols info_field_codes (list): List of string info field codes Returns: dict of the decoded json from server response. Notes: The max length of any list arg is 100
def adjoint(self): """Adjoint of this operator.""" if not self.is_linear: raise NotImplementedError('this operator is not linear and ' 'thus has no adjoint') forward_op = self class ResizingOperatorAdjoint(ResizingOperatorBase): """Adjoint of `ResizingOperator`. See `the online documentation <https://odlgroup.github.io/odl/math/resizing_ops.html>`_ on resizing operators for mathematical details. """ def _call(self, x, out): """Implement ``self(x, out)``.""" with writable_array(out) as out_arr: resize_array(x.asarray(), self.range.shape, offset=self.offset, pad_mode=self.pad_mode, pad_const=0, direction='adjoint', out=out_arr) @property def adjoint(self): """Adjoint of the adjoint, i.e. the original operator.""" return forward_op @property def inverse(self): """(Pseudo-)Inverse of this operator. Note that in axes where ``self`` extends, the returned operator acts as a proper inverse, while in restriction axes, the operation is not invertible. """ return ResizingOperatorAdjoint( domain=self.range, range=self.domain, pad_mode=self.pad_mode) return ResizingOperatorAdjoint(domain=self.range, range=self.domain, pad_mode=self.pad_mode)
Adjoint of this operator.
def block_dot(A, B, diagonal=False):
    """
    Element wise dot product on block matrices

        +------+------+   +------+------+   +-------+-------+
        |      |      |   |      |      |   |A11.B11|A12.B12|
        | A11  | A12  |   | B11  | B12  |   |       |       |
        +------+------+ o +------+------+ = +-------+-------+
        |      |      |   |      |      |   |A21.B21|A22.B22|
        | A21  | A22  |   | B21  | B22  |   |       |       |
        +------+------+   +------+------+   +-------+-------+

    ..Note
        If any block of either A or B is stored as a 1d vector then it is
        assumed to denote a diagonal matrix, and a more efficient element-wise
        product using numpy broadcasting is used, i.e. A11*B11
    """
    # Must have the same number of blocks and be a block matrix
    assert A.dtype is np.dtype('object'), "Must be a block matrix"
    assert B.dtype is np.dtype('object'), "Must be a block matrix"
    assert A.shape == B.shape

    def f(C, D):
        """
        C is an element of A, D is the associated element of B
        """
        Cshape = C.shape
        Dshape = D.shape
        if diagonal and (len(Cshape) == 1 or len(Dshape) == 1
                         or C.shape[0] != C.shape[1] or D.shape[0] != D.shape[1]):
            print("Broadcasting, C: {} D: {}".format(C.shape, D.shape))
            return C*D
        else:
            print("Dotting, C: {} D: {}".format(C.shape, D.shape))
            return np.dot(C, D)

    dot = np.vectorize(f, otypes=[np.object])
    return dot(A, B)
Element wise dot product on block matrices

    +------+------+   +------+------+   +-------+-------+
    |      |      |   |      |      |   |A11.B11|A12.B12|
    | A11  | A12  |   | B11  | B12  |   |       |       |
    +------+------+ o +------+------+ = +-------+-------+
    |      |      |   |      |      |   |A21.B21|A22.B22|
    | A21  | A22  |   | B21  | B22  |   |       |       |
    +------+------+   +------+------+   +-------+-------+

..Note
    If any block of either A or B is stored as a 1d vector then it is
    assumed to denote a diagonal matrix, and a more efficient element-wise
    product using numpy broadcasting is used, i.e. A11*B11
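A minimal usage sketch for the block-wise product above (an illustration, not part of the original source): the blocks live in 2x2 numpy object arrays, and a 1-D vector stands in for a diagonal block when diagonal=True. It assumes block_dot and numpy are importable.

import numpy as np

# Hypothetical demo data; assumes the block_dot function above is in scope.
A = np.empty((2, 2), dtype=object)
B = np.empty((2, 2), dtype=object)
A[0, 0], A[0, 1] = np.eye(3), np.ones((3, 2))
A[1, 0], A[1, 1] = np.zeros((2, 3)), 2.0 * np.eye(2)
B[0, 0], B[0, 1] = np.arange(9.0).reshape(3, 3), np.ones((3, 2))
B[1, 0], B[1, 1] = np.ones((2, 3)), np.array([1.0, 2.0])  # diagonal block stored as a vector

C = block_dot(A, B, diagonal=True)
print(C[0, 0].shape)  # (3, 3) -- ordinary matrix product for square blocks
print(C[1, 1])        # broadcasted product against the vector-encoded diagonal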
def set_mean(self, col, row, mean): """ Sets the mean at this location (if valid location). :param col: the 0-based column index :type col: int :param row: the 0-based row index :type row: int :param mean: the mean to set :type mean: float """ javabridge.call(self.jobject, "setMean", "(IID)V", col, row, mean)
Sets the mean at this location (if valid location). :param col: the 0-based column index :type col: int :param row: the 0-based row index :type row: int :param mean: the mean to set :type mean: float
def local_manager_target_uids(self):
    """Target UIDs for the local manager.
    """
    groups = self.root['groups'].backend
    managed_uids = set()
    for gid in self.local_manager_target_gids:
        group = groups.get(gid)
        if group:
            managed_uids.update(group.member_ids)
    return list(managed_uids)
Target UIDs for the local manager.
def upload_rpm(rpm_path, repoid, connector, callback=None): """upload an rpm into pulp rpm_path: path to an rpm connector: the connector to use for interacting with pulp callback: Optional callback to call after an RPM is uploaded. Callback should accept one argument, the name of the RPM which was uploaded """ ts = rpm.TransactionSet() ts.setVSFlags(rpm._RPMVSF_NOSIGNATURES) info = rpm_info(rpm_path) pkg_name = info['name'] nvrea = info['nvrea'] cksum = info['cksum'] size = info['size'] package_basename = info['package_basename'] juicer.utils.Log.log_notice("Expected amount to seek: %s (package size by os.path.getsize)" % size) # initiate upload upload = juicer.utils.Upload.Upload(package_basename, cksum, size, repoid, connector) #create a statusbar pbar = ProgressBar(size) # read in rpm total_seeked = 0 rpm_fd = open(rpm_path, 'rb') rpm_fd.seek(0) while total_seeked < size: rpm_data = rpm_fd.read(Constants.UPLOAD_AT_ONCE) last_offset = total_seeked total_seeked += len(rpm_data) juicer.utils.Log.log_notice("Seeked %s data... (total seeked: %s)" % (len(rpm_data), total_seeked)) upload_code = upload.append(fdata=rpm_data, offset=last_offset) if upload_code != Constants.PULP_PUT_OK: juicer.utils.Log.log_error("Upload failed.") pbar.update(len(rpm_data)) pbar.finish() rpm_fd.close() juicer.utils.Log.log_notice("Seeked total data: %s" % total_seeked) # finalize upload rpm_id = upload.import_upload(nvrea=nvrea, rpm_name=pkg_name) juicer.utils.Log.log_debug("RPM upload complete. New 'packageid': %s" % rpm_id) # clean up working dir upload.clean_upload() # Run callbacks? if callback: try: juicer.utils.Log.log_debug("Calling upload callack: %s" % str(callback)) callback(pkg_name) except Exception: juicer.utils.Log.log_error("Exception raised in callback: %s", str(callback)) pass return rpm_id
upload an rpm into pulp rpm_path: path to an rpm connector: the connector to use for interacting with pulp callback: Optional callback to call after an RPM is uploaded. Callback should accept one argument, the name of the RPM which was uploaded
def tangent_only_intersections(all_types): """Determine intersection in the case of only-tangent intersections. If the only intersections are tangencies, then either the surfaces are tangent but don't meet ("kissing" edges) or one surface is internally tangent to the other. Thus we expect every intersection to be classified as :attr:`~.IntersectionClassification.TANGENT_FIRST`, :attr:`~.IntersectionClassification.TANGENT_SECOND`, :attr:`~.IntersectionClassification.OPPOSED`, :attr:`~.IntersectionClassification.IGNORED_CORNER` or :attr:`~.IntersectionClassification.COINCIDENT_UNUSED`. What's more, we expect all intersections to be classified the same for a given pairing. Args: all_types (Set[.IntersectionClassification]): The set of all intersection classifications encountered among the intersections for the given surface-surface pair. Returns: Tuple[Optional[list], Optional[bool]]: Pair (2-tuple) of * Edges info list; will be empty or :data:`None` * "Contained" boolean. If not :data:`None`, indicates that one of the surfaces is contained in the other. Raises: ValueError: If there are intersections of more than one type among :attr:`~.IntersectionClassification.TANGENT_FIRST`, :attr:`~.IntersectionClassification.TANGENT_SECOND`, :attr:`~.IntersectionClassification.OPPOSED`, :attr:`~.IntersectionClassification.IGNORED_CORNER` or :attr:`~.IntersectionClassification.COINCIDENT_UNUSED`. ValueError: If there is a unique classification, but it isn't one of the tangent types. """ if len(all_types) != 1: raise ValueError("Unexpected value, types should all match", all_types) point_type = all_types.pop() if point_type == CLASSIFICATION_T.OPPOSED: return [], None elif point_type == CLASSIFICATION_T.IGNORED_CORNER: return [], None elif point_type == CLASSIFICATION_T.TANGENT_FIRST: return None, True elif point_type == CLASSIFICATION_T.TANGENT_SECOND: return None, False elif point_type == CLASSIFICATION_T.COINCIDENT_UNUSED: return [], None else: raise ValueError("Point type not for tangency", point_type)
Determine intersection in the case of only-tangent intersections. If the only intersections are tangencies, then either the surfaces are tangent but don't meet ("kissing" edges) or one surface is internally tangent to the other. Thus we expect every intersection to be classified as :attr:`~.IntersectionClassification.TANGENT_FIRST`, :attr:`~.IntersectionClassification.TANGENT_SECOND`, :attr:`~.IntersectionClassification.OPPOSED`, :attr:`~.IntersectionClassification.IGNORED_CORNER` or :attr:`~.IntersectionClassification.COINCIDENT_UNUSED`. What's more, we expect all intersections to be classified the same for a given pairing. Args: all_types (Set[.IntersectionClassification]): The set of all intersection classifications encountered among the intersections for the given surface-surface pair. Returns: Tuple[Optional[list], Optional[bool]]: Pair (2-tuple) of * Edges info list; will be empty or :data:`None` * "Contained" boolean. If not :data:`None`, indicates that one of the surfaces is contained in the other. Raises: ValueError: If there are intersections of more than one type among :attr:`~.IntersectionClassification.TANGENT_FIRST`, :attr:`~.IntersectionClassification.TANGENT_SECOND`, :attr:`~.IntersectionClassification.OPPOSED`, :attr:`~.IntersectionClassification.IGNORED_CORNER` or :attr:`~.IntersectionClassification.COINCIDENT_UNUSED`. ValueError: If there is a unique classification, but it isn't one of the tangent types.
def get_function_argspec(func, is_class_method=None): ''' A small wrapper around getargspec that also supports callable classes :param is_class_method: Pass True if you are sure that the function being passed is a class method. The reason for this is that on Python 3 ``inspect.ismethod`` only returns ``True`` for bound methods, while on Python 2, it returns ``True`` for bound and unbound methods. So, on Python 3, in case of a class method, you'd need the class to which the function belongs to be instantiated and this is not always wanted. ''' if not callable(func): raise TypeError('{0} is not a callable'.format(func)) if six.PY2: if is_class_method is True: aspec = inspect.getargspec(func) del aspec.args[0] # self elif inspect.isfunction(func): aspec = inspect.getargspec(func) elif inspect.ismethod(func): aspec = inspect.getargspec(func) del aspec.args[0] # self elif isinstance(func, object): aspec = inspect.getargspec(func.__call__) del aspec.args[0] # self else: raise TypeError( 'Cannot inspect argument list for \'{0}\''.format(func) ) else: if is_class_method is True: aspec = _getargspec(func) del aspec.args[0] # self elif inspect.isfunction(func): aspec = _getargspec(func) # pylint: disable=redefined-variable-type elif inspect.ismethod(func): aspec = _getargspec(func) del aspec.args[0] # self elif isinstance(func, object): aspec = _getargspec(func.__call__) del aspec.args[0] # self else: raise TypeError( 'Cannot inspect argument list for \'{0}\''.format(func) ) return aspec
A small wrapper around getargspec that also supports callable classes :param is_class_method: Pass True if you are sure that the function being passed is a class method. The reason for this is that on Python 3 ``inspect.ismethod`` only returns ``True`` for bound methods, while on Python 2, it returns ``True`` for bound and unbound methods. So, on Python 3, in case of a class method, you'd need the class to which the function belongs to be instantiated and this is not always wanted.
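A brief, hypothetical usage sketch (not from the original source), assuming get_function_argspec above is importable; it shows how 'self' is stripped for bound methods and callable instances.

def add(a, b=1):
    return a + b

class Greeter(object):
    def greet(self, name):
        return name

    def __call__(self, name, greeting='hello'):
        return '{0}, {1}'.format(greeting, name)

# Assumes get_function_argspec from above is in scope.
print(get_function_argspec(add).args)              # ['a', 'b']
print(get_function_argspec(Greeter().greet).args)  # ['name'] -- 'self' stripped
print(get_function_argspec(Greeter()).args)        # ['name', 'greeting'] -- callable instance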
def process_signed_elements(self):
    """
    Verifies the signature nodes:
     - Checks that they are a Response or an Assertion
     - Checks that IDs and reference URIs are unique and consistent.

    :returns: The signed elements tag names
    :rtype: list
    """
    sign_nodes = self.__query('//ds:Signature')

    signed_elements = []
    verified_seis = []
    verified_ids = []
    response_tag = '{%s}Response' % OneLogin_Saml2_Constants.NS_SAMLP
    assertion_tag = '{%s}Assertion' % OneLogin_Saml2_Constants.NS_SAML

    for sign_node in sign_nodes:
        signed_element = sign_node.getparent().tag
        if signed_element != response_tag and signed_element != assertion_tag:
            raise OneLogin_Saml2_ValidationError(
                'Invalid Signature Element %s SAML Response rejected' % signed_element,
                OneLogin_Saml2_ValidationError.WRONG_SIGNED_ELEMENT
            )

        if not sign_node.getparent().get('ID'):
            raise OneLogin_Saml2_ValidationError(
                'Signed Element must contain an ID. SAML Response rejected',
                OneLogin_Saml2_ValidationError.ID_NOT_FOUND_IN_SIGNED_ELEMENT
            )

        id_value = sign_node.getparent().get('ID')
        if id_value in verified_ids:
            raise OneLogin_Saml2_ValidationError(
                'Duplicated ID. SAML Response rejected',
                OneLogin_Saml2_ValidationError.DUPLICATED_ID_IN_SIGNED_ELEMENTS
            )
        verified_ids.append(id_value)

        # Check that reference URI matches the parent ID and no duplicate References or IDs
        ref = OneLogin_Saml2_XML.query(sign_node, './/ds:Reference')
        if ref:
            ref = ref[0]
            if ref.get('URI'):
                sei = ref.get('URI')[1:]

                if sei != id_value:
                    raise OneLogin_Saml2_ValidationError(
                        'Found an invalid Signed Element. SAML Response rejected',
                        OneLogin_Saml2_ValidationError.INVALID_SIGNED_ELEMENT
                    )

                if sei in verified_seis:
                    raise OneLogin_Saml2_ValidationError(
                        'Duplicated Reference URI. SAML Response rejected',
                        OneLogin_Saml2_ValidationError.DUPLICATED_REFERENCE_IN_SIGNED_ELEMENTS
                    )
                verified_seis.append(sei)

        signed_elements.append(signed_element)

    if signed_elements:
        if not self.validate_signed_elements(signed_elements, raise_exceptions=True):
            raise OneLogin_Saml2_ValidationError(
                'Found an unexpected Signature Element. SAML Response rejected',
                OneLogin_Saml2_ValidationError.UNEXPECTED_SIGNED_ELEMENTS
            )
    return signed_elements
Verifies the signature nodes:
 - Checks that they are a Response or an Assertion
 - Checks that IDs and reference URIs are unique and consistent.

:returns: The signed elements tag names
:rtype: list
def parse(self, limit=None, or_limit=1): """ Parse mydrug files :param limit: int limit json docs processed :param or_limit: int odds ratio limit :return: None """ dir_path = Path(self.rawdir) aeolus_file = dir_path / self.files['aeolus']['file'] aeolus_fh = aeolus_file.open('r') count = 0 for line in aeolus_fh.readlines(): if limit is not None and count >= limit: break line = line.rstrip("\n,") if line != '[' and line != ']': self._parse_aeolus_data(document=json.loads(line), or_limit=or_limit) count += 1 if count % 500 == 0: LOG.info("Processed %i documents", count) aeolus_fh.close() return
Parse mydrug files :param limit: int limit json docs processed :param or_limit: int odds ratio limit :return: None
def _netstat_aix():
    '''
    Return netstat information for AIX flavors
    '''
    ret = []
    ## AIX 6.1 - 7.2, appears to ignore addr_family field contents
    ## for addr_family in ('inet', 'inet6'):
    for addr_family in ('inet',):
        # Lookup connections
        cmd = 'netstat -n -a -f {0} | tail -n +3'.format(addr_family)
        out = __salt__['cmd.run'](cmd, python_shell=True)
        for line in out.splitlines():
            comps = line.split()
            if len(comps) < 5:
                continue

            proto_seen = None
            tcp_flag = True
            if 'tcp' == comps[0] or 'tcp4' == comps[0]:
                proto_seen = 'tcp'
            elif 'tcp6' == comps[0]:
                proto_seen = 'tcp6'
            elif 'udp' == comps[0] or 'udp4' == comps[0]:
                proto_seen = 'udp'
                tcp_flag = False
            elif 'udp6' == comps[0]:
                proto_seen = 'udp6'
                tcp_flag = False

            if tcp_flag:
                if len(comps) >= 6:
                    ret.append({
                        'proto': proto_seen,
                        'recv-q': comps[1],
                        'send-q': comps[2],
                        'local-address': comps[3],
                        'remote-address': comps[4],
                        'state': comps[5]})
            else:
                if len(comps) >= 5:
                    ret.append({
                        'proto': proto_seen,
                        'local-address': comps[3],
                        'remote-address': comps[4]})
    return ret
Return netstat information for AIX flavors
def _linemagic(cls, options, strict=False, backend=None): "Deprecated, not expected to be used by any current code" backends = None if backend is None else [backend] options, failure = cls._process_magic(options, strict, backends=backends) if failure: return with options_policy(skip_invalid=True, warn_on_skip=False): StoreOptions.apply_customizations(options, Store.options(backend=backend))
Deprecated, not expected to be used by any current code
def logout(self):
    """
    Log out the user.

    :returns: ``True`` Successful logout
              ``False`` Logout failed (mainly because the user was not logged in)
    """
    ret = False
    if self.login_result is True:
        ret = self.__get_status_code(self.__request('logout')) == 200
        self.login_result = None
    return ret
Log out the user.

:returns: ``True`` Successful logout
          ``False`` Logout failed (mainly because the user was not logged in)
def redirect(location=None, internal=False, code=None, headers={},
             add_slash=False, request=None):
    '''
    Perform a redirect, either internal or external. An internal redirect
    performs the redirect server-side, while the external redirect utilizes
    an HTTP 302 status code.

    :param location: The HTTP location to redirect to.
    :param internal: A boolean indicating whether the redirect should be
                     internal.
    :param code: The HTTP status code to use for the redirect. Defaults to 302.
    :param headers: Any HTTP headers to send with the response, as a dictionary.
    :param request: The :class:`pecan.Request` instance to use.
    '''
    request = request or state.request

    if add_slash:
        if location is None:
            split_url = list(urlparse.urlsplit(request.url))
            new_proto = request.environ.get(
                'HTTP_X_FORWARDED_PROTO', split_url[0]
            )
            split_url[0] = new_proto
        else:
            # urlsplit returns an immutable SplitResult; convert it to a list
            # so the path component can be rewritten below.
            split_url = list(urlparse.urlsplit(location))

        split_url[2] = split_url[2].rstrip('/') + '/'
        location = urlparse.urlunsplit(split_url)

    if not headers:
        headers = {}
    if internal:
        if code is not None:
            raise ValueError('Cannot specify a code for internal redirects')
        request.environ['pecan.recursive.context'] = request.context
        raise ForwardRequestException(location)
    if code is None:
        code = 302
    raise exc.status_map[code](location=location, headers=headers)
Perform a redirect, either internal or external. An internal redirect performs the redirect server-side, while the external redirect utilizes an HTTP 302 status code. :param location: The HTTP location to redirect to. :param internal: A boolean indicating whether the redirect should be internal. :param code: The HTTP status code to use for the redirect. Defaults to 302. :param headers: Any HTTP headers to send with the response, as a dictionary. :param request: The :class:`pecan.Request` instance to use.
def results_class_wise_average_metrics(self): """Class-wise averaged metrics Returns ------- dict results in a dictionary format """ class_wise_results = self.results_class_wise_metrics() class_wise_eer = [] class_wise_fmeasure = [] class_wise_precision = [] class_wise_recall = [] for class_label in class_wise_results: if class_wise_results[class_label]['eer']['eer'] is not None: class_wise_eer.append(class_wise_results[class_label]['eer']['eer']) if class_wise_results[class_label]['f_measure']['f_measure'] is not None: class_wise_fmeasure.append(class_wise_results[class_label]['f_measure']['f_measure']) class_wise_precision.append(class_wise_results[class_label]['f_measure']['precision']) class_wise_recall.append(class_wise_results[class_label]['f_measure']['recall']) if class_wise_eer: eer = float(numpy.nanmean(class_wise_eer)) else: eer = None if class_wise_fmeasure: f_measure = float(numpy.nanmean(class_wise_fmeasure)) else: f_measure = None if class_wise_precision: precision = float(numpy.nanmean(class_wise_precision)) else: precision = None if class_wise_recall: recall = float(numpy.nanmean(class_wise_recall)) else: recall = None return { 'eer': { 'eer': eer }, 'f_measure': { 'f_measure': f_measure, 'precision': precision, 'recall': recall, } }
Class-wise averaged metrics Returns ------- dict results in a dictionary format
def unzip_file(filename):
    """Unzip the file if the file is bzipped, i.e. ends with 'bz2'."""
    if filename.endswith('bz2'):
        bz2file = bz2.BZ2File(filename)
        fdn, tmpfilepath = tempfile.mkstemp()
        with closing(os.fdopen(fdn, 'wb')) as ofpt:
            try:
                ofpt.write(bz2file.read())
            except IOError:
                import traceback
                traceback.print_exc()
                LOGGER.info("Failed to read bzipped file %s", str(filename))
                os.remove(tmpfilepath)
                return None
        return tmpfilepath
    return None
Unzip the file if the file is bzipped, i.e. ends with 'bz2'.
def attribute_changed(self, node, column): """ Calls :meth:`QAbstractItemModel.dataChanged` with given Node attribute index. :param node: Node. :type node: AbstractCompositeNode or GraphModelNode :param column: Attribute column. :type column: int :return: Method success. :rtype: bool """ index = self.get_attribute_index(node, column) if index is not None: self.dataChanged.emit(index, index) return True else: return False
Calls :meth:`QAbstractItemModel.dataChanged` with given Node attribute index. :param node: Node. :type node: AbstractCompositeNode or GraphModelNode :param column: Attribute column. :type column: int :return: Method success. :rtype: bool
def get_post_authorization_redirect_url(request, canvas=True):
    """
    Determine the URL users should be redirected to upon authorizing the application.
    If the request is non-canvas, use the user-defined site url if set, else the site hostname.
    """
    path = request.get_full_path()

    if canvas:
        if FACEBOOK_APPLICATION_CANVAS_URL:
            path = path.replace(urlparse(FACEBOOK_APPLICATION_CANVAS_URL).path, '')

        redirect_uri = 'https://%(domain)s/%(namespace)s%(path)s' % {
            'domain': FACEBOOK_APPLICATION_DOMAIN,
            'namespace': FACEBOOK_APPLICATION_NAMESPACE,
            'path': path
        }
    else:
        if FANDJANGO_SITE_URL:
            site_url = FANDJANGO_SITE_URL
            path = path.replace(urlparse(site_url).path, '')
        else:
            protocol = "https" if request.is_secure() else "http"
            site_url = "%s://%s" % (protocol, request.get_host())
        redirect_uri = site_url + path

    return redirect_uri
Determine the URL users should be redirected to upon authorizing the application.
If the request is non-canvas, use the user-defined site url if set, else the site hostname.
def shred(key_name: str, value: t.Any, field_names: t.Iterable[str] = SHRED_DATA_FIELD_NAMES) -> t.Union[t.Any, str]: """ Replaces sensitive data in ``value`` with ``*`` if ``key_name`` contains something that looks like a secret. :param field_names: a list of key names that can possibly contain sensitive data :param key_name: a key name to check :param value: a value to mask :return: an unchanged value if nothing to hide, ``'*' * len(str(value))`` otherwise """ key_name = key_name.lower() need_shred = False for data_field_name in field_names: if data_field_name in key_name: need_shred = True break if not need_shred: return value return '*' * len(str(value))
Replaces sensitive data in ``value`` with ``*`` if ``key_name`` contains something that looks like a secret. :param field_names: a list of key names that can possibly contain sensitive data :param key_name: a key name to check :param value: a value to mask :return: an unchanged value if nothing to hide, ``'*' * len(str(value))`` otherwise
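A short illustrative sketch, assuming shred above is importable; the field names below are hypothetical stand-ins for the default SHRED_DATA_FIELD_NAMES.

SENSITIVE = ('password', 'secret', 'token')  # hypothetical list for the demo

print(shred('db_password', 'hunter2', field_names=SENSITIVE))  # '*******'
print(shred('username', 'alice', field_names=SENSITIVE))       # 'alice' -- left unchanged
print(shred('api_token', 12345, field_names=SENSITIVE))        # '*****' -- length of str(value)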
def anagrams_in_word(word, sowpods=False, start="", end=""): """Finds anagrams in word. Args: word: the string to base our search off of sowpods: boolean to declare TWL or SOWPODS words file start: a string of starting characters to find anagrams based on end: a string of ending characters to find anagrams based on Yields: a tuple of (word, score) that can be made with the input_word """ input_letters, blanks, questions = blank_tiles(word) for tile in start + end: input_letters.append(tile) for word in word_list(sowpods, start, end): lmap = _letter_map(input_letters) used_blanks = 0 for letter in word: if letter in lmap: lmap[letter] -= 1 if lmap[letter] < 0: used_blanks += 1 if used_blanks > (blanks + questions): break else: used_blanks += 1 if used_blanks > (blanks + questions): break else: yield (word, word_score(word, input_letters, questions))
Finds anagrams in word. Args: word: the string to base our search off of sowpods: boolean to declare TWL or SOWPODS words file start: a string of starting characters to find anagrams based on end: a string of ending characters to find anagrams based on Yields: a tuple of (word, score) that can be made with the input_word
def get_location_from_HDX_code(code, locations=None, configuration=None): # type: (str, Optional[List[Dict]], Optional[Configuration]) -> Optional[str] """Get location from HDX location code Args: code (str): code for which to get location name locations (Optional[List[Dict]]): Valid locations list. Defaults to list downloaded from HDX. configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration. Returns: Optional[str]: location name """ if locations is None: locations = Locations.validlocations(configuration) for locdict in locations: if code.upper() == locdict['name'].upper(): return locdict['title'] return None
Get location from HDX location code Args: code (str): code for which to get location name locations (Optional[List[Dict]]): Valid locations list. Defaults to list downloaded from HDX. configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration. Returns: Optional[str]: location name
def getmembers(self): """ :return: list of members as name, type tuples :rtype: list """ return filter( lambda m: not m[0].startswith("__") and not inspect.isfunction(m[1]) and not inspect.ismethod(m[1]), inspect.getmembers(self.__class__) )
:return: list of members as name, type tuples :rtype: list
def main(): '''Main routine.''' # Load Azure app defaults try: with open('azurermconfig.json') as config_file: config_data = json.load(config_file) except FileNotFoundError: sys.exit('Error: Expecting azurermconfig.json in current folder') tenant_id = config_data['tenantId'] app_id = config_data['appId'] app_secret = config_data['appSecret'] subscription_id = config_data['subscriptionId'] access_token = azurerm.get_access_token(tenant_id, app_id, app_secret) vmlist = azurerm.list_vms_sub(access_token, subscription_id) print(json.dumps(vmlist, sort_keys=False, indent=2, separators=(',', ': '))) ''' for vm in vmlist['value']: count += 1 name = vm['name'] location = vm['location'] offer = vm['properties']['storageProfile']['imageReference']['offer'] sku = vm['properties']['storageProfile']['imageReference']['sku'] print(''.join([str(count), ': ', name, # ', RG: ', rgname, ', location: ', location, ', OS: ', offer, ' ', sku])) '''
Main routine.
def _handle_successful_job(self, job):
    """Handle successful jobs"""
    result = job.result
    task_id = job.kwargs['task_id']

    try:
        task = self.registry.get(task_id)
    except NotFoundError:
        logger.warning("Task %s not found; related job #%s will not be rescheduled",
                       task_id, job.id)
        return

    if task.archiving_cfg and task.archiving_cfg.fetch_from_archive:
        logger.info("Job #%s (task: %s) successfully finished", job.id, task_id)
        return

    if result.nitems > 0:
        task.backend_args['next_from_date'] = unixtime_to_datetime(result.max_date)

        if result.offset:
            task.backend_args['next_offset'] = result.offset

    job_args = self._build_job_arguments(task)

    delay = task.scheduling_cfg.delay if task.scheduling_cfg else WAIT_FOR_QUEUING
    job_id = self._scheduler.schedule_job_task(Q_UPDATING_JOBS, task_id, job_args, delay=delay)

    logger.info("Job #%s (task: %s, old job: %s) re-scheduled", job_id, task_id, job.id)
Handle successful jobs
def entries(self): """ Using the table structure, return the array of entries based on the table size. """ table = self.get_table() entries_array = self.row_structure * table.num_entries pointer_type = ctypes.POINTER(entries_array) return ctypes.cast(table.entries, pointer_type).contents
Using the table structure, return the array of entries based on the table size.
def _update_variables_shim_with_recalculation_table(self): """ Update self._variables_shim with the final values to be patched into the gate parameters, according to the arithmetic expressions in the original program. For example: DECLARE theta REAL DECLARE beta REAL RZ(3 * theta) 0 RZ(beta+theta) 0 gets translated to: DECLARE theta REAL DECLARE __P REAL[2] RZ(__P[0]) 0 RZ(__P[1]) 0 and the recalculation table will contain: { ParameterAref('__P', 0): Mul(3.0, <MemoryReference theta[0]>), ParameterAref('__P', 1): Add(<MemoryReference beta[0]>, <MemoryReference theta[0]>) } Let's say we've made the following two function calls: qpu.write_memory(region_name='theta', value=0.5) qpu.write_memory(region_name='beta', value=0.1) After executing this function, our self.variables_shim in the above example would contain the following: { ParameterAref('theta', 0): 0.5, ParameterAref('beta', 0): 0.1, ParameterAref('__P', 0): 1.5, # (3.0) * theta[0] ParameterAref('__P', 1): 0.6 # beta[0] + theta[0] } Once the _variables_shim is filled, execution continues as with regular binary patching. """ if not hasattr(self._executable, "recalculation_table"): # No recalculation table, no work to be done here. return for memory_reference, expression in self._executable.recalculation_table.items(): # Replace the user-declared memory references with any values the user has written, # coerced to a float because that is how we declared it. self._variables_shim[memory_reference] = float(self._resolve_memory_references(expression))
Update self._variables_shim with the final values to be patched into the gate parameters, according to the arithmetic expressions in the original program. For example: DECLARE theta REAL DECLARE beta REAL RZ(3 * theta) 0 RZ(beta+theta) 0 gets translated to: DECLARE theta REAL DECLARE __P REAL[2] RZ(__P[0]) 0 RZ(__P[1]) 0 and the recalculation table will contain: { ParameterAref('__P', 0): Mul(3.0, <MemoryReference theta[0]>), ParameterAref('__P', 1): Add(<MemoryReference beta[0]>, <MemoryReference theta[0]>) } Let's say we've made the following two function calls: qpu.write_memory(region_name='theta', value=0.5) qpu.write_memory(region_name='beta', value=0.1) After executing this function, our self.variables_shim in the above example would contain the following: { ParameterAref('theta', 0): 0.5, ParameterAref('beta', 0): 0.1, ParameterAref('__P', 0): 1.5, # (3.0) * theta[0] ParameterAref('__P', 1): 0.6 # beta[0] + theta[0] } Once the _variables_shim is filled, execution continues as with regular binary patching.
def rpm_eval(macro): """Get value of given macro using rpm tool""" try: value = subprocess.Popen( ['rpm', '--eval', macro], stdout=subprocess.PIPE).communicate()[0].strip() except OSError: logger.error('Failed to get value of {0} rpm macro'.format( macro), exc_info=True) value = b'' return console_to_str(value)
Get value of given macro using rpm tool
def bin(self): """The bin index of this mark. :returns: An integer bin index or None if the mark is inactive. """ bin = self._query(('MBIN?', Integer, Integer), self.idx) return None if bin == -1 else bin
The bin index of this mark. :returns: An integer bin index or None if the mark is inactive.
def cmd_wp_undo(self): '''handle wp undo''' if self.undo_wp_idx == -1 or self.undo_wp is None: print("No undo information") return wp = self.undo_wp if self.undo_type == 'move': wp.target_system = self.target_system wp.target_component = self.target_component self.loading_waypoints = True self.loading_waypoint_lasttime = time.time() self.master.mav.mission_write_partial_list_send(self.target_system, self.target_component, self.undo_wp_idx, self.undo_wp_idx) self.wploader.set(wp, self.undo_wp_idx) print("Undid WP move") elif self.undo_type == 'remove': self.wploader.insert(self.undo_wp_idx, wp) self.fix_jumps(self.undo_wp_idx, 1) self.send_all_waypoints() print("Undid WP remove") else: print("bad undo type") self.undo_wp = None self.undo_wp_idx = -1
handle wp undo
def view(self, photo, options=None, **kwds): """ Endpoint: /photo/<id>[/<options>]/view.json Requests all properties of a photo. Can be used to obtain URLs for the photo at a particular size, by using the "returnSizes" parameter. Returns the requested photo object. The options parameter can be used to pass in additional options. Eg: options={"token": <token_data>} """ option_string = self._build_option_string(options) result = self._client.get("/photo/%s%s/view.json" % (self._extract_id(photo), option_string), **kwds)["result"] return Photo(self._client, result)
Endpoint: /photo/<id>[/<options>]/view.json Requests all properties of a photo. Can be used to obtain URLs for the photo at a particular size, by using the "returnSizes" parameter. Returns the requested photo object. The options parameter can be used to pass in additional options. Eg: options={"token": <token_data>}
def const_rand(size, seed=23980):
    """
    Generate a random array with a fixed seed.
    """
    # Save and restore the full RNG state: np.random.seed() returns None, so the
    # previous approach of capturing the "old seed" could not actually restore it.
    old_state = np.random.get_state()
    np.random.seed(seed)
    out = np.random.rand(size)
    np.random.set_state(old_state)
    return out
Generate a random array with a fixed seed.
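A small sketch of the intended behaviour (illustrative only, assuming const_rand above is in scope): repeated calls with the same seed agree, and the caller's RNG state is left untouched because the version above saves and restores it.

import numpy as np

a = const_rand(5)
b = const_rand(5)
print(np.allclose(a, b))  # True -- the fixed seed makes the output reproducible

np.random.seed(1)
before = np.random.get_state()[1].copy()
_ = const_rand(3)
after = np.random.get_state()[1]
print(np.array_equal(before, after))  # True -- the caller's RNG state is restored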
def convert(self, string, preprocess = None): """ Swap characters from a script to transliteration and vice versa. Optionally sanitize string by using preprocess function. :param preprocess: :param string: :return: """ string = unicode(preprocess(string) if preprocess else string, encoding="utf-8") if self.regex: return self.regex.sub(lambda x: self.substitutes[x.group()], string).encode('utf-8') else: return string
Swap characters from a script to transliteration and vice versa. Optionally sanitize string by using preprocess function. :param preprocess: :param string: :return:
def authenticate(self, provider): """ Starts OAuth authorization flow, will redirect to 3rd party site. """ callback_url = url_for(".callback", provider=provider, _external=True) provider = self.get_provider(provider) session['next'] = request.args.get('next') or '' return provider.authorize(callback_url)
Starts OAuth authorization flow, will redirect to 3rd party site.
def parse_response(self, byte_stream, response_class):
    '''Parses a Hadoop RPC response.

    The RpcResponseHeaderProto contains a status field that marks SUCCESS or ERROR.
    The Hadoop RPC protocol looks like the diagram below for receiving SUCCESS responses.

    +-----------------------------------------------------------+
    |  Length of the RPC response (4 bytes/32 bit int)          |
    +-----------------------------------------------------------+
    |  Delimited serialized RpcResponseHeaderProto              |
    +-----------------------------------------------------------+
    |  Serialized delimited RPC response                        |
    +-----------------------------------------------------------+

    In case of an error, the header status is set to ERROR and the error fields are set.
    '''

    log.debug("############## PARSING ##############")
    log.debug("Payload class: %s" % response_class)

    # Read first 4 bytes to get the total length
    len_bytes = byte_stream.read(4)
    total_length = struct.unpack("!I", len_bytes)[0]
    log.debug("Total response length: %s" % total_length)

    header = RpcResponseHeaderProto()
    (header_len, header_bytes) = get_delimited_message_bytes(byte_stream)

    log.debug("Header read %d" % header_len)
    header.ParseFromString(header_bytes)
    log_protobuf_message("RpcResponseHeaderProto", header)

    if header.status == 0:
        log.debug("header: %s, total: %s" % (header_len, total_length))
        if header_len >= total_length:
            return
        response = response_class()
        response_bytes = get_delimited_message_bytes(byte_stream, total_length - header_len)[1]
        if len(response_bytes) > 0:
            response.ParseFromString(response_bytes)
            if log.getEffectiveLevel() == logging.DEBUG:
                log_protobuf_message("Response", response)
        return response
    else:
        self.handle_error(header)
Parses a Hadoop RPC response.

The RpcResponseHeaderProto contains a status field that marks SUCCESS or ERROR.
The Hadoop RPC protocol looks like the diagram below for receiving SUCCESS responses.

+-----------------------------------------------------------+
|  Length of the RPC response (4 bytes/32 bit int)          |
+-----------------------------------------------------------+
|  Delimited serialized RpcResponseHeaderProto              |
+-----------------------------------------------------------+
|  Serialized delimited RPC response                        |
+-----------------------------------------------------------+

In case of an error, the header status is set to ERROR and the error fields are set.
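A hedged, standalone sketch of the framing in the diagram above (not the class's real helpers; get_delimited_message_bytes does this work in the original): a 4-byte big-endian total length, then a varint-delimited header, then the delimited response body.

import io
import struct

def read_varint(stream):
    # base-128 varint, the length prefix used for delimited protobuf messages
    result, shift = 0, 0
    while True:
        byte = stream.read(1)[0]
        result |= (byte & 0x7f) << shift
        if not byte & 0x80:
            return result
        shift += 7

def split_rpc_response(raw):
    stream = io.BytesIO(raw)
    total_length = struct.unpack('!I', stream.read(4))[0]  # 4-byte big-endian length
    header_bytes = stream.read(read_varint(stream))        # delimited RpcResponseHeaderProto
    body_bytes = stream.read()                             # remaining delimited RPC response
    return total_length, header_bytes, body_bytes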
def d2Sbr_dV2(self, Cbr, Ybr, V, lam): """ Based on d2Sbr_dV2.m from MATPOWER by Ray Zimmerman, developed at PSERC Cornell. See U{http://www.pserc.cornell.edu/matpower/} for more information. @rtype: tuple @return: The 2nd derivatives of complex power flow w.r.t. voltage. """ nb = len(V) nl = len(lam) ib = range(nb) il = range(nl) diaglam = csr_matrix((lam, (il, il))) diagV = csr_matrix((V, (ib, ib))) A = Ybr.H * diaglam * Cbr B = conj(diagV) * A * diagV D = csr_matrix( ((A * V) * conj(V), (ib, ib)) ) E = csr_matrix( ((A.T * conj(V) * V), (ib, ib)) ) F = B + B.T G = csr_matrix((ones(nb) / abs(V), (ib, ib))) Haa = F - D - E Hva = 1j * G * (B - B.T - D + E) Hav = Hva.T Hvv = G * F * G return Haa, Hav, Hva, Hvv
Based on d2Sbr_dV2.m from MATPOWER by Ray Zimmerman, developed at PSERC Cornell. See U{http://www.pserc.cornell.edu/matpower/} for more information. @rtype: tuple @return: The 2nd derivatives of complex power flow w.r.t. voltage.
def _deserialize(self, stream): """:param from_rev_list: if true, the stream format is coming from the rev-list command Otherwise it is assumed to be a plain data stream from our object""" readline = stream.readline self.tree = Tree(self.repo, hex_to_bin(readline().split()[1]), Tree.tree_id << 12, '') self.parents = [] next_line = None while True: parent_line = readline() if not parent_line.startswith(b'parent'): next_line = parent_line break # END abort reading parents self.parents.append(type(self)(self.repo, hex_to_bin(parent_line.split()[-1].decode('ascii')))) # END for each parent line self.parents = tuple(self.parents) # we don't know actual author encoding before we have parsed it, so keep the lines around author_line = next_line committer_line = readline() # we might run into one or more mergetag blocks, skip those for now next_line = readline() while next_line.startswith(b'mergetag '): next_line = readline() while next_line.startswith(b' '): next_line = readline() # end skip mergetags # now we can have the encoding line, or an empty line followed by the optional # message. self.encoding = self.default_encoding self.gpgsig = None # read headers enc = next_line buf = enc.strip() while buf: if buf[0:10] == b"encoding ": self.encoding = buf[buf.find(' ') + 1:].decode('ascii') elif buf[0:7] == b"gpgsig ": sig = buf[buf.find(b' ') + 1:] + b"\n" is_next_header = False while True: sigbuf = readline() if not sigbuf: break if sigbuf[0:1] != b" ": buf = sigbuf.strip() is_next_header = True break sig += sigbuf[1:] # end read all signature self.gpgsig = sig.rstrip(b"\n").decode('ascii') if is_next_header: continue buf = readline().strip() # decode the authors name try: self.author, self.authored_date, self.author_tz_offset = \ parse_actor_and_date(author_line.decode(self.encoding, 'replace')) except UnicodeDecodeError: log.error("Failed to decode author line '%s' using encoding %s", author_line, self.encoding, exc_info=True) try: self.committer, self.committed_date, self.committer_tz_offset = \ parse_actor_and_date(committer_line.decode(self.encoding, 'replace')) except UnicodeDecodeError: log.error("Failed to decode committer line '%s' using encoding %s", committer_line, self.encoding, exc_info=True) # END handle author's encoding # a stream from our data simply gives us the plain message # The end of our message stream is marked with a newline that we strip self.message = stream.read() try: self.message = self.message.decode(self.encoding, 'replace') except UnicodeDecodeError: log.error("Failed to decode message '%s' using encoding %s", self.message, self.encoding, exc_info=True) # END exception handling return self
:param from_rev_list: if true, the stream format is coming from the rev-list command Otherwise it is assumed to be a plain data stream from our object
def rules(self):
    """
    Returns the rules for this password based on the configured options.

    :return: <str>
    """
    rules = ['Passwords need to be at least {0} characters long'.format(self.__minlength)]
    if self.__requireUppercase:
        rules.append('have at least one uppercase letter')
    if self.__requireLowercase:
        rules.append('have at least one lowercase letter')
    if self.__requireNumber:
        rules.append('have at least one number')
    if self.__requireWildcard:
        rules.append('have at least one non-alphanumeric character')

    if len(rules) == 1:
        return rules[0]
    else:
        return ', '.join(rules[:-1]) + ' and ' + rules[-1]
Returns the rules for this password based on the configured options. :return: <str>
def find_file(search_dir, file_pattern): """ Search for a file in a directory, and return the first match. If the file is not found return an empty string Args: search_dir: The root directory to search in file_pattern: A unix-style wildcard pattern representing the file to find Returns: The path to the file if it was found, otherwise an empty string """ for root, dirnames, fnames in os.walk(search_dir): for fname in fnames: if fnmatch.fnmatch(fname, file_pattern): return os.path.join(root, fname) return ""
Search for a file in a directory, and return the first match. If the file is not found return an empty string Args: search_dir: The root directory to search in file_pattern: A unix-style wildcard pattern representing the file to find Returns: The path to the file if it was found, otherwise an empty string
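A tiny runnable sketch (the file layout is hypothetical and assumes find_file above is importable):

import os
import tempfile

root = tempfile.mkdtemp()
os.makedirs(os.path.join(root, 'sub', 'dir'))
open(os.path.join(root, 'sub', 'dir', 'report_2021.csv'), 'w').close()

print(find_file(root, 'report_*.csv'))  # .../sub/dir/report_2021.csv
print(find_file(root, 'missing.txt'))   # '' -- empty string when nothing matches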
def disconnect(self, cback): "See signal" return self.signal.disconnect(cback, subscribers=self.subscribers, instance=self.instance)
See signal
def from_bytes(cls, xbytes: bytes) -> 'BlsEntity':
    """
    Creates a BLS entity from its bytes representation.

    :param xbytes: Bytes representation of the BLS entity
    :return: BLS entity instance
    """

    logger = logging.getLogger(__name__)
    logger.debug("BlsEntity::from_bytes: >>>")

    c_instance = c_void_p()
    do_call(cls.from_bytes_handler, xbytes, len(xbytes), byref(c_instance))

    res = cls(c_instance)

    logger.debug("BlsEntity::from_bytes: <<< res: %r", res)
    return res
Creates a BLS entity from its bytes representation.

:param xbytes: Bytes representation of the BLS entity
:return: BLS entity instance
def is_a_string(var, allow_none=False): """ Returns True if var is a string (ascii or unicode) Result py-2 py-3 ----------------- ----- ----- b'bytes literal' True False 'string literal' True True u'unicode literal' True True Also returns True if the var is a numpy string (numpy.string_, numpy.unicode_). """ return isinstance(var, six.string_types) or (var is None and allow_none)
Returns True if var is a string (ascii or unicode) Result py-2 py-3 ----------------- ----- ----- b'bytes literal' True False 'string literal' True True u'unicode literal' True True Also returns True if the var is a numpy string (numpy.string_, numpy.unicode_).
def toposort(data):
    """Dependencies are expressed as a dictionary whose keys are items
    and whose values are a set of dependent items. Output is a list of
    sets in topological order. The first set consists of items with no
    dependencies, each subsequent set consists of items that depend upon
    items in the preceding sets.
    """

    # Special case empty input.
    if len(data) == 0:
        return

    # Copy the input so as to leave it unmodified.
    data = data.copy()

    # Ignore self dependencies.
    for k, v in data.items():
        v.discard(k)
    # Find all items that don't depend on anything.
    extra_items_in_deps = functools.reduce(
        set.union, data.values()
    ) - set(data.keys())
    # Add empty dependencies where needed.
    data.update(dict((item, set()) for item in extra_items_in_deps))
    while True:
        ordered = set(item for item, dep in data.items() if len(dep) == 0)
        if not ordered:
            break
        yield ordered
        data = dict(
            (item, (dep - ordered))
            for item, dep in data.items()
            if item not in ordered
        )
    if len(data) != 0:
        raise ValueError(
            'Cyclic dependencies exist among these items: {}'
            .format(', '.join(repr(x) for x in data.items()))
        )
Dependencies are expressed as a dictionary whose keys are items
and whose values are a set of dependent items. Output is a list of
sets in topological order. The first set consists of items with no
dependencies, each subsequent set consists of items that depend upon
items in the preceding sets.
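A minimal usage sketch with illustrative data, assuming toposort above is in scope: each yielded set depends only on items from earlier sets.

deps = {
    'app': {'lib', 'config'},
    'lib': {'config'},
    'config': set(),
}
for batch in toposort(deps):
    print(sorted(batch))
# ['config']
# ['lib']
# ['app']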
def no_type_check(arg): """Decorator to indicate that annotations are not type hints. The argument must be a class or function; if it is a class, it applies recursively to all methods and classes defined in that class (but not to methods defined in its superclasses or subclasses). This mutates the function(s) or class(es) in place. """ if isinstance(arg, type): arg_attrs = arg.__dict__.copy() for attr, val in arg.__dict__.items(): if val in arg.__bases__ + (arg,): arg_attrs.pop(attr) for obj in arg_attrs.values(): if isinstance(obj, types.FunctionType): obj.__no_type_check__ = True if isinstance(obj, type): no_type_check(obj) try: arg.__no_type_check__ = True except TypeError: # built-in classes pass return arg
Decorator to indicate that annotations are not type hints. The argument must be a class or function; if it is a class, it applies recursively to all methods and classes defined in that class (but not to methods defined in its superclasses or subclasses). This mutates the function(s) or class(es) in place.
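A short sketch of the effect (mirroring how typing.no_type_check is used; the class below is hypothetical): the decorator marks the function objects and the class itself so type checkers skip their annotations.

@no_type_check
class Config:
    def load(self, path: int) -> "not-really-a-type":
        return path

print(Config.load.__no_type_check__)  # True
print(Config.__no_type_check__)       # True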
def makedoedict(str1):
    """Build two dictionaries from the DOE blocks in str1 (blocks are separated
    by '..'): one mapping each block name to its full text, and one mapping
    each block name to the block it belongs to."""
    blocklist = str1.split('..')
    blocklist = blocklist[:-1]  # remove empty item after last '..'
    blockdict = {}
    belongsdict = {}
    for num in range(0, len(blocklist)):
        blocklist[num] = blocklist[num].strip()
        linelist = blocklist[num].split(os.linesep)
        aline = linelist[0]
        alinelist = aline.split('=')
        name = alinelist[0].strip()
        aline = linelist[1]
        alinelist = aline.split('=')
        belongs = alinelist[-1].strip()
        theblock = blocklist[num] + os.linesep + '..' + os.linesep + os.linesep  # put the '..' back in the block
        blockdict[name] = theblock
        belongsdict[name] = belongs
    return [blockdict, belongsdict]
Build two dictionaries from the DOE blocks in str1 (blocks are separated
by '..'): one mapping each block name to its full text, and one mapping
each block name to the block it belongs to.
def est_credible_region(self, level=0.95, return_outside=False, modelparam_slice=None):
    """
    Returns an array containing particles inside a credible region of a
    given level, such that the described region has probability mass
    no less than the desired level.

    Particles in the returned region are selected by including the highest-
    weight particles first until the desired credibility level is reached.

    :param float level: Credibility level to report.
    :param bool return_outside: If `True`, the return value is a tuple
        of the particles within the credible region, and the rest of the
        posterior particle cloud.
    :param slice modelparam_slice: Slice over which model parameters
        to consider.

    :rtype: :class:`numpy.ndarray`, shape ``(n_credible, n_mps)``,
        where ``n_credible`` is the number of particles in the credible
        region and ``n_mps`` corresponds to the size of ``modelparam_slice``.
        If ``return_outside`` is ``True``, this method instead
        returns tuple ``(inside, outside)`` where ``inside`` is as
        described above, and ``outside`` has shape ``(n_particles-n_credible, n_mps)``.
    :return: An array of particles inside the estimated credible region. Or,
        if ``return_outside`` is ``True``, both the particles inside and the
        particles outside, as a tuple.
    """

    # which slice of modelparams to take
    s_ = np.s_[modelparam_slice] if modelparam_slice is not None else np.s_[:]
    mps = self.particle_locations[:, s_]

    # Start by sorting the particles by weight.
    # We do so by obtaining an array of indices `id_sort` such that
    # `particle_weights[id_sort]` is in descending order.
    id_sort = np.argsort(self.particle_weights)[::-1]

    # Find the cumulative sum of the sorted weights.
    cumsum_weights = np.cumsum(self.particle_weights[id_sort])

    # Find all the indices where the sum is less than level.
    # We first find id_cred such that
    # `all(cumsum_weights[id_cred] <= level)`.
    id_cred = cumsum_weights <= level
    # By construction, by adding the next particle to id_cred, it must be
    # true that `cumsum_weights[id_cred] >= level`, as required.
    id_cred[np.sum(id_cred)] = True

    # We now return a slice onto the particle_locations by first permuting
    # the particles according to the sort order, then by selecting the
    # credible particles.
    if return_outside:
        return (
            mps[id_sort][id_cred],
            mps[id_sort][np.logical_not(id_cred)]
        )
    else:
        return mps[id_sort][id_cred]
Returns an array containing particles inside a credible region of a
given level, such that the described region has probability mass
no less than the desired level.

Particles in the returned region are selected by including the highest-
weight particles first until the desired credibility level is reached.

:param float level: Credibility level to report.
:param bool return_outside: If `True`, the return value is a tuple
    of the particles within the credible region, and the rest of the
    posterior particle cloud.
:param slice modelparam_slice: Slice over which model parameters
    to consider.

:rtype: :class:`numpy.ndarray`, shape ``(n_credible, n_mps)``,
    where ``n_credible`` is the number of particles in the credible
    region and ``n_mps`` corresponds to the size of ``modelparam_slice``.
    If ``return_outside`` is ``True``, this method instead
    returns tuple ``(inside, outside)`` where ``inside`` is as
    described above, and ``outside`` has shape ``(n_particles-n_credible, n_mps)``.
:return: An array of particles inside the estimated credible region. Or,
    if ``return_outside`` is ``True``, both the particles inside and the
    particles outside, as a tuple.
def GetFileEntryByPathSpec(self, path_spec): """Retrieves a file entry for a path specification. Args: path_spec (PathSpec): a path specification. Returns: DataRangeFileEntry: a file entry or None if not available. """ return data_range_file_entry.DataRangeFileEntry( self._resolver_context, self, path_spec, is_root=True, is_virtual=True)
Retrieves a file entry for a path specification. Args: path_spec (PathSpec): a path specification. Returns: DataRangeFileEntry: a file entry or None if not available.
def stop_data_fetch(self): """Stops the thread that fetches data from the Streams view server. """ if self._data_fetcher: self._data_fetcher.stop.set() self._data_fetcher = None
Stops the thread that fetches data from the Streams view server.
def tempdir(*args, **kwargs): """A contextmanager to work in an auto-removed temporary directory Arguments are passed through to tempfile.mkdtemp example: >>> with tempdir() as path: ... pass """ d = tempfile.mkdtemp(*args, **kwargs) try: yield d finally: shutil.rmtree(d)
A contextmanager to work in an auto-removed temporary directory Arguments are passed through to tempfile.mkdtemp example: >>> with tempdir() as path: ... pass
def receive(self, data): """receive(data) -> List of decoded messages. Processes :obj:`data`, which must be a bytes-like object, and returns a (possibly empty) list with :class:`bytes` objects, each containing a decoded message. Any non-terminated SLIP packets in :obj:`data` are buffered, and processed with the next call to :meth:`receive`. :param bytes data: The bytes-like object to be processed. An empty :obj:`data` parameter forces the internal buffer to be flushed and decoded. :return: A (possibly empty) list of decoded messages. :rtype: list(bytes) :raises ProtocolError: An invalid byte sequence has been detected. """ # Empty data indicates that the data reception is complete. # To force a buffer flush, an END byte is added, so that the # current contents of _recv_buffer will form a complete message. if not data: data = END self._recv_buffer += data # The following situations can occur: # # 1) _recv_buffer is empty or contains only END bytes --> no packets available # 2) _recv_buffer contains non-END bytes --> packets are available # # Strip leading END bytes from _recv_buffer to avoid handling empty _packets. self._recv_buffer = self._recv_buffer.lstrip(END) if self._recv_buffer: # The _recv_buffer contains non-END bytes. # It is now split on sequences of one or more END bytes. # The trailing element from the split operation is a possibly incomplete # packet; this element is therefore used as the new _recv_buffer. # If _recv_buffer contains one or more trailing END bytes, # (meaning that there are no incomplete packets), then the last element, # and therefore the new _recv_buffer, is an empty bytes object. self._packets.extend(re.split(END + b'+', self._recv_buffer)) self._recv_buffer = self._packets.pop() # Process the buffered packets return self.flush()
receive(data) -> List of decoded messages. Processes :obj:`data`, which must be a bytes-like object, and returns a (possibly empty) list with :class:`bytes` objects, each containing a decoded message. Any non-terminated SLIP packets in :obj:`data` are buffered, and processed with the next call to :meth:`receive`. :param bytes data: The bytes-like object to be processed. An empty :obj:`data` parameter forces the internal buffer to be flushed and decoded. :return: A (possibly empty) list of decoded messages. :rtype: list(bytes) :raises ProtocolError: An invalid byte sequence has been detected.
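A standalone, simplified sketch of the buffering behaviour described above. It only shows the END-delimited splitting; real SLIP decoding of ESC sequences is omitted, and END is the standard 0xC0 delimiter.

import re

END = b'\xc0'
buffer, packets = b'', []
for chunk in (b'\xc0hel', b'lo\xc0\xc0wor', b'ld\xc0'):  # data arriving in arbitrary pieces
    buffer += chunk
    buffer = buffer.lstrip(END)
    if buffer:
        parts = re.split(END + b'+', buffer)
        buffer = parts.pop()      # a possibly incomplete trailing packet stays buffered
        packets.extend(parts)
print(packets)  # [b'hello', b'world']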
def get_secret(self, filename, secret, type_=None): """Checks to see whether a secret is found in the collection. :type filename: str :param filename: the file to search in. :type secret: str :param secret: secret hash of secret to search for. :type type_: str :param type_: type of secret, if known. :rtype: PotentialSecret|None """ if filename not in self.data: return None if type_: # Optimized lookup, because we know the type of secret # (and therefore, its hash) tmp_secret = PotentialSecret(type_, filename, secret='will be overriden') tmp_secret.secret_hash = secret if tmp_secret in self.data[filename]: return self.data[filename][tmp_secret] return None # NOTE: We can only optimize this, if we knew the type of secret. # Otherwise, we need to iterate through the set and find out. for obj in self.data[filename]: if obj.secret_hash == secret: return obj return None
Checks to see whether a secret is found in the collection. :type filename: str :param filename: the file to search in. :type secret: str :param secret: secret hash of secret to search for. :type type_: str :param type_: type of secret, if known. :rtype: PotentialSecret|None
def requestMapIdentity(self, subject, vendorSpecific=None): """See Also: requestMapIdentityResponse() Args: subject: vendorSpecific: Returns: """ response = self.requestMapIdentityResponse(subject, vendorSpecific) return self._read_boolean_response(response)
See Also: requestMapIdentityResponse() Args: subject: vendorSpecific: Returns:
def get_scores(self, *args): ''' In this case, args aren't used, since this information is taken directly from the corpus categories. Returns ------- np.array, scores ''' if self.tdf_ is None: raise Exception("Use set_category_name('category name', ['not category name', ...]) " + "to set the category of interest") avgdl = self.tdf_.sum(axis=0).mean() def idf(cat): # Number of categories with term n_q = (self.tdf_ > 0).astype(int).max(axis=1).sum() N = len(self.tdf_) return (N - n_q + 0.5) / (n_q + 0.5) def length_adjusted_tf(cat): tf = self.tdf_[cat] dl = self.tdf_[cat].sum() return ((tf * (self.k1 + 1)) / (tf + self.k1 * (1 - self.b + self.b * (dl / avgdl)))) def bm25_score(cat): return - length_adjusted_tf(cat) * np.log(idf(cat)) scores = bm25_score('cat') - bm25_score('ncat') return scores
In this case, args aren't used, since this information is taken directly from the corpus categories. Returns ------- np.array, scores
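A hedged, self-contained illustration of the BM25-style term score used above, with hypothetical term counts and typical parameter values standing in for the corpus's term-document frequency table and the object's k1/b attributes:

import numpy as np

k1, b = 1.2, 0.75                    # typical BM25 parameters (assumed values)
tf = np.array([3.0, 0.0, 1.0])       # hypothetical term frequencies in the category
dl, avgdl = tf.sum(), 5.0            # category "document" length and average length
n_q, N = 1, 3                        # categories containing the term, total categories

idf = (N - n_q + 0.5) / (n_q + 0.5)
adjusted_tf = (tf * (k1 + 1)) / (tf + k1 * (1 - b + b * (dl / avgdl)))
scores = -adjusted_tf * np.log(idf)
print(scores)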
def link(self, link, title, text): """Rendering a given link with content and title. :param link: href link for ``<a>`` tag. :param title: title content for `title` attribute. :param text: text content for description. """ if self.anonymous_references: underscore = '__' else: underscore = '_' if title: return self._raw_html( '<a href="{link}" title="{title}">{text}</a>'.format( link=link, title=title, text=text ) ) if not self.parse_relative_links: return '\ `{text} <{target}>`{underscore}\ '.format( target=link, text=text, underscore=underscore ) else: url_info = urlparse(link) if url_info.scheme: return '\ `{text} <{target}>`{underscore}\ '.format( target=link, text=text, underscore=underscore ) else: link_type = 'doc' anchor = url_info.fragment if url_info.fragment: if url_info.path: # Can't link to anchors via doc directive. anchor = '' else: # Example: [text](#anchor) link_type = 'ref' doc_link = '{doc_name}{anchor}'.format( # splittext approach works whether or not path is set. It # will return an empty string if unset, which leads to # anchor only ref. doc_name=os.path.splitext(url_info.path)[0], anchor=anchor ) return '\ :{link_type}:`{text} <{doc_link}>`\ '.format( link_type=link_type, doc_link=doc_link, text=text )
Rendering a given link with content and title. :param link: href link for ``<a>`` tag. :param title: title content for `title` attribute. :param text: text content for description.
def _include_environment_variables(self, program, executor_vars): """Define environment variables.""" env_vars = { 'RESOLWE_HOST_URL': self.settings_actual.get('RESOLWE_HOST_URL', 'localhost'), } set_env = self.settings_actual.get('FLOW_EXECUTOR', {}).get('SET_ENV', {}) env_vars.update(executor_vars) env_vars.update(set_env) export_commands = ['export {}={}'.format(key, shlex.quote(value)) for key, value in env_vars.items()] return os.linesep.join(export_commands) + os.linesep + program
Define environment variables.
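A tiny illustration of the export preamble this produces (the variable names and program text below are hypothetical): values are shell-quoted and the exports are prepended to the program.

import os
import shlex

env_vars = {'RESOLWE_HOST_URL': 'http://localhost:8000', 'DATA_DIR': '/tmp/data dir'}
program = 'echo "$RESOLWE_HOST_URL"'

exports = ['export {}={}'.format(key, shlex.quote(value)) for key, value in env_vars.items()]
print(os.linesep.join(exports) + os.linesep + program)
# export RESOLWE_HOST_URL=http://localhost:8000
# export DATA_DIR='/tmp/data dir'
# echo "$RESOLWE_HOST_URL"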