code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
def register_job_from_link(self, link, key, **kwargs):
    """Create and register a `JobDetails` entry in the `JobArchive` from a `Link`.

    Parameters
    ----------
    link : `Link`
        Link object describing the job.
    key : str
        Key identifying this job in the archive.

    Keyword Arguments
    -----------------
    job_config : dict, optional
        Configuration for the job; defaults to ``link.args``.
    status : `JobStatus`, optional
        Initial job status; defaults to ``JobStatus.unknown``.
    logfile : str, optional
        Path to the job's log file.

    Returns
    -------
    `JobDetails`
        The newly registered job details object.
    """
    config = kwargs.get('job_config')
    if config is None:
        config = link.args
    details = JobDetails(jobname=link.linkname,
                         jobkey=key,
                         appname=link.appname,
                         logfile=kwargs.get('logfile'),
                         jobconfig=config,
                         timestamp=get_timestamp(),
                         file_dict=copy.deepcopy(link.files),
                         sub_file_dict=copy.deepcopy(link.sub_files),
                         status=kwargs.get('status', JobStatus.unknown))
    self.register_job(details)
    return details
Register a job in the `JobArchive` from a `Link` object
def peek_64(library, session, address):
    """Read a 64-bit value from the specified address.

    Corresponds to the viPeek64 function of the VISA library.

    :param library: the visa library wrapped by ctypes.
    :param session: Unique logical identifier to a session.
    :param address: Source address to read the value from.
    :return: Data read from the bus, and the return value of the library call.
    :rtype: int, :class:`pyvisa.constants.StatusCode`
    """
    result = ViUInt64()
    status = library.viPeek64(session, address, byref(result))
    return result.value, status
Read a 64-bit value from the specified address. Corresponds to the viPeek64 function of the VISA library. :param library: the visa library wrapped by ctypes. :param session: Unique logical identifier to a session. :param address: Source address to read the value. :return: Data read from bus, return value of the library call. :rtype: int, :class:`pyvisa.constants.StatusCode`
def add_input_opt(self, opt, inp):
    """Add a command-line option whose value is the given input.

    The input's DAX representation becomes the option value, and the
    input is registered as an input of this node.
    """
    value = inp._dax_repr()
    self.add_opt(opt, value)
    self._add_input(inp)
Add an option that determines an input
def parse_options(self, arg):
    """Parse a single command-line token against the registered options.

    :param arg: one token from argv (e.g. ``-v`` or ``--out=file``).
    :returns: True if the token matched a registered option, else False.
    :raises RuntimeError: if a non-boolean option is missing its value.
    """
    if not arg.startswith('-'):
        return False
    value = None
    if '=' in arg:
        # BUGFIX: split only on the first '=' so values may themselves
        # contain '=' (e.g. ``--out=a=b``); plain split() raised
        # "too many values to unpack" in that case.
        arg, value = arg.split('=', 1)
    for option in self._option_list:
        if arg not in (option.shortname, option.longname):
            continue
        action = option.action
        if action:
            action()
            if option.key == option.shortname:
                self._results[option.key] = True
            return True
        if option.boolean and option.default:
            self._results[option.key] = False
            return True
        if option.boolean:
            self._results[option.key] = True
            return True
        # Option takes a value: consume it from '=' or the next argv token.
        if not value:
            if self._argv:
                value = self._argv[0]
                self._argv = self._argv[1:]
        if not value:
            raise RuntimeError('Missing value for: %s' % option.name)
        self._results[option.key] = option.to_python(value)
        return True
    return False
Parse options with the argv :param arg: one arg from argv
def last(self):
    """Return the most recently accessed element.

    This is the element returned by the latest call to ``next()`` or
    ``prev()``; if neither has been called yet, the first element of
    ``range()`` is returned.

    :raises IndexError: if the range is empty.
    """
    if not self._range:
        raise IndexError("range is empty")
    # _idx == -1 means next()/prev() have not been called yet.
    return self._range[0] if self._idx == -1 else self._get(self._idx)
Returns the last element accessed via next() or prev(). Returns the first element of range() if neither of these was called.
def _clear_entity_type_registry(entity, **kwargs): """Clear the given database/collection object's type registry.""" codecopts = entity.codec_options.with_options(type_registry=None) return entity.with_options(codec_options=codecopts, **kwargs)
Clear the given database/collection object's type registry.
def get_any_node(self, addr):
    """Return any VFG node for the basic block at ``addr``.

    Depending on the context sensitivity level there may be several nodes
    for the same address (one per context); the first one encountered is
    returned, which might not be the one you want. Returns None when no
    node matches.
    """
    for node in self.graph.nodes():
        if node.addr == addr:
            return node
    return None
Get any VFG node corresponding to the basic block at @addr. Note that depending on the context sensitivity level, there might be multiple nodes corresponding to different contexts. This function will return the first one it encounters, which might not be what you want.
def remove_release(self, username, package_name, version):
    """Remove a release and all files under it.

    :param username: the login of the package owner
    :param package_name: the name of the package
    :param version: the version of the release to remove
    """
    url = '{0}/release/{1}/{2}/{3}'.format(
        self.domain, username, package_name, version)
    response = self.session.delete(url)
    self._check_response(response, [201])
remove a release and all files under it :param username: the login of the package owner :param package_name: the name of the package :param version: the version of the release
def get(self, list_id, merge_id):
    """Get information about a specific merge field in a list.

    :param list_id: The unique id for the list.
    :type list_id: :py:class:`str`
    :param merge_id: The id for the merge field.
    :type merge_id: :py:class:`str`
    """
    self.list_id = list_id
    self.merge_id = merge_id
    path = self._build_path(list_id, 'merge-fields', merge_id)
    return self._mc_client._get(url=path)
Get information about a specific merge field in a list. :param list_id: The unique id for the list. :type list_id: :py:class:`str` :param merge_id: The id for the merge field. :type merge_id: :py:class:`str`
def list(self, log_level=values.unset, start_date=values.unset,
         end_date=values.unset, limit=None, page_size=None):
    """Lists AlertInstance records from the API as a list.

    Unlike stream(), this operation is eager: it loads up to `limit`
    records into memory before returning.

    :param unicode log_level: Only show alerts for this log-level.
    :param date start_date: Only show Alerts on or after this date.
    :param date end_date: Only show Alerts on or before this date.
    :param int limit: Upper limit for the number of records to return.
                      list() guarantees never to return more than limit.
                      Default is no limit.
    :param int page_size: Number of records to fetch per request; defaults
                          to 50. If only a limit is given, the most
                          efficient page size is used, i.e. min(limit, 1000).

    :returns: list of up to `limit` results
    :rtype: list[twilio.rest.monitor.v1.alert.AlertInstance]
    """
    stream = self.stream(
        log_level=log_level,
        start_date=start_date,
        end_date=end_date,
        limit=limit,
        page_size=page_size,
    )
    return [record for record in stream]
Lists AlertInstance records from the API as a list. Unlike stream(), this operation is eager and will load `limit` records into memory before returning. :param unicode log_level: Only show alerts for this log-level. :param date start_date: Only show Alerts on or after this date. :param date end_date: Only show Alerts on or before this date. :param int limit: Upper limit for the number of records to return. list() guarantees never to return more than limit. Default is no limit :param int page_size: Number of records to fetch per request, when not set will use the default value of 50 records. If no page_size is defined but a limit is defined, list() will attempt to read the limit with the most efficient page size, i.e. min(limit, 1000) :returns: Generator that will yield up to limit results :rtype: list[twilio.rest.monitor.v1.alert.AlertInstance]
def _check_reset_and_type_change(self, name, orig_ctr): """Check if name is in orig_ctr or in one of the other type containers.""" # Resetting a hyperparameter if name in orig_ctr: tf.logging.warning("Overwriting hparam %s", name) ctr_names = [ (self._categorical_params, "categorical"), (self._discrete_params, "discrete"), (self._float_params, "float"), (self._int_params, "int"), ] ctrs, names = list(zip(*ctr_names)) orig_name = names[ctrs.index(orig_ctr)] for ctr, ctr_name in ctr_names: if ctr is orig_ctr: continue # Using a different type for the same hyperparameter name if name in ctr: raise ValueError("Setting hyperparameter %s as type %s, but a " "hyperparemeter of the same name was originally " "registered as type %s" % (name, ctr_name, orig_name))
Check if name is in orig_ctr or in one of the other type containers.
def _parametersToDefaults(self, parameters):
    """
    Extract the defaults from C{parameters}, building a dictionary that
    maps parameter names to default values, suitable for passing to
    L{ListChangeParameter}.

    @type parameters: C{list} of L{liveform.Parameter} or
        L{liveform.ChoiceParameter}.

    @rtype: C{dict}
    """
    defaults = {}
    for param in parameters:
        if isinstance(param, liveform.ChoiceParameter):
            # A choice parameter's default is the list of selected values.
            defaults[param.name] = [
                choice.value for choice in param.choices if choice.selected]
        else:
            defaults[param.name] = param.default
    return defaults
Extract the defaults from C{parameters}, constructing a dictionary mapping parameter names to default values, suitable for passing to L{ListChangeParameter}. @type parameters: C{list} of L{liveform.Parameter} or L{liveform.ChoiceParameter}. @rtype: C{dict}
def _parse_permission(self, obj):
    """Extract the permission actions from ``obj``.

    :param obj: a permission spec: ``'*'`` for all actions, a single
        action string, a list/tuple of actions, or a dict whose ``'*'``
        entry is parsed recursively.
    :return: e.g. ``[A.QUERY, A.WRITE, ...]`` (or a single-action tuple);
        None for unrecognized specs.
    """
    if isinstance(obj, str):
        if obj == '*':
            return A.ALL
        if obj in A.ALL:
            return (obj,)
        logger.warning('Invalid permission action: %s', obj)
    elif isinstance(obj, (list, tuple)):
        for action in obj:
            if action not in A.ALL:
                logger.warning('Invalid permission action: %s', action)
        return obj
    elif isinstance(obj, dict):
        return self._parse_permission(obj.get('*'))
Extract the permission actions from obj :param obj: :return: [A.QUERY, A.WRITE, ...]
def clear_from(self, timestamp):
    """Clear all data from ``timestamp`` onwards.

    :raises ValueError: if ``timestamp`` is not exactly on a block
        boundary (a multiple of the configured block size).
    """
    block_size = self.config.block_size
    if timestamp % block_size:
        raise ValueError('Timestamp must be on a block boundary')
    self.driver.clear_from(timestamp // block_size, timestamp)
Clear all data from `timestamp` onwards. Note that the timestamp is rounded down to the nearest block boundary
def _init_default_values(self):
    """Set default initial values.

    The default values are hard-coded for backwards compatibility
    and for several functionalities in dclab.
    """
    # Do not filter out invalid event values
    self["filtering"]["remove invalid events"] = False
    # Enable filters switch is mandatory
    self["filtering"]["enable filters"] = True
    # Limit events integer to downsample output data
    self["filtering"]["limit events"] = 0
    # Polygon filter list
    self["filtering"]["polygon filters"] = []
    # Defaults to no hierarchy parent
    self["filtering"]["hierarchy parent"] = "none"
    # Initialize any missing min/max feature bounds to zero
    for feature in dfn.scalar_feature_names:
        for suffix in (" min", " max"):
            self["filtering"][feature + suffix] = 0
Set default initial values The default values are hard-coded for backwards compatibility and for several functionalities in dclab.
def fs_cleansed_attachments(self):
    """Return a list of absolute paths to the cleansed attachments."""
    container = self.fs_cleansed_attachment_container
    if not exists(container):
        return []
    return [join(container, name) for name in listdir(container)]
returns a list of absolute paths to the cleansed attachments
def average_last_builds(connection, package, limit=5):
    """
    Find the average duration time for the last couple of builds.

    :param connection: txkoji.Connection
    :param package: package name
    :param limit: number of most recent completed builds to average over
    :returns: deferred that when fired returns a datetime.timedelta
              object, or None if there were no previous builds for this
              package.
    """
    # TODO: take branches (targets, or tags, etc) into account when
    # estimating a package's build time.
    opts = {'limit': limit, 'order': '-completion_time'}
    builds = yield connection.listBuilds(
        package, state=build_states.COMPLETE, queryOpts=opts)
    if not builds:
        defer.returnValue(None)
    total = sum((build.duration for build in builds), timedelta())
    defer.returnValue(total / len(builds))
Find the average duration time for the last couple of builds. :param connection: txkoji.Connection :param package: package name :returns: deferred that when fired returns a datetime.timedelta object, or None if there were no previous builds for this package.
def _PrintTasksStatus(self, processing_status): """Prints the status of the tasks. Args: processing_status (ProcessingStatus): processing status. """ if processing_status and processing_status.tasks_status: tasks_status = processing_status.tasks_status table_view = views.CLITabularTableView( column_names=['Tasks:', 'Queued', 'Processing', 'Merging', 'Abandoned', 'Total'], column_sizes=[15, 7, 15, 15, 15, 0]) table_view.AddRow([ '', tasks_status.number_of_queued_tasks, tasks_status.number_of_tasks_processing, tasks_status.number_of_tasks_pending_merge, tasks_status.number_of_abandoned_tasks, tasks_status.total_number_of_tasks]) self._output_writer.Write('\n') table_view.Write(self._output_writer)
Prints the status of the tasks. Args: processing_status (ProcessingStatus): processing status.
def saveVizGithub(contents, ontouri):
    """
    DEPRECATED on 2016-11-16

    Was working but had a dependency on the package 'uritemplate.py'
    which caused problems at installation time.

    Publishes the rendered documentation as an anonymous GitHub gist and
    returns the resulting URLs.
    """
    title = "Ontospy: ontology export"
    readme = """This ontology documentation was automatically generated with Ontospy (https://github.com/lambdamusic/Ontospy).
The graph URI is: %s""" % str(ontouri)
    license_text = """The MIT License (MIT)

Copyright (c) 2016 Ontospy project [http://lambdamusic.github.io/Ontospy/]

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE."""
    files = {
        'index.html': {'content': contents},
        'README.txt': {'content': readme},
        'LICENSE.txt': {'content': license_text},
    }
    return save_anonymous_gist(title, files)
DEPRECATED on 2016-11-16 Was working but had a dependency on the package 'uritemplate.py' which caused problems at installation time
def update(self, other):
    """Add all the tag-entry pairs from _other_ to the `Grant`.

    If there is a conflict _other_ takes precedence.

    # Parameters

    _other_ : `Grant`

    > Another `Grant` of the same type as _self_
    """
    if type(self) != type(other):
        return NotImplemented
    if other.bad:
        self.error = other.error
        self.bad = True
    self._fieldDict.update(other._fieldDict)
Adds all the tag-entry pairs from _other_ to the `Grant`. If there is a conflict _other_ takes precedence. # Parameters _other_ : `Grant` > Another `Grant` of the same type as _self_
def load_ply(fileobj):
    """Load an ASCII PLY mesh from a file-like object.

    Returns ``(verts, faces, faces_uv)`` where ``verts`` is an
    ``(nverts, 3)`` numpy array, ``faces`` is a list of 3-int index
    lists and ``faces_uv`` is a list of per-face texture coordinate
    triples (empty when the file has no texcoords).
    """
    def readline():
        """Return the next non-comment line, stripped."""
        while True:
            line = fileobj.readline()
            assert line != ''  # eof
            if not line.startswith('comment'):
                return line.strip()

    assert readline() == 'ply'
    assert readline() == 'format ascii 1.0'
    header = readline()
    assert header.startswith('element vertex')
    nverts = int(header.split()[2])
    assert readline() == 'property float x'
    assert readline() == 'property float y'
    assert readline() == 'property float z'
    header = readline()
    assert header.startswith('element face')
    nfaces = int(header.split()[2])
    assert readline() == 'property list uchar int vertex_indices'
    header = readline()
    has_texcoords = header == 'property list uchar float texcoord'
    if has_texcoords:
        assert readline() == 'end_header'
    else:
        assert header == 'end_header'

    # Vertices: first three floats of each line.
    verts = np.zeros((nverts, 3))
    for i in range(nverts):
        fields = readline().split()
        verts[i, :] = [float(v) for v in fields[:3]]

    # Faces (triangles only), optionally followed by 3 uv pairs.
    faces = []
    faces_uv = []
    for _ in range(nfaces):
        fields = readline().split()
        assert int(fields[0]) == 3
        faces.append([int(v) for v in fields[1:4]])
        if has_texcoords:
            assert len(fields) == 11
            assert int(fields[4]) == 6
            faces_uv.append([(float(fields[5]), float(fields[6])),
                             (float(fields[7]), float(fields[8])),
                             (float(fields[9]), float(fields[10]))])
        else:
            assert len(fields) == 4
    return verts, faces, faces_uv
Same as load_ply, but takes a file-like object
def _create_results_summary(self): """ Create the dataframe that displays the estimation results, and store it on the model instance. Returns ------- None. """ # Make sure we have all attributes needed to create the results summary needed_attributes = ["params", "standard_errors", "tvalues", "pvalues", "robust_std_errs", "robust_t_stats", "robust_p_vals"] try: assert all([hasattr(self, attr) for attr in needed_attributes]) assert all([isinstance(getattr(self, attr), pd.Series) for attr in needed_attributes]) except AssertionError: msg = "Call this function only after setting/calculating all other" msg_2 = " estimation results attributes" raise NotImplementedError(msg + msg_2) self.summary = pd.concat((self.params, self.standard_errors, self.tvalues, self.pvalues, self.robust_std_errs, self.robust_t_stats, self.robust_p_vals), axis=1) return None
Create the dataframe that displays the estimation results, and store it on the model instance. Returns ------- None.
def call_handlers(self, msg):
    """Reimplemented to emit signals instead of making callbacks."""
    # Emit the generic signal first.
    self.message_received.emit(msg)
    # Then emit signals for specialized message types.
    if msg['header']['msg_type'] == 'input_request':
        self.input_requested.emit(msg)
Reimplemented to emit signals instead of making callbacks.
def decode(self, binSequence):
    """Decode a binary sequence into a string.

    Each element of the sequence is matched against the bit masks in
    ``self.forma``; when several characters' masks match a single
    element, the characters are joined with '/'.

    :raises KeyError: if an element matches no known character mask.
    """
    try:
        # Historical behaviour: if the first element is itself iterable,
        # decode only that first element.
        binSeq = iter(binSequence[0])
    except TypeError:
        # BUGFIX: was the Python-2-only syntax ``except TypeError, te:``
        # (a SyntaxError on Python 3); the bound name was unused anyway.
        binSeq = binSequence
    ret = ''
    for b in binSeq:
        ch = ''
        for c in self.charToBin:
            if b & self.forma[self.charToBin[c]] > 0:
                ch += c + '/'
        if ch == '':
            # Typo fixed: was "unkowom".
            raise KeyError('Key %d unknown, bad format' % b)
        ret += ch[:-1]
    return ret
decodes a binary sequence to return a string
def not_empty(value, allow_empty=False, **kwargs):
    """Validate that ``value`` is not empty.

    :param value: The value to validate.
    :param allow_empty: If ``True``, returns :obj:`None <python:None>` if
      ``value`` is empty. If ``False``, raises a
      :class:`EmptyValueError <validator_collection.errors.EmptyValueError>`
      if ``value`` is empty. Defaults to ``False``.
    :type allow_empty: :class:`bool <python:bool>`

    :returns: ``value`` / :obj:`None <python:None>`

    :raises EmptyValueError: if ``value`` is empty and ``allow_empty``
      is ``False``
    """
    if value:
        return value
    if allow_empty:
        return None
    raise errors.EmptyValueError('value was empty')
Validate that ``value`` is not empty. :param value: The value to validate. :param allow_empty: If ``True``, returns :obj:`None <python:None>` if ``value`` is empty. If ``False``, raises a :class:`EmptyValueError <validator_collection.errors.EmptyValueError>` if ``value`` is empty. Defaults to ``False``. :type allow_empty: :class:`bool <python:bool>` :returns: ``value`` / :obj:`None <python:None>` :raises EmptyValueError: if ``value`` is empty and ``allow_empty`` is ``False``
def get_media_detail_input_interface_name(self, **kwargs):
    """Auto Generated Code.

    Builds the ``get_media_detail`` RPC request element carrying the
    given ``interface_name`` and dispatches it through the callback.
    """
    request = ET.Element("get_media_detail")
    input_el = ET.SubElement(request, "input")
    name_el = ET.SubElement(input_el, "interface-name")
    name_el.text = kwargs.pop('interface_name')
    callback = kwargs.pop('callback', self._callback)
    return callback(request)
Auto Generated Code
def _on_message(self, socket, message): """ Called aways when a message arrives. """ data = json.loads(message) message_type = None identifier = None subscription = None if 'type' in data: message_type = data['type'] if 'identifier' in data: identifier = json.loads(data['identifier']) if identifier is not None: subscription = self.find_subscription(identifier) if subscription is not None: subscription.received(data) elif message_type == 'welcome': self.logger.debug('Welcome message received.') for subscription in self.subscriptions.values(): if subscription.state == 'connection_pending': subscription.create() elif message_type == 'ping': if self.log_ping: self.logger.debug('Ping received.') else: self.logger.warning('Message not supported. (Message: {})'.format(message))
Called whenever a message arrives.
def count(self):
    """
    :returns: The total number of elements in the reference list.
    :rtype: int
    """
    raw = self.conn.client.get(self.refcount_key)
    # A missing key means no references have been recorded yet.
    return 0 if raw is None else int(raw)
:returns: The total number of elements in the reference list. :rtype: int
def update_job_by_id(user, job_id):
    """Update a job.

    Validates the If-Match ETag, checks team membership, records a new
    jobstate/event when the status changes, then performs an optimistic
    (etag-guarded) update and returns the updated row.
    """
    # get If-Match header
    if_match_etag = utils.check_and_get_etag(flask.request.headers)

    # get the diverse parameters
    values = schemas.job.put(flask.request.json)
    job = dict(v1_utils.verify_existence_and_get(job_id, _TABLE))

    if not user.is_in_team(job['team_id']):
        raise dci_exc.Unauthorized()

    # Record a new jobstate only when the status actually changes.
    new_status = values.get('status')
    if new_status and job.get('status') != new_status:
        jobstates.insert_jobstate(user, {
            'status': new_status,
            'job_id': job_id
        })
        if new_status in models.FINAL_STATUSES:
            jobs_events.create_event(job_id, new_status, job['topic_id'])

    values['etag'] = utils.gen_etag()
    where_clause = sql.and_(_TABLE.c.etag == if_match_etag,
                            _TABLE.c.id == job_id)
    query = (_TABLE.update()
             .returning(*_TABLE.columns)
             .where(where_clause)
             .values(**values))
    result = flask.g.db_conn.execute(query)
    if not result.rowcount:
        # Either the job vanished or the etag was stale.
        raise dci_exc.DCIConflict('Job', job_id)

    return flask.Response(
        json.dumps({'job': result.fetchone()}), 200,
        headers={'ETag': values['etag']},
        content_type='application/json'
    )
Update a job
def DeleteGRRUser(self, username):
    """Deletes the user and all related metadata with the given username."""
    # Drop the user's own approvals, if any.
    try:
        del self.approvals_by_username[username]
    except KeyError:
        pass  # No approvals to delete for this user.

    # Strip any grants this user issued on other users' approvals.
    for approvals in itervalues(self.approvals_by_username):
        for approval in itervalues(approvals):
            remaining = [g for g in approval.grants
                         if g.grantor_username != username]
            if len(remaining) != len(approval.grants):
                approval.grants = remaining

    try:
        del self.notifications_by_username[username]
    except KeyError:
        pass  # No notifications to delete for this user.

    try:
        del self.users[username]
    except KeyError:
        raise db.UnknownGRRUserError(username)
Deletes the user and all related metadata with the given username.
def sanitize_filename(filename):
    """Replace the file name with a random token, preserving the extension."""
    # TODO: fix broken splitext (it reveals everything of the filename
    # after the first `.` - doh!)
    token = generate_drop_id()
    extension = splitext(filename)[1]
    return token + extension if extension else token
preserve the file ending, but replace the name with a random token
def get_sizestr(img):
    """
    Extract and reformat voxel size, matrix size, field of view, and
    number of slices into pretty strings.

    Parameters
    ----------
    img : :obj:`nibabel.Nifti1Image`
        Image from scan from which to derive parameters.

    Returns
    -------
    n_slices : :obj:`int`
        Number of slices.
    voxel_size : :obj:`str`
        Voxel size string (e.g., '2x2x2')
    matrix_size : :obj:`str`
        Matrix size string (e.g., '128x128')
    fov : :obj:`str`
        Field of view string (e.g., '256x256')
    """
    import numpy as np

    n_x, n_y, n_slices = img.shape[:3]
    voxel_dims = np.array(img.header.get_zooms()[:3])
    matrix_size = '{0}x{1}'.format(num_to_str(n_x), num_to_str(n_y))
    voxel_size = 'x'.join([num_to_str(s) for s in voxel_dims])
    # FOV = in-plane matrix size scaled by in-plane voxel dimensions.
    fov = 'x'.join([num_to_str(s) for s in [n_x, n_y] * voxel_dims[:2]])
    return n_slices, voxel_size, matrix_size, fov
Extract and reformat voxel size, matrix size, field of view, and number of slices into pretty strings. Parameters ---------- img : :obj:`nibabel.Nifti1Image` Image from scan from which to derive parameters. Returns ------- n_slices : :obj:`int` Number of slices. voxel_size : :obj:`str` Voxel size string (e.g., '2x2x2') matrix_size : :obj:`str` Matrix size string (e.g., '128x128') fov : :obj:`str` Field of view string (e.g., '256x256')
def decode(packet):
    """Decode a navdata packet."""
    offset = 0
    header = struct.unpack_from('IIII', packet, offset)
    s = header[1]

    # (name, bit) pairs for the drone state word; bits 14, 18 and 20 are
    # not exposed, matching the original decoder.
    state_bits = (
        ('fly', 0),                  # (0) landed, (1) flying
        ('video', 1),                # video enabled
        ('vision', 2),               # vision enabled
        ('control', 3),              # (0) euler angles, (1) angular speed control
        ('altitude', 4),             # altitude control active
        ('user_feedback_start', 5),  # Start button state
        ('command', 6),              # control command ACK received
        ('fw_file', 7),              # firmware file is good
        ('fw_ver', 8),               # firmware update is newer
        ('fw_upd', 9),               # firmware update is ongoing
        ('navdata_demo', 10),        # (0) all navdata, (1) only navdata demo
        ('navdata_bootstrap', 11),   # no navdata options sent
        ('motors', 12),              # (1) motors problem
        ('com_lost', 13),            # (1) communication lost
        ('vbat_low', 15),            # (1) battery too low
        ('user_el', 16),             # user emergency landing ON
        ('timer_elapsed', 17),       # (1) timer elapsed
        ('angles_out_of_range', 19), # (1) angles out of range
        ('ultrasound', 21),          # (1) ultrasonic sensor deaf
        ('cutout', 22),              # (1) cutout system detected
        ('pic_version', 23),         # (1) PIC version number OK
        ('atcodec_thread_on', 24),
        ('navdata_thread_on', 25),
        ('video_thread_on', 26),
        ('acq_thread_on', 27),
        ('ctrl_watchdog', 28),       # (1) delay in control execution (> 5 ms)
        ('adc_watchdog', 29),        # (1) delay in uart2 dsr (> 5 ms)
        ('com_watchdog', 30),        # (1) communication problem
        ('emergency', 31),           # (1) emergency landing
    )
    state = {name: (s >> bit) & 1 for name, bit in state_bits}

    data = {
        'state': state,
        'header': header[0],
        'sequence': header[2],
        'vision': header[3],
    }
    offset += struct.calcsize('IIII')

    demo_fields = ['ctrl_state', 'battery', 'theta', 'phi', 'psi',
                   'altitude', 'vx', 'vy', 'vz', 'num_frames']
    angles = ['theta', 'phi', 'psi']

    # Walk the option blocks: each starts with (id, size) as two uint16s.
    while True:
        try:
            id_nr, size = struct.unpack_from('HH', packet, offset)
            offset += struct.calcsize('HH')
        except struct.error:
            break
        raw = []
        for _ in range(size - struct.calcsize('HH')):
            raw.append(struct.unpack_from('c', packet, offset)[0])
            offset += struct.calcsize('c')
        if id_nr == 0:
            demo_vals = struct.unpack_from('IIfffIfffI', b''.join(raw))
            demo = dict(zip(demo_fields, demo_vals))
            for angle in angles:
                # Scale raw angle values down by 1000 and truncate to int.
                demo[angle] = int(demo[angle] / 1000)
            data['demo'] = demo
    return data
Decode a navdata packet.
def value_dp_matrix(self):
    """
    :return: DataProperty for table data.
    :rtype: list
    """
    # Lazily computed and cached on first access.
    if self.__value_dp_matrix is None:
        value_matrix = to_value_matrix(self.headers, self.rows)
        self.__value_dp_matrix = self.__dp_extractor.to_dp_matrix(value_matrix)
    return self.__value_dp_matrix
:return: DataProperty for table data. :rtype: list
def get_rules(license):
    """Get the can, cannot and must rules for ``license`` from the GitHub
    license API.

    :returns: a ``(can, cannot, must)`` tuple of rule lists; all three
        are empty when the API request fails.
    """
    can, cannot, must = [], [], []
    url = "{base_url}/licenses/{license}".format(base_url=BASE_URL,
                                                 license=license)
    response = requests.get(url, headers=_HEADERS)
    if response.status_code == requests.codes.ok:
        data = response.json()
        can = data["permitted"]
        cannot = data["forbidden"]
        must = data["required"]
    return can, cannot, must
Gets can, cannot and must rules from github license API
def flush_incoming(self):
    """
    Flush all incoming queues to the respective processing methods.

    The handlers are called as usual, so it may take at least one pass
    through the asyncio event loop before effects can be seen.

    The incoming queues are empty after a call to this method.

    It is legal (but pretty useless) to call this method while the
    stream is :attr:`running`.
    """
    while True:
        try:
            stanza_obj = self._incoming_queue.get_nowait()
        except asyncio.QueueEmpty:
            return
        self._process_incoming(None, stanza_obj)
Flush all incoming queues to the respective processing methods. The handlers are called as usual, thus it may require at least one iteration through the asyncio event loop before effects can be seen. The incoming queues are empty after a call to this method. It is legal (but pretty useless) to call this method while the stream is :attr:`running`.
def _ensure_allow_rp(rp_pyxb): """Ensure that RP allows replication.""" if not rp_pyxb.replicationAllowed: rp_pyxb.replicationAllowed = True if not rp_pyxb.numberReplicas: rp_pyxb.numberReplicas = 3
Ensure that RP allows replication.
def XORPS(cpu, dest, src):
    """
    Performs a bitwise logical XOR operation on the source operand
    (second operand) and the destination operand (first operand) and
    stores the result in the destination operand. The source operand can
    be an MMX technology register or a 64-bit memory location or it can
    be an XMM register or a 128-bit memory location. The destination
    operand can be an MMX technology register or an XMM register. Each
    bit of the result is set to 1 if the corresponding bits of the first
    and second operands differ; otherwise, it is set to 0.
    """
    # Fixed: the docstring previously described OR, but the instruction
    # (and the code) performs XOR. The unused `res` local was removed.
    dest.write(dest.read() ^ src.read())
Performs a bitwise logical XOR operation on the source operand (second operand) and the destination operand (first operand) and stores the result in the destination operand. The source operand can be an MMX technology register or a 64-bit memory location or it can be an XMM register or a 128-bit memory location. The destination operand can be an MMX technology register or an XMM register. Each bit of the result is set to 1 if the corresponding bits of the first and second operands differ; otherwise, it is set to 0.
def _validate_max_staleness(max_staleness): """Validate max_staleness.""" if max_staleness == -1: return -1 if not isinstance(max_staleness, integer_types): raise TypeError(_invalid_max_staleness_msg(max_staleness)) if max_staleness <= 0: raise ValueError(_invalid_max_staleness_msg(max_staleness)) return max_staleness
Validate max_staleness.
def appname(path=None):
    """
    Return a useful application name based on the program argument.

    A special case maps 'mod_wsgi' to a more appropriate name so web
    applications show up as our own.
    """
    if path is None:
        path = sys.argv[0]
    stem = os.path.basename(os.path.splitext(path)[0])
    if stem == 'mod_wsgi':
        return 'nvn_web'  # pragma: no cover
    return stem
Return a useful application name based on the program argument. A special case maps 'mod_wsgi' to a more appropriate name so web applications show up as our own.
def spatialimg_to_hdfgroup(h5group, spatial_img):
    """Saves a Nifti1Image into an HDF5 group.

    Parameters
    ----------
    h5group: h5py Group
        Output HDF5 group where the datasets will be created.

    spatial_img: nibabel SpatialImage
        Image to be saved.

    Notes
    -----
    Creates datasets 'data', 'affine' and (when the image provides it)
    'extra' inside `h5group`; the header fields are stored as attributes
    of the 'data' dataset.

    Raises
    ------
    Exception
        When any dataset creation fails with a ValueError.
    """
    try:
        h5group['data'] = spatial_img.get_data()
        h5group['affine'] = spatial_img.get_affine()

        # BUG FIX: this previously tested `hasattr(h5group, 'get_extra')`,
        # probing the HDF5 group instead of the image, so 'extra' was never
        # saved.  The image is the object that may provide get_extra().
        if hasattr(spatial_img, 'get_extra'):
            h5group['extra'] = spatial_img.get_extra()

        hdr = spatial_img.get_header()
        for k in list(hdr.keys()):
            h5group['data'].attrs[k] = hdr[k]

    except ValueError as ve:
        raise Exception('Error creating group ' + h5group.name) from ve
Saves a Nifti1Image into an HDF5 group. Parameters ---------- h5group: h5py Group Output HDF5 file path spatial_img: nibabel SpatialImage Image to be saved h5path: str HDF5 group path where the image data will be saved. Datasets will be created inside the given group path: 'data', 'extra', 'affine', the header information will be set as attributes of the 'data' dataset.
def run(input_path=None, output_path=None, verbose=True, plot=True, hist_sheet=False): """ Run the MS Excel User Interface. This function performs the following: 1. If `input_path` is not specified, show a dialog to choose an input Excel file. 2. Extract data from the Instruments, Beads, and Samples tables. 3. Process all the bead samples specified in the Beads table. 4. Generate statistics for each bead sample. 5. Process all the cell samples in the Samples table. 6. Generate statistics for each sample. 7. If requested, generate a histogram table for each fluorescent channel specified for each sample. 8. Generate a table with run time, date, FlowCal version, among others. 9. Save statistics and (if requested) histograms in an output Excel file. Parameters ---------- input_path : str Path to the Excel file to use as input. If None, show a dialog to select an input file. output_path : str Path to which to save the output Excel file. If None, use "<input_path>_output". verbose : bool, optional Whether to print information messages during the execution of this function. plot : bool, optional Whether to generate and save density/histogram plots of each sample, and each beads sample. hist_sheet : bool, optional Whether to generate a sheet in the output Excel file specifying histogram bin information. 
""" # If input file has not been specified, show open file dialog if input_path is None: input_path = show_open_file_dialog(filetypes=[('Excel files', '*.xlsx')]) if not input_path: if verbose: print("No input file selected.") return # Extract directory, filename, and filename with no extension from path input_dir, input_filename = os.path.split(input_path) input_filename_no_ext, __ = os.path.splitext(input_filename) # Read relevant tables from workbook if verbose: print("Reading {}...".format(input_filename)) instruments_table = read_table(input_path, sheetname='Instruments', index_col='ID') beads_table = read_table(input_path, sheetname='Beads', index_col='ID') samples_table = read_table(input_path, sheetname='Samples', index_col='ID') # Process beads samples beads_samples, mef_transform_fxns, mef_outputs = process_beads_table( beads_table, instruments_table, base_dir=input_dir, verbose=verbose, plot=plot, plot_dir='plot_beads', full_output=True) # Add stats to beads table if verbose: print("") print("Calculating statistics for beads...") add_beads_stats(beads_table, beads_samples, mef_outputs) # Process samples samples = process_samples_table( samples_table, instruments_table, mef_transform_fxns=mef_transform_fxns, beads_table=beads_table, base_dir=input_dir, verbose=verbose, plot=plot, plot_dir='plot_samples') # Add stats to samples table if verbose: print("") print("Calculating statistics for all samples...") add_samples_stats(samples_table, samples) # Generate histograms if hist_sheet: if verbose: print("Generating histograms table...") histograms_table = generate_histograms_table(samples_table, samples) # Generate about table about_table = generate_about_table({'Input file path': input_path}) # Generate list of tables to save table_list = [] table_list.append(('Instruments', instruments_table)) table_list.append(('Beads', beads_table)) table_list.append(('Samples', samples_table)) if hist_sheet: table_list.append(('Histograms', histograms_table)) 
table_list.append(('About Analysis', about_table)) # Write output excel file if verbose: print("Saving output Excel file...") if output_path is None: output_filename = "{}_output.xlsx".format(input_filename_no_ext) output_path = os.path.join(input_dir, output_filename) write_workbook(output_path, table_list) if verbose: print("\nDone.")
Run the MS Excel User Interface. This function performs the following: 1. If `input_path` is not specified, show a dialog to choose an input Excel file. 2. Extract data from the Instruments, Beads, and Samples tables. 3. Process all the bead samples specified in the Beads table. 4. Generate statistics for each bead sample. 5. Process all the cell samples in the Samples table. 6. Generate statistics for each sample. 7. If requested, generate a histogram table for each fluorescent channel specified for each sample. 8. Generate a table with run time, date, FlowCal version, among others. 9. Save statistics and (if requested) histograms in an output Excel file. Parameters ---------- input_path : str Path to the Excel file to use as input. If None, show a dialog to select an input file. output_path : str Path to which to save the output Excel file. If None, use "<input_path>_output". verbose : bool, optional Whether to print information messages during the execution of this function. plot : bool, optional Whether to generate and save density/histogram plots of each sample, and each beads sample. hist_sheet : bool, optional Whether to generate a sheet in the output Excel file specifying histogram bin information.
def register_sigma_task(self, *args, **kwargs):
    """Register a task, forcing its class to ``SigmaTask``."""
    # Overrides any caller-supplied task_class, same as the original.
    kwargs.update(task_class=SigmaTask)
    return self.register_task(*args, **kwargs)
Register a sigma task.
def authorization_documents(self):
    """
    :rtype: twilio.rest.preview.hosted_numbers.authorization_document.AuthorizationDocumentList
    """
    # Build the resource list lazily on first access and reuse it afterwards.
    cached = self._authorization_documents
    if cached is None:
        cached = AuthorizationDocumentList(self)
        self._authorization_documents = cached
    return cached
:rtype: twilio.rest.preview.hosted_numbers.authorization_document.AuthorizationDocumentList
def get_readonly_fields(self, request, obj=None):
    """
    Makes `created_by`, `create_date` & `update_date` readonly when editing.

    Author: Himanshu Shankar (https://himanshus.com)
    """
    # Start from the read-only fields reported by the parent admin.
    readonly = list(super(CreateUpdateAdmin, self).get_readonly_fields(
        request=request, obj=obj))

    excluded = self.exclude or ()
    for field_name, info in self.ownership_info['fields'].items():
        # Append the field when the model defines it, it is flagged as
        # readonly, and it is neither already listed nor excluded (an
        # excluded field would otherwise appear twice via
        # form.Meta.exclude).
        if (hasattr(self.model, field_name)
                and info.get('readonly')
                and field_name not in readonly
                and field_name not in excluded):
            readonly.append(field_name)

    return tuple(readonly)
Makes `created_by`, `create_date` & `update_date` readonly when editing. Author: Himanshu Shankar (https://himanshus.com)
def property_present(properties, admin_username='root', admin_password='calvin', host=None, **kwargs):
    '''
    Ensure the given iDRAC properties have the specified values.

    properties
        Mapping of property name -> desired value.

    admin_username
        Username used for the DRAC connection (default: ``root``).

    admin_password
        Password used for the DRAC connection (default: ``calvin``).

    host
        DRAC host; when None, it is discovered from ``ipmitool lan print``.
    '''
    ret = {'name': host,
           'context': {'Host': host},
           'result': True,
           'changes': {},
           'comment': ''}

    if host is None:
        output = __salt__['cmd.run_all']('ipmitool lan print')
        stdout = output['stdout']
        reg = re.compile(r'\s*IP Address\s*:\s*(\d+.\d+.\d+.\d+)\s*')
        # BUG FIX: iterating a string yields single characters, so the
        # regex could never match; iterate over its lines instead.
        for line in stdout.splitlines():
            result = reg.match(line)
            if result is not None:
                # we want group(1) as this is match in parentheses
                host = result.group(1)
                break

    if not host:
        ret['result'] = False
        ret['comment'] = 'Unknown host!'
        return ret

    # Fetch the current value of every requested property up front.
    properties_get = {}
    for key, value in properties.items():
        response = __salt__['dracr.get_property'](host, admin_username,
                                                  admin_password, key)
        if response is False or response['retcode'] != 0:
            ret['result'] = False
            ret['comment'] = 'Failed to get property from idrac'
            return ret
        # The value is on the last line, after the '=' sign.
        properties_get[key] = \
            response['stdout'].split('\n')[-1].split('=')[-1]

    if __opts__['test']:
        for key, value in properties.items():
            if properties_get[key] == value:
                ret['changes'][key] = 'Won\'t be changed'
            else:
                # BUG FIX: report the *target* value; previously this
                # formatted the current value into the message.
                ret['changes'][key] = 'Will be changed to {0}'.format(value)
        return ret

    for key, value in properties.items():
        if properties_get[key] != value:
            response = __salt__['dracr.set_property'](host, admin_username,
                                                      admin_password,
                                                      key, value)
            if response is False or response['retcode'] != 0:
                ret['result'] = False
                ret['comment'] = 'Failed to set property from idrac'
                return ret
            ret['changes'][key] = \
                'will be changed - old value {0} , new value {1}'.format(
                    properties_get[key], value)

    return ret
properties = {}
def recv(self, tab_key, message_id=None, timeout=30):
    '''
    Receive a message, optionally filtering for a specified message id.

    If `message_id` is None, the first command in the receive queue is
    returned.

    If `message_id` is not None, the command waits until a message is
    received with the specified id, or it times out.

    Timeout is the number of seconds to wait for a response, or `None`
    if the timeout has expired with no response.
    '''
    # Raises if the tab's websocket is not open (name-mangled helper).
    self.__check_open_socket(tab_key)

    # First, check if the message has already been received.
    # NOTE: the id-matching branch only triggers when message_id is
    # truthy; with message_id=None this scan is a no-op and the
    # recv_filtered() call below returns the first queued message.
    for idx in range(len(self.messages[tab_key])):
        if self.messages[tab_key][idx]:
            if "id" in self.messages[tab_key][idx] and message_id:
                if self.messages[tab_key][idx]['id'] == message_id:
                    # pop() removes the matched message from the queue.
                    return self.messages[tab_key].pop(idx)

    # Then spin until we either have the message,
    # or have timed out.
    def check_func(message):
        # Predicate handed to recv_filtered(): accept anything when no
        # id was requested, otherwise accept only the matching id.
        if message_id is None:
            return True
        if not message:
            self.log.debug("Message is not true (%s)!", message)
            return False
        if "id" in message:
            return message['id'] == message_id
        return False

    return self.recv_filtered(check_func, tab_key, timeout)
Receive a message, optionally filtering for a specified message id. If `message_id` is None, the first command in the receive queue is returned. If `message_id` is not None, the command waits until a message is received with the specified id, or it times out. Timeout is the number of seconds to wait for a response; `None` is returned if the timeout expires with no response.
def col_frequencies(col, weights=None, gap_chars='-.'):
    """Frequencies of each residue type (totaling 1.0) in a single column.

    :param col: alignment column (sequence of residue characters).
    :param weights: optional per-sequence weights, forwarded to col_counts.
    :param gap_chars: characters treated as gaps, forwarded to col_counts.
    :returns: dict mapping residue -> frequency, summing to 1.0.
    """
    counts = col_counts(col, weights, gap_chars)
    # Reduce counts to frequencies.
    # BUG FIX: dict.iteritems() is Python 2 only; this codebase otherwise
    # uses Python 3 constructs, so use items().
    scale = 1.0 / sum(counts.values())
    return dict((aa, cnt * scale) for aa, cnt in counts.items())
Frequencies of each residue type (totaling 1.0) in a single column.
def interval_to_milliseconds(interval):
    """Convert a Binance interval string to milliseconds

    :param interval: Binance interval string 1m, 3m, 5m, 15m, 30m, 1h, 2h, 4h, 6h, 8h, 12h, 1d, 3d, 1w
    :type interval: str

    :return: None if unit not one of m, h, d or w
             None if string not in correct format
             int value of interval in milliseconds
    """
    unit_seconds = {
        "m": 60,
        "h": 60 * 60,
        "d": 24 * 60 * 60,
        "w": 7 * 24 * 60 * 60,
    }
    suffix = interval[-1]
    if suffix not in unit_seconds:
        return None
    try:
        count = int(interval[:-1])
    except ValueError:
        # Numeric prefix is missing or malformed.
        return None
    return count * unit_seconds[suffix] * 1000
Convert a Binance interval string to milliseconds :param interval: Binance interval string 1m, 3m, 5m, 15m, 30m, 1h, 2h, 4h, 6h, 8h, 12h, 1d, 3d, 1w :type interval: str :return: None if unit not one of m, h, d or w None if string not in correct format int value of interval in milliseconds
def gather_job_info(self, jid, tgt, tgt_type, listen=True, **kwargs):
    '''
    Return the information about a given job
    '''
    log.debug('Checking whether jid %s is still running', jid)
    # A caller-supplied gather_job_timeout overrides the configured one.
    timeout = int(kwargs.get('gather_job_timeout',
                             self.opts['gather_job_timeout']))

    pub_data = self.run_job(tgt,
                            'saltutil.find_job',
                            arg=[jid],
                            tgt_type=tgt_type,
                            timeout=timeout,
                            listen=listen,
                            **kwargs)

    # Subscribe to the follow-up job's events so its returns are captured.
    if 'jid' in pub_data:
        self.event.subscribe(pub_data['jid'])

    return pub_data
Return the information about a given job
def save_bed(cls, query, filename=sys.stdout):
    """
    write a bed12 file of the query.

    Parameters
    ----------
    query : query
        a table or query to save to file

    filename : file
        string or filehandle to write output
    """
    handle = _open(filename, 'w')
    for record in query:
        handle.write(record.bed() + '\n')
write a bed12 file of the query. Parameters ---------- query : query a table or query to save to file filename : file string or filehandle to write output
def _get_name(self):
    """
    There are three cases, because apipie definitions can have multiple
    signatures but python does not.

    For example, the api endpoint:
        /api/myres/:myres_id/subres/:subres_id/subres2
    for method *index* will be translated to the api method name:
        subres_index_subres2
    So when you want to call it from v2 object, you'll have:
        myres.subres_index_subres2
    """
    # More than one ':param' in the URL -> derive a compound name from
    # the URL path; otherwise just use the apipie method name.
    if self.url.count(':') > 1:
        # /api/one/two/:three/four -> two_:three_four
        base_name = self.url.split('/', 3)[-1].replace('/', '_')[1:]
        # :one_two_three -> two_three
        # NOTE(review): the comment above suggests dropping only the
        # leading segment, but split('_')[-1] keeps only the *last*
        # segment — confirm intended behavior.
        if base_name.startswith(':'):
            base_name = base_name.split('_')[-1]
        # one_:two_three_:four_five -> one_three_five
        base_name = re.sub('_:[^/]+', '', base_name)
        # in case that the last term was a parameter
        if base_name.endswith('_'):
            base_name = base_name[:-1]
        # one_two_three -> one_two_method_three
        base_name = (
            '_' + self._method['name']
        ).join(base_name.rsplit('_', 1))
    else:
        base_name = self._method['name']

    # 'import' is a Python keyword, so the generated attribute name must
    # be altered.
    if base_name == 'import':
        base_name = 'import_'

    # Prefix with the apipie resource name when it differs from ours.
    if self._apipie_resource != self.resource:
        return '%s_%s' % (self._apipie_resource, base_name)
    else:
        return base_name
There are three cases, because apipie definitions can have multiple signatures but python does not For example, the api endpoint: /api/myres/:myres_id/subres/:subres_id/subres2 for method *index* will be translated to the api method name: subres_index_subres2 So when you want to call it from v2 object, you'll have: myres.subres_index_subres2
def _initialize_counter(self):
    """Initialize our counter pointer.

    If we're the top-level factory, instantiate a new counter; otherwise,
    point to the top-level factory's counter.
    """
    if self._counter is not None:
        return  # already initialized

    top = self.counter_reference
    if top is self:
        # We are the top-level factory: create a fresh counter.
        self._counter = _Counter(seq=self.factory._setup_next_sequence())
    else:
        # Delegate to the top-level factory and share its counter.
        top._initialize_counter()
        self._counter = top._counter
Initialize our counter pointer. If we're the top-level factory, instantiate a new counter Otherwise, point to the top-level factory's counter.
def version(self, value):
    """Version setter: store `value` (0 when falsy/None) as one byte."""
    packed = bytearray(c_uint8(value or 0))
    self.bytearray[self._get_slicers(1)] = packed
Version setter.
def has_default_privileges(name,
                           object_name,
                           object_type,
                           defprivileges=None,
                           grant_option=None,
                           prepend='public',
                           maintenance_db=None,
                           user=None,
                           host=None,
                           port=None,
                           password=None,
                           runas=None):
    '''
    .. versionadded:: 2019.0.0

    Check if a role has the specified default privileges on an object

    CLI Example:

    .. code-block:: bash

        salt '*' postgres.has_default_privileges user_name table_name table \\
        SELECT,INSERT maintenance_db=db_name

    name
       Name of the role whose privileges should be checked on object_type

    object_name
       Name of the object on which the check is to be performed

    object_type
       The object type, which can be one of the following:

       - table
       - sequence
       - schema
       - group
       - function

    defprivileges
       Comma separated list of privileges to check, from the list below:

       - INSERT
       - CREATE
       - TRUNCATE
       - TRIGGER
       - SELECT
       - USAGE
       - UPDATE
       - EXECUTE
       - REFERENCES
       - DELETE
       - ALL

    grant_option
        If grant_option is set to True, the grant option check is performed

    prepend
        Table and Sequence object types live under a schema so this should be
        provided if the object is not under the default `public` schema

    maintenance_db
        The database to connect to

    user
        database username if different from config or default

    password
        user password if any password for a specified user

    host
        Database host if different from config or default

    port
        Database port if different from config or default

    runas
        System user all operations should be performed on behalf of
    '''
    # Normalize the options and split the privilege string into a list.
    object_type, defprivileges, _defprivs = _mod_defpriv_opts(object_type,
                                                              defprivileges)

    _validate_default_privileges(object_type, _defprivs, defprivileges)

    # The object's owner implicitly holds every privilege on it.
    if object_type != 'group':
        owner = _get_object_owner(object_name,
                                  object_type,
                                  prepend=prepend,
                                  maintenance_db=maintenance_db,
                                  user=user,
                                  host=host,
                                  port=port,
                                  password=password,
                                  runas=runas)
        if owner is not None and name == owner:
            return True

    _defprivileges = default_privileges_list(object_name,
                                             object_type,
                                             prepend=prepend,
                                             maintenance_db=maintenance_db,
                                             user=user,
                                             host=host,
                                             port=port,
                                             password=password,
                                             runas=runas)

    if name in _defprivileges:
        if object_type == 'group':
            # For groups the stored value is the admin-option flag itself.
            if grant_option:
                retval = _defprivileges[name]
            else:
                retval = True
            return retval
        else:
            _defperms = _DEFAULT_PRIVILEGE_TYPE_MAP[object_type]
            if grant_option:
                # With grant_option, every privilege must carry the flag.
                defperms = dict((_DEFAULT_PRIVILEGES_MAP[defperm], True)
                                for defperm in _defperms)
                retval = defperms == _defprivileges[name]
            else:
                defperms = [_DEFAULT_PRIVILEGES_MAP[defperm]
                            for defperm in _defperms]
                if 'ALL' in _defprivs:
                    # ALL requires exactly the full privilege set.
                    retval = sorted(defperms) == sorted(
                        _defprivileges[name].keys())
                else:
                    # Otherwise the requested set must be a subset.
                    retval = set(_defprivs).issubset(
                        set(_defprivileges[name].keys()))
            return retval

    return False
.. versionadded:: 2019.0.0 Check if a role has the specified privileges on an object CLI Example: .. code-block:: bash salt '*' postgres.has_default_privileges user_name table_name table \\ SELECT,INSERT maintenance_db=db_name name Name of the role whose privileges should be checked on object_type object_name Name of the object on which the check is to be performed object_type The object type, which can be one of the following: - table - sequence - schema - group - function privileges Comma separated list of privileges to check, from the list below: - INSERT - CREATE - TRUNCATE - TRIGGER - SELECT - USAGE - UPDATE - EXECUTE - REFERENCES - DELETE - ALL grant_option If grant_option is set to True, the grant option check is performed prepend Table and Sequence object types live under a schema so this should be provided if the object is not under the default `public` schema maintenance_db The database to connect to user database username if different from config or default password user password if any password for a specified user host Database host if different from config or default port Database port if different from config or default runas System user all operations should be performed on behalf of
def create_zone(server, token, domain, identifier, dtype, master=None):
    """Create zone records via the TonicDNS API.

    Arguments:

        server:     TonicDNS API server
        token:      TonicDNS API authentication token
        domain:     Specify domain name
        identifier: Template ID
        dtype:      MASTER|SLAVE|NATIVE (default: MASTER)
        master:     master server ip address when dtype is SLAVE
                    (default: None)

    ContentType: application/json
    x-authentication-token: token
    """
    uri = 'https://' + server + '/zone'
    converter = JSONConverter(domain)
    converter.generate_zone(domain, identifier, dtype, master)
    connect.tonicdns_client(uri, 'PUT', token, converter.zone)
Create zone records. Arguments: server: TonicDNS API server token: TonicDNS API authentication token domain: Specify domain name identifier: Template ID dtype: MASTER|SLAVE|NATIVE (default: MASTER) master: master server ip address when dtype is SLAVE (default: None) ContentType: application/json x-authentication-token: token
def get_flops(filename="linpack.out"):
    """Parse a LINPACK output file and report MFlops statistics.

    Scans `filename` for result rows of six floating-point numbers in
    scientific notation and extracts the fourth column (MFlops).  Prints
    a one-line summary when any rows were found.

    Parameters
    ----------
    filename : str, optional
        Path of the LINPACK output file (default: ``"linpack.out"``).

    Returns
    -------
    tuple or None
        ``(mean, minimum, maximum)`` of the parsed MFlops values, or
        ``None`` when no matching lines were found.
    """
    import re
    import sys

    fpnum = r'\d+\.\d+E[+-]\d\d'
    fpnum_1 = fpnum + r' +'
    pattern = re.compile(r'^ *' + fpnum_1 + fpnum_1 + fpnum_1 +
                         r'(' + fpnum + r') +' + fpnum_1 + fpnum + r' *\n$')

    total = 0.0
    minimum = 1.0e75
    maximum = 0.0
    count = 0
    # `with` guarantees the handle is closed (the old code leaked it and
    # shadowed the `file` builtin).
    with open(filename) as handle:
        for line in handle:
            match = pattern.match(line)
            if match is None:
                continue
            count += 1
            value = float(match.group(1))
            if value < 1.0:
                print(count)
            total += value
            minimum = min(minimum, value)
            maximum = max(maximum, value)

    if count == 0:
        return None
    mean = total / count
    sys.stdout.write("%6.1f MFlops (%d from %.1f to %.1f)\n" %
                     (mean, count, minimum, maximum))
    return mean, minimum, maximum
# DOESNT WORK
def make_masks(self, template):
    """This method generates all of the candidate masks so that the best
    mask can be determined. The template parameter is a code matrix that
    will serve as the base for all the generated masks.
    """
    from copy import deepcopy

    nmasks = len(tables.mask_patterns)
    masks = [''] * nmasks
    count = 0  # NOTE(review): never used below — confirm it can be removed

    for n in range(nmasks):
        # Each candidate mask starts from a fresh copy of the template.
        cur_mask = deepcopy(template)
        masks[n] = cur_mask

        #Add the type pattern bits to the code
        self.add_type_pattern(cur_mask, tables.type_bits[self.error][n])

        #Get the mask pattern
        pattern = tables.mask_patterns[n]

        #This will read the 1's and 0's one at a time
        bits = iter(self.buffer.getvalue())

        #These will help us do the up, down, up, down pattern
        # (cycled so consecutive column pairs alternate direction)
        row_start = itertools.cycle([len(cur_mask)-1, 0])
        row_stop = itertools.cycle([-1, len(cur_mask)])
        direction = itertools.cycle([-1, 1])

        #The data pattern is added using pairs of columns
        for column in range(len(cur_mask)-1, 0, -2):

            #The vertical timing pattern is an exception to the rules,
            #move the column counter over by one
            if column <= 6:
                column = column - 1

            #This will let us fill in the pattern
            #right-left, right-left, etc.
            column_pair = itertools.cycle([column, column-1])

            #Go through each row in the pattern moving up, then down
            for row in range(next(row_start), next(row_stop),
                             next(direction)):

                #Fill in the right then left column
                for i in range(2):
                    col = next(column_pair)

                    #Go to the next column if we encounter a
                    #preexisting pattern (usually an alignment pattern)
                    if cur_mask[row][col] != ' ':
                        continue

                    #Some versions don't have enough bits. You then fill
                    #in the rest of the pattern with 0's. These are
                    #called "remainder bits."
                    # NOTE(review): bare except also swallows errors other
                    # than bit-stream exhaustion — confirm intent.
                    try:
                        bit = int(next(bits))
                    except:
                        bit = 0

                    #If the pattern is True then flip the bit
                    if pattern(row, col):
                        cur_mask[row][col] = bit ^ 1
                    else:
                        cur_mask[row][col] = bit

    #DEBUG CODE!!!
    #Save all of the masks as png files
    #for i, m in enumerate(masks):
    #   _png(m, self.version, 'mask-{0}.png'.format(i), 5)

    return masks
This method generates all seven masks so that the best mask can be determined. The template parameter is a code matrix that will server as the base for all the generated masks.
def intervention_strategies(df, filepath=None):
    """
    Plots all intervention strategies

    Parameters
    ----------
    df: `pandas.DataFrame`_
        DataFrame with columns starting with `TR:`

    filepath: str
        Absolute path to a folder where to write the plot

    Returns
    -------
    plot
        Generated plot

    .. _pandas.DataFrame: http://pandas.pydata.org/pandas-docs/stable/dsintro.html#dataframe
    """
    logger = logging.getLogger("caspo")

    LIMIT = 50
    if len(df) > LIMIT:
        msg = "Too many intervention strategies to visualize. A sample of %s strategies will be considered." % LIMIT
        logger.warning(msg)
        df = df.sample(LIMIT)

    # Pick a colormap matching the distinct cell values present.
    values = np.unique(df.values.flatten())
    if len(values) == 3:
        palette = matplotlib.colors.ListedColormap(['red', 'white', 'green'])
    elif 1 in values:
        palette = matplotlib.colors.ListedColormap(['white', 'green'])
    else:
        palette = matplotlib.colors.ListedColormap(['red', 'white'])

    width = max((len(df.columns) - 1) * .5, 4)
    height = max(len(df) * 0.6, 2.5)
    plt.figure(figsize=(width, height))

    # Strip the 'TR:' prefix from the column labels.
    df.columns = [c[3:] for c in df.columns]

    ax = sns.heatmap(df, linewidths=.5, cbar=False, cmap=palette,
                     linecolor='gray')
    ax.set_xlabel("Species")
    ax.set_ylabel("Intervention strategy")
    for tick in ax.get_xticklabels():
        tick.set_rotation(90)

    plt.tight_layout()
    if filepath:
        plt.savefig(os.path.join(filepath, 'strategies.pdf'))

    return ax
Plots all intervention strategies Parameters ---------- df: `pandas.DataFrame`_ DataFrame with columns starting with `TR:` filepath: str Absolute path to a folder where to write the plot Returns ------- plot Generated plot .. _pandas.DataFrame: http://pandas.pydata.org/pandas-docs/stable/dsintro.html#dataframe
def _submit_task_with_template(self, task_ids):
    '''Submit tasks by interpolating a shell script defined in job_template'''
    runtime = self.config
    runtime.update({
        'workdir': os.getcwd(),
        'cur_dir': os.getcwd(),  # for backward compatibility
        'verbosity': env.verbosity,
        'sig_mode': env.config.get('sig_mode', 'default'),
        'run_mode': env.config.get('run_mode', 'run'),
        'home_dir': os.path.expanduser('~')
    })
    if '_runtime' in env.sos_dict:
        runtime.update({
            x: env.sos_dict['_runtime'][x]
            for x in ('nodes', 'cores', 'workdir', 'mem', 'walltime')
            if x in env.sos_dict['_runtime']
        })
    # Defaults expected by most job templates.
    if 'nodes' not in runtime:
        runtime['nodes'] = 1
    if 'cores' not in runtime:
        runtime['cores'] = 1

    # let us first prepare a task file
    job_text = ''
    for task_id in task_ids:
        runtime['task'] = task_id
        try:
            job_text += cfg_interpolate(self.job_template, runtime)
            job_text += '\n'
        except Exception as e:
            raise ValueError(
                f'Failed to generate job file for task {task_id}: {e}')

    filename = task_ids[0] + ('.sh' if len(task_ids) == 1 else
                              f'-{task_ids[-1]}.sh')
    # now we need to write a job file
    job_file = os.path.join(
        os.path.expanduser('~'), '.sos', 'tasks', filename)
    # do not translate newline under windows because the script will be executed
    # under linux/mac
    with open(job_file, 'w', newline='') as job:
        job.write(job_text)

    # then copy the job file to remote host if necessary
    self.agent.send_task_file(job_file)

    try:
        # BUG FIX: the command previously contained a literal placeholder
        # instead of the generated script name.
        cmd = f'bash ~/.sos/tasks/{filename}'
        self.agent.run_command(cmd, wait_for_task=self.wait_for_task)
    except Exception as e:
        raise RuntimeError(f'Failed to submit task {task_ids}: {e}')
    return True
Submit tasks by interpolating a shell script defined in job_template
def create_mask(self):
    """Create boolean mask from drawing.

    All areas enclosed by all the shapes drawn will be set to 1 (True)
    in the mask. Otherwise, the values will be set to 0 (False).
    The mask will be inserted as a new image buffer, like ``Mosaic``.
    """
    ntags = len(self._drawn_tags)
    if ntags == 0:
        # Nothing drawn yet; nothing to do.
        return

    old_image = self.fitsimage.get_image()
    if old_image is None:
        # No image displayed to derive a mask from.
        return

    mask = None
    obj_kinds = set()

    # Create mask
    for tag in self._drawn_tags:
        obj = self.canvas.get_object_by_tag(tag)

        # Skip shapes that cannot be converted, but keep going with the
        # rest so one bad shape does not abort the whole mask.
        try:
            cur_mask = old_image.get_shape_mask(obj)
        except Exception as e:
            self.logger.error('Cannot create mask: {0}'.format(str(e)))
            continue

        # OR the individual shape masks together.
        if mask is not None:
            mask |= cur_mask
        else:
            mask = cur_mask

        obj_kinds.add(obj.kind)

    # Might be useful to inherit header from displayed image (e.g., WCS)
    # but the displayed image should not be modified.
    # Bool needs to be converted to int so FITS writer would not crash.
    image = dp.make_image(mask.astype('int16'), old_image, {},
                          pfx=self._mask_prefix)
    imname = image.get('name')

    # Insert new image
    self.fv.gui_call(self.fv.add_image, imname, image, chname=self.chname)

    # Add description to ChangeHistory
    s = 'Mask created from {0} drawings ({1})'.format(
        ntags, ','.join(sorted(obj_kinds)))
    info = dict(time_modified=datetime.utcnow(), reason_modified=s)
    self.fv.update_image_info(image, info)
    self.logger.info(s)
Create boolean mask from drawing. All areas enclosed by all the shapes drawn will be set to 1 (True) in the mask. Otherwise, the values will be set to 0 (False). The mask will be inserted as a new image buffer, like ``Mosaic``.
def clean(self, py_value):
    """
    Cleans the value before storing it.

    Falls back to returning the value untouched (with a warning) when the
    optional `bleach` dependency is not installed.

    :param:     py_value : <str>

    :return:    <str>
    """
    # Narrow the try to the import so real errors from bleach.clean()
    # are not silently swallowed as "missing dependency".
    try:
        import bleach
    except ImportError:
        # BUG FIX: the warning previously named "webhelpers" although the
        # module actually imported is bleach.
        warnings.warn('Unable to clean string column without bleach installed.')
        return py_value

    return bleach.clean(py_value, **self.__bleachOptions)
Cleans the value before storing it. :param: py_value : <str> :return: <str>
def TNRS(self, names, context_name=None, id_list=None, fuzzy_matching=False, include_deprecated=False, include_dubious=False, do_approximate_matching=None, wrap_response=None): """Takes a name and optional contextName returns a list of matches. `wrap_response` can be True to return a TNRSResponse object, None to return the "raw" response dict, or a function/class that takes (response, query_data=dict) as its arguments. Each match is a dict with: 'higher' boolean DEF??? 'exact' boolean for exact match 'ottId' int 'name' name (or uniqname???) for the taxon in OTT 'nodeId' int ID of not in the taxomachine db. probably not of use to anyone... """ # if context_name is None: # context_name = 'All life' if do_approximate_matching is not None: fuzzy_matching = do_approximate_matching if context_name and context_name not in self.valid_contexts: raise ValueError('"{}" is not a valid context name'.format(context_name)) if not (isinstance(names, list) or isinstance(names, tuple)): names = [names] for name in names: if len(name) < 2: raise ValueError('Name "{}" found. Names must have at least 2 characters!'.format(name)) if id_list and len(id_list) != len(names): raise ValueError('"id_list must be the same size as "names"') data = {'names': names} if self.use_v1: uri = '{p}/contextQueryForNames'.format(p=self.prefix) else: uri = '{p}/match_names'.format(p=self.prefix) if context_name: if self.use_v1: data['contextName'] = context_name else: data['context_name'] = context_name data['do_approximate_matching'] = bool(fuzzy_matching) if id_list: data['ids'] = list(id_list) if include_deprecated: data['include_deprecated'] = True if include_dubious: data['include_dubious'] = True resp = self.json_http_post(uri, data=anyjson.dumps(data)) if wrap_response is None or wrap_response is False: return resp if wrap_response is True: return TNRSResponse(self._wr, resp, query_data=data) return wrap_response(resp, query_data=data)
Takes a name and optional contextName and returns a list of matches. `wrap_response` can be True to return a TNRSResponse object, None to return the "raw" response dict, or a function/class that takes (response, query_data=dict) as its arguments. Each match is a dict with: 'higher' boolean DEF??? 'exact' boolean for exact match 'ottId' int 'name' name (or uniqname???) for the taxon in OTT 'nodeId' int ID of the node in the taxomachine db. probably not of use to anyone...
def item_fields(self):
    """ Get all available item fields """
    template = self.templates.get("item_fields")
    # Serve the cached copy when one exists and is still current.
    if template and not self._updated("/itemFields", template,
                                      "item_fields"):
        return template["tmplt"]
    # Otherwise perform a normal request and cache the response.
    retrieved = self._retrieve_data("/itemFields")
    return self._cache(retrieved, "item_fields")
Get all available item fields
def load(self, filename):
    '''load settings from a file. Return True/False on success/failure'''
    try:
        handle = open(filename, mode='r')
    except Exception:
        return False
    # readline() returns '' at EOF, which terminates the iterator.
    for raw in iter(handle.readline, ''):
        line = raw.rstrip()
        name, sep, value = line.partition('=')
        if not sep:
            # No '=' on this line; skip it.
            continue
        self.set(name, value)
    handle.close()
    return True
load settings from a file. Return True/False on success/failure
def add(self, element):
    """Add an element to this set."""
    # setdefault keeps the first element stored under a given key,
    # matching the original "insert only if absent" behavior.
    self._elements.setdefault(self._transform(element), element)
Add an element to this set.
def process_upload(upload_file, instance, form, event, request):
    """
    Helper function that actually processes and saves the upload(s).
    Segregated out for readability.
    """
    caption = form.cleaned_data.get('caption')
    upload_name = upload_file.name.lower()
    # Only JPEG images are handled.
    if upload_name.endswith(('.jpg', '.jpeg')):
        try:
            upload = Image(
                event=event,
                image=upload_file,
                caption=caption,
            )
            upload.save()
            instance.photos.add(upload)
        except Exception as error:
            messages.error(
                request, 'Error saving image: {}.'.format(error))
Helper function that actually processes and saves the upload(s). Segregated out for readability.
def _read_local_kwalitee_configuration(directory="."): """Check if the repo has a ``.kwalitee.yaml`` file.""" filepath = os.path.abspath(os.path.join(directory, '.kwalitee.yml')) data = {} if os.path.exists(filepath): with open(filepath, 'r') as file_read: data = yaml.load(file_read.read()) return data
Check if the repo has a ``.kwalitee.yml`` file.
def set_op_version(version):
    '''
    .. versionadded:: 2019.2.0

    Set the glusterfs volume op-version

    version
        Version to set the glusterfs volume op-version

    CLI Example:

    .. code-block:: bash

        salt '*' glusterfs.set_op_version <version>
    '''
    cmd = 'volume set all cluster.op-version {0}'.format(version)
    root = _gluster_xml(cmd)
    # On failure return a (False, error-message) tuple rather than raising,
    # matching the module's other command wrappers.
    if not _gluster_ok(root):
        return False, root.find('opErrstr').text
    return root.find('output').text
.. versionadded:: 2019.2.0 Set the glusterfs volume op-version version Version to set the glusterfs volume op-version CLI Example: .. code-block:: bash salt '*' glusterfs.set_op_version <version>
def verify(self, h, sig, sig_fmt=SER_BINARY):
    """ Verifies that `sig' is a signature for a message with
        SHA-512 hash `h'.

    :param h: SHA-512 hash of the message the signature should cover
    :param sig: serialized signature value
    :param sig_fmt: serialization format of `sig' (default SER_BINARY)
    :return: result of the underlying ECDSA verification
    """
    s = deserialize_number(sig, sig_fmt)
    return self.p._ECDSA_verify(h, s)
Verifies that `sig' is a signature for a message with SHA-512 hash `h'.
def _dev_api(cls):
    """Get a developer instance for GitHub API access.

    Authenticates the github3 client with this remote application's
    OAuth consumer key/secret so the higher developer rate limits apply.
    """
    gh = github3.GitHub()
    gh.set_client_id(cls.remote.consumer_key, cls.remote.consumer_secret)
    return gh
Get a developer instance for GitHub API access.
def set_debug(self, set_to=True):
    """
    Sets the capture to debug mode (or turns it off if specified).

    :param set_to: True pushes a stdout StreamHandler and raises the log
        level to DEBUG; False only records the flag.
        NOTE(review): disabling does not pop the handler or restore the
        previous log level -- confirm whether that is intended.
    """
    if set_to:
        StreamHandler(sys.stdout).push_application()
        self._log.level = logbook.DEBUG
    self.debug = set_to
Sets the capture to debug mode (or turns it off if specified).
async def patch_entries(self, entry, **kwargs): """ PATCH /api/entries/{entry}.{_format} Change several properties of an entry :param entry: the entry to 'patch' / update :param kwargs: can contain one of the following title: string tags: a list of tags tag1,tag2,tag3 archive: '0' or '1', default '0' archived the entry. starred: '0' or '1', default '0' starred the entry In case that you don't want to *really* remove it.. :return data related to the ext """ # default values params = {'access_token': self.token, 'title': '', 'tags': []} if 'title' in kwargs: params['title'] = kwargs['title'] if 'tags' in kwargs and isinstance(kwargs['tags'], list): params['tags'] = ', '.join(kwargs['tags']) params['archive'] = self.__get_attr(what='archive', type_attr=int, value_attr=(0, 1), **kwargs) params['starred'] = self.__get_attr(what='starred', type_attr=int, value_attr=(0, 1), **kwargs) params['order'] = self.__get_attr(what='order', type_attr=str, value_attr=('asc', 'desc'), **kwargs) path = '/api/entries/{entry}.{ext}'.format( entry=entry, ext=self.format) return await self.query(path, "patch", **params)
PATCH /api/entries/{entry}.{_format} Change several properties of an entry :param entry: the entry to 'patch' / update :param kwargs: can contain one of the following title: string tags: a list of tags tag1,tag2,tag3 archive: '0' or '1', default '0' archived the entry. starred: '0' or '1', default '0' starred the entry In case that you don't want to *really* remove it.. :return data related to the ext
def _get_device_by_label(devices, label): ''' Returns the device with the given label, raises error if the device is not found. devices list of vim.vm.device.VirtualDevice objects key Unique key of device ''' device_labels = [d for d in devices if d.deviceInfo.label == label] if device_labels: return device_labels[0] else: raise salt.exceptions.VMwareObjectNotFoundError( 'Virtual machine device with ' 'label {0} does not exist'.format(label))
Returns the device with the given label, raises error if the device is not found. devices list of vim.vm.device.VirtualDevice objects key Unique key of device
def get_urlpatterns(self):
    """ Returns the URL patterns managed by the considered factory / application.

    Exposes a single ``search`` route whose view is produced by
    ``search_view_factory`` from this factory's configured view and form
    classes.
    """
    return [
        path(
            '',
            search_view_factory(view_class=self.search_view, form_class=self.search_form),
            name='search',
        ),
    ]
Returns the URL patterns managed by the considered factory / application.
def authGenders(self, countsOnly = False, fractionsMode = False, _countsTuple = False):
    """Creates a dict mapping `'Male'`, `'Female'` and `'Unknown'` to lists of the names of all the authors.

    # Parameters

    _countsOnly_ : `optional bool`

    > Default `False`, if `True` the counts (lengths of the lists) will be given instead of the lists of names

    _fractionsMode_ : `optional bool`

    > Default `False`, if `True` the fraction counts (lengths of the lists divided by the total number of authors) will be given instead of the lists of names. This supersedes _countsOnly_

    # Returns

    `dict[str:str or int]`

    > The mapping of genders to author's names or counts
    """
    authDict = recordGenders(self)
    if _countsTuple or countsOnly or fractionsMode:
        rawList = list(authDict.values())
        countsList = []
        # counts always in (Male, Female, Unknown) order
        for k in ('Male','Female','Unknown'):
            countsList.append(rawList.count(k))
        if fractionsMode:
            tot = sum(countsList)
            # pop/append rotates the list in place, replacing each raw
            # count with its fraction while preserving the order above
            for i in range(3):
                countsList.append(countsList.pop(0) / tot)
        if _countsTuple:
            return tuple(countsList)
        else:
            return {'Male' : countsList[0], 'Female' : countsList[1], 'Unknown' : countsList[2]}
    else:
        return authDict
Creates a dict mapping `'Male'`, `'Female'` and `'Unknown'` to lists of the names of all the authors. # Parameters _countsOnly_ : `optional bool` > Default `False`, if `True` the counts (lengths of the lists) will be given instead of the lists of names _fractionsMode_ : `optional bool` > Default `False`, if `True` the fraction counts (lengths of the lists divided by the total number of authors) will be given instead of the lists of names. This supersedes _countsOnly_ # Returns `dict[str:str or int]` > The mapping of genders to author's names or counts
def _notify(self, topic, **kwargs): """ Invokes callbacks for an event topic. @param topic: String event name @type topic: str @param kwargs: Values associated with the event @type kwargs: dict """ for cb in self._connects.get(topic, []): try: cb(**kwargs) except Exception: if self._debug: traceback.print_exc()
Invokes callbacks for an event topic. @param topic: String event name @type topic: str @param kwargs: Values associated with the event @type kwargs: dict
def image_import(self, image_name, url, image_meta, remote_host=None):
    """Import the image specified in url to SDK image repository, and
    create a record in image db, the imported images are located in
    image_repository/prov_method/os_version/image_name/, for example,
    /opt/sdk/images/netboot/rhel7.2/90685d2b-167bimage/0100

    :param image_name: unique image name; used for the repository path and
        as the key of the database record
    :param url: source location of the image; the URL scheme selects the
        transfer backend via ``_scheme2backend``
    :param image_meta: dict with at least ``os_version``; ``md5sum`` is
        optional and, when present, is verified after transfer
    :param remote_host: optional remote host to pull the image from
    :raises SDKImageOperationError: on duplicate name (rs=13), repository
        creation failure (rs=14), or md5 mismatch (rs=4)
    """
    image_info = []
    try:
        image_info = self._ImageDbOperator.image_query_record(image_name)
    except exception.SDKObjectNotExistError:
        # Not found is the normal path for a fresh import.
        msg = ("The image record %s doens't exist in SDK image datebase,"
               " will import the image and create record now" % image_name)
        LOG.info(msg)

    # Ensure the specified image is not exist in image DB
    if image_info:
        msg = ("The image name %s has already exist in SDK image "
               "database, please check if they are same image or consider"
               " to use a different image name for import" % image_name)
        LOG.error(msg)
        raise exception.SDKImageOperationError(rs=13, img=image_name)

    try:
        image_os_version = image_meta['os_version'].lower()
        target_folder = self._pathutils.create_import_image_repository(
            image_os_version, const.IMAGE_TYPE['DEPLOY'],
            image_name)
    except Exception as err:
        msg = ('Failed to create repository to store image %(img)s with '
               'error: %(err)s, please make sure there are enough space '
               'on zvmsdk server and proper permission to create the '
               'repository' % {'img': image_name,
                               'err': six.text_type(err)})
        LOG.error(msg)
        raise exception.SDKImageOperationError(rs=14, msg=msg)

    try:
        import_image_fn = urlparse.urlparse(url).path.split('/')[-1]
        import_image_fpath = '/'.join([target_folder, import_image_fn])
        self._scheme2backend(urlparse.urlparse(url).scheme).image_import(
            image_name, url,
            import_image_fpath,
            remote_host=remote_host)

        # Check md5 after import to ensure import a correct image
        # TODO change to use query image name in DB
        expect_md5sum = image_meta.get('md5sum')
        real_md5sum = self._get_md5sum(import_image_fpath)
        if expect_md5sum and expect_md5sum != real_md5sum:
            msg = ("The md5sum after import is not same as source image,"
                   " the image has been broken")
            LOG.error(msg)
            raise exception.SDKImageOperationError(rs=4)

        # After import to image repository, figure out the image type is
        # single disk image or multiple-disk image,if multiple disks image,
        # extract it, if it's single image, rename its name to be same as
        # specific vdev
        # TODO: (nafei) use sub-function to check the image type
        image_type = 'rootonly'
        if image_type == 'rootonly':
            final_image_fpath = '/'.join([target_folder,
                                          CONF.zvm.user_root_vdev])
            os.rename(import_image_fpath, final_image_fpath)
        elif image_type == 'alldisks':
            # For multiple disks image, extract it, after extract, the
            # content under image folder is like: 0100, 0101, 0102
            # and remove the image file 0100-0101-0102.tgz
            pass

        # TODO: put multiple disk image into consideration, update the
        # disk_size_units and image_size db field
        disk_size_units = self._get_disk_size_units(final_image_fpath)
        image_size = self._get_image_size(final_image_fpath)

        # TODO: update the real_md5sum field to include each disk image
        self._ImageDbOperator.image_add_record(image_name,
                                               image_os_version,
                                               real_md5sum,
                                               disk_size_units,
                                               image_size,
                                               image_type)
        LOG.info("Image %s is import successfully" % image_name)
    except Exception:
        # Cleanup the image from image repository so a failed import does
        # not leave a partial folder behind; the error is re-raised.
        self._pathutils.clean_temp_folder(target_folder)
        raise
Import the image specified in url to SDK image repository, and create a record in image db, the imported images are located in image_repository/prov_method/os_version/image_name/, for example, /opt/sdk/images/netboot/rhel7.2/90685d2b-167bimage/0100
def gzip_if_smaller(content_related, data):
    """Return *data*, gzip-compressed only when that actually saves space.

    Compression is attempted solely for content-related requests whose
    serialized form exceeds 512 bytes; whichever representation is
    smaller is returned.
    """
    if not content_related or len(data) <= 512:
        return data
    compressed = bytes(GzipPacked(data))
    return compressed if len(compressed) < len(data) else data
Calls bytes(request), and based on a certain threshold, optionally gzips the resulting data. If the gzipped data is smaller than the original byte array, this is returned instead. Note that this only applies to content related requests.
def popError(text, title="Lackey Error"):
    """ Creates an error dialog with the specified text. """
    hidden_root = tk.Tk()
    # Hide the empty Tk root so only the message box is visible.
    hidden_root.withdraw()
    tkMessageBox.showerror(title, text)
Creates an error dialog with the specified text.
def Pad(self, n):
    """Pad places zeros at the current offset.

    Writes *n* single zero bytes via Place.
    """
    for _ in range_func(n):
        self.Place(0, N.Uint8Flags)
Pad places zeros at the current offset.
def is_nested(values):
    '''Check if values is composed only by iterable elements.

    Non-iterable *values* yields False; an empty iterable yields True
    (vacuously all-iterable).
    '''
    if not isinstance(values, Iterable):
        return False
    return all(isinstance(item, Iterable) for item in values)
Check if values is composed only by iterable elements.
def add_to_submenu(self, submenu_path, item):
    ''' add an item to a submenu using a menu path array '''
    target = submenu_path[0]
    for entry in self.items:
        if entry.name == target:
            # Descend one level; the child consumes the rest of the path.
            entry.add_to_submenu(submenu_path[1:], item)
            return
    raise ValueError("No submenu (%s) found" % (submenu_path[0]))
add an item to a submenu using a menu path array
def _get(self, text): """ Analyze the text to get the right function Parameters ---------- text : str The text that could call a function """ if self.strict: match = self.prog.match(text) if match: cmd = match.group() if cmd in self: return cmd else: words = self.prog.findall(text) for word in words: if word in self: return word
Analyze the text to get the right function Parameters ---------- text : str The text that could call a function
def pathFromHere_explore(self, astr_startPath = '/'):
    """
    Return a list of paths from "here" in the stree, using the
    child explore access.

    :param astr_startPath: path from which to start
    :return: a list of paths from "here"
    """
    # treeExplore() calls self.lwd on every visited node; lwd appends the
    # node's path to self.l_lwd, which is reset before the walk.
    self.l_lwd = []
    self.treeExplore(startPath = astr_startPath, f=self.lwd)
    return self.l_lwd
Return a list of paths from "here" in the stree, using the child explore access. :param astr_startPath: path from which to start :return: a list of paths from "here"
def post(self, request, *args, **kwargs): """ Run some custom POST logic for Enterprise workflows before routing the user through existing views. """ # pylint: disable=unused-variable enterprise_customer_uuid, course_run_id, course_key, program_uuid = RouterView.get_path_variables(**kwargs) enterprise_customer = get_enterprise_customer_or_404(enterprise_customer_uuid) if course_key: context_data = get_global_context(request, enterprise_customer) try: kwargs['course_id'] = RouterView.get_course_run_id(request.user, enterprise_customer, course_key) except Http404: error_code = 'ENTRV001' log_message = ( 'Could not find course run with id {course_run_id} ' 'for course key {course_key} and ' 'for enterprise_customer_uuid {enterprise_customer_uuid} ' 'and program {program_uuid}. ' 'Returned error code {error_code} to user {userid}'.format( course_key=course_key, course_run_id=course_run_id, enterprise_customer_uuid=enterprise_customer_uuid, error_code=error_code, userid=request.user.id, program_uuid=program_uuid, ) ) return render_page_with_error_code_message(request, context_data, error_code, log_message) return self.redirect(request, *args, **kwargs)
Run some custom POST logic for Enterprise workflows before routing the user through existing views.
def log(duration, message=None, use_last_commit_message=False):
    """ Log time against the current active issue

    :param duration: time to log; the special value '.' means "elapsed
        time since the last mark"
    :param message: optional worklog comment (overrides the default)
    :param use_last_commit_message: take the comment from the most recent
        git commit message instead
    """
    branch = git.branch
    issue = jira.get_issue(branch)

    # Create the comment
    comment = "Working on issue %s" % branch
    if message:
        comment = message
    elif use_last_commit_message:
        comment = git.get_last_commit_message()

    if issue:
        # If the duration is provided use it, otherwise use the elapsed time since the last mark
        duration = jira.get_elapsed_time(issue) if duration == '.' else duration
        if duration:
            # Add the worklog
            jira.add_worklog(issue, timeSpent=duration, adjustEstimate=None, newEstimate=None, reduceBy=None, comment=comment)
            print "Logged %s against issue %s (%s)" % (duration, branch, comment)
        else:
            print "No time logged, less than 0m elapsed."
Log time against the current active issue
def check_lat_extents(self, ds):
    '''
    Check that the values of geospatial_lat_min/geospatial_lat_max
    approximately match the data.

    :param netCDF4.Dataset ds: An open netCDF dataset
    :return: Result scoring up to 2 points (min match, max match)
    '''
    if not (hasattr(ds, 'geospatial_lat_min') or hasattr(ds, 'geospatial_lat_max')):
        return Result(BaseCheck.MEDIUM,
                      False,
                      'geospatial_lat_extents_match',
                      ['geospatial_lat_min/max attribute not found, CF-1.6 spec chapter 4.1'])
    try:
        # type cast
        lat_min = float(ds.geospatial_lat_min)
        lat_max = float(ds.geospatial_lat_max)
    except ValueError:
        return Result(BaseCheck.MEDIUM,
                      False,
                      'geospatial_lat_extents_match',
                      ['Could not convert one of geospatial_lat_min ({}) or max ({}) to float see CF-1.6 spec chapter 4.1'
                       ''.format(ds.geospatial_lat_min, ds.geospatial_lat_max)])

    # identify lat var(s) as per CF 4.1: score each candidate by how many
    # latitude criteria (units, standard_name, axis) it satisfies
    lat_vars = {}  # var -> number of criteria passed
    for name, var in ds.variables.items():
        # must have units
        if not hasattr(var, 'units'):
            continue
        lat_vars[var] = 0
        # units in this set
        if var.units in _possibleyunits:
            lat_vars[var] += 1
        # standard name of "latitude"
        if hasattr(var, 'standard_name') and var.standard_name == 'latitude':
            lat_vars[var] += 1
        # axis of "Y"
        if hasattr(var, 'axis') and var.axis == 'Y':
            lat_vars[var] += 1

    # trim out any zeros
    lat_vars = {k: v for k, v in lat_vars.items() if v > 0}
    if len(lat_vars) == 0:
        return Result(BaseCheck.MEDIUM,
                      False,
                      'geospatial_lat_extents_match',
                      ['Could not find lat variable to test extent of geospatial_lat_min/max, see CF-1.6 spec chapter 4.1'])

    # sort by criteria passed
    final_lats = sorted(lat_vars, key=lambda x: lat_vars[x], reverse=True)

    # all-NaN variables are skipped; a match against ANY candidate passes
    obs_mins = {var._name: np.nanmin(var) for var in final_lats if not np.isnan(var).all()}
    obs_maxs = {var._name: np.nanmax(var) for var in final_lats if not np.isnan(var).all()}

    min_pass = any((np.isclose(lat_min, min_val) for min_val in obs_mins.values()))
    max_pass = any((np.isclose(lat_max, max_val) for max_val in obs_maxs.values()))

    allpass = sum((min_pass, max_pass))

    msgs = []
    if not min_pass:
        msgs.append("Data for possible latitude variables (%s) did not match geospatial_lat_min value (%s)" % (obs_mins, lat_min))
    if not max_pass:
        msgs.append("Data for possible latitude variables (%s) did not match geospatial_lat_max value (%s)" % (obs_maxs, lat_max))

    return Result(BaseCheck.MEDIUM,
                  (allpass, 2),
                  'geospatial_lat_extents_match',
                  msgs)
Check that the values of geospatial_lat_min/geospatial_lat_max approximately match the data. :param netCDF4.Dataset ds: An open netCDF dataset
def human_file_size(size):
    """
    Returns a human-friendly string representing a file size
    that is 2-4 characters long.

    For example, depending on the number of bytes given, can be
    one of::

        256b
        64k
        1.1G

    Parameters
    ----------
    size : int
        The size of the file (in bytes)

    Returns
    -------
    size : str
        A human-friendly representation of the size of the file
    """
    suffixes = ' kMGTPEZY'
    # magnitude 0 -> bytes, 1 -> k, 2 -> M, ... (powers of 1000)
    if size == 0:
        magnitude = 0
    else:
        magnitude = int(math.floor(math.log(size) / math.log(1000)))
    magnitude = max(magnitude, 0)
    suffix = suffixes[magnitude] if magnitude < len(suffixes) else '?'
    scale = int(math.pow(1000, magnitude))

    text = str(float(size) / scale)
    if suffix == ' ':
        # plain byte counts: drop the fractional part entirely
        if '.' in text:
            text = text[:text.index('.')]
    elif text[2] == '.':
        text = text[:2]
    else:
        text = text[:3]
    return "{0:>3s}{1}".format(text, suffix)
Returns a human-friendly string representing a file size that is 2-4 characters long. For example, depending on the number of bytes given, can be one of:: 256b 64k 1.1G Parameters ---------- size : int The size of the file (in bytes) Returns ------- size : str A human-friendly representation of the size of the file
def update_from_env(yaml_dict, prefix=None):
    '''
    Override YAML settings with values from the environment variables.

    - The letter '_' is used to delimit the hierarchy of the YAML settings
      such that the value of 'config.databases.local' will be
      overridden by CONFIG_DATABASES_LOCAL.

    :param yaml_dict: dict-like object exposing ``traverse(callback)``
    :param prefix: optional env-var prefix prepended as ``PREFIX_``
    '''
    prefix = prefix or ""

    def _set_env_var(path, node):
        # Build e.g. PREFIX_DATABASES_LOCAL from the settings path.
        env_path = "{0}{1}{2}".format(
            prefix.upper(),
            '_' if prefix else '',
            '_'.join([str(key).upper() for key in path])
        )
        env_val = os.environ.get(env_path, None)
        if env_val is not None:
            # convert the value to a YAML-defined type
            env_dict = yamldict.load('val: {0}'.format(env_val))
            return env_dict.val
        else:
            # None tells traverse() to keep the existing value.
            return None

    # traverse yaml_dict with the callback function
    yaml_dict.traverse(_set_env_var)
Override YAML settings with values from the environment variables. - The letter '_' is used to delimit the hierarchy of the YAML settings such that the value of 'config.databases.local' will be overridden by CONFIG_DATABASES_LOCAL.
def suppress_output(reverse=False):
    """
    Suppress output

    Redirects stdout/stderr to the null device; with ``reverse=True``
    the original interpreter streams are restored.

    :param reverse: when True, restore ``sys.__stdout__``/``sys.__stderr__``
    """
    if reverse:
        sys.stdout = sys.__stdout__
        sys.stderr = sys.__stderr__
    else:
        # The original assigned the *path string* os.devnull, which broke
        # any subsequent write()/print(); assign a real writable file.
        devnull = open(os.devnull, 'w')
        sys.stdout = devnull
        sys.stderr = devnull
Suppress output
def unflag_field(self, move_x, move_y):
    """Unflag or unquestion a grid by given position.

    Only cells currently marked 9 or 10 are reset to 11; anything else
    is left untouched. (11 presumably means "unmarked" -- confirm
    against the board encoding.)
    """
    if self.info_map[move_y, move_x] in (9, 10):
        self.info_map[move_y, move_x] = 11
Unflag or unquestion a grid by given position.
def set_routput(self, routput):
    """
    Add routput to be used in next api call

    :param routput: key to append to the pending outputs
    :return: (True, "Ok") on success, (False, reason) when routput
        is not a string
    """
    # isinstance accepts str subclasses, which the original
    # `type(...) != str` comparison wrongly rejected.
    if not isinstance(routput, str):
        return False, "Routput must be string"
    self.r_outputs.append(routput)
    return True, "Ok"
Add routput to be used in next api call :param routput: key :return: True/False, message
def base(number, input_base=10, output_base=10, max_depth=10,
         string=False, recurring=True):
    """
    Converts a number from any base to any another.

    Args:
        number(tuple|str|int): The number to convert.
        input_base(int): The base to convert from (defualt 10).
        output_base(int): The base to convert to (default 10).
        max_depth(int): The maximum number of fractional digits (defult 10).
        string(bool): If True output will be in string representation,
            if False output will be in tuple representation (defult False).
        recurring(bool): Attempt to find repeating digits in the fractional
            part of a number. Repeated digits will be enclosed with "[" and "]"
            (default True).
    Returns:
        A tuple of digits in the specified base:
        (int, int, int, ... , '.' , int, int, int)
        If the string flag is set to True,
        a string representation will be used instead.
    Raises:
        ValueError if a digit value is too high for the input_base.
    Example:
        >>> base((1,9,6,'.',5,1,6), 17, 20)
        (1, 2, 8, '.', 5, 19, 10, 7, 17, 2, 13, 13, 1, 8)
    """
    # Convert number to tuple representation.
    if type(number) == int or type(number) == float:
        number = str(number)
    if type(number) == str:
        number = represent_as_tuple(number)
    # Check that the number is valid for the input base.
    if not check_valid(number, input_base):
        raise ValueError
    # Deal with base-1 special case: unary numbers are runs of 1s.
    if input_base == 1:
        number = (1,) * number.count(1)
    # Expand any recurring digits.
    number = expand_recurring(number, repeat=5)
    # Convert a fractional number: integer and fractional parts are
    # converted independently and re-joined at the radix point.
    if "." in number:
        radix_point = number.index(".")
        integer_part = number[:radix_point]
        fractional_part = number[radix_point:]
        integer_part = integer_base(integer_part, input_base, output_base)
        fractional_part = fractional_base(fractional_part, input_base,
                                          output_base, max_depth)
        number = integer_part + fractional_part
        number = truncate(number)
    # Convert an integer number.
    else:
        number = integer_base(number, input_base, output_base)
    if recurring:
        number = find_recurring(number, min_repeat=2)
    # Return the converted number as a srring or tuple.
    return represent_as_string(number) if string else number
Converts a number from any base to any another. Args: number(tuple|str|int): The number to convert. input_base(int): The base to convert from (defualt 10). output_base(int): The base to convert to (default 10). max_depth(int): The maximum number of fractional digits (defult 10). string(bool): If True output will be in string representation, if False output will be in tuple representation (defult False). recurring(bool): Attempt to find repeating digits in the fractional part of a number. Repeated digits will be enclosed with "[" and "]" (default True). Returns: A tuple of digits in the specified base: (int, int, int, ... , '.' , int, int, int) If the string flag is set to True, a string representation will be used instead. Raises: ValueError if a digit value is too high for the input_base. Example: >>> base((1,9,6,'.',5,1,6), 17, 20) (1, 2, 8, '.', 5, 19, 10, 7, 17, 2, 13, 13, 1, 8)
def to_description_dict(self):
    """
    Build the AWS-style description dict for this certificate.

    You might need keys below in some situation

    - caCertificateId
    - previousOwnedBy
    """
    description = {
        'certificateArn': self.arn,
        'certificateId': self.certificate_id,
        'status': self.status,
        'certificatePem': self.certificate_pem,
        'ownedBy': self.owner,
        'creationDate': self.creation_date,
        'lastModifiedDate': self.last_modified_date,
        'transferData': self.transfer_data,
    }
    return description
You might need keys below in some situation - caCertificateId - previousOwnedBy
def normalize_object_slot(self, value=_nothing, prop=None, obj=None):
    """This hook wraps ``normalize_slot``, and performs clean-ups which
    require access to the object the slot is in as well as the value.
    """
    if value is not _nothing and hasattr(prop, "compare_as"):
        # compare_as_info tells us which of (obj, value) the comparator
        # expects; default is value-only.
        wants_obj, wants_value = getattr(prop, "compare_as_info", (False, 1))
        call_args = []
        if wants_obj:
            call_args.append(obj)
        if wants_value:
            call_args.append(value)
        value = prop.compare_as(*call_args)
    return self.normalize_slot(value, prop)
This hook wraps ``normalize_slot``, and performs clean-ups which require access to the object the slot is in as well as the value.
def _supply_data(data_sink, context):
    """ Supply data to the data sink

    :param data_sink: sink object whose ``sink`` method consumes *context*
    :param context: context carrying the data plus ``name``/``help`` metadata
    :raises ValueError: wrapping any exception from the sink, with the
        original traceback preserved
    """
    try:
        data_sink.sink(context)
    except Exception as e:
        ex = ValueError("An exception occurred while "
                        "supplying data to data sink '{ds}'\n\n"
                        "{e}\n\n"
                        "{help}".format(ds=context.name,
                                        e=str(e),
                                        help=context.help()))
        # Python 2 three-expression raise: re-raise the wrapper with the
        # original traceback attached.
        raise ex, None, sys.exc_info()[2]
Supply data to the data sink
def __from_xml(self,value):
    """Initialize a `VCardAdr` object from and XML element.

    :Parameters:
        - `value`: field value as an XML node
    :Types:
        - `value`: `libxml2.xmlNode`"""
    n=value.children
    vns=get_node_ns(value)
    # Walk the element's children, copying each recognized sub-element
    # into the matching attribute.
    while n:
        if n.type!='element':
            n=n.next
            continue
        ns=get_node_ns(n)
        # skip children that live in a different namespace than the parent
        if (ns and vns and ns.getContent()!=vns.getContent()):
            n=n.next
            continue
        if n.name=='POBOX':
            self.pobox=unicode(n.getContent(),"utf-8","replace")
        elif n.name in ('EXTADR', 'EXTADD'):
            self.extadr=unicode(n.getContent(),"utf-8","replace")
        elif n.name=='STREET':
            self.street=unicode(n.getContent(),"utf-8","replace")
        elif n.name=='LOCALITY':
            self.locality=unicode(n.getContent(),"utf-8","replace")
        elif n.name=='REGION':
            self.region=unicode(n.getContent(),"utf-8","replace")
        elif n.name=='PCODE':
            self.pcode=unicode(n.getContent(),"utf-8","replace")
        elif n.name=='CTRY':
            self.ctry=unicode(n.getContent(),"utf-8","replace")
        elif n.name in ("HOME","WORK","POSTAL","PARCEL","DOM","INTL",
                "PREF"):
            self.type.append(n.name.lower())
        n=n.next
    # vCard default address types when none were given
    if self.type==[]:
        self.type=["intl","postal","parcel","work"]
    elif "dom" in self.type and "intl" in self.type:
        raise ValueError("Both 'dom' and 'intl' specified in vcard ADR")
Initialize a `VCardAdr` object from and XML element. :Parameters: - `value`: field value as an XML node :Types: - `value`: `libxml2.xmlNode`
def load_key(self, key, key_type, key_encoding):
    """Load a key from bytes.

    :param bytes key: Key bytes
    :param EncryptionKeyType key_type: Type of key
    :param KeyEncodingType key_encoding: Encoding used to serialize key
    :returns: Loaded key
    :raises ValueError: on an unsupported key type or key encoding
    """
    supported_types = (EncryptionKeyType.PRIVATE, EncryptionKeyType.PUBLIC)
    if key_type not in supported_types:
        raise ValueError(
            'Invalid key type "{key_type}" for cipher "{cipher}"'.format(key_type=key_type, cipher=self.java_name)
        )

    supported_encodings = (KeyEncodingType.DER, KeyEncodingType.PEM)
    if key_encoding not in supported_encodings:
        raise ValueError(
            'Invalid key encoding "{key_encoding}" for cipher "{cipher}"'.format(
                key_encoding=key_encoding, cipher=self.java_name
            )
        )

    # Dispatch to the loader registered for this cipher family.
    loader = _KEY_LOADERS[self.cipher]
    return loader(key, key_type, key_encoding)
Load a key from bytes. :param bytes key: Key bytes :param EncryptionKeyType key_type: Type of key :param KeyEncodingType key_encoding: Encoding used to serialize key :returns: Loaded key
def get_s3_bucket_keys(api_client, bucket_name, bucket, check_encryption, check_acls):
    """
    Get key-specific information (server-side encryption, acls, etc...)

    :param api_client: boto3-style S3 client
    :param bucket_name: name of the bucket to list
    :param bucket: dict updated in place with 'keys' and 'keys_count'
    :param check_encryption: when True, fetch each object to read its SSE headers
    :param check_acls: when True, fetch each object's grantees
    :return: None (results are written into *bucket*)
    """
    bucket['keys'] = []
    keys = handle_truncated_response(api_client.list_objects, {'Bucket': bucket_name}, ['Contents'])
    bucket['keys_count'] = len(keys['Contents'])
    key_count = 0
    update_status(key_count, bucket['keys_count'], 'keys')
    for key in keys['Contents']:
        key_count += 1
        key['name'] = key.pop('Key')
        key['LastModified'] = str(key['LastModified'])
        if check_encryption:
            try:
                # The encryption configuration is only accessible via an HTTP header, only returned when requesting one object at a time...
                k = api_client.get_object(Bucket = bucket_name, Key = key['name'])
                key['ServerSideEncryption'] = k['ServerSideEncryption'] if 'ServerSideEncryption' in k else None
                key['SSEKMSKeyId'] = k['SSEKMSKeyId'] if 'SSEKMSKeyId' in k else None
            except Exception as e:
                # NOTE(review): a failed get_object skips the key entirely
                # (it is never appended) -- confirm this is intended.
                printException(e)
                continue
        if check_acls:
            try:
                key['grantees'] = get_s3_acls(api_client, bucket_name, bucket, key_name = key['name'])
            except Exception as e:
                # ACL failures are silently ignored; the key is still kept.
                continue
        # Save it
        bucket['keys'].append(key)
    update_status(key_count, bucket['keys_count'], 'keys')
Get key-specific information (server-side encryption, acls, etc...) :param api_client: :param bucket_name: :param bucket: :param check_encryption: :param check_acls: :return: