code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def Q(self):
    """Contact frequency matrix when contigs are already oriented.

    Similar to the matrix M, but rather than storing the number of links
    in each cell, each cell points to an array holding the actual link
    distances.
    """
    N = self.N
    tig_to_idx = self.tig_to_idx
    signs = self.signs
    # BB is presumably a module-level constant giving the per-cell
    # distance-array depth — not visible here; confirm at module scope.
    # Cells default to -1 (no contact recorded).
    Q = np.ones((N, N, BB), dtype=int) * -1
    for (at, bt), k in self.contacts_oriented.items():
        # Skip contacts whose contigs are not indexed in this matrix.
        if not (at in tig_to_idx and bt in tig_to_idx):
            continue
        ai = tig_to_idx[at]
        bi = tig_to_idx[bt]
        ao = signs[ai]
        bo = signs[bi]
        # Pick the distance array matching the current orientations.
        Q[ai, bi] = k[(ao, bo)]
    return Q
Contact frequency matrix when contigs are already oriented. This is a similar matrix to M, but rather than having the number of links in the cell, it points to an array that has the actual distances.
def from_iterable(cls, target_types, address_mapper, adaptor_iter):
    """Create a new DependentGraph from an iterable of TargetAdaptor subclasses."""
    graph = cls(target_types, address_mapper)
    valid_addresses = set()
    for adaptor in adaptor_iter:
        graph._inject_target(adaptor)
        valid_addresses.add(adaptor.address)
    graph._validate(valid_addresses)
    return graph
Create a new DependentGraph from an iterable of TargetAdaptor subclasses.
def post_comment(self, message):
    """Comment on the issue as a whole, not on a particular line."""
    endpoint = (
        'https://api.github.com/repos/%s/issues/%s/comments'
        % (self.repo_name, self.pr_number)
    )
    response = self.requester.post(endpoint, {'body': message})
    if response.status_code >= 400:
        log.error("Error posting comment to github. %s", response.json())
    return response
Comments on an issue, not on a particular line.
def get_file_for_id(_id, language=DEFAULT_LANG):
    """Locate the DB file whose name starts with the given id.

    :param _id: The id to search (int)
    :param language: The user's language (en, es, etc.)
    :return: The full path to the matching file
    :raises NotFoundException: when no file matches the id
    """
    prefix = '%s-' % _id
    db_path = DBVuln.get_json_path(language=language)
    for candidate in os.listdir(db_path):
        if candidate.startswith(prefix):
            return os.path.join(db_path, candidate)
    raise NotFoundException('No data for ID %s' % _id)
Given _id, search the DB for the file which contains the data :param _id: The id to search (int) :param language: The user's language (en, es, etc.) :return: The filename
def exists(Name, region=None, key=None, keyid=None, profile=None):
    """Given a trail name, check to see if the given trail exists.

    Returns ``{'exists': True/False}`` or ``{'error': ...}`` on other
    boto errors.

    CLI Example:

    .. code-block:: bash

        salt myminion boto_cloudtrail.exists mytrail
    """
    try:
        conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
        conn.get_trail_status(Name=Name)
        return {'exists': True}
    except ClientError as e:
        err = __utils__['boto3.get_error'](e)
        # A missing trail is reported via an error code, not a distinct
        # exception type, so inspect the response payload directly.
        if e.response.get('Error', {}).get('Code') == 'TrailNotFoundException':
            return {'exists': False}
        return {'error': err}
Given a trail name, check to see if the given trail exists. Returns True if the given trail exists and returns False if the given trail does not exist. CLI Example: .. code-block:: bash salt myminion boto_cloudtrail.exists mytrail
def get_storage(self, hex_contract_address: str, hex_key: str, is_full: bool = False) -> str:
    """Get the stored value for a contract address and storage key.

    :param hex_contract_address: hexadecimal contract address.
    :param hex_key: a hexadecimal stored key.
    :param is_full: return the whole JSON-RPC response instead of just the result.
    :return: the information of contract storage.
    """
    rpc_payload = self.generate_json_rpc_payload(
        RpcMethod.GET_STORAGE, [hex_contract_address, hex_key, 1])
    rpc_response = self.__post(self.__url, rpc_payload)
    return rpc_response if is_full else rpc_response['result']
This interface is used to get the corresponding stored value based on hexadecimal contract address and stored key. :param hex_contract_address: hexadecimal contract address. :param hex_key: a hexadecimal stored key. :param is_full: :return: the information of contract storage.
def training_status_renderer_context(exercise_names: List[str], renderman: RenderingManager):
    """Yield a TrainingStatusRenderer, clearing the screen on exit.

    Ensures that the screen is always cleared, even on fatal errors in
    code that uses this renderer.

    NOTE(review): this generator is presumably decorated with
    ``contextlib.contextmanager`` at its definition site — confirm.
    """
    renderer = TrainingStatusRenderer(exercise_names, renderman)
    try:
        yield renderer
    finally:
        renderer.clear_screen()
Ensures that the screen is always cleared, even on fatal errors in code that uses this renderer.
def summary(self):
    """Summarize the alignments for this subject.

    @return: A C{dict} with keys: bestScore, coverage, hspCount,
        medianScore, readCount, subjectLength, subjectTitle.
    """
    return dict(
        bestScore=self.bestHsp().score.score,
        coverage=self.coverage(),
        hspCount=self.hspCount(),
        medianScore=self.medianScore(),
        readCount=self.readCount(),
        subjectLength=self.subjectLength,
        subjectTitle=self.subjectTitle,
    )
Summarize the alignments for this subject. @return: A C{dict} with C{str} keys: bestScore: The C{float} best score of the matching reads. coverage: The C{float} fraction of the subject genome that is matched by at least one read. hspCount: The C{int} number of hsps that match the subject. medianScore: The C{float} median score of the matching reads. readCount: The C{int} number of reads that match the subject. subjectLength: The C{int} length of the subject. subjectTitle: The C{str} title of the subject.
def date_in_range(date1, date2, range):
    """Check if two dates are within a specific range of days.

    NOTE(review): the parameter name ``range`` shadows the builtin; kept
    to preserve the keyword-argument interface for existing callers.
    """
    start = convert_date(date1)
    end = convert_date(date2)
    delta = end - start
    return delta.days <= range
Check if two date objects are within a specific range
def convert_filename(txtfilename, outdir='.'):
    """Convert a .TXT filename to a Therion .TH filename.

    The extension is stripped from the basename only. The previous
    implementation ran ``rsplit('.', 1)`` over the *joined* path, so a
    dot in ``outdir`` (e.g. ``'my.dir'``) truncated the directory part
    whenever the basename had no extension.

    :param txtfilename: source filename (any path, any case of .txt)
    :param outdir: output directory for the resulting .th path
    :return: path to the corresponding .th file inside ``outdir``
    """
    base = os.path.splitext(os.path.basename(txtfilename))[0]
    return os.path.join(outdir, base + '.th')
Convert a .TXT filename to a Therion .TH filename
def transition_matrix_non_reversible(C):
    r"""Estimate a non-reversible transition matrix from count matrix C.

    T_ij = c_ij / c_i   where   c_i = sum_j c_ij

    Parameters
    ----------
    C : ndarray, shape (n, n)
        count matrix

    Returns
    -------
    T : ndarray, shape (n, n)
        Estimated transition matrix (rows sum to 1)

    Raises
    ------
    ValueError
        if any row sum is not strictly positive.

    Note: the previous body contained a stray bare ``r`` expression (an
    orphaned raw-string prefix) that raised NameError at runtime.
    """
    rowsums = 1.0 * np.sum(C, axis=1)
    if np.min(rowsums) <= 0:
        raise ValueError(
            "Transition matrix has row sum of " + str(np.min(rowsums)) +
            ". Must have strictly positive row sums.")
    # Broadcast the row sums down each row to normalize.
    return np.divide(C, rowsums[:, np.newaxis])
r""" Estimates a non-reversible transition matrix from count matrix C T_ij = c_ij / c_i where c_i = sum_j c_ij Parameters ---------- C: ndarray, shape (n,n) count matrix Returns ------- T: Estimated transition matrix
def apply(instance, func, path=None):
    """Recursively apply `func` to all fundamental values of `instance`.

    `func` receives ``(value, path)`` where ``path`` is the slash-joined
    location of the value within the document.
    """
    path = path or os.path.sep
    if isinstance(instance, dict):
        return {key: apply(val, func, os.path.join(path, key))
                for key, val in instance.items()}
    if isinstance(instance, list):
        return [apply(elem, func, os.path.join(path, str(idx)))
                for idx, elem in enumerate(instance)]
    return func(instance, path)
Apply `func` to all fundamental types of `instance`. Parameters ---------- instance : dict instance to apply functions to func : callable function with two arguments (instance, path) to apply to all fundamental types recursively path : str path in the document (defaults to '/') Returns ------- instance : dict instance after applying `func` to fundamental types
def hybrid_forward(self, F, a, b):
    """Forward pass of the Decomposable Attention layer.

    :param F: mxnet symbol/ndarray API handle (supplied by HybridBlock).
    :param a: first input sequence batch.
    :param b: second input sequence batch.
    :return: classifier output ``yhat``.
    """
    # Project both inputs with the shared feed-forward network ``f``.
    tilde_a = self.f(a)
    tilde_b = self.f(b)
    # Unnormalized attention weights between every token pair.
    e = F.batch_dot(tilde_a, tilde_b, transpose_b=True)
    # Soft-align each sequence against the other.
    beta = F.batch_dot(e.softmax(), tilde_b)
    alpha = F.batch_dot(e.transpose([0, 2, 1]).softmax(), tilde_a)
    # Compare original and aligned representations with network ``g``.
    feature1 = self.g(F.concat(tilde_a, beta, dim=2))
    feature2 = self.g(F.concat(tilde_b, alpha, dim=2))
    # Aggregate over the sequence dimension.
    feature1 = feature1.sum(axis=1)
    feature2 = feature2.sum(axis=1)
    # Final classification with network ``h``.
    yhat = self.h(F.concat(feature1, feature2, dim=1))
    return yhat
Forward of Decomposable Attention layer
def draw(self):
    """Draws the checkbox, choosing the surface that matches the
    visible/enabled/pressed/checked state."""
    if not self.visible:
        return
    if self.isEnabled:
        # Pressed look only while the mouse is currently down over the
        # widget and the press originally started on the widget.
        if self.mouseIsDown and self.lastMouseDownOverButton and self.mouseOverButton:
            if self.value:
                self.window.blit(self.surfaceOnDown, self.loc)
            else:
                self.window.blit(self.surfaceOffDown, self.loc)
        else:
            if self.value:
                self.window.blit(self.surfaceOn, self.loc)
            else:
                self.window.blit(self.surfaceOff, self.loc)
    else:
        # Disabled widgets use the greyed-out surfaces.
        if self.value:
            self.window.blit(self.surfaceOnDisabled, self.loc)
        else:
            self.window.blit(self.surfaceOffDisabled, self.loc)
Draws the checkbox.
def valid_replay(info, ping):
    """Make sure the replay isn't corrupt, and is worth looking at."""
    if info.HasField("error"):
        return False
    if info.base_build != ping.base_build:
        # Different game version than the running binary.
        return False
    if info.game_duration_loops < 1000:
        return False
    if len(info.player_info) != 2:
        return False
    # Both players must show minimal skill/activity.
    return all(p.player_apm >= 10 and p.player_mmr >= 1000
               for p in info.player_info)
Make sure the replay isn't corrupt, and is worth looking at.
def restore_watched(plex, opts):
    """Restore watched status from the specified filepath."""
    with open(opts.filepath, 'r') as handle:
        source = json.load(handle)
    differences = defaultdict(lambda: dict())
    for section in _iter_sections(plex, opts):
        print('Finding differences in %s..' % section.title)
        skey = section.title.lower()
        for item in _iter_items(section):
            ikey = _item_key(item)
            sval = source.get(skey,{}).get(ikey)
            # Abort outright when the item is missing from the backup.
            if sval is None:
                raise SystemExit('%s not found' % ikey)
            # NOTE(review): ``sval is not None`` below is always true at
            # this point (the line above raises) — candidate for cleanup.
            if (sval is not None and item.isWatched != sval) and (not opts.watchedonly or sval):
                differences[skey][ikey] = {'isWatched':sval, 'item':item}
    print('Applying %s differences to destination' % len(differences))
    import pprint; pprint.pprint(differences)
Restore watched status from the specified filepath.
def set_event(self, simulation_start=None,
              simulation_duration=None,
              simulation_end=None,
              rain_intensity=2,
              rain_duration=timedelta(seconds=30*60),
              event_type='EVENT',
              ):
    """Initializes event for GSSHA model.

    Selects LongTermMode when ``event_type == 'LONG_TERM'``, otherwise
    EventMode, and applies a uniform precipitation event in both cases.
    (The ``timedelta`` default is evaluated once at import time; it is
    immutable, so sharing it across calls is safe.)
    """
    if event_type == 'LONG_TERM':
        self.event = LongTermMode(self.project_manager,
                                  self.db_session,
                                  self.project_directory,
                                  simulation_start=simulation_start,
                                  simulation_end=simulation_end,
                                  simulation_duration=simulation_duration,
                                  )
    else:
        # Any other value falls back to a single-event simulation.
        self.event = EventMode(self.project_manager,
                               self.db_session,
                               self.project_directory,
                               simulation_start=simulation_start,
                               simulation_duration=simulation_duration,
                               )
    self.event.add_uniform_precip_event(intensity=rain_intensity,
                                        duration=rain_duration)
Initializes event for GSSHA model
def python_to_jupyter_cli(args=None, namespace=None):
    """Exposes the jupyter notebook renderer to the command line.

    Takes the same arguments as ArgumentParser.parse_args.
    """
    # Local import — presumably to avoid an import cycle; confirm.
    from . import gen_gallery
    parser = argparse.ArgumentParser(
        description='Sphinx-Gallery Notebook converter')
    parser.add_argument('python_src_file', nargs='+',
                        help='Input Python file script to convert. '
                             'Supports multiple files and shell wildcards'
                             ' (e.g. *.py)')
    args = parser.parse_args(args, namespace)
    for src_file in args.python_src_file:
        file_conf, blocks = split_code_and_text_blocks(src_file)
        print('Converting {0}'.format(src_file))
        # Each file is converted with a fresh copy of the default config.
        gallery_conf = copy.deepcopy(gen_gallery.DEFAULT_GALLERY_CONF)
        example_nb = jupyter_notebook(blocks, gallery_conf)
        save_notebook(example_nb, replace_py_ipynb(src_file))
Exposes the jupyter notebook renderer to the command line Takes the same arguments as ArgumentParser.parse_args
def rm_auth_key_from_file(user,
                          source,
                          config='.ssh/authorized_keys',
                          saltenv='base',
                          fingerprint_hash_type=None):
    """Remove an authorized key from the specified user's authorized key
    file, using a file as source.

    CLI Example:

    .. code-block:: bash

        salt '*' ssh.rm_auth_key_from_file <user> salt://ssh_keys/<user>.id_rsa.pub
    """
    # Cache the source file from the salt fileserver locally.
    lfile = __salt__['cp.cache_file'](source, saltenv)
    if not os.path.isfile(lfile):
        raise CommandExecutionError(
            'Failed to pull key file from salt file server'
        )
    s_keys = _validate_keys(lfile, fingerprint_hash_type)
    if not s_keys:
        err = (
            'No keys detected in {0}. Is file properly formatted?'.format(
                source
            )
        )
        log.error(err)
        __context__['ssh_auth.error'] = err
        return 'fail'
    else:
        rval = ''
        for key in s_keys:
            rval += rm_auth_key(
                user,
                key,
                config=config,
                fingerprint_hash_type=fingerprint_hash_type
            )
        # Aggregate result: any failure wins, then any removal.
        if 'Key not removed' in rval:
            return 'Key not removed'
        elif 'Key removed' in rval:
            return 'Key removed'
        else:
            return 'Key not present'
Remove an authorized key from the specified user's authorized key file, using a file as source CLI Example: .. code-block:: bash salt '*' ssh.rm_auth_key_from_file <user> salt://ssh_keys/<user>.id_rsa.pub
def on_pubmsg(self, connection, event):
    """Messages received in the channel - forward them to the WebSocket."""
    for text in event.arguments():
        nick = self.get_nickname(event)
        color = self.nicknames[nick]
        self.namespace.emit("message", nick, text, color)
Messages received in the channel - send them to the WebSocket.
def _starting_consonants_only(self, letters: list) -> list:
    """Return ``[index]`` of the first all-consonant letter cluster, or
    ``[]`` once any vowel is encountered (or the input is exhausted).

    The original third branch (``vowels and consonants -> []``) was
    unreachable: the preceding branch already returns whenever the
    cluster contains vowels. It has been removed; behavior is unchanged.
    """
    for idx, letter in enumerate(letters):
        if not self._contains_vowels(letter) and self._contains_consonants(letter):
            return [idx]
        if self._contains_vowels(letter):
            return []
    return []
Return a list of starting consonant positions.
def stdlib(self):
    """A boolean flag. ``True`` if frame is in stdlib.

    :type: bool
    """
    module = self.module
    # pkg_resources ships with setuptools, not the stdlib.
    if module == 'pkg_resources' or module.startswith('pkg_resources.'):
        return False
    filename = self.filename
    # site-packages may live under sys.prefix, so it is checked first.
    if filename.startswith(SITE_PACKAGES_PATHS):
        return False
    return filename.startswith(SYS_PREFIX_PATHS)
A boolean flag. ``True`` if frame is in stdlib. :type: bool
def write(self, message):
    """Coroutine that writes the next packet.

    Sends ``message`` UTF-8 encoded and NUL-terminated. A send error on
    a socket we already closed ourselves is silently ignored; otherwise
    it surfaces as ``BrokenPipeError``.
    """
    try:
        self.socket.send(message.encode('utf-8') + b'\0')
    except socket.error:
        if not self._closed:
            raise BrokenPipeError
    return Future.succeed(None)
Coroutine that writes the next packet.
def build_if_needed(self):
    """Reset shader source if necessary, then refresh shader variables."""
    if self._need_build:
        self._build()
        self._need_build = False
    # Variables are refreshed even when no rebuild happened.
    self.update_variables()
Reset shader source if necessary.
def emit_metadata_for_region_py(self, region, region_filename, module_prefix):
    """Emit Python code generating the metadata for the given region."""
    terrobj = self.territory[region]
    with open(region_filename, "w") as outfile:
        # File prolog (header/imports) followed by the metadata constant.
        prnt(_REGION_METADATA_PROLOG % {'region': terrobj.identifier(), 'module': module_prefix}, file=outfile)
        prnt("PHONE_METADATA_%s = %s" % (terrobj.identifier(), terrobj), file=outfile)
Emit Python code generating the metadata for the given region
def has_callback(obj, handle):
    """Return whether a callback is currently registered for an object."""
    callbacks = obj._callbacks
    if not callbacks:
        return False
    # A single Node means exactly one callback is registered: compare
    # identity. Otherwise it is a container of handles.
    if isinstance(callbacks, Node):
        return handle is callbacks
    return handle in callbacks
Return whether a callback is currently registered for an object.
def get_children_metadata(self):
    """Gets the metadata for children.

    return: (osid.Metadata) - metadata for the children
    *compliance: mandatory -- This method must be implemented.*
    """
    metadata = dict(self._mdata['children'],
                    existing_children_values=self._my_map['childIds'])
    return Metadata(**metadata)
Gets the metadata for children. return: (osid.Metadata) - metadata for the children *compliance: mandatory -- This method must be implemented.*
def add_inverse_query(self, key_val=None):
    """Add an es_dsl inverse (negated) query to the es_dsl Search object.

    :param key_val: a key-value pair (dict) containing the query to be
        added to the search object; defaults to an empty query.
    :returns: self, so the method is chainable.

    The previous signature used a mutable default (``key_val={}``) —
    replaced with the ``None`` sentinel; behavior is unchanged.
    """
    if key_val is None:
        key_val = {}
    q = Q("match", **key_val)
    self.search = self.search.query(~q)
    return self
Add an es_dsl inverse query object to the es_dsl Search object :param key_val: a key-value pair(dict) containing the query to be added to the search object :returns: self, which allows the method to be chainable with the other methods
def has_family_notes(family, data_dir=None):
    """Check if notes exist for a given family.

    Returns True if they exist, False otherwise.
    """
    return os.path.isfile(_family_notes_path(family, data_dir))
Check if notes exist for a given family Returns True if they exist, false otherwise
def patch():
    """Patch aiobotocore client so it generates X-Ray subsegments when
    calling AWS services."""
    # Idempotence guard: never patch twice.
    if hasattr(aiobotocore.client, '_xray_enabled'):
        return
    setattr(aiobotocore.client, '_xray_enabled', True)
    # Trace every API call made through the client.
    wrapt.wrap_function_wrapper(
        'aiobotocore.client',
        'AioBaseClient._make_api_call',
        _xray_traced_aiobotocore,
    )
    # Propagate the trace header on outgoing requests.
    wrapt.wrap_function_wrapper(
        'aiobotocore.endpoint',
        'AioEndpoint.prepare_request',
        inject_header,
    )
Patch aiobotocore client so it generates subsegments when calling AWS services.
def set(self, logicalId, resource):
    """Add the resource to the dictionary under the given logical Id,
    overwriting any existing entry.

    :param string logicalId: Logical Id to set to
    :param SamResource or dict resource: The actual resource data
    """
    if isinstance(resource, SamResource):
        self.resources[logicalId] = resource.to_dict()
    else:
        self.resources[logicalId] = resource
Adds the resource to dictionary with given logical Id. It will overwrite, if the logicalId is already used. :param string logicalId: Logical Id to set to :param SamResource or dict resource: The actual resource data
def _unsigned_bounds(self):
    """Get lower and upper bounds for `self` in unsigned arithmetic.

    :return: a list of (lower_bound, upper_bound) tuples, one per
        unsigned segment (wrapping intervals split into two).
    """
    pieces = self._ssplit()
    if len(pieces) in (1, 2):
        return [(piece.lower_bound, piece.upper_bound) for piece in pieces]
    raise Exception('WTF')
Get lower bound and upper bound for `self` in unsigned arithmetic. :return: a list of (lower_bound, upper_bound) tuples.
def smooth_rectangle(x, y, rec_w, rec_h, gaussian_width_x, gaussian_width_y):
    """Rectangle with a solid central region, then Gaussian fall-off at
    the edges."""
    # Signed distance from the rectangle edge (negative = inside).
    gaussian_x_coord = abs(x)-rec_w/2.0
    gaussian_y_coord = abs(y)-rec_h/2.0
    # Boolean masks: inside the solid rectangle along each axis.
    box_x=np.less(gaussian_x_coord,0.0)
    box_y=np.less(gaussian_y_coord,0.0)
    sigmasq_x=gaussian_width_x*gaussian_width_x
    sigmasq_y=gaussian_width_y*gaussian_width_y
    # Zero width means a hard edge (no fall-off) instead of dividing by
    # zero; float errors are suppressed for the exp underflow case.
    with float_error_ignore():
        falloff_x=x*0.0 if sigmasq_x==0.0 else \
            np.exp(np.divide(-gaussian_x_coord*gaussian_x_coord,2*sigmasq_x))
        falloff_y=y*0.0 if sigmasq_y==0.0 else \
            np.exp(np.divide(-gaussian_y_coord*gaussian_y_coord,2*sigmasq_y))
    # Per axis take solid-or-falloff, then intersect the two axes.
    return np.minimum(np.maximum(box_x,falloff_x), np.maximum(box_y,falloff_y))
Rectangle with a solid central region, then Gaussian fall-off at the edges.
def astype(self, dtype):
    """Return a read-only view performing on-the-fly conversion of the
    underlying data to the given NumPy dtype.

    Parameters
    ----------
    dtype : string or dtype
        NumPy dtype.
    """
    dtype = np.dtype(dtype)
    filters = list(self._filters) if self._filters else []
    # The conversion filter must run first on decode.
    filters.insert(0, AsType(encode_dtype=self._dtype, decode_dtype=dtype))
    return self.view(filters=filters, dtype=dtype, read_only=True)
Returns a view that does on the fly type conversion of the underlying data. Parameters ---------- dtype : string or dtype NumPy dtype. Notes ----- This method returns a new Array object which is a view on the same underlying chunk data. Modifying any data via the view is currently not permitted and will result in an error. This is an experimental feature and its behavior is subject to change in the future. See Also -------- Array.view Examples -------- >>> import zarr >>> import numpy as np >>> data = np.arange(100, dtype=np.uint8) >>> a = zarr.array(data, chunks=10) >>> a[:] array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99], dtype=uint8) >>> v = a.astype(np.float32) >>> v.is_view True >>> v[:] array([ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., 33., 34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50., 51., 52., 53., 54., 55., 56., 57., 58., 59., 60., 61., 62., 63., 64., 65., 66., 67., 68., 69., 70., 71., 72., 73., 74., 75., 76., 77., 78., 79., 80., 81., 82., 83., 84., 85., 86., 87., 88., 89., 90., 91., 92., 93., 94., 95., 96., 97., 98., 99.], dtype=float32)
def ping(host, timeout=False, return_boolean=False):
    """Performs an ICMP ping to a host.

    :param host: hostname to ping (sanitized before shelling out).
    :param timeout: optional per-response wait in seconds.
    :param return_boolean: return True/False instead of the raw output.

    CLI Example:

    .. code-block:: bash

        salt '*' network.ping archlinux.org
    """
    if timeout:
        # NOTE(review): on SunOS the placeholders put the host before the
        # timeout (``ping host timeout`` syntax) — confirm intended.
        if __grains__['kernel'] == 'SunOS':
            cmd = 'ping -c 4 {1} {0}'.format(timeout, salt.utils.network.sanitize_host(host))
        else:
            cmd = 'ping -W {0} -c 4 {1}'.format(timeout, salt.utils.network.sanitize_host(host))
    else:
        cmd = 'ping -c 4 {0}'.format(salt.utils.network.sanitize_host(host))
    if return_boolean:
        ret = __salt__['cmd.run_all'](cmd)
        if ret['retcode'] != 0:
            return False
        else:
            return True
    else:
        return __salt__['cmd.run'](cmd)
Performs an ICMP ping to a host .. versionchanged:: 2015.8.0 Added support for SunOS CLI Example: .. code-block:: bash salt '*' network.ping archlinux.org .. versionadded:: 2015.5.0 Return a True or False instead of ping output. .. code-block:: bash salt '*' network.ping archlinux.org return_boolean=True Set the time to wait for a response in seconds. .. code-block:: bash salt '*' network.ping archlinux.org timeout=3
def _mkdir_for_config(cfg_file=cfg_file):
    """Given a path to a filename, make sure its directory exists.

    NOTE(review): the default binds the module-level ``cfg_file`` at
    import time — confirm this is the intended behavior.
    """
    dirname, filename = os.path.split(cfg_file)
    try:
        os.makedirs(dirname)
    except OSError as exc:
        # An already-existing directory is fine; anything else
        # (permissions, a file in the way) is re-raised.
        if exc.errno == errno.EEXIST and os.path.isdir(dirname):
            pass
        else:
            raise
Given a path to a filename, make sure the directory exists
def remove_data_item(self, data_item: DataItem.DataItem, *, safe: bool=False) -> typing.Optional[typing.Sequence]:
    """Remove data item from document model via cascade delete.

    This method is NOT threadsafe.
    """
    return self.__cascade_delete(data_item, safe=safe)
Remove data item from document model. This method is NOT threadsafe.
def identity_factor(self):
    """Return the identity factor: same scope and cardinality as this
    factor, with every assignment's value equal to 1. Multiplying a
    factor by its identity factor returns the factor itself."""
    ones = np.ones(self.values.size)
    return DiscreteFactor(self.variables, self.cardinality, ones)
Returns the identity factor. Def: The identity factor of a factor has the same scope and cardinality as the original factor, but the values for all the assignments is 1. When the identity factor is multiplied with the factor it returns the factor itself. Returns ------- DiscreteFactor: The identity factor. Examples -------- >>> from pgmpy.factors.discrete import DiscreteFactor >>> phi = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12)) >>> phi_identity = phi.identity_factor() >>> phi_identity.variables ['x1', 'x2', 'x3'] >>> phi_identity.values array([[[ 1., 1.], [ 1., 1.], [ 1., 1.]], [[ 1., 1.], [ 1., 1.], [ 1., 1.]]])
def find_cards(self, source=None, **filters):
    """Generate a card pool with all cards matching the specified
    filters (falling back to this object's stored filters)."""
    active = (filters or self.filters).copy()
    # Resolve lazy filter values against the given source.
    for key, value in active.items():
        if isinstance(value, LazyValue):
            active[key] = value.evaluate(source)
    from .. import cards
    return cards.filter(**active)
Generate a card pool with all cards matching specified filters
def create_api_call(func, settings):
    """Converts an rpc call into an API call governed by the settings.

    The result is ``func`` wrapped by retry/timeout, error-translation
    and (optionally) page-streaming or bundling decorators chosen from
    ``settings``.

    Raises:
        ValueError: if ``settings`` configures both bundling and page
            streaming (they are incompatible).
    """
    def base_caller(api_call, _, *args):
        # Plain pass-through caller (no bundling / page streaming).
        return api_call(*args)

    def inner(request, options=None):
        # Per-call options override the static settings.
        this_options = _merge_options_metadata(options, settings)
        this_settings = settings.merge(this_options)
        # Retry (when configured) subsumes the plain timeout wrapper.
        if this_settings.retry and this_settings.retry.retry_codes:
            api_call = gax.retry.retryable(
                func, this_settings.retry, **this_settings.kwargs)
        else:
            api_call = gax.retry.add_timeout_arg(
                func, this_settings.timeout, **this_settings.kwargs)
        api_call = _catch_errors(api_call, gax.config.API_ERRORS)
        return api_caller(api_call, this_settings, request)

    if settings.page_descriptor:
        if settings.bundler and settings.bundle_descriptor:
            raise ValueError('The API call has incompatible settings: '
                             'bundling and page streaming')
        api_caller = _page_streamable(settings.page_descriptor)
    elif settings.bundler and settings.bundle_descriptor:
        api_caller = _bundleable(settings.bundle_descriptor)
    else:
        api_caller = base_caller
    return inner
Converts an rpc call into an API call governed by the settings. In typical usage, ``func`` will be a callable used to make an rpc request. This will mostly likely be a bound method from a request stub used to make an rpc call. The result is created by applying a series of function decorators defined in this module to ``func``. ``settings`` is used to determine which function decorators to apply. The result is another callable which for most values of ``settings`` has has the same signature as the original. Only when ``settings`` configures bundling does the signature change. Args: func (Callable[Sequence[object], object]): is used to make a bare rpc call. settings (_CallSettings): provides the settings for this call Returns: Callable[Sequence[object], object]: a bound method on a request stub used to make an rpc call Raises: ValueError: if ``settings`` has incompatible values, e.g, if bundling and page_streaming are both configured
def set_user_password(self, username, password):
    """Change the password of an existing user.

    :param username: the username whose password is being changed
    :type username: str
    :param password: the new password for the user
    :type password: str
    """
    # Identifier and literal are escaped by the quoting helpers.
    statement = "SET PASSWORD FOR {0} = {1}".format(
        quote_ident(username), quote_literal(password))
    self.query(statement)
Change the password of an existing user. :param username: the username who's password is being changed :type username: str :param password: the new password for the user :type password: str
def run(self):
    """The method called by the threading library to start the thread.

    Drains the hash queue in batches, analyzes each batch, and pushes
    the per-hash results onto the analysis queue until aborted.
    """
    while not self._abort:
        hashes = self._GetHashes(self._hash_queue, self.hashes_per_batch)
        if hashes:
            time_before_analysis = time.time()
            hash_analyses = self.Analyze(hashes)
            current_time = time.time()
            # Bookkeeping for throughput statistics.
            self.seconds_spent_analyzing += current_time - time_before_analysis
            self.analyses_performed += 1
            for hash_analysis in hash_analyses:
                self._hash_analysis_queue.put(hash_analysis)
                self._hash_queue.task_done()
            # Throttle between batches (e.g. to respect API rate limits).
            time.sleep(self.wait_after_analysis)
        else:
            # Nothing queued; back off before polling again.
            time.sleep(self.EMPTY_QUEUE_WAIT_TIME)
The method called by the threading library to start the thread.
def retrieve(self, id):
    """Retrieve a single note by its unique ID.

    :calls: ``get /notes/{id}``
    :param int id: Unique identifier of a Note.
    :return: Dictionary that supports attribute-style access and
        represents the Note resource.
    :rtype: dict
    """
    # The client returns a (status, headers, body)-style triple; only
    # the body is of interest here.
    _, _, note = self.http_client.get("/notes/{id}".format(id=id))
    return note
Retrieve a single note Returns a single note available to the user, according to the unique note ID provided If the note ID does not exist, this request will return an error :calls: ``get /notes/{id}`` :param int id: Unique identifier of a Note. :return: Dictionary that supports attribute-style access and represents the Note resource. :rtype: dict
def on_frame(self, frame_in):
    """Handle frames sent to Channel0.

    :param frame_in: Amqp frame.
    :return:
    """
    LOGGER.debug('Frame Received: %s', frame_in.name)
    if frame_in.name == 'Heartbeat':
        return
    elif frame_in.name == 'Connection.Close':
        self._close_connection(frame_in)
    elif frame_in.name == 'Connection.CloseOk':
        self._close_connection_ok()
    elif frame_in.name == 'Connection.Blocked':
        self._blocked_connection(frame_in)
    elif frame_in.name == 'Connection.Unblocked':
        self._unblocked_connection()
    elif frame_in.name == 'Connection.OpenOk':
        self._set_connection_state(Stateful.OPEN)
    elif frame_in.name == 'Connection.Start':
        # Handshake step 1: record server properties, reply Start-Ok.
        self.server_properties = frame_in.server_properties
        self._send_start_ok(frame_in)
    elif frame_in.name == 'Connection.Tune':
        # Handshake step 2: negotiate tuning, then open the connection.
        self._send_tune_ok(frame_in)
        self._send_open_connection()
    else:
        LOGGER.error('[Channel0] Unhandled Frame: %s', frame_in.name)
Handle frames sent to Channel0. :param frame_in: Amqp frame. :return:
def add_mismatch(self, entity, *traits):
    """Add a mismatching entity to the index, one bucket per trait.

    :param collections.Hashable entity: the object to index
    :param traits: hashable traits to index the entity with
    """
    for trait in traits:
        self.index[trait].add(entity)
Add a mismatching entity to the index. We do this by simply adding the mismatch to the index. :param collections.Hashable entity: an object to be mismatching the values of `traits_indexed_by` :param list traits: a list of hashable traits to index the entity with
def device_configuration(self, pending=False, use_included=False):
    """Get a specific device configuration.

    A device has at most one loaded and one pending configuration; the
    ``pending`` flag selects which one is returned.

    Keyword Args:
        pending(bool): Fetch the pending configuration instead of the
            loaded one.
        use_included(bool): Use included resources.

    Returns:
        The matching configuration, or None if none is found.
    """
    for config in self.device_configurations(use_included=use_included):
        # A loaded config matches pending=False and vice versa.
        if config.is_loaded() is not pending:
            return config
    return None
Get a specific device configuration. A device can have at most one loaded and one pending device configuration. This returns that device_configuration based on a given flag. Keyword Args: pending(bool): Fetch the pending configuration or return the loaded one. use_included(bool): Use included resources in this device configuration. Returns: The requested loaded or pending configuration or None if no device configuration is found.
def add_new_header_groups(self, groups):
    """Compile the headers belonging to all specified groups, add any
    that are missing from the grid (attaching required drop-down menus),
    and return the headers that were already present."""
    already_present = []
    for group in groups:
        col_names = self.dm[self.dm['group'] == group].index
        for col in col_names:
            if col not in self.grid.col_labels:
                col_number = self.grid.add_col(col)
                # Controlled-vocabulary / relation columns need a menu.
                if col in self.contribution.vocab.vocabularies:
                    self.drop_down_menu.add_drop_down(col_number, col)
                elif col in self.contribution.vocab.suggested:
                    self.drop_down_menu.add_drop_down(col_number, col)
                elif col in ['specimen', 'sample', 'site', 'location', 'specimens', 'samples', 'sites']:
                    self.drop_down_menu.add_drop_down(col_number, col)
                elif col == 'experiments':
                    self.drop_down_menu.add_drop_down(col_number, col)
                # Method codes get their own specialized menu.
                if col == "method_codes":
                    self.drop_down_menu.add_method_drop_down(col_number, col)
            else:
                already_present.append(col)
    return already_present
compile list of all headers belonging to all specified groups eliminate all headers that are already included add any req'd drop-down menus return errors
def tube(self, name):
    """Create tube object, if not created before.

    Returns `Tube` object.
    """
    existing = self.tubes.get(name)
    if existing is None:
        existing = Tube(self, name)
        self.tubes[name] = existing
    return existing
Create tube object, if not created before. Returns `Tube` object.
def hash_user_id(self, user_id: str) -> str:
    """As per the law, anonymize the user identifier (SHA-256 hex digest)
    before sending it."""
    return sha256(user_id.encode()).hexdigest()
As per the law, anonymize user identifier before sending it.
def is_not_empty(value, **kwargs):
    """Indicate whether ``value`` is not empty.

    :returns: ``True`` if ``value`` is not empty, ``False`` otherwise.
    :rtype: bool
    :raises SyntaxError: if ``kwargs`` contains duplicate keyword
        parameters or duplicates parameters passed to the validator
    """
    try:
        validators.not_empty(value, **kwargs)
    except SyntaxError as error:
        # Caller errors (bad kwargs) must propagate, not read as "empty".
        raise error
    except Exception:
        return False
    return True
Indicate whether ``value`` is not empty. :param value: The value to evaluate. :returns: ``True`` if ``value`` is not empty, ``False`` if it is. :rtype: :class:`bool <python:bool>` :raises SyntaxError: if ``kwargs`` contains duplicate keyword parameters or duplicates keyword parameters passed to the underlying validator
def adjust_level(logger, level):
    """Increase a logger's verbosity up to the requested level.

    :param logger: The logger to change (a :class:`~logging.Logger`).
    :param level: The log level to enable (a string or number).

    Only ever widens the scope of logging — a logger that already
    propagates messages at this level is left untouched.
    """
    numeric_level = level_to_number(level)
    if logger.getEffectiveLevel() > numeric_level:
        logger.setLevel(numeric_level)
Increase a logger's verbosity up to the requested level. :param logger: The logger to change (a :class:`~logging.Logger` object). :param level: The log level to enable (a string or number). This function is used by functions like :func:`install()`, :func:`increase_verbosity()` and :func:`.enable_system_logging()` to adjust a logger's level so that log messages up to the requested log level are propagated to the configured output handler(s). It uses :func:`logging.Logger.getEffectiveLevel()` to check whether `logger` propagates or swallows log messages of the requested `level` and sets the logger's level to the requested level if it would otherwise swallow log messages. Effectively this function will "widen the scope of logging" when asked to do so but it will never "narrow the scope of logging". This is because I am convinced that filtering of log messages should (primarily) be decided by handlers.
def _patch_resource(self, method):
    """Replace the current RAML ResourceNode with the variant for the
    given method, if one exists.

    :param str method: the method of the resource
    :raises UnsupportedResourceMethodError: if the resource does not
        support the method
    """
    patched = self.client.get_resource("", self.resource.path, method)
    if not patched:
        raise UnsupportedResourceMethodError(self.resource.path, method)
    self.resource = patched
Patch the current RAML ResourceNode by the resource with the correct method if it exists If the resource with the specified method does not exist an exception is raised. :param str method: the method of the resource :raises UnsupportedResourceMethodError: if resource does not support the method
def updatepLvlNextFunc(self):
    """Create the pLvlNextFunc attribute as a sequence of AR1-style
    functions.

    Draws on the attributes PermGroFac and PrstIncCorr. If cycles == 0,
    the product of PermGroFac across all periods must be 1.0, otherwise
    this method is invalid.
    """
    orig_time = self.time_flow
    self.timeFwd()
    pLvlNextFunc = []
    pLogMean = self.pLvlInitMean
    for t in range(self.T_cycle):
        pLvlNextFunc.append(pLvlFuncAR1(pLogMean,self.PermGroFac[t],self.PrstIncCorr))
        # Mean log income grows with each period's permanent growth factor.
        pLogMean += np.log(self.PermGroFac[t])
    self.pLvlNextFunc = pLvlNextFunc
    self.addToTimeVary('pLvlNextFunc')
    # Restore the original time-flow direction.
    if not orig_time:
        self.timeRev()
A method that creates the pLvlNextFunc attribute as a sequence of AR1-style functions. Draws on the attributes PermGroFac and PrstIncCorr. If cycles=0, the product of PermGroFac across all periods must be 1.0, otherwise this method is invalid. Parameters ---------- None Returns ------- None
def wxcode(code: str) -> str:
    """Translate a weather code into a readable string of variable length.

    Unknown 2-letter chunks are passed through untranslated; codes whose
    (intensity-stripped) length is not 2, 4 or 6 are returned unchanged.
    """
    if not code:
        return ''
    prefix = ''
    if code.startswith('+'):
        prefix = 'Heavy '
        code = code[1:]
    elif code.startswith('-'):
        prefix = 'Light '
        code = code[1:]
    if len(code) not in (2, 4, 6):
        return code
    out = prefix
    while code:
        chunk, code = code[:2], code[2:]
        if chunk in WX_TRANSLATIONS:
            out += WX_TRANSLATIONS[chunk] + ' '
        else:
            out += chunk
    return out.strip()
Translates weather codes into readable strings Returns translated string of variable length
def DEFINE_point(name, default, help):
    """Registers a flag whose value parses as a point.

    :param name: flag name.
    :param default: default value for the flag.
    :param help: help text shown in usage output.
    """
    flags.DEFINE(PointParser(), name, default, help)
Registers a flag whose value parses as a point.
def qteGetAppletFromWidget(widgetObj):
    """Return the applet containing ``widgetObj``, or **None**.

    Looks for the Qtmacs admin structure on the widget itself, then
    walks up the parent chain.
    """
    if widgetObj is None:
        return None
    if hasattr(widgetObj, '_qteAdmin'):
        return widgetObj._qteAdmin.qteApplet
    # Walk up the parent chain; ``chain`` guards against parent cycles.
    chain = [widgetObj]
    node = widgetObj.parent()
    while node not in chain:
        if hasattr(node, '_qteAdmin'):
            return node._qteAdmin.qteApplet
        if node is None:
            return None
        chain.append(node)
        node = node.parent()
    return None
Return the parent applet of ``widgetObj``. |Args| * ``widgetObj`` (**QWidget**): widget (if any) for which the containing applet is requested. |Returns| * **QtmacsApplet**: the applet containing ``widgetObj`` or **None**. |Raises| * **None**
def write_without_mac(self, data, block):
    """Write a data block without integrity check.

    The 16-byte *data* is written to the numbered *block* in service
    0x0009 (NDEF write service). Tag command errors raise
    :exc:`~nfc.tag.TagCommandError`.

    (Removed a pointless ``.format()`` call on a literal log message;
    the emitted string is unchanged.)
    """
    assert len(data) == 16 and type(block) is int
    log.debug("write 1 block without mac")
    sc_list = [tt3.ServiceCode(0, 0b001001)]
    bc_list = [tt3.BlockCode(block)]
    self.write_without_encryption(sc_list, bc_list, data)
Write a data block without integrity check. This is the standard write method for a FeliCa Lite. The 16-byte string or bytearray *data* is written to the numbered *block* in service 0x0009 (NDEF write service). :: data = bytearray(range(16)) # 0x00, 0x01, ... 0x0F try: tag.write_without_mac(data, 5) # write block 5 except nfc.tag.TagCommandError: print("something went wrong") Tag command errors raise :exc:`~nfc.tag.TagCommandError`.
def _umask(self):
    """Return the current umask without permanently changing it."""
    if self.filesystem.is_windows_fs:
        # The fake Windows filesystem has no umask concept.
        return 0
    if sys.platform == 'win32':
        # Posix fake fs running on a real Windows host: a fixed value is
        # returned (presumably mirroring a common default — confirm).
        return 0o002
    else:
        # Reading the umask requires setting it; restore it immediately.
        mask = os.umask(0)
        os.umask(mask)
        return mask
Return the current umask.
def _SetPath(self, path):
    """Sets the current path to watch for new events.

    This also records the size of the old path, if any. If the size
    can't be found, an error is logged (not raised).

    Args:
      path: The full path of the file to watch.
    """
    old_path = self._path
    # Cloud paths are skipped: stat there is expensive and the
    # finalized-size bookkeeping is best-effort anyway.
    if old_path and not io_wrapper.IsCloudPath(old_path):
        try:
            size = tf.io.gfile.stat(old_path).length
            logger.debug('Setting latest size of %s to %d', old_path, size)
            self._finalized_sizes[old_path] = size
        except tf.errors.OpError as e:
            logger.error('Unable to get size of %s: %s', old_path, e)
    self._path = path
    self._loader = self._loader_factory(path)
Sets the current path to watch for new events. This also records the size of the old path, if any. If the size can't be found, an error is logged. Args: path: The full path of the file to watch.
def plot_vxx(self, colorbar=True, cb_orientation='vertical',
             cb_label=None, ax=None, show=True, fname=None, **kwargs):
    """Plot the Vxx component of the tensor.

    When ``ax`` is None a new figure is created (and optionally shown
    and/or saved to ``fname``) and ``(fig, axes)`` is returned;
    otherwise the plot is drawn into the provided axes and nothing is
    returned. Extra keyword arguments are forwarded to
    ``SHGrid.plot()`` / ``plt.imshow()``.
    """
    if cb_label is None:
        # Default label (e.g. '$V_{xx}$') stored on the instance.
        cb_label = self._vxx_label
    if ax is None:
        fig, axes = self.vxx.plot(colorbar=colorbar,
                                  cb_orientation=cb_orientation,
                                  cb_label=cb_label, show=False, **kwargs)
        if show:
            fig.show()
        if fname is not None:
            fig.savefig(fname)
        return fig, axes
    else:
        self.vxx.plot(colorbar=colorbar, cb_orientation=cb_orientation,
                      cb_label=cb_label, ax=ax, **kwargs)
Plot the Vxx component of the tensor. Usage ----- x.plot_vxx([tick_interval, xlabel, ylabel, ax, colorbar, cb_orientation, cb_label, show, fname]) Parameters ---------- tick_interval : list or tuple, optional, default = [30, 30] Intervals to use when plotting the x and y ticks. If set to None, ticks will not be plotted. xlabel : str, optional, default = 'longitude' Label for the longitude axis. ylabel : str, optional, default = 'latitude' Label for the latitude axis. ax : matplotlib axes object, optional, default = None A single matplotlib axes object where the plot will appear. colorbar : bool, optional, default = False If True, plot a colorbar. cb_orientation : str, optional, default = 'vertical' Orientation of the colorbar: either 'vertical' or 'horizontal'. cb_label : str, optional, default = '$V_{xx}$' Text label for the colorbar.. show : bool, optional, default = True If True, plot the image to the screen. fname : str, optional, default = None If present, and if axes is not specified, save the image to the specified file. kwargs : optional Keyword arguements that will be sent to the SHGrid.plot() and plt.imshow() methods.
def register_host():
    """Register the supported Houdini interpreter names as pyblish hosts."""
    for host in ("hython", "hpython", "houdini"):
        pyblish.api.register_host(host)
Register supported hosts
def run(
        project: 'projects.Project',
        step: 'projects.ProjectStep'
) -> dict:
    """Render the step's markdown source into the notebook display.

    :param project: the project that contains the step
    :param step: the markdown step to render
    :return: a success dict, or a failure dict carrying the rendered
        error template when the markdown cannot be displayed
    """
    with open(step.source_path, 'r') as handle:
        source = handle.read()

    try:
        cauldron.display.markdown(source, **project.shared.fetch(None))
    except Exception as error:
        return dict(
            success=False,
            html_message=templating.render_template(
                'markdown-error.html',
                error=error
            )
        )
    return {'success': True}
Runs the markdown file and renders the contents to the notebook display :param project: :param step: :return: A run response dictionary containing
def parse(input, identifier: str = None, use_cache=False, clear_cache=True,
          pattern="*.qface", profile=EProfile.FULL):
    """Parse files or directories into a System and return it.

    Input can be a file, a directory, or a list of files/directories.
    Directories are parsed recursively for files matching *pattern*.
    When *use_cache* is set, the result is stored in a domain cache
    keyed by *identifier*; *clear_cache* discards any previous entry.

    :param input: file, directory, or list of them to parse
    :param identifier: names the parse run; used as the cache key
    :param use_cache: load/store the result in the 'qface.cache' shelve
    :param clear_cache: clears the cached entry first (defaults to True)
    :param pattern: glob used when walking directories
    """
    inputs = input if isinstance(input, (list, tuple)) else [input]
    logger.debug('parse input={0}'.format(inputs))
    identifier = 'system' if not identifier else identifier
    system = System()
    cache = None
    try:
        if use_cache:
            cache = shelve.open('qface.cache')
            if identifier in cache and clear_cache:
                del cache[identifier]
            if identifier in cache:
                system = cache[identifier]
        for input in inputs:
            path = Path.getcwd() / str(input)
            if path.isfile():
                FileSystem.parse_document(path, system)
            else:
                for document in path.walkfiles(pattern):
                    FileSystem.parse_document(document, system)
        if use_cache:
            cache[identifier] = system
        return system
    finally:
        # BUG FIX: the shelve handle was previously never closed, so the
        # cache file could be left unflushed/locked on disk.
        if cache is not None:
            cache.close()
Input can be either a file or directory, or a list of files or directories. A directory will be parsed recursively. The function returns the resulting system. Stores the result of the run in the domain cache named after the identifier. :param input: file or directory to parse, or a list of them :param identifier: identifies the parse run. Used to name the cache :param use_cache: load/store results in the domain cache (defaults to false) :param clear_cache: clears the domain cache (defaults to true)
def can_process(self, statement):
    """Return True when this adapter responds to *statement* with full confidence.

    The computed response is cached under the statement text so the
    processing work is not repeated.
    """
    result = self.process(statement)
    self.cache[statement.text] = result
    return result.confidence == 1
Determines whether it is appropriate for this adapter to respond to the user input.
def qr(self, text):
    """Render *text* as a QR code image and print it on the device."""
    code = qrcode.QRCode(version=4, box_size=4, border=1)
    code.add_data(text)
    code.make(fit=True)
    image = code.make_image()._img.convert("RGB")
    self._convert_image(image)
Print QR Code for the provided string
def xpath(self, xpath):
    """Find other nodes by an XPath expression rooted at this node.

    Returns a list of node objects, one per non-empty id in the
    comma-separated id string produced by the xpath lookup.
    """
    nodes = []
    for node_id in self._get_xpath_ids(xpath).split(","):
        if node_id:
            nodes.append(self.get_node_factory().create(node_id))
    return nodes
Finds another node by XPath originating at the current node.
def grid_select(self, grid, clear_selection=True):
    """Apply this selection's blocks, rows, columns and cells to *grid*.

    When *clear_selection* is True, the grid's current selection is
    cleared first; every subsequent call adds to the selection.
    """
    if clear_selection:
        grid.ClearSelection()

    for top_left, bottom_right in zip(self.block_tl, self.block_br):
        grid.SelectBlock(top_left[0], top_left[1],
                         bottom_right[0], bottom_right[1],
                         addToSelected=True)

    for row in self.rows:
        grid.SelectRow(row, addToSelected=True)

    for col in self.cols:
        grid.SelectCol(col, addToSelected=True)

    for cell_row, cell_col in self.cells:
        grid.SelectBlock(cell_row, cell_col, cell_row, cell_col,
                         addToSelected=True)
Selects cells of grid with selection content
def getSuccessors(jobGraph, alreadySeenSuccessors, jobStore):
    """Walk the job graph from *jobGraph* and collect successor IDs.

    IDs already in *alreadySeenSuccessors* are skipped and not
    traversed; every newly found ID is added both to that set and to
    the returned set.
    """
    found = set()
    pending = [jobGraph]
    while pending:
        graph = pending.pop()
        for successorList in graph.stack:
            for successor in successorList:
                jobID = successor.jobStoreID
                if jobID in alreadySeenSuccessors:
                    continue
                found.add(jobID)
                alreadySeenSuccessors.add(jobID)
                # Only recurse into successors that still exist in the store.
                if jobStore.exists(jobID):
                    pending.append(jobStore.load(jobID))
    return found
Gets successors of the given job by walking the job graph recursively. Any successor in alreadySeenSuccessors is ignored and not traversed. Returns the set of found successors. This set is added to alreadySeenSuccessors.
def compare_version(a, b):
    """Compare two version number strings of the form W.X.Y.Z.

    The numbers are compared most-significant to least-significant.
    For example, 12.345.67.89 > 2.987.88.99.

    Args:
        a: First version number string to compare
        b: Second version number string to compare

    Returns:
        0 if the numbers are identical, a positive number if 'a' is
        larger, and a negative number if 'b' is larger.
    """
    # string.split() and cmp() are Python-2-only; use the str method and
    # the portable (x > y) - (x < y) equivalent instead.
    aa = a.split(".")
    bb = b.split(".")
    for i in range(4):
        if aa[i] != bb[i]:
            x, y = int(aa[i]), int(bb[i])
            return (x > y) - (x < y)
    return 0
Compare two version number strings of the form W.X.Y.Z. The numbers are compared most-significant to least-significant. For example, 12.345.67.89 > 2.987.88.99. Args: a: First version number string to compare b: Second version number string to compare Returns: 0 if the numbers are identical, a positive number if 'a' is larger, and a negative number if 'b' is larger.
def sort_filenames(filenames):
    """Sort file paths by basename only, ignoring their directory parts."""
    return sorted(filenames, key=os.path.basename)
sort a list of files by filename only, ignoring the directory names
def save_ready(self, service_name):
    """Record that the given service is now data_ready.

    Reloads the ready-file from disk, adds *service_name* to the
    in-memory set, and persists the updated set back to the file.
    """
    self._load_ready_file()
    self._ready.add(service_name)
    self._save_ready_file()
Save an indicator that the given service is now data_ready.
def _invite(self, name, method, email, uuid, event, password=""):
    """Create, store and send a new enrollment invitation.

    Builds an open enrollment record for *email*, persists it, mails
    the invitation and notifies the requesting client (*uuid*).
    """
    enrollment = objectmodels['enrollment']({
        'uuid': std_uuid(),
        'status': 'Open',
        'name': name,
        'method': method,
        'email': email,
        'password': password,
        'timestamp': std_now()
    })
    enrollment.save()
    self.log('Enrollment stored', lvl=debug)

    self._send_invitation(enrollment, event)

    reply = {
        'component': 'hfos.enrol.enrolmanager',
        'action': 'invite',
        'data': [True, email]
    }
    self.fireEvent(send(uuid, reply))
Actually invite a given user
def bin_hex_type(arg):
    """argparse type for binary data encoded in hex.

    Accepts colon-separated byte pairs ("de:ad"), backslash-x escaped
    pairs ("\\xde\\xad"), or a bare hex string; returns the decoded
    bytes or raises ArgumentTypeError for invalid data.
    """
    if re.match(r'^[a-f0-9]{2}(:[a-f0-9]{2})+$', arg, re.I):
        arg = arg.replace(':', '')
    elif re.match(r'^(\\x[a-f0-9]{2})+$', arg, re.I):
        arg = arg.replace('\\x', '')
    try:
        return binascii.a2b_hex(arg)
    except (binascii.Error, TypeError):
        raise argparse.ArgumentTypeError("{0} is invalid hex data".format(repr(arg)))
An argparse type representing binary data encoded in hex.
def makedirs(p):
    """Create directory *p* with the configured upload permissions.

    Safe against the race where several processes create the same
    directory concurrently: the OSError is swallowed when the path
    turns out to be a directory after all.
    """
    try:
        os.makedirs(p, settings.FILE_UPLOAD_PERMISSIONS)
    except OSError:
        if os.path.isdir(p):
            # Another process created it first; that's fine.
            return
        raise
A makedirs that avoids race conditions when multiple processes attempt to create the same directory.
def output(self):
    """Return the ElasticsearchTarget representing the inserted dataset.

    Normally you don't override this.
    """
    target_kwargs = dict(
        host=self.host,
        port=self.port,
        http_auth=self.http_auth,
        index=self.index,
        doc_type=self.doc_type,
        update_id=self.update_id(),
        marker_index_hist_size=self.marker_index_hist_size,
        timeout=self.timeout,
        extra_elasticsearch_args=self.extra_elasticsearch_args,
    )
    return ElasticsearchTarget(**target_kwargs)
Returns a ElasticsearchTarget representing the inserted dataset. Normally you don't override this.
def regexpExec(self, content):
    """Check whether the compiled regular expression generates *content*."""
    return libxml2mod.xmlRegexpExec(self._o, content)
Check if the regular expression generates the value
def path(self):
    """Return the full, normalized path of this file-system object.

    The path is assembled by walking the parent_dir chain up to the
    root and joining the collected names with the filesystem's
    separator, then normalizing via the fake filesystem.
    """
    names = []
    obj = self
    while obj:
        names.insert(0, obj.name)
        obj = obj.parent_dir
    sep = self.filesystem._path_separator(self.name)
    if names[0] == sep:
        # The root entry's name is the separator itself; drop it so it
        # is not duplicated when joining.
        names.pop(0)
        dir_path = sep.join(names)
        # A two-char first component ending in ':' (e.g. 'C:') looks
        # like a Windows drive; only prefix the separator when there is
        # no drive letter.
        is_drive = names and len(names[0]) == 2 and names[0][1] == ':'
        if not is_drive:
            dir_path = sep + dir_path
    else:
        dir_path = sep.join(names)
    dir_path = self.filesystem.absnormpath(dir_path)
    return dir_path
Return the full path of the current object.
def _full_path(self, path_info): full_path = self.root + path_info if path.exists(full_path): return full_path else: for magic in self.magics: if path.exists(magic.new_path(full_path)): return magic.new_path(full_path) else: return full_path
Return the full path from which to read.
def validate(src, **kwargs):
    """Validate an OCRD-ZIP.

    SRC must exist and be an OCRD-ZIP — either a ZIP file or a
    directory. Prints the validation report and exits with status 1
    when the report is not valid.
    """
    report = OcrdZipValidator(Resolver(), src).validate(**kwargs)
    print(report)
    if not report.is_valid:
        sys.exit(1)
Validate OCRD-ZIP SRC must exist and be an OCRD-ZIP, either a ZIP file or a directory.
def create_role(*_, **kwargs):
    """Create a user role from the given keyword arguments."""
    click.echo(green('\nCreating new role:'))
    click.echo(green('-' * 40))

    with get_app().app_context():
        role = Role(**kwargs)
        outcome = role_service.save(role)
        if not isinstance(outcome, Role):
            print_validation_errors(outcome)
        click.echo(green('Created: ') + str(role) + '\n')
Create user role
def spawn(f):
    """Wrap *f* for parallel acquisition-function evaluation.

    The returned worker applies f to x, sends the result through the
    given pipe end and closes it.
    """
    def worker(pipe, x):
        pipe.send(f(x))
        pipe.close()
    return worker
Function for parallel evaluation of the acquisition function
def prj_add_user(self, *args, **kwargs):
    """Add more users to the current project.

    Opens a user-adder dialog, creates a tree item for every selected
    user and saves the project.

    :returns: None
    :rtype: None
    :raises: None
    """
    if not self.cur_prj:
        return
    dialog = UserAdderDialog(project=self.cur_prj)
    dialog.exec_()
    for user in dialog.users:
        data = djitemdata.UserItemData(user)
        treemodel.TreeItem(data, self.prj_user_model.root)
    self.cur_prj.save()
Add more users to the project. :returns: None :rtype: None :raises: None
def blend(self, blend_recipe, join_base, join_blend):
    """Blend *blend_recipe* into the base recipe via an inner join.

    Records the blend recipe, its join type and the join criteria,
    marks the recipe dirty and returns it for chaining.
    """
    assert isinstance(blend_recipe, Recipe)

    self.blend_recipes.append(blend_recipe)
    self.blend_types.append('inner')
    self.blend_criteria.append((join_base, join_blend))

    self.dirty = True
    return self.recipe
Blend a recipe into the base recipe. This performs an inner join of the blend_recipe to the base recipe's SQL.
def _formatVals(self, val_list): vals = [] for (name, val) in val_list: if val is not None: if isinstance(val, float): vals.append("%s.value %f" % (name, val)) else: vals.append("%s.value %s" % (name, val)) else: vals.append("%s.value U" % (name,)) return "\n".join(vals)
Formats value list from Munin Graph and returns multi-line value entries for the plugin fetch cycle. @param val_list: List of name-value pairs. @return: Multi-line text.
def example_generator(all_files, urls_path, sum_token):
    """Generate story/summary examples from highlight-annotated files.

    Each yielded string is the story sentences followed by the
    "@highlight" sentences (the summary), joined by " <summary> " when
    *sum_token* is True, otherwise by a single space.
    """
    def fix_run_on_sents(line):
        # Ensure each non-empty, non-marker line ends in terminal
        # punctuation so sentences do not run together when joined.
        if u"@highlight" in line:
            return line
        if not line:
            return line
        if line[-1] in END_TOKENS:
            return line
        return line + u"."
    filelist = example_splits(urls_path, all_files)
    story_summary_split_token = u" <summary> " if sum_token else " "
    for story_file in filelist:
        story = []
        summary = []
        reading_highlights = False
        for line in tf.gfile.Open(story_file, "rb"):
            line = text_encoder.to_unicode_utf8(line.strip())
            line = fix_run_on_sents(line)
            if not line:
                continue
            elif line.startswith(u"@highlight"):
                # A highlight marker before any story text means the
                # file is malformed; skip the rest of it.
                if not story:
                    break
                reading_highlights = True
            elif reading_highlights:
                summary.append(line)
            else:
                story.append(line)
        # Skip files lacking either a story or a summary.
        if (not story) or not summary:
            continue
        yield " ".join(story) + story_summary_split_token + " ".join(summary)
Generate examples.
def get(self, request):
    """Handle an HTTP GET request.

    Renders the template with a context built from generate_sections()
    (paginated 25 per page) and generate_page_title(); non-integer or
    out-of-range page numbers fall back to the first/last page.
    """
    paginator = Paginator(self.generate_sections(), 25)
    page_number = request.GET.get('page')
    try:
        sections = paginator.page(page_number)
    except PageNotAnInteger:
        sections = paginator.page(1)
    except EmptyPage:
        sections = paginator.page(paginator.num_pages)

    context = {
        'sections': sections,
        'page_title': self.generate_page_title(),
        'browse_type': self.browse_type,
    }
    return render(request, self.template_path, context)
Handle HTTP GET request. Returns template and context from generate_page_title and generate_sections to populate template.
def get_matching(self, source_id):
    """Return a matching target proxy for the given source ID.

    Looks the value up through the accessor; when found, wraps it in a
    data traversal proxy built by the registered proxy factory.
    Returns None when no value matches.
    """
    value = self._accessor.get_by_id(source_id)
    # Fixed non-idiomatic "not value is None" (PEP 8 E714).
    if value is not None:
        reg = get_current_registry()
        prx_fac = reg.getUtility(IDataTraversalProxyFactory)
        prx = prx_fac.make_proxy(value, self._accessor,
                                 self.relationship_direction,
                                 self.relation_operation)
    else:
        prx = None
    return prx
Returns a matching target object for the given source ID.
def float16_activations_var_getter(getter, *args, **kwargs):
    """A custom getter for float32 parameters and float16 activations.

    This function ensures the following:
    1. All variables requested with type fp16 are stored as type fp32.
    2. All variables requested with type fp32 are returned as type fp16.

    See https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/
    #training_tensorflow for more information on this strategy.

    Args:
      getter: custom getter
      *args: arguments
      **kwargs: keyword arguments

    Returns:
      variables with the correct dtype.

    Raises:
      KeyError: if "dtype" is not provided as a kwarg.
    """
    requested_dtype = kwargs["dtype"]
    if requested_dtype == tf.float16:
        # Store fp16-requested variables as fp32 master weights.
        kwargs["dtype"] = tf.float32
    if requested_dtype == tf.float32:
        # Serve fp32 requests as fp16 activations.
        requested_dtype = tf.float16
    var = getter(*args, **kwargs)
    # Reconcile the stored dtype with the (possibly swapped) requested one.
    if var.dtype.base_dtype != requested_dtype:
        var = tf.cast(var, requested_dtype)
    return var
A custom getter function for float32 parameters and float16 activations. This function ensures the following: 1. All variables requested with type fp16 are stored as type fp32. 2. All variables requested with type fp32 are returned as type fp16. See https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/ #training_tensorflow for more information on this strategy. Args: getter: custom getter *args: arguments **kwargs: keyword arguments Returns: variables with the correct dtype. Raises: KeyError: if "dtype" is not provided as a kwarg.
def unicode(self, s, encoding=None):
    """Convert *s* to unicode via the underlying to_unicode attribute.

    Unlike Python's builtin unicode(), already-unicode input is
    accepted: it is simply copied. *encoding* is forwarded to
    to_unicode and defaults to None.
    """
    # Python 2: the builtin `unicode` is reached here, not this method.
    return unicode(s) if isinstance(s, unicode) else self.to_unicode(s, encoding)
Convert a string to unicode using the given encoding, and return it. This function uses the underlying to_unicode attribute. Arguments: s: a basestring instance to convert to unicode. Unlike Python's built-in unicode() function, it is okay to pass unicode strings to this function. (Passing a unicode string to Python's unicode() with the encoding argument throws the error, "TypeError: decoding Unicode is not supported.") encoding: the encoding to pass to the to_unicode attribute. Defaults to None.
def fetch_data(self):
    """Fetch the latest data from Fido.

    Logs in, resolves the account number, then collects the account
    balance plus Fido-dollar and usage figures for every phone number.
    """
    yield from self._get_httpsession()
    yield from self._post_login_page()
    token_uuid = yield from self._get_token()
    account_number = yield from self._get_account_number(*token_uuid)
    self._phone_numbers = yield from self._list_phone_numbers(account_number)
    self._data['balance'] = yield from self._get_balance(account_number)
    for number in self._phone_numbers:
        fido_dollar = yield from self._get_fido_dollar(account_number, number)
        self._data[number] = {'fido_dollar': fido_dollar}
    for number in self._phone_numbers:
        usage = yield from self._get_usage(account_number, number)
        self._data[number].update(usage)
Fetch the latest data from Fido.
def main_hrun():
    """Parse command-line options and run the http(s) test cases."""
    parser = argparse.ArgumentParser(
        description="Tools for http(s) test. Base on rtsf.")
    parser.add_argument(
        '--log-level', default='INFO',
        help="Specify logging level, default is INFO.")
    parser.add_argument(
        '--log-file',
        help="Write logs to specified file path.")
    parser.add_argument(
        'case_file',
        help="yaml testcase file")

    color_print("httpdriver {}".format(__version__), "GREEN")
    args = parser.parse_args()
    logger.setup_logger(args.log_level, args.log_file)

    runner = TestRunner(runner=HttpDriver).run(args.case_file)
    html_report = runner.gen_html_report()
    color_print("report: {}".format(html_report))
parse command line options and run commands.
def _get_encoding_opt(synchronizer, extra_opts, default): encoding = default if extra_opts and "encoding" in extra_opts: encoding = extra_opts.get("encoding") if encoding: encoding = codecs.lookup(encoding).name return encoding or None
Helper to figure out encoding setting inside constructors.
def add_component(self, component, temporary=False):
    """Register a component with ComponentRegistry.

    The component path is normalized and stored under the tile's name —
    permanently in the key-value store by default, or RAM-only for this
    session when *temporary* is True.

    Args:
        component (str): The path to a component that should be registered.
        temporary (bool): Only register the component in memory for the
            duration of this program invocation.
    """
    tile = IOTile(component)
    absolute = os.path.normpath(os.path.abspath(component))

    # Note: deliberately only the literal True selects the overlay.
    if temporary is True:
        self._component_overlays[tile.name] = absolute
    else:
        self.kvstore.set(tile.name, absolute)
Register a component with ComponentRegistry. Component must be a buildable object with a module_settings.json file that describes its name and the domain that it is part of. By default, this component is saved in the permanent registry associated with this environment and will remain registered for future CoreTools invocations. If you only want this component to be temporarily registered during this program's session, you can pass temporary=True and the component will be stored in RAM only, not persisted to the underlying key-value store. Args: component (str): The path to a component that should be registered. temporary (bool): Optional flag to only temporarily register the component for the duration of this program invocation.
def __query(domain, limit=100):
    """Query pdns.cert.at for *domain* via the bundled whois.sh script.

    Calling whois through the shell script works around errors raised
    when invoking whois directly with subprocess. Ugly, but works.

    :param domain: The domain pdns is queried with.
    :type domain: str
    :param limit: Maximum number of results
    :type limit: int
    :returns: str -- Console output from the whois call.
    """
    script = os.path.join(os.path.dirname(__file__), 'whois.sh')
    command = ['{}'.format(script), '--limit {} {}'.format(limit, domain)]
    return check_output(command, universal_newlines=True)
Using the shell script to query pdns.cert.at is a hack, but python raises an error every time using subprocess functions to call whois. So this hack is avoiding calling whois directly. Ugly, but works. :param domain: The domain pdns is queried with. :type domain: str :param limit: Maximum number of results :type limit: int :returns: str -- Console output from whois call. :rtype: str
def print_modified_files(opts, anchors):
    """Print which of the examined files contain modifications.

    :param anchors: Dictionary mapping file path strings to dictionaries
        containing AnchorHub tag/generated header key-value pairs
    """
    lines = ["Files with modifications:"]
    lines.extend(" " + strip_prefix(fp, opts.abs_input) for fp in anchors)
    lines.append("--------------------")
    lines.append("{} total\n".format(len(anchors)))
    print("\n".join(lines))
Prints out which files were modified amongst those looked at :param anchors: Dictionary mapping file path strings to dictionaries containing AnchorHub tag/generated header key-value pairs
def remove_server_data(server_id):
    """Remove a server from the stored server data.

    Args:
        server_id (int): The server to remove from the server data
    """
    logger.debug("Removing server from serverdata")
    data = datatools.get_data()
    servers = data["discord"]["servers"]
    if server_id in servers:
        servers.pop(server_id)
        datatools.write_data(data)
Remove a server from the server data Args: server_id (int): The server to remove from the server data
def _set_class_parser(self, init_parser, methods_to_parse, cls):
    """Create the complete argument parser for the decorated class.

    Args:
        init_parser: argument parser for the __init__ method or None
        methods_to_parse: dict of method name pointing to their
            associated argument parser
        cls: the class being decorated; gains a 'parser' attribute
    """
    parents = [init_parser] if init_parser else []
    top_level_parser = argparse.ArgumentParser(
        description=self._description or cls.__doc__,
        parents=parents,
        add_help=False,
        conflict_handler="resolve")
    top_level_parser.add_argument("-h", "--help", action=FullHelpAction,
                                  help="Display this help message")

    parser_to_method = self._add_sub_parsers(top_level_parser,
                                             methods_to_parse,
                                             cls.__name__)
    if init_parser:
        # The top-level parser handles __init__ arguments itself.
        parser_to_method["__init__"] = "__init__"

    top_level_parser.call = self._get_parser_call_method(parser_to_method)
    cls.parser = top_level_parser
Creates the complete argument parser for the decorated class. Args: init_parser: argument parser for the __init__ method or None methods_to_parse: dict of method name pointing to their associated argument parser cls: the class we are decorating Returns: The decorated class with an added attribute 'parser'
def csv(self):
    """Return the security rules as CSV text.

    Columns: id, name, description, rules_direction, rules_ip_protocol,
    rules_from_port, rules_to_port, rules_grants_group_id,
    rules_grants_name, rules_grants_cidr_ip, rules_description.

    Returns:
        str
    """
    fieldnames = ['id', 'name', 'description', 'rules_direction',
                  'rules_ip_protocol', 'rules_from_port', 'rules_to_port',
                  'rules_grants_group_id', 'rules_grants_name',
                  'rules_grants_cidr_ip', 'rules_description']
    buf = StringIO.StringIO()
    writer = csv.DictWriter(buf, fieldnames=fieldnames)
    writer.writeheader()
    for rule in self.rules:
        writer.writerow(rule.as_dict())
    return buf.getvalue().strip()
Returns the security rules as a CSV. CSV format: - id - name - description - rules_direction - rules_ip_protocol - rules_from_port - rules_to_port - rules_grants_group_id - rules_grants_name - rules_grants_cidr_ip - rules_description Returns: str
def wiki_versions_list(self, page_id, updater_id):
    """Return a list of wiki page versions.

    Parameters:
        page_id (int): ID of the wiki page whose versions are listed.
        updater_id (int): Restrict results to versions by this updater.
    """
    params = {
        # BUG FIX: was 'earch[updater_id]' — the missing leading 's'
        # made the API silently ignore the updater filter.
        'search[updater_id]': updater_id,
        'search[wiki_page_id]': page_id
    }
    return self._get('wiki_page_versions.json', params)
Return a list of wiki page versions. Parameters: page_id (int): updater_id (int):
def download_with_progress(url, chunk_size, **progress_kwargs):
    """Download streaming data from a URL with a terminal progress bar.

    Parameters
    ----------
    url : str
        A URL that can be understood by ``requests.get``.
    chunk_size : int
        Number of bytes to read at a time from requests.
    **progress_kwargs
        Forwarded to click.progressbar.

    Returns
    -------
    data : BytesIO
        A BytesIO containing the downloaded data, rewound to offset 0.
    """
    response = requests.get(url, stream=True)
    response.raise_for_status()

    total = int(response.headers['content-length'])
    buf = BytesIO()
    with progressbar(length=total, **progress_kwargs) as bar:
        for chunk in response.iter_content(chunk_size=chunk_size):
            buf.write(chunk)
            bar.update(len(chunk))
    buf.seek(0)
    return buf
Download streaming data from a URL, printing progress information to the terminal. Parameters ---------- url : str A URL that can be understood by ``requests.get``. chunk_size : int Number of bytes to read at a time from requests. **progress_kwargs Forwarded to click.progressbar. Returns ------- data : BytesIO A BytesIO containing the downloaded data.