code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def _write_multiplicons(self, filename): mhead = '\t'.join(['id', 'genome_x', 'list_x', 'parent', 'genome_y', 'list_y', 'level', 'number_of_anchorpoints', 'profile_length', 'begin_x', 'end_x', 'begin_y', 'end_y', 'is_redundant']) with open(filename, 'w') as fhandle: fhandle.write(mhead + '\n') for mrow in self.multiplicons: fhandle.write('\t'.join([str(e) for e in mrow]) + '\n')
Write multiplicons to file. - filename, (str) location of output file
def _parse_prefix_query(self, query_str): sp = smart_parsing.PrefixSmartParser() query = sp.parse(query_str) return query
Parse a smart search query for prefixes This is a helper function to smart_search_prefix for easier unit testing of the parser.
def get_history(self): if hasattr(self, '_history'): return self._history try: self._history = APICallDayHistory.objects.get( user=self.user, creation_date=now().date()) except APICallDayHistory.DoesNotExist: self._history = APICallDayHistory(user=self.user) self._history.amount_api_calls = 0 return self._history
Returns the history from cache or DB or a newly created one.
def to_representation(self, value): value = apply_subfield_projection(self, value, deep=True) return super().to_representation(value)
Project outgoing native value.
def find_one(self, collection, selector={}): for _id, doc in self.collection_data.data.get(collection, {}).items(): doc.update({'_id': _id}) if selector == {}: return doc for key, value in selector.items(): if key in doc and doc[key] == value: return doc return None
Return one item from a collection Arguments: collection - collection to search Keyword Arguments: selector - the query (default returns first item found)
def get_leading_spaces(data): spaces = '' m = re.match(r'^(\s*)', data) if m: spaces = m.group(1) return spaces
Get the leading space of a string if it is not empty :type data: str
def check_link_and_get_info(self, target_id=0xFF): for _ in range(0, 5): if self._update_info(target_id): if self._in_boot_cb: self._in_boot_cb.call(True, self.targets[ target_id].protocol_version) if self._info_cb: self._info_cb.call(self.targets[target_id]) return True return False
Try to get a connection with the bootloader by requesting info 5 times. This let roughly 10 seconds to boot the copter ...
def get_urls(self): urls = super(LayoutAdmin, self).get_urls() my_urls = patterns( '', url( r'^placeholder_data/(?P<id>\d+)/$', self.admin_site.admin_view(self.placeholder_data_view), name='layout_placeholder_data', ) ) return my_urls + urls
Add ``layout_placeholder_data`` URL.
def check_for_invalid_columns( problems: List, table: str, df: DataFrame ) -> List: r = cs.GTFS_REF valid_columns = r.loc[r["table"] == table, "column"].values for col in df.columns: if col not in valid_columns: problems.append( ["warning", f"Unrecognized column {col}", table, []] ) return problems
Check for invalid columns in the given GTFS DataFrame. Parameters ---------- problems : list A four-tuple containing 1. A problem type (string) equal to ``'error'`` or ``'warning'``; ``'error'`` means the GTFS is violated; ``'warning'`` means there is a problem but it is not a GTFS violation 2. A message (string) that describes the problem 3. A GTFS table name, e.g. ``'routes'``, in which the problem occurs 4. A list of rows (integers) of the table's DataFrame where the problem occurs table : string Name of a GTFS table df : DataFrame The GTFS table corresponding to ``table`` Returns ------- list The ``problems`` list extended as follows. Check whether the DataFrame contains extra columns not in the GTFS and append to the problems list one warning for each extra column.
def ssad(patch, cols, splits): sad_results = sad(patch, cols, splits, clean=False) for i, sad_result in enumerate(sad_results): if i == 0: fulldf = sad_result[1] fulldf.columns = ['spp', '0'] else: fulldf[str(i)] = sad_result[1]['y'] result_list = [] for _, row in fulldf.iterrows(): row_values_array = np.array(row[1:], dtype=float) result_list.append((row[0], pd.DataFrame({'y': row_values_array}))) return result_list
Calculates an empirical intra-specific spatial abundance distribution Parameters ---------- {0} Returns ------- {1} Result has one column giving the individuals of species in each subplot. Notes ----- {2} {3} Examples -------- {4} >>> # Get the spatial abundance distribution for all species for each of >>> # the cells in the ANBO plot >>> all_spp_ssads = meco.empirical.ssad(pat, cols='spp_col:spp; count_col:count', splits='row:4; column:4') >>> # Convert to dict for easy searching >>> all_ssads_dict = dict(all_spp_ssads) >>> # Look up the spatial abundance distribution for 'grass' >>> all_ssads_dict['grass'] y 0 42 1 20 2 60 3 60 4 88 5 86 6 20 7 0 8 110 9 12 10 115 11 180 12 160 13 120 14 26 15 11 >>> # Each value in 'y' gives the abundance of grass in one of the 16 cells See http://www.macroeco.org/tutorial_macroeco.html for additional examples and explanation
def handle_update(self, options): username = options["username"] try: user = User.objects.get(username=username) except User.DoesNotExist: raise CommandError("User %s does not exist" % username) if options["email"]: user.email = options["email"] if options["active"] in [True, False]: user.is_active = options["active"] if options["staff"] in [True, False]: user.is_staff = options["staff"] if options["superuser"] in [True, False]: user.is_superuser = options["superuser"] user.save()
Update existing user
def rpc_stop(server_state): rpc_srv = server_state['rpc'] if rpc_srv is not None: log.info("Shutting down RPC") rpc_srv.stop_server() rpc_srv.join() log.info("RPC joined") else: log.info("RPC already joined") server_state['rpc'] = None
Stop the global RPC server thread
def update_nanopubstore_start_dt(url: str, start_dt: str): hostname = urllib.parse.urlsplit(url)[1] start_dates_doc = state_mgmt.get(start_dates_doc_key) if not start_dates_doc: start_dates_doc = { "_key": start_dates_doc_key, "start_dates": [{"nanopubstore": hostname, "start_dt": start_dt}], } state_mgmt.insert(start_dates_doc) else: for idx, start_date in enumerate(start_dates_doc["start_dates"]): if start_date["nanopubstore"] == hostname: start_dates_doc["start_dates"][idx]["start_dt"] = start_dt break else: start_dates_doc["start_dates"].append( {"nanopubstore": hostname, "start_dt": start_dt} ) state_mgmt.replace(start_dates_doc)
Add nanopubstore start_dt to belapi.state_mgmt collection Args: url: url of nanopubstore start_dt: datetime of last query against nanopubstore for new ID's
def get_ordered_types(self): types = self.get_types() types_arr = np.array(types) poss = [self.chrPos, self.startPos, self.stopPos] if self.strandPos is not None: poss.append(self.strandPos) if self.otherPos: for o in self.otherPos: poss.append(o[0]) idx_sort = np.array(poss).argsort() return types_arr[idx_sort].tolist()
Returns the ordered list of data types :return: list of data types
def create_simulated_env( output_dir, grayscale, resize_width_factor, resize_height_factor, frame_stack_size, generative_model, generative_model_params, random_starts=True, which_epoch_data="last", **other_hparams ): a_bit_risky_defaults = { "game": "pong", "real_batch_size": 1, "rl_env_max_episode_steps": -1, "max_num_noops": 0 } for key in a_bit_risky_defaults: if key not in other_hparams: other_hparams[key] = a_bit_risky_defaults[key] hparams = hparam.HParams( grayscale=grayscale, resize_width_factor=resize_width_factor, resize_height_factor=resize_height_factor, frame_stack_size=frame_stack_size, generative_model=generative_model, generative_model_params=generative_model_params, **other_hparams ) return load_data_and_make_simulated_env( output_dir, wm_dir=None, hparams=hparams, which_epoch_data=which_epoch_data, random_starts=random_starts)
Create SimulatedEnv with minimal subset of hparams.
def check_signature(params): if 'id' in params: try: id_int = int(params['id'][0]) except: my_log_message(args, syslog.LOG_INFO, "Non-numerical client id (%s) in request." % (params['id'][0])) return False, None key = client_ids.get(id_int) if key: if 'h' in params: sig = params['h'][0] good_sig = make_signature(params, key) if sig == good_sig: return True, key else: my_log_message(args, syslog.LOG_INFO, "Bad signature from client id '%i' (%s, expected %s)." \ % (id_int, sig, good_sig)) else: my_log_message(args, syslog.LOG_INFO, "Client id (%i) but no HMAC in request." % (id_int)) return False, key else: my_log_message(args, syslog.LOG_INFO, "Unknown client id '%i'" % (id_int)) return False, None return True, None
Verify the signature of the parameters in an OTP v2.0 verify request. Returns ValResultBool, Key
def remove_all_connections(provider_id): provider = get_provider_or_404(provider_id) ctx = dict(provider=provider.name, user=current_user) deleted = _datastore.delete_connections(user_id=current_user.get_id(), provider_id=provider_id) if deleted: after_this_request(_commit) msg = ('All connections to %s removed' % provider.name, 'info') connection_removed.send(current_app._get_current_object(), user=current_user._get_current_object(), provider_id=provider_id) else: msg = ('Unable to remove connection to %(provider)s' % ctx, 'error') do_flash(*msg) return redirect(request.referrer)
Remove all connections for the authenticated user to the specified provider
def convert_general(value): if isinstance(value, bool): return "true" if value else "false" elif isinstance(value, list): value = [convert_general(item) for item in value] value = convert_to_imgur_list(value) elif isinstance(value, Integral): return str(value) elif 'pyimgur' in str(type(value)): return str(getattr(value, 'id', value)) return value
Take a python object and convert it to the format Imgur expects.
def lifetimes(self): r return -self._lag / np.log(np.diag(self.transition_matrix))
r""" Lifetimes of states of the hidden transition matrix Returns ------- l : ndarray(nstates) state lifetimes in units of the input trajectory time step, defined by :math:`-tau / ln | p_{ii} |, i = 1,...,nstates`, where :math:`p_{ii}` are the diagonal entries of the hidden transition matrix.
def OnLabelSizeIntCtrl(self, event): self.attrs["labelsize"] = event.GetValue() post_command_event(self, self.DrawChartMsg)
Label size IntCtrl event handler
def set_cols_align(self, array): self._check_row_size(array) self._align = array return self
Set the desired columns alignment - the elements of the array should be either "l", "c" or "r": * "l": column flushed left * "c": column centered * "r": column flushed right
def set_permission(permission, value, app): script = app_url = 'app://' + app run_marionette_script(script % (permission, app_url, app_url, value), True)
Set a permission for the specified app Value should be 'deny' or 'allow'
def update_redis(project: str, environment: str, feature: str, state: str) \ -> None: try: hosts = RedisWrapper.connection_string_parser( os.environ.get('REDIS_HOSTS')) except RuntimeError as ex: LOG.error(ex) sys.exit(1) for host in hosts: LOG.info("connecting to %s:%s", host.host, host.port) try: if valid_state(state): new_state = state.lower() redis = RedisWrapper( host.host, host.port, project, environment ) redis.update_flag_record(new_state, feature) create_file(project, environment, feature, new_state) LOG.info("%s was successfully updated.", feature) else: raise Exception('Invalid state: {0}, -s needs \ to be either on or off.'.format(state)) except KeyError as ex: LOG.error("unable to update %s. Exception: %s", host.host, ex) sys.exit(1)
Update redis state for a feature flag. :param project: LaunchDarkly project key. :param environment: LaunchDarkly environment key. :param feature: LaunchDarkly feature key. :param state: State for a feature flag.
def wait_until_stale(self, timeout=None): timeout = timeout if timeout is not None else self.driver_wrapper.timeout def wait(): WebDriverWait(self.driver, timeout).until(EC.staleness_of(self.element)) return self return self.execute_and_handle_webelement_exceptions(wait, 'wait for staleness')
Waits for the element to go stale in the DOM @type timeout: int @param timeout: override for default timeout @rtype: WebElementWrapper @return: Self
def _determine_leftpad(column, point_place): ndigits_left = [_find_point(x) for x in column] return [max((point_place - 1) - x, 0) for x in ndigits_left]
Find how many spaces to put before a column of numbers so that all the decimal points line up This function takes a column of decimal numbers, and returns a vector containing the number of spaces to place before each number so that (when possible) the decimal points line up. Parameters ---------- column : list Numbers that will be printed as a column point_place : int Number of the character column to put the decimal point
def input_file(self, _container): p = local.path(_container) if set_input_container(p, CFG): return p = find_hash(CFG["container"]["known"].value, container) if set_input_container(p, CFG): return raise ValueError("The path '{0}' does not exist.".format(p))
Find the input path of a uchroot container.
def __get_smtp(self): use_tls = self.config['shutit.core.alerting.emailer.use_tls'] if use_tls: smtp = SMTP(self.config['shutit.core.alerting.emailer.smtp_server'], self.config['shutit.core.alerting.emailer.smtp_port']) smtp.starttls() else: smtp = SMTP_SSL(self.config['shutit.core.alerting.emailer.smtp_server'], self.config['shutit.core.alerting.emailer.smtp_port']) return smtp
Return the appropraite smtplib depending on wherther we're using TLS
def doDynamicValidation(self, request: Request): self.execute_hook(NodeHooks.PRE_DYNAMIC_VALIDATION, request=request) ledger_id, seq_no = self.seqNoDB.get_by_payload_digest(request.payload_digest) if ledger_id is not None and seq_no is not None: raise SuspiciousPrePrepare('Trying to order already ordered request') ledger = self.getLedger(self.ledger_id_for_request(request)) for txn in ledger.uncommittedTxns: if get_payload_digest(txn) == request.payload_digest: raise SuspiciousPrePrepare('Trying to order already ordered request') operation = request.operation req_handler = self.get_req_handler(txn_type=operation[TXN_TYPE]) req_handler.validate(request) self.execute_hook(NodeHooks.POST_DYNAMIC_VALIDATION, request=request)
State based validation
def parse_http_response(http_response: HttpResponse) -> 'environ.Response': try: response = environ.Response.deserialize(http_response.json()) except Exception as error: response = environ.Response().fail( code='INVALID_REMOTE_RESPONSE', error=error, message='Invalid HTTP response from remote connection' ).console( whitespace=1 ).response response.http_response = http_response return response
Returns a Cauldron response object parsed from the serialized JSON data specified in the http_response argument. If the response doesn't contain valid Cauldron response data, an error Cauldron response object is returned instead. :param http_response: The response object from an http request that contains a JSON serialized Cauldron response object as its body :return: The Cauldron response object for the given http response
def sg_sugar_func(func): r @wraps(func) def wrapper(tensor, **kwargs): out = func(tensor, tf.sg_opt(kwargs)) out._sugar = tf.sg_opt(func=func, arg=tf.sg_opt(kwargs)+sg_get_context(), prev=tensor) out.sg_reuse = types.MethodType(sg_reuse, out) return out return wrapper
r""" Decorates a function `func` so that it can be a sugar function. Sugar function can be used in a chainable manner. Args: func: function to decorate Returns: A sugar function.
def get_longest_table(url='https://www.openoffice.org/dev_docs/source/file_extensions.html', header=0): dfs = pd.read_html(url, header=header) return longest_table(dfs)
Retrieve the HTML tables from a URL and return the longest DataFrame found >>> get_longest_table('https://en.wikipedia.org/wiki/List_of_sovereign_states').columns Index(['Common and formal names', 'Membership within the UN System[a]', 'Sovereignty dispute[b]', 'Further information on status and recognition of sovereignty[d]'], dtype='object')
def is_visa_electron(n): n, length = str(n), len(str(n)) form = ['026', '508', '844', '913', '917'] if length == 16: if n[0] == '4': if ''.join(n[1:4]) in form or ''.join(n[1:6]) == '17500': return True return False
Checks if credit card number fits the visa electron format.
def dePeriod(arr): diff= arr-nu.roll(arr,1,axis=1) w= diff < -6. addto= nu.cumsum(w.astype(int),axis=1) return arr+_TWOPI*addto
make an array of periodic angles increase linearly
def encode(self, inputs, states=None, valid_length=None): return self.encoder(self.src_embed(inputs), states, valid_length)
Encode the input sequence. Parameters ---------- inputs : NDArray states : list of NDArrays or None, default None valid_length : NDArray or None, default None Returns ------- outputs : list Outputs of the encoder.
def vi_return_param(self, index): if index == 0: return self.mu0 elif index == 1: return np.log(self.sigma0)
Wrapper function for selecting appropriate latent variable for variational inference Parameters ---------- index : int 0 or 1 depending on which latent variable Returns ---------- The appropriate indexed parameter
def setData(self, type: str, data: str) -> None: type = normalize_type(type) if type in self.__data: del self.__data[type] self.__data[type] = data
Set data of type format. :arg str type: Data format of the data, like 'text/plain'.
def finish(self): self.lines.reverse() self._content = '\n'.join(self.lines) self.lines = None
Creates block of content with lines belonging to fragment.
def _ondim(self, dimension, valuestring): try: self.dimensions[dimension] = int(valuestring) except ValueError: self.dimensions[dimension] = 1 self.textctrls[dimension].SetValue(str(1)) if self.dimensions[dimension] < 1: self.dimensions[dimension] = 1 self.textctrls[dimension].SetValue(str(1))
Converts valuestring to int and assigns result to self.dim If there is an error (such as an empty valuestring) or if the value is < 1, the value 1 is assigned to self.dim Parameters ---------- dimension: int \tDimension that is to be updated. Must be in [1:4] valuestring: string \t A string that can be converted to an int
def relative(self): if not self.is_absolute(): raise ValueError("URL should be absolute") val = self._val._replace(scheme="", netloc="") return URL(val, encoded=True)
Return a relative part of the URL. scheme, user, password, host and port are removed.
def xrb_address_to_public_key(address): address = bytearray(address, 'ascii') if not address.startswith(b'xrb_'): raise ValueError('address does not start with xrb_: %s' % address) if len(address) != 64: raise ValueError('address must be 64 chars long: %s' % address) address = bytes(address) key_b32xrb = b'1111' + address[4:56] key_bytes = b32xrb_decode(key_b32xrb)[3:] checksum = address[56:] if b32xrb_encode(address_checksum(key_bytes)) != checksum: raise ValueError('invalid address, invalid checksum: %s' % address) return key_bytes
Convert an xrb address to public key in bytes >>> xrb_address_to_public_key('xrb_1e3i81r51e3i81r51e3i81r51e3i'\ '81r51e3i81r51e3i81r51e3imxssakuq') b'00000000000000000000000000000000' :param address: xrb address :type address: bytes :return: public key in bytes :rtype: bytes :raises ValueError:
def check_espeak(cls): try: from aeneas.textfile import TextFile from aeneas.textfile import TextFragment from aeneas.ttswrappers.espeakttswrapper import ESPEAKTTSWrapper text = u"From fairest creatures we desire increase," text_file = TextFile() text_file.add_fragment(TextFragment(language=u"eng", lines=[text], filtered_lines=[text])) handler, output_file_path = gf.tmp_file(suffix=u".wav") ESPEAKTTSWrapper().synthesize_multiple(text_file, output_file_path) gf.delete_file(handler, output_file_path) gf.print_success(u"espeak OK") return False except: pass gf.print_error(u"espeak ERROR") gf.print_info(u" Please make sure you have espeak installed correctly") gf.print_info(u" and that its path is in your PATH environment variable") gf.print_info(u" You might also want to check that the espeak-data directory") gf.print_info(u" is set up correctly, for example, it has the correct permissions") return True
Check whether ``espeak`` can be called. Return ``True`` on failure and ``False`` on success. :rtype: bool
def surrounding_nodes(self, position): n_node_index, n_node_position, n_node_error = self.nearest_node(position) if n_node_error == 0.0: index_mod = [] for i in range(len(n_node_index)): new_point = np.asarray(n_node_position) new_point[i] += 1.e-5*np.abs(new_point[i]) try: self.nearest_node(tuple(new_point)) index_mod.append(-1) except ValueError: index_mod.append(1) else: index_mod = [] for i in range(len(n_node_index)): if n_node_position[i] > position[i]: index_mod.append(-1) else: index_mod.append(1) return tuple(n_node_index), tuple(index_mod)
Returns nearest node indices and direction of opposite node. :param position: Position inside the mesh to search nearest node for as (x,y,z) :return: Nearest node indices and direction of opposite node.
def excluded_length(self): return sum([shot.length for shot in self.shots if Exclude.LENGTH in shot.flags or Exclude.TOTAL in shot.flags])
Surveyed length which does not count toward the included total
def validate(self, cmd, messages=None): valid = True args = [ arg for arg in cmd.args if arg is not None ] if self.nargs != len(args): valid = False if messages is not None: msg = 'Expected %d arguments, but received %d.' messages.append(msg % (self.nargs, len(args))) for defn, value in zip(self.args, cmd.args): if value is None: valid = False if messages is not None: messages.append('Argument "%s" is missing.' % defn.name) elif defn.validate(value, messages) is False: valid = False if len(cmd._unrecognized) > 0: valid = False if messages is not None: for name in cmd.unrecognized: messages.append('Argument "%s" is unrecognized.' % name) return valid
Returns True if the given Command is valid, False otherwise. Validation error messages are appended to an optional messages array.
def reread(self): logger.debug("Loading credentials from %s", os.path.abspath(self.creds_filename)) creds = {} try: with self.open_creds() as fp: creds = yaml.safe_load(fp) except IOError: logger.info("No credentials file found at %s", os.path.abspath(self.creds_filename)) except: logger.exception("Error loading credentials file") if creds != self.creds: self.creds = creds return True return False
Read and parse credentials file. If something goes wrong, log exception and continue.
def _generate_feed(self, feed_data): atom_feed = self._render_html('atom.xml', feed_data) feed_path = os.path.join(os.getcwd(), 'public', 'atom.xml') with codecs.open(feed_path, 'wb', 'utf-8') as f: f.write(atom_feed)
render feed file with data
def run(self, gates, n_qubits, *args, **kwargs): return self._run(gates, n_qubits, args, kwargs)
Run the backend.
def ones_comp_sum16(num1: int, num2: int) -> int: carry = 1 << 16 result = num1 + num2 return result if result < carry else result + 1 - carry
Calculates the 1's complement sum for 16-bit numbers. Args: num1: 16-bit number. num2: 16-bit number. Returns: The calculated result.
def to_struct(self, value): if self.str_format: return value.strftime(self.str_format) return value.strftime(self.default_format)
Cast `date` object to string.
def disable_component(self, component): if not isinstance(component, type): component = component.__class__ self.enabled[component] = False self.components[component] = None
Force a component to be disabled. :param component: can be a class or an instance.
def _process_thread(self, client): file_list = self.files if not file_list: return print('Filefinder to collect {0:d} items'.format(len(file_list))) flow_action = flows_pb2.FileFinderAction( action_type=flows_pb2.FileFinderAction.DOWNLOAD) flow_args = flows_pb2.FileFinderArgs( paths=file_list, action=flow_action,) flow_id = self._launch_flow(client, 'FileFinder', flow_args) self._await_flow(client, flow_id) collected_flow_data = self._download_files(client, flow_id) if collected_flow_data: print('{0!s}: Downloaded: {1:s}'.format(flow_id, collected_flow_data)) fqdn = client.data.os_info.fqdn.lower() self.state.output.append((fqdn, collected_flow_data))
Process a single client. Args: client: GRR client object to act on.
def AddExtraShapes(extra_shapes_txt, graph): print("Adding extra shapes from %s" % extra_shapes_txt) try: tmpdir = tempfile.mkdtemp() shutil.copy(extra_shapes_txt, os.path.join(tmpdir, 'shapes.txt')) loader = transitfeed.ShapeLoader(tmpdir) schedule = loader.Load() for shape in schedule.GetShapeList(): print("Adding extra shape: %s" % shape.shape_id) graph.AddPoly(ShapeToPoly(shape)) finally: if tmpdir: shutil.rmtree(tmpdir)
Add extra shapes into our input set by parsing them out of a GTFS-formatted shapes.txt file. Useful for manually adding lines to a shape file, since it's a pain to edit .shp files.
def extract_zip(self, suffix, path='.'): zip_fd, zip_fn = tempfile.mkstemp(prefix='fuzzfetch-', suffix='.zip') os.close(zip_fd) try: _download_url(self.artifact_url(suffix), zip_fn) LOG.info('.. extracting') with zipfile.ZipFile(zip_fn) as zip_fp: for info in zip_fp.infolist(): _extract_file(zip_fp, info, path) finally: os.unlink(zip_fn)
Download and extract a zip artifact @type suffix: @param suffix: @type path: @param path:
def append(self, row_or_table): if not row_or_table: return if isinstance(row_or_table, Table): t = row_or_table columns = list(t.select(self.labels)._columns.values()) n = t.num_rows else: if (len(list(row_or_table)) != self.num_columns): raise Exception('Row should have '+ str(self.num_columns) + " columns") columns, n = [[value] for value in row_or_table], 1 for i, column in enumerate(self._columns): if self.num_rows: self._columns[column] = np.append(self[column], columns[i]) else: self._columns[column] = np.array(columns[i]) self._num_rows += n return self
Append a row or all rows of a table. An appended table must have all columns of self.
async def capability_check(self, optional=None, required=None): self._check_receive_loop() await self.query( "version", {"optional": optional or [], "required": required or []} )
Perform a server capability check.
def alpha(self, theta_x, theta_y, kwargs_lens, k=None): beta_x, beta_y = self.ray_shooting(theta_x, theta_y, kwargs_lens) alpha_x = theta_x - beta_x alpha_y = theta_y - beta_y return alpha_x, alpha_y
reduced deflection angle :param theta_x: angle in x-direction :param theta_y: angle in y-direction :param kwargs_lens: lens model kwargs :return:
def normalize_text(self, text): if not self.editor.free_format: text = ' ' * 6 + text[6:] return text.upper()
Normalize text, when fixed format is ON, replace the first 6 chars by a space.
def reverse_file(infile, outfile): with open(infile, 'rb') as inf: with open(outfile, 'wb') as outf: reverse_fd(inf, outf)
Reverse the content of infile, write to outfile. Both infile and outfile are filenames or filepaths.
def get_session(self, account_id): if account_id not in self.account_sessions: if account_id not in self.config['accounts']: raise AccountNotFound("account:%s is unknown" % account_id) self.account_sessions[account_id] = s = assumed_session( self.config['accounts'][account_id]['role'], "Sphere11") s._session.user_agent_name = "Sphere11" s._session.user_agent_version = "0.07" return self.account_sessions[account_id]
Get an active session in the target account.
def add_marker_to_qtl(qtl, map_list): closest = '' diff = None for marker in map_list: if qtl[1] == marker[1]: tmp_diff = float(qtl[2]) - float(marker[2]) if diff is None or abs(diff) > abs(tmp_diff): diff = tmp_diff closest = marker if closest != '': closest = closest[0] return closest
Add the closest marker to the given QTL. :arg qtl: a row of the QTL list. :arg map_list: the genetic map containing the list of markers.
def readDivPressure(fileName): try: df = pandas.read_csv(fileName, sep=None, engine='python') pandasformat = True except ValueError: pandasformat = False df.columns = ['site', 'divPressureValue'] scaleFactor = max(df["divPressureValue"].abs()) if scaleFactor > 0: df["divPressureValue"] = [x / scaleFactor for x in df["divPressureValue"]] assert len(df['site'].tolist()) == len(set(df['site'].tolist())),"There is at least one non-unique site in {0}".format(fileName) assert max(df["divPressureValue"].abs()) <= 1, "The scaling produced a diversifying pressure value with an absolute value greater than one." sites = df['site'].tolist() divPressure = {} for r in sites: divPressure[r] = df[df['site'] == r]["divPressureValue"].tolist()[0] return divPressure
Reads in diversifying pressures from some file. Scale diversifying pressure values so absolute value of the max value is 1, unless all values are zero. Args: `fileName` (string or readable file-like object) File holding diversifying pressure values. Can be comma-, space-, or tab-separated file. The first column is the site (consecutively numbered, sites starting with one) and the second column is the diversifying pressure values. Returns: `divPressure` (dict keyed by ints) `divPressure[r][v]` is the diversifying pressure value of site `r`.
def _mirror_groups_from_stormpath(self): APPLICATION = get_application() sp_groups = [g.name for g in APPLICATION.groups] missing_from_db, missing_from_sp = self._get_group_difference(sp_groups) if missing_from_db: groups_to_create = [] for g_name in missing_from_db: groups_to_create.append(Group(name=g_name)) Group.objects.bulk_create(groups_to_create)
Helper method for saving to the local db groups that are missing but are on Stormpath
def _load_greedy(self, module_name, dependencies, recursive): found = module_name in self.modules allmodules = list(self._pathfiles.keys()) i = 0 while not found and i < len(allmodules): current = allmodules[i] if not current in self._modulefiles: self.parse(self._pathfiles[current], dependencies and recursive) found = module_name in self.modules i += 1
Keeps loading modules in the filepaths dictionary until all have been loaded or the module is found.
def _message_callback(self, msg): if msg.type == 'polytouch': button = button_from_press(msg.note) if button: self.on_button(button, msg.value != 0) elif msg.note == 127: self.on_fader_touch(msg.value != 0) elif msg.type == 'control_change' and msg.control == 0: self._msb = msg.value elif msg.type == 'control_change' and msg.control == 32: self._fader = (self._msb << 7 | msg.value) >> 4 self.on_fader(self._fader) elif msg.type == 'pitchwheel': self.on_rotary(1 if msg.pitch < 0 else -1) else: print('Unhandled:', msg)
Callback function to handle incoming MIDI messages.
def derivatives_factory(cls, coef, domain, kind, **kwargs): basis_polynomial = cls._basis_polynomial_factory(kind) return basis_polynomial(coef, domain).deriv()
Given some coefficients, return a the derivative of a certain kind of orthogonal polynomial defined over a specific domain.
def merge_periods(data): newdata = sorted(data, key=lambda drange: drange[0]) end = 0 for period in newdata: if period[0] != end and period[0] != (end - 1): end = period[1] dat = newdata new_intervals = [] cur_start = None cur_end = None for (dt_start, dt_end) in dat: if cur_end is None: cur_start = dt_start cur_end = dt_end continue else: if cur_end >= dt_start: cur_end = dt_end else: new_intervals.append((cur_start, cur_end)) cur_start = dt_start cur_end = dt_end new_intervals.append((cur_start, cur_end)) return new_intervals
Merge periods to have better continous periods. Like 350-450, 400-600 => 350-600 :param data: list of periods :type data: list :return: better continous periods :rtype: list
def create_header_from_telpars(telpars): pars = [val.strip() for val in (';').join(telpars).split(';') if val.strip() != ''] with warnings.catch_warnings(): warnings.simplefilter('ignore', fits.verify.VerifyWarning) hdr = fits.Header(map(parse_hstring, pars)) return hdr
Create a list of fits header items from GTC telescope pars. The GTC telescope server gives a list of string describing FITS header items such as RA, DEC, etc. Arguments --------- telpars : list list returned by server call to getTelescopeParams
def get_client_calls_for_app(source_code):
    """Return client API calls made by a chalice app.

    Like ``get_client_calls`` but also traverses into chalice view
    functions, on the assumption that they will be invoked.
    """
    parsed = parse_code(source_code)
    # Rewrite the AST so chalice views look like ordinary call sites.
    parsed.parsed_ast = AppViewTransformer().visit(parsed.parsed_ast)
    ast.fix_missing_locations(parsed.parsed_ast)
    inferer = SymbolTableTypeInfer(parsed)
    call_collector = APICallCollector(inferer.bind_types())
    return call_collector.collect_api_calls(parsed.parsed_ast)
Return client calls for a chalice app. This is similar to ``get_client_calls`` except it will automatically traverse into chalice views with the assumption that they will be called.
def mag_yaw(RAW_IMU, inclination, declination):
    """Estimate yaw (degrees in [0, 360)) from the magnetometer reading."""
    rotation = mag_rotation(RAW_IMU, inclination, declination)
    _, _, yaw_rad = rotation.to_euler()
    yaw = degrees(yaw_rad)
    return yaw + 360 if yaw < 0 else yaw
estimate yaw from mag
def put_encryption_materials(self, cache_key, encryption_materials, plaintext_length, entry_hints=None):
    """Build a cache entry without actually caching it (null-cache behavior).

    :param bytes cache_key: Identifier for entries in cache
    :param encryption_materials: Encryption materials to wrap
    :param int plaintext_length: Length of plaintext for this request (unused)
    :param entry_hints: Metadata to associate with entry (unused)
    :rtype: aws_encryption_sdk.caches.CryptoMaterialsCacheEntry
    """
    entry = CryptoMaterialsCacheEntry(
        cache_key=cache_key,
        value=encryption_materials,
    )
    return entry
Does not add encryption materials to the cache since there is no cache to which to add them. :param bytes cache_key: Identifier for entries in cache :param encryption_materials: Encryption materials to add to cache :type encryption_materials: aws_encryption_sdk.materials_managers.EncryptionMaterials :param int plaintext_length: Length of plaintext associated with this request to the cache :param entry_hints: Metadata to associate with entry (optional) :type entry_hints: aws_encryption_sdk.caches.CryptoCacheEntryHints :rtype: aws_encryption_sdk.caches.CryptoMaterialsCacheEntry
def find_pore_to_pore_distance(network, pores1=None, pores2=None):
    r"""Find the distance between every pore in set 1 and every pore in set 2.

    Parameters
    ----------
    network : OpenPNM Network Object
        The network object containing the pore coordinates
    pores1 : array_like
        The pore indices of the first set
    pores2 : array_like
        The pore indices of the second set (may overlap ``pores1``)

    Returns
    -------
    A ``len(pores1)`` x ``len(pores2)`` distance matrix.
    """
    from scipy.spatial.distance import cdist
    coords = network['pore.coords']
    idx1 = sp.array(pores1, ndmin=1)
    idx2 = sp.array(pores2, ndmin=1)
    return cdist(coords[idx1], coords[idx2])
r''' Find the distance between all pores on set one to each pore in set 2 Parameters ---------- network : OpenPNM Network Object The network object containing the pore coordinates pores1 : array_like The pore indices of the first set pores2 : array_Like The pore indices of the second set. It's OK if these indices are partially or completely duplicating ``pores1``. Returns ------- A distance matrix with ``len(pores1)`` rows and ``len(pores2)`` columns. The distance between pore *i* in ``pores1`` and *j* in ``pores2`` is located at *(i, j)* and *(j, i)* in the distance matrix.
def _getModelData(self, modelData, parentItem=None):
    """Recursively copy the tree-model contents into the ``modelData`` dict.

    Branch items become nested ordered dicts; leaf items become either a
    ``[value, weight]`` pair (when column 2 holds a float) or the bare value.
    """
    if parentItem is None:
        parentItem = self.rootItem
    for child in parentItem.getChildren():
        key = child.getItemData(0)
        if child.childCount():
            # Branch node: recurse into a fresh nested dict.
            modelData[key] = odict()
            self._getModelData(modelData[key], child)
        elif isinstance(child.getItemData(2), float):
            modelData[key] = [child.getItemData(1), child.getItemData(2)]
        else:
            modelData[key] = child.getItemData(1)
Return the data contained in the model.
def association(self, group_xid):
    """Add a Group association using its external id.

    Args:
        group_xid (str): The external id of the Group to associate.
    """
    groups = self._indicator_data.setdefault('associatedGroups', [])
    groups.append({'groupXid': group_xid})
Add association using xid value. Args: group_xid (str): The external id of the Group to associate.
def camel_to_snake_case(string):
    """Convert ``string`` from camel case to snake case.

    e.g.: CamelCase => camel_case
    """
    # First pass (_1) splits words before capital runs; second pass (_2)
    # handles the remaining lower/upper boundaries.
    partially_split = _1.sub(r'\1_\2', string)
    fully_split = _2.sub(r'\1_\2', partially_split)
    return fully_split.lower()
Converts 'string' presented in camel case to snake case. e.g.: CamelCase => snake_case
def write(filename, groupname, items, times, features, properties=None,
          dformat='dense', chunk_size='auto', sparsity=0.1, mode='a'):
    """Write h5features data in a HDF5 file.

    Thin wrapper over the Writer class: validates parameters by building a
    Data object (``check=True``) and appends it to ``groupname`` in
    ``filename``. ``sparsity`` is only meaningful for the sparse format.
    """
    if dformat != 'sparse':
        sparsity = None
    data = Data(items, times, features,
                properties=properties, sparsity=sparsity, check=True)
    writer = Writer(filename, chunk_size=chunk_size)
    writer.write(data, groupname, append=True)
Write h5features data in a HDF5 file. This function is a wrapper to the Writer class. It has three purposes: * Check parameters for errors (see details below), * Create Items, Times and Features objects * Send them to the Writer. :param str filename: HDF5 file to be written, potentially serving as a container for many small files. If the file does not exist, it is created. If the file is already a valid HDF5 file, try to append the data in it. :param str groupname: Name of the group to write the data in, or to append the data to if the group already exists in the file. :param items: List of files from which the features where extracted. Items must not contain duplicates. :type items: list of str :param times: Time value for the features array. Elements of a 1D array are considered as the center of the time window associated with the features. A 2D array must have 2 columns corresponding to the begin and end timestamps of the features time window. :type times: list of 1D or 2D numpy arrays :param features: Features should have time along the lines and features along the columns (accommodating row-major storage in hdf5 files). :type features: list of 2D numpy arrays :param properties: Optional. Properties associated with each item. Properties describe the features associated with each item in a dictionary. It can store parameters or fields recorded by the user. :type properties: list of dictionaries :param str dformat: Optional. Which format to store the features into (sparse or dense). Default is dense. :param float chunk_size: Optional. In Mo, tuning parameter corresponding to the size of a chunk in the h5file. By default the chunk size is guessed automatically. This parameter is ignored if the file already exists. :param float sparsity: Optional. Tuning parameter corresponding to the expected proportion (in [0, 1]) of non-zeros elements on average in a single frame. :param char mode: Optional. 
The mode for overwriting an existing file, 'a' to append data to the file, 'w' to overwrite it :raise IOError: if the filename is not valid or parameters are inconsistent. :raise NotImplementedError: if dformat == 'sparse'
def download_file(self, url, filename):
    """Download the resource at ``url`` into local file ``filename``.

    Any failure is reported via ``self.error`` rather than raised.
    """
    self.print_message("Downloading to file '%s' from URL '%s'" % (filename, url))
    try:
        db_file = urllib2.urlopen(url)
        try:
            with open(filename, 'wb') as output:
                output.write(db_file.read())
        finally:
            # BUG FIX: previously the response was only closed on the success
            # path, leaking the connection if the read/write raised.
            db_file.close()
    except Exception as e:
        self.error(str(e))
    self.print_message("File downloaded")
Download file from url to filename.
def save_hdf_metadata(filename, metadata, groupname="data", mode="a"):
    """Save a dictionary of metadata onto a group's HDF5 attributes."""
    with _h5py.File(filename, mode) as hdf:
        group = hdf[groupname]
        for key in metadata:
            group.attrs[key] = metadata[key]
Save a dictionary of metadata to a group's attrs.
def _compose(self, *args, **kwargs):
    """Compose symbol on inputs. This call mutates the current symbol.

    Parameters
    ----------
    args: positional input Symbols
    kwargs: keyword input Symbols (plus optional ``name``)

    Raises
    ------
    TypeError: if both positional and keyword inputs are given, or any
        input is not a Symbol.
    """
    name = kwargs.pop('name', None)
    if name:
        name = c_str(name)
    if len(args) != 0 and len(kwargs) != 0:
        raise TypeError('compose only accept input Symbols \
            either as positional or keyword arguments, not both')
    for arg in args:
        if not isinstance(arg, SymbolBase):
            raise TypeError('Compose expect `Symbol` as arguments')
    for val in kwargs.values():
        if not isinstance(val, SymbolBase):
            raise TypeError('Compose expect `Symbol` as arguments')
    num_args = len(args) + len(kwargs)
    if len(kwargs) != 0:
        keys = c_str_array(kwargs.keys())
        args = c_handle_array(kwargs.values())
    else:
        keys = None
        # BUG FIX: previously built the handle array from kwargs.values(),
        # which is empty in this branch -- positional arguments were
        # silently dropped while num_args still counted them.
        args = c_handle_array(args)
    check_call(_LIB.NNSymbolCompose(
        self.handle, name, num_args, keys, args))
Compose symbol on inputs. This call mutates the current symbol. Parameters ---------- args: provide positional arguments kwargs: provide keyword arguments Returns ------- the resulting symbol
def create(*context, **kwargs):
    """Build a ContextStack from a sequence of context-like items.

    Unlike the constructor, the argument list may itself contain
    ContextStack instances (their stacks are spliced in); ``None`` items
    are skipped.  Later items, and finally ``kwargs``, take precedence.
    """
    stack = ContextStack()
    for item in context:
        if item is None:
            continue
        if isinstance(item, ContextStack):
            # Splice another stack's layers in, preserving their order.
            stack._stack.extend(item._stack)
        else:
            stack.push(item)
    if kwargs:
        stack.push(kwargs)
    return stack
Build a ContextStack instance from a sequence of context-like items. This factory-style method is more general than the ContextStack class's constructor in that, unlike the constructor, the argument list can itself contain ContextStack instances. Here is an example illustrating various aspects of this method: >>> obj1 = {'animal': 'cat', 'vegetable': 'carrot', 'mineral': 'copper'} >>> obj2 = ContextStack({'vegetable': 'spinach', 'mineral': 'silver'}) >>> >>> context = ContextStack.create(obj1, None, obj2, mineral='gold') >>> >>> context.get('animal') 'cat' >>> context.get('vegetable') 'spinach' >>> context.get('mineral') 'gold' Arguments: *context: zero or more dictionaries, ContextStack instances, or objects with which to populate the initial context stack. None arguments will be skipped. Items in the *context list are added to the stack in order so that later items in the argument list take precedence over earlier items. This behavior is the same as the constructor's. **kwargs: additional key-value data to add to the context stack. As these arguments appear after all items in the *context list, in the case of key conflicts these values take precedence over all items in the *context list. This behavior is the same as the constructor's.
def ids_sharing_same_pgn(id_x, pgn_x, id_y, pgn_y):
    """Yield (id_a, id_b) pairs of arbitration ids that share the same PGN."""
    right = list(zip(id_y, pgn_y))
    for id_a, pgn_a in zip(id_x, pgn_x):
        yield from ((id_a, id_b) for id_b, pgn_b in right if pgn_a == pgn_b)
Yield arbitration ids which has the same pgn.
def get_connection(db_type, db_pth, user=None, password=None, name=None):
    """Get a connection to a SQL database (SQLite, MySQL or Django MySQL).

    Args:
        db_type (str): "sqlite", "mysql" or "django_mysql"
        db_pth (str): path to the SQLite database file (sqlite only)
        user, password, name: MySQL credentials/database (mysql only)

    Returns:
        sql connection object

    Raises:
        ValueError: if ``db_type`` is not a supported choice.
    """
    if db_type == 'sqlite':
        print(db_pth)
        conn = sqlite3.connect(db_pth)
    elif db_type == 'mysql':
        import mysql.connector
        conn = mysql.connector.connect(user=user, password=password, database=name)
    elif db_type == 'django_mysql':
        from django.db import connection as conn
    else:
        # BUG FIX: previously only printed a message and then hit
        # `return conn` with `conn` unbound, raising a confusing NameError.
        raise ValueError(
            'unsupported database type: {}, choices are "sqlite", "mysql" '
            'or "django_mysql"'.format(db_type))
    return conn
Get a connection to a SQL database. Can be used for SQLite, MySQL or Django MySQL database Example: >>> from msp2db.db import get_connection >>> conn = get_connection('sqlite', 'library.db') If using "mysql" mysql.connector needs to be installed. If using "django_mysql" Django needs to be installed. Args: db_type (str): Type of database can either be "sqlite", "mysql" or "django_mysql" Returns: sql connection object
def create_argparser(self):
    """Factory for the argument parser.

    Can be overridden as long as it returns an ArgParser-compatible
    instance.  The description combines title and desc when both exist.
    """
    if not self.desc:
        fulldesc = self.title
    elif self.title:
        fulldesc = '%s\n\n%s' % (self.title, self.desc)
    else:
        fulldesc = self.desc
    return self.ArgumentParser(command=self, prog=self.name,
                               description=fulldesc)
Factory for arg parser. Can be overridden as long as it returns an ArgParser compatible instance.
def shellinput(initialtext='>> ', splitpart=' '):
    """Prompt the user shell-style and return the entered command.

    :param initialtext: prompt text to display.
    :param splitpart: character to split the input on; '' or None
        returns the raw string instead of a list.
    :return: the raw input string, or the input split on ``splitpart``.
    """
    entered = input(str(initialtext))
    if splitpart in ('', None):
        return entered
    return entered.split(splitpart)
Give the user a shell-like interface to enter commands which are returned as a multi-part list containing the command and each of the arguments. :type initialtext: string :param initialtext: Set the text to be displayed as the prompt. :type splitpart: string :param splitpart: The character to split when generating the list item. :return: A string of the user's input or a list of the user's input split by the split character. :rtype: string or list
def parse_value_instancewithpath(self, tup_tree):
    """Parse a VALUE.INSTANCEWITHPATH element.

    ::

        <!ELEMENT VALUE.INSTANCEWITHPATH (INSTANCEPATH, INSTANCE)>

    Returns the parsed CIMInstance with its ``path`` attribute set to the
    parsed instance path.
    """
    self.check_node(tup_tree, 'VALUE.INSTANCEWITHPATH')
    children = kids(tup_tree)
    if len(children) != 2:
        raise CIMXMLParseError(
            _format("Element {0!A} has invalid number of child elements "
                    "{1!A} (expecting two child elements "
                    "(INSTANCEPATH, INSTANCE))", name(tup_tree), children),
            conn_id=self.conn_id)
    path_node, instance_node = children
    # Parse the path first, matching the element order in the DTD.
    inst_path = self.parse_instancepath(path_node)
    instance = self.parse_instance(instance_node)
    instance.path = inst_path
    return instance
The VALUE.INSTANCEWITHPATH is used to define a value that comprises a single CIMInstance with additional information that defines the absolute path to that object. :: <!ELEMENT VALUE.INSTANCEWITHPATH (INSTANCEPATH, INSTANCE)>
def currency_to_protocol(amount):
    """Convert 'currency units' to integer 'protocol units' (1e8 larger).

    For instance 19.1 bitcoin -> 1910000000 satoshis.  String inputs are
    handled digit-wise to avoid float rounding errors.

    examples:
    19.1 -> 1910000000
    "0.001" -> 100000

    BUG FIX: string inputs used to be converted by simply stripping the
    decimal point, so "0.001" became 1 and "19.1" became 191 -- the
    fractional part is now zero-padded to exactly 8 digits.
    """
    if isinstance(amount, (float, int)):
        amount = "%.8f" % amount
    whole, _, frac = amount.partition('.')
    # Pad (or truncate) the fractional part to exactly 8 decimal places.
    frac = (frac + '00000000')[:8]
    return int(whole + frac)
Convert a string of 'currency units' to 'protocol units'. For instance converts 19.1 bitcoin to 1910000000 satoshis. Input is a float, output is an integer that is 1e8 times larger. It is hard to do this conversion because multiplying floats causes rounding numbers which will mess up the transactions creation process. examples: 19.1 -> 1910000000 0.001 -> 100000
def ensure_float(arr):
    """Ensure that an array object has a float dtype if possible.

    Parameters
    ----------
    arr : array-like
        The array whose data type we want to enforce as float.

    Returns
    -------
    The original array cast to float if it was integer or boolean,
    otherwise the original array unchanged.
    """
    is_int_like = issubclass(arr.dtype.type, (np.integer, np.bool_))
    return arr.astype(float) if is_int_like else arr
Ensure that an array object has a float dtype if possible. Parameters ---------- arr : array-like The array whose data type we want to enforce as float. Returns ------- float_arr : The original array cast to the float dtype if possible. Otherwise, the original array is returned.
def listSites(self, block_name="", site_name=""):
    """Return sites, filtered by block name or by site name.

    The connection is always closed, even on error.
    """
    # BUG FIX: initialize before the try block -- previously, if
    # connection() raised, the finally clause hit an unbound `conn`
    # and raised NameError, masking the original error.
    conn = None
    try:
        conn = self.dbi.connection()
        if block_name:
            result = self.blksitelist.execute(conn, block_name)
        else:
            result = self.sitelist.execute(conn, site_name)
        return result
    finally:
        if conn:
            conn.close()
Returns sites.
def put_log_events(awsclient, log_group_name, log_stream_name, log_events,
                   sequence_token=None):
    """Put log events for the specified log group and stream.

    :param log_group_name: log group name
    :param log_stream_name: log stream name
    :param log_events: [{'timestamp': 123, 'message': 'string'}, ...]
    :param sequence_token: the sequence token
    :return: next sequence token, or None if the response has none
    """
    client_logs = awsclient.get_client('logs')
    request = {
        'logGroupName': log_group_name,
        'logStreamName': log_stream_name,
        'logEvents': log_events
    }
    if sequence_token:
        request['sequenceToken'] = sequence_token
    response = client_logs.put_log_events(**request)
    if 'rejectedLogEventsInfo' in response:
        # FIX: Logger.warn() is deprecated in favor of Logger.warning().
        log.warning(response['rejectedLogEventsInfo'])
    if 'nextSequenceToken' in response:
        return response['nextSequenceToken']
Put log events for the specified log group and stream. :param log_group_name: log group name :param log_stream_name: log stream name :param log_events: [{'timestamp': 123, 'message': 'string'}, ...] :param sequence_token: the sequence token :return: next_token
def process_batches(self):
    """Iterate through all batches, call process_batch on them, and ack.

    Separated out for the rare instances when we want to subclass
    BatchingBolt and customize what mechanism causes batches to be
    processed.
    """
    for key, batch in iteritems(self._batches):
        # Expose the in-flight batch/key so other code (e.g. failure
        # handlers) can see what was being processed.
        self._current_tups = batch
        self._current_key = key
        self.process_batch(key, batch)
        if self.auto_ack:
            for tup in batch:
                self.ack(tup)
        # Clear the processed batch in place -- replacing the value of an
        # existing key keeps the iteritems() iteration valid.
        self._current_key = None
        self._batches[key] = []
    # Start the next round with a fresh defaultdict so stale keys don't
    # accumulate.
    self._batches = defaultdict(list)
Iterate through all batches, call process_batch on them, and ack. Separated out for the rare instances when we want to subclass BatchingBolt and customize what mechanism causes batches to be processed.
def get_label_names(ctx):
    """Get labels defined in an ANTLR context for a parser rule.

    Filters out private attributes and the bookkeeping attributes that
    ANTLR puts on every context object.
    """
    internal = {
        "children",
        "exception",
        "invokingState",
        "parentCtx",
        "parser",
        "start",
        "stop",
    }
    return [
        attr
        for attr in ctx.__dict__
        if not attr.startswith("_") and attr not in internal
    ]
Get labels defined in an ANTLR context for a parser rule
def StaticAdd(cls, queue_urn, rdf_value, mutation_pool=None):
    """Add an rdf value to a queue via a mutation pool.

    Does not require that the queue be locked, or even open.  The caller
    is responsible for ensuring the queue exists and has the right type.

    Args:
        queue_urn: The urn of the queue to add to.
        rdf_value: The rdf value to add to the queue.
        mutation_pool: A MutationPool object to write to.

    Raises:
        ValueError: rdf_value has unexpected type, or mutation_pool is None.
    """
    if not isinstance(rdf_value, cls.rdf_type):
        raise ValueError("This collection only accepts values of type %s." %
                         cls.rdf_type.__name__)
    if mutation_pool is None:
        raise ValueError("Mutation pool can't be none.")
    now_us = rdfvalue.RDFDatetime.Now().AsMicrosecondsSinceEpoch()
    if isinstance(queue_urn, rdfvalue.RDFURN):
        urn = queue_urn
    else:
        urn = rdfvalue.RDFURN(queue_urn)
    mutation_pool.QueueAddItem(urn, rdf_value, now_us)
Adds an rdf value the queue. Adds an rdf value to a queue. Does not require that the queue be locked, or even open. NOTE: The caller is responsible for ensuring that the queue exists and is of the correct type. Args: queue_urn: The urn of the queue to add to. rdf_value: The rdf value to add to the queue. mutation_pool: A MutationPool object to write to. Raises: ValueError: rdf_value has unexpected type.
def ApprovalUrnBuilder(subject, user, approval_id):
    """Encode an approval URN: ACL/<subject>/<user>/<approval_id>."""
    urn = aff4.ROOT_URN.Add("ACL")
    for component in (subject, user, approval_id):
        urn = urn.Add(component)
    return urn
Encode an approval URN.
async def exist(self, key, param=None):
    """Return whether the identity derived from (key, param) exists."""
    identity = self._gen_identity(key, param)
    found = await self.client.exists(identity)
    return found
see if specific identity exists
def get(self, sid):
    """Construct a QueueContext for the given resource sid.

    :param sid: The unique string that identifies this resource
    :rtype: twilio.rest.api.v2010.account.queue.QueueContext
    """
    account_sid = self._solution['account_sid']
    return QueueContext(self._version, account_sid=account_sid, sid=sid)
Constructs a QueueContext :param sid: The unique string that identifies this resource :returns: twilio.rest.api.v2010.account.queue.QueueContext :rtype: twilio.rest.api.v2010.account.queue.QueueContext
def createPenWidthCti(nodeName, defaultData=1.0, zeroValueText=None):
    """Create a FloatCti with defaults for configuring a QPen width.

    If ``zeroValueText`` is set, that string is displayed when 0.0 is
    selected (and 0.0 becomes selectable); otherwise the minimum is 0.1.
    """
    min_value = 0.0 if zeroValueText is not None else 0.1
    return FloatCti(nodeName,
                    defaultData=defaultData,
                    specialValueText=zeroValueText,
                    minValue=min_value,
                    maxValue=100,
                    stepSize=0.1,
                    decimals=1)
Creates a FloatCti with defaults for configuring a QPen width. If specialValueZero is set, this string will be displayed when 0.0 is selected. If specialValueZero is None, the minValue will be 0.1
def make_relative(base, obj):
    """Relativize the location URI of a File or Directory object in place.

    URIs with a non-file scheme (e.g. ``http://``) are left untouched;
    ``file://`` URIs are converted to paths first, then ``obj['location']``
    is rewritten relative to ``base``.
    """
    uri = obj.get("location", obj.get("path"))
    if ":" in uri.split("/")[0] and not uri.startswith("file://"):
        # Remote/schemed URI -- nothing to relativize.
        # (Previously expressed as `if ...: pass / else: ...`.)
        return
    if uri.startswith("file://"):
        uri = uri_file_path(uri)
    obj["location"] = os.path.relpath(uri, base)
Relativize the location URI of a File or Directory object.
def prune_feed_map(meta_graph, feed_map):
    """Remove feed_map entries whose nodes no longer exist in meta_graph."""
    live = {node.name + ":0" for node in meta_graph.graph_def.node}
    stale = [key for key in feed_map if key not in live]
    for key in stale:
        del feed_map[key]
Function to prune the feedmap of nodes which no longer exist.
def open_xmldoc(fobj, **kwargs):
    """Try to open an existing LIGO_LW-format file, or create a new Document.

    Parameters
    ----------
    fobj : `str`, `file`
        file path or open file object to read

    **kwargs
        other keyword arguments passed to
        :func:`~ligo.lw.utils.load_filename` or
        :func:`~ligo.lw.utils.load_fileobj` as appropriate

    Returns
    -------
    xmldoc : :class:`~ligo.lw.ligolw.Document`
        either the `Document` as parsed from an existing file, or a new,
        empty `Document` if the file could not be read
    """
    from ligo.lw.ligolw import (Document, LIGOLWContentHandler)
    from ligo.lw.lsctables import use_in
    from ligo.lw.utils import (load_filename, load_fileobj)
    # Register LSC table classes on the (possibly caller-supplied)
    # content handler before parsing.
    use_in(kwargs.setdefault('contenthandler', LIGOLWContentHandler))
    try:
        if isinstance(fobj, string_types):
            return load_filename(fobj, **kwargs)
        if isinstance(fobj, FILE_LIKE):
            return load_fileobj(fobj, **kwargs)[0]
    except (OSError, IOError):
        # File missing or unreadable: fall back to a fresh document.
        return Document()
    except LigolwElementError as exc:
        # Old-format (ilwd:char) files raise a recognizable error; retry
        # once in compatibility mode, re-raising the original error if
        # that also fails.
        if LIGO_LW_COMPAT_ERROR.search(str(exc)):
            try:
                return open_xmldoc(fobj, ilwdchar_compat=True, **kwargs)
            except Exception:
                pass
        raise
Try and open an existing LIGO_LW-format file, or create a new Document Parameters ---------- fobj : `str`, `file` file path or open file object to read **kwargs other keyword arguments to pass to :func:`~ligo.lw.utils.load_filename`, or :func:`~ligo.lw.utils.load_fileobj` as appropriate Returns -------- xmldoc : :class:`~ligo.lw.ligolw.Document` either the `Document` as parsed from an existing file, or a new, empty `Document`
def set_interface(interface, name=''):
    """Register an interface under ``name``, closing any previous one.

    Lets callers skip DSN handling entirely by making the interface
    available through the module-level ``interfaces`` registry.

    :raises ValueError: if ``interface`` is falsy.
    """
    global interfaces
    if not interface:
        raise ValueError('interface is empty')
    if name in interfaces:
        # Replace-and-close so the old interface releases its resources.
        interfaces[name].close()
    interfaces[name] = interface
don't want to bother with a dsn? Use this method to make an interface available
def use_mutation(module_path, operator, occurrence):
    """Apply a mutation for the duration of a with-block.

    Mutates the file on disk, then restores the unmutated source after
    the block exits (normally or via an exception).

    Args:
        module_path: The path to the module to mutate.
        operator: The `Operator` instance to use.
        occurrence: The occurrence of the operator to apply.

    Yields:
        A `(unmutated-code, mutated-code)` tuple; `mutated-code` is
        `None` if no mutation was performed.
    """
    pristine, mutated = apply_mutation(module_path, operator, occurrence)
    try:
        yield pristine, mutated
    finally:
        # Always restore the original source, no matter how the block exits.
        with module_path.open(mode='wt', encoding='utf-8') as out:
            out.write(pristine)
            out.flush()
A context manager that applies a mutation for the duration of a with-block. This applies a mutation to a file on disk, and after the with-block it put the unmutated code back in place. Args: module_path: The path to the module to mutate. operator: The `Operator` instance to use. occurrence: The occurrence of the operator to apply. Yields: A `(unmutated-code, mutated-code)` tuple to the with-block. If there was no mutation performed, the `mutated-code` is `None`.