code (string, 51-2.38k chars) | docstring (string, 4-15.2k chars) |
|---|---|
def _write_multiplicons(self, filename):
mhead = '\t'.join(['id', 'genome_x', 'list_x', 'parent', 'genome_y',
'list_y', 'level', 'number_of_anchorpoints',
'profile_length', 'begin_x', 'end_x', 'begin_y',
'end_y', 'is_redundant'])
... | Write multiplicons to file.
- filename, (str) location of output file |
def _parse_prefix_query(self, query_str):
sp = smart_parsing.PrefixSmartParser()
query = sp.parse(query_str)
return query | Parse a smart search query for prefixes
This is a helper function to smart_search_prefix for easier unit
testing of the parser. |
def get_history(self):
if hasattr(self, '_history'):
return self._history
try:
self._history = APICallDayHistory.objects.get(
user=self.user, creation_date=now().date())
except APICallDayHistory.DoesNotExist:
self._history = APICallDayHistory(u... | Returns the history from cache or DB or a newly created one. |
def to_representation(self, value):
value = apply_subfield_projection(self, value, deep=True)
return super().to_representation(value) | Project outgoing native value. |
def find_one(self, collection, selector={}):
for _id, doc in self.collection_data.data.get(collection, {}).items():
doc.update({'_id': _id})
if selector == {}:
return doc
for key, value in selector.items():
if key in doc and doc[key] == value:
... | Return one item from a collection
Arguments:
collection - collection to search
Keyword Arguments:
selector - the query (default returns first item found) |
def get_leading_spaces(data):
spaces = ''
m = re.match(r'^(\s*)', data)
if m:
spaces = m.group(1)
return spaces | Get the leading space of a string if it is not empty
:type data: str |
def check_link_and_get_info(self, target_id=0xFF):
for _ in range(0, 5):
if self._update_info(target_id):
if self._in_boot_cb:
self._in_boot_cb.call(True, self.targets[
target_id].protocol_version)
if self._info_cb:
... | Try to get a connection with the bootloader by requesting info
5 times. This allows roughly 10 seconds for the copter to boot ... |
def get_urls(self):
urls = super(LayoutAdmin, self).get_urls()
my_urls = patterns(
'',
url(
r'^placeholder_data/(?P<id>\d+)/$',
self.admin_site.admin_view(self.placeholder_data_view),
name='layout_placeholder_data',
)
... | Add ``layout_placeholder_data`` URL. |
def check_for_invalid_columns(
problems: List, table: str, df: DataFrame
) -> List:
r = cs.GTFS_REF
valid_columns = r.loc[r["table"] == table, "column"].values
for col in df.columns:
if col not in valid_columns:
problems.append(
["warning", f"Unrecognized column {col}... | Check for invalid columns in the given GTFS DataFrame.
Parameters
----------
problems : list
A four-tuple containing
1. A problem type (string) equal to ``'error'`` or ``'warning'``;
``'error'`` means the GTFS is violated;
``'warning'`` means there is a problem but it... |
def ssad(patch, cols, splits):
sad_results = sad(patch, cols, splits, clean=False)
for i, sad_result in enumerate(sad_results):
if i == 0:
fulldf = sad_result[1]
fulldf.columns = ['spp', '0']
else:
fulldf[str(i)] = sad_result[1]['y']
result_list = []
f... | Calculates an empirical intra-specific spatial abundance distribution
Parameters
----------
{0}
Returns
-------
{1} Result has one column giving the individuals of species in each
subplot.
Notes
-----
{2}
{3}
Examples
--------
{4}
>>> # Get the spatial ... |
def handle_update(self, options):
username = options["username"]
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
raise CommandError("User %s does not exist" % username)
if options["email"]:
user.email = options["email"]
... | Update existing user |
def rpc_stop(server_state):
rpc_srv = server_state['rpc']
if rpc_srv is not None:
log.info("Shutting down RPC")
rpc_srv.stop_server()
rpc_srv.join()
log.info("RPC joined")
else:
log.info("RPC already joined")
server_state['rpc'] = None | Stop the global RPC server thread |
def update_nanopubstore_start_dt(url: str, start_dt: str):
hostname = urllib.parse.urlsplit(url)[1]
start_dates_doc = state_mgmt.get(start_dates_doc_key)
if not start_dates_doc:
start_dates_doc = {
"_key": start_dates_doc_key,
"start_dates": [{"nanopubstore": hostname, "start... | Add nanopubstore start_dt to belapi.state_mgmt collection
Args:
url: url of nanopubstore
    start_dt: datetime of last query against nanopubstore for new IDs |
def get_ordered_types(self):
types = self.get_types()
types_arr = np.array(types)
poss = [self.chrPos, self.startPos, self.stopPos]
if self.strandPos is not None:
poss.append(self.strandPos)
if self.otherPos:
for o in self.otherPos:
poss.ap... | Returns the ordered list of data types
:return: list of data types |
def create_simulated_env(
output_dir, grayscale, resize_width_factor, resize_height_factor,
frame_stack_size, generative_model, generative_model_params,
random_starts=True, which_epoch_data="last", **other_hparams
):
a_bit_risky_defaults = {
"game": "pong",
"real_batch_size": 1,
"rl_env_... | Create SimulatedEnv with minimal subset of hparams. |
def check_signature(params):
if 'id' in params:
try:
id_int = int(params['id'][0])
except:
my_log_message(args, syslog.LOG_INFO, "Non-numerical client id (%s) in request." % (params['id'][0]))
return False, None
key = client_ids.get(id_int)
if key:... | Verify the signature of the parameters in an OTP v2.0 verify request.
Returns ValResultBool, Key |
def remove_all_connections(provider_id):
provider = get_provider_or_404(provider_id)
ctx = dict(provider=provider.name, user=current_user)
deleted = _datastore.delete_connections(user_id=current_user.get_id(),
provider_id=provider_id)
if deleted:
after... | Remove all connections for the authenticated user to the
specified provider |
def convert_general(value):
if isinstance(value, bool):
return "true" if value else "false"
elif isinstance(value, list):
value = [convert_general(item) for item in value]
value = convert_to_imgur_list(value)
elif isinstance(value, Integral):
return str(value)
elif 'pyimg... | Take a python object and convert it to the format Imgur expects. |
def lifetimes(self):
r
return -self._lag / np.log(np.diag(self.transition_matrix)) | r""" Lifetimes of states of the hidden transition matrix
Returns
-------
l : ndarray(nstates)
state lifetimes in units of the input trajectory time step,
defined by :math:`-tau / ln | p_{ii} |, i = 1,...,nstates`, where
:math:`p_{ii}` are the diagonal entries... |
def OnLabelSizeIntCtrl(self, event):
self.attrs["labelsize"] = event.GetValue()
post_command_event(self, self.DrawChartMsg) | Label size IntCtrl event handler |
def set_cols_align(self, array):
self._check_row_size(array)
self._align = array
return self | Set the desired columns alignment
- the elements of the array should be either "l", "c" or "r":
* "l": column flushed left
* "c": column centered
* "r": column flushed right |
def set_permission(permission, value, app):
script =
app_url = 'app://' + app
run_marionette_script(script % (permission, app_url, app_url, value), True) | Set a permission for the specified app
Value should be 'deny' or 'allow' |
def update_redis(project: str, environment: str, feature: str, state: str) \
-> None:
try:
hosts = RedisWrapper.connection_string_parser(
os.environ.get('REDIS_HOSTS'))
except RuntimeError as ex:
LOG.error(ex)
sys.exit(1)
for host in hosts:
LOG.info("conne... | Update redis state for a feature flag.
:param project: LaunchDarkly project key.
:param environment: LaunchDarkly environment key.
:param feature: LaunchDarkly feature key.
:param state: State for a feature flag. |
def wait_until_stale(self, timeout=None):
timeout = timeout if timeout is not None else self.driver_wrapper.timeout
def wait():
WebDriverWait(self.driver, timeout).until(EC.staleness_of(self.element))
return self
return self.execute_and_handle_webelement_exceptions(wait, ... | Waits for the element to go stale in the DOM
@type timeout: int
@param timeout: override for default timeout
@rtype: WebElementWrapper
@return: Self |
def _determine_leftpad(column, point_place):
ndigits_left = [_find_point(x) for x in column]
return [max((point_place - 1) - x, 0) for x in ndigits_left] | Find how many spaces to put before a column of numbers
so that all the decimal points line up
This function takes a column of decimal numbers, and returns a
vector containing the number of spaces to place before each number
so that (when possible) the decimal points line up.
Parameters
---... |
def input_file(self, _container):
p = local.path(_container)
if set_input_container(p, CFG):
return
    p = find_hash(CFG["container"]["known"].value, _container)
if set_input_container(p, CFG):
return
raise ValueError("The path '{0}' does not exist.".format(p)... | Find the input path of a uchroot container. |
def __get_smtp(self):
use_tls = self.config['shutit.core.alerting.emailer.use_tls']
if use_tls:
smtp = SMTP(self.config['shutit.core.alerting.emailer.smtp_server'], self.config['shutit.core.alerting.emailer.smtp_port'])
smtp.starttls()
else:
            smtp = SMTP_SSL(self.config['shutit.core.alerting.emailer.smtp_... | Return the appropriate smtplib depending on whether we're using TLS |
def doDynamicValidation(self, request: Request):
self.execute_hook(NodeHooks.PRE_DYNAMIC_VALIDATION, request=request)
ledger_id, seq_no = self.seqNoDB.get_by_payload_digest(request.payload_digest)
if ledger_id is not None and seq_no is not None:
raise SuspiciousPrePrepare('Trying to ... | State based validation |
def parse_http_response(http_response: HttpResponse) -> 'environ.Response':
try:
response = environ.Response.deserialize(http_response.json())
except Exception as error:
response = environ.Response().fail(
code='INVALID_REMOTE_RESPONSE',
error=error,
message='... | Returns a Cauldron response object parsed from the serialized JSON data
specified in the http_response argument. If the response doesn't contain
valid Cauldron response data, an error Cauldron response object is
returned instead.
:param http_response:
The response object from an http request th... |
def sg_sugar_func(func):
r
@wraps(func)
def wrapper(tensor, **kwargs):
out = func(tensor, tf.sg_opt(kwargs))
out._sugar = tf.sg_opt(func=func, arg=tf.sg_opt(kwargs)+sg_get_context(), prev=tensor)
out.sg_reuse = types.MethodType(sg_reuse, out)
return out
return wrapper | r""" Decorates a function `func` so that it can be a sugar function.
Sugar function can be used in a chainable manner.
Args:
func: function to decorate
Returns:
A sugar function. |
def get_longest_table(url='https://www.openoffice.org/dev_docs/source/file_extensions.html', header=0):
dfs = pd.read_html(url, header=header)
return longest_table(dfs) | Retrieve the HTML tables from a URL and return the longest DataFrame found
>>> get_longest_table('https://en.wikipedia.org/wiki/List_of_sovereign_states').columns
Index(['Common and formal names', 'Membership within the UN System[a]',
'Sovereignty dispute[b]',
'Further information on status and r... |
def is_visa_electron(n):
n, length = str(n), len(str(n))
form = ['026', '508', '844', '913', '917']
if length == 16:
if n[0] == '4':
if ''.join(n[1:4]) in form or ''.join(n[1:6]) == '17500':
return True
return False | Checks if credit card number fits the visa electron format. |
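A quick illustration of the prefix logic above, using made-up card numbers chosen only to exercise each branch (not real accounts):

# Hypothetical card numbers, purely illustrative.
assert is_visa_electron('4026000000000000')        # '4' followed by '026' matches the form list
assert is_visa_electron('4175000000000000')        # '4' followed by '17500' matches the long prefix
assert not is_visa_electron('4111111111111111')    # ordinary Visa prefix, not Electron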
def dePeriod(arr):
diff= arr-nu.roll(arr,1,axis=1)
w= diff < -6.
addto= nu.cumsum(w.astype(int),axis=1)
return arr+_TWOPI*addto | make an array of periodic angles increase linearly |
def encode(self, inputs, states=None, valid_length=None):
return self.encoder(self.src_embed(inputs), states, valid_length) | Encode the input sequence.
Parameters
----------
inputs : NDArray
states : list of NDArrays or None, default None
valid_length : NDArray or None, default None
Returns
-------
outputs : list
Outputs of the encoder. |
def vi_return_param(self, index):
if index == 0:
return self.mu0
elif index == 1:
return np.log(self.sigma0) | Wrapper function for selecting appropriate latent variable for variational inference
Parameters
----------
index : int
0 or 1 depending on which latent variable
Returns
----------
The appropriate indexed parameter |
def setData(self, type: str, data: str) -> None:
type = normalize_type(type)
if type in self.__data:
del self.__data[type]
self.__data[type] = data | Set data of type format.
:arg str type: Data format of the data, like 'text/plain'. |
def finish(self):
self.lines.reverse()
self._content = '\n'.join(self.lines)
        self.lines = None | Creates a block of content from the lines
belonging to the fragment. |
def _ondim(self, dimension, valuestring):
try:
self.dimensions[dimension] = int(valuestring)
except ValueError:
self.dimensions[dimension] = 1
self.textctrls[dimension].SetValue(str(1))
if self.dimensions[dimension] < 1:
self.dimensions[dimension] ... | Converts valuestring to int and assigns result to self.dim
If there is an error (such as an empty valuestring) or if
the value is < 1, the value 1 is assigned to self.dim
Parameters
----------
dimension: int
\tDimension that is to be updated. Must be in [1:4]
v... |
def relative(self):
if not self.is_absolute():
raise ValueError("URL should be absolute")
val = self._val._replace(scheme="", netloc="")
return URL(val, encoded=True) | Return a relative part of the URL.
scheme, user, password, host and port are removed. |
def xrb_address_to_public_key(address):
address = bytearray(address, 'ascii')
if not address.startswith(b'xrb_'):
raise ValueError('address does not start with xrb_: %s' % address)
if len(address) != 64:
raise ValueError('address must be 64 chars long: %s' % address)
address = bytes(addr... | Convert an xrb address to public key in bytes
>>> xrb_address_to_public_key('xrb_1e3i81r51e3i81r51e3i81r51e3i'\
'81r51e3i81r51e3i81r51e3imxssakuq')
b'00000000000000000000000000000000'
:param address: xrb address
:type address: bytes
:return: public key in bytes
... |
def check_espeak(cls):
try:
from aeneas.textfile import TextFile
from aeneas.textfile import TextFragment
from aeneas.ttswrappers.espeakttswrapper import ESPEAKTTSWrapper
text = u"From fairest creatures we desire increase,"
text_file = TextFile()
... | Check whether ``espeak`` can be called.
Return ``True`` on failure and ``False`` on success.
:rtype: bool |
def surrounding_nodes(self, position):
n_node_index, n_node_position, n_node_error = self.nearest_node(position)
if n_node_error == 0.0:
index_mod = []
for i in range(len(n_node_index)):
new_point = np.asarray(n_node_position)
new_point[i] += 1.e-5... | Returns nearest node indices and direction of opposite node.
:param position: Position inside the mesh to search nearest node for as (x,y,z)
:return: Nearest node indices and direction of opposite node. |
def excluded_length(self):
return sum([shot.length for shot in self.shots if Exclude.LENGTH in shot.flags or Exclude.TOTAL in shot.flags]) | Surveyed length which does not count toward the included total |
def validate(self, cmd, messages=None):
valid = True
args = [ arg for arg in cmd.args if arg is not None ]
if self.nargs != len(args):
valid = False
if messages is not None:
msg = 'Expected %d arguments, but received %d.'
messages.append(... | Returns True if the given Command is valid, False otherwise.
Validation error messages are appended to an optional messages
array. |
def reread(self):
logger.debug("Loading credentials from %s",
os.path.abspath(self.creds_filename))
creds = {}
try:
with self.open_creds() as fp:
creds = yaml.safe_load(fp)
except IOError:
logger.info("No credentials file found... | Read and parse credentials file.
If something goes wrong, log exception and continue. |
def _generate_feed(self, feed_data):
atom_feed = self._render_html('atom.xml', feed_data)
feed_path = os.path.join(os.getcwd(), 'public', 'atom.xml')
with codecs.open(feed_path, 'wb', 'utf-8') as f:
f.write(atom_feed) | render feed file with data |
def run(self, gates, n_qubits, *args, **kwargs):
return self._run(gates, n_qubits, args, kwargs) | Run the backend. |
def ones_comp_sum16(num1: int, num2: int) -> int:
carry = 1 << 16
result = num1 + num2
return result if result < carry else result + 1 - carry | Calculates the 1's complement sum for 16-bit numbers.
Args:
num1: 16-bit number.
num2: 16-bit number.
Returns:
The calculated result. |
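A small worked example of the one's complement wrap-around (values chosen only for illustration):

# 0x1234 + 0x4321 stays below 2**16, so the plain sum is returned.
assert ones_comp_sum16(0x1234, 0x4321) == 0x5555
# 0xFFFF + 0x0001 overflows 16 bits; the carry is folded back into bit 0.
assert ones_comp_sum16(0xFFFF, 0x0001) == 0x0001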
def to_struct(self, value):
if self.str_format:
return value.strftime(self.str_format)
return value.strftime(self.default_format) | Cast `date` object to string. |
def disable_component(self, component):
if not isinstance(component, type):
component = component.__class__
self.enabled[component] = False
self.components[component] = None | Force a component to be disabled.
:param component: can be a class or an instance. |
def _process_thread(self, client):
file_list = self.files
if not file_list:
return
print('Filefinder to collect {0:d} items'.format(len(file_list)))
flow_action = flows_pb2.FileFinderAction(
action_type=flows_pb2.FileFinderAction.DOWNLOAD)
flow_args = flows_pb2.FileFinderArgs(
... | Process a single client.
Args:
client: GRR client object to act on. |
def AddExtraShapes(extra_shapes_txt, graph):
print("Adding extra shapes from %s" % extra_shapes_txt)
try:
tmpdir = tempfile.mkdtemp()
shutil.copy(extra_shapes_txt, os.path.join(tmpdir, 'shapes.txt'))
loader = transitfeed.ShapeLoader(tmpdir)
schedule = loader.Load()
for shape in schedule.GetShape... | Add extra shapes into our input set by parsing them out of a GTFS-formatted
shapes.txt file. Useful for manually adding lines to a shape file, since it's
a pain to edit .shp files. |
def extract_zip(self, suffix, path='.'):
zip_fd, zip_fn = tempfile.mkstemp(prefix='fuzzfetch-', suffix='.zip')
os.close(zip_fd)
try:
_download_url(self.artifact_url(suffix), zip_fn)
LOG.info('.. extracting')
with zipfile.ZipFile(zip_fn) as zip_fp:
... | Download and extract a zip artifact
@type suffix:
@param suffix:
@type path:
@param path: |
def append(self, row_or_table):
if not row_or_table:
return
if isinstance(row_or_table, Table):
t = row_or_table
columns = list(t.select(self.labels)._columns.values())
n = t.num_rows
else:
if (len(list(row_or_table)) != self.num_column... | Append a row or all rows of a table. An appended table must have all
columns of self. |
async def capability_check(self, optional=None, required=None):
self._check_receive_loop()
await self.query(
"version", {"optional": optional or [], "required": required or []}
) | Perform a server capability check. |
def alpha(self, theta_x, theta_y, kwargs_lens, k=None):
beta_x, beta_y = self.ray_shooting(theta_x, theta_y, kwargs_lens)
alpha_x = theta_x - beta_x
alpha_y = theta_y - beta_y
return alpha_x, alpha_y | reduced deflection angle
:param theta_x: angle in x-direction
:param theta_y: angle in y-direction
:param kwargs_lens: lens model kwargs
:return: |
def normalize_text(self, text):
if not self.editor.free_format:
text = ' ' * 6 + text[6:]
        return text.upper() | Normalize text: when fixed format is ON, replace the first 6 chars with spaces. |
def reverse_file(infile, outfile):
with open(infile, 'rb') as inf:
with open(outfile, 'wb') as outf:
reverse_fd(inf, outf) | Reverse the content of infile, write to outfile.
Both infile and outfile are filenames or filepaths. |
def get_session(self, account_id):
if account_id not in self.account_sessions:
if account_id not in self.config['accounts']:
raise AccountNotFound("account:%s is unknown" % account_id)
self.account_sessions[account_id] = s = assumed_session(
self.config['a... | Get an active session in the target account. |
def add_marker_to_qtl(qtl, map_list):
closest = ''
diff = None
for marker in map_list:
if qtl[1] == marker[1]:
tmp_diff = float(qtl[2]) - float(marker[2])
if diff is None or abs(diff) > abs(tmp_diff):
diff = tmp_diff
closest = marker
if clo... | Add the closest marker to the given QTL.
:arg qtl: a row of the QTL list.
:arg map_list: the genetic map containing the list of markers. |
def readDivPressure(fileName):
try:
df = pandas.read_csv(fileName, sep=None, engine='python')
pandasformat = True
except ValueError:
pandasformat = False
df.columns = ['site', 'divPressureValue']
scaleFactor = max(df["divPressureValue"].abs())
if scaleFactor > 0:
df["... | Reads in diversifying pressures from some file.
Scale diversifying pressure values so absolute value of the max value is 1,
unless all values are zero.
Args:
`fileName` (string or readable file-like object)
File holding diversifying pressure values. Can be
comma-, space-, o... |
def _mirror_groups_from_stormpath(self):
APPLICATION = get_application()
sp_groups = [g.name for g in APPLICATION.groups]
missing_from_db, missing_from_sp = self._get_group_difference(sp_groups)
if missing_from_db:
groups_to_create = []
            for g_name in missing_from_... | Helper method for saving to the local db the groups
that are missing locally but exist on Stormpath |
def _load_greedy(self, module_name, dependencies, recursive):
found = module_name in self.modules
allmodules = list(self._pathfiles.keys())
i = 0
while not found and i < len(allmodules):
current = allmodules[i]
if not current in self._modulefiles:
... | Keeps loading modules in the filepaths dictionary until all have
been loaded or the module is found. |
def _message_callback(self, msg):
if msg.type == 'polytouch':
button = button_from_press(msg.note)
if button:
self.on_button(button, msg.value != 0)
elif msg.note == 127:
self.on_fader_touch(msg.value != 0)
elif msg.type == 'control_cha... | Callback function to handle incoming MIDI messages. |
def derivatives_factory(cls, coef, domain, kind, **kwargs):
basis_polynomial = cls._basis_polynomial_factory(kind)
    return basis_polynomial(coef, domain).deriv() | Given some coefficients, return the derivative of a certain kind of
orthogonal polynomial defined over a specific domain. |
def merge_periods(data):
newdata = sorted(data, key=lambda drange: drange[0])
end = 0
for period in newdata:
if period[0] != end and period[0] != (end - 1):
end = period[1]
dat = newdata
new_intervals = []
cur_start = None
cur_end = None
    for (dt_start, dt_end) in dat:... | Merge periods to have better continuous periods.
Like 350-450, 400-600 => 350-600
:param data: list of periods
:type data: list
:return: better continuous periods
:rtype: list |
def create_header_from_telpars(telpars):
pars = [val.strip() for val in (';').join(telpars).split(';')
if val.strip() != '']
with warnings.catch_warnings():
warnings.simplefilter('ignore', fits.verify.VerifyWarning)
hdr = fits.Header(map(parse_hstring, pars))
return hdr | Create a list of fits header items from GTC telescope pars.
The GTC telescope server gives a list of string describing
FITS header items such as RA, DEC, etc.
Arguments
---------
telpars : list
list returned by server call to getTelescopeParams |
def get_client_calls_for_app(source_code):
parsed = parse_code(source_code)
parsed.parsed_ast = AppViewTransformer().visit(parsed.parsed_ast)
ast.fix_missing_locations(parsed.parsed_ast)
t = SymbolTableTypeInfer(parsed)
binder = t.bind_types()
collector = APICallCollector(binder)
api_calls =... | Return client calls for a chalice app.
This is similar to ``get_client_calls`` except it will
automatically traverse into chalice views with the assumption
that they will be called. |
def mag_yaw(RAW_IMU, inclination, declination):
m = mag_rotation(RAW_IMU, inclination, declination)
(r, p, y) = m.to_euler()
y = degrees(y)
if y < 0:
y += 360
return y | estimate yaw from mag |
def put_encryption_materials(self, cache_key, encryption_materials, plaintext_length, entry_hints=None):
return CryptoMaterialsCacheEntry(cache_key=cache_key, value=encryption_materials) | Does not add encryption materials to the cache since there is no cache to which to add them.
:param bytes cache_key: Identifier for entries in cache
:param encryption_materials: Encryption materials to add to cache
:type encryption_materials: aws_encryption_sdk.materials_managers.EncryptionMate... |
def find_pore_to_pore_distance(network, pores1=None, pores2=None):
r
from scipy.spatial.distance import cdist
p1 = sp.array(pores1, ndmin=1)
p2 = sp.array(pores2, ndmin=1)
coords = network['pore.coords']
return cdist(coords[p1], coords[p2]) | r'''
Find the distance between all pores on set one to each pore in set 2
Parameters
----------
network : OpenPNM Network Object
The network object containing the pore coordinates
pores1 : array_like
The pore indices of the first set
pores2 : array_Like
The pore indice... |
def _getModelData(self, modelData, parentItem=None):
if parentItem is None:
parentItem = self.rootItem
for item in parentItem.getChildren():
key = item.getItemData(0)
if item.childCount():
modelData[key] = odict()
self._getModelData(mod... | Return the data contained in the model. |
def association(self, group_xid):
association = {'groupXid': group_xid}
self._indicator_data.setdefault('associatedGroups', []).append(association) | Add association using xid value.
Args:
group_xid (str): The external id of the Group to associate. |
def camel_to_snake_case(string):
s = _1.sub(r'\1_\2', string)
return _2.sub(r'\1_\2', s).lower() | Converts 'string' presented in camel case to snake case.
e.g.: CamelCase => snake_case |
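The snippet relies on two module-level regexes, `_1` and `_2`, that are not shown. A common pair of definitions that makes the function behave as the docstring describes is sketched below; these are an assumption, not the original source:

import re

# Assumed module-level patterns used by camel_to_snake_case above.
_1 = re.compile(r'(.)([A-Z][a-z]+)')   # split before an upper-case word
_2 = re.compile(r'([a-z0-9])([A-Z])')  # split after a lower-case letter or digit

# With these patterns:
#   camel_to_snake_case('CamelCase')    -> 'camel_case'
#   camel_to_snake_case('HTTPResponse') -> 'http_response'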
def write(filename, groupname, items, times, features, properties=None,
dformat='dense', chunk_size='auto', sparsity=0.1, mode='a'):
sparsity = sparsity if dformat == 'sparse' else None
data = Data(items, times, features, properties=properties,
sparsity=sparsity, check=True)
Writer... | Write h5features data in a HDF5 file.
This function is a wrapper to the Writer class. It has three purposes:
* Check parameters for errors (see details below),
* Create Items, Times and Features objects
* Send them to the Writer.
:param str filename: HDF5 file to be writted, potentially serving
... |
def download_file(self, url, filename):
self.print_message("Downloading to file '%s' from URL '%s'" % (filename, url))
try:
db_file = urllib2.urlopen(url)
with open(filename, 'wb') as output:
output.write(db_file.read())
db_file.close()
except ... | Download file from url to filename. |
def save_hdf_metadata(filename, metadata, groupname="data", mode="a"):
with _h5py.File(filename, mode) as f:
for key, val in metadata.items():
f[groupname].attrs[key] = val | Save a dictionary of metadata to a group's attrs. |
def _compose(self, *args, **kwargs):
name = kwargs.pop('name', None)
if name:
name = c_str(name)
if len(args) != 0 and len(kwargs) != 0:
raise TypeError('compose only accept input Symbols \
either as positional or keyword arguments, not both')
for ... | Compose symbol on inputs.
This call mutates the current symbol.
Parameters
----------
args:
provide positional arguments
kwargs:
provide keyword arguments
Returns
-------
the resulting symbol |
def create(*context, **kwargs):
items = context
context = ContextStack()
for item in items:
if item is None:
continue
if isinstance(item, ContextStack):
context._stack.extend(item._stack)
else:
context.push(item)... | Build a ContextStack instance from a sequence of context-like items.
This factory-style method is more general than the ContextStack class's
constructor in that, unlike the constructor, the argument list
can itself contain ContextStack instances.
Here is an example illustrating various... |
def ids_sharing_same_pgn(id_x, pgn_x, id_y, pgn_y):
for id_a, pgn_a in zip(id_x, pgn_x):
for id_b, pgn_b in zip(id_y, pgn_y):
if pgn_a == pgn_b:
                yield (id_a, id_b) | Yield arbitration ids which have the same pgn. |
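A minimal usage sketch with hypothetical arbitration IDs and PGNs (purely illustrative values):

id_x, pgn_x = [0x100, 0x101], [0xF004, 0xFEF1]
id_y, pgn_y = [0x200, 0x201], [0xFEF1, 0xF004]
# Pairs whose PGNs match: (0x100, 0x201) and (0x101, 0x200).
assert list(ids_sharing_same_pgn(id_x, pgn_x, id_y, pgn_y)) == [(0x100, 0x201), (0x101, 0x200)]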
def get_connection(db_type, db_pth, user=None, password=None, name=None):
if db_type == 'sqlite':
print(db_pth)
conn = sqlite3.connect(db_pth)
elif db_type == 'mysql':
import mysql.connector
conn = mysql.connector.connect(user=user, password=password, database=name)
elif db_t... | Get a connection to a SQL database. Can be used for SQLite, MySQL or Django MySQL database
Example:
>>> from msp2db.db import get_connection
>>> conn = get_connection('sqlite', 'library.db')
If using "mysql" mysql.connector needs to be installed.
If using "django_mysql" Django needs to be... |
def create_argparser(self):
if self.desc:
if self.title:
fulldesc = '%s\n\n%s' % (self.title, self.desc)
else:
fulldesc = self.desc
else:
fulldesc = self.title
return self.ArgumentParser(command=self, prog=self.name,
... | Factory for arg parser. Can be overridden as long as it returns
an ArgParser compatible instance. |
def shellinput(initialtext='>> ', splitpart=' '):
shelluserinput = input(str(initialtext))
return shelluserinput if splitpart in (
'', None) else shelluserinput.split(splitpart) | Give the user a shell-like interface to enter commands which
are returned as a multi-part list containing the command
and each of the arguments.
:type initialtext: string
:param initialtext: Set the text to be displayed as the prompt.
:type splitpart: string
:param splitpart: The character to ... |
def parse_value_instancewithpath(self, tup_tree):
self.check_node(tup_tree, 'VALUE.INSTANCEWITHPATH')
k = kids(tup_tree)
if len(k) != 2:
raise CIMXMLParseError(
_format("Element {0!A} has invalid number of child elements "
"{1!A} (expecting two... | The VALUE.INSTANCEWITHPATH is used to define a value that comprises
a single CIMInstance with additional information that defines the
absolute path to that object.
::
<!ELEMENT VALUE.INSTANCEWITHPATH (INSTANCEPATH, INSTANCE)> |
def currency_to_protocol(amount):
if type(amount) in [float, int]:
amount = "%.8f" % amount
return int(amount.replace(".", '')) | Convert a string of 'currency units' to 'protocol units'. For instance
converts 19.1 bitcoin to 1910000000 satoshis.
Input is a float, output is an integer that is 1e8 times larger.
It is hard to do this conversion because multiplying
floats causes rounding errors which will mess up the transactions c... |
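A worked example of the conversion described above (values chosen only for illustration):

# 19.1 -> "19.10000000" -> "1910000000" satoshis
assert currency_to_protocol(19.1) == 1910000000
# A string with exactly 8 decimal places goes straight to the dot-stripping step.
assert currency_to_protocol("0.00000001") == 1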
def ensure_float(arr):
if issubclass(arr.dtype.type, (np.integer, np.bool_)):
arr = arr.astype(float)
return arr | Ensure that an array object has a float dtype if possible.
Parameters
----------
arr : array-like
The array whose data type we want to enforce as float.
Returns
-------
float_arr : The original array cast to the float dtype if
possible. Otherwise, the original array is ... |
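A minimal check of the dtype coercion, assuming NumPy is imported as `np` (as the snippet implies) and a default integer dtype on the platform:

import numpy as np

assert ensure_float(np.array([1, 2, 3])).dtype == np.float64   # integer input is cast to float
assert ensure_float(np.array([1.5, 2.5])).dtype == np.float64  # float input is returned unchanged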
def listSites(self, block_name="", site_name=""):
try:
conn = self.dbi.connection()
if block_name:
result = self.blksitelist.execute(conn, block_name)
else:
result = self.sitelist.execute(conn, site_name)
return result
final... | Returns sites. |
def put_log_events(awsclient, log_group_name, log_stream_name, log_events,
sequence_token=None):
client_logs = awsclient.get_client('logs')
request = {
'logGroupName': log_group_name,
'logStreamName': log_stream_name,
'logEvents': log_events
}
if sequence_token... | Put log events for the specified log group and stream.
:param log_group_name: log group name
:param log_stream_name: log stream name
:param log_events: [{'timestamp': 123, 'message': 'string'}, ...]
:param sequence_token: the sequence token
:return: next_token |
def process_batches(self):
for key, batch in iteritems(self._batches):
self._current_tups = batch
self._current_key = key
self.process_batch(key, batch)
if self.auto_ack:
for tup in batch:
self.ack(tup)
self._current... | Iterate through all batches, call process_batch on them, and ack.
Separated out for the rare instances when we want to subclass
BatchingBolt and customize what mechanism causes batches to be
processed. |
def get_label_names(ctx):
labels = [
label
for label in ctx.__dict__
if not label.startswith("_")
and label
not in [
"children",
"exception",
"invokingState",
"parentCtx",
"parser",
"start",
"... | Get labels defined in an ANTLR context for a parser rule |
def StaticAdd(cls, queue_urn, rdf_value, mutation_pool=None):
if not isinstance(rdf_value, cls.rdf_type):
raise ValueError("This collection only accepts values of type %s." %
cls.rdf_type.__name__)
if mutation_pool is None:
raise ValueError("Mutation pool can't be none.")
    ... | Adds an rdf value to the queue.
Adds an rdf value to a queue. Does not require that the queue be locked, or
even open. NOTE: The caller is responsible for ensuring that the queue
exists and is of the correct type.
Args:
queue_urn: The urn of the queue to add to.
rdf_value: The rdf value to a... |
def ApprovalUrnBuilder(subject, user, approval_id):
return aff4.ROOT_URN.Add("ACL").Add(subject).Add(user).Add(approval_id) | Encode an approval URN. |
async def exist(self, key, param=None):
identity = self._gen_identity(key, param)
return await self.client.exists(identity) | see if specific identity exists |
def get(self, sid):
return QueueContext(self._version, account_sid=self._solution['account_sid'], sid=sid, ) | Constructs a QueueContext
:param sid: The unique string that identifies this resource
:returns: twilio.rest.api.v2010.account.queue.QueueContext
:rtype: twilio.rest.api.v2010.account.queue.QueueContext |
def createPenWidthCti(nodeName, defaultData=1.0, zeroValueText=None):
return FloatCti(nodeName, defaultData=defaultData, specialValueText=zeroValueText,
minValue=0.1 if zeroValueText is None else 0.0,
maxValue=100, stepSize=0.1, decimals=1) | Creates a FloatCti with defaults for configuring a QPen width.
If specialValueZero is set, this string will be displayed when 0.0 is selected.
If specialValueZero is None, the minValue will be 0.1 |
def make_relative(base, obj):
uri = obj.get("location", obj.get("path"))
if ":" in uri.split("/")[0] and not uri.startswith("file://"):
pass
else:
if uri.startswith("file://"):
uri = uri_file_path(uri)
obj["location"] = os.path.relpath(uri, base) | Relativize the location URI of a File or Directory object. |
def prune_feed_map(meta_graph, feed_map):
node_names = [x.name + ":0" for x in meta_graph.graph_def.node]
keys_to_delete = []
for k, _ in feed_map.items():
if k not in node_names:
keys_to_delete.append(k)
for k in keys_to_delete:
del feed_map[k] | Function to prune the feedmap of nodes which no longer exist. |
def open_xmldoc(fobj, **kwargs):
from ligo.lw.ligolw import (Document, LIGOLWContentHandler)
from ligo.lw.lsctables import use_in
from ligo.lw.utils import (load_filename, load_fileobj)
use_in(kwargs.setdefault('contenthandler', LIGOLWContentHandler))
try:
if isinstance(fobj, string_types):
... | Try and open an existing LIGO_LW-format file, or create a new Document
Parameters
----------
fobj : `str`, `file`
file path or open file object to read
**kwargs
other keyword arguments to pass to
:func:`~ligo.lw.utils.load_filename`, or
:func:`~ligo.lw.utils.load_fileob... |
def set_interface(interface, name=''):
global interfaces
if not interface: raise ValueError('interface is empty')
if name in interfaces:
interfaces[name].close()
interfaces[name] = interface | don't want to bother with a dsn? Use this method to make an interface available |
def use_mutation(module_path, operator, occurrence):
original_code, mutated_code = apply_mutation(module_path, operator,
occurrence)
try:
yield original_code, mutated_code
finally:
with module_path.open(mode='wt', encoding='utf-8') as handle:
... | A context manager that applies a mutation for the duration of a with-block.
This applies a mutation to a file on disk, and after the with-block it puts the unmutated code
back in place.
Args:
module_path: The path to the module to mutate.
operator: The `Operator` instance to use.
oc... |