text stringlengths 89 104k | code_tokens list | avg_line_len float64 7.91 980 | score float64 0 630 |
|---|---|---|---|
def get_rec_column_descr(self, colnum, vstorage):
"""
Get a descriptor entry for the specified column.
parameters
----------
colnum: integer
The column number, 0 offset
vstorage: string
See docs in read_columns
"""
npy_type, isvar, istbit = self._get_tbl_numpy_dtype(colnum)
name = self._info['colinfo'][colnum]['name']
if isvar:
if vstorage == 'object':
descr = (name, 'O')
else:
tform = self._info['colinfo'][colnum]['tform']
max_size = _extract_vararray_max(tform)
if max_size <= 0:
name = self._info['colinfo'][colnum]['name']
mess = 'Will read as an object field'
if max_size < 0:
mess = "Column '%s': No maximum size: '%s'. %s"
mess = mess % (name, tform, mess)
warnings.warn(mess, FITSRuntimeWarning)
else:
mess = "Column '%s': Max size is zero: '%s'. %s"
mess = mess % (name, tform, mess)
warnings.warn(mess, FITSRuntimeWarning)
# we are forced to read this as an object array
return self.get_rec_column_descr(colnum, 'object')
if npy_type[0] == 'S':
# variable length string columns cannot
# themselves be arrays I don't think
npy_type = 'S%d' % max_size
descr = (name, npy_type)
elif npy_type[0] == 'U':
# variable length string columns cannot
# themselves be arrays I don't think
npy_type = 'U%d' % max_size
descr = (name, npy_type)
else:
descr = (name, npy_type, max_size)
else:
tdim = self._info['colinfo'][colnum]['tdim']
shape = _tdim2shape(
tdim, name,
is_string=(npy_type[0] == 'S' or npy_type[0] == 'U'))
if shape is not None:
descr = (name, npy_type, shape)
else:
descr = (name, npy_type)
return descr, isvar | [
"def",
"get_rec_column_descr",
"(",
"self",
",",
"colnum",
",",
"vstorage",
")",
":",
"npy_type",
",",
"isvar",
",",
"istbit",
"=",
"self",
".",
"_get_tbl_numpy_dtype",
"(",
"colnum",
")",
"name",
"=",
"self",
".",
"_info",
"[",
"'colinfo'",
"]",
"[",
"c... | 39.431034 | 16.810345 |
def get(self, receiver_id=None, event_id=None):
"""Handle GET request."""
event = self._get_event(receiver_id, event_id)
return make_response(event) | [
"def",
"get",
"(",
"self",
",",
"receiver_id",
"=",
"None",
",",
"event_id",
"=",
"None",
")",
":",
"event",
"=",
"self",
".",
"_get_event",
"(",
"receiver_id",
",",
"event_id",
")",
"return",
"make_response",
"(",
"event",
")"
] | 42.25 | 6.5 |
def new_job_file(frontier, job_conf_file):
'''Returns new Job.'''
logging.info("loading %s", job_conf_file)
with open(job_conf_file) as f:
job_conf = yaml.safe_load(f)
return new_job(frontier, job_conf) | [
"def",
"new_job_file",
"(",
"frontier",
",",
"job_conf_file",
")",
":",
"logging",
".",
"info",
"(",
"\"loading %s\"",
",",
"job_conf_file",
")",
"with",
"open",
"(",
"job_conf_file",
")",
"as",
"f",
":",
"job_conf",
"=",
"yaml",
".",
"safe_load",
"(",
"f"... | 37.5 | 5.5 |
def __neighbor_indexes_points(self, optic_object):
"""!
@brief Return neighbors of the specified object in case of sequence of points.
@param[in] optic_object (optics_descriptor): Object for which neighbors should be returned in line with connectivity radius.
@return (list) List of indexes of neighbors in line the connectivity radius.
"""
kdnodes = self.__kdtree.find_nearest_dist_nodes(self.__sample_pointer[optic_object.index_object], self.__eps)
return [[node_tuple[1].payload, math.sqrt(node_tuple[0])] for node_tuple in kdnodes if
node_tuple[1].payload != optic_object.index_object] | [
"def",
"__neighbor_indexes_points",
"(",
"self",
",",
"optic_object",
")",
":",
"kdnodes",
"=",
"self",
".",
"__kdtree",
".",
"find_nearest_dist_nodes",
"(",
"self",
".",
"__sample_pointer",
"[",
"optic_object",
".",
"index_object",
"]",
",",
"self",
".",
"__eps... | 55.333333 | 39.416667 |
def get_current_async():
"""Return a reference to the currently executing Async job object
or None if not in an Async job.
"""
local_context = _local.get_local_context()
if local_context._executing_async:
return local_context._executing_async[-1]
raise errors.NotInContextError('Not in an _ExecutionContext.') | [
"def",
"get_current_async",
"(",
")",
":",
"local_context",
"=",
"_local",
".",
"get_local_context",
"(",
")",
"if",
"local_context",
".",
"_executing_async",
":",
"return",
"local_context",
".",
"_executing_async",
"[",
"-",
"1",
"]",
"raise",
"errors",
".",
... | 33.4 | 14.4 |
def attack_batch(self, imgs, labs):
"""
Run the attack on a batch of instance and labels.
"""
def compare(x, y):
if not isinstance(x, (float, int, np.int64)):
x = np.copy(x)
if self.TARGETED:
x[y] -= self.CONFIDENCE
else:
x[y] += self.CONFIDENCE
x = np.argmax(x)
if self.TARGETED:
return x == y
else:
return x != y
batch_size = self.batch_size
oimgs = np.clip(imgs, self.clip_min, self.clip_max)
# re-scale instances to be within range [0, 1]
imgs = (imgs - self.clip_min) / (self.clip_max - self.clip_min)
imgs = np.clip(imgs, 0, 1)
# now convert to [-1, 1]
imgs = (imgs * 2) - 1
# convert to tanh-space
imgs = np.arctanh(imgs * .999999)
# set the lower and upper bounds accordingly
lower_bound = np.zeros(batch_size)
CONST = np.ones(batch_size) * self.initial_const
upper_bound = np.ones(batch_size) * 1e10
# placeholders for the best l2, score, and instance attack found so far
o_bestl2 = [1e10] * batch_size
o_bestscore = [-1] * batch_size
o_bestattack = np.copy(oimgs)
for outer_step in range(self.BINARY_SEARCH_STEPS):
# completely reset adam's internal state.
self.sess.run(self.init)
batch = imgs[:batch_size]
batchlab = labs[:batch_size]
bestl2 = [1e10] * batch_size
bestscore = [-1] * batch_size
_logger.debug(" Binary search step %s of %s",
outer_step, self.BINARY_SEARCH_STEPS)
# The last iteration (if we run many steps) repeat the search once.
if self.repeat and outer_step == self.BINARY_SEARCH_STEPS - 1:
CONST = upper_bound
# set the variables so that we don't have to send them over again
self.sess.run(
self.setup, {
self.assign_timg: batch,
self.assign_tlab: batchlab,
self.assign_const: CONST
})
prev = 1e6
for iteration in range(self.MAX_ITERATIONS):
# perform the attack
_, l, l2s, scores, nimg = self.sess.run([
self.train, self.loss, self.l2dist, self.output,
self.newimg
])
if iteration % ((self.MAX_ITERATIONS // 10) or 1) == 0:
_logger.debug((" Iteration {} of {}: loss={:.3g} " +
"l2={:.3g} f={:.3g}").format(
iteration, self.MAX_ITERATIONS, l,
np.mean(l2s), np.mean(scores)))
# check if we should abort search if we're getting nowhere.
if self.ABORT_EARLY and \
iteration % ((self.MAX_ITERATIONS // 10) or 1) == 0:
if l > prev * .9999:
msg = " Failed to make progress; stop early"
_logger.debug(msg)
break
prev = l
# adjust the best result found so far
for e, (l2, sc, ii) in enumerate(zip(l2s, scores, nimg)):
lab = np.argmax(batchlab[e])
if l2 < bestl2[e] and compare(sc, lab):
bestl2[e] = l2
bestscore[e] = np.argmax(sc)
if l2 < o_bestl2[e] and compare(sc, lab):
o_bestl2[e] = l2
o_bestscore[e] = np.argmax(sc)
o_bestattack[e] = ii
# adjust the constant as needed
for e in range(batch_size):
if compare(bestscore[e], np.argmax(batchlab[e])) and \
bestscore[e] != -1:
# success, divide const by two
upper_bound[e] = min(upper_bound[e], CONST[e])
if upper_bound[e] < 1e9:
CONST[e] = (lower_bound[e] + upper_bound[e]) / 2
else:
# failure, either multiply by 10 if no solution found yet
# or do binary search with the known upper bound
lower_bound[e] = max(lower_bound[e], CONST[e])
if upper_bound[e] < 1e9:
CONST[e] = (lower_bound[e] + upper_bound[e]) / 2
else:
CONST[e] *= 10
_logger.debug(" Successfully generated adversarial examples " +
"on {} of {} instances.".format(
sum(upper_bound < 1e9), batch_size))
o_bestl2 = np.array(o_bestl2)
mean = np.mean(np.sqrt(o_bestl2[o_bestl2 < 1e9]))
_logger.debug(" Mean successful distortion: {:.4g}".format(mean))
# return the best solution found
o_bestl2 = np.array(o_bestl2)
return o_bestattack | [
"def",
"attack_batch",
"(",
"self",
",",
"imgs",
",",
"labs",
")",
":",
"def",
"compare",
"(",
"x",
",",
"y",
")",
":",
"if",
"not",
"isinstance",
"(",
"x",
",",
"(",
"float",
",",
"int",
",",
"np",
".",
"int64",
")",
")",
":",
"x",
"=",
"np"... | 34.934959 | 17.552846 |
def viterbi_decode(tag_sequence: torch.Tensor,
transition_matrix: torch.Tensor,
tag_observations: Optional[List[int]] = None):
"""
Perform Viterbi decoding in log space over a sequence given a transition matrix
specifying pairwise (transition) potentials between tags and a matrix of shape
(sequence_length, num_tags) specifying unary potentials for possible tags per
timestep.
Parameters
----------
tag_sequence : torch.Tensor, required.
A tensor of shape (sequence_length, num_tags) representing scores for
a set of tags over a given sequence.
transition_matrix : torch.Tensor, required.
A tensor of shape (num_tags, num_tags) representing the binary potentials
for transitioning between a given pair of tags.
tag_observations : Optional[List[int]], optional, (default = None)
A list of length ``sequence_length`` containing the class ids of observed
elements in the sequence, with unobserved elements being set to -1. Note that
it is possible to provide evidence which results in degenerate labelings if
the sequences of tags you provide as evidence cannot transition between each
other, or those transitions are extremely unlikely. In this situation we log a
warning, but the responsibility for providing self-consistent evidence ultimately
lies with the user.
Returns
-------
viterbi_path : List[int]
The tag indices of the maximum likelihood tag sequence.
viterbi_score : torch.Tensor
The score of the viterbi path.
"""
sequence_length, num_tags = list(tag_sequence.size())
if tag_observations:
if len(tag_observations) != sequence_length:
raise ConfigurationError("Observations were provided, but they were not the same length "
"as the sequence. Found sequence of length: {} and evidence: {}"
.format(sequence_length, tag_observations))
else:
tag_observations = [-1 for _ in range(sequence_length)]
path_scores = []
path_indices = []
if tag_observations[0] != -1:
one_hot = torch.zeros(num_tags)
one_hot[tag_observations[0]] = 100000.
path_scores.append(one_hot)
else:
path_scores.append(tag_sequence[0, :])
# Evaluate the scores for all possible paths.
for timestep in range(1, sequence_length):
# Add pairwise potentials to current scores.
summed_potentials = path_scores[timestep - 1].unsqueeze(-1) + transition_matrix
scores, paths = torch.max(summed_potentials, 0)
# If we have an observation for this timestep, use it
# instead of the distribution over tags.
observation = tag_observations[timestep]
# Warn the user if they have passed
# invalid/extremely unlikely evidence.
if tag_observations[timestep - 1] != -1:
if transition_matrix[tag_observations[timestep - 1], observation] < -10000:
logger.warning("The pairwise potential between tags you have passed as "
"observations is extremely unlikely. Double check your evidence "
"or transition potentials!")
if observation != -1:
one_hot = torch.zeros(num_tags)
one_hot[observation] = 100000.
path_scores.append(one_hot)
else:
path_scores.append(tag_sequence[timestep, :] + scores.squeeze())
path_indices.append(paths.squeeze())
# Construct the most likely sequence backwards.
viterbi_score, best_path = torch.max(path_scores[-1], 0)
viterbi_path = [int(best_path.numpy())]
for backward_timestep in reversed(path_indices):
viterbi_path.append(int(backward_timestep[viterbi_path[-1]]))
# Reverse the backward path.
viterbi_path.reverse()
return viterbi_path, viterbi_score | [
"def",
"viterbi_decode",
"(",
"tag_sequence",
":",
"torch",
".",
"Tensor",
",",
"transition_matrix",
":",
"torch",
".",
"Tensor",
",",
"tag_observations",
":",
"Optional",
"[",
"List",
"[",
"int",
"]",
"]",
"=",
"None",
")",
":",
"sequence_length",
",",
"n... | 46.464286 | 22.607143 |
def image_search(auth=None, **kwargs):
'''
Search for images
CLI Example:
.. code-block:: bash
salt '*' glanceng.image_search name=image1
salt '*' glanceng.image_search
'''
cloud = get_operator_cloud(auth)
kwargs = _clean_kwargs(**kwargs)
return cloud.search_images(**kwargs) | [
"def",
"image_search",
"(",
"auth",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"cloud",
"=",
"get_operator_cloud",
"(",
"auth",
")",
"kwargs",
"=",
"_clean_kwargs",
"(",
"*",
"*",
"kwargs",
")",
"return",
"cloud",
".",
"search_images",
"(",
"*",
"... | 22.357143 | 19.071429 |
def open(self):
"""
Calls SetupDiGetClassDevs to obtain a handle to an opaque device
information set that describes the device interfaces supported by all
the USB collections currently installed in the system. The
application should specify DIGCF.PRESENT and DIGCF.INTERFACEDEVICE
in the Flags parameter passed to SetupDiGetClassDevs.
"""
self.h_info = SetupDiGetClassDevs(byref(self.guid), None, None,
(DIGCF.PRESENT | DIGCF.DEVICEINTERFACE) )
return self.h_info | [
"def",
"open",
"(",
"self",
")",
":",
"self",
".",
"h_info",
"=",
"SetupDiGetClassDevs",
"(",
"byref",
"(",
"self",
".",
"guid",
")",
",",
"None",
",",
"None",
",",
"(",
"DIGCF",
".",
"PRESENT",
"|",
"DIGCF",
".",
"DEVICEINTERFACE",
")",
")",
"return... | 46 | 23.5 |
def load_off(file_obj, **kwargs):
"""
Load an OFF file into the kwargs for a Trimesh constructor
Parameters
----------
file_obj : file object
Contains an OFF file
Returns
----------
loaded : dict
kwargs for Trimesh constructor
"""
header_string = file_obj.readline()
if hasattr(header_string, 'decode'):
header_string = header_string.decode('utf-8')
header_string = header_string.strip().upper()
if not header_string == 'OFF':
raise NameError('Not an OFF file! Header was ' +
header_string)
header = np.array(
file_obj.readline().strip().split()).astype(np.int64)
vertex_count, face_count = header[:2]
# read the rest of the file
blob = np.array(file_obj.read().strip().split())
# there should be 3 points per vertex
# and 3 indexes + 1 count per face
data_ok = np.sum(header * [3, 4, 0]) == len(blob)
if not data_ok:
raise NameError('Incorrect number of vertices or faces!')
vertices = blob[:(vertex_count * 3)].astype(
np.float64).reshape((-1, 3))
# strip the first column which is a per- face count
faces = blob[(vertex_count * 3):].astype(
np.int64).reshape((-1, 4))[:, 1:]
kwargs = {'vertices': vertices,
'faces': faces}
return kwargs | [
"def",
"load_off",
"(",
"file_obj",
",",
"*",
"*",
"kwargs",
")",
":",
"header_string",
"=",
"file_obj",
".",
"readline",
"(",
")",
"if",
"hasattr",
"(",
"header_string",
",",
"'decode'",
")",
":",
"header_string",
"=",
"header_string",
".",
"decode",
"(",... | 29.422222 | 16.488889 |
def set_breakpoints(self, breakpoints):
"""Set breakpoints"""
self.clear_breakpoints()
for line_number, condition in breakpoints:
self.toogle_breakpoint(line_number, condition)
self.breakpoints = self.get_breakpoints() | [
"def",
"set_breakpoints",
"(",
"self",
",",
"breakpoints",
")",
":",
"self",
".",
"clear_breakpoints",
"(",
")",
"for",
"line_number",
",",
"condition",
"in",
"breakpoints",
":",
"self",
".",
"toogle_breakpoint",
"(",
"line_number",
",",
"condition",
")",
"sel... | 42.833333 | 7.666667 |
def generate_code_verifier(n_bytes=64):
"""
source: https://github.com/openstack/deb-python-oauth2client
Generates a 'code_verifier' as described in section 4.1 of RFC 7636.
This is a 'high-entropy cryptographic random string' that will be
impractical for an attacker to guess.
Args:
n_bytes: integer between 31 and 96, inclusive. default: 64
number of bytes of entropy to include in verifier.
Returns:
Bytestring, representing urlsafe base64-encoded random data.
"""
verifier = base64.urlsafe_b64encode(
os.urandom(n_bytes)
).rstrip(b'=').decode('utf-8')
# https://tools.ietf.org/html/rfc7636#section-4.1
# minimum length of 43 characters and a maximum length of 128 characters.
if len(verifier) < 43:
raise ValueError("Verifier too short. n_bytes must be > 30.")
elif len(verifier) > 128:
raise ValueError("Verifier too long. n_bytes must be < 97.")
else:
return verifier | [
"def",
"generate_code_verifier",
"(",
"n_bytes",
"=",
"64",
")",
":",
"verifier",
"=",
"base64",
".",
"urlsafe_b64encode",
"(",
"os",
".",
"urandom",
"(",
"n_bytes",
")",
")",
".",
"rstrip",
"(",
"b'='",
")",
".",
"decode",
"(",
"'utf-8'",
")",
"# https:... | 40.458333 | 19.208333 |
def unwrap_single(self):
"""
Unwrap the single Result item. Call this from single-operation
methods to return the actual result
:return: The actual result
"""
try:
return next(self.itervalues())
except AttributeError:
return next(iter(self.values())) | [
"def",
"unwrap_single",
"(",
"self",
")",
":",
"try",
":",
"return",
"next",
"(",
"self",
".",
"itervalues",
"(",
")",
")",
"except",
"AttributeError",
":",
"return",
"next",
"(",
"iter",
"(",
"self",
".",
"values",
"(",
")",
")",
")"
] | 32.1 | 9.9 |
def command(self, rs_id, command, *args):
"""Call a ReplicaSet method."""
rs = self._storage[rs_id]
try:
return getattr(rs, command)(*args)
except AttributeError:
raise ValueError("Cannot issue the command %r to ReplicaSet %s"
% (command, rs_id)) | [
"def",
"command",
"(",
"self",
",",
"rs_id",
",",
"command",
",",
"*",
"args",
")",
":",
"rs",
"=",
"self",
".",
"_storage",
"[",
"rs_id",
"]",
"try",
":",
"return",
"getattr",
"(",
"rs",
",",
"command",
")",
"(",
"*",
"args",
")",
"except",
"Att... | 40.5 | 11.875 |
def to_list_of(self, state):
# type: (S) -> List[B]
'''Returns a list of all the foci within `state`.
Requires kind Fold. This method will raise TypeError if the
optic has no way to get any foci.
'''
if not self._is_kind(Fold):
raise TypeError('Must be an instance of Fold to .to_list_of()')
pure = lambda a: Const([])
func = lambda a: Const([a])
return self.apply(func, pure, state).unwrap() | [
"def",
"to_list_of",
"(",
"self",
",",
"state",
")",
":",
"# type: (S) -> List[B]",
"if",
"not",
"self",
".",
"_is_kind",
"(",
"Fold",
")",
":",
"raise",
"TypeError",
"(",
"'Must be an instance of Fold to .to_list_of()'",
")",
"pure",
"=",
"lambda",
"a",
":",
... | 35.846154 | 18.461538 |
def p_declarations(self, p):
"""declarations : declarations declaration
| declaration"""
n = len(p)
if n == 3:
p[0] = p[1] + [p[2]]
elif n == 2:
p[0] = [p[1]] | [
"def",
"p_declarations",
"(",
"self",
",",
"p",
")",
":",
"n",
"=",
"len",
"(",
"p",
")",
"if",
"n",
"==",
"3",
":",
"p",
"[",
"0",
"]",
"=",
"p",
"[",
"1",
"]",
"+",
"[",
"p",
"[",
"2",
"]",
"]",
"elif",
"n",
"==",
"2",
":",
"p",
"["... | 28.875 | 12.375 |
def parse_sidebar(self, media_page):
"""Parses the DOM and returns media attributes in the sidebar.
:type media_page: :class:`bs4.BeautifulSoup`
:param media_page: MAL media page's DOM
:rtype: dict
:return: media attributes.
:raises: InvalidMediaError, MalformedMediaPageError
"""
media_info = {}
# if MAL says the series doesn't exist, raise an InvalidMediaError.
error_tag = media_page.find(u'div', {'class': 'badresult'})
if error_tag:
raise InvalidMediaError(self.id)
try:
title_tag = media_page.find(u'div', {'id': 'contentWrapper'}).find(u'h1')
if not title_tag.find(u'div'):
# otherwise, raise a MalformedMediaPageError.
raise MalformedMediaPageError(self.id, media_page, message="Could not find title div")
except:
if not self.session.suppress_parse_exceptions:
raise
try:
utilities.extract_tags(title_tag.find_all())
media_info[u'title'] = title_tag.text.strip()
except:
if not self.session.suppress_parse_exceptions:
raise
info_panel_first = media_page.find(u'div', {'id': 'content'}).find(u'table').find(u'td')
try:
picture_tag = info_panel_first.find(u'img')
media_info[u'picture'] = picture_tag.get(u'src').decode('utf-8')
except:
if not self.session.suppress_parse_exceptions:
raise
try:
# assemble alternative titles for this series.
media_info[u'alternative_titles'] = {}
alt_titles_header = info_panel_first.find(u'h2', text=u'Alternative Titles')
if alt_titles_header:
next_tag = alt_titles_header.find_next_sibling(u'div', {'class': 'spaceit_pad'})
while True:
if next_tag is None or not next_tag.find(u'span', {'class': 'dark_text'}):
# not a language node, break.
break
# get language and remove the node.
language = next_tag.find(u'span').text[:-1]
utilities.extract_tags(next_tag.find_all(u'span', {'class': 'dark_text'}))
names = next_tag.text.strip().split(u', ')
media_info[u'alternative_titles'][language] = names
next_tag = next_tag.find_next_sibling(u'div', {'class': 'spaceit_pad'})
except:
if not self.session.suppress_parse_exceptions:
raise
try:
type_tag = info_panel_first.find(text=u'Type:').parent.parent
utilities.extract_tags(type_tag.find_all(u'span', {'class': 'dark_text'}))
media_info[u'type'] = type_tag.text.strip()
except:
if not self.session.suppress_parse_exceptions:
raise
try:
status_tag = info_panel_first.find(text=u'Status:').parent.parent
utilities.extract_tags(status_tag.find_all(u'span', {'class': 'dark_text'}))
media_info[u'status'] = status_tag.text.strip()
except:
if not self.session.suppress_parse_exceptions:
raise
try:
genres_tag = info_panel_first.find(text=u'Genres:').parent.parent
utilities.extract_tags(genres_tag.find_all(u'span', {'class': 'dark_text'}))
media_info[u'genres'] = []
for genre_link in genres_tag.find_all('a'):
link_parts = genre_link.get('href').split('[]=')
# of the form /anime|manga.php?genre[]=1
genre = self.session.genre(int(link_parts[1])).set({'name': genre_link.text})
media_info[u'genres'].append(genre)
except:
if not self.session.suppress_parse_exceptions:
raise
try:
# grab statistics for this media.
score_tag = info_panel_first.find(text=u'Score:').parent.parent
# get score and number of users.
users_node = [x for x in score_tag.find_all(u'small') if u'scored by' in x.text][0]
num_users = int(users_node.text.split(u'scored by ')[-1].split(u' users')[0])
utilities.extract_tags(score_tag.find_all())
media_info[u'score'] = (decimal.Decimal(score_tag.text.strip()), num_users)
except:
if not self.session.suppress_parse_exceptions:
raise
try:
rank_tag = info_panel_first.find(text=u'Ranked:').parent.parent
utilities.extract_tags(rank_tag.find_all())
media_info[u'rank'] = int(rank_tag.text.strip()[1:].replace(u',', ''))
except:
if not self.session.suppress_parse_exceptions:
raise
try:
popularity_tag = info_panel_first.find(text=u'Popularity:').parent.parent
utilities.extract_tags(popularity_tag.find_all())
media_info[u'popularity'] = int(popularity_tag.text.strip()[1:].replace(u',', ''))
except:
if not self.session.suppress_parse_exceptions:
raise
try:
members_tag = info_panel_first.find(text=u'Members:').parent.parent
utilities.extract_tags(members_tag.find_all())
media_info[u'members'] = int(members_tag.text.strip().replace(u',', ''))
except:
if not self.session.suppress_parse_exceptions:
raise
try:
favorites_tag = info_panel_first.find(text=u'Favorites:').parent.parent
utilities.extract_tags(favorites_tag.find_all())
media_info[u'favorites'] = int(favorites_tag.text.strip().replace(u',', ''))
except:
if not self.session.suppress_parse_exceptions:
raise
try:
# get popular tags.
tags_header = media_page.find(u'h2', text=u'Popular Tags')
tags_tag = tags_header.find_next_sibling(u'span')
media_info[u'popular_tags'] = {}
for tag_link in tags_tag.find_all('a'):
tag = self.session.tag(tag_link.text)
num_people = int(re.match(r'(?P<people>[0-9]+) people', tag_link.get('title')).group('people'))
media_info[u'popular_tags'][tag] = num_people
except:
if not self.session.suppress_parse_exceptions:
raise
return media_info | [
"def",
"parse_sidebar",
"(",
"self",
",",
"media_page",
")",
":",
"media_info",
"=",
"{",
"}",
"# if MAL says the series doesn't exist, raise an InvalidMediaError.",
"error_tag",
"=",
"media_page",
".",
"find",
"(",
"u'div'",
",",
"{",
"'class'",
":",
"'badresult'",
... | 37.119205 | 25.430464 |
def get_queryset_filters(self, query):
"""
Return the filtered queryset
"""
conditions = Q()
for field_name in self.fields:
conditions |= Q(**{
self._construct_qs_filter(field_name): query
})
return conditions | [
"def",
"get_queryset_filters",
"(",
"self",
",",
"query",
")",
":",
"conditions",
"=",
"Q",
"(",
")",
"for",
"field_name",
"in",
"self",
".",
"fields",
":",
"conditions",
"|=",
"Q",
"(",
"*",
"*",
"{",
"self",
".",
"_construct_qs_filter",
"(",
"field_nam... | 28.8 | 9.4 |
def _os_walk(directory, recurse=True, **kwargs):
"""
Work like os.walk but if recurse is False just list current directory
"""
if recurse:
for root, dirs, files in os.walk(directory, **kwargs):
yield root, dirs, files
else:
files = []
for filename in os.listdir(directory):
if os.path.isfile(os.path.join(directory, filename)):
files.append(filename)
yield directory, [], files | [
"def",
"_os_walk",
"(",
"directory",
",",
"recurse",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"recurse",
":",
"for",
"root",
",",
"dirs",
",",
"files",
"in",
"os",
".",
"walk",
"(",
"directory",
",",
"*",
"*",
"kwargs",
")",
":",
"yi... | 35.153846 | 14.230769 |
def delete(self):
"""
Destructor.
"""
if self.glucose:
pysolvers.glucose3_del(self.glucose)
self.glucose = None
if self.prfile:
self.prfile.close() | [
"def",
"delete",
"(",
"self",
")",
":",
"if",
"self",
".",
"glucose",
":",
"pysolvers",
".",
"glucose3_del",
"(",
"self",
".",
"glucose",
")",
"self",
".",
"glucose",
"=",
"None",
"if",
"self",
".",
"prfile",
":",
"self",
".",
"prfile",
".",
"close",... | 20.636364 | 15.545455 |
def _inferSchema(self, rdd, samplingRatio=None, names=None):
"""
Infer schema from an RDD of Row or tuple.
:param rdd: an RDD of Row or tuple
:param samplingRatio: sampling ratio, or no sampling (default)
:return: :class:`pyspark.sql.types.StructType`
"""
first = rdd.first()
if not first:
raise ValueError("The first row in RDD is empty, "
"can not infer schema")
if type(first) is dict:
warnings.warn("Using RDD of dict to inferSchema is deprecated. "
"Use pyspark.sql.Row instead")
if samplingRatio is None:
schema = _infer_schema(first, names=names)
if _has_nulltype(schema):
for row in rdd.take(100)[1:]:
schema = _merge_type(schema, _infer_schema(row, names=names))
if not _has_nulltype(schema):
break
else:
raise ValueError("Some of types cannot be determined by the "
"first 100 rows, please try again with sampling")
else:
if samplingRatio < 0.99:
rdd = rdd.sample(False, float(samplingRatio))
schema = rdd.map(lambda row: _infer_schema(row, names)).reduce(_merge_type)
return schema | [
"def",
"_inferSchema",
"(",
"self",
",",
"rdd",
",",
"samplingRatio",
"=",
"None",
",",
"names",
"=",
"None",
")",
":",
"first",
"=",
"rdd",
".",
"first",
"(",
")",
"if",
"not",
"first",
":",
"raise",
"ValueError",
"(",
"\"The first row in RDD is empty, \"... | 43.741935 | 19.225806 |
def pushtx(tx_hex, coin_symbol='btc', api_key=None):
'''
Takes a signed transaction hex binary (and coin_symbol) and broadcasts it to the bitcoin network.
'''
assert is_valid_coin_symbol(coin_symbol)
assert api_key, 'api_key required'
url = _get_pushtx_url(coin_symbol=coin_symbol)
logger.info(url)
data = {'tx': tx_hex}
params = {'token': api_key}
r = requests.post(url, json=data, params=params, verify=True, timeout=TIMEOUT_IN_SECONDS)
return get_valid_json(r) | [
"def",
"pushtx",
"(",
"tx_hex",
",",
"coin_symbol",
"=",
"'btc'",
",",
"api_key",
"=",
"None",
")",
":",
"assert",
"is_valid_coin_symbol",
"(",
"coin_symbol",
")",
"assert",
"api_key",
",",
"'api_key required'",
"url",
"=",
"_get_pushtx_url",
"(",
"coin_symbol",... | 27.555556 | 28 |
def logger_initial_config(service_name=None,
log_level=None,
logger_format=None,
logger_date_format=None):
'''Set initial logging configurations.
:param service_name: Name of the service
:type logger: String
:param log_level: A string or integer corresponding to a Python logging level
:type log_level: String
:param logger_format: A string defining the format of the logs
:type log_level: String
:param logger_date_format: A string defining the format of the date/time in the logs
:type log_level: String
:rtype: None
'''
if not log_level:
log_level = os.getenv('LOGGING_LEVEL', 'DEBUG')
if not logger_format:
logger_format = (
"%(asctime)s.%(msecs)06dZ|"
"%(levelname)s: {}: %(message)s"
).format(service_name)
if not logger_date_format:
logger_date_format = os.getenv('LOGGING_DATE_FORMAT', "%Y-%m-%dT%H:%M:%S")
logging.basicConfig(level=log_level,
format=logger_format,
datefmt=logger_date_format) | [
"def",
"logger_initial_config",
"(",
"service_name",
"=",
"None",
",",
"log_level",
"=",
"None",
",",
"logger_format",
"=",
"None",
",",
"logger_date_format",
"=",
"None",
")",
":",
"if",
"not",
"log_level",
":",
"log_level",
"=",
"os",
".",
"getenv",
"(",
... | 32.852941 | 20.029412 |
def nodes(self, type=None, failed=False, participant_id=None):
"""Get nodes in the network.
type specifies the type of Node. Failed can be "all", False
(default) or True. If a participant_id is passed only
nodes with that participant_id will be returned.
"""
if type is None:
type = Node
if not issubclass(type, Node):
raise TypeError("{} is not a valid node type.".format(type))
if failed not in ["all", False, True]:
raise ValueError("{} is not a valid node failed".format(failed))
if participant_id is not None:
if failed == "all":
return type.query.filter_by(
network_id=self.id, participant_id=participant_id
).all()
else:
return type.query.filter_by(
network_id=self.id, participant_id=participant_id, failed=failed
).all()
else:
if failed == "all":
return type.query.filter_by(network_id=self.id).all()
else:
return type.query.filter_by(failed=failed, network_id=self.id).all() | [
"def",
"nodes",
"(",
"self",
",",
"type",
"=",
"None",
",",
"failed",
"=",
"False",
",",
"participant_id",
"=",
"None",
")",
":",
"if",
"type",
"is",
"None",
":",
"type",
"=",
"Node",
"if",
"not",
"issubclass",
"(",
"type",
",",
"Node",
")",
":",
... | 38.633333 | 21.2 |
def get_contour_pd_plot(self):
"""
Plot a contour phase diagram plot, where phase triangles are colored
according to degree of instability by interpolation. Currently only
works for 3-component phase diagrams.
Returns:
A matplotlib plot object.
"""
from scipy import interpolate
from matplotlib import cm
pd = self._pd
entries = pd.qhull_entries
data = np.array(pd.qhull_data)
plt = self._get_2d_plot()
data[:, 0:2] = triangular_coord(data[:, 0:2]).transpose()
for i, e in enumerate(entries):
data[i, 2] = self._pd.get_e_above_hull(e)
gridsize = 0.005
xnew = np.arange(0, 1., gridsize)
ynew = np.arange(0, 1, gridsize)
f = interpolate.LinearNDInterpolator(data[:, 0:2], data[:, 2])
znew = np.zeros((len(ynew), len(xnew)))
for (i, xval) in enumerate(xnew):
for (j, yval) in enumerate(ynew):
znew[j, i] = f(xval, yval)
plt.contourf(xnew, ynew, znew, 1000, cmap=cm.autumn_r)
plt.colorbar()
return plt | [
"def",
"get_contour_pd_plot",
"(",
"self",
")",
":",
"from",
"scipy",
"import",
"interpolate",
"from",
"matplotlib",
"import",
"cm",
"pd",
"=",
"self",
".",
"_pd",
"entries",
"=",
"pd",
".",
"qhull_entries",
"data",
"=",
"np",
".",
"array",
"(",
"pd",
".... | 31.6 | 17.142857 |
def _stream(self):
"""execute subprocess with timeout
Usage::
>>> with cmd_proc.run_with_timeout() as cmd_proc:
... stdout, stderr = cmd_proc.communicate()
...
>>> assert cmd_proc.proc.return_code == 0, "proc exec failed"
"""
timer = None
try:
proc = subprocess.Popen(
self.cmd, cwd=self.cwd, env=self.env,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
timer = threading.Timer(
self.timeout,
kill_proc, [proc, self.cmd, time.time()]
)
timer.start()
yield proc
finally:
if timer is not None:
timer.cancel() | [
"def",
"_stream",
"(",
"self",
")",
":",
"timer",
"=",
"None",
"try",
":",
"proc",
"=",
"subprocess",
".",
"Popen",
"(",
"self",
".",
"cmd",
",",
"cwd",
"=",
"self",
".",
"cwd",
",",
"env",
"=",
"self",
".",
"env",
",",
"stdout",
"=",
"subprocess... | 28.333333 | 18.222222 |
def logged(level=logging.DEBUG):
    """
    Useful logging decorator. If a method is logged, the beginning and end of
    the method call will be logged at a pre-specified level.

    The log records go to a logger named "<module>.<function>" and include
    the wall-clock time plus the call's args/kwargs.

    Args:
        level: Level to log method at. Defaults to DEBUG.

    Returns:
        A decorator that wraps the target function with entry/exit logging.
    """
    import functools  # local import keeps the decorator self-contained

    def wrap(f):
        _logger = logging.getLogger("{}.{}".format(f.__module__, f.__name__))

        @functools.wraps(f)  # preserve __name__/__doc__ of the wrapped function
        def wrapped_f(*args, **kwargs):
            _logger.log(level, "Called at {} with args = {} and kwargs = {}"
                        .format(datetime.datetime.now(), args, kwargs))
            data = f(*args, **kwargs)
            _logger.log(level, "Done at {} with args = {} and kwargs = {}"
                        .format(datetime.datetime.now(), args, kwargs))
            return data
        return wrapped_f
    return wrap
"def",
"logged",
"(",
"level",
"=",
"logging",
".",
"DEBUG",
")",
":",
"def",
"wrap",
"(",
"f",
")",
":",
"_logger",
"=",
"logging",
".",
"getLogger",
"(",
"\"{}.{}\"",
".",
"format",
"(",
"f",
".",
"__module__",
",",
"f",
".",
"__name__",
")",
")"... | 36.761905 | 23.238095 |
def request(method, url, **kwargs):
    """same as requests/requests/api.py request(...)

    Adds the pooled session, the configured outgoing proxies and the
    per-thread engine timeout, and raises ``requests.exceptions.Timeout``
    when the whole search exceeded its time budget even though the HTTP
    call itself returned.
    """
    time_before_request = time()

    # session start
    session = SessionSinglePool()

    # proxies
    kwargs['proxies'] = settings['outgoing'].get('proxies') or None

    # timeout: an explicit kwarg wins over the per-thread default set by
    # the search dispatcher
    if 'timeout' in kwargs:
        timeout = kwargs['timeout']
    else:
        timeout = getattr(threadLocal, 'timeout', None)
        if timeout is not None:
            kwargs['timeout'] = timeout

    # do request
    response = session.request(method=method, url=url, **kwargs)

    time_after_request = time()

    # is there a timeout for this engine ?
    if timeout is not None:
        timeout_overhead = 0.2  # seconds
        # start_time = when the user request started
        start_time = getattr(threadLocal, 'start_time', time_before_request)
        search_duration = time_after_request - start_time
        if search_duration > timeout + timeout_overhead:
            # answered, but too late to be useful for this search
            raise requests.exceptions.Timeout(response=response)

    # session end
    session.close()

    if hasattr(threadLocal, 'total_time'):
        # accumulate per-thread time spent in outgoing requests
        threadLocal.total_time += time_after_request - time_before_request

    return response
"def",
"request",
"(",
"method",
",",
"url",
",",
"*",
"*",
"kwargs",
")",
":",
"time_before_request",
"=",
"time",
"(",
")",
"# session start",
"session",
"=",
"SessionSinglePool",
"(",
")",
"# proxies",
"kwargs",
"[",
"'proxies'",
"]",
"=",
"settings",
"... | 29.74359 | 21.410256 |
def custom_handler(self, glade, function_name, widget_name, str1, str2, int1, int2):
    """
    Generic handler for creating custom widgets, internally used to
    enable custom widgets (custom widgets of glade).

    Glade invokes this for every custom widget, passing the creation
    function name chosen at design time together with the four design-time
    values (str1, str2, int1, int2).  Methods of classes inheriting from
    SimpleGladeApp are used as creation functions automatically: if a
    custom widget names create_foo, the method create_foo is called with
    str1, str2, int1, int2.  Unknown names yield None.
    """
    try:
        creator = getattr(self, function_name)
        return creator(str1, str2, int1, int2)
    except AttributeError:
        # No such creation method on this object.
        return None
"def",
"custom_handler",
"(",
"self",
",",
"glade",
",",
"function_name",
",",
"widget_name",
",",
"str1",
",",
"str2",
",",
"int1",
",",
"int2",
")",
":",
"try",
":",
"handler",
"=",
"getattr",
"(",
"self",
",",
"function_name",
")",
"return",
"handler"... | 43.75 | 24.15 |
def _create_db(self):
    """Creates a new database or opens a connection to an existing one.

    .. note::
        You can't share sqlite3 connections between threads (by default)
        hence we setup the db here. It has the upside of running async.
    """
    log.debug("Creating sqlite database")
    connection = sqlite3.connect(self._db_filename)
    self._conn = connection
    # Make sure the message table exists before any insert happens.
    schema = """
        CREATE TABLE IF NOT EXISTS {}
        (
            ts REAL,
            arbitration_id INTEGER,
            extended INTEGER,
            remote INTEGER,
            error INTEGER,
            dlc INTEGER,
            data BLOB
        )
        """.format(self.table_name)
    cursor = connection.cursor()
    cursor.execute(schema)
    connection.commit()
    # Pre-build the parameterized insert statement used for every message.
    self._insert_template = "INSERT INTO {} VALUES (?, ?, ?, ?, ?, ?, ?)".format(self.table_name)
"def",
"_create_db",
"(",
"self",
")",
":",
"log",
".",
"debug",
"(",
"\"Creating sqlite database\"",
")",
"self",
".",
"_conn",
"=",
"sqlite3",
".",
"connect",
"(",
"self",
".",
"_db_filename",
")",
"# create table structure",
"self",
".",
"_conn",
".",
"cu... | 32 | 19.692308 |
def _find_vm(name, data, quiet=False):
'''
Scan the query data for the named VM
'''
for hv_ in data:
# Check if data is a dict, and not '"virt.full_info" is not available.'
if not isinstance(data[hv_], dict):
continue
if name in data[hv_].get('vm_info', {}):
ret = {hv_: {name: data[hv_]['vm_info'][name]}}
if not quiet:
__jid_event__.fire_event({'data': ret, 'outputter': 'nested'}, 'progress')
return ret
return {} | [
"def",
"_find_vm",
"(",
"name",
",",
"data",
",",
"quiet",
"=",
"False",
")",
":",
"for",
"hv_",
"in",
"data",
":",
"# Check if data is a dict, and not '\"virt.full_info\" is not available.'",
"if",
"not",
"isinstance",
"(",
"data",
"[",
"hv_",
"]",
",",
"dict",... | 36.5 | 20.5 |
def get_client_settings_config_file(**kwargs):  # pylint: disable=inconsistent-return-statements
    """Retrieve client settings from the possible config file locations.

    Reads /etc/softlayer.conf, ~/.softlayer and, when given, the file named
    by ``config_file`` in kwargs; later files override earlier ones.
    Returns a settings dict when a ``[softlayer]`` section is present, and
    implicitly None otherwise (hence the pylint disable above).

    :param \\*\\*kwargs: Arguments that are passed into the client instance
    """
    config_files = ['/etc/softlayer.conf', '~/.softlayer']

    if kwargs.get('config_file'):
        config_files.append(kwargs.get('config_file'))

    config_files = [os.path.expanduser(f) for f in config_files]
    # Defaults below guarantee every key exists even in a sparse file.
    config = utils.configparser.RawConfigParser({
        'username': '',
        'api_key': '',
        'endpoint_url': '',
        'timeout': '0',
        'proxy': '',
    })
    config.read(config_files)

    if config.has_section('softlayer'):
        return {
            'endpoint_url': config.get('softlayer', 'endpoint_url'),
            'timeout': config.getfloat('softlayer', 'timeout'),
            'proxy': config.get('softlayer', 'proxy'),
            'username': config.get('softlayer', 'username'),
            'api_key': config.get('softlayer', 'api_key'),
        }
"def",
"get_client_settings_config_file",
"(",
"*",
"*",
"kwargs",
")",
":",
"# pylint: disable=inconsistent-return-statements",
"config_files",
"=",
"[",
"'/etc/softlayer.conf'",
",",
"'~/.softlayer'",
"]",
"if",
"kwargs",
".",
"get",
"(",
"'config_file'",
")",
":",
... | 39.576923 | 20.615385 |
def _wrap_paginated_response(cls, request, response, controls, data,
                             head=None):
    """Builds the metadata for a paginated response and wraps everything in
    a JSON encoded web.Response

    :param request: incoming request, used to build self/next links
    :param response: validator response containing 'paging' and 'head_id'
    :param controls: client paging controls ('limit', 'start')
    :param data: the resources being returned
    :param head: chain head id; defaults to the response's head_id
    """
    paging_response = response['paging']
    if head is None:
        head = response['head_id']
    link = cls._build_url(
        request,
        head=head,
        start=paging_response['start'],
        limit=paging_response['limit'])

    paging = {}
    limit = controls.get('limit')
    start = controls.get("start")
    paging["limit"] = limit
    paging["start"] = start
    # If there are no resources, there should be nothing else in paging
    if paging_response.get("next") == "":
        return cls._wrap_response(
            request,
            data=data,
            metadata={
                'head': head,
                'link': link,
                'paging': paging
            })
    next_id = paging_response['next']
    paging['next_position'] = next_id

    # Builds paging urls specific to this response
    def build_pg_url(start=None):
        return cls._build_url(request, head=head, limit=limit, start=start)

    paging['next'] = build_pg_url(paging_response['next'])

    return cls._wrap_response(
        request,
        data=data,
        metadata={
            'head': head,
            'link': link,
            'paging': paging
        })
"def",
"_wrap_paginated_response",
"(",
"cls",
",",
"request",
",",
"response",
",",
"controls",
",",
"data",
",",
"head",
"=",
"None",
")",
":",
"paging_response",
"=",
"response",
"[",
"'paging'",
"]",
"if",
"head",
"is",
"None",
":",
"head",
"=",
"res... | 32.446809 | 14.510638 |
async def start(self, **kwargs):
    """Start the pairing server and publish service.

    Expects ``zeroconf`` and ``name`` in kwargs; ``pairing_guid`` is
    optional and generated when absent.  Binds the web server to an
    OS-assigned port and advertises that port over zeroconf.
    """
    zeroconf = kwargs['zeroconf']
    self._name = kwargs['name']
    self._pairing_guid = kwargs.get('pairing_guid', None) or \
        self._generate_random_guid()
    self._web_server = web.Server(self.handle_request, loop=self._loop)
    self._server = await self._loop.create_server(
        self._web_server, '0.0.0.0')

    # Get the allocated (random port) and include it in zeroconf service
    allocated_port = self._server.sockets[0].getsockname()[1]
    _LOGGER.debug('Started pairing web server at port %d', allocated_port)

    self._setup_zeroconf(zeroconf, allocated_port)
"async",
"def",
"start",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"zeroconf",
"=",
"kwargs",
"[",
"'zeroconf'",
"]",
"self",
".",
"_name",
"=",
"kwargs",
"[",
"'name'",
"]",
"self",
".",
"_pairing_guid",
"=",
"kwargs",
".",
"get",
"(",
"'pairi... | 44.4375 | 20.25 |
def get_yaml_schema(self):
    """GetYamlSchema.

    [Preview API] Fetches the YAML pipeline schema from the service and
    deserializes it as a plain object.

    :rtype: object
    """
    response = self._send(
        http_method='GET',
        location_id='1f9990b9-1dba-441f-9c2e-6485888c42b6',
        version='5.1-preview.1',
    )
    return self._deserialize('object', response)
"def",
"get_yaml_schema",
"(",
"self",
")",
":",
"response",
"=",
"self",
".",
"_send",
"(",
"http_method",
"=",
"'GET'",
",",
"location_id",
"=",
"'1f9990b9-1dba-441f-9c2e-6485888c42b6'",
",",
"version",
"=",
"'5.1-preview.1'",
")",
"return",
"self",
".",
"_des... | 37.777778 | 14 |
def _remlogic_time(time_cell, date):
"""Reads RemLogic time string to datetime
Parameters
----------
time_cell : str
entire time cell from text file
date : datetime
start date from text file
Returns
-------
datetime
date and time
"""
stage_start_time = datetime.strptime(time_cell[-8:], '%I:%M:%S')
start = datetime.combine(date.date(), stage_start_time.time())
if time_cell[1] == 'U':
start = start + timedelta(hours=12)
elif time_cell[-8:-10] == '12':
start = start + timedelta(hours=12)
else:
start = start + timedelta(hours=24)
return start | [
"def",
"_remlogic_time",
"(",
"time_cell",
",",
"date",
")",
":",
"stage_start_time",
"=",
"datetime",
".",
"strptime",
"(",
"time_cell",
"[",
"-",
"8",
":",
"]",
",",
"'%I:%M:%S'",
")",
"start",
"=",
"datetime",
".",
"combine",
"(",
"date",
".",
"date",... | 25.192308 | 18.576923 |
def CopyToDateTimeString(self):
    """Copies the POSIX timestamp to a date and time string.

    Returns:
      str: date and time value formatted as: "YYYY-MM-DD hh:mm:ss.######" or
          None if the timestamp is missing.
    """
    timestamp = self._timestamp
    if timestamp is None:
        return None
    # Values outside the signed 64-bit range cannot be represented.
    if timestamp < self._INT64_MIN or timestamp > self._INT64_MAX:
        return None
    return super(JavaTime, self).CopyToDateTimeString()
"def",
"CopyToDateTimeString",
"(",
"self",
")",
":",
"if",
"(",
"self",
".",
"_timestamp",
"is",
"None",
"or",
"self",
".",
"_timestamp",
"<",
"self",
".",
"_INT64_MIN",
"or",
"self",
".",
"_timestamp",
">",
"self",
".",
"_INT64_MAX",
")",
":",
"return"... | 34.583333 | 19 |
def get_command(self, ctx, cmd_name):
    """Get command for click.

    Maps the sub-command name to a sibling module (dashes become
    underscores), imports it, and returns its ``cli`` attribute.
    """
    module_path = "%s.%s" % (__name__, cmd_name)
    module_path = module_path.replace("-", "_")
    command_module = importlib.import_module(module_path)
    return getattr(command_module, 'cli')
"def",
"get_command",
"(",
"self",
",",
"ctx",
",",
"cmd_name",
")",
":",
"path",
"=",
"\"%s.%s\"",
"%",
"(",
"__name__",
",",
"cmd_name",
")",
"path",
"=",
"path",
".",
"replace",
"(",
"\"-\"",
",",
"\"_\"",
")",
"module",
"=",
"importlib",
".",
"im... | 39.666667 | 3.333333 |
def cons(collection, value):
    """Extends a collection with a value.

    Mappings are merged into a dict; strings are appended whole; other
    iterables extend the list element by element; any other value is
    appended as a single element.  A None collection is replaced by an
    empty container of the appropriate kind.
    """
    if isinstance(value, collections.Mapping):
        if collection is None:
            collection = {}
        collection.update(**value)
        return collection
    if collection is None:
        collection = []
    if isinstance(value, six.string_types):
        # Strings are iterable, but treat them as scalar values.
        collection.append(value)
    elif isinstance(value, collections.Iterable):
        collection.extend(value)
    else:
        collection.append(value)
    return collection
"def",
"cons",
"(",
"collection",
",",
"value",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"collections",
".",
"Mapping",
")",
":",
"if",
"collection",
"is",
"None",
":",
"collection",
"=",
"{",
"}",
"collection",
".",
"update",
"(",
"*",
"*",
... | 26.086957 | 15.826087 |
def stop_stack(awsclient, stack_name, use_suspend=False):
    """Stop an existing stack on AWS cloud.

    Autoscaling groups are either suspended (``use_suspend``) or resized to
    zero, their instances stopped or terminated accordingly; ECS services
    are scaled to zero desiredCount; stand-alone EC2 instances and running
    RDS instances are stopped.

    :param awsclient: wrapper used to obtain boto3 service clients
    :param stack_name: name of the deployed CloudFormation stack
    :param use_suspend: use suspend and resume on the autoscaling group
        (keeps instances alive across stop/start instead of terminating)
    :return: exit_code
    """
    exit_code = 0

    # check for DisableStop
    #disable_stop = conf.get('deployment', {}).get('DisableStop', False)
    #if disable_stop:
    #    log.warn('\'DisableStop\' is set - nothing to do!')
    #else:
    if not stack_exists(awsclient, stack_name):
        log.warn('Stack \'%s\' not deployed - nothing to do!', stack_name)
    else:
        client_cfn = awsclient.get_client('cloudformation')
        client_autoscaling = awsclient.get_client('autoscaling')
        client_rds = awsclient.get_client('rds')
        client_ec2 = awsclient.get_client('ec2')
        resources = all_pages(
            client_cfn.list_stack_resources,
            { 'StackName': stack_name },
            lambda r: r['StackResourceSummaries']
        )
        autoscaling_groups = [
            r for r in resources
            if r['ResourceType'] == 'AWS::AutoScaling::AutoScalingGroup'
        ]
        # lookup all types of scaling processes
        # [Launch, Terminate, HealthCheck, ReplaceUnhealthy, AZRebalance
        # AlarmNotification, ScheduledActions, AddToLoadBalancer]
        response = client_autoscaling.describe_scaling_process_types()
        scaling_process_types = [t['ProcessName'] for t in response.get('Processes', [])]

        for asg in autoscaling_groups:
            # find instances in autoscaling group
            ec2_instances = all_pages(
                client_autoscaling.describe_auto_scaling_instances,
                {},
                lambda r: [i['InstanceId'] for i in r.get('AutoScalingInstances', [])
                           if i['AutoScalingGroupName'] == asg['PhysicalResourceId']],
            )
            if use_suspend:
                # alternative implementation to speed up start
                # only problem is that instances must survive stop & start
                # suspend all autoscaling processes
                log.info('Suspending all autoscaling processes for \'%s\'',
                         asg['LogicalResourceId'])
                response = client_autoscaling.suspend_processes(
                    AutoScalingGroupName=asg['PhysicalResourceId'],
                    ScalingProcesses=scaling_process_types
                )
                _stop_ec2_instances(awsclient, ec2_instances)
            else:
                # resize autoscaling group (min, max = 0)
                # NOTE: resizing to zero makes the ASG terminate its own
                # instances, hence the 'instance_terminated' waiter below.
                log.info('Resize autoscaling group \'%s\' to minSize=0, maxSize=0',
                         asg['LogicalResourceId'])
                response = client_autoscaling.update_auto_scaling_group(
                    AutoScalingGroupName=asg['PhysicalResourceId'],
                    MinSize=0,
                    MaxSize=0
                )
                if ec2_instances:
                    running_instances = all_pages(
                        client_ec2.describe_instance_status,
                        {
                            'InstanceIds': ec2_instances,
                            'Filters': [{
                                'Name': 'instance-state-name',
                                'Values': ['pending', 'running']
                            }]
                        },
                        lambda r: [i['InstanceId'] for i in r.get('InstanceStatuses', [])],
                    )
                    if running_instances:
                        # wait for instances to terminate
                        waiter_inst_terminated = client_ec2.get_waiter('instance_terminated')
                        waiter_inst_terminated.wait(InstanceIds=running_instances)

        # setting ECS desiredCount to zero
        services = [
            r for r in resources
            if r['ResourceType'] == 'AWS::ECS::Service'
        ]
        if services:
            template, parameters = _get_template_parameters(awsclient, stack_name)
            _stop_ecs_services(awsclient, services, template, parameters)

        # stopping ec2 instances
        instances = [
            r['PhysicalResourceId'] for r in resources
            if r['ResourceType'] == 'AWS::EC2::Instance'
        ]
        _stop_ec2_instances(awsclient, instances)

        # stopping db instances
        db_instances = [
            r['PhysicalResourceId'] for r in resources
            if r['ResourceType'] == 'AWS::RDS::DBInstance'
        ]
        running_db_instances = _filter_db_instances_by_status(
            awsclient, db_instances, ['available']
        )
        for db in running_db_instances:
            log.info('Stopping RDS instance \'%s\'', db)
            client_rds.stop_db_instance(DBInstanceIdentifier=db)

    return exit_code
"def",
"stop_stack",
"(",
"awsclient",
",",
"stack_name",
",",
"use_suspend",
"=",
"False",
")",
":",
"exit_code",
"=",
"0",
"# check for DisableStop",
"#disable_stop = conf.get('deployment', {}).get('DisableStop', False)",
"#if disable_stop:",
"# log.warn('\\'DisableStop\\' i... | 40.948276 | 22 |
def checkResponse(request):
    '''
    Returns if a request has an okay error code, otherwise raises InvalidRequest.
    '''
    first_digit = str(request.status_code)[0]
    if first_digit in ('2', '3'):
        # 2xx / 3xx codes are success or redirect: nothing to do.
        return
    # Extract the server's error text and surface it to the caller.
    message = str(request.text).split('\\r')[0][2:]
    raise InvalidRequest(message)
"def",
"checkResponse",
"(",
"request",
")",
":",
"# Check the status code of the returned request",
"if",
"str",
"(",
"request",
".",
"status_code",
")",
"[",
"0",
"]",
"not",
"in",
"[",
"'2'",
",",
"'3'",
"]",
":",
"w",
"=",
"str",
"(",
"request",
".",
... | 34.8 | 23.2 |
def call(__self, __obj, *args, **kwargs):
    """Call the callable with the arguments and keyword arguments
    provided but inject the active context or environment as first
    argument if the callable is a :func:`contextfunction` or
    :func:`environmentfunction`.
    """
    if __debug__:
        __traceback_hide__ = True
    if isinstance(__obj, _context_function_types):
        # Decide what (if anything) the callable wants prepended.
        if getattr(__obj, 'contextfunction', 0):
            prepend = (__self,)
        elif getattr(__obj, 'evalcontextfunction', 0):
            prepend = (__self.eval_ctx,)
        elif getattr(__obj, 'environmentfunction', 0):
            prepend = (__self.environment,)
        else:
            prepend = ()
        args = prepend + args
    return __obj(*args, **kwargs)
"def",
"call",
"(",
"__self",
",",
"__obj",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"__debug__",
":",
"__traceback_hide__",
"=",
"True",
"if",
"isinstance",
"(",
"__obj",
",",
"_context_function_types",
")",
":",
"if",
"getattr",
"(",
... | 46.625 | 10.375 |
def set_all_requested_intervals(self, requested_intervals):
    """
    Sets the requested intervals on every online workflow

    :param requested_intervals: The requested intervals
    :return: None
    :type requested_intervals: TimeIntervals
    """
    for workflow in self.workflows.values():
        if workflow.online:
            workflow.requested_intervals = requested_intervals
"def",
"set_all_requested_intervals",
"(",
"self",
",",
"requested_intervals",
")",
":",
"for",
"workflow_id",
"in",
"self",
".",
"workflows",
":",
"if",
"self",
".",
"workflows",
"[",
"workflow_id",
"]",
".",
"online",
":",
"self",
".",
"workflows",
"[",
"w... | 43.9 | 13.5 |
def _find_own_cgroups():
    """
    For all subsystems, return the information in which (sub-)cgroup this process is in.
    (Each process is in exactly one cgroup in each hierarchy.)

    @return a generator of tuples (subsystem, cgroup)
    """
    try:
        with open('/proc/self/cgroup', 'rt') as own_cgroups_file:
            # Delegate parsing; simply re-yield the (subsystem, cgroup) pairs.
            yield from _parse_proc_pid_cgroup(own_cgroups_file)
    except IOError:
        logging.exception('Cannot read /proc/self/cgroup')
"def",
"_find_own_cgroups",
"(",
")",
":",
"try",
":",
"with",
"open",
"(",
"'/proc/self/cgroup'",
",",
"'rt'",
")",
"as",
"ownCgroupsFile",
":",
"for",
"cgroup",
"in",
"_parse_proc_pid_cgroup",
"(",
"ownCgroupsFile",
")",
":",
"yield",
"cgroup",
"except",
"IO... | 39.833333 | 18.833333 |
def _scalar2array(d):
"""Convert a dictionary with scalar elements and string indices '_1234'
to a dictionary of arrays. Unspecified entries are np.nan."""
da = {}
for k, v in d.items():
if '_' not in k:
da[k] = v
else:
name = ''.join(k.split('_')[:-1])
ind = k.split('_')[-1]
dim = len(ind)
if name not in da:
shape = tuple(3 for i in range(dim))
da[name] = np.empty(shape, dtype=complex)
da[name][:] = np.nan
da[name][tuple(int(i) - 1 for i in ind)] = v
return da | [
"def",
"_scalar2array",
"(",
"d",
")",
":",
"da",
"=",
"{",
"}",
"for",
"k",
",",
"v",
"in",
"d",
".",
"items",
"(",
")",
":",
"if",
"'_'",
"not",
"in",
"k",
":",
"da",
"[",
"k",
"]",
"=",
"v",
"else",
":",
"name",
"=",
"''",
".",
"join",... | 35.588235 | 13.823529 |
def to_list(self):
    """
    To a list of dicts (each dict is an instances)
    """
    return [instance.to_dict() for instance in self.instances]
"def",
"to_list",
"(",
"self",
")",
":",
"ret",
"=",
"[",
"]",
"for",
"instance",
"in",
"self",
".",
"instances",
":",
"ret",
".",
"append",
"(",
"instance",
".",
"to_dict",
"(",
")",
")",
"return",
"ret"
] | 26.125 | 10.625 |
async def extended_analog(self, pin, data):
    """
    This method will send an extended-data analog write command to the
    selected pin.

    :param pin: 0 - 127
    :param data: 0 - 0xfffff
    :returns: No return value
    """
    # Split the value into three 7-bit sysex payload bytes, LSB first.
    payload = [pin] + [(data >> shift) & 0x7f for shift in (0, 7, 14)]
    await self._send_sysex(PrivateConstants.EXTENDED_ANALOG, payload)
"async",
"def",
"extended_analog",
"(",
"self",
",",
"pin",
",",
"data",
")",
":",
"analog_data",
"=",
"[",
"pin",
",",
"data",
"&",
"0x7f",
",",
"(",
"data",
">>",
"7",
")",
"&",
"0x7f",
",",
"(",
"data",
">>",
"14",
")",
"&",
"0x7f",
"]",
"aw... | 31.538462 | 21.692308 |
def fit(self, X, y, **kwargs):
    """
    Fits the estimator to calculate feature correlation to
    dependent variable.

    Parameters
    ----------
    X : ndarray or DataFrame of shape n x m
        A matrix of n instances with m features

    y : ndarray or Series of length n
        An array or series of target or class values

    kwargs : dict
        Keyword arguments passed to the fit method of the estimator.

    Returns
    -------
    self : visualizer
        The fit method must always return self to support pipelines.
    """
    self._create_labels_for_features(X)

    self._select_features_to_plot(X)

    # Calculate Features correlation with target variable
    if self.method == "pearson":
        # pearsonr returns (correlation, p-value); keep only the former,
        # computed per feature column
        self.scores_ = np.array(
            [pearsonr(x, y, **kwargs)[0] for x in np.asarray(X).T]
        )
    else:
        # dispatch to the configured correlation backend
        self.scores_ = np.array(
            self.correlation_methods[self.method](X, y, **kwargs)
        )

    # If feature indices are given, plot only the given features
    if self.feature_index:
        self.scores_ = self.scores_[self.feature_index]
        self.features_ = self.features_[self.feature_index]

    # Sort features by correlation
    if self.sort:
        sort_idx = np.argsort(self.scores_)
        self.scores_ = self.scores_[sort_idx]
        self.features_ = self.features_[sort_idx]

    self.draw()
    return self
"def",
"fit",
"(",
"self",
",",
"X",
",",
"y",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"_create_labels_for_features",
"(",
"X",
")",
"self",
".",
"_select_features_to_plot",
"(",
"X",
")",
"# Calculate Features correlation with target variable",
"if",
... | 30.916667 | 20.5 |
def _load(self, exit_on_failure):
    """One you have added all your configuration data (Section, Element,
    ...) you need to load data from the config file.

    Reads the configured file (or probes the default <prog>.cfg locations)
    and then asks every registered section to load itself from the parser.

    :param exit_on_failure: when True, a section failing with ValueError
        terminates the program via sys.exit(1); otherwise the exception
        propagates.
    :raises EnvironmentError: when the config file is mandatory but none
        of the candidate files could be read.
    """
    # pylint: disable-msg=W0621
    log = logging.getLogger('argtoolbox')
    discoveredFileList = []
    if self.config_file:
        # NOTE(review): types.UnicodeType is Python 2 only -- this branch
        # distinguishes a path string from an open file-like object.
        if isinstance(self.config_file, types.UnicodeType):
            discoveredFileList = self.file_parser.read(self.config_file)
        else:
            discoveredFileList = self.file_parser.readfp(
                self.config_file,
                "file descriptor")
    else:
        # No explicit file: probe ./, ~/. and /etc locations in order.
        defaultFileList = []
        defaultFileList.append(self.prog_name + ".cfg")
        defaultFileList.append(
            os.path.expanduser('~/.' + self.prog_name + '.cfg'))
        defaultFileList.append('/etc/' + self.prog_name + '.cfg')
        log.debug("defaultFileList: " + str(defaultFileList))
        discoveredFileList = self.file_parser.read(defaultFileList)
    log.debug("discoveredFileList: " + str(discoveredFileList))
    # NOTE(review): defaultFileList is only bound when self.config_file is
    # falsy; the message below would raise NameError when an explicit
    # config_file was given but unreadable -- confirm and fix upstream.
    if self.mandatory and len(discoveredFileList) < 1:
        msg = "The required config file was missing."
        msg += " Default config files : " + str(defaultFileList)
        log.error(msg)
        raise EnvironmentError(msg)
    log.debug("loading configuration ...")
    if exit_on_failure:
        for s in self.sections.values():
            log.debug("loading section : " + s.get_section_name())
            try:
                s.load(self.file_parser)
            except ValueError:
                sys.exit(1)
    else:
        for s in self.sections.values():
            log.debug("loading section : " + s.get_section_name())
            s.load(self.file_parser)
    log.debug("configuration loaded.")
"def",
"_load",
"(",
"self",
",",
"exit_on_failure",
")",
":",
"# pylint: disable-msg=W0621",
"log",
"=",
"logging",
".",
"getLogger",
"(",
"'argtoolbox'",
")",
"discoveredFileList",
"=",
"[",
"]",
"if",
"self",
".",
"config_file",
":",
"if",
"isinstance",
"("... | 42.068182 | 16.772727 |
def get_lowest_probable_prepared_certificate_in_view(
        self, view_no) -> Optional[int]:
    """
    Return lowest pp_seq_no of the view for which can be prepared but
    choose from unprocessed PRE-PREPAREs and PREPAREs.

    Collects pending PRE-PREPARE seq numbers and the seq numbers of
    PREPAREs that already reach the prepare quorum, and returns the
    smallest number present in both, or None when there is none.
    """
    # TODO: Naive implementation, dont need to iterate over the complete
    # data structures, fix this later
    seq_no_pp = SortedList()  # pp_seq_no of PRE-PREPAREs
    # pp_seq_no of PREPAREs with count of PREPAREs for each
    seq_no_p = set()

    for (v, p) in self.prePreparesPendingPrevPP:
        if v == view_no:
            seq_no_pp.add(p)
        if v > view_no:
            # assumes iteration is ordered by view number, so nothing
            # beyond this point can match -- TODO confirm
            break

    for (v, p), pr in self.preparesWaitingForPrePrepare.items():
        if v == view_no and len(pr) >= self.quorums.prepare.value:
            seq_no_p.add(p)

    # seq_no_pp is sorted, so the first hit is the lowest pp_seq_no.
    for n in seq_no_pp:
        if n in seq_no_p:
            return n
    return None
"def",
"get_lowest_probable_prepared_certificate_in_view",
"(",
"self",
",",
"view_no",
")",
"->",
"Optional",
"[",
"int",
"]",
":",
"# TODO: Naive implementation, dont need to iterate over the complete",
"# data structures, fix this later",
"seq_no_pp",
"=",
"SortedList",
"(",
... | 36.269231 | 18.346154 |
def delete(self, key, sort_key):
    """Delete an element in dictionary

    Removes the item stored under ``prefixed("<key>:<sort_key>")``, drops
    ``sort_key`` from the primary key's sort-key collection (when given),
    and unlinks the item from every secondary index that references it.

    :param key: primary key of the item
    :param sort_key: sort key of the item, or None
    :return: True on success
    """
    primary_key = key
    key = self.prefixed('{}:{}'.format(key, sort_key))
    self.logger.debug('Storage - delete {}'.format(key))
    if sort_key is not None:
        self.cache[self.prefixed(primary_key)].remove(sort_key)
    if self._secondary_indexes:
        # Parse the stored document once (the original re-parsed it for
        # every index) and unlink this key from each matching index.
        obj = json.loads(self.cache[key])
        for index in self._secondary_indexes:
            if index in obj:
                self.cache['secondary_indexes'][index][obj[index]].remove(
                    key)
    del self.cache[key]
    return True
"def",
"delete",
"(",
"self",
",",
"key",
",",
"sort_key",
")",
":",
"primary_key",
"=",
"key",
"key",
"=",
"self",
".",
"prefixed",
"(",
"'{}:{}'",
".",
"format",
"(",
"key",
",",
"sort_key",
")",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"'S... | 42.214286 | 13.857143 |
def _get_src_path_line_nodes_jacoco(self, xml_document, src_path):
"""
Return a list of nodes containing line information for `src_path`
in `xml_document`.
If file is not present in `xml_document`, return None
"""
files = []
packages = [pkg for pkg in xml_document.findall(".//package")]
for pkg in packages:
_files = [_file
for _file in pkg.findall('sourcefile')
if self._measured_source_path_matches(pkg.get('name'), _file.get('name'), src_path)
or []
]
files.extend(_files)
if not files:
return None
lines = [file_tree.findall('./line')
for file_tree in files]
return [elem for elem in itertools.chain(*lines)] | [
"def",
"_get_src_path_line_nodes_jacoco",
"(",
"self",
",",
"xml_document",
",",
"src_path",
")",
":",
"files",
"=",
"[",
"]",
"packages",
"=",
"[",
"pkg",
"for",
"pkg",
"in",
"xml_document",
".",
"findall",
"(",
"\".//package\"",
")",
"]",
"for",
"pkg",
"... | 35.782609 | 20.478261 |
def most_recent_year():
    """
    This year, if it's December.
    The most recent year, otherwise.

    Note: Advent of Code started in 2015
    """
    now = datetime.datetime.now(tz=AOC_TZ)
    # Before December, this year's puzzles don't exist yet.
    year = now.year if now.month == 12 else now.year - 1
    if year < 2015:
        raise AocdError("Time travel not supported yet")
    return year
"def",
"most_recent_year",
"(",
")",
":",
"aoc_now",
"=",
"datetime",
".",
"datetime",
".",
"now",
"(",
"tz",
"=",
"AOC_TZ",
")",
"year",
"=",
"aoc_now",
".",
"year",
"if",
"aoc_now",
".",
"month",
"<",
"12",
":",
"year",
"-=",
"1",
"if",
"year",
"... | 26.692308 | 11.615385 |
def serialize_footer(signer):
    """Uses the signer object which has been used to sign the message to generate
    the signature, then serializes that signature.

    :param signer: Cryptographic signer object
    :type signer: aws_encryption_sdk.internal.crypto.Signer
    :returns: Serialized footer
    :rtype: bytes
    """
    if signer is None:
        # Unsigned message: the footer is simply absent.
        return b""
    signature = signer.finalize()
    # Footer layout: 2-byte big-endian signature length, then the signature.
    return struct.pack(">H{sig_len}s".format(sig_len=len(signature)), len(signature), signature)
"def",
"serialize_footer",
"(",
"signer",
")",
":",
"footer",
"=",
"b\"\"",
"if",
"signer",
"is",
"not",
"None",
":",
"signature",
"=",
"signer",
".",
"finalize",
"(",
")",
"footer",
"=",
"struct",
".",
"pack",
"(",
"\">H{sig_len}s\"",
".",
"format",
"("... | 37 | 17.428571 |
def _get_privacy(self, table_name):
    """gets current privacy of a table"""
    manager = DatasetManager(self.auth_client)
    try:
        return manager.get(table_name).privacy.lower()
    except NotFoundException:
        # Unknown tables have no privacy setting.
        return None
"def",
"_get_privacy",
"(",
"self",
",",
"table_name",
")",
":",
"ds_manager",
"=",
"DatasetManager",
"(",
"self",
".",
"auth_client",
")",
"try",
":",
"dataset",
"=",
"ds_manager",
".",
"get",
"(",
"table_name",
")",
"return",
"dataset",
".",
"privacy",
"... | 36.375 | 10 |
def git(ctx, url, private, sync):  # pylint:disable=assign-to-new-keyword
    """Set/Sync git repo on this project.

    Uses [Caching](/references/polyaxon-cli/#caching)

    Example:

    \b
    ```bash
    $ polyaxon project git --url=https://github.com/polyaxon/polyaxon-quick-start
    ```

    \b
    ```bash
    $ polyaxon project git --url=https://github.com/polyaxon/polyaxon-quick-start --private
    ```
    """
    user, project_name = get_project_or_local(ctx.obj.get('project'))

    # Register (or replace) the repo URL on the project; exits on API error.
    def git_set_url():
        if private:
            click.echo('\nSetting a private git repo "{}" on project: {} ...\n'.format(
                url, project_name))
        else:
            click.echo('\nSetting a public git repo "{}" on project: {} ...\n'.format(
                url, project_name))

        try:
            PolyaxonClient().project.set_repo(user, project_name, url, not private)
        except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
            Printer.print_error('Could not set git repo on project `{}`.'.format(project_name))
            Printer.print_error('Error message `{}`.'.format(e))
            sys.exit(1)
        Printer.print_success('Project was successfully initialized with `{}`.'.format(url))

    # Pull the latest changes from the already-registered repo.
    def git_sync_repo():
        try:
            response = PolyaxonClient().project.sync_repo(user, project_name)
        except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
            Printer.print_error('Could not sync git repo on project `{}`.'.format(project_name))
            Printer.print_error('Error message `{}`.'.format(e))
            sys.exit(1)
        click.echo(response.status_code)
        Printer.print_success('Project was successfully synced with latest changes.')

    if url:
        git_set_url()
    if sync:
        git_sync_repo()
"def",
"git",
"(",
"ctx",
",",
"url",
",",
"private",
",",
"sync",
")",
":",
"# pylint:disable=assign-to-new-keyword",
"user",
",",
"project_name",
"=",
"get_project_or_local",
"(",
"ctx",
".",
"obj",
".",
"get",
"(",
"'project'",
")",
")",
"def",
"git_set_u... | 35.470588 | 31.196078 |
def token_list_width(tokenlist):
    """
    Return the character width of this token list.
    (Take double width characters into account.)

    :param tokenlist: List of (token, text) or (token, text, mouse_handler)
        tuples.
    """
    zero_width = Token.ZeroWidthEscape
    total = 0
    for item in tokenlist:
        if item[0] == zero_width:
            # Escape sequences occupy no visible columns.
            continue
        total += sum(get_cwidth(c) for c in item[1])
    return total
"def",
"token_list_width",
"(",
"tokenlist",
")",
":",
"ZeroWidthEscape",
"=",
"Token",
".",
"ZeroWidthEscape",
"return",
"sum",
"(",
"get_cwidth",
"(",
"c",
")",
"for",
"item",
"in",
"tokenlist",
"for",
"c",
"in",
"item",
"[",
"1",
"]",
"if",
"item",
"[... | 38.9 | 17.3 |
def toXMLname(string):
    """Convert string to a XML name.

    Characters that are not valid NCName characters are escaped to
    ``_xHHHH_`` hexadecimal escapes; a literal leading ``_x`` is protected
    as ``_x005F_`` so it cannot be confused with an escape, and names
    beginning with "xml" (any case) get an ``_xFFFF_`` prefix since "xml"
    is reserved.  A namespace prefix before ':' is preserved unescaped.
    """
    if string.find(':') != -1 :
        (prefix, localname) = string.split(':',1)
    else:
        prefix = None
        localname = string
    T = unicode(localname)
    N = len(localname)
    X = [];
    for i in range(N) :
        if i< N-1 and T[i]==u'_' and T[i+1]==u'x':
            # a literal "_x" would collide with the escape syntax
            X.append(u'_x005F_')
        elif i==0 and N >= 3 and \
                ( T[0]==u'x' or T[0]==u'X' ) and \
                ( T[1]==u'm' or T[1]==u'M' ) and \
                ( T[2]==u'l' or T[2]==u'L' ):
            # leading "xml" is reserved by the XML specification
            X.append(u'_xFFFF_' + T[0])
        elif (not _NCNameChar(T[i])) or (i==0 and not _NCNameStartChar(T[i])):
            X.append(_toUnicodeHex(T[i]))
        else:
            X.append(T[i])

    if prefix:
        return "%s:%s" % (prefix, u''.join(X))
    return u''.join(X)
"def",
"toXMLname",
"(",
"string",
")",
":",
"if",
"string",
".",
"find",
"(",
"':'",
")",
"!=",
"-",
"1",
":",
"(",
"prefix",
",",
"localname",
")",
"=",
"string",
".",
"split",
"(",
"':'",
",",
"1",
")",
"else",
":",
"prefix",
"=",
"None",
"l... | 29.642857 | 16.892857 |
def open(self, inp, opts={}):
    """Use this to set where to read from.

    Set opts['try_lineedit'] if you want this input to interact
    with GNU-like readline library. By default, we will assume to
    try importing and using readline. If readline is not
    importable, line editing is not available whether or not
    opts['try_readline'] is set.

    Set opts['use_raw'] if input should use Python's use_raw(). If
    however 'inp' is a string and opts['use_raw'] is not set, we
    will assume no raw output. Note that an individual readline
    may override the setting.
    """
    # NOTE(review): mutable default for opts -- harmless while it is only
    # read, but fragile if a later change mutates it.
    get_option = lambda key: Mmisc.option_set(opts, key,
                                              self.DEFAULT_OPEN_READ_OPTS)

    if (isinstance(inp, io.TextIOWrapper) or
        isinstance(inp, io.StringIO) or
        hasattr(inp, 'isatty') and inp.isatty()):
        self.use_raw = get_option('use_raw')
    elif isinstance(inp, 'string'.__class__):  # FIXME
        if opts is None:
            self.use_raw = False
        else:
            self.use_raw = get_option('use_raw')
            pass
        # 'inp' is a path: this method shadows the builtin open, but the
        # bare name here still resolves to the builtin at module scope.
        inp = open(inp, 'r')
    else:
        raise IOError("Invalid input type (%s) for %s" % (type(inp),
                                                          inp))
    self.input = inp
    self.line_edit = get_option('try_readline') and readline_importable()
    self.closed = False
    return
"def",
"open",
"(",
"self",
",",
"inp",
",",
"opts",
"=",
"{",
"}",
")",
":",
"get_option",
"=",
"lambda",
"key",
":",
"Mmisc",
".",
"option_set",
"(",
"opts",
",",
"key",
",",
"self",
".",
"DEFAULT_OPEN_READ_OPTS",
")",
"if",
"(",
"isinstance",
"(",... | 43.911765 | 19.117647 |
def main(self) -> None:
"""
Main entry point. Runs :func:`service`.
"""
# Actual main service code.
try:
self.service()
except Exception as e:
self.error("Unexpected exception: {e}\n{t}".format(
e=e, t=traceback.format_exc())) | [
"def",
"main",
"(",
"self",
")",
"->",
"None",
":",
"# Actual main service code.",
"try",
":",
"self",
".",
"service",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"self",
".",
"error",
"(",
"\"Unexpected exception: {e}\\n{t}\"",
".",
"format",
"(",
"e",... | 30.5 | 11.1 |
def put_value(self, value, timeout=None):
"""Put a value to the Attribute and wait for completion"""
self._context.put(self._data.path + ["value"], value, timeout=timeout) | [
"def",
"put_value",
"(",
"self",
",",
"value",
",",
"timeout",
"=",
"None",
")",
":",
"self",
".",
"_context",
".",
"put",
"(",
"self",
".",
"_data",
".",
"path",
"+",
"[",
"\"value\"",
"]",
",",
"value",
",",
"timeout",
"=",
"timeout",
")"
] | 61.666667 | 13 |
def iter_annotation_values(graph, annotation: str) -> Iterable[str]:
"""Iterate over all of the values for an annotation used in the graph.
:param pybel.BELGraph graph: A BEL graph
:param str annotation: The annotation to grab
"""
return (
value
for _, _, data in graph.edges(data=True)
if edge_has_annotation(data, annotation)
for value in data[ANNOTATIONS][annotation]
) | [
"def",
"iter_annotation_values",
"(",
"graph",
",",
"annotation",
":",
"str",
")",
"->",
"Iterable",
"[",
"str",
"]",
":",
"return",
"(",
"value",
"for",
"_",
",",
"_",
",",
"data",
"in",
"graph",
".",
"edges",
"(",
"data",
"=",
"True",
")",
"if",
... | 34.833333 | 16.416667 |
def experiments_fmri_create(self, experiment_id, filename):
"""Create functional data object from given file and associate the
object with the specified experiment.
Parameters
----------
experiment_id : string
Unique experiment identifier
filename : File-type object
Functional data file
Returns
-------
FMRIDataHandle
Handle for created fMRI object or None if identified experiment
is unknown
"""
# Get the experiment to ensure that it exist before we even create the
# functional data object
experiment = self.experiments_get(experiment_id)
if experiment is None:
return None
# Create functional data object from given file
fmri = self.funcdata.create_object(filename)
# Update experiment to associate it with created fMRI object. Assign
# result to experiment. Should the experiment have been deleted in
# parallel the result will be None
experiment = self.experiments.update_fmri_data(experiment_id, fmri.identifier)
if experiment is None:
# Delete fMRI object's data directory
shutil.rmtree(fmri.directory)
# Delete functional data object from databases
self.funcdata.delete_object(fmri.identifier, erase=True)
return None
else:
return funcdata.FMRIDataHandle(fmri, experiment_id) | [
"def",
"experiments_fmri_create",
"(",
"self",
",",
"experiment_id",
",",
"filename",
")",
":",
"# Get the experiment to ensure that it exist before we even create the",
"# functional data object",
"experiment",
"=",
"self",
".",
"experiments_get",
"(",
"experiment_id",
")",
... | 40.555556 | 18.305556 |
def target_to_ipv4_long(target):
""" Attempt to return a IPv4 long-range list from a target string. """
splitted = target.split('-')
if len(splitted) != 2:
return None
try:
start_packed = inet_pton(socket.AF_INET, splitted[0])
end_packed = inet_pton(socket.AF_INET, splitted[1])
except socket.error:
return None
if end_packed < start_packed:
return None
return ipv4_range_to_list(start_packed, end_packed) | [
"def",
"target_to_ipv4_long",
"(",
"target",
")",
":",
"splitted",
"=",
"target",
".",
"split",
"(",
"'-'",
")",
"if",
"len",
"(",
"splitted",
")",
"!=",
"2",
":",
"return",
"None",
"try",
":",
"start_packed",
"=",
"inet_pton",
"(",
"socket",
".",
"AF_... | 32.928571 | 17.357143 |
def meta_features_path(self, path):
"""Returns path for meta-features
Args:
path (str): Absolute/local path of xcessiv folder
"""
return os.path.join(
path,
app.config['XCESSIV_META_FEATURES_FOLDER'],
str(self.id)
) + '.npy' | [
"def",
"meta_features_path",
"(",
"self",
",",
"path",
")",
":",
"return",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"app",
".",
"config",
"[",
"'XCESSIV_META_FEATURES_FOLDER'",
"]",
",",
"str",
"(",
"self",
".",
"id",
")",
")",
"+",
"'.npy'"
] | 29 | 15.727273 |
def preprocess(options):
assert options.bfile!=None, 'Please specify a bfile.'
""" computing the covariance matrix """
if options.compute_cov:
assert options.bfile!=None, 'Please specify a bfile.'
assert options.cfile is not None, 'Specify covariance matrix basename'
print('Computing covariance matrix')
t0 = time.time()
computeCovarianceMatrix(options.plink_path,options.bfile,options.cfile,options.sim_type)
t1 = time.time()
print(('... finished in %s seconds'%(t1-t0)))
print('Computing eigenvalue decomposition')
t0 = time.time()
eighCovarianceMatrix(options.cfile)
t1 = time.time()
print(('... finished in %s seconds'%(t1-t0)))
""" computing principal components """
if options.compute_PCs>0:
assert options.ffile is not None, 'Specify fix effects basename for saving PCs'
t0 = time.time()
computePCs(options.plink_path,options.compute_PCs,options.bfile,options.ffile)
t1 = time.time()
print(('... finished in %s seconds'%(t1-t0)))
""" fitting the null model """
if options.fit_null:
if options.nfile is None:
options.nfile = os.path.split(options.bfile)[-1]
warnings.warn('nfile not specifed, set to %s'%options.nfile)
print('Fitting null model')
assert options.pfile is not None, 'phenotype file needs to be specified'
# read pheno
Y = readPhenoFile(options.pfile,idx=options.trait_idx)
# read covariance
if options.cfile is None:
cov = {'eval':None,'evec':None}
warnings.warn('cfile not specifed, a one variance compoenent model will be considered')
else:
cov = readCovarianceMatrixFile(options.cfile,readCov=False)
assert Y.shape[0]==cov['eval'].shape[0], 'dimension mismatch'
# read covariates
F = None
if options.ffile is not None:
F = readCovariatesFile(options.ffile)
assert Y.shape[0]==F.shape[0], 'dimensions mismatch'
t0 = time.time()
fit_null(Y,cov['eval'],cov['evec'],options.nfile, F)
t1 = time.time()
print(('.. finished in %s seconds'%(t1-t0)))
""" precomputing the windows """
if options.precompute_windows:
if options.wfile==None:
options.wfile = os.path.split(options.bfile)[-1] + '.%d'%options.window_size
warnings.warn('wfile not specifed, set to %s'%options.wfile)
print('Precomputing windows')
t0 = time.time()
pos = readBimFile(options.bfile)
nWnds,nSnps=splitGeno(pos,size=options.window_size,out_file=options.wfile+'.wnd')
print(('Number of variants:',pos.shape[0]))
print(('Number of windows:',nWnds))
print(('Minimum number of snps:',nSnps.min()))
print(('Maximum number of snps:',nSnps.max()))
t1 = time.time()
print(('.. finished in %s seconds'%(t1-t0)))
# plot distribution of nSnps
if options.plot_windows:
print('Plotting ditribution of number of SNPs')
plot_file = options.wfile+'.wnd.pdf'
plt = pl.subplot(1,1,1)
pl.hist(nSnps,30)
pl.xlabel('Number of SNPs')
pl.ylabel('Number of windows')
pl.savefig(plot_file) | [
"def",
"preprocess",
"(",
"options",
")",
":",
"assert",
"options",
".",
"bfile",
"!=",
"None",
",",
"'Please specify a bfile.'",
"if",
"options",
".",
"compute_cov",
":",
"assert",
"options",
".",
"bfile",
"!=",
"None",
",",
"'Please specify a bfile.'",
"assert... | 41.397436 | 18.512821 |
def _unpack_token_compact(token):
"""
Unpack a compact-form serialized JWT.
Returns (header, payload, signature, signing_input) on success
Raises DecodeError on bad input
"""
if isinstance(token, (str, unicode)):
token = token.encode('utf-8')
try:
signing_input, crypto_segment = token.rsplit(b'.', 1)
header_segment, payload_segment = signing_input.split(b'.', 1)
except ValueError:
raise DecodeError('Not enough segments')
try:
header_data = base64url_decode(header_segment)
except (TypeError, binascii.Error):
raise DecodeError('Invalid header padding')
try:
header = json.loads(header_data.decode('utf-8'))
except ValueError as e:
raise DecodeError('Invalid header string: %s' % e)
if not isinstance(header, Mapping):
raise DecodeError('Invalid header string: must be a json object')
try:
payload_data = base64url_decode(payload_segment)
except (TypeError, binascii.Error):
raise DecodeError('Invalid payload padding')
try:
payload = json.loads(payload_data.decode('utf-8'))
except ValueError as e:
raise DecodeError('Invalid payload string: %s' % e)
try:
signature = base64url_decode(crypto_segment)
except (TypeError, binascii.Error):
raise DecodeError('Invalid crypto padding')
return (header, payload, signature, signing_input) | [
"def",
"_unpack_token_compact",
"(",
"token",
")",
":",
"if",
"isinstance",
"(",
"token",
",",
"(",
"str",
",",
"unicode",
")",
")",
":",
"token",
"=",
"token",
".",
"encode",
"(",
"'utf-8'",
")",
"try",
":",
"signing_input",
",",
"crypto_segment",
"=",
... | 31.818182 | 19.454545 |
def voice(self):
"""tuple. contain text and lang code
"""
dbid = self.lldb.dbid
text, lang = self._voiceoverdb.get_text_lang(dbid)
return text, lang | [
"def",
"voice",
"(",
"self",
")",
":",
"dbid",
"=",
"self",
".",
"lldb",
".",
"dbid",
"text",
",",
"lang",
"=",
"self",
".",
"_voiceoverdb",
".",
"get_text_lang",
"(",
"dbid",
")",
"return",
"text",
",",
"lang"
] | 30.5 | 11.333333 |
def clean(self):
"""
Pass the provided username and password to the active
authentication backends and verify the user account is
not disabled. If authentication succeeds, the ``User`` object
is assigned to the form so it can be accessed in the view.
"""
username = self.cleaned_data.get('username')
password = self.cleaned_data.get('password')
if username and password:
try:
self.user = authenticate(request=self.request, username=username, password=password)
except Exception:
logger.exception("Error authenticating %s" % username)
error_msg = _('Internal error while authenticating user')
raise forms.ValidationError(error_msg)
if self.user is None:
logger.warning("Failed authentication for %s" % username)
error_msg = _('The username or password is not correct')
raise forms.ValidationError(error_msg)
else:
if not self.user.is_active:
logger.warning("User account %s is disabled" % username)
error_msg = _('This user account is disabled')
raise forms.ValidationError(error_msg)
return self.cleaned_data | [
"def",
"clean",
"(",
"self",
")",
":",
"username",
"=",
"self",
".",
"cleaned_data",
".",
"get",
"(",
"'username'",
")",
"password",
"=",
"self",
".",
"cleaned_data",
".",
"get",
"(",
"'password'",
")",
"if",
"username",
"and",
"password",
":",
"try",
... | 44.793103 | 22.241379 |
def _set_route_source(self, v, load=False):
"""
Setter method for route_source, mapped from YANG variable /rbridge_id/route_map/content/match/ipv6/route_source (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_route_source is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_route_source() directly.
YANG Description: Source address of route
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=route_source.route_source, is_container='container', presence=False, yang_name="route-source", rest_name="route-source", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Source address of route'}}, namespace='urn:brocade.com:mgmt:brocade-ip-policy', defining_module='brocade-ip-policy', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """route_source must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=route_source.route_source, is_container='container', presence=False, yang_name="route-source", rest_name="route-source", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Source address of route'}}, namespace='urn:brocade.com:mgmt:brocade-ip-policy', defining_module='brocade-ip-policy', yang_type='container', is_config=True)""",
})
self.__route_source = t
if hasattr(self, '_set'):
self._set() | [
"def",
"_set_route_source",
"(",
"self",
",",
"v",
",",
"load",
"=",
"False",
")",
":",
"if",
"hasattr",
"(",
"v",
",",
"\"_utype\"",
")",
":",
"v",
"=",
"v",
".",
"_utype",
"(",
"v",
")",
"try",
":",
"t",
"=",
"YANGDynClass",
"(",
"v",
",",
"b... | 71.125 | 34.416667 |
def main():
"""
Example application that opens a serial device and prints messages to the terminal.
"""
try:
# Retrieve the specified serial device.
device = AlarmDecoder(SerialDevice(interface=SERIAL_DEVICE))
# Set up an event handler and open the device
device.on_message += handle_message
# Override the default SerialDevice baudrate since we're using a USB device
# over serial in this example.
with device.open(baudrate=BAUDRATE):
while True:
time.sleep(1)
except Exception as ex:
print('Exception:', ex) | [
"def",
"main",
"(",
")",
":",
"try",
":",
"# Retrieve the specified serial device.",
"device",
"=",
"AlarmDecoder",
"(",
"SerialDevice",
"(",
"interface",
"=",
"SERIAL_DEVICE",
")",
")",
"# Set up an event handler and open the device",
"device",
".",
"on_message",
"+=",... | 31.894737 | 19.894737 |
def create_tag(self, tag_name):
"""
Create a new tag based on the working tree's revision.
:param tag_name: The name of the tag to create (a string).
"""
# Make sure the local repository exists and supports a working tree.
self.create()
self.ensure_working_tree()
# Create the new tag in the local repository.
logger.info("Creating tag '%s' in %s ..", tag_name, format_path(self.local))
self.context.execute(*self.get_create_tag_command(tag_name)) | [
"def",
"create_tag",
"(",
"self",
",",
"tag_name",
")",
":",
"# Make sure the local repository exists and supports a working tree.",
"self",
".",
"create",
"(",
")",
"self",
".",
"ensure_working_tree",
"(",
")",
"# Create the new tag in the local repository.",
"logger",
"."... | 43.083333 | 20.25 |
def do_status(self, args):
'''print the number of work units in an existing work spec'''
work_spec_name = self._get_work_spec_name(args)
status = self.task_master.status(work_spec_name)
self.stdout.write(json.dumps(status, indent=4, sort_keys=True) +
'\n') | [
"def",
"do_status",
"(",
"self",
",",
"args",
")",
":",
"work_spec_name",
"=",
"self",
".",
"_get_work_spec_name",
"(",
"args",
")",
"status",
"=",
"self",
".",
"task_master",
".",
"status",
"(",
"work_spec_name",
")",
"self",
".",
"stdout",
".",
"write",
... | 51.5 | 19.166667 |
def create_database(self):
""" Create postgres database. """
self.print_message("Creating database '%s'" % self.databases['destination']['name'])
self.export_pgpassword('destination')
args = [
"createdb",
self.databases['destination']['name'],
]
args.extend(self.databases['destination']['args'])
for arg in self.databases['destination']['args']:
if arg[:7] == '--user=':
args.append('--owner=%s' % arg[7:])
subprocess.check_call(args) | [
"def",
"create_database",
"(",
"self",
")",
":",
"self",
".",
"print_message",
"(",
"\"Creating database '%s'\"",
"%",
"self",
".",
"databases",
"[",
"'destination'",
"]",
"[",
"'name'",
"]",
")",
"self",
".",
"export_pgpassword",
"(",
"'destination'",
")",
"a... | 41.461538 | 16 |
def load_foundation_sample_data(fd):
"""
Sample data for the Foundation object
:param fd: Foundation Object
:return:
"""
# foundation
fd.width = 16.0 # m
fd.length = 18.0 # m
fd.depth = 0.0 # m
fd.mass = 0.0 | [
"def",
"load_foundation_sample_data",
"(",
"fd",
")",
":",
"# foundation",
"fd",
".",
"width",
"=",
"16.0",
"# m",
"fd",
".",
"length",
"=",
"18.0",
"# m",
"fd",
".",
"depth",
"=",
"0.0",
"# m",
"fd",
".",
"mass",
"=",
"0.0"
] | 21.818182 | 12.363636 |
def _delete_handler(self, handler_class):
"""Delete a specific handler from our logger."""
to_remove = self._get_handler(handler_class)
if not to_remove:
logging.warning('Error we should have an element to remove')
else:
self.handlers.remove(to_remove)
self.logger.removeHandler(to_remove) | [
"def",
"_delete_handler",
"(",
"self",
",",
"handler_class",
")",
":",
"to_remove",
"=",
"self",
".",
"_get_handler",
"(",
"handler_class",
")",
"if",
"not",
"to_remove",
":",
"logging",
".",
"warning",
"(",
"'Error we should have an element to remove'",
")",
"els... | 43.75 | 12.25 |
def unselectRow(self, row):
'Unselect given row, return True if selected; else return False. O(log n)'
if id(row) in self._selectedRows:
del self._selectedRows[id(row)]
return True
else:
return False | [
"def",
"unselectRow",
"(",
"self",
",",
"row",
")",
":",
"if",
"id",
"(",
"row",
")",
"in",
"self",
".",
"_selectedRows",
":",
"del",
"self",
".",
"_selectedRows",
"[",
"id",
"(",
"row",
")",
"]",
"return",
"True",
"else",
":",
"return",
"False"
] | 36.142857 | 17 |
def get_details(self, obj):
""" return detail url """
return reverse('api_user_social_links_detail',
args=[obj.user.username, obj.pk],
request=self.context.get('request'),
format=self.context.get('format')) | [
"def",
"get_details",
"(",
"self",
",",
"obj",
")",
":",
"return",
"reverse",
"(",
"'api_user_social_links_detail'",
",",
"args",
"=",
"[",
"obj",
".",
"user",
".",
"username",
",",
"obj",
".",
"pk",
"]",
",",
"request",
"=",
"self",
".",
"context",
".... | 47.666667 | 13.166667 |
def delete(adapter, case_obj, update=False, existing_case=False):
"""Delete a case and all of it's variants from the database.
Args:
adapter: Connection to database
case_obj(models.Case)
update(bool): If we are in the middle of an update
existing_case(models.Case): If something failed during an update we need to revert
to the original case
"""
# This will overwrite the updated case with the previous one
if update:
adapter.add_case(existing_case)
else:
adapter.delete_case(case_obj)
for file_type in ['vcf_path','vcf_sv_path']:
if not case_obj.get(file_type):
continue
variant_file = case_obj[file_type]
# Get a cyvcf2.VCF object
vcf_obj = get_vcf(variant_file)
delete_variants(
adapter=adapter,
vcf_obj=vcf_obj,
case_obj=case_obj,
) | [
"def",
"delete",
"(",
"adapter",
",",
"case_obj",
",",
"update",
"=",
"False",
",",
"existing_case",
"=",
"False",
")",
":",
"# This will overwrite the updated case with the previous one",
"if",
"update",
":",
"adapter",
".",
"add_case",
"(",
"existing_case",
")",
... | 32 | 17.551724 |
def get_tok(self, tok):
'''
Return the name associated with the token, or False if the token is
not valid
'''
tdata = self.tokens["{0}.get_token".format(self.opts['eauth_tokens'])](self.opts, tok)
if not tdata:
return {}
rm_tok = False
if 'expire' not in tdata:
# invalid token, delete it!
rm_tok = True
if tdata.get('expire', '0') < time.time():
rm_tok = True
if rm_tok:
self.rm_token(tok)
return tdata | [
"def",
"get_tok",
"(",
"self",
",",
"tok",
")",
":",
"tdata",
"=",
"self",
".",
"tokens",
"[",
"\"{0}.get_token\"",
".",
"format",
"(",
"self",
".",
"opts",
"[",
"'eauth_tokens'",
"]",
")",
"]",
"(",
"self",
".",
"opts",
",",
"tok",
")",
"if",
"not... | 28.157895 | 22.263158 |
def build_D3bubbleChart(old, MAX_DEPTH, level=1, toplayer=None):
"""
Similar to standar d3, but nodes with children need to be duplicated otherwise they are
not depicted explicitly but just color coded
"name": "all",
"children": [
{"name": "Biological Science", "size": 9000},
{"name": "Biological Science", "children": [
{"name": "Biological techniques", "size": 6939},
{"name": "Cell biology", "size": 4166},
{"name": "Drug discovery X", "size": 3620, "children": [
{"name": "Biochemistry X", "size": 4585},
{"name": "Biochemistry X", "size": 4585 },
]},
{"name": "Drug discovery Y", "size": 3620, "children": [
{"name": "Biochemistry Y", "size": 4585},
{"name": "Biochemistry Y", "size": 4585 },
]},
{"name": "Drug discovery A", "size": 3620, "children": [
{"name": "Biochemistry A", "size": 4585},
]},
{"name": "Drug discovery B", "size": 3620, },
]},
etc...
"""
out = []
if not old:
old = toplayer
for x in old:
d = {}
# print "*" * level, x.label
d['qname'] = x.qname
d['name'] = x.bestLabel(quotes=False).replace("_", " ")
d['objid'] = x.id
if x.children() and level < MAX_DEPTH:
duplicate_row = {}
duplicate_row['qname'] = x.qname
duplicate_row['name'] = x.bestLabel(quotes=False).replace("_", " ")
duplicate_row['objid'] = x.id
duplicate_row['size'] = len(x.children()) + 5 # fake size
duplicate_row['realsize'] = len(x.children()) # real size
out += [duplicate_row]
d['children'] = build_D3bubbleChart(x.children(), MAX_DEPTH,
level + 1)
else:
d['size'] = 1 # default size
d['realsize'] = 0 # default size
out += [d]
return out | [
"def",
"build_D3bubbleChart",
"(",
"old",
",",
"MAX_DEPTH",
",",
"level",
"=",
"1",
",",
"toplayer",
"=",
"None",
")",
":",
"out",
"=",
"[",
"]",
"if",
"not",
"old",
":",
"old",
"=",
"toplayer",
"for",
"x",
"in",
"old",
":",
"d",
"=",
"{",
"}",
... | 35.705882 | 18.27451 |
def _perform_type_validation(self, path, typ, value, results):
"""
Validates a given value to match specified type.
The type can be defined as a Schema, type, a type name or [[TypeCode]].
When type is a Schema, it executes validation recursively against that Schema.
:param path: a dot notation path to the value.
:param typ: a type to match the value type
:param value: a value to be validated.
:param results: a list with validation results to add new results.
"""
# If type it not defined then skip
if typ == None:
return
# Perform validation against schema
if isinstance(typ, Schema):
schema = typ
schema._perform_validation(path, value, results)
return
# If value is null then skip
value = ObjectReader.get_value(value)
if value == None:
return
name = path if path != None else "value"
value_type = type(value)
# Match types
if TypeMatcher.match_type(typ, value_type):
return
# Generate type mismatch error
results.append(
ValidationResult(
path,
ValidationResultType.Error,
"TYPE_MISMATCH",
name + " type must be " + self._type_to_string(typ) + " but found " + self._type_to_string(value_type),
typ,
value_type
)
) | [
"def",
"_perform_type_validation",
"(",
"self",
",",
"path",
",",
"typ",
",",
"value",
",",
"results",
")",
":",
"# If type it not defined then skip",
"if",
"typ",
"==",
"None",
":",
"return",
"# Perform validation against schema",
"if",
"isinstance",
"(",
"typ",
... | 31.212766 | 21.085106 |
def get_structure_seqs(self, model):
"""Gather chain sequences and store in their corresponding ``ChainProp`` objects in the ``chains`` attribute.
Args:
model (Model): Biopython Model object of the structure you would like to parse
"""
# Don't overwrite existing ChainProp objects
dont_overwrite = []
chains = list(model.get_chains())
for x in chains:
if self.chains.has_id(x.id):
if self.chains.get_by_id(x.id).seq_record:
dont_overwrite.append(x.id)
if len(dont_overwrite) == len(chains):
log.debug('Not writing structure sequences, already stored')
return
# Returns the structures sequences with Xs added
structure_seqs = ssbio.protein.structure.properties.residues.get_structure_seqrecords(model)
log.debug('{}: gathered chain sequences'.format(self.id))
# Associate with ChainProps
for seq_record in structure_seqs:
log.debug('{}: adding chain sequence to ChainProp'.format(seq_record.id))
my_chain = self.chains.get_by_id(seq_record.id)
my_chain.seq_record = seq_record | [
"def",
"get_structure_seqs",
"(",
"self",
",",
"model",
")",
":",
"# Don't overwrite existing ChainProp objects",
"dont_overwrite",
"=",
"[",
"]",
"chains",
"=",
"list",
"(",
"model",
".",
"get_chains",
"(",
")",
")",
"for",
"x",
"in",
"chains",
":",
"if",
"... | 42.035714 | 20.821429 |
def create_local_copy(self, effects=None, store=None):
"""Creates a Local File Copy on Uploadcare Storage.
Args:
- effects:
Adds CDN image effects. If ``self.default_effects`` property
is set effects will be combined with default effects.
- store:
If ``store`` option is set to False the copy of your file will
be deleted in 24 hour period after the upload.
Works only if `autostore` is enabled in the project.
"""
effects = self._build_effects(effects)
store = store or ''
data = {
'source': self.cdn_path(effects)
}
if store:
data['store'] = store
return rest_request('POST', 'files/', data=data) | [
"def",
"create_local_copy",
"(",
"self",
",",
"effects",
"=",
"None",
",",
"store",
"=",
"None",
")",
":",
"effects",
"=",
"self",
".",
"_build_effects",
"(",
"effects",
")",
"store",
"=",
"store",
"or",
"''",
"data",
"=",
"{",
"'source'",
":",
"self",... | 37.142857 | 20.761905 |
def get_zone(self, id=None, name=None):
""" Get zone object by name or id.
"""
log.info("Picking zone: %s (%s)" % (name, id))
return self.zones[id or name] | [
"def",
"get_zone",
"(",
"self",
",",
"id",
"=",
"None",
",",
"name",
"=",
"None",
")",
":",
"log",
".",
"info",
"(",
"\"Picking zone: %s (%s)\"",
"%",
"(",
"name",
",",
"id",
")",
")",
"return",
"self",
".",
"zones",
"[",
"id",
"or",
"name",
"]"
] | 36.6 | 3.6 |
def peng_float(snum):
r"""
Return floating point equivalent of a number represented in engineering notation.
:param snum: Number
:type snum: :ref:`EngineeringNotationNumber`
:rtype: string
.. [[[cog cog.out(exobj_eng.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for
.. peng.functions.peng_float
:raises: RuntimeError (Argument \`snum\` is not valid)
.. [[[end]]]
For example:
>>> import peng
>>> peng.peng_float(peng.peng(1235.6789E3, 3, False))
1236000.0
"""
# This can be coded as peng_mant(snum)*(peng_power(snum)[1]), but the
# "function unrolling" is about 4x faster
snum = snum.rstrip()
power = _SUFFIX_POWER_DICT[" " if snum[-1].isdigit() else snum[-1]]
return float(snum if snum[-1].isdigit() else snum[:-1]) * power | [
"def",
"peng_float",
"(",
"snum",
")",
":",
"# This can be coded as peng_mant(snum)*(peng_power(snum)[1]), but the",
"# \"function unrolling\" is about 4x faster",
"snum",
"=",
"snum",
".",
"rstrip",
"(",
")",
"power",
"=",
"_SUFFIX_POWER_DICT",
"[",
"\" \"",
"if",
"snum",
... | 29.321429 | 23.785714 |
def translate(self, frame=0):
'''Returns a Fasta sequence, translated into amino acids. Starts translating from 'frame', where frame expected to be 0,1 or 2'''
return Fasta(self.id, ''.join([genetic_codes.codes[genetic_code].get(self.seq[x:x+3].upper(), 'X') for x in range(frame, len(self)-1-frame, 3)])) | [
"def",
"translate",
"(",
"self",
",",
"frame",
"=",
"0",
")",
":",
"return",
"Fasta",
"(",
"self",
".",
"id",
",",
"''",
".",
"join",
"(",
"[",
"genetic_codes",
".",
"codes",
"[",
"genetic_code",
"]",
".",
"get",
"(",
"self",
".",
"seq",
"[",
"x"... | 106.333333 | 73.666667 |
def update_child_calls(self):
"""Replace child nodes on original function call with their partials"""
for node in filter(lambda n: len(n.arg_name), self.child_list):
self.data["bound_args"].arguments[node.arg_name] = node.partial()
self.updated = True | [
"def",
"update_child_calls",
"(",
"self",
")",
":",
"for",
"node",
"in",
"filter",
"(",
"lambda",
"n",
":",
"len",
"(",
"n",
".",
"arg_name",
")",
",",
"self",
".",
"child_list",
")",
":",
"self",
".",
"data",
"[",
"\"bound_args\"",
"]",
".",
"argume... | 47.166667 | 22 |
def threw(cls, spy, error_type=None):
"""
Checking the inspector is raised error_type
Args: SinonSpy, Exception (defaut: None)
"""
cls.__is_spy(spy)
if not (spy.threw(error_type)):
raise cls.failException(cls.message) | [
"def",
"threw",
"(",
"cls",
",",
"spy",
",",
"error_type",
"=",
"None",
")",
":",
"cls",
".",
"__is_spy",
"(",
"spy",
")",
"if",
"not",
"(",
"spy",
".",
"threw",
"(",
"error_type",
")",
")",
":",
"raise",
"cls",
".",
"failException",
"(",
"cls",
... | 33.75 | 5.75 |
def SignalAbort(self):
"""Signals the process to abort."""
self._abort = True
if self._foreman_status_wait_event:
self._foreman_status_wait_event.set()
if self._analysis_mediator:
self._analysis_mediator.SignalAbort() | [
"def",
"SignalAbort",
"(",
"self",
")",
":",
"self",
".",
"_abort",
"=",
"True",
"if",
"self",
".",
"_foreman_status_wait_event",
":",
"self",
".",
"_foreman_status_wait_event",
".",
"set",
"(",
")",
"if",
"self",
".",
"_analysis_mediator",
":",
"self",
".",... | 34.142857 | 7.428571 |
def ensemble_mean_std_max_min(ens):
"""Calculate ensemble statistics between a results from an ensemble of climate simulations
Returns a dataset containing ensemble mean, standard-deviation,
minimum and maximum for input climate simulations.
Parameters
----------
ens : Ensemble dataset (see xclim.utils.create_ensemble)
Returns
-------
xarray dataset with containing data variables of ensemble statistics
Examples
--------
>>> from xclim import utils
>>> import glob
>>> ncfiles = glob.glob('/*tas*.nc')
Create ensemble dataset
>>> ens = utils.create_ensemble(ncfiles)
Calculate ensemble statistics
>>> ens_means_std = utils.ensemble_mean_std_max_min(ens)
>>> print(ens_mean_std['tas_mean'])
"""
dsOut = ens.drop(ens.data_vars)
for v in ens.data_vars:
dsOut[v + '_mean'] = ens[v].mean(dim='realization')
dsOut[v + '_stdev'] = ens[v].std(dim='realization')
dsOut[v + '_max'] = ens[v].max(dim='realization')
dsOut[v + '_min'] = ens[v].min(dim='realization')
for vv in dsOut.data_vars:
dsOut[vv].attrs = ens[v].attrs
if 'description' in dsOut[vv].attrs.keys():
vv.split()
dsOut[vv].attrs['description'] = dsOut[vv].attrs['description'] + ' : ' + vv.split('_')[
-1] + ' of ensemble'
return dsOut | [
"def",
"ensemble_mean_std_max_min",
"(",
"ens",
")",
":",
"dsOut",
"=",
"ens",
".",
"drop",
"(",
"ens",
".",
"data_vars",
")",
"for",
"v",
"in",
"ens",
".",
"data_vars",
":",
"dsOut",
"[",
"v",
"+",
"'_mean'",
"]",
"=",
"ens",
"[",
"v",
"]",
".",
... | 33.341463 | 20.341463 |
def reset(self):
'''
Reset Stan model and all tracked distributions and parameters.
'''
self.parameters = []
self.transformed_parameters = []
self.expressions = []
self.data = []
self.transformed_data = []
self.X = {}
self.model = []
self.mu_cont = []
self.mu_cat = []
self._original_names = {}
# variables to suppress in output. Stan uses limited set for variable
# names, so track variable names we may need to simplify for the model
# code and then sub back later.
self._suppress_vars = ['yhat', 'lp__'] | [
"def",
"reset",
"(",
"self",
")",
":",
"self",
".",
"parameters",
"=",
"[",
"]",
"self",
".",
"transformed_parameters",
"=",
"[",
"]",
"self",
".",
"expressions",
"=",
"[",
"]",
"self",
".",
"data",
"=",
"[",
"]",
"self",
".",
"transformed_data",
"="... | 34.722222 | 17.611111 |
def _remote_methodcall(id, method_name, *args, **kwargs):
"""(Executed on remote engine) convert Ids to real objects, call method """
obj = distob.engine[id]
nargs = []
for a in args:
if isinstance(a, Id):
nargs.append(distob.engine[a])
elif (isinstance(a, collections.Sequence) and
not isinstance(a, string_types)):
nargs.append(
[distob.engine[b] if isinstance(b, Id) else b for b in a])
else: nargs.append(a)
for k, a in kwargs.items():
if isinstance(a, Id):
kwargs[k] = distob.engine[a]
elif (isinstance(a, collections.Sequence) and
not isinstance(a, string_types)):
kwargs[k] = [
distob.engine[b] if isinstance(b, Id) else b for b in a]
result = getattr(obj, method_name)(*nargs, **kwargs)
if (isinstance(result, collections.Sequence) and
not isinstance(result, string_types)):
# We will return any sub-sequences by value, not recurse deeper
results = []
for subresult in result:
if type(subresult) in distob.engine.proxy_types:
results.append(Ref(subresult))
else:
results.append(subresult)
return results
elif type(result) in distob.engine.proxy_types:
return Ref(result)
else:
return result | [
"def",
"_remote_methodcall",
"(",
"id",
",",
"method_name",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"obj",
"=",
"distob",
".",
"engine",
"[",
"id",
"]",
"nargs",
"=",
"[",
"]",
"for",
"a",
"in",
"args",
":",
"if",
"isinstance",
"(",
... | 40.529412 | 15.029412 |
def from_timeseries(ts1, ts2, stride, fftlength=None, overlap=None,
                    window=None, nproc=1, **kwargs):
    """Calculate the coherence `Spectrogram` between two `TimeSeries`.

    Parameters
    ----------
    ts1, ts2 : :class:`~gwpy.timeseries.TimeSeries`
        input time-series to process.
    stride : `float`
        number of seconds in single PSD (column of spectrogram).
    fftlength : `float`
        number of seconds in single FFT, defaults to ``stride / 2``.
    overlap : `int`, optional, default: fftlength
        number of seconds of overlap between FFTs, defaults to no overlap
    window : `timeseries.window.Window`, optional, default: `None`
        window function to apply to timeseries prior to FFT.
    nproc : `int`, default: ``1``
        maximum number of independent frame reading processes, default
        is set to single-process file reading.

    Returns
    -------
    spectrogram : :class:`~gwpy.spectrogram.Spectrogram`
        time-frequency power spectrogram as generated from the
        input time-series.
    """
    # format FFT parameters
    if fftlength is None:
        fftlength = stride / 2.
    # get size of spectrogram
    nsteps = int(ts1.size // (stride * ts1.sample_rate.value))
    nproc = min(nsteps, nproc)
    # single-process return
    if nsteps == 0 or nproc == 1:
        return _from_timeseries(ts1, ts2, stride, fftlength=fftlength,
                                overlap=overlap, window=window, **kwargs)

    # wrap spectrogram generator so worker exceptions travel back
    # through the queue instead of being lost in the child process
    def _specgram(queue_, tsa, tsb):
        try:
            queue_.put(_from_timeseries(tsa, tsb, stride, fftlength=fftlength,
                                        overlap=overlap, window=window,
                                        **kwargs))
        except Exception as exc:  # pylint: disable=broad-except
            queue_.put(exc)
    # otherwise build process list
    stepperproc = int(ceil(nsteps / nproc))
    # samples per process for each series; cast to int because these are
    # used as slice indices below (float indices raise TypeError in numpy)
    nsamp = [int(stepperproc * ts.sample_rate.value * stride)
             for ts in (ts1, ts2)]
    queue = ProcessQueue(nproc)
    processlist = []
    for i in range(nproc):
        process = Process(target=_specgram,
                          args=(queue, ts1[i * nsamp[0]:(i + 1) * nsamp[0]],
                                ts2[i * nsamp[1]:(i + 1) * nsamp[1]]))
        process.daemon = True
        processlist.append(process)
        process.start()
        if ((i + 1) * nsamp[0]) >= ts1.size:
            break
    # collect results, re-raising the first worker exception (if any)
    data = []
    for process in processlist:
        result = queue.get()
        if isinstance(result, Exception):
            raise result
        else:
            data.append(result)
    # and block until all workers have exited
    for process in processlist:
        process.join()
    # sort the pieces into time order and stitch into a single spectrogram
    out = SpectrogramList(*data)
    out.sort(key=lambda spec: spec.epoch.gps)
    return out.join()
"def",
"from_timeseries",
"(",
"ts1",
",",
"ts2",
",",
"stride",
",",
"fftlength",
"=",
"None",
",",
"overlap",
"=",
"None",
",",
"window",
"=",
"None",
",",
"nproc",
"=",
"1",
",",
"*",
"*",
"kwargs",
")",
":",
"# format FFT parameters",
"if",
"fftlen... | 33.987654 | 19.432099 |
def river_flow(self, source, world, river_list, lake_list):
    """Trace a river path from *source* by repeatedly flowing to the
    lowest available point, until the river reaches the ocean, merges
    into an existing river, or dead-ends and becomes a lake.

    Parameters:
        source: [x, y] starting location of the river.
        world: world object providing elevation data, ``is_ocean``,
            ``contains``, ``width`` and ``height``.
        river_list: list of previously-computed river paths; hitting one
            of these ends this river at the merge point.
        lake_list: mutated in place — locations where the flow dead-ends
            are appended here as lakes.

    Returns:
        list of [x, y] positions making up the river's path.
    """
    current_location = source
    path = [source]
    # start the flow
    while True:
        x, y = current_location
        # is there a river nearby, flow into it
        for dx, dy in DIR_NEIGHBORS:
            ax, ay = x + dx, y + dy
            if self.wrap:
                ax, ay = overflow(ax, world.width), overflow(ay,
                                                             world.height)
            for river in river_list:
                if [ax, ay] in river:
                    # merge: append the existing river's tiles from the
                    # merge point onwards, then stop tracing
                    merge = False
                    for rx, ry in river:
                        if [ax, ay] == [rx, ry]:
                            merge = True
                            path.append([rx, ry])
                        elif merge:
                            path.append([rx, ry])
                    return path  # skip the rest, return path
        # found a sea?
        if world.is_ocean((x, y)):
            break
        # find our immediate lowest elevation and flow there
        quick_section = self.find_quick_path(current_location, world)
        if quick_section:
            path.append(quick_section)
            current_location = quick_section
            continue  # stop here and enter back into loop
        # no immediate downhill neighbour: search further afield
        # (is_wrapped indicates the target lies across the map seam)
        is_wrapped, lower_elevation = self.findLowerElevation(
            current_location, world)
        if lower_elevation and not is_wrapped:
            lower_path = worldengine.astar.PathFinder().find(
            world.layers['elevation'].data, current_location, lower_elevation)
            if lower_path:
                path += lower_path
                current_location = path[-1]
            else:
                break
        elif lower_elevation and is_wrapped:
            # TODO: make this more natural
            max_radius = 40
            cx, cy = current_location
            lx, ly = lower_elevation
            # NOTE(review): bounds test uses > rather than >= — confirm
            # whether width/height are exclusive upper bounds here
            if x < 0 or y < 0 or x > world.width or y > world.height:
                raise Exception(
                    "BUG: fix me... we shouldn't be here: %s %s" % (
                        current_location, lower_elevation))
            if not in_circle(max_radius, cx, cy, lx, cy):
                # are we wrapping on x axis?
                if cx - lx < 0:
                    lx = 0  # move to left edge
                    nx = world.width - 1  # next step is wrapped around
                else:
                    lx = world.width - 1  # move to right edge
                    nx = 0  # next step is wrapped around
                ly = ny = int((cy + ly) / 2)  # move halfway
            elif not in_circle(max_radius, cx, cy, cx, ly):
                # are we wrapping on y axis?
                if cy - ly < 0:
                    ly = 0  # move to top edge
                    ny = world.height - 1  # next step is wrapped around
                else:
                    ly = world.height - 1  # move to bottom edge
                    ny = 0  # next step is wrapped around
                lx = nx = int((cx + lx) / 2)  # move halfway
            else:
                raise Exception(
                    "BUG: fix me... we are not in circle: %s %s" % (
                        current_location, lower_elevation))
            # find our way to the edge
            edge_path = worldengine.astar.PathFinder().find(
                world.layers['elevation'].data, [cx, cy], [lx, ly])
            if not edge_path:
                # can't find another other path, make it a lake
                lake_list.append(current_location)
                break
            path += edge_path  # add our newly found path
            path.append([nx, ny])  # finally add our overflow to other side
            current_location = path[-1]
            # find our way to lowest position original found
            lower_path = worldengine.astar.PathFinder().find(
            world.layers['elevation'].data, current_location, lower_elevation)
            path += lower_path
            current_location = path[-1]
        else:  # can't find any other path, make it a lake
            lake_list.append(current_location)
            break  # end of river
    if not world.contains(current_location):
        print("Why are we here:", current_location)
    return path
"def",
"river_flow",
"(",
"self",
",",
"source",
",",
"world",
",",
"river_list",
",",
"lake_list",
")",
":",
"current_location",
"=",
"source",
"path",
"=",
"[",
"source",
"]",
"# start the flow",
"while",
"True",
":",
"x",
",",
"y",
"=",
"current_locatio... | 42.5 | 18.627273 |
def _stable_names(self):
'''
This private method extracts the element names from stable_el.
Note that stable_names is a misnomer as stable_el also contains
unstable element names with a number 999 for the *stable* mass
numbers. (?!??)
'''
stable_names=[]
for i in range(len(self.stable_el)):
stable_names.append(self.stable_el[i][0])
self.stable_names=stable_names | [
"def",
"_stable_names",
"(",
"self",
")",
":",
"stable_names",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"self",
".",
"stable_el",
")",
")",
":",
"stable_names",
".",
"append",
"(",
"self",
".",
"stable_el",
"[",
"i",
"]",
"[",
"0"... | 36.5 | 21.5 |
def stage_redis(self, variable, data):
    """Stage data in Redis.

    Args:
        variable (str): The Redis variable name.
        data (dict|list|str): The data to store in Redis.
    """
    if isinstance(data, int):
        data = str(data)

    # Binary* variables carry base64-encoded payloads that must be
    # decoded before being written to the playbook store.
    if variable.endswith('Binary'):
        try:
            data = base64.b64decode(data)
        except binascii.Error:
            sys.exit(
                'The Binary staging data for variable {} is not properly base64 encoded.'.format(
                    variable
                )
            )
    elif variable.endswith('BinaryArray'):
        if isinstance(data, string_types):
            data = json.loads(data)
        try:
            # decode each entry of the array
            data = [base64.b64decode(entry) for entry in data]
        except binascii.Error:
            sys.exit(
                'The BinaryArray staging data for variable {} is not properly base64 encoded.'.format(
                    variable
                )
            )
    self.log.info(u'[stage] Creating variable {}'.format(variable))
    self.tcex.playbook.create(variable, data)
"def",
"stage_redis",
"(",
"self",
",",
"variable",
",",
"data",
")",
":",
"if",
"isinstance",
"(",
"data",
",",
"int",
")",
":",
"data",
"=",
"str",
"(",
"data",
")",
"# handle binary",
"if",
"variable",
".",
"endswith",
"(",
"'Binary'",
")",
":",
"... | 38.294118 | 14.176471 |
def _xy2hash(x, y, dim):
    """Convert (x, y) to hashcode.
    Based on the implementation here:
    https://en.wikipedia.org/w/index.php?title=Hilbert_curve&oldid=797332503
    Pure python implementation.
    Parameters:
    x: int x value of point [0, dim) in dim x dim coord system
    y: int y value of point [0, dim) in dim x dim coord system
    dim: int Number of coding points each x, y value can take.
    Corresponds to 2^level of the hilbert curve.
    Returns:
    int: hashcode ∈ [0, dim**2)
    """
    code = 0
    side = dim >> 1
    # walk the quadrant hierarchy from coarsest to finest
    while side:
        rx = 1 if x & side else 0
        ry = 1 if y & side else 0
        code += side * side * ((3 * rx) ^ ry)
        x, y = _rotate(side, x, y, rx, ry)
        side >>= 1
    return code
"def",
"_xy2hash",
"(",
"x",
",",
"y",
",",
"dim",
")",
":",
"d",
"=",
"0",
"lvl",
"=",
"dim",
">>",
"1",
"while",
"(",
"lvl",
">",
"0",
")",
":",
"rx",
"=",
"int",
"(",
"(",
"x",
"&",
"lvl",
")",
">",
"0",
")",
"ry",
"=",
"int",
"(",
... | 29.923077 | 21.192308 |
def export(id, local=False, scrub_pii=False):
    """Export data from an experiment.

    Args:
        id (str): the experiment id to export (note: shadows the ``id``
            builtin; kept for interface compatibility).
        local (bool): if True, read from the local database rather than
            the Heroku app's database.
        scrub_pii (bool): if True, request that personally identifiable
            information be removed during the CSV copy.

    Returns:
        str: absolute path to the zipped data archive.
    """
    print("Preparing to export the data...")
    if local:
        db_uri = db.db_url
    else:
        db_uri = HerokuApp(id).db_uri
    # Create the data package if it doesn't already exist.
    subdata_path = os.path.join("data", id, "data")
    try:
        os.makedirs(subdata_path)
    except OSError as e:
        # tolerate an existing directory; re-raise any other failure
        if e.errno != errno.EEXIST or not os.path.isdir(subdata_path):
            raise
    # Copy in the data.
    copy_db_to_csv(db_uri, subdata_path, scrub_pii=scrub_pii)
    # Copy the experiment code into a code/ subdirectory.
    # NOTE(review): deliberate best-effort — the snapshot zip may not
    # exist; the broad except keeps export working without it, but a
    # narrower OSError would be less likely to hide real bugs.
    try:
        shutil.copyfile(
            os.path.join("snapshots", id + "-code.zip"),
            os.path.join("data", id, id + "-code.zip"),
        )
    except Exception:
        pass
    # Copy in the DATA readme.
    # open(os.path.join(id, "README.txt"), "a").close()
    # Save the experiment id.
    with open(os.path.join("data", id, "experiment_id.md"), "a+") as file:
        file.write(id)
    # Zip data
    src = os.path.join("data", id)
    dst = os.path.join("data", id + "-data.zip")
    archive_data(id, src, dst)
    cwd = os.getcwd()
    data_filename = "{}-data.zip".format(id)
    path_to_data = os.path.join(cwd, "data", data_filename)
    # Backup data on S3 unless run locally
    if not local:
        bucket = user_s3_bucket()
        bucket.upload_file(path_to_data, data_filename)
        url = _generate_s3_url(bucket, data_filename)
        # Register experiment UUID with dallinger
        register(id, url)
    return path_to_data
"def",
"export",
"(",
"id",
",",
"local",
"=",
"False",
",",
"scrub_pii",
"=",
"False",
")",
":",
"print",
"(",
"\"Preparing to export the data...\"",
")",
"if",
"local",
":",
"db_uri",
"=",
"db",
".",
"db_url",
"else",
":",
"db_uri",
"=",
"HerokuApp",
"... | 26.87931 | 21.965517 |
def SetCredentials(api_username, api_passwd):
    """Establish API username and password associated with APIv2 commands."""
    # Flip on v2 mode and record the credentials at module level so that
    # subsequent APIv2 calls can pick them up.
    global _V2_ENABLED, V2_API_USERNAME, V2_API_PASSWD
    _V2_ENABLED = True
    V2_API_USERNAME, V2_API_PASSWD = api_username, api_passwd
"def",
"SetCredentials",
"(",
"api_username",
",",
"api_passwd",
")",
":",
"global",
"V2_API_USERNAME",
"global",
"V2_API_PASSWD",
"global",
"_V2_ENABLED",
"_V2_ENABLED",
"=",
"True",
"V2_API_USERNAME",
"=",
"api_username",
"V2_API_PASSWD",
"=",
"api_passwd"
] | 32.25 | 13 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.