text stringlengths 89 104k | code_tokens list | avg_line_len float64 7.91 980 | score float64 0 630 |
|---|---|---|---|
def _findlinestarts(code):
"""Find the offsets in a byte code which are start of lines in the source.
Generate pairs (offset, lineno) as described in Python/compile.c.
Arguments:
code: code object.
Yields:
Address and line number pairs.
"""
byte_increments = [ord(c) for c in code.co_lnotab[0::2]]
line_increments = [ord(c) for c in code.co_lnotab[1::2]]
lastlineno = None
lineno = code.co_firstlineno
addr = 0
for byte_incr, line_incr in zip(byte_increments, line_increments):
if byte_incr:
if lineno != lastlineno:
yield (addr, lineno)
lastlineno = lineno
addr += byte_incr
lineno += line_incr
if lineno != lastlineno:
yield (addr, lineno) | [
"def",
"_findlinestarts",
"(",
"code",
")",
":",
"byte_increments",
"=",
"[",
"ord",
"(",
"c",
")",
"for",
"c",
"in",
"code",
".",
"co_lnotab",
"[",
"0",
":",
":",
"2",
"]",
"]",
"line_increments",
"=",
"[",
"ord",
"(",
"c",
")",
"for",
"c",
"in",
"code",
".",
"co_lnotab",
"[",
"1",
":",
":",
"2",
"]",
"]",
"lastlineno",
"=",
"None",
"lineno",
"=",
"code",
".",
"co_firstlineno",
"addr",
"=",
"0",
"for",
"byte_incr",
",",
"line_incr",
"in",
"zip",
"(",
"byte_increments",
",",
"line_increments",
")",
":",
"if",
"byte_incr",
":",
"if",
"lineno",
"!=",
"lastlineno",
":",
"yield",
"(",
"addr",
",",
"lineno",
")",
"lastlineno",
"=",
"lineno",
"addr",
"+=",
"byte_incr",
"lineno",
"+=",
"line_incr",
"if",
"lineno",
"!=",
"lastlineno",
":",
"yield",
"(",
"addr",
",",
"lineno",
")"
] | 26.615385 | 20.423077 |
def image_as_moving_sequence(
image, sequence_length=20, output_size=(64, 64), velocity=0.1,
start_position=None):
"""Turn simple static images into sequences of the originals bouncing around.
Adapted from Srivastava et al.
http://www.cs.toronto.edu/~nitish/unsupervised_video/
Example usage:
```python
import tensorflow as tf
import tensorflow_datasets as tfds
from tensorflow_datasets.video import moving_sequence
tf.compat.v1.enable_eager_execution()
def animate(sequence):
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
sequence = np.squeeze(sequence, axis=-1)
fig = plt.figure()
plt.axis("off")
ims = [[plt.imshow(im, cmap="gray", animated=True)] for im in sequence]
# don't remove `anim =` as linter may suggets
# weird behaviour, plot will freeze on last frame
anim = animation.ArtistAnimation(
fig, ims, interval=50, blit=True, repeat_delay=100)
plt.show()
plt.close()
tf.enable_eager_execution()
mnist_ds = tfds.load("mnist", split=tfds.Split.TRAIN, as_supervised=True)
mnist_ds = mnist_ds.repeat().shuffle(1024)
def map_fn(image, label):
sequence = moving_sequence.image_as_moving_sequence(
image, sequence_length=20)
return sequence.image_sequence
moving_mnist_ds = mnist_ds.map(map_fn).batch(2).map(
lambda x: dict(image_sequence=tf.reduce_max(x, axis=0)))
# # for comparison with test data provided by original authors
# moving_mnist_ds = tfds.load("moving_mnist", split=tfds.Split.TEST)
for seq in moving_mnist_ds:
animate(seq["image_sequence"].numpy())
```
Args:
image: [in_h, in_w, n_channels] tensor defining the sub-image to be bouncing
around.
sequence_length: int, length of sequence.
output_size: (out_h, out_w) size returned images.
velocity: scalar speed or 2D velocity of image. If scalar, the 2D
velocity is randomly generated with this magnitude. This is the
normalized distance moved each time step by the sub-image, where
normalization occurs over the feasible distance the sub-image can move
e.g if the input image is [10 x 10] and the output image is [60 x 60],
a speed of 0.1 means the sub-image moves (60 - 10) * 0.1 = 5 pixels per
time step.
start_position: 2D float32 normalized initial position of each
image in [0, 1]. Randomized uniformly if not given.
Returns:
`MovingSequence` namedtuple containing:
`image_sequence`:
[sequence_length, out_h, out_w, n_channels] image at each time step.
padded values are all zero. Same dtype as input image.
`trajectory`: [sequence_length, 2] float32 in [0, 1]
2D normalized coordinates of the image at every time step.
`start_position`: 2D float32 initial position in [0, 1].
2D normalized initial position of image. Same as input if provided,
otherwise the randomly value generated.
`velocity`: 2D float32 normalized velocity. Same as input velocity
if provided as a 2D tensor, otherwise the random velocity generated.
"""
ndims = 2
image = tf.convert_to_tensor(image)
if image.shape.ndims != 3:
raise ValueError("image must be rank 3, got %s" % str(image))
output_size = tf.TensorShape(output_size)
if len(output_size) != ndims:
raise ValueError("output_size must have exactly %d elements, got %s"
% (ndims, output_size))
image_shape = tf.shape(image)
if start_position is None:
start_position = tf.random.uniform((ndims,), dtype=tf.float32)
elif start_position.shape != (ndims,):
raise ValueError("start_positions must (%d,)" % ndims)
velocity = tf.convert_to_tensor(velocity, dtype=tf.float32)
if velocity.shape.ndims == 0:
velocity = _get_random_unit_vector(ndims, tf.float32) * velocity
elif velocity.shape.ndims != 1:
raise ValueError("velocity must be rank 0 or rank 1, got %s" % velocity)
t = tf.range(sequence_length, dtype=tf.float32)
trajectory = _get_linear_trajectory(start_position, velocity, t)
trajectory = _bounce_to_bbox(trajectory)
total_padding = output_size - image_shape[:2]
if not tf.executing_eagerly():
cond = tf.compat.v1.assert_greater(total_padding, -1)
with tf.control_dependencies([cond]):
total_padding = tf.identity(total_padding)
sequence_pad_lefts = tf.cast(
tf.math.round(trajectory * tf.cast(total_padding, tf.float32)), tf.int32)
sequence = _create_moving_sequence(image, sequence_pad_lefts, total_padding)
sequence.set_shape(
[sequence_length] + output_size.as_list() + [image.shape[-1]])
return MovingSequence(
image_sequence=sequence,
trajectory=trajectory,
start_position=start_position,
velocity=velocity) | [
"def",
"image_as_moving_sequence",
"(",
"image",
",",
"sequence_length",
"=",
"20",
",",
"output_size",
"=",
"(",
"64",
",",
"64",
")",
",",
"velocity",
"=",
"0.1",
",",
"start_position",
"=",
"None",
")",
":",
"ndims",
"=",
"2",
"image",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"image",
")",
"if",
"image",
".",
"shape",
".",
"ndims",
"!=",
"3",
":",
"raise",
"ValueError",
"(",
"\"image must be rank 3, got %s\"",
"%",
"str",
"(",
"image",
")",
")",
"output_size",
"=",
"tf",
".",
"TensorShape",
"(",
"output_size",
")",
"if",
"len",
"(",
"output_size",
")",
"!=",
"ndims",
":",
"raise",
"ValueError",
"(",
"\"output_size must have exactly %d elements, got %s\"",
"%",
"(",
"ndims",
",",
"output_size",
")",
")",
"image_shape",
"=",
"tf",
".",
"shape",
"(",
"image",
")",
"if",
"start_position",
"is",
"None",
":",
"start_position",
"=",
"tf",
".",
"random",
".",
"uniform",
"(",
"(",
"ndims",
",",
")",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
"elif",
"start_position",
".",
"shape",
"!=",
"(",
"ndims",
",",
")",
":",
"raise",
"ValueError",
"(",
"\"start_positions must (%d,)\"",
"%",
"ndims",
")",
"velocity",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"velocity",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
"if",
"velocity",
".",
"shape",
".",
"ndims",
"==",
"0",
":",
"velocity",
"=",
"_get_random_unit_vector",
"(",
"ndims",
",",
"tf",
".",
"float32",
")",
"*",
"velocity",
"elif",
"velocity",
".",
"shape",
".",
"ndims",
"!=",
"1",
":",
"raise",
"ValueError",
"(",
"\"velocity must be rank 0 or rank 1, got %s\"",
"%",
"velocity",
")",
"t",
"=",
"tf",
".",
"range",
"(",
"sequence_length",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
"trajectory",
"=",
"_get_linear_trajectory",
"(",
"start_position",
",",
"velocity",
",",
"t",
")",
"trajectory",
"=",
"_bounce_to_bbox",
"(",
"trajectory",
")",
"total_padding",
"=",
"output_size",
"-",
"image_shape",
"[",
":",
"2",
"]",
"if",
"not",
"tf",
".",
"executing_eagerly",
"(",
")",
":",
"cond",
"=",
"tf",
".",
"compat",
".",
"v1",
".",
"assert_greater",
"(",
"total_padding",
",",
"-",
"1",
")",
"with",
"tf",
".",
"control_dependencies",
"(",
"[",
"cond",
"]",
")",
":",
"total_padding",
"=",
"tf",
".",
"identity",
"(",
"total_padding",
")",
"sequence_pad_lefts",
"=",
"tf",
".",
"cast",
"(",
"tf",
".",
"math",
".",
"round",
"(",
"trajectory",
"*",
"tf",
".",
"cast",
"(",
"total_padding",
",",
"tf",
".",
"float32",
")",
")",
",",
"tf",
".",
"int32",
")",
"sequence",
"=",
"_create_moving_sequence",
"(",
"image",
",",
"sequence_pad_lefts",
",",
"total_padding",
")",
"sequence",
".",
"set_shape",
"(",
"[",
"sequence_length",
"]",
"+",
"output_size",
".",
"as_list",
"(",
")",
"+",
"[",
"image",
".",
"shape",
"[",
"-",
"1",
"]",
"]",
")",
"return",
"MovingSequence",
"(",
"image_sequence",
"=",
"sequence",
",",
"trajectory",
"=",
"trajectory",
",",
"start_position",
"=",
"start_position",
",",
"velocity",
"=",
"velocity",
")"
] | 39.283333 | 21.083333 |
def _EnsureFileExists(self):
"""Touches a file; returns False on error, True on success."""
if not os.path.exists(self._filename):
old_umask = os.umask(0o177)
try:
open(self._filename, 'a+b').close()
except OSError:
return False
finally:
os.umask(old_umask)
return True | [
"def",
"_EnsureFileExists",
"(",
"self",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"self",
".",
"_filename",
")",
":",
"old_umask",
"=",
"os",
".",
"umask",
"(",
"0o177",
")",
"try",
":",
"open",
"(",
"self",
".",
"_filename",
",",
"'a+b'",
")",
".",
"close",
"(",
")",
"except",
"OSError",
":",
"return",
"False",
"finally",
":",
"os",
".",
"umask",
"(",
"old_umask",
")",
"return",
"True"
] | 34.454545 | 11.363636 |
def _compute_filename(self, request: BaseRequest):
'''Get the appropriate filename from the request.'''
path = self._path_namer.get_filename(request.url_info)
if os.path.isdir(path):
path += '.f'
else:
dir_name, name = os.path.split(path)
path = os.path.join(anti_clobber_dir_path(dir_name), name)
return path | [
"def",
"_compute_filename",
"(",
"self",
",",
"request",
":",
"BaseRequest",
")",
":",
"path",
"=",
"self",
".",
"_path_namer",
".",
"get_filename",
"(",
"request",
".",
"url_info",
")",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"path",
")",
":",
"path",
"+=",
"'.f'",
"else",
":",
"dir_name",
",",
"name",
"=",
"os",
".",
"path",
".",
"split",
"(",
"path",
")",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"anti_clobber_dir_path",
"(",
"dir_name",
")",
",",
"name",
")",
"return",
"path"
] | 34.272727 | 22.090909 |
def unregister_service(self, registration):
# type: (ServiceRegistration) -> bool
"""
Unregisters the given service
:param registration: A ServiceRegistration to the service to unregister
:raise BundleException: Invalid reference
"""
# Get the Service Reference
reference = registration.get_reference()
# Remove the service from the registry
svc_instance = self._registry.unregister(reference)
# Keep a track of the unregistering reference
self.__unregistering_services[reference] = svc_instance
# Call the listeners
event = ServiceEvent(ServiceEvent.UNREGISTERING, reference)
self._dispatcher.fire_service_event(event)
# Update the bundle registration information
bundle = reference.get_bundle()
bundle._unregistered_service(registration)
# Remove the unregistering reference
del self.__unregistering_services[reference]
return True | [
"def",
"unregister_service",
"(",
"self",
",",
"registration",
")",
":",
"# type: (ServiceRegistration) -> bool",
"# Get the Service Reference",
"reference",
"=",
"registration",
".",
"get_reference",
"(",
")",
"# Remove the service from the registry",
"svc_instance",
"=",
"self",
".",
"_registry",
".",
"unregister",
"(",
"reference",
")",
"# Keep a track of the unregistering reference",
"self",
".",
"__unregistering_services",
"[",
"reference",
"]",
"=",
"svc_instance",
"# Call the listeners",
"event",
"=",
"ServiceEvent",
"(",
"ServiceEvent",
".",
"UNREGISTERING",
",",
"reference",
")",
"self",
".",
"_dispatcher",
".",
"fire_service_event",
"(",
"event",
")",
"# Update the bundle registration information",
"bundle",
"=",
"reference",
".",
"get_bundle",
"(",
")",
"bundle",
".",
"_unregistered_service",
"(",
"registration",
")",
"# Remove the unregistering reference",
"del",
"self",
".",
"__unregistering_services",
"[",
"reference",
"]",
"return",
"True"
] | 35 | 17.214286 |
def zip_dir(path, zip_handler, include_dir=True, use_arc_name=False):
"""
zip all files and items in dir
:param only_init:
:param path:
:param zip_handler: zip file handler
:param boolean include_dir: specify if we want the archive with or without the directory
"""
for root, dirs, files in os.walk(path):
for file_to_zip in files:
filename = os.path.join(root, file_to_zip)
zip_con = filename.replace('\\', '/')
if zip_con in zip_handler.namelist():
continue
add_file(filename, zip_handler, include_dir, use_arc_name) | [
"def",
"zip_dir",
"(",
"path",
",",
"zip_handler",
",",
"include_dir",
"=",
"True",
",",
"use_arc_name",
"=",
"False",
")",
":",
"for",
"root",
",",
"dirs",
",",
"files",
"in",
"os",
".",
"walk",
"(",
"path",
")",
":",
"for",
"file_to_zip",
"in",
"files",
":",
"filename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"file_to_zip",
")",
"zip_con",
"=",
"filename",
".",
"replace",
"(",
"'\\\\'",
",",
"'/'",
")",
"if",
"zip_con",
"in",
"zip_handler",
".",
"namelist",
"(",
")",
":",
"continue",
"add_file",
"(",
"filename",
",",
"zip_handler",
",",
"include_dir",
",",
"use_arc_name",
")"
] | 33.777778 | 18.777778 |
def namespace(sharing=None, owner=None, app=None, **kwargs):
"""This function constructs a Splunk namespace.
Every Splunk resource belongs to a namespace. The namespace is specified by
the pair of values ``owner`` and ``app`` and is governed by a ``sharing`` mode.
The possible values for ``sharing`` are: "user", "app", "global" and "system",
which map to the following combinations of ``owner`` and ``app`` values:
"user" => {owner}, {app}
"app" => nobody, {app}
"global" => nobody, {app}
"system" => nobody, system
"nobody" is a special user name that basically means no user, and "system"
is the name reserved for system resources.
"-" is a wildcard that can be used for both ``owner`` and ``app`` values and
refers to all users and all apps, respectively.
In general, when you specify a namespace you can specify any combination of
these three values and the library will reconcile the triple, overriding the
provided values as appropriate.
Finally, if no namespacing is specified the library will make use of the
``/services`` branch of the REST API, which provides a namespaced view of
Splunk resources equivelent to using ``owner={currentUser}`` and
``app={defaultApp}``.
The ``namespace`` function returns a representation of the namespace from
reconciling the values you provide. It ignores any keyword arguments other
than ``owner``, ``app``, and ``sharing``, so you can provide ``dicts`` of
configuration information without first having to extract individual keys.
:param sharing: The sharing mode (the default is "user").
:type sharing: "system", "global", "app", or "user"
:param owner: The owner context (the default is "None").
:type owner: ``string``
:param app: The app context (the default is "None").
:type app: ``string``
:returns: A :class:`splunklib.data.Record` containing the reconciled
namespace.
**Example**::
import splunklib.binding as binding
n = binding.namespace(sharing="user", owner="boris", app="search")
n = binding.namespace(sharing="global", app="search")
"""
if sharing in ["system"]:
return record({'sharing': sharing, 'owner': "nobody", 'app': "system" })
if sharing in ["global", "app"]:
return record({'sharing': sharing, 'owner': "nobody", 'app': app})
if sharing in ["user", None]:
return record({'sharing': sharing, 'owner': owner, 'app': app})
raise ValueError("Invalid value for argument: 'sharing'") | [
"def",
"namespace",
"(",
"sharing",
"=",
"None",
",",
"owner",
"=",
"None",
",",
"app",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"sharing",
"in",
"[",
"\"system\"",
"]",
":",
"return",
"record",
"(",
"{",
"'sharing'",
":",
"sharing",
",",
"'owner'",
":",
"\"nobody\"",
",",
"'app'",
":",
"\"system\"",
"}",
")",
"if",
"sharing",
"in",
"[",
"\"global\"",
",",
"\"app\"",
"]",
":",
"return",
"record",
"(",
"{",
"'sharing'",
":",
"sharing",
",",
"'owner'",
":",
"\"nobody\"",
",",
"'app'",
":",
"app",
"}",
")",
"if",
"sharing",
"in",
"[",
"\"user\"",
",",
"None",
"]",
":",
"return",
"record",
"(",
"{",
"'sharing'",
":",
"sharing",
",",
"'owner'",
":",
"owner",
",",
"'app'",
":",
"app",
"}",
")",
"raise",
"ValueError",
"(",
"\"Invalid value for argument: 'sharing'\"",
")"
] | 43.62069 | 26.793103 |
def _parse(self,filename):
"""
Reads an isochrone in the old Padova format (Girardi 2002,
Marigo 2008) and determines the age (log10 yrs and Gyr),
metallicity (Z and [Fe/H]), and creates arrays with the
initial stellar mass and corresponding magnitudes for each
step along the isochrone.
http://stev.oapd.inaf.it/cgi-bin/cmd
"""
try:
columns = self.columns[self.survey.lower()]
except KeyError as e:
logger.warning('did not recognize survey %s'%(survey))
raise(e)
kwargs = dict(delimiter='\t',usecols=list(columns.keys()),dtype=list(columns.values()))
self.data = np.genfromtxt(filename,**kwargs)
self.mass_init = self.data['mass_init']
self.mass_act = self.data['mass_act']
self.luminosity = 10**self.data['log_lum']
self.mag_1 = self.data[self.band_1]
self.mag_2 = self.data[self.band_2]
self.stage = np.char.array(self.data['stage']).strip()
for i,s in enumerate(self.stage):
if i>0 and s=='' and self.stage[i-1]!='':
self.stage[i] = self.stage[i-1]
# Check where post-AGB isochrone data points begin
self.mass_init_upper_bound = np.max(self.mass_init)
if np.any(self.stage == 'LTP'):
self.index = np.nonzero(self.stage == 'LTP')[0][0]
else:
self.index = len(self.mass_init)
self.mag = self.mag_1 if self.band_1_detection else self.mag_2
self.color = self.mag_1 - self.mag_2 | [
"def",
"_parse",
"(",
"self",
",",
"filename",
")",
":",
"try",
":",
"columns",
"=",
"self",
".",
"columns",
"[",
"self",
".",
"survey",
".",
"lower",
"(",
")",
"]",
"except",
"KeyError",
"as",
"e",
":",
"logger",
".",
"warning",
"(",
"'did not recognize survey %s'",
"%",
"(",
"survey",
")",
")",
"raise",
"(",
"e",
")",
"kwargs",
"=",
"dict",
"(",
"delimiter",
"=",
"'\\t'",
",",
"usecols",
"=",
"list",
"(",
"columns",
".",
"keys",
"(",
")",
")",
",",
"dtype",
"=",
"list",
"(",
"columns",
".",
"values",
"(",
")",
")",
")",
"self",
".",
"data",
"=",
"np",
".",
"genfromtxt",
"(",
"filename",
",",
"*",
"*",
"kwargs",
")",
"self",
".",
"mass_init",
"=",
"self",
".",
"data",
"[",
"'mass_init'",
"]",
"self",
".",
"mass_act",
"=",
"self",
".",
"data",
"[",
"'mass_act'",
"]",
"self",
".",
"luminosity",
"=",
"10",
"**",
"self",
".",
"data",
"[",
"'log_lum'",
"]",
"self",
".",
"mag_1",
"=",
"self",
".",
"data",
"[",
"self",
".",
"band_1",
"]",
"self",
".",
"mag_2",
"=",
"self",
".",
"data",
"[",
"self",
".",
"band_2",
"]",
"self",
".",
"stage",
"=",
"np",
".",
"char",
".",
"array",
"(",
"self",
".",
"data",
"[",
"'stage'",
"]",
")",
".",
"strip",
"(",
")",
"for",
"i",
",",
"s",
"in",
"enumerate",
"(",
"self",
".",
"stage",
")",
":",
"if",
"i",
">",
"0",
"and",
"s",
"==",
"''",
"and",
"self",
".",
"stage",
"[",
"i",
"-",
"1",
"]",
"!=",
"''",
":",
"self",
".",
"stage",
"[",
"i",
"]",
"=",
"self",
".",
"stage",
"[",
"i",
"-",
"1",
"]",
"# Check where post-AGB isochrone data points begin",
"self",
".",
"mass_init_upper_bound",
"=",
"np",
".",
"max",
"(",
"self",
".",
"mass_init",
")",
"if",
"np",
".",
"any",
"(",
"self",
".",
"stage",
"==",
"'LTP'",
")",
":",
"self",
".",
"index",
"=",
"np",
".",
"nonzero",
"(",
"self",
".",
"stage",
"==",
"'LTP'",
")",
"[",
"0",
"]",
"[",
"0",
"]",
"else",
":",
"self",
".",
"index",
"=",
"len",
"(",
"self",
".",
"mass_init",
")",
"self",
".",
"mag",
"=",
"self",
".",
"mag_1",
"if",
"self",
".",
"band_1_detection",
"else",
"self",
".",
"mag_2",
"self",
".",
"color",
"=",
"self",
".",
"mag_1",
"-",
"self",
".",
"mag_2"
] | 41.675676 | 17.297297 |
def _Fierz_to_Bern_III_IV_V(Fqqqq, qqqq):
"""From Fierz to 4-quark Bern basis for Classes III, IV and V.
`qqqq` should be of the form 'sbuc', 'sdcc', 'ucuu' etc."""
# 2nd != 4th, color-octet redundant
if qqqq in ['sbss', 'dbdd', 'dbds', 'sbsd', 'bsbd', 'dsdd']:
return {
'1' + qqqq : -Fqqqq['F' + qqqq + '1'] / 3
+ 4 * Fqqqq['F' + qqqq + '3'] / 3,
'3' + qqqq : Fqqqq['F' + qqqq + '1'] / 12 - Fqqqq['F' + qqqq + '3'] / 12,
'5' + qqqq : -Fqqqq['F' + qqqq + '5p'] / 3
+ 4 * Fqqqq['F' + qqqq + '7p'] / 3,
'7' + qqqq : Fqqqq['F' + qqqq + '5p'] / 3 - Fqqqq['F' + qqqq + '7p'] / 3
+ Fqqqq['F' + qqqq + '9p'],
'9' + qqqq : Fqqqq['F' + qqqq + '5p'] / 48
- Fqqqq['F' + qqqq + '7p'] / 48,
'1p' + qqqq : -Fqqqq['F' + qqqq + '1p'] / 3
+ 4 * Fqqqq['F' + qqqq + '3p'] / 3,
'3p' + qqqq : Fqqqq['F' + qqqq + '1p'] / 12
- Fqqqq['F' + qqqq + '3p'] / 12,
'5p' + qqqq : -Fqqqq['F' + qqqq + '5'] / 3
+ 4 * Fqqqq['F' + qqqq + '7'] / 3,
'7p' + qqqq : Fqqqq['F' + qqqq + '5'] / 3 - Fqqqq['F' + qqqq + '7'] / 3
+ Fqqqq['F' + qqqq + '9'],
'9p' + qqqq : Fqqqq['F' + qqqq + '5'] / 48
- Fqqqq['F' + qqqq + '7'] / 48
}
if qqqq in ['dbbb', 'sbbb', 'dsss']: # 2nd = 4th, color-octet redundant
return {
'1' + qqqq : -Fqqqq['F' + qqqq + '1'] / 3
+ 4 * Fqqqq['F' + qqqq + '3'] / 3,
'3' + qqqq : Fqqqq['F' + qqqq + '1'] / 12 - Fqqqq['F' + qqqq + '3'] / 12,
'5' + qqqq : -Fqqqq['F' + qqqq + '5'] / 3
+ 4 * Fqqqq['F' + qqqq + '7'] / 3,
'7' + qqqq : Fqqqq['F' + qqqq + '5'] / 3 - Fqqqq['F' + qqqq + '7'] / 3
+ Fqqqq['F' + qqqq + '9'],
'9' + qqqq : Fqqqq['F' + qqqq + '5'] / 48
- Fqqqq['F' + qqqq + '7'] / 48,
'1p' + qqqq : -Fqqqq['F' + qqqq + '1p'] / 3
+ 4 * Fqqqq['F' + qqqq + '3p'] / 3,
'3p' + qqqq : Fqqqq['F' + qqqq + '1p'] / 12
- Fqqqq['F' + qqqq + '3p'] / 12,
'5p' + qqqq : -Fqqqq['F' + qqqq + '5p'] / 3
+ 4 * Fqqqq['F' + qqqq + '7p'] / 3,
'7p' + qqqq : Fqqqq['F' + qqqq + '5p'] / 3 - Fqqqq['F' + qqqq + '7p'] / 3
+ Fqqqq['F' + qqqq + '9p'],
'9p' + qqqq : Fqqqq['F' + qqqq + '5p'] / 48
- Fqqqq['F' + qqqq + '7p'] / 48
}
# generic case
if qqqq in ['sbuu', 'sbdd', 'sbuu', 'sbuc', 'sbcu', 'sbcc',
'dbuu', 'dbss', 'dbuu', 'dbuc', 'dbcu', 'dbcc',
'dsuu', 'dsbb', 'dsuu', 'dsuc', 'dscu', 'dscc',]:
return {
'1'+qqqq : -Fqqqq['F' + qqqq + '1']/3 + 4 * Fqqqq['F' + qqqq + '3'] / 3
- Fqqqq['F' + qqqq + '2']/(3 * Nc)
+ 4 * Fqqqq['F' + qqqq + '4'] / (3 * Nc),
'2'+qqqq : -2 * Fqqqq['F' + qqqq + '2'] / 3
+ 8 * Fqqqq['F' + qqqq + '4'] / 3,
'3'+qqqq : Fqqqq['F' + qqqq + '1'] / 12
- Fqqqq['F' + qqqq + '3'] / 12
+ Fqqqq['F' + qqqq + '2'] / (12 * Nc)
- Fqqqq['F' + qqqq + '4'] / (12 * Nc),
'4'+ qqqq : Fqqqq['F' + qqqq + '2'] / 6 - Fqqqq['F' + qqqq + '4'] / 6,
'5'+ qqqq : -Fqqqq['F' + qqqq + '5'] / 3
+ 4 * Fqqqq['F' + qqqq + '7'] / 3
- Fqqqq['F' + qqqq + '6']/(3 * Nc)
+ 4 * Fqqqq['F' + qqqq + '8']/(3 * Nc),
'6'+qqqq : -2 * Fqqqq['F' + qqqq + '6'] / 3
+ 8 * Fqqqq['F' + qqqq + '8'] / 3,
'7'+qqqq : Fqqqq['F' + qqqq + '5'] / 3 - Fqqqq['F' + qqqq + '7'] / 3
+ Fqqqq['F' + qqqq + '9'] + Fqqqq['F' + qqqq + '10'] / Nc
+ Fqqqq['F' + qqqq + '6']/(3 * Nc)
- Fqqqq['F' + qqqq + '8']/(3 * Nc),
'8'+qqqq : 2*Fqqqq['F' + qqqq + '10'] + 2 * Fqqqq['F' + qqqq + '6'] / 3
-2 * Fqqqq['F' + qqqq + '8'] / 3,
'9'+qqqq : Fqqqq['F' + qqqq + '5'] / 48 - Fqqqq['F' + qqqq + '7'] / 48
+ Fqqqq['F' + qqqq + '6'] / (48 * Nc)
- Fqqqq['F' + qqqq + '8'] / (48 * Nc),
'10'+qqqq : Fqqqq['F' + qqqq + '6'] / 24 - Fqqqq['F' + qqqq + '8'] / 24,
'1p'+qqqq : -Fqqqq['F' + qqqq + '1p'] / 3
+ 4 * Fqqqq['F' + qqqq + '3p'] / 3
- Fqqqq['F' + qqqq + '2p'] / (3 * Nc)
+ 4 * Fqqqq['F' + qqqq + '4p'] / (3 * Nc),
'2p'+qqqq : -2 * Fqqqq['F' + qqqq + '2p'] / 3
+ 8 * Fqqqq['F' + qqqq + '4p'] / 3,
'3p'+qqqq : Fqqqq['F' + qqqq + '1p'] / 12
- Fqqqq['F' + qqqq + '3p'] / 12
+ Fqqqq['F' + qqqq + '2p'] / (12 * Nc)
- Fqqqq['F' + qqqq + '4p'] / (12 * Nc),
'4p'+qqqq : Fqqqq['F' + qqqq + '2p'] / 6 - Fqqqq['F' + qqqq + '4p'] / 6,
'5p'+qqqq : -Fqqqq['F' + qqqq + '5p'] / 3
+ 4 * Fqqqq['F' + qqqq + '7p'] / 3
- Fqqqq['F' + qqqq + '6p'] / (3 * Nc)
+ 4 * Fqqqq['F' + qqqq + '8p'] / (3 * Nc),
'6p'+qqqq : -2 * Fqqqq['F' + qqqq + '6p'] / 3
+ 8 * Fqqqq['F' + qqqq + '8p'] / 3,
'7p'+qqqq : Fqqqq['F' + qqqq + '5p'] / 3 - Fqqqq['F' + qqqq + '7p'] / 3
+ Fqqqq['F' + qqqq + '9p'] + Fqqqq['F' + qqqq + '10p'] / Nc
+ Fqqqq['F' + qqqq + '6p']/(3 * Nc)
- Fqqqq['F' + qqqq + '8p']/(3 * Nc),
'8p'+qqqq : 2 * Fqqqq['F' + qqqq + '10p']
+ 2 * Fqqqq['F' + qqqq + '6p'] / 3
- 2 * Fqqqq['F' + qqqq + '8p'] / 3,
'9p'+qqqq : Fqqqq['F' + qqqq + '5p'] / 48
- Fqqqq['F' + qqqq + '7p'] / 48
+ Fqqqq['F' + qqqq + '6p'] / (48 * Nc)
- Fqqqq['F' + qqqq + '8p'] / (48 * Nc),
'10p'+qqqq : Fqqqq['F' + qqqq + '6p'] / 24
- Fqqqq['F' + qqqq + '8p'] / 24
}
raise ValueError("Case not implemented: {}".format(qqqq)) | [
"def",
"_Fierz_to_Bern_III_IV_V",
"(",
"Fqqqq",
",",
"qqqq",
")",
":",
"# 2nd != 4th, color-octet redundant",
"if",
"qqqq",
"in",
"[",
"'sbss'",
",",
"'dbdd'",
",",
"'dbds'",
",",
"'sbsd'",
",",
"'bsbd'",
",",
"'dsdd'",
"]",
":",
"return",
"{",
"'1'",
"+",
"qqqq",
":",
"-",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'1'",
"]",
"/",
"3",
"+",
"4",
"*",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'3'",
"]",
"/",
"3",
",",
"'3'",
"+",
"qqqq",
":",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'1'",
"]",
"/",
"12",
"-",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'3'",
"]",
"/",
"12",
",",
"'5'",
"+",
"qqqq",
":",
"-",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'5p'",
"]",
"/",
"3",
"+",
"4",
"*",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'7p'",
"]",
"/",
"3",
",",
"'7'",
"+",
"qqqq",
":",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'5p'",
"]",
"/",
"3",
"-",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'7p'",
"]",
"/",
"3",
"+",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'9p'",
"]",
",",
"'9'",
"+",
"qqqq",
":",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'5p'",
"]",
"/",
"48",
"-",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'7p'",
"]",
"/",
"48",
",",
"'1p'",
"+",
"qqqq",
":",
"-",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'1p'",
"]",
"/",
"3",
"+",
"4",
"*",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'3p'",
"]",
"/",
"3",
",",
"'3p'",
"+",
"qqqq",
":",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'1p'",
"]",
"/",
"12",
"-",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'3p'",
"]",
"/",
"12",
",",
"'5p'",
"+",
"qqqq",
":",
"-",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'5'",
"]",
"/",
"3",
"+",
"4",
"*",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'7'",
"]",
"/",
"3",
",",
"'7p'",
"+",
"qqqq",
":",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'5'",
"]",
"/",
"3",
"-",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'7'",
"]",
"/",
"3",
"+",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'9'",
"]",
",",
"'9p'",
"+",
"qqqq",
":",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'5'",
"]",
"/",
"48",
"-",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'7'",
"]",
"/",
"48",
"}",
"if",
"qqqq",
"in",
"[",
"'dbbb'",
",",
"'sbbb'",
",",
"'dsss'",
"]",
":",
"# 2nd = 4th, color-octet redundant",
"return",
"{",
"'1'",
"+",
"qqqq",
":",
"-",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'1'",
"]",
"/",
"3",
"+",
"4",
"*",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'3'",
"]",
"/",
"3",
",",
"'3'",
"+",
"qqqq",
":",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'1'",
"]",
"/",
"12",
"-",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'3'",
"]",
"/",
"12",
",",
"'5'",
"+",
"qqqq",
":",
"-",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'5'",
"]",
"/",
"3",
"+",
"4",
"*",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'7'",
"]",
"/",
"3",
",",
"'7'",
"+",
"qqqq",
":",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'5'",
"]",
"/",
"3",
"-",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'7'",
"]",
"/",
"3",
"+",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'9'",
"]",
",",
"'9'",
"+",
"qqqq",
":",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'5'",
"]",
"/",
"48",
"-",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'7'",
"]",
"/",
"48",
",",
"'1p'",
"+",
"qqqq",
":",
"-",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'1p'",
"]",
"/",
"3",
"+",
"4",
"*",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'3p'",
"]",
"/",
"3",
",",
"'3p'",
"+",
"qqqq",
":",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'1p'",
"]",
"/",
"12",
"-",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'3p'",
"]",
"/",
"12",
",",
"'5p'",
"+",
"qqqq",
":",
"-",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'5p'",
"]",
"/",
"3",
"+",
"4",
"*",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'7p'",
"]",
"/",
"3",
",",
"'7p'",
"+",
"qqqq",
":",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'5p'",
"]",
"/",
"3",
"-",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'7p'",
"]",
"/",
"3",
"+",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'9p'",
"]",
",",
"'9p'",
"+",
"qqqq",
":",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'5p'",
"]",
"/",
"48",
"-",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'7p'",
"]",
"/",
"48",
"}",
"# generic case",
"if",
"qqqq",
"in",
"[",
"'sbuu'",
",",
"'sbdd'",
",",
"'sbuu'",
",",
"'sbuc'",
",",
"'sbcu'",
",",
"'sbcc'",
",",
"'dbuu'",
",",
"'dbss'",
",",
"'dbuu'",
",",
"'dbuc'",
",",
"'dbcu'",
",",
"'dbcc'",
",",
"'dsuu'",
",",
"'dsbb'",
",",
"'dsuu'",
",",
"'dsuc'",
",",
"'dscu'",
",",
"'dscc'",
",",
"]",
":",
"return",
"{",
"'1'",
"+",
"qqqq",
":",
"-",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'1'",
"]",
"/",
"3",
"+",
"4",
"*",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'3'",
"]",
"/",
"3",
"-",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'2'",
"]",
"/",
"(",
"3",
"*",
"Nc",
")",
"+",
"4",
"*",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'4'",
"]",
"/",
"(",
"3",
"*",
"Nc",
")",
",",
"'2'",
"+",
"qqqq",
":",
"-",
"2",
"*",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'2'",
"]",
"/",
"3",
"+",
"8",
"*",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'4'",
"]",
"/",
"3",
",",
"'3'",
"+",
"qqqq",
":",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'1'",
"]",
"/",
"12",
"-",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'3'",
"]",
"/",
"12",
"+",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'2'",
"]",
"/",
"(",
"12",
"*",
"Nc",
")",
"-",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'4'",
"]",
"/",
"(",
"12",
"*",
"Nc",
")",
",",
"'4'",
"+",
"qqqq",
":",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'2'",
"]",
"/",
"6",
"-",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'4'",
"]",
"/",
"6",
",",
"'5'",
"+",
"qqqq",
":",
"-",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'5'",
"]",
"/",
"3",
"+",
"4",
"*",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'7'",
"]",
"/",
"3",
"-",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'6'",
"]",
"/",
"(",
"3",
"*",
"Nc",
")",
"+",
"4",
"*",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'8'",
"]",
"/",
"(",
"3",
"*",
"Nc",
")",
",",
"'6'",
"+",
"qqqq",
":",
"-",
"2",
"*",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'6'",
"]",
"/",
"3",
"+",
"8",
"*",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'8'",
"]",
"/",
"3",
",",
"'7'",
"+",
"qqqq",
":",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'5'",
"]",
"/",
"3",
"-",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'7'",
"]",
"/",
"3",
"+",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'9'",
"]",
"+",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'10'",
"]",
"/",
"Nc",
"+",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'6'",
"]",
"/",
"(",
"3",
"*",
"Nc",
")",
"-",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'8'",
"]",
"/",
"(",
"3",
"*",
"Nc",
")",
",",
"'8'",
"+",
"qqqq",
":",
"2",
"*",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'10'",
"]",
"+",
"2",
"*",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'6'",
"]",
"/",
"3",
"-",
"2",
"*",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'8'",
"]",
"/",
"3",
",",
"'9'",
"+",
"qqqq",
":",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'5'",
"]",
"/",
"48",
"-",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'7'",
"]",
"/",
"48",
"+",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'6'",
"]",
"/",
"(",
"48",
"*",
"Nc",
")",
"-",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'8'",
"]",
"/",
"(",
"48",
"*",
"Nc",
")",
",",
"'10'",
"+",
"qqqq",
":",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'6'",
"]",
"/",
"24",
"-",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'8'",
"]",
"/",
"24",
",",
"'1p'",
"+",
"qqqq",
":",
"-",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'1p'",
"]",
"/",
"3",
"+",
"4",
"*",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'3p'",
"]",
"/",
"3",
"-",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'2p'",
"]",
"/",
"(",
"3",
"*",
"Nc",
")",
"+",
"4",
"*",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'4p'",
"]",
"/",
"(",
"3",
"*",
"Nc",
")",
",",
"'2p'",
"+",
"qqqq",
":",
"-",
"2",
"*",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'2p'",
"]",
"/",
"3",
"+",
"8",
"*",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'4p'",
"]",
"/",
"3",
",",
"'3p'",
"+",
"qqqq",
":",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'1p'",
"]",
"/",
"12",
"-",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'3p'",
"]",
"/",
"12",
"+",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'2p'",
"]",
"/",
"(",
"12",
"*",
"Nc",
")",
"-",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'4p'",
"]",
"/",
"(",
"12",
"*",
"Nc",
")",
",",
"'4p'",
"+",
"qqqq",
":",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'2p'",
"]",
"/",
"6",
"-",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'4p'",
"]",
"/",
"6",
",",
"'5p'",
"+",
"qqqq",
":",
"-",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'5p'",
"]",
"/",
"3",
"+",
"4",
"*",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'7p'",
"]",
"/",
"3",
"-",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'6p'",
"]",
"/",
"(",
"3",
"*",
"Nc",
")",
"+",
"4",
"*",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'8p'",
"]",
"/",
"(",
"3",
"*",
"Nc",
")",
",",
"'6p'",
"+",
"qqqq",
":",
"-",
"2",
"*",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'6p'",
"]",
"/",
"3",
"+",
"8",
"*",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'8p'",
"]",
"/",
"3",
",",
"'7p'",
"+",
"qqqq",
":",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'5p'",
"]",
"/",
"3",
"-",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'7p'",
"]",
"/",
"3",
"+",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'9p'",
"]",
"+",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'10p'",
"]",
"/",
"Nc",
"+",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'6p'",
"]",
"/",
"(",
"3",
"*",
"Nc",
")",
"-",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'8p'",
"]",
"/",
"(",
"3",
"*",
"Nc",
")",
",",
"'8p'",
"+",
"qqqq",
":",
"2",
"*",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'10p'",
"]",
"+",
"2",
"*",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'6p'",
"]",
"/",
"3",
"-",
"2",
"*",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'8p'",
"]",
"/",
"3",
",",
"'9p'",
"+",
"qqqq",
":",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'5p'",
"]",
"/",
"48",
"-",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'7p'",
"]",
"/",
"48",
"+",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'6p'",
"]",
"/",
"(",
"48",
"*",
"Nc",
")",
"-",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'8p'",
"]",
"/",
"(",
"48",
"*",
"Nc",
")",
",",
"'10p'",
"+",
"qqqq",
":",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'6p'",
"]",
"/",
"24",
"-",
"Fqqqq",
"[",
"'F'",
"+",
"qqqq",
"+",
"'8p'",
"]",
"/",
"24",
"}",
"raise",
"ValueError",
"(",
"\"Case not implemented: {}\"",
".",
"format",
"(",
"qqqq",
")",
")"
] | 54.460177 | 17.849558 |
def preprocess_search_hit(pid, record_hit, links_factory=None, **kwargs):
    """Prepare a record hit from Elasticsearch for serialization.

    Builds a serializable dict with the hit's source as ``metadata``, its
    version as ``revision``, and links produced by ``links_factory``. The
    ``_created``/``_updated`` timestamps are promoted out of the metadata
    payload onto the record itself.
    """
    build_links = links_factory or (lambda _pid, **_kw: dict())
    record = {
        'pid': pid,
        'metadata': record_hit['_source'],
        'links': build_links(pid, record_hit=record_hit, **kwargs),
        'revision': record_hit['_version'],
        'created': None,
        'updated': None,
    }
    # Promote the timestamp fields out of the metadata payload.
    for source_key in ('_created', '_updated'):
        if source_key in record['metadata']:
            record[source_key[1:]] = record['metadata'].pop(source_key)
    return record
"def",
"preprocess_search_hit",
"(",
"pid",
",",
"record_hit",
",",
"links_factory",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"links_factory",
"=",
"links_factory",
"or",
"(",
"lambda",
"x",
",",
"*",
"*",
"k",
":",
"dict",
"(",
")",
")",
"record",
"=",
"dict",
"(",
"pid",
"=",
"pid",
",",
"metadata",
"=",
"record_hit",
"[",
"'_source'",
"]",
",",
"links",
"=",
"links_factory",
"(",
"pid",
",",
"record_hit",
"=",
"record_hit",
",",
"*",
"*",
"kwargs",
")",
",",
"revision",
"=",
"record_hit",
"[",
"'_version'",
"]",
",",
"created",
"=",
"None",
",",
"updated",
"=",
"None",
",",
")",
"# Move created/updated attrs from source to object.",
"for",
"key",
"in",
"[",
"'_created'",
",",
"'_updated'",
"]",
":",
"if",
"key",
"in",
"record",
"[",
"'metadata'",
"]",
":",
"record",
"[",
"key",
"[",
"1",
":",
"]",
"]",
"=",
"record",
"[",
"'metadata'",
"]",
"[",
"key",
"]",
"del",
"record",
"[",
"'metadata'",
"]",
"[",
"key",
"]",
"return",
"record"
] | 43.058824 | 15.058824 |
def hide_ticks(plot, min_tick_value=None, max_tick_value=None):
    """Hide tick labels whose value falls outside [min_tick_value, max_tick_value]."""
    for label, value in zip(plot.get_yticklabels(), plot.get_yticks()):
        numeric = as_numeric(value)
        if not numeric:
            continue
        below = min_tick_value is not None and numeric < min_tick_value
        above = max_tick_value is not None and numeric > max_tick_value
        if below or above:
            label.set_visible(False)
"def",
"hide_ticks",
"(",
"plot",
",",
"min_tick_value",
"=",
"None",
",",
"max_tick_value",
"=",
"None",
")",
":",
"for",
"tick",
",",
"tick_value",
"in",
"zip",
"(",
"plot",
".",
"get_yticklabels",
"(",
")",
",",
"plot",
".",
"get_yticks",
"(",
")",
")",
":",
"tick_label",
"=",
"as_numeric",
"(",
"tick_value",
")",
"if",
"tick_label",
":",
"if",
"(",
"min_tick_value",
"is",
"not",
"None",
"and",
"tick_label",
"<",
"min_tick_value",
"or",
"max_tick_value",
"is",
"not",
"None",
"and",
"tick_label",
">",
"max_tick_value",
")",
":",
"tick",
".",
"set_visible",
"(",
"False",
")"
] | 59.5 | 19.375 |
def add_timing_signal_1d_given_position(x,
                                        position,
                                        min_timescale=1.0,
                                        max_timescale=1.0e4):
  """Adds sinusoids of diff frequencies to a Tensor, with timing position given.

  Transformer-style positional encoding, except the position of each element
  is supplied explicitly via `position` instead of being implied by sequence
  order. Each sin/cos channel pair uses one timescale drawn from a geometric
  progression between `min_timescale` and `max_timescale`.

  Args:
    x: a Tensor with shape [batch, length, channels]
    position: a Tensor with shape [batch, length]
    min_timescale: a float
    max_timescale: a float

  Returns:
    a Tensor the same shape as x.
  """
  channels = common_layers.shape_list(x)[2]
  num_timescales = channels // 2
  # Geometric spacing of inverse timescales, one per sin/cos channel pair.
  log_timescale_increment = (
      math.log(float(max_timescale) / float(min_timescale)) /
      (tf.to_float(num_timescales) - 1))
  inv_timescales = min_timescale * tf.exp(
      tf.to_float(tf.range(num_timescales)) * -log_timescale_increment)
  # [batch, length, 1] * [1, 1, num_timescales] -> [batch, length, num_timescales]
  scaled_time = (
      tf.expand_dims(tf.to_float(position), 2) * tf.expand_dims(
          tf.expand_dims(inv_timescales, 0), 0))
  signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=2)
  # Pad one zero channel when `channels` is odd so the signal matches x.
  signal = tf.pad(signal, [[0, 0], [0, 0], [0, tf.mod(channels, 2)]])
  signal = common_layers.cast_like(signal, x)
  return x + signal
"def",
"add_timing_signal_1d_given_position",
"(",
"x",
",",
"position",
",",
"min_timescale",
"=",
"1.0",
",",
"max_timescale",
"=",
"1.0e4",
")",
":",
"channels",
"=",
"common_layers",
".",
"shape_list",
"(",
"x",
")",
"[",
"2",
"]",
"num_timescales",
"=",
"channels",
"//",
"2",
"log_timescale_increment",
"=",
"(",
"math",
".",
"log",
"(",
"float",
"(",
"max_timescale",
")",
"/",
"float",
"(",
"min_timescale",
")",
")",
"/",
"(",
"tf",
".",
"to_float",
"(",
"num_timescales",
")",
"-",
"1",
")",
")",
"inv_timescales",
"=",
"min_timescale",
"*",
"tf",
".",
"exp",
"(",
"tf",
".",
"to_float",
"(",
"tf",
".",
"range",
"(",
"num_timescales",
")",
")",
"*",
"-",
"log_timescale_increment",
")",
"scaled_time",
"=",
"(",
"tf",
".",
"expand_dims",
"(",
"tf",
".",
"to_float",
"(",
"position",
")",
",",
"2",
")",
"*",
"tf",
".",
"expand_dims",
"(",
"tf",
".",
"expand_dims",
"(",
"inv_timescales",
",",
"0",
")",
",",
"0",
")",
")",
"signal",
"=",
"tf",
".",
"concat",
"(",
"[",
"tf",
".",
"sin",
"(",
"scaled_time",
")",
",",
"tf",
".",
"cos",
"(",
"scaled_time",
")",
"]",
",",
"axis",
"=",
"2",
")",
"signal",
"=",
"tf",
".",
"pad",
"(",
"signal",
",",
"[",
"[",
"0",
",",
"0",
"]",
",",
"[",
"0",
",",
"0",
"]",
",",
"[",
"0",
",",
"tf",
".",
"mod",
"(",
"channels",
",",
"2",
")",
"]",
"]",
")",
"signal",
"=",
"common_layers",
".",
"cast_like",
"(",
"signal",
",",
"x",
")",
"return",
"x",
"+",
"signal"
] | 39.655172 | 16.103448 |
def _start(self, args):
    """
    start connection negotiation

    This method starts the connection negotiation process by
    telling the client the protocol version that the server
    proposes, along with a list of security mechanisms which the
    client can use for authentication.

    RULE:
        If the client cannot handle the protocol version suggested
        by the server it MUST close the socket connection.

    RULE:
        The server MUST provide a protocol version that is lower
        than or equal to that requested by the client in the
        protocol header.  If the server cannot support the
        specified protocol it MUST NOT send this method, but MUST
        close the socket connection.

    PARAMETERS:
        version_major: octet
            protocol major version
            The protocol major version that the server agrees to
            use, which cannot be higher than the client's major
            version.

        version_minor: octet
            protocol major version
            The protocol minor version that the server agrees to
            use, which cannot be higher than the client's minor
            version.

        server_properties: table
            server properties

        mechanisms: longstr
            available security mechanisms
            A list of the security mechanisms that the server
            supports, delimited by spaces.  Currently ASL supports
            these mechanisms: PLAIN.

        locales: longstr
            available message locales
            A list of the message locales that the server
            supports, delimited by spaces.  The locale defines the
            language in which the server will send reply texts.

            RULE:
                All servers MUST support at least the en_US
                locale.
    """
    # Decode the Start method frame fields in wire order.
    self.version_major = args.read_octet()
    self.version_minor = args.read_octet()
    self.server_properties = args.read_table()
    self.mechanisms = args.read_longstr().split(' ')
    self.locales = args.read_longstr().split(' ')

    # Pass format arguments lazily so the message is only interpolated
    # when DEBUG logging is actually enabled.
    AMQP_LOGGER.debug(
        'Start from server, version: %d.%d, properties: %s, '
        'mechanisms: %s, locales: %s',
        self.version_major, self.version_minor,
        self.server_properties, self.mechanisms, self.locales)
"def",
"_start",
"(",
"self",
",",
"args",
")",
":",
"self",
".",
"version_major",
"=",
"args",
".",
"read_octet",
"(",
")",
"self",
".",
"version_minor",
"=",
"args",
".",
"read_octet",
"(",
")",
"self",
".",
"server_properties",
"=",
"args",
".",
"read_table",
"(",
")",
"self",
".",
"mechanisms",
"=",
"args",
".",
"read_longstr",
"(",
")",
".",
"split",
"(",
"' '",
")",
"self",
".",
"locales",
"=",
"args",
".",
"read_longstr",
"(",
")",
".",
"split",
"(",
"' '",
")",
"AMQP_LOGGER",
".",
"debug",
"(",
"'Start from server, version: %d.%d, properties: %s, mechanisms: %s, locales: %s'",
"%",
"(",
"self",
".",
"version_major",
",",
"self",
".",
"version_minor",
",",
"str",
"(",
"self",
".",
"server_properties",
")",
",",
"self",
".",
"mechanisms",
",",
"self",
".",
"locales",
")",
")"
] | 33.067568 | 24.445946 |
def _import_to_py_ast(ctx: GeneratorContext, node: Import) -> GeneratedPyAST:
    """Return a Python AST node for a Basilisp `import*` expression.

    Each alias is imported eagerly into the running compiler (via
    ``ctx.add_import``) and a matching ``builtins.__import__`` assignment is
    emitted into the generated AST so that cached bytecode reproduces the
    same bindings when reloaded.

    :param ctx: generator context tracking the current namespace's imports
    :param node: analyzed ``import*`` node; must contain at least one alias
    :raises ImportError: if any named Python module cannot be found
    """
    assert node.op == NodeOp.IMPORT
    last = None
    deps: List[ast.AST] = []
    for alias in node.aliases:
        safe_name = munge(alias.name)
        # Import the module now so the compiler itself can resolve names
        # from it; record it (optionally under an alias) in the context.
        try:
            module = importlib.import_module(safe_name)
            if alias.alias is not None:
                ctx.add_import(sym.symbol(alias.name), module, sym.symbol(alias.alias))
            else:
                ctx.add_import(sym.symbol(alias.name), module)
        except ModuleNotFoundError as e:
            raise ImportError(
                f"Python module '{alias.name}' not found", node.form, node
            ) from e
        # With no explicit alias, bind only the top-level package name,
        # mirroring a plain Python `import a.b.c` statement.
        py_import_alias = (
            munge(alias.alias)
            if alias.alias is not None
            else safe_name.split(".", maxsplit=1)[0]
        )
        # Emit: <py_import_alias> = builtins.__import__("<safe_name>")
        deps.append(
            ast.Assign(
                targets=[ast.Name(id=py_import_alias, ctx=ast.Store())],
                value=ast.Call(
                    func=_load_attr("builtins.__import__"),
                    args=[ast.Str(safe_name)],
                    keywords=[],
                ),
            )
        )
        last = ast.Name(id=py_import_alias, ctx=ast.Load())
    # Note that we add this import to the live running system in the above
    # calls to `ctx.add_import`, however, since we compile and cache Python
    # bytecode, we need to generate calls to `add_import` for the running
    # namespace so when this code is reloaded from the cache, the runtime
    # is correctly configured.
    # NOTE(review): this registration sits *outside* the loop, so only the
    # final alias's `safe_name` is registered in the emitted AST — confirm
    # whether it should be emitted once per alias instead.
    deps.append(
        ast.Call(
            func=_load_attr(f"{_NS_VAR_VALUE}.add_import"),
            args=[
                ast.Call(
                    func=_NEW_SYM_FN_NAME, args=[ast.Str(safe_name)], keywords=[]
                ),
                last,
            ],
            keywords=[],
        )
    )
    assert last is not None, "import* node must have at least one import"
    return GeneratedPyAST(node=last, dependencies=deps)
"def",
"_import_to_py_ast",
"(",
"ctx",
":",
"GeneratorContext",
",",
"node",
":",
"Import",
")",
"->",
"GeneratedPyAST",
":",
"assert",
"node",
".",
"op",
"==",
"NodeOp",
".",
"IMPORT",
"last",
"=",
"None",
"deps",
":",
"List",
"[",
"ast",
".",
"AST",
"]",
"=",
"[",
"]",
"for",
"alias",
"in",
"node",
".",
"aliases",
":",
"safe_name",
"=",
"munge",
"(",
"alias",
".",
"name",
")",
"try",
":",
"module",
"=",
"importlib",
".",
"import_module",
"(",
"safe_name",
")",
"if",
"alias",
".",
"alias",
"is",
"not",
"None",
":",
"ctx",
".",
"add_import",
"(",
"sym",
".",
"symbol",
"(",
"alias",
".",
"name",
")",
",",
"module",
",",
"sym",
".",
"symbol",
"(",
"alias",
".",
"alias",
")",
")",
"else",
":",
"ctx",
".",
"add_import",
"(",
"sym",
".",
"symbol",
"(",
"alias",
".",
"name",
")",
",",
"module",
")",
"except",
"ModuleNotFoundError",
"as",
"e",
":",
"raise",
"ImportError",
"(",
"f\"Python module '{alias.name}' not found\"",
",",
"node",
".",
"form",
",",
"node",
")",
"from",
"e",
"py_import_alias",
"=",
"(",
"munge",
"(",
"alias",
".",
"alias",
")",
"if",
"alias",
".",
"alias",
"is",
"not",
"None",
"else",
"safe_name",
".",
"split",
"(",
"\".\"",
",",
"maxsplit",
"=",
"1",
")",
"[",
"0",
"]",
")",
"deps",
".",
"append",
"(",
"ast",
".",
"Assign",
"(",
"targets",
"=",
"[",
"ast",
".",
"Name",
"(",
"id",
"=",
"py_import_alias",
",",
"ctx",
"=",
"ast",
".",
"Store",
"(",
")",
")",
"]",
",",
"value",
"=",
"ast",
".",
"Call",
"(",
"func",
"=",
"_load_attr",
"(",
"\"builtins.__import__\"",
")",
",",
"args",
"=",
"[",
"ast",
".",
"Str",
"(",
"safe_name",
")",
"]",
",",
"keywords",
"=",
"[",
"]",
",",
")",
",",
")",
")",
"last",
"=",
"ast",
".",
"Name",
"(",
"id",
"=",
"py_import_alias",
",",
"ctx",
"=",
"ast",
".",
"Load",
"(",
")",
")",
"# Note that we add this import to the live running system in the above",
"# calls to `ctx.add_import`, however, since we compile and cache Python",
"# bytecode, we need to generate calls to `add_import` for the running",
"# namespace so when this code is reloaded from the cache, the runtime",
"# is correctly configured.",
"deps",
".",
"append",
"(",
"ast",
".",
"Call",
"(",
"func",
"=",
"_load_attr",
"(",
"f\"{_NS_VAR_VALUE}.add_import\"",
")",
",",
"args",
"=",
"[",
"ast",
".",
"Call",
"(",
"func",
"=",
"_NEW_SYM_FN_NAME",
",",
"args",
"=",
"[",
"ast",
".",
"Str",
"(",
"safe_name",
")",
"]",
",",
"keywords",
"=",
"[",
"]",
")",
",",
"last",
",",
"]",
",",
"keywords",
"=",
"[",
"]",
",",
")",
")",
"assert",
"last",
"is",
"not",
"None",
",",
"\"import* node must have at least one import\"",
"return",
"GeneratedPyAST",
"(",
"node",
"=",
"last",
",",
"dependencies",
"=",
"deps",
")"
] | 36.754386 | 21.684211 |
def register(self, cls):
    """
    Adds a preview to the index.

    Instantiates ``cls`` bound to this site and stores the instance under
    its module and class name.
    """
    instance = cls(site=self)
    logger.debug('Registering %r with %r', instance, self)
    module_index = self.__previews.setdefault(instance.module, {})
    module_index[cls.__name__] = instance
"def",
"register",
"(",
"self",
",",
"cls",
")",
":",
"preview",
"=",
"cls",
"(",
"site",
"=",
"self",
")",
"logger",
".",
"debug",
"(",
"'Registering %r with %r'",
",",
"preview",
",",
"self",
")",
"index",
"=",
"self",
".",
"__previews",
".",
"setdefault",
"(",
"preview",
".",
"module",
",",
"{",
"}",
")",
"index",
"[",
"cls",
".",
"__name__",
"]",
"=",
"preview"
] | 34.25 | 9.25 |
def from_dict(data, ctx):
    """
    Instantiate a new MarketOrderTransaction from a dict (generally from
    loading a JSON response). The data used to instantiate the
    MarketOrderTransaction is a shallow copy of the dict passed in, with
    any complex child types instantiated appropriately.
    """
    data = data.copy()

    # Decimal-valued fields parsed with the context's number converter.
    for field in ('units', 'priceBound'):
        if data.get(field) is not None:
            data[field] = ctx.convert_decimal_number(data.get(field))

    # Nested sub-object fields: (field name, ctx.transaction factory class).
    # Replaces ten near-identical if-blocks with one data-driven loop.
    nested_fields = (
        ('tradeClose', 'MarketOrderTradeClose'),
        ('longPositionCloseout', 'MarketOrderPositionCloseout'),
        ('shortPositionCloseout', 'MarketOrderPositionCloseout'),
        ('marginCloseout', 'MarketOrderMarginCloseout'),
        ('delayedTradeClose', 'MarketOrderDelayedTradeClose'),
        ('clientExtensions', 'ClientExtensions'),
        ('takeProfitOnFill', 'TakeProfitDetails'),
        ('stopLossOnFill', 'StopLossDetails'),
        ('trailingStopLossOnFill', 'TrailingStopLossDetails'),
        ('tradeClientExtensions', 'ClientExtensions'),
    )
    for field, cls_name in nested_fields:
        if data.get(field) is not None:
            factory = getattr(ctx.transaction, cls_name)
            data[field] = factory.from_dict(data[field], ctx)

    return MarketOrderTransaction(**data)
"def",
"from_dict",
"(",
"data",
",",
"ctx",
")",
":",
"data",
"=",
"data",
".",
"copy",
"(",
")",
"if",
"data",
".",
"get",
"(",
"'units'",
")",
"is",
"not",
"None",
":",
"data",
"[",
"'units'",
"]",
"=",
"ctx",
".",
"convert_decimal_number",
"(",
"data",
".",
"get",
"(",
"'units'",
")",
")",
"if",
"data",
".",
"get",
"(",
"'priceBound'",
")",
"is",
"not",
"None",
":",
"data",
"[",
"'priceBound'",
"]",
"=",
"ctx",
".",
"convert_decimal_number",
"(",
"data",
".",
"get",
"(",
"'priceBound'",
")",
")",
"if",
"data",
".",
"get",
"(",
"'tradeClose'",
")",
"is",
"not",
"None",
":",
"data",
"[",
"'tradeClose'",
"]",
"=",
"ctx",
".",
"transaction",
".",
"MarketOrderTradeClose",
".",
"from_dict",
"(",
"data",
"[",
"'tradeClose'",
"]",
",",
"ctx",
")",
"if",
"data",
".",
"get",
"(",
"'longPositionCloseout'",
")",
"is",
"not",
"None",
":",
"data",
"[",
"'longPositionCloseout'",
"]",
"=",
"ctx",
".",
"transaction",
".",
"MarketOrderPositionCloseout",
".",
"from_dict",
"(",
"data",
"[",
"'longPositionCloseout'",
"]",
",",
"ctx",
")",
"if",
"data",
".",
"get",
"(",
"'shortPositionCloseout'",
")",
"is",
"not",
"None",
":",
"data",
"[",
"'shortPositionCloseout'",
"]",
"=",
"ctx",
".",
"transaction",
".",
"MarketOrderPositionCloseout",
".",
"from_dict",
"(",
"data",
"[",
"'shortPositionCloseout'",
"]",
",",
"ctx",
")",
"if",
"data",
".",
"get",
"(",
"'marginCloseout'",
")",
"is",
"not",
"None",
":",
"data",
"[",
"'marginCloseout'",
"]",
"=",
"ctx",
".",
"transaction",
".",
"MarketOrderMarginCloseout",
".",
"from_dict",
"(",
"data",
"[",
"'marginCloseout'",
"]",
",",
"ctx",
")",
"if",
"data",
".",
"get",
"(",
"'delayedTradeClose'",
")",
"is",
"not",
"None",
":",
"data",
"[",
"'delayedTradeClose'",
"]",
"=",
"ctx",
".",
"transaction",
".",
"MarketOrderDelayedTradeClose",
".",
"from_dict",
"(",
"data",
"[",
"'delayedTradeClose'",
"]",
",",
"ctx",
")",
"if",
"data",
".",
"get",
"(",
"'clientExtensions'",
")",
"is",
"not",
"None",
":",
"data",
"[",
"'clientExtensions'",
"]",
"=",
"ctx",
".",
"transaction",
".",
"ClientExtensions",
".",
"from_dict",
"(",
"data",
"[",
"'clientExtensions'",
"]",
",",
"ctx",
")",
"if",
"data",
".",
"get",
"(",
"'takeProfitOnFill'",
")",
"is",
"not",
"None",
":",
"data",
"[",
"'takeProfitOnFill'",
"]",
"=",
"ctx",
".",
"transaction",
".",
"TakeProfitDetails",
".",
"from_dict",
"(",
"data",
"[",
"'takeProfitOnFill'",
"]",
",",
"ctx",
")",
"if",
"data",
".",
"get",
"(",
"'stopLossOnFill'",
")",
"is",
"not",
"None",
":",
"data",
"[",
"'stopLossOnFill'",
"]",
"=",
"ctx",
".",
"transaction",
".",
"StopLossDetails",
".",
"from_dict",
"(",
"data",
"[",
"'stopLossOnFill'",
"]",
",",
"ctx",
")",
"if",
"data",
".",
"get",
"(",
"'trailingStopLossOnFill'",
")",
"is",
"not",
"None",
":",
"data",
"[",
"'trailingStopLossOnFill'",
"]",
"=",
"ctx",
".",
"transaction",
".",
"TrailingStopLossDetails",
".",
"from_dict",
"(",
"data",
"[",
"'trailingStopLossOnFill'",
"]",
",",
"ctx",
")",
"if",
"data",
".",
"get",
"(",
"'tradeClientExtensions'",
")",
"is",
"not",
"None",
":",
"data",
"[",
"'tradeClientExtensions'",
"]",
"=",
"ctx",
".",
"transaction",
".",
"ClientExtensions",
".",
"from_dict",
"(",
"data",
"[",
"'tradeClientExtensions'",
"]",
",",
"ctx",
")",
"return",
"MarketOrderTransaction",
"(",
"*",
"*",
"data",
")"
] | 36.444444 | 19.185185 |
def run_simulation(self):
    """Runs the complete simulation"""
    print('Starting simulations...')
    trial_index = 0
    while trial_index < self.num_trials:
        print('---Trial {}---'.format(trial_index))
        self.run_trial(trial_index)
        trial_index += 1
    print('Simulation completed.')
"def",
"run_simulation",
"(",
"self",
")",
":",
"print",
"(",
"'Starting simulations...'",
")",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"num_trials",
")",
":",
"print",
"(",
"'---Trial {}---'",
".",
"format",
"(",
"i",
")",
")",
"self",
".",
"run_trial",
"(",
"i",
")",
"print",
"(",
"'Simulation completed.'",
")"
] | 37 | 4.714286 |
def commit_comment(self, comment_id):
    """Get a single commit comment.

    :param int comment_id: (required), id of the comment used by GitHub
    :returns: :class:`RepoComment <github3.repos.comment.RepoComment>` if
        successful, otherwise None
    """
    comment_url = self._build_url('comments', str(comment_id),
                                  base_url=self._api)
    payload = self._json(self._get(comment_url), 200)
    if payload:
        return RepoComment(payload, self)
    return None
"def",
"commit_comment",
"(",
"self",
",",
"comment_id",
")",
":",
"url",
"=",
"self",
".",
"_build_url",
"(",
"'comments'",
",",
"str",
"(",
"comment_id",
")",
",",
"base_url",
"=",
"self",
".",
"_api",
")",
"json",
"=",
"self",
".",
"_json",
"(",
"self",
".",
"_get",
"(",
"url",
")",
",",
"200",
")",
"return",
"RepoComment",
"(",
"json",
",",
"self",
")",
"if",
"json",
"else",
"None"
] | 45.7 | 17.7 |
def run(self):
    """Thread body: emit a heartbeat every ``self.interval`` seconds, forever."""
    self.logger.debug("heartbeat started")
    # Sleep first, then beat; loops until the owning thread is torn down.
    while True:
        time.sleep(self.interval)
        self.send_heartbeat()
"def",
"run",
"(",
"self",
")",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"\"heartbeat started\"",
")",
"while",
"True",
":",
"time",
".",
"sleep",
"(",
"self",
".",
"interval",
")",
"self",
".",
"send_heartbeat",
"(",
")"
] | 24.166667 | 12.5 |
def _notify_unload_dll(self, event):
    """
    Notify the release of a loaded module.

    This is done automatically by the L{Debug} class, you shouldn't need
    to call it yourself.

    @type  event: L{UnloadDLLEvent}
    @param event: Unload DLL event.

    @rtype:  bool
    @return: C{True} to call the user-defined handle, C{False} otherwise.
    """
    base_address = event.get_module_base()
##        if self.has_module(base_address):   # XXX this would trigger a scan
    if base_address in self.__moduleDict:
        self._del_module(base_address)
    return True
"def",
"_notify_unload_dll",
"(",
"self",
",",
"event",
")",
":",
"lpBaseOfDll",
"=",
"event",
".",
"get_module_base",
"(",
")",
"## if self.has_module(lpBaseOfDll): # XXX this would trigger a scan",
"if",
"lpBaseOfDll",
"in",
"self",
".",
"__moduleDict",
":",
"self",
".",
"_del_module",
"(",
"lpBaseOfDll",
")",
"return",
"True"
] | 33.777778 | 16.777778 |
def within_n_mads(n, series):
    """Return True if all values lie within ``n`` mean absolute deviations.

    Each value's deviation from the series mean is divided by the series'
    mean absolute deviation (MAD); the check passes when every such score
    has magnitude at most ``n``.

    Args:
        n: Maximum allowed deviation, in MAD units.
        series: A ``pandas.Series`` of numeric values.

    Returns:
        bool: True when every value satisfies ``|x - mean| / MAD <= n``.
    """
    centered = series - series.mean()
    # Compute the mean absolute deviation explicitly: Series.mad() was
    # deprecated in pandas 1.5 and removed in 2.0.
    mad = centered.abs().mean()
    return bool(((centered / mad).abs() <= n).all())
"def",
"within_n_mads",
"(",
"n",
",",
"series",
")",
":",
"mad_score",
"=",
"(",
"series",
"-",
"series",
".",
"mean",
"(",
")",
")",
"/",
"series",
".",
"mad",
"(",
")",
"return",
"(",
"mad_score",
".",
"abs",
"(",
")",
"<=",
"n",
")",
".",
"all",
"(",
")"
] | 47 | 6.75 |
def score_alignment(a, b, gap_open, gap_extend, matrix):
    '''Calculate the alignment score from two aligned sequences.

    Matched positions are scored via the substitution matrix; gap columns
    incur `gap_open` when a gap starts and `gap_extend` while it continues.

    :param a: The first aligned sequence.
    :type a: str
    :param b: The second aligned sequence.
    :type b: str
    :param gap_open: The cost of opening a gap (negative number).
    :type gap_open: int
    :param gap_extend: The cost of extending an open gap (negative number).
    :type gap_extend: int.
    :param matrix: A score matrix dictionary name. Examples can be found in
                   the substitution_matrices module.
    '''
    length = len(a)
    assert len(b) == length, 'Alignment lengths must be the same'
    scores = as_ord_matrix(matrix)
    total = 0
    in_gap = False
    for pos in range(length):
        res_a, res_b = a[pos], b[pos]
        if '-' in (res_a, res_b):
            total += gap_extend if in_gap else gap_open
            in_gap = True
        else:
            total += scores[ord(res_a), ord(res_b)]
            in_gap = False
    return total
"def",
"score_alignment",
"(",
"a",
",",
"b",
",",
"gap_open",
",",
"gap_extend",
",",
"matrix",
")",
":",
"al",
"=",
"a",
"bl",
"=",
"b",
"l",
"=",
"len",
"(",
"al",
")",
"score",
"=",
"0",
"assert",
"len",
"(",
"bl",
")",
"==",
"l",
",",
"'Alignment lengths must be the same'",
"mat",
"=",
"as_ord_matrix",
"(",
"matrix",
")",
"gap_started",
"=",
"0",
"for",
"i",
"in",
"range",
"(",
"l",
")",
":",
"if",
"al",
"[",
"i",
"]",
"==",
"'-'",
"or",
"bl",
"[",
"i",
"]",
"==",
"'-'",
":",
"score",
"+=",
"gap_extend",
"if",
"gap_started",
"else",
"gap_open",
"gap_started",
"=",
"1",
"else",
":",
"score",
"+=",
"mat",
"[",
"ord",
"(",
"al",
"[",
"i",
"]",
")",
",",
"ord",
"(",
"bl",
"[",
"i",
"]",
")",
"]",
"gap_started",
"=",
"0",
"return",
"score"
] | 30.3125 | 22.125 |
def stderr(self):
    """
    The job stderr

    :return: the second entry of the payload's ``streams`` list, or the
        empty string when it is missing or too short
    """
    streams = self._payload.get('streams')
    if streams is None or len(streams) < 2:
        return ''
    return streams[1]
"def",
"stderr",
"(",
"self",
")",
":",
"streams",
"=",
"self",
".",
"_payload",
".",
"get",
"(",
"'streams'",
",",
"None",
")",
"return",
"streams",
"[",
"1",
"]",
"if",
"streams",
"is",
"not",
"None",
"and",
"len",
"(",
"streams",
")",
">=",
"2",
"else",
"''"
] | 31.714286 | 14.285714 |
def raw_rsa_public_crypt(certificate_or_public_key, data):
    """
    Performs a raw RSA algorithm in a byte string using a certificate or
    public key. This is a low-level primitive and is prone to disastrous results
    if used incorrectly.

    :param certificate_or_public_key:
        An oscrypto.asymmetric.PublicKey or oscrypto.asymmetric.Certificate
        object

    :param data:
        A byte string of the signature when verifying, or padded plaintext when
        encrypting. Must be less than or equal to the length of the public key.
        When verifying, padding will need to be removed afterwards. When
        encrypting, padding must be applied before.

    :return:
        A byte string of the transformed data
    """
    # The pure-Python fallback exists only for the legacy Windows backend;
    # other backends use native RSA primitives instead.
    if _backend != 'winlegacy':
        raise SystemError('Pure-python RSA crypt is only for Windows XP/2003')
    has_asn1 = hasattr(certificate_or_public_key, 'asn1')
    valid_types = (PublicKeyInfo, Certificate)
    if not has_asn1 or not isinstance(certificate_or_public_key.asn1, valid_types):
        raise TypeError(pretty_message(
            '''
            certificate_or_public_key must be an instance of the
            oscrypto.asymmetric.PublicKey or oscrypto.asymmetric.Certificate
            classes, not %s
            ''',
            type_name(certificate_or_public_key)
        ))
    algo = certificate_or_public_key.asn1['algorithm']['algorithm'].native
    if algo != 'rsa':
        raise ValueError(pretty_message(
            '''
            certificate_or_public_key must be an RSA key, not %s
            ''',
            algo.upper()
        ))
    if not isinstance(data, byte_cls):
        raise TypeError(pretty_message(
            '''
            data must be a byte string, not %s
            ''',
            type_name(data)
        ))
    rsa_public_key = certificate_or_public_key.asn1['public_key'].parsed
    # Raw ("textbook") RSA: data^e mod n. No padding is applied or removed
    # here; callers handle padding themselves.
    transformed_int = pow(
        int_from_bytes(data),
        rsa_public_key['public_exponent'].native,
        rsa_public_key['modulus'].native
    )
    # Serialize big-endian, left-padded to the key's byte size.
    return int_to_bytes(
        transformed_int,
        width=certificate_or_public_key.asn1.byte_size
    )
"def",
"raw_rsa_public_crypt",
"(",
"certificate_or_public_key",
",",
"data",
")",
":",
"if",
"_backend",
"!=",
"'winlegacy'",
":",
"raise",
"SystemError",
"(",
"'Pure-python RSA crypt is only for Windows XP/2003'",
")",
"has_asn1",
"=",
"hasattr",
"(",
"certificate_or_public_key",
",",
"'asn1'",
")",
"valid_types",
"=",
"(",
"PublicKeyInfo",
",",
"Certificate",
")",
"if",
"not",
"has_asn1",
"or",
"not",
"isinstance",
"(",
"certificate_or_public_key",
".",
"asn1",
",",
"valid_types",
")",
":",
"raise",
"TypeError",
"(",
"pretty_message",
"(",
"'''\n certificate_or_public_key must be an instance of the\n oscrypto.asymmetric.PublicKey or oscrypto.asymmetric.Certificate\n classes, not %s\n '''",
",",
"type_name",
"(",
"certificate_or_public_key",
")",
")",
")",
"algo",
"=",
"certificate_or_public_key",
".",
"asn1",
"[",
"'algorithm'",
"]",
"[",
"'algorithm'",
"]",
".",
"native",
"if",
"algo",
"!=",
"'rsa'",
":",
"raise",
"ValueError",
"(",
"pretty_message",
"(",
"'''\n certificate_or_public_key must be an RSA key, not %s\n '''",
",",
"algo",
".",
"upper",
"(",
")",
")",
")",
"if",
"not",
"isinstance",
"(",
"data",
",",
"byte_cls",
")",
":",
"raise",
"TypeError",
"(",
"pretty_message",
"(",
"'''\n data must be a byte string, not %s\n '''",
",",
"type_name",
"(",
"data",
")",
")",
")",
"rsa_public_key",
"=",
"certificate_or_public_key",
".",
"asn1",
"[",
"'public_key'",
"]",
".",
"parsed",
"transformed_int",
"=",
"pow",
"(",
"int_from_bytes",
"(",
"data",
")",
",",
"rsa_public_key",
"[",
"'public_exponent'",
"]",
".",
"native",
",",
"rsa_public_key",
"[",
"'modulus'",
"]",
".",
"native",
")",
"return",
"int_to_bytes",
"(",
"transformed_int",
",",
"width",
"=",
"certificate_or_public_key",
".",
"asn1",
".",
"byte_size",
")"
] | 33.887097 | 22.532258 |
def insert(self, database, key, value, callback=None):
    """
    Insert an item into the given database.

    :param database: The database into which to insert the value.
    :type database: .BlobDatabaseID
    :param key: The key to insert.
    :type key: uuid.UUID
    :param value: The value to insert.
    :type value: bytes
    :param callback: A callback to be called on success or failure.
    """
    # Each request gets a fresh token so the eventual response can be
    # matched back to this request's callback by the queue machinery.
    token = self._get_token()
    self._enqueue(self._PendingItem(token, BlobCommand(token=token, database=database,
                                                       content=InsertCommand(key=key.bytes, value=value)),
                                    callback))
"def",
"insert",
"(",
"self",
",",
"database",
",",
"key",
",",
"value",
",",
"callback",
"=",
"None",
")",
":",
"token",
"=",
"self",
".",
"_get_token",
"(",
")",
"self",
".",
"_enqueue",
"(",
"self",
".",
"_PendingItem",
"(",
"token",
",",
"BlobCommand",
"(",
"token",
"=",
"token",
",",
"database",
"=",
"database",
",",
"content",
"=",
"InsertCommand",
"(",
"key",
"=",
"key",
".",
"bytes",
",",
"value",
"=",
"value",
")",
")",
",",
"callback",
")",
")"
] | 44.9375 | 18.0625 |
def get_or_add_childTnLst(self):
    """Return parent element for a new `p:video` child element.

    The `p:video` element causes play controls to appear under a video
    shape (pic shape containing video). There can be more than one video
    shape on a slide, which causes the precondition to vary. It needs to
    handle the case when there is no `p:sld/p:timing` element and when
    that element already exists. If the case isn't simple, it just nukes
    what's there and adds a fresh one. This could theoretically remove
    desired existing timing information, but there isn't any evidence
    available to me one way or the other, so I've taken the simple
    approach.
    """
    existing = self._childTnLst
    return existing if existing is not None else self._add_childTnLst()
"def",
"get_or_add_childTnLst",
"(",
"self",
")",
":",
"childTnLst",
"=",
"self",
".",
"_childTnLst",
"if",
"childTnLst",
"is",
"None",
":",
"childTnLst",
"=",
"self",
".",
"_add_childTnLst",
"(",
")",
"return",
"childTnLst"
] | 50.529412 | 22.294118 |
def step_next_char(self):
    """Puts the cursor on the next character."""
    self._index = self._index + 1
    self._col_offset = self._col_offset + 1
    # Record the furthest position the cursor has ever reached.
    if self._maxindex < self._index:
        self._maxindex = self._index
        self._maxcol = self._col_offset
        self._maxline = self._lineno
"def",
"step_next_char",
"(",
"self",
")",
":",
"self",
".",
"_index",
"+=",
"1",
"self",
".",
"_col_offset",
"+=",
"1",
"if",
"self",
".",
"_index",
">",
"self",
".",
"_maxindex",
":",
"self",
".",
"_maxindex",
"=",
"self",
".",
"_index",
"self",
".",
"_maxcol",
"=",
"self",
".",
"_col_offset",
"self",
".",
"_maxline",
"=",
"self",
".",
"_lineno"
] | 36.625 | 5.625 |
def dumps(self) -> str:
    """Write the step information to an HTML-formatted string.

    Renders the step's source code, accumulated report body, progress
    values and captured stderr through the ``step-body.html`` template.
    Once the step has finished running, the rendered DOM is cached on
    ``self.dom``.

    Returns:
        The rendered HTML string for this step.
    """
    # Absolute path of the step's source file within the project, so it
    # can be syntax-highlighted in the rendered output.
    code_file_path = os.path.join(
        self.project.source_directory,
        self.filename
    )
    code = dict(
        filename=self.filename,
        path=code_file_path,
        code=render.code_file(code_file_path)
    )
    if not self.is_running:
        # If no longer running, make sure to flush the stdout buffer so
        # any print statements at the end of the step get included in
        # the body
        self.report.flush_stdout()
    # Create a copy of the body for dumping
    body = self.report.body[:]
    if self.is_running:
        # If still running add a temporary copy of anything not flushed
        # from the stdout buffer to the copy of the body for display. Do
        # not flush the buffer though until the step is done running or
        # it gets flushed by another display call.
        body.append(self.report.read_stdout())
    body = ''.join(body)
    # Heuristic: the body only counts as displayable when it contains at
    # least one of the HTML tags produced by display calls.
    has_body = len(body) > 0 and (
        body.find('<div') != -1 or
        body.find('<span') != -1 or
        body.find('<p') != -1 or
        body.find('<pre') != -1 or
        body.find('<h') != -1 or
        body.find('<ol') != -1 or
        body.find('<ul') != -1 or
        body.find('<li') != -1
    )
    # While running, read stderr non-destructively; once finished, flush
    # it so nothing is left behind in the buffer.
    std_err = (
        self.report.read_stderr()
        if self.is_running else
        self.report.flush_stderr()
    ).strip('\n').rstrip()
    # The step will be visible in the display if any of the following
    # conditions are true.
    is_visible = self.is_visible or self.is_running or self.error
    dom = templating.render_template(
        'step-body.html',
        last_display_update=self.report.last_update_time,
        elapsed_time=self.get_elapsed_timestamp(),
        code=code,
        body=body,
        has_body=has_body,
        id=self.definition.name,
        title=self.report.title,
        subtitle=self.report.subtitle,
        summary=self.report.summary,
        error=self.error,
        index=self.index,
        is_running=self.is_running,
        is_visible=is_visible,
        progress_message=self.progress_message,
        # Progress values are clamped to [0, 100] integer percentages.
        progress=int(round(max(0, min(100, 100 * self.progress)))),
        sub_progress_message=self.sub_progress_message,
        sub_progress=int(round(max(0, min(100, 100 * self.sub_progress)))),
        std_err=std_err
    )
    if not self.is_running:
        # Cache the final render only when the step has completed.
        self.dom = dom
    return dom
"def",
"dumps",
"(",
"self",
")",
"->",
"str",
":",
"code_file_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"project",
".",
"source_directory",
",",
"self",
".",
"filename",
")",
"code",
"=",
"dict",
"(",
"filename",
"=",
"self",
".",
"filename",
",",
"path",
"=",
"code_file_path",
",",
"code",
"=",
"render",
".",
"code_file",
"(",
"code_file_path",
")",
")",
"if",
"not",
"self",
".",
"is_running",
":",
"# If no longer running, make sure to flush the stdout buffer so",
"# any print statements at the end of the step get included in",
"# the body",
"self",
".",
"report",
".",
"flush_stdout",
"(",
")",
"# Create a copy of the body for dumping",
"body",
"=",
"self",
".",
"report",
".",
"body",
"[",
":",
"]",
"if",
"self",
".",
"is_running",
":",
"# If still running add a temporary copy of anything not flushed",
"# from the stdout buffer to the copy of the body for display. Do",
"# not flush the buffer though until the step is done running or",
"# it gets flushed by another display call.",
"body",
".",
"append",
"(",
"self",
".",
"report",
".",
"read_stdout",
"(",
")",
")",
"body",
"=",
"''",
".",
"join",
"(",
"body",
")",
"has_body",
"=",
"len",
"(",
"body",
")",
">",
"0",
"and",
"(",
"body",
".",
"find",
"(",
"'<div'",
")",
"!=",
"-",
"1",
"or",
"body",
".",
"find",
"(",
"'<span'",
")",
"!=",
"-",
"1",
"or",
"body",
".",
"find",
"(",
"'<p'",
")",
"!=",
"-",
"1",
"or",
"body",
".",
"find",
"(",
"'<pre'",
")",
"!=",
"-",
"1",
"or",
"body",
".",
"find",
"(",
"'<h'",
")",
"!=",
"-",
"1",
"or",
"body",
".",
"find",
"(",
"'<ol'",
")",
"!=",
"-",
"1",
"or",
"body",
".",
"find",
"(",
"'<ul'",
")",
"!=",
"-",
"1",
"or",
"body",
".",
"find",
"(",
"'<li'",
")",
"!=",
"-",
"1",
")",
"std_err",
"=",
"(",
"self",
".",
"report",
".",
"read_stderr",
"(",
")",
"if",
"self",
".",
"is_running",
"else",
"self",
".",
"report",
".",
"flush_stderr",
"(",
")",
")",
".",
"strip",
"(",
"'\\n'",
")",
".",
"rstrip",
"(",
")",
"# The step will be visible in the display if any of the following",
"# conditions are true.",
"is_visible",
"=",
"self",
".",
"is_visible",
"or",
"self",
".",
"is_running",
"or",
"self",
".",
"error",
"dom",
"=",
"templating",
".",
"render_template",
"(",
"'step-body.html'",
",",
"last_display_update",
"=",
"self",
".",
"report",
".",
"last_update_time",
",",
"elapsed_time",
"=",
"self",
".",
"get_elapsed_timestamp",
"(",
")",
",",
"code",
"=",
"code",
",",
"body",
"=",
"body",
",",
"has_body",
"=",
"has_body",
",",
"id",
"=",
"self",
".",
"definition",
".",
"name",
",",
"title",
"=",
"self",
".",
"report",
".",
"title",
",",
"subtitle",
"=",
"self",
".",
"report",
".",
"subtitle",
",",
"summary",
"=",
"self",
".",
"report",
".",
"summary",
",",
"error",
"=",
"self",
".",
"error",
",",
"index",
"=",
"self",
".",
"index",
",",
"is_running",
"=",
"self",
".",
"is_running",
",",
"is_visible",
"=",
"is_visible",
",",
"progress_message",
"=",
"self",
".",
"progress_message",
",",
"progress",
"=",
"int",
"(",
"round",
"(",
"max",
"(",
"0",
",",
"min",
"(",
"100",
",",
"100",
"*",
"self",
".",
"progress",
")",
")",
")",
")",
",",
"sub_progress_message",
"=",
"self",
".",
"sub_progress_message",
",",
"sub_progress",
"=",
"int",
"(",
"round",
"(",
"max",
"(",
"0",
",",
"min",
"(",
"100",
",",
"100",
"*",
"self",
".",
"sub_progress",
")",
")",
")",
")",
",",
"std_err",
"=",
"std_err",
")",
"if",
"not",
"self",
".",
"is_running",
":",
"self",
".",
"dom",
"=",
"dom",
"return",
"dom"
] | 34.789474 | 16.539474 |
def _postprocess_variants(record_file, data, ref_file, out_file):
    """Convert DeepVariant call records into a standard VCF file.

    Skips the conversion entirely when ``out_file`` is already up to
    date relative to ``record_file``.
    """
    if utils.file_uptodate(out_file, record_file):
        return out_file
    with file_transaction(data, out_file) as tx_out_file:
        cmd = ["dv_postprocess_variants.py",
               "--ref", ref_file,
               "--infile", record_file,
               "--outfile", tx_out_file]
        do.run(cmd, "DeepVariant postprocess_variants %s" % dd.get_sample_name(data))
    return out_file
"def",
"_postprocess_variants",
"(",
"record_file",
",",
"data",
",",
"ref_file",
",",
"out_file",
")",
":",
"if",
"not",
"utils",
".",
"file_uptodate",
"(",
"out_file",
",",
"record_file",
")",
":",
"with",
"file_transaction",
"(",
"data",
",",
"out_file",
")",
"as",
"tx_out_file",
":",
"cmd",
"=",
"[",
"\"dv_postprocess_variants.py\"",
",",
"\"--ref\"",
",",
"ref_file",
",",
"\"--infile\"",
",",
"record_file",
",",
"\"--outfile\"",
",",
"tx_out_file",
"]",
"do",
".",
"run",
"(",
"cmd",
",",
"\"DeepVariant postprocess_variants %s\"",
"%",
"dd",
".",
"get_sample_name",
"(",
"data",
")",
")",
"return",
"out_file"
] | 55 | 20.666667 |
def build_option_parser(self, description, version):
    """Create the argparse parser used by this application.

    Subclasses may override this method to extend the parser with
    additional global options.

    :param description: full description of the application
    :paramtype description: str
    :param version: version number for the application
    :paramtype version: str
    """
    parser = argparse.ArgumentParser(description=description,
                                     add_help=False)
    # Alias to keep the long run of option registrations readable.
    add = parser.add_argument
    add('--version',
        action='version',
        version=__version__)
    add('-v', '--verbose', '--debug',
        action='count',
        dest='verbose_level',
        default=self.DEFAULT_VERBOSE_LEVEL,
        help=_('Increase verbosity of output and show tracebacks on'
               ' errors. You can repeat this option.'))
    add('-q', '--quiet',
        action='store_const',
        dest='verbose_level',
        const=0,
        help=_('Suppress output except warnings and errors.'))
    add('-h', '--help',
        action=HelpAction,
        nargs=0,
        default=self,  # tricky
        help=_("Show this help message and exit."))
    add('-r', '--retries',
        metavar="NUM",
        type=check_non_negative_int,
        default=0,
        help=_("How many times the request to the Neutron server should "
               "be retried if it fails."))
    # FIXME(bklei): this method should come from keystoneauth1
    self._append_global_identity_args(parser)
    return parser
"def",
"build_option_parser",
"(",
"self",
",",
"description",
",",
"version",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"description",
",",
"add_help",
"=",
"False",
",",
")",
"parser",
".",
"add_argument",
"(",
"'--version'",
",",
"action",
"=",
"'version'",
",",
"version",
"=",
"__version__",
",",
")",
"parser",
".",
"add_argument",
"(",
"'-v'",
",",
"'--verbose'",
",",
"'--debug'",
",",
"action",
"=",
"'count'",
",",
"dest",
"=",
"'verbose_level'",
",",
"default",
"=",
"self",
".",
"DEFAULT_VERBOSE_LEVEL",
",",
"help",
"=",
"_",
"(",
"'Increase verbosity of output and show tracebacks on'",
"' errors. You can repeat this option.'",
")",
")",
"parser",
".",
"add_argument",
"(",
"'-q'",
",",
"'--quiet'",
",",
"action",
"=",
"'store_const'",
",",
"dest",
"=",
"'verbose_level'",
",",
"const",
"=",
"0",
",",
"help",
"=",
"_",
"(",
"'Suppress output except warnings and errors.'",
")",
")",
"parser",
".",
"add_argument",
"(",
"'-h'",
",",
"'--help'",
",",
"action",
"=",
"HelpAction",
",",
"nargs",
"=",
"0",
",",
"default",
"=",
"self",
",",
"# tricky",
"help",
"=",
"_",
"(",
"\"Show this help message and exit.\"",
")",
")",
"parser",
".",
"add_argument",
"(",
"'-r'",
",",
"'--retries'",
",",
"metavar",
"=",
"\"NUM\"",
",",
"type",
"=",
"check_non_negative_int",
",",
"default",
"=",
"0",
",",
"help",
"=",
"_",
"(",
"\"How many times the request to the Neutron server should \"",
"\"be retried if it fails.\"",
")",
")",
"# FIXME(bklei): this method should come from keystoneauth1",
"self",
".",
"_append_global_identity_args",
"(",
"parser",
")",
"return",
"parser"
] | 36.416667 | 13.875 |
def fitlin_clipped(xy,uv,verbose=False,mode='rscale',nclip=3,reject=3):
    """ Perform a clipped fit based on the number of iterations and rejection limit
    (in sigma) specified by the user. This will more closely replicate the results
    obtained by 'geomap' using 'maxiter' and 'reject' parameters.

    Parameters
    ----------
    xy : ndarray, shape (n, 2)
        Input positions to be fit.
    uv : ndarray, shape (n, 2)
        Reference (output) positions.
    verbose : bool
        Print clipping diagnostics after each iteration.
    mode : str
        Which linear fit to perform: 'rscale' or 'general'.
    nclip : int
        Maximum number of sigma-clipping iterations.
    reject : float
        Rejection limit in units of the residual standard deviation.

    Returns
    -------
    P, Q
        Fit coefficients as produced by the selected fitting function.
    fit_rms : list
        [x_rms, y_rms] standard deviations of the final fit residuals.
    """
    fitting_funcs = {'rscale':fitlin_rscale,'general':fitlin}
    # Get the fitting function to be used
    fit_func = fitting_funcs[mode.lower()]
    # Perform the initial fit
    P,Q = fit_func(xy,uv)
    xyc = apply_fitlin(xy,P,Q)
    # compute residuals from fit for input positions
    dx = uv[:,0] - xyc[0]
    dy = uv[:,1] - xyc[1]
    fit_rms = [dx.std(),dy.std()]
    if nclip > 0:
        # Work on copies so the caller's arrays are never clipped in place.
        data = xy.copy()
        outdata = uv.copy()
        numclipped = 0
        for i in range(nclip):
            iterclipped = 0
            xyc = apply_fitlin(data,P,Q)
            # compute residuals from fit for input positions
            dx = outdata[:,0] - xyc[0]
            dy = outdata[:,1] - xyc[1]
            # find indices of outliers in x and y
            xout = np.where(np.abs(dx - dx.mean()) > reject*dx.std())
            yout = np.where(np.abs(dy - dy.mean()) > reject*dy.std())
            # concatenate those indices and sort them
            outliers_indx = xout[0].tolist()+yout[0].tolist()
            outliers_indx.sort()
            # define the full range of indices for the data points left
            full_indx = list(range(data.shape[0]))
            # remove all unique indices specified in outliers from full range
            for o in outliers_indx:
                # only remove if it has not been removed already
                # accounts for the same point being an outlier in both x and y
                if full_indx.count(o) > 0:
                    full_indx.remove(o)
                    iterclipped += 1
            # Converged: nothing was rejected this pass, so stop iterating.
            if iterclipped == 0:
                break
            numclipped += iterclipped
            if verbose:
                print('Removed a total of ',numclipped,' points through iteration ',i+1)
            # create clipped data
            data_iter = np.zeros([len(full_indx),2],dtype=data.dtype)
            if verbose:
                print('Iter #',i+1,' data:',data.shape,data_iter.shape,len(full_indx))
            data_iter[:,0] = data[:,0][full_indx]
            data_iter[:,1] = data[:,1][full_indx]
            outdata_iter = np.zeros([len(full_indx),2],dtype=data.dtype)
            outdata_iter[:,0] = outdata[:,0][full_indx]
            outdata_iter[:,1] = outdata[:,1][full_indx]
            # perform the fit again with the clipped data and go to the next iteration
            data = data_iter
            outdata = outdata_iter
            P,Q = fit_func(data,outdata)
        # compute residuals from fit for input positions
        xyc = apply_fitlin(data,P,Q)
        dx = outdata[:,0] - xyc[0]
        dy = outdata[:,1] - xyc[1]
        fit_rms = [dx.std(),dy.std()]
    # NOTE(review): `numclipped` is only assigned inside the `nclip > 0`
    # branch; calling with verbose=True and nclip <= 0 would raise
    # NameError here — confirm callers always pass nclip > 0.
    if verbose:
        print('Fit clipped ',numclipped,' points over ',nclip,' iterations.')
    return P,Q,fit_rms
"def",
"fitlin_clipped",
"(",
"xy",
",",
"uv",
",",
"verbose",
"=",
"False",
",",
"mode",
"=",
"'rscale'",
",",
"nclip",
"=",
"3",
",",
"reject",
"=",
"3",
")",
":",
"fitting_funcs",
"=",
"{",
"'rscale'",
":",
"fitlin_rscale",
",",
"'general'",
":",
"fitlin",
"}",
"# Get the fitting function to be used",
"fit_func",
"=",
"fitting_funcs",
"[",
"mode",
".",
"lower",
"(",
")",
"]",
"# Perform the initial fit",
"P",
",",
"Q",
"=",
"fit_func",
"(",
"xy",
",",
"uv",
")",
"xyc",
"=",
"apply_fitlin",
"(",
"xy",
",",
"P",
",",
"Q",
")",
"# compute residuals from fit for input positions",
"dx",
"=",
"uv",
"[",
":",
",",
"0",
"]",
"-",
"xyc",
"[",
"0",
"]",
"dy",
"=",
"uv",
"[",
":",
",",
"1",
"]",
"-",
"xyc",
"[",
"1",
"]",
"fit_rms",
"=",
"[",
"dx",
".",
"std",
"(",
")",
",",
"dy",
".",
"std",
"(",
")",
"]",
"if",
"nclip",
">",
"0",
":",
"data",
"=",
"xy",
".",
"copy",
"(",
")",
"outdata",
"=",
"uv",
".",
"copy",
"(",
")",
"numclipped",
"=",
"0",
"for",
"i",
"in",
"range",
"(",
"nclip",
")",
":",
"iterclipped",
"=",
"0",
"xyc",
"=",
"apply_fitlin",
"(",
"data",
",",
"P",
",",
"Q",
")",
"# compute residuals from fit for input positions",
"dx",
"=",
"outdata",
"[",
":",
",",
"0",
"]",
"-",
"xyc",
"[",
"0",
"]",
"dy",
"=",
"outdata",
"[",
":",
",",
"1",
"]",
"-",
"xyc",
"[",
"1",
"]",
"# find indices of outliers in x and y",
"xout",
"=",
"np",
".",
"where",
"(",
"np",
".",
"abs",
"(",
"dx",
"-",
"dx",
".",
"mean",
"(",
")",
")",
">",
"reject",
"*",
"dx",
".",
"std",
"(",
")",
")",
"yout",
"=",
"np",
".",
"where",
"(",
"np",
".",
"abs",
"(",
"dy",
"-",
"dy",
".",
"mean",
"(",
")",
")",
">",
"reject",
"*",
"dy",
".",
"std",
"(",
")",
")",
"# concatenate those indices and sort them",
"outliers_indx",
"=",
"xout",
"[",
"0",
"]",
".",
"tolist",
"(",
")",
"+",
"yout",
"[",
"0",
"]",
".",
"tolist",
"(",
")",
"outliers_indx",
".",
"sort",
"(",
")",
"# define the full range of indices for the data points left",
"full_indx",
"=",
"list",
"(",
"range",
"(",
"data",
".",
"shape",
"[",
"0",
"]",
")",
")",
"# remove all unique indices specified in outliers from full range",
"for",
"o",
"in",
"outliers_indx",
":",
"# only remove if it has not been removed already",
"# accounts for the same point being an outlier in both x and y",
"if",
"full_indx",
".",
"count",
"(",
"o",
")",
">",
"0",
":",
"full_indx",
".",
"remove",
"(",
"o",
")",
"iterclipped",
"+=",
"1",
"if",
"iterclipped",
"==",
"0",
":",
"break",
"numclipped",
"+=",
"iterclipped",
"if",
"verbose",
":",
"print",
"(",
"'Removed a total of '",
",",
"numclipped",
",",
"' points through iteration '",
",",
"i",
"+",
"1",
")",
"# create clipped data",
"data_iter",
"=",
"np",
".",
"zeros",
"(",
"[",
"len",
"(",
"full_indx",
")",
",",
"2",
"]",
",",
"dtype",
"=",
"data",
".",
"dtype",
")",
"if",
"verbose",
":",
"print",
"(",
"'Iter #'",
",",
"i",
"+",
"1",
",",
"' data:'",
",",
"data",
".",
"shape",
",",
"data_iter",
".",
"shape",
",",
"len",
"(",
"full_indx",
")",
")",
"data_iter",
"[",
":",
",",
"0",
"]",
"=",
"data",
"[",
":",
",",
"0",
"]",
"[",
"full_indx",
"]",
"data_iter",
"[",
":",
",",
"1",
"]",
"=",
"data",
"[",
":",
",",
"1",
"]",
"[",
"full_indx",
"]",
"outdata_iter",
"=",
"np",
".",
"zeros",
"(",
"[",
"len",
"(",
"full_indx",
")",
",",
"2",
"]",
",",
"dtype",
"=",
"data",
".",
"dtype",
")",
"outdata_iter",
"[",
":",
",",
"0",
"]",
"=",
"outdata",
"[",
":",
",",
"0",
"]",
"[",
"full_indx",
"]",
"outdata_iter",
"[",
":",
",",
"1",
"]",
"=",
"outdata",
"[",
":",
",",
"1",
"]",
"[",
"full_indx",
"]",
"# perform the fit again with the clipped data and go to the next iteration",
"data",
"=",
"data_iter",
"outdata",
"=",
"outdata_iter",
"P",
",",
"Q",
"=",
"fit_func",
"(",
"data",
",",
"outdata",
")",
"# compute residuals from fit for input positions",
"xyc",
"=",
"apply_fitlin",
"(",
"data",
",",
"P",
",",
"Q",
")",
"dx",
"=",
"outdata",
"[",
":",
",",
"0",
"]",
"-",
"xyc",
"[",
"0",
"]",
"dy",
"=",
"outdata",
"[",
":",
",",
"1",
"]",
"-",
"xyc",
"[",
"1",
"]",
"fit_rms",
"=",
"[",
"dx",
".",
"std",
"(",
")",
",",
"dy",
".",
"std",
"(",
")",
"]",
"if",
"verbose",
":",
"print",
"(",
"'Fit clipped '",
",",
"numclipped",
",",
"' points over '",
",",
"nclip",
",",
"' iterations.'",
")",
"return",
"P",
",",
"Q",
",",
"fit_rms"
] | 37.552632 | 19.026316 |
def pretty_xml(string_input, add_ns=False):
    """Return *string_input* re-serialized as indented ("pretty") XML.

    When ``add_ns`` is true, the input is first wrapped in a ``<foo>``
    element carrying the attributes from ``DOC_CONTENT_ATTRIB`` so that
    fragments relying on those namespace declarations can be parsed;
    only the original fragment (the wrapper's child) is then rendered.
    """
    if add_ns:
        attrs = "".join(
            ' %s="%s"' % (key, value)
            for key, value in DOC_CONTENT_ATTRIB.items()
        )
        string_input = "<foo " + attrs + ">" + string_input + "</foo>"
    doc = minidom.parseString(string_input)
    if add_ns:
        return doc.childNodes[0].childNodes[0].toprettyxml("  ")
    return doc.toprettyxml("  ")
"def",
"pretty_xml",
"(",
"string_input",
",",
"add_ns",
"=",
"False",
")",
":",
"if",
"add_ns",
":",
"elem",
"=",
"\"<foo \"",
"for",
"key",
",",
"value",
"in",
"DOC_CONTENT_ATTRIB",
".",
"items",
"(",
")",
":",
"elem",
"+=",
"' %s=\"%s\"'",
"%",
"(",
"key",
",",
"value",
")",
"string_input",
"=",
"elem",
"+",
"\">\"",
"+",
"string_input",
"+",
"\"</foo>\"",
"doc",
"=",
"minidom",
".",
"parseString",
"(",
"string_input",
")",
"if",
"add_ns",
":",
"s1",
"=",
"doc",
".",
"childNodes",
"[",
"0",
"]",
".",
"childNodes",
"[",
"0",
"]",
".",
"toprettyxml",
"(",
"\" \"",
")",
"else",
":",
"s1",
"=",
"doc",
".",
"toprettyxml",
"(",
"\" \"",
")",
"return",
"s1"
] | 34.538462 | 15.307692 |
def update_ip_rule(self, ip, mac):
    """Rewrite the iptables rule that pairs *ip* with *mac*.

    Looks up the chain name and rule number previously recorded for the
    MAC address and replaces that rule in place via ``iptables -R``.
    Logs an error and returns without acting when either lookup fails.
    """
    rule_no = self._find_rule_no(mac)
    chain = self._find_chain_name(mac)
    if not (rule_no and chain):
        LOG.error('Failed to update ip rule for %(ip)s %(mac)s',
                  {'ip': ip, 'mac': mac})
        return
    update_cmd = [
        'iptables', '-R', '%s' % chain, '%s' % rule_no,
        '-s', '%s/32' % ip,
        '-m', 'mac', '--mac-source', '%s' % mac,
        '-j', 'RETURN',
    ]
    LOG.debug('Execute command: %s', update_cmd)
    dsl.execute(update_cmd, self._root_helper, log_output=False)
"def",
"update_ip_rule",
"(",
"self",
",",
"ip",
",",
"mac",
")",
":",
"rule_no",
"=",
"self",
".",
"_find_rule_no",
"(",
"mac",
")",
"chain",
"=",
"self",
".",
"_find_chain_name",
"(",
"mac",
")",
"if",
"not",
"rule_no",
"or",
"not",
"chain",
":",
"LOG",
".",
"error",
"(",
"'Failed to update ip rule for %(ip)s %(mac)s'",
",",
"{",
"'ip'",
":",
"ip",
",",
"'mac'",
":",
"mac",
"}",
")",
"return",
"update_cmd",
"=",
"[",
"'iptables'",
",",
"'-R'",
",",
"'%s'",
"%",
"chain",
",",
"'%s'",
"%",
"rule_no",
",",
"'-s'",
",",
"'%s/32'",
"%",
"ip",
",",
"'-m'",
",",
"'mac'",
",",
"'--mac-source'",
",",
"'%s'",
"%",
"mac",
",",
"'-j'",
",",
"'RETURN'",
"]",
"LOG",
".",
"debug",
"(",
"'Execute command: %s'",
",",
"update_cmd",
")",
"dsl",
".",
"execute",
"(",
"update_cmd",
",",
"self",
".",
"_root_helper",
",",
"log_output",
"=",
"False",
")"
] | 43.533333 | 17.066667 |
def _eval_function_wrapper(func):
"""Decorate an eval function.
Note
----
For multi-class task, the y_pred is group by class_id first, then group by row_id.
If you want to get i-th row y_pred in j-th class, the access way is y_pred[j * num_data + i].
Parameters
----------
func : callable
Expects a callable with following signatures:
``func(y_true, y_pred)``,
``func(y_true, y_pred, weight)``
or ``func(y_true, y_pred, weight, group)``
and returns (eval_name->string, eval_result->float, is_bigger_better->bool):
y_true : array-like of shape = [n_samples]
The target values.
y_pred : array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)
The predicted values.
weight : array-like of shape = [n_samples]
The weight of samples.
group : array-like
Group/query data, used for ranking task.
Returns
-------
new_func : callable
The new eval function as expected by ``lightgbm.engine.train``.
The signature is ``new_func(preds, dataset)``:
preds : array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)
The predicted values.
dataset : Dataset
The training set from which the labels will be extracted using ``dataset.get_label()``.
"""
def inner(preds, dataset):
"""Call passed function with appropriate arguments."""
labels = dataset.get_label()
argc = argc_(func)
if argc == 2:
return func(labels, preds)
elif argc == 3:
return func(labels, preds, dataset.get_weight())
elif argc == 4:
return func(labels, preds, dataset.get_weight(), dataset.get_group())
else:
raise TypeError("Self-defined eval function should have 2, 3 or 4 arguments, got %d" % argc)
return inner | [
"def",
"_eval_function_wrapper",
"(",
"func",
")",
":",
"def",
"inner",
"(",
"preds",
",",
"dataset",
")",
":",
"\"\"\"Call passed function with appropriate arguments.\"\"\"",
"labels",
"=",
"dataset",
".",
"get_label",
"(",
")",
"argc",
"=",
"argc_",
"(",
"func",
")",
"if",
"argc",
"==",
"2",
":",
"return",
"func",
"(",
"labels",
",",
"preds",
")",
"elif",
"argc",
"==",
"3",
":",
"return",
"func",
"(",
"labels",
",",
"preds",
",",
"dataset",
".",
"get_weight",
"(",
")",
")",
"elif",
"argc",
"==",
"4",
":",
"return",
"func",
"(",
"labels",
",",
"preds",
",",
"dataset",
".",
"get_weight",
"(",
")",
",",
"dataset",
".",
"get_group",
"(",
")",
")",
"else",
":",
"raise",
"TypeError",
"(",
"\"Self-defined eval function should have 2, 3 or 4 arguments, got %d\"",
"%",
"argc",
")",
"return",
"inner"
] | 39.54 | 23.7 |
def get_boundaries(bounddict_file, slitlet_number):
    """Read the bounddict json file and return the polynomial boundaries.

    Parameters
    ----------
    bounddict_file : file handler
        File containing the bounddict JSON data.
    slitlet_number : int
        Number of slitlet.

    Returns
    -------
    pol_lower_boundary : numpy polynomial
        Polynomial defining the lower boundary of the slitlet.
    pol_upper_boundary : numpy polynomial
        Polynomial defining the upper boundary of the slitlet.
    xmin_lower : float
        Minimum abscissae for the lower boundary.
    xmax_lower : float
        Maximum abscissae for the lower boundary.
    xmin_upper : float
        Minimum abscissae for the upper boundary.
    xmax_upper : float
        Maximum abscissae for the upper boundary.
    csu_bar_slit_center : float
        CSU bar slit center (in mm)

    Raises
    ------
    ValueError
        If more than one DATE-OBS entry exists for the requested slitlet.
    """
    # Use a context manager so the file descriptor is always released;
    # the previous open(...).read() left the handle dangling.
    with open(bounddict_file.name) as json_file:
        bounddict = json.load(json_file)
    # return values in case the requested slitlet number is not defined
    pol_lower_boundary = None
    pol_upper_boundary = None
    xmin_lower = None
    xmax_lower = None
    xmin_upper = None
    xmax_upper = None
    csu_bar_slit_center = None
    # search the slitlet number in bounddict
    slitlet_label = "slitlet" + str(slitlet_number).zfill(2)
    if slitlet_label in bounddict['contents'].keys():
        list_date_obs = list(bounddict['contents'][slitlet_label].keys())
        list_date_obs.sort()
        num_date_obs = len(list_date_obs)
        if num_date_obs == 1:
            date_obs = list_date_obs[0]
            tmp_dict = bounddict['contents'][slitlet_label][date_obs]
            pol_lower_boundary = Polynomial(tmp_dict['boundary_coef_lower'])
            pol_upper_boundary = Polynomial(tmp_dict['boundary_coef_upper'])
            xmin_lower = tmp_dict['boundary_xmin_lower']
            xmax_lower = tmp_dict['boundary_xmax_lower']
            xmin_upper = tmp_dict['boundary_xmin_upper']
            xmax_upper = tmp_dict['boundary_xmax_upper']
            csu_bar_slit_center = tmp_dict['csu_bar_slit_center']
        else:
            # The original passed a tuple of arguments to ValueError,
            # producing an unreadable message; build a single string.
            raise ValueError("num_date_obs = %d (must be 1)" % num_date_obs)
    else:
        print("WARNING: slitlet number " + str(slitlet_number) +
              " is not available in " + bounddict_file.name)
    # return result
    return pol_lower_boundary, pol_upper_boundary, \
        xmin_lower, xmax_lower, xmin_upper, xmax_upper, \
        csu_bar_slit_center
"def",
"get_boundaries",
"(",
"bounddict_file",
",",
"slitlet_number",
")",
":",
"bounddict",
"=",
"json",
".",
"loads",
"(",
"open",
"(",
"bounddict_file",
".",
"name",
")",
".",
"read",
"(",
")",
")",
"# return values in case the requested slitlet number is not defined",
"pol_lower_boundary",
"=",
"None",
"pol_upper_boundary",
"=",
"None",
"xmin_lower",
"=",
"None",
"xmax_lower",
"=",
"None",
"xmin_upper",
"=",
"None",
"xmax_upper",
"=",
"None",
"csu_bar_slit_center",
"=",
"None",
"# search the slitlet number in bounddict",
"slitlet_label",
"=",
"\"slitlet\"",
"+",
"str",
"(",
"slitlet_number",
")",
".",
"zfill",
"(",
"2",
")",
"if",
"slitlet_label",
"in",
"bounddict",
"[",
"'contents'",
"]",
".",
"keys",
"(",
")",
":",
"list_date_obs",
"=",
"list",
"(",
"bounddict",
"[",
"'contents'",
"]",
"[",
"slitlet_label",
"]",
".",
"keys",
"(",
")",
")",
"list_date_obs",
".",
"sort",
"(",
")",
"num_date_obs",
"=",
"len",
"(",
"list_date_obs",
")",
"if",
"num_date_obs",
"==",
"1",
":",
"date_obs",
"=",
"list_date_obs",
"[",
"0",
"]",
"tmp_dict",
"=",
"bounddict",
"[",
"'contents'",
"]",
"[",
"slitlet_label",
"]",
"[",
"date_obs",
"]",
"pol_lower_boundary",
"=",
"Polynomial",
"(",
"tmp_dict",
"[",
"'boundary_coef_lower'",
"]",
")",
"pol_upper_boundary",
"=",
"Polynomial",
"(",
"tmp_dict",
"[",
"'boundary_coef_upper'",
"]",
")",
"xmin_lower",
"=",
"tmp_dict",
"[",
"'boundary_xmin_lower'",
"]",
"xmax_lower",
"=",
"tmp_dict",
"[",
"'boundary_xmax_lower'",
"]",
"xmin_upper",
"=",
"tmp_dict",
"[",
"'boundary_xmin_upper'",
"]",
"xmax_upper",
"=",
"tmp_dict",
"[",
"'boundary_xmax_upper'",
"]",
"csu_bar_slit_center",
"=",
"tmp_dict",
"[",
"'csu_bar_slit_center'",
"]",
"else",
":",
"raise",
"ValueError",
"(",
"\"num_date_obs =\"",
",",
"num_date_obs",
",",
"\" (must be 1)\"",
")",
"else",
":",
"print",
"(",
"\"WARNING: slitlet number \"",
"+",
"str",
"(",
"slitlet_number",
")",
"+",
"\" is not available in \"",
"+",
"bounddict_file",
".",
"name",
")",
"# return result",
"return",
"pol_lower_boundary",
",",
"pol_upper_boundary",
",",
"xmin_lower",
",",
"xmax_lower",
",",
"xmin_upper",
",",
"xmax_upper",
",",
"csu_bar_slit_center"
] | 36.701493 | 18.61194 |
def set_mappings(cls, index_name, doc_type, mappings):
    """Store the mapped-items structure for this index/doc-type pair in the cache."""
    cache_key = cls.get_cache_item_name(index_name, doc_type)
    cache.set(cache_key, mappings)
"def",
"set_mappings",
"(",
"cls",
",",
"index_name",
",",
"doc_type",
",",
"mappings",
")",
":",
"cache",
".",
"set",
"(",
"cls",
".",
"get_cache_item_name",
"(",
"index_name",
",",
"doc_type",
")",
",",
"mappings",
")"
] | 61.666667 | 16 |
def get_salt(request):
    """
    Return the user password salt for the POSTed username.

    If the user doesn't exist — or the stored salt is missing or has the
    wrong length — a deterministic pseudo salt is returned instead, so the
    response does not reveal whether the username is valid.
    """
    try:
        username = request.POST["username"]
    except KeyError:
        # log.error("No 'username' in POST data?!?")
        return HttpResponseBadRequest()
    try:
        request.server_challenge = request.session[SERVER_CHALLENGE_KEY]
    except KeyError as err:
        # log.error("Can't get challenge from session: %s", err)
        return HttpResponseBadRequest()
    # log.debug("old challenge: %r", request.server_challenge)
    send_pseudo_salt = True
    form = UsernameForm(request, data=request.POST)
    if form.is_valid():
        send_pseudo_salt = False
        user_profile = form.user_profile
        init_pbkdf2_salt = user_profile.init_pbkdf2_salt
        if not init_pbkdf2_salt:
            # log.error("No init_pbkdf2_salt set in user profile!")
            # BUGFIX: the original fell through to len(init_pbkdf2_salt)
            # unconditionally, raising TypeError when the salt is None.
            send_pseudo_salt = True
        elif len(init_pbkdf2_salt) != app_settings.PBKDF2_SALT_LENGTH:
            # log.error("Salt for user %r has wrong length: %r" % (request.POST["username"], init_pbkdf2_salt))
            send_pseudo_salt = True
    # else:
    #     log.error("Salt Form is not valid: %r", form.errors)
    if send_pseudo_salt:
        # log.debug("\nUse pseudo salt!!!")
        init_pbkdf2_salt = crypt.get_pseudo_salt(app_settings.PBKDF2_SALT_LENGTH, username)
    response = HttpResponse(init_pbkdf2_salt, content_type="text/plain")
    if not send_pseudo_salt:
        response.add_duration = True  # collect duration time in @TimingAttackPreventer
    # log.debug("\nsend init_pbkdf2_salt %r to client.", init_pbkdf2_salt)
    return response
"def",
"get_salt",
"(",
"request",
")",
":",
"try",
":",
"username",
"=",
"request",
".",
"POST",
"[",
"\"username\"",
"]",
"except",
"KeyError",
":",
"# log.error(\"No 'username' in POST data?!?\")",
"return",
"HttpResponseBadRequest",
"(",
")",
"try",
":",
"request",
".",
"server_challenge",
"=",
"request",
".",
"session",
"[",
"SERVER_CHALLENGE_KEY",
"]",
"except",
"KeyError",
"as",
"err",
":",
"# log.error(\"Can't get challenge from session: %s\", err)",
"return",
"HttpResponseBadRequest",
"(",
")",
"# log.debug(\"old challenge: %r\", request.server_challenge)",
"send_pseudo_salt",
"=",
"True",
"form",
"=",
"UsernameForm",
"(",
"request",
",",
"data",
"=",
"request",
".",
"POST",
")",
"if",
"form",
".",
"is_valid",
"(",
")",
":",
"send_pseudo_salt",
"=",
"False",
"user_profile",
"=",
"form",
".",
"user_profile",
"init_pbkdf2_salt",
"=",
"user_profile",
".",
"init_pbkdf2_salt",
"if",
"not",
"init_pbkdf2_salt",
":",
"# log.error(\"No init_pbkdf2_salt set in user profile!\")",
"send_pseudo_salt",
"=",
"True",
"if",
"len",
"(",
"init_pbkdf2_salt",
")",
"!=",
"app_settings",
".",
"PBKDF2_SALT_LENGTH",
":",
"# log.error(\"Salt for user %r has wrong length: %r\" % (request.POST[\"username\"], init_pbkdf2_salt))",
"send_pseudo_salt",
"=",
"True",
"# else:",
"# log.error(\"Salt Form is not valid: %r\", form.errors)",
"if",
"send_pseudo_salt",
":",
"# log.debug(\"\\nUse pseudo salt!!!\")",
"init_pbkdf2_salt",
"=",
"crypt",
".",
"get_pseudo_salt",
"(",
"app_settings",
".",
"PBKDF2_SALT_LENGTH",
",",
"username",
")",
"response",
"=",
"HttpResponse",
"(",
"init_pbkdf2_salt",
",",
"content_type",
"=",
"\"text/plain\"",
")",
"if",
"not",
"send_pseudo_salt",
":",
"response",
".",
"add_duration",
"=",
"True",
"# collect duration time in @TimingAttackPreventer",
"# log.debug(\"\\nsend init_pbkdf2_salt %r to client.\", init_pbkdf2_salt)",
"return",
"response"
] | 34.680851 | 22.680851 |
def upload(self, cmd: str, meta: dict):
    """Push the current state of the registry to Git.

    Rewrites the index file from ``self.contents``, commits it with a
    message looked up from ``COMMIT_MESSAGES`` (optionally appending a
    DCO sign-off built from the global git config), and pushes the
    result to the remote's master branch.

    Parameters:
        cmd: Key into ``self.COMMIT_MESSAGES`` selecting the commit
            message template.
        meta: Values substituted into the selected message template.

    Raises:
        ValueError: If the local and remote heads still differ after the
            push (i.e. the push failed).
    """
    # Regenerate the index file from scratch: drop any stale copy first.
    index = os.path.join(self.cached_repo, self.INDEX_FILE)
    if os.path.exists(index):
        os.remove(index)
    self._log.info("Writing the new index.json ...")
    with open(index, "w") as _out:
        json.dump(self.contents, _out)
    git.add(self.cached_repo, [index])
    message = self.COMMIT_MESSAGES[cmd].format(**meta)
    if self.signoff:
        # Build the DCO sign-off from the user's global git identity.
        global_conf_path = os.path.expanduser("~/.gitconfig")
        if os.path.exists(global_conf_path):
            with open(global_conf_path, "br") as _in:
                conf = ConfigFile.from_file(_in)
            try:
                name = conf.get(b"user", b"name").decode()
                email = conf.get(b"user", b"email").decode()
                message += self.DCO_MESSAGE.format(name=name, email=email)
            except KeyError:
                # Identity incomplete: commit anyway, just without DCO.
                self._log.warning(
                    "Did not find name or email in %s, committing without DCO.",
                    global_conf_path)
        else:
            self._log.warning("Global git configuration file %s does not exist, "
                              "committing without DCO.", global_conf_path)
    else:
        self._log.info("Committing the index without DCO.")
    git.commit(self.cached_repo, message=message)
    self._log.info("Pushing the updated index ...")
    # TODO: change when https://github.com/dulwich/dulwich/issues/631 gets addressed
    git.push(self.cached_repo, self.remote_url, b"master")
    # dulwich does not report push failures directly (see TODO above),
    # so detect them by comparing local and remote heads afterwards.
    if self._are_local_and_remote_heads_different():
        self._log.error("Push has failed")
        raise ValueError("Push has failed")
"def",
"upload",
"(",
"self",
",",
"cmd",
":",
"str",
",",
"meta",
":",
"dict",
")",
":",
"index",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"cached_repo",
",",
"self",
".",
"INDEX_FILE",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"index",
")",
":",
"os",
".",
"remove",
"(",
"index",
")",
"self",
".",
"_log",
".",
"info",
"(",
"\"Writing the new index.json ...\"",
")",
"with",
"open",
"(",
"index",
",",
"\"w\"",
")",
"as",
"_out",
":",
"json",
".",
"dump",
"(",
"self",
".",
"contents",
",",
"_out",
")",
"git",
".",
"add",
"(",
"self",
".",
"cached_repo",
",",
"[",
"index",
"]",
")",
"message",
"=",
"self",
".",
"COMMIT_MESSAGES",
"[",
"cmd",
"]",
".",
"format",
"(",
"*",
"*",
"meta",
")",
"if",
"self",
".",
"signoff",
":",
"global_conf_path",
"=",
"os",
".",
"path",
".",
"expanduser",
"(",
"\"~/.gitconfig\"",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"global_conf_path",
")",
":",
"with",
"open",
"(",
"global_conf_path",
",",
"\"br\"",
")",
"as",
"_in",
":",
"conf",
"=",
"ConfigFile",
".",
"from_file",
"(",
"_in",
")",
"try",
":",
"name",
"=",
"conf",
".",
"get",
"(",
"b\"user\"",
",",
"b\"name\"",
")",
".",
"decode",
"(",
")",
"email",
"=",
"conf",
".",
"get",
"(",
"b\"user\"",
",",
"b\"email\"",
")",
".",
"decode",
"(",
")",
"message",
"+=",
"self",
".",
"DCO_MESSAGE",
".",
"format",
"(",
"name",
"=",
"name",
",",
"email",
"=",
"email",
")",
"except",
"KeyError",
":",
"self",
".",
"_log",
".",
"warning",
"(",
"\"Did not find name or email in %s, committing without DCO.\"",
",",
"global_conf_path",
")",
"else",
":",
"self",
".",
"_log",
".",
"warning",
"(",
"\"Global git configuration file %s does not exist, \"",
"\"committing without DCO.\"",
",",
"global_conf_path",
")",
"else",
":",
"self",
".",
"_log",
".",
"info",
"(",
"\"Committing the index without DCO.\"",
")",
"git",
".",
"commit",
"(",
"self",
".",
"cached_repo",
",",
"message",
"=",
"message",
")",
"self",
".",
"_log",
".",
"info",
"(",
"\"Pushing the updated index ...\"",
")",
"# TODO: change when https://github.com/dulwich/dulwich/issues/631 gets addressed",
"git",
".",
"push",
"(",
"self",
".",
"cached_repo",
",",
"self",
".",
"remote_url",
",",
"b\"master\"",
")",
"if",
"self",
".",
"_are_local_and_remote_heads_different",
"(",
")",
":",
"self",
".",
"_log",
".",
"error",
"(",
"\"Push has failed\"",
")",
"raise",
"ValueError",
"(",
"\"Push has failed\"",
")"
] | 51.971429 | 17.571429 |
def _get_request_url(self, instance, url):
"""
Get the request address, build with proxy if necessary
"""
parsed = urlparse(url)
_url = url
if not (parsed.netloc and parsed.scheme) and is_affirmative(instance.get('spark_proxy_enabled', False)):
master_address = self._get_master_address(instance)
_url = urljoin(master_address, parsed.path)
self.log.debug('Request URL returned: %s', _url)
return _url | [
"def",
"_get_request_url",
"(",
"self",
",",
"instance",
",",
"url",
")",
":",
"parsed",
"=",
"urlparse",
"(",
"url",
")",
"_url",
"=",
"url",
"if",
"not",
"(",
"parsed",
".",
"netloc",
"and",
"parsed",
".",
"scheme",
")",
"and",
"is_affirmative",
"(",
"instance",
".",
"get",
"(",
"'spark_proxy_enabled'",
",",
"False",
")",
")",
":",
"master_address",
"=",
"self",
".",
"_get_master_address",
"(",
"instance",
")",
"_url",
"=",
"urljoin",
"(",
"master_address",
",",
"parsed",
".",
"path",
")",
"self",
".",
"log",
".",
"debug",
"(",
"'Request URL returned: %s'",
",",
"_url",
")",
"return",
"_url"
] | 36.846154 | 21.769231 |
def run(self, args=None):
"""Applicatin starting point.
This will run the associated method/function/module or print a help
list if it's an unknown keyword or the syntax is incorrect.
Keyword arguments:
args -- Custom application arguments (default sys.argv)
"""
# TODO: Add tests to how command line arguments are passed in
raw_args = self.__parser.parse_args(args=args)
args = vars(raw_args)
cmd = args.pop('cmd')
if hasattr(cmd, '__call__'):
cmd(**args) | [
"def",
"run",
"(",
"self",
",",
"args",
"=",
"None",
")",
":",
"# TODO: Add tests to how command line arguments are passed in",
"raw_args",
"=",
"self",
".",
"__parser",
".",
"parse_args",
"(",
"args",
"=",
"args",
")",
"args",
"=",
"vars",
"(",
"raw_args",
")",
"cmd",
"=",
"args",
".",
"pop",
"(",
"'cmd'",
")",
"if",
"hasattr",
"(",
"cmd",
",",
"'__call__'",
")",
":",
"cmd",
"(",
"*",
"*",
"args",
")"
] | 34 | 20 |
def _note_local_option(self, option, state):
"""Record the status of local negotiated Telnet options."""
if not self.telnet_opt_dict.has_key(option):
self.telnet_opt_dict[option] = TelnetOption()
self.telnet_opt_dict[option].local_option = state | [
"def",
"_note_local_option",
"(",
"self",
",",
"option",
",",
"state",
")",
":",
"if",
"not",
"self",
".",
"telnet_opt_dict",
".",
"has_key",
"(",
"option",
")",
":",
"self",
".",
"telnet_opt_dict",
"[",
"option",
"]",
"=",
"TelnetOption",
"(",
")",
"self",
".",
"telnet_opt_dict",
"[",
"option",
"]",
".",
"local_option",
"=",
"state"
] | 55.4 | 10 |
def normalize_sort(sort=None):
"""
CONVERT SORT PARAMETERS TO A NORMAL FORM SO EASIER TO USE
"""
if not sort:
return Null
output = FlatList()
for s in listwrap(sort):
if is_text(s) or mo_math.is_integer(s):
output.append({"value": s, "sort": 1})
elif not s.field and not s.value and s.sort==None:
#ASSUME {name: sort} FORM
for n, v in s.items():
output.append({"value": n, "sort": sort_direction[v]})
else:
output.append({"value": coalesce(s.field, s.value), "sort": coalesce(sort_direction[s.sort], 1)})
return wrap(output) | [
"def",
"normalize_sort",
"(",
"sort",
"=",
"None",
")",
":",
"if",
"not",
"sort",
":",
"return",
"Null",
"output",
"=",
"FlatList",
"(",
")",
"for",
"s",
"in",
"listwrap",
"(",
"sort",
")",
":",
"if",
"is_text",
"(",
"s",
")",
"or",
"mo_math",
".",
"is_integer",
"(",
"s",
")",
":",
"output",
".",
"append",
"(",
"{",
"\"value\"",
":",
"s",
",",
"\"sort\"",
":",
"1",
"}",
")",
"elif",
"not",
"s",
".",
"field",
"and",
"not",
"s",
".",
"value",
"and",
"s",
".",
"sort",
"==",
"None",
":",
"#ASSUME {name: sort} FORM",
"for",
"n",
",",
"v",
"in",
"s",
".",
"items",
"(",
")",
":",
"output",
".",
"append",
"(",
"{",
"\"value\"",
":",
"n",
",",
"\"sort\"",
":",
"sort_direction",
"[",
"v",
"]",
"}",
")",
"else",
":",
"output",
".",
"append",
"(",
"{",
"\"value\"",
":",
"coalesce",
"(",
"s",
".",
"field",
",",
"s",
".",
"value",
")",
",",
"\"sort\"",
":",
"coalesce",
"(",
"sort_direction",
"[",
"s",
".",
"sort",
"]",
",",
"1",
")",
"}",
")",
"return",
"wrap",
"(",
"output",
")"
] | 33.263158 | 19.578947 |
def enable_debug(self):
"""Open the debug interface on the connected device."""
if not self.connected:
raise HardwareError("Cannot enable debug if we are not in a connected state")
self._loop.run_coroutine(self.adapter.open_interface(0, 'debug')) | [
"def",
"enable_debug",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"connected",
":",
"raise",
"HardwareError",
"(",
"\"Cannot enable debug if we are not in a connected state\"",
")",
"self",
".",
"_loop",
".",
"run_coroutine",
"(",
"self",
".",
"adapter",
".",
"open_interface",
"(",
"0",
",",
"'debug'",
")",
")"
] | 39.714286 | 27 |
def calc_conf_intervals(self,
conf_percentage,
interval_type='all',
init_vals=None,
epsilon=abc.EPSILON,
**fit_kwargs):
"""
Calculates percentile, bias-corrected and accelerated, and approximate
bootstrap confidence intervals.
Parameters
----------
conf_percentage : scalar in the interval (0.0, 100.0).
Denotes the confidence-level for the returned endpoints. For
instance, to calculate a 95% confidence interval, pass `95`.
interval_type : str in {'all', 'pi', 'bca', 'abc'}, optional.
Denotes the type of confidence intervals that should be calculated.
'all' results in all types of confidence intervals being
calculated. 'pi' means 'percentile intervals', 'bca' means
'bias-corrected and accelerated', and 'abc' means 'approximate
bootstrap confidence' intervals. Default == 'all'.
init_vals : 1D ndarray.
The initial values used to estimate the one's choice model.
epsilon : positive float, optional.
Should denote the 'very small' value being used to calculate the
desired finite difference approximations to the various influence
functions for the 'abc' intervals. Should be close to zero.
Default == sys.float_info.epsilon.
fit_kwargs : additional keyword arguments, optional.
Should contain any additional kwargs used to alter the default
behavior of `model_obj.fit_mle` and thereby enforce conformity with
how the MLE was obtained. Will be passed directly to
`model_obj.fit_mle` when calculating the 'abc' intervals.
Returns
-------
None. Will store the confidence intervals on their respective model
objects: `self.percentile_interval`, `self.bca_interval`,
`self.abc_interval`, or all of these objects.
"""
if interval_type == 'pi':
self.calc_percentile_interval(conf_percentage)
elif interval_type == 'bca':
self.calc_bca_interval(conf_percentage)
elif interval_type == 'abc':
self.calc_abc_interval(conf_percentage,
init_vals,
epsilon=epsilon,
**fit_kwargs)
elif interval_type == 'all':
print("Calculating Percentile Confidence Intervals")
sys.stdout.flush()
self.calc_percentile_interval(conf_percentage)
print("Calculating BCa Confidence Intervals")
sys.stdout.flush()
self.calc_bca_interval(conf_percentage)
# Note we don't print a user message here since that is done in
# self.calc_abc_interval().
self.calc_abc_interval(conf_percentage,
init_vals,
epsilon=epsilon,
**fit_kwargs)
# Get the alpha % for the given confidence percentage.
alpha = bc.get_alpha_from_conf_percentage(conf_percentage)
# Get lists of the interval type names and the endpoint names
interval_type_names = ['percentile_interval',
'BCa_interval',
'ABC_interval']
endpoint_names = ['{:.3g}%'.format(alpha / 2.0),
'{:.3g}%'.format(100 - alpha / 2.0)]
# Create the column names for the dataframe of confidence intervals
multi_index_names =\
list(itertools.product(interval_type_names, endpoint_names))
df_column_index = pd.MultiIndex.from_tuples(multi_index_names)
# Create the dataframe containing all confidence intervals
self.all_intervals = pd.concat([self.percentile_interval,
self.bca_interval,
self.abc_interval],
axis=1,
ignore_index=True)
# Store the column names for the combined confidence intervals
self.all_intervals.columns = df_column_index
self.all_intervals.index = self.mle_params.index
else:
msg =\
"interval_type MUST be in `['pi', 'bca', 'abc', 'all']`"
raise ValueError(msg)
return None | [
"def",
"calc_conf_intervals",
"(",
"self",
",",
"conf_percentage",
",",
"interval_type",
"=",
"'all'",
",",
"init_vals",
"=",
"None",
",",
"epsilon",
"=",
"abc",
".",
"EPSILON",
",",
"*",
"*",
"fit_kwargs",
")",
":",
"if",
"interval_type",
"==",
"'pi'",
":",
"self",
".",
"calc_percentile_interval",
"(",
"conf_percentage",
")",
"elif",
"interval_type",
"==",
"'bca'",
":",
"self",
".",
"calc_bca_interval",
"(",
"conf_percentage",
")",
"elif",
"interval_type",
"==",
"'abc'",
":",
"self",
".",
"calc_abc_interval",
"(",
"conf_percentage",
",",
"init_vals",
",",
"epsilon",
"=",
"epsilon",
",",
"*",
"*",
"fit_kwargs",
")",
"elif",
"interval_type",
"==",
"'all'",
":",
"print",
"(",
"\"Calculating Percentile Confidence Intervals\"",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"self",
".",
"calc_percentile_interval",
"(",
"conf_percentage",
")",
"print",
"(",
"\"Calculating BCa Confidence Intervals\"",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"self",
".",
"calc_bca_interval",
"(",
"conf_percentage",
")",
"# Note we don't print a user message here since that is done in",
"# self.calc_abc_interval().",
"self",
".",
"calc_abc_interval",
"(",
"conf_percentage",
",",
"init_vals",
",",
"epsilon",
"=",
"epsilon",
",",
"*",
"*",
"fit_kwargs",
")",
"# Get the alpha % for the given confidence percentage.",
"alpha",
"=",
"bc",
".",
"get_alpha_from_conf_percentage",
"(",
"conf_percentage",
")",
"# Get lists of the interval type names and the endpoint names",
"interval_type_names",
"=",
"[",
"'percentile_interval'",
",",
"'BCa_interval'",
",",
"'ABC_interval'",
"]",
"endpoint_names",
"=",
"[",
"'{:.3g}%'",
".",
"format",
"(",
"alpha",
"/",
"2.0",
")",
",",
"'{:.3g}%'",
".",
"format",
"(",
"100",
"-",
"alpha",
"/",
"2.0",
")",
"]",
"# Create the column names for the dataframe of confidence intervals",
"multi_index_names",
"=",
"list",
"(",
"itertools",
".",
"product",
"(",
"interval_type_names",
",",
"endpoint_names",
")",
")",
"df_column_index",
"=",
"pd",
".",
"MultiIndex",
".",
"from_tuples",
"(",
"multi_index_names",
")",
"# Create the dataframe containing all confidence intervals",
"self",
".",
"all_intervals",
"=",
"pd",
".",
"concat",
"(",
"[",
"self",
".",
"percentile_interval",
",",
"self",
".",
"bca_interval",
",",
"self",
".",
"abc_interval",
"]",
",",
"axis",
"=",
"1",
",",
"ignore_index",
"=",
"True",
")",
"# Store the column names for the combined confidence intervals",
"self",
".",
"all_intervals",
".",
"columns",
"=",
"df_column_index",
"self",
".",
"all_intervals",
".",
"index",
"=",
"self",
".",
"mle_params",
".",
"index",
"else",
":",
"msg",
"=",
"\"interval_type MUST be in `['pi', 'bca', 'abc', 'all']`\"",
"raise",
"ValueError",
"(",
"msg",
")",
"return",
"None"
] | 50.644444 | 20.177778 |
def _proc_member(self, tarfile):
"""Choose the right processing method depending on
the type and call it.
"""
if self.type in (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK):
return self._proc_gnulong(tarfile)
elif self.type == GNUTYPE_SPARSE:
return self._proc_sparse(tarfile)
elif self.type in (XHDTYPE, XGLTYPE, SOLARIS_XHDTYPE):
return self._proc_pax(tarfile)
else:
return self._proc_builtin(tarfile) | [
"def",
"_proc_member",
"(",
"self",
",",
"tarfile",
")",
":",
"if",
"self",
".",
"type",
"in",
"(",
"GNUTYPE_LONGNAME",
",",
"GNUTYPE_LONGLINK",
")",
":",
"return",
"self",
".",
"_proc_gnulong",
"(",
"tarfile",
")",
"elif",
"self",
".",
"type",
"==",
"GNUTYPE_SPARSE",
":",
"return",
"self",
".",
"_proc_sparse",
"(",
"tarfile",
")",
"elif",
"self",
".",
"type",
"in",
"(",
"XHDTYPE",
",",
"XGLTYPE",
",",
"SOLARIS_XHDTYPE",
")",
":",
"return",
"self",
".",
"_proc_pax",
"(",
"tarfile",
")",
"else",
":",
"return",
"self",
".",
"_proc_builtin",
"(",
"tarfile",
")"
] | 40.75 | 8.833333 |
def _put(self, rtracker):
"""
Put a resource back in the queue.
:param rtracker: A resource.
:type rtracker: :class:`_ResourceTracker`
:raises PoolFullError: If pool is full.
:raises UnknownResourceError: If resource can't be found.
"""
with self._lock:
if self._available < self.capacity:
for i in self._unavailable_range():
if self._reference_queue[i] is rtracker:
# i retains its value and will be used to swap with
# first "empty" space in queue.
break
else:
raise UnknownResourceError
j = self._resource_end
rq = self._reference_queue
rq[i], rq[j] = rq[j], rq[i]
self._resource_end = (self._resource_end + 1) % self.maxsize
self._available += 1
self._not_empty.notify()
else:
raise PoolFullError | [
"def",
"_put",
"(",
"self",
",",
"rtracker",
")",
":",
"with",
"self",
".",
"_lock",
":",
"if",
"self",
".",
"_available",
"<",
"self",
".",
"capacity",
":",
"for",
"i",
"in",
"self",
".",
"_unavailable_range",
"(",
")",
":",
"if",
"self",
".",
"_reference_queue",
"[",
"i",
"]",
"is",
"rtracker",
":",
"# i retains its value and will be used to swap with",
"# first \"empty\" space in queue.",
"break",
"else",
":",
"raise",
"UnknownResourceError",
"j",
"=",
"self",
".",
"_resource_end",
"rq",
"=",
"self",
".",
"_reference_queue",
"rq",
"[",
"i",
"]",
",",
"rq",
"[",
"j",
"]",
"=",
"rq",
"[",
"j",
"]",
",",
"rq",
"[",
"i",
"]",
"self",
".",
"_resource_end",
"=",
"(",
"self",
".",
"_resource_end",
"+",
"1",
")",
"%",
"self",
".",
"maxsize",
"self",
".",
"_available",
"+=",
"1",
"self",
".",
"_not_empty",
".",
"notify",
"(",
")",
"else",
":",
"raise",
"PoolFullError"
] | 34 | 15.866667 |
def cmd_link_ports(self):
'''show available ports'''
ports = mavutil.auto_detect_serial(preferred_list=['*FTDI*',"*Arduino_Mega_2560*", "*3D_Robotics*", "*USB_to_UART*", '*PX4*', '*FMU*'])
for p in ports:
print("%s : %s : %s" % (p.device, p.description, p.hwid)) | [
"def",
"cmd_link_ports",
"(",
"self",
")",
":",
"ports",
"=",
"mavutil",
".",
"auto_detect_serial",
"(",
"preferred_list",
"=",
"[",
"'*FTDI*'",
",",
"\"*Arduino_Mega_2560*\"",
",",
"\"*3D_Robotics*\"",
",",
"\"*USB_to_UART*\"",
",",
"'*PX4*'",
",",
"'*FMU*'",
"]",
")",
"for",
"p",
"in",
"ports",
":",
"print",
"(",
"\"%s : %s : %s\"",
"%",
"(",
"p",
".",
"device",
",",
"p",
".",
"description",
",",
"p",
".",
"hwid",
")",
")"
] | 58.8 | 34 |
def generate(env):
"""Add Builders and construction variables for qt to an Environment."""
CLVar = SCons.Util.CLVar
Action = SCons.Action.Action
Builder = SCons.Builder.Builder
env.SetDefault(QTDIR = _detect(env),
QT_BINPATH = os.path.join('$QTDIR', 'bin'),
QT_CPPPATH = os.path.join('$QTDIR', 'include'),
QT_LIBPATH = os.path.join('$QTDIR', 'lib'),
QT_MOC = os.path.join('$QT_BINPATH','moc'),
QT_UIC = os.path.join('$QT_BINPATH','uic'),
QT_LIB = 'qt', # may be set to qt-mt
QT_AUTOSCAN = 1, # scan for moc'able sources
# Some QT specific flags. I don't expect someone wants to
# manipulate those ...
QT_UICIMPLFLAGS = CLVar(''),
QT_UICDECLFLAGS = CLVar(''),
QT_MOCFROMHFLAGS = CLVar(''),
QT_MOCFROMCXXFLAGS = CLVar('-i'),
# suffixes/prefixes for the headers / sources to generate
QT_UICDECLPREFIX = '',
QT_UICDECLSUFFIX = '.h',
QT_UICIMPLPREFIX = 'uic_',
QT_UICIMPLSUFFIX = '$CXXFILESUFFIX',
QT_MOCHPREFIX = 'moc_',
QT_MOCHSUFFIX = '$CXXFILESUFFIX',
QT_MOCCXXPREFIX = '',
QT_MOCCXXSUFFIX = '.moc',
QT_UISUFFIX = '.ui',
# Commands for the qt support ...
# command to generate header, implementation and moc-file
# from a .ui file
QT_UICCOM = [
CLVar('$QT_UIC $QT_UICDECLFLAGS -o ${TARGETS[0]} $SOURCE'),
CLVar('$QT_UIC $QT_UICIMPLFLAGS -impl ${TARGETS[0].file} '
'-o ${TARGETS[1]} $SOURCE'),
CLVar('$QT_MOC $QT_MOCFROMHFLAGS -o ${TARGETS[2]} ${TARGETS[0]}')],
# command to generate meta object information for a class
# declarated in a header
QT_MOCFROMHCOM = (
'$QT_MOC $QT_MOCFROMHFLAGS -o ${TARGETS[0]} $SOURCE'),
# command to generate meta object information for a class
# declarated in a cpp file
QT_MOCFROMCXXCOM = [
CLVar('$QT_MOC $QT_MOCFROMCXXFLAGS -o ${TARGETS[0]} $SOURCE'),
Action(checkMocIncluded,None)])
# ... and the corresponding builders
uicBld = Builder(action=SCons.Action.Action('$QT_UICCOM', '$QT_UICCOMSTR'),
emitter=uicEmitter,
src_suffix='$QT_UISUFFIX',
suffix='$QT_UICDECLSUFFIX',
prefix='$QT_UICDECLPREFIX',
source_scanner=uicScanner)
mocBld = Builder(action={}, prefix={}, suffix={})
for h in header_extensions:
act = SCons.Action.Action('$QT_MOCFROMHCOM', '$QT_MOCFROMHCOMSTR')
mocBld.add_action(h, act)
mocBld.prefix[h] = '$QT_MOCHPREFIX'
mocBld.suffix[h] = '$QT_MOCHSUFFIX'
for cxx in cxx_suffixes:
act = SCons.Action.Action('$QT_MOCFROMCXXCOM', '$QT_MOCFROMCXXCOMSTR')
mocBld.add_action(cxx, act)
mocBld.prefix[cxx] = '$QT_MOCCXXPREFIX'
mocBld.suffix[cxx] = '$QT_MOCCXXSUFFIX'
# register the builders
env['BUILDERS']['Uic'] = uicBld
env['BUILDERS']['Moc'] = mocBld
static_obj, shared_obj = SCons.Tool.createObjBuilders(env)
static_obj.add_src_builder('Uic')
shared_obj.add_src_builder('Uic')
# We use the emitters of Program / StaticLibrary / SharedLibrary
# to scan for moc'able files
# We can't refer to the builders directly, we have to fetch them
# as Environment attributes because that sets them up to be called
# correctly later by our emitter.
env.AppendUnique(PROGEMITTER =[AutomocStatic],
SHLIBEMITTER=[AutomocShared],
LDMODULEEMITTER=[AutomocShared],
LIBEMITTER =[AutomocStatic],
# Of course, we need to link against the qt libraries
CPPPATH=["$QT_CPPPATH"],
LIBPATH=["$QT_LIBPATH"],
LIBS=['$QT_LIB']) | [
"def",
"generate",
"(",
"env",
")",
":",
"CLVar",
"=",
"SCons",
".",
"Util",
".",
"CLVar",
"Action",
"=",
"SCons",
".",
"Action",
".",
"Action",
"Builder",
"=",
"SCons",
".",
"Builder",
".",
"Builder",
"env",
".",
"SetDefault",
"(",
"QTDIR",
"=",
"_detect",
"(",
"env",
")",
",",
"QT_BINPATH",
"=",
"os",
".",
"path",
".",
"join",
"(",
"'$QTDIR'",
",",
"'bin'",
")",
",",
"QT_CPPPATH",
"=",
"os",
".",
"path",
".",
"join",
"(",
"'$QTDIR'",
",",
"'include'",
")",
",",
"QT_LIBPATH",
"=",
"os",
".",
"path",
".",
"join",
"(",
"'$QTDIR'",
",",
"'lib'",
")",
",",
"QT_MOC",
"=",
"os",
".",
"path",
".",
"join",
"(",
"'$QT_BINPATH'",
",",
"'moc'",
")",
",",
"QT_UIC",
"=",
"os",
".",
"path",
".",
"join",
"(",
"'$QT_BINPATH'",
",",
"'uic'",
")",
",",
"QT_LIB",
"=",
"'qt'",
",",
"# may be set to qt-mt",
"QT_AUTOSCAN",
"=",
"1",
",",
"# scan for moc'able sources",
"# Some QT specific flags. I don't expect someone wants to",
"# manipulate those ...",
"QT_UICIMPLFLAGS",
"=",
"CLVar",
"(",
"''",
")",
",",
"QT_UICDECLFLAGS",
"=",
"CLVar",
"(",
"''",
")",
",",
"QT_MOCFROMHFLAGS",
"=",
"CLVar",
"(",
"''",
")",
",",
"QT_MOCFROMCXXFLAGS",
"=",
"CLVar",
"(",
"'-i'",
")",
",",
"# suffixes/prefixes for the headers / sources to generate",
"QT_UICDECLPREFIX",
"=",
"''",
",",
"QT_UICDECLSUFFIX",
"=",
"'.h'",
",",
"QT_UICIMPLPREFIX",
"=",
"'uic_'",
",",
"QT_UICIMPLSUFFIX",
"=",
"'$CXXFILESUFFIX'",
",",
"QT_MOCHPREFIX",
"=",
"'moc_'",
",",
"QT_MOCHSUFFIX",
"=",
"'$CXXFILESUFFIX'",
",",
"QT_MOCCXXPREFIX",
"=",
"''",
",",
"QT_MOCCXXSUFFIX",
"=",
"'.moc'",
",",
"QT_UISUFFIX",
"=",
"'.ui'",
",",
"# Commands for the qt support ...",
"# command to generate header, implementation and moc-file",
"# from a .ui file",
"QT_UICCOM",
"=",
"[",
"CLVar",
"(",
"'$QT_UIC $QT_UICDECLFLAGS -o ${TARGETS[0]} $SOURCE'",
")",
",",
"CLVar",
"(",
"'$QT_UIC $QT_UICIMPLFLAGS -impl ${TARGETS[0].file} '",
"'-o ${TARGETS[1]} $SOURCE'",
")",
",",
"CLVar",
"(",
"'$QT_MOC $QT_MOCFROMHFLAGS -o ${TARGETS[2]} ${TARGETS[0]}'",
")",
"]",
",",
"# command to generate meta object information for a class",
"# declarated in a header",
"QT_MOCFROMHCOM",
"=",
"(",
"'$QT_MOC $QT_MOCFROMHFLAGS -o ${TARGETS[0]} $SOURCE'",
")",
",",
"# command to generate meta object information for a class",
"# declarated in a cpp file",
"QT_MOCFROMCXXCOM",
"=",
"[",
"CLVar",
"(",
"'$QT_MOC $QT_MOCFROMCXXFLAGS -o ${TARGETS[0]} $SOURCE'",
")",
",",
"Action",
"(",
"checkMocIncluded",
",",
"None",
")",
"]",
")",
"# ... and the corresponding builders",
"uicBld",
"=",
"Builder",
"(",
"action",
"=",
"SCons",
".",
"Action",
".",
"Action",
"(",
"'$QT_UICCOM'",
",",
"'$QT_UICCOMSTR'",
")",
",",
"emitter",
"=",
"uicEmitter",
",",
"src_suffix",
"=",
"'$QT_UISUFFIX'",
",",
"suffix",
"=",
"'$QT_UICDECLSUFFIX'",
",",
"prefix",
"=",
"'$QT_UICDECLPREFIX'",
",",
"source_scanner",
"=",
"uicScanner",
")",
"mocBld",
"=",
"Builder",
"(",
"action",
"=",
"{",
"}",
",",
"prefix",
"=",
"{",
"}",
",",
"suffix",
"=",
"{",
"}",
")",
"for",
"h",
"in",
"header_extensions",
":",
"act",
"=",
"SCons",
".",
"Action",
".",
"Action",
"(",
"'$QT_MOCFROMHCOM'",
",",
"'$QT_MOCFROMHCOMSTR'",
")",
"mocBld",
".",
"add_action",
"(",
"h",
",",
"act",
")",
"mocBld",
".",
"prefix",
"[",
"h",
"]",
"=",
"'$QT_MOCHPREFIX'",
"mocBld",
".",
"suffix",
"[",
"h",
"]",
"=",
"'$QT_MOCHSUFFIX'",
"for",
"cxx",
"in",
"cxx_suffixes",
":",
"act",
"=",
"SCons",
".",
"Action",
".",
"Action",
"(",
"'$QT_MOCFROMCXXCOM'",
",",
"'$QT_MOCFROMCXXCOMSTR'",
")",
"mocBld",
".",
"add_action",
"(",
"cxx",
",",
"act",
")",
"mocBld",
".",
"prefix",
"[",
"cxx",
"]",
"=",
"'$QT_MOCCXXPREFIX'",
"mocBld",
".",
"suffix",
"[",
"cxx",
"]",
"=",
"'$QT_MOCCXXSUFFIX'",
"# register the builders ",
"env",
"[",
"'BUILDERS'",
"]",
"[",
"'Uic'",
"]",
"=",
"uicBld",
"env",
"[",
"'BUILDERS'",
"]",
"[",
"'Moc'",
"]",
"=",
"mocBld",
"static_obj",
",",
"shared_obj",
"=",
"SCons",
".",
"Tool",
".",
"createObjBuilders",
"(",
"env",
")",
"static_obj",
".",
"add_src_builder",
"(",
"'Uic'",
")",
"shared_obj",
".",
"add_src_builder",
"(",
"'Uic'",
")",
"# We use the emitters of Program / StaticLibrary / SharedLibrary",
"# to scan for moc'able files",
"# We can't refer to the builders directly, we have to fetch them",
"# as Environment attributes because that sets them up to be called",
"# correctly later by our emitter.",
"env",
".",
"AppendUnique",
"(",
"PROGEMITTER",
"=",
"[",
"AutomocStatic",
"]",
",",
"SHLIBEMITTER",
"=",
"[",
"AutomocShared",
"]",
",",
"LDMODULEEMITTER",
"=",
"[",
"AutomocShared",
"]",
",",
"LIBEMITTER",
"=",
"[",
"AutomocStatic",
"]",
",",
"# Of course, we need to link against the qt libraries",
"CPPPATH",
"=",
"[",
"\"$QT_CPPPATH\"",
"]",
",",
"LIBPATH",
"=",
"[",
"\"$QT_LIBPATH\"",
"]",
",",
"LIBS",
"=",
"[",
"'$QT_LIB'",
"]",
")"
] | 46.67033 | 16.351648 |
def _conform_pair(self, pair):
"""Force a given key/value pair into a certain form.
Override the _conform_key and _conform_value if you want to change
the mapping behaviour.
"""
pair = tuple(pair)
if len(pair) != 2:
raise ValueError('MultiMap element must have length 2')
return (self._conform_key(pair[0]), self._conform_value(pair[1])) | [
"def",
"_conform_pair",
"(",
"self",
",",
"pair",
")",
":",
"pair",
"=",
"tuple",
"(",
"pair",
")",
"if",
"len",
"(",
"pair",
")",
"!=",
"2",
":",
"raise",
"ValueError",
"(",
"'MultiMap element must have length 2'",
")",
"return",
"(",
"self",
".",
"_conform_key",
"(",
"pair",
"[",
"0",
"]",
")",
",",
"self",
".",
"_conform_value",
"(",
"pair",
"[",
"1",
"]",
")",
")"
] | 37.545455 | 18.727273 |
def counter(self, key, value, timestamp=None):
"""Set a counter value
If the inner key does not exist is is created
:param key: counter to update
:type key: str
:param value: counter value
:type value: float
:return: An alignak_stat brok if broks are enabled else None
"""
_min, _max, count, _sum = self.stats.get(key, (None, None, 0, 0))
count += 1
_sum += value
if _min is None or value < _min:
_min = value
if _max is None or value > _max:
_max = value
self.stats[key] = (_min, _max, count, _sum)
# Manage local statsd part
if self.statsd_enabled and self.statsd_sock:
# beware, we are sending ms here, timer is in seconds
packet = '%s.%s.%s:%d|c' % (self.statsd_prefix, self.name, key, value)
packet = packet.encode('utf-8')
try:
self.statsd_sock.sendto(packet, self.statsd_addr)
except (socket.error, socket.gaierror):
pass
# cannot send? ok not a huge problem here and we cannot
# log because it will be far too verbose :p
# Manage Graphite part
if self.statsd_enabled and self.carbon:
self.send_to_graphite(key, value, timestamp=timestamp)
# Manage file part
if self.statsd_enabled and self.file_d:
if timestamp is None:
timestamp = int(time.time())
packet = self.line_fmt
if not self.date_fmt:
date = "%s" % timestamp
else:
date = datetime.datetime.fromtimestamp(timestamp).strftime(self.date_fmt)
packet = packet.replace("#date#", date)
packet = packet.replace("#counter#", '%s.%s.%s' % (self.statsd_prefix, self.name, key))
packet = packet.replace("#value#", '%d' % value)
packet = packet.replace("#uom#", 'c')
try:
self.file_d.write(packet)
except IOError:
logger.warning("Could not write to the file: %s", packet)
if self.broks_enabled:
logger.debug("alignak stat brok: %s = %s", key, value)
if timestamp is None:
timestamp = int(time.time())
return Brok({'type': 'alignak_stat',
'data': {
'ts': timestamp,
'type': 'counter',
'metric': '%s.%s.%s' % (self.statsd_prefix, self.name, key),
'value': value,
'uom': 'c'
}})
return None | [
"def",
"counter",
"(",
"self",
",",
"key",
",",
"value",
",",
"timestamp",
"=",
"None",
")",
":",
"_min",
",",
"_max",
",",
"count",
",",
"_sum",
"=",
"self",
".",
"stats",
".",
"get",
"(",
"key",
",",
"(",
"None",
",",
"None",
",",
"0",
",",
"0",
")",
")",
"count",
"+=",
"1",
"_sum",
"+=",
"value",
"if",
"_min",
"is",
"None",
"or",
"value",
"<",
"_min",
":",
"_min",
"=",
"value",
"if",
"_max",
"is",
"None",
"or",
"value",
">",
"_max",
":",
"_max",
"=",
"value",
"self",
".",
"stats",
"[",
"key",
"]",
"=",
"(",
"_min",
",",
"_max",
",",
"count",
",",
"_sum",
")",
"# Manage local statsd part",
"if",
"self",
".",
"statsd_enabled",
"and",
"self",
".",
"statsd_sock",
":",
"# beware, we are sending ms here, timer is in seconds",
"packet",
"=",
"'%s.%s.%s:%d|c'",
"%",
"(",
"self",
".",
"statsd_prefix",
",",
"self",
".",
"name",
",",
"key",
",",
"value",
")",
"packet",
"=",
"packet",
".",
"encode",
"(",
"'utf-8'",
")",
"try",
":",
"self",
".",
"statsd_sock",
".",
"sendto",
"(",
"packet",
",",
"self",
".",
"statsd_addr",
")",
"except",
"(",
"socket",
".",
"error",
",",
"socket",
".",
"gaierror",
")",
":",
"pass",
"# cannot send? ok not a huge problem here and we cannot",
"# log because it will be far too verbose :p",
"# Manage Graphite part",
"if",
"self",
".",
"statsd_enabled",
"and",
"self",
".",
"carbon",
":",
"self",
".",
"send_to_graphite",
"(",
"key",
",",
"value",
",",
"timestamp",
"=",
"timestamp",
")",
"# Manage file part",
"if",
"self",
".",
"statsd_enabled",
"and",
"self",
".",
"file_d",
":",
"if",
"timestamp",
"is",
"None",
":",
"timestamp",
"=",
"int",
"(",
"time",
".",
"time",
"(",
")",
")",
"packet",
"=",
"self",
".",
"line_fmt",
"if",
"not",
"self",
".",
"date_fmt",
":",
"date",
"=",
"\"%s\"",
"%",
"timestamp",
"else",
":",
"date",
"=",
"datetime",
".",
"datetime",
".",
"fromtimestamp",
"(",
"timestamp",
")",
".",
"strftime",
"(",
"self",
".",
"date_fmt",
")",
"packet",
"=",
"packet",
".",
"replace",
"(",
"\"#date#\"",
",",
"date",
")",
"packet",
"=",
"packet",
".",
"replace",
"(",
"\"#counter#\"",
",",
"'%s.%s.%s'",
"%",
"(",
"self",
".",
"statsd_prefix",
",",
"self",
".",
"name",
",",
"key",
")",
")",
"packet",
"=",
"packet",
".",
"replace",
"(",
"\"#value#\"",
",",
"'%d'",
"%",
"value",
")",
"packet",
"=",
"packet",
".",
"replace",
"(",
"\"#uom#\"",
",",
"'c'",
")",
"try",
":",
"self",
".",
"file_d",
".",
"write",
"(",
"packet",
")",
"except",
"IOError",
":",
"logger",
".",
"warning",
"(",
"\"Could not write to the file: %s\"",
",",
"packet",
")",
"if",
"self",
".",
"broks_enabled",
":",
"logger",
".",
"debug",
"(",
"\"alignak stat brok: %s = %s\"",
",",
"key",
",",
"value",
")",
"if",
"timestamp",
"is",
"None",
":",
"timestamp",
"=",
"int",
"(",
"time",
".",
"time",
"(",
")",
")",
"return",
"Brok",
"(",
"{",
"'type'",
":",
"'alignak_stat'",
",",
"'data'",
":",
"{",
"'ts'",
":",
"timestamp",
",",
"'type'",
":",
"'counter'",
",",
"'metric'",
":",
"'%s.%s.%s'",
"%",
"(",
"self",
".",
"statsd_prefix",
",",
"self",
".",
"name",
",",
"key",
")",
",",
"'value'",
":",
"value",
",",
"'uom'",
":",
"'c'",
"}",
"}",
")",
"return",
"None"
] | 38.057143 | 18.185714 |
def get_all(self):
"""
Gets all captured counters.
:return: a list with counters.
"""
self._lock.acquire()
try:
return list(self._cache.values())
finally:
self._lock.release() | [
"def",
"get_all",
"(",
"self",
")",
":",
"self",
".",
"_lock",
".",
"acquire",
"(",
")",
"try",
":",
"return",
"list",
"(",
"self",
".",
"_cache",
".",
"values",
"(",
")",
")",
"finally",
":",
"self",
".",
"_lock",
".",
"release",
"(",
")"
] | 22.363636 | 13.272727 |
def create_pipeline(name, unique_id, description='', region=None, key=None, keyid=None,
profile=None):
'''
Create a new, empty pipeline. This function is idempotent.
CLI example:
.. code-block:: bash
salt myminion boto_datapipeline.create_pipeline my_name my_unique_id
'''
client = _get_client(region, key, keyid, profile)
r = {}
try:
response = client.create_pipeline(
name=name,
uniqueId=unique_id,
description=description,
)
r['result'] = response['pipelineId']
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
r['error'] = six.text_type(e)
return r | [
"def",
"create_pipeline",
"(",
"name",
",",
"unique_id",
",",
"description",
"=",
"''",
",",
"region",
"=",
"None",
",",
"key",
"=",
"None",
",",
"keyid",
"=",
"None",
",",
"profile",
"=",
"None",
")",
":",
"client",
"=",
"_get_client",
"(",
"region",
",",
"key",
",",
"keyid",
",",
"profile",
")",
"r",
"=",
"{",
"}",
"try",
":",
"response",
"=",
"client",
".",
"create_pipeline",
"(",
"name",
"=",
"name",
",",
"uniqueId",
"=",
"unique_id",
",",
"description",
"=",
"description",
",",
")",
"r",
"[",
"'result'",
"]",
"=",
"response",
"[",
"'pipelineId'",
"]",
"except",
"(",
"botocore",
".",
"exceptions",
".",
"BotoCoreError",
",",
"botocore",
".",
"exceptions",
".",
"ClientError",
")",
"as",
"e",
":",
"r",
"[",
"'error'",
"]",
"=",
"six",
".",
"text_type",
"(",
"e",
")",
"return",
"r"
] | 30.521739 | 24.173913 |
def InitAgeCheck(self):
"""make an interactive grid in which users can edit ages"""
age_df = self.contribution.tables['ages'].df
self.panel = wx.Panel(self, style=wx.SIMPLE_BORDER)
self.grid_frame = grid_frame3.GridFrame(self.contribution, self.WD,
'ages', 'ages', self.panel,
main_frame=self.main_frame)
self.grid_frame.exitButton.SetLabel('Save and continue')
grid = self.grid_frame.grid
self.grid_frame.Bind(wx.EVT_BUTTON, lambda event: self.onContinue(event, grid, None),
self.grid_frame.exitButton)
# add back button
self.backButton = wx.Button(self.grid_frame.panel, id=-1, label='Back',
name='back_btn')
self.Bind(wx.EVT_BUTTON,
lambda event: self.onbackButton(event, self.InitLocCheck),
self.backButton)
self.grid_frame.main_btn_vbox.Add(self.backButton, flag=wx.ALL, border=5)
# re-do fit
self.grid_frame.do_fit(None, self.min_size)
# center
self.grid_frame.Centre()
return | [
"def",
"InitAgeCheck",
"(",
"self",
")",
":",
"age_df",
"=",
"self",
".",
"contribution",
".",
"tables",
"[",
"'ages'",
"]",
".",
"df",
"self",
".",
"panel",
"=",
"wx",
".",
"Panel",
"(",
"self",
",",
"style",
"=",
"wx",
".",
"SIMPLE_BORDER",
")",
"self",
".",
"grid_frame",
"=",
"grid_frame3",
".",
"GridFrame",
"(",
"self",
".",
"contribution",
",",
"self",
".",
"WD",
",",
"'ages'",
",",
"'ages'",
",",
"self",
".",
"panel",
",",
"main_frame",
"=",
"self",
".",
"main_frame",
")",
"self",
".",
"grid_frame",
".",
"exitButton",
".",
"SetLabel",
"(",
"'Save and continue'",
")",
"grid",
"=",
"self",
".",
"grid_frame",
".",
"grid",
"self",
".",
"grid_frame",
".",
"Bind",
"(",
"wx",
".",
"EVT_BUTTON",
",",
"lambda",
"event",
":",
"self",
".",
"onContinue",
"(",
"event",
",",
"grid",
",",
"None",
")",
",",
"self",
".",
"grid_frame",
".",
"exitButton",
")",
"# add back button",
"self",
".",
"backButton",
"=",
"wx",
".",
"Button",
"(",
"self",
".",
"grid_frame",
".",
"panel",
",",
"id",
"=",
"-",
"1",
",",
"label",
"=",
"'Back'",
",",
"name",
"=",
"'back_btn'",
")",
"self",
".",
"Bind",
"(",
"wx",
".",
"EVT_BUTTON",
",",
"lambda",
"event",
":",
"self",
".",
"onbackButton",
"(",
"event",
",",
"self",
".",
"InitLocCheck",
")",
",",
"self",
".",
"backButton",
")",
"self",
".",
"grid_frame",
".",
"main_btn_vbox",
".",
"Add",
"(",
"self",
".",
"backButton",
",",
"flag",
"=",
"wx",
".",
"ALL",
",",
"border",
"=",
"5",
")",
"# re-do fit",
"self",
".",
"grid_frame",
".",
"do_fit",
"(",
"None",
",",
"self",
".",
"min_size",
")",
"# center",
"self",
".",
"grid_frame",
".",
"Centre",
"(",
")",
"return"
] | 51.652174 | 21.782609 |
def count(data):
    """Quantify reads per gene with featureCounts.

    http://subread.sourceforge.net

    Sorts the alignment by query name, filters to primary alignments,
    runs featureCounts against the sample's GTF annotation, and rewrites
    the output so rows/columns carry the bcbio sample name.  Returns the
    path to the per-sample ``.counts`` file (reused if it already exists).
    """
    sample = dd.get_sample_name(data)
    aligner = dd.get_aligner(data)
    work_dir = dd.get_work_dir(data)
    in_bam = dd.get_work_bam(data) or dd.get_align_bam(data)
    align_dir = os.path.join(work_dir, "align", sample)
    if aligner == "star":
        # STAR keeps its alignments one directory level deeper.
        align_dir = os.path.join(align_dir, "%s_%s" % (sample, aligner))
    sorted_bam = bam.sort(in_bam, dd.get_config(data), order="queryname",
                          out_dir=safe_makedir(align_dir))
    gtf_file = dd.get_gtf_file(data)
    out_dir = os.path.join(work_dir, "htseq-count")
    safe_makedir(out_dir)
    stem = os.path.join(out_dir, sample)
    count_file = stem + ".counts"
    summary_file = stem + ".counts.summary"
    if file_exists(count_file):
        return count_file
    featureCounts = config_utils.get_program("featureCounts", dd.get_config(data))
    paired_flag = _paired_flag(in_bam)
    strand_flag = _strand_flag(data)
    filtered_bam = bam.filter_primary(sorted_bam, data)
    with file_transaction(data, [count_file, summary_file]) as tx_files:
        tx_count_file, tx_summary_file = tx_files
        cmd = ("{featureCounts} -a {gtf_file} -o {tx_count_file} -s {strand_flag} "
               "{paired_flag} {filtered_bam}").format(
                   featureCounts=featureCounts, gtf_file=gtf_file,
                   tx_count_file=tx_count_file, strand_flag=strand_flag,
                   paired_flag=paired_flag, filtered_bam=filtered_bam)
        message = ("Count reads in {tx_count_file} mapping to {gtf_file} using "
                   "featureCounts").format(tx_count_file=tx_count_file,
                                           gtf_file=gtf_file)
        do.run(cmd, message)
    # Normalize sample naming in the outputs produced inside the transaction.
    fixed_count_file = _format_count_file(count_file, data)
    fixed_summary_file = _change_sample_name(
        summary_file, sample, data=data)
    shutil.move(fixed_count_file, count_file)
    shutil.move(fixed_summary_file, summary_file)
    return count_file
"def",
"count",
"(",
"data",
")",
":",
"in_bam",
"=",
"dd",
".",
"get_work_bam",
"(",
"data",
")",
"or",
"dd",
".",
"get_align_bam",
"(",
"data",
")",
"out_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dd",
".",
"get_work_dir",
"(",
"data",
")",
",",
"\"align\"",
",",
"dd",
".",
"get_sample_name",
"(",
"data",
")",
")",
"if",
"dd",
".",
"get_aligner",
"(",
"data",
")",
"==",
"\"star\"",
":",
"out_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"out_dir",
",",
"\"%s_%s\"",
"%",
"(",
"dd",
".",
"get_sample_name",
"(",
"data",
")",
",",
"dd",
".",
"get_aligner",
"(",
"data",
")",
")",
")",
"sorted_bam",
"=",
"bam",
".",
"sort",
"(",
"in_bam",
",",
"dd",
".",
"get_config",
"(",
"data",
")",
",",
"order",
"=",
"\"queryname\"",
",",
"out_dir",
"=",
"safe_makedir",
"(",
"out_dir",
")",
")",
"gtf_file",
"=",
"dd",
".",
"get_gtf_file",
"(",
"data",
")",
"work_dir",
"=",
"dd",
".",
"get_work_dir",
"(",
"data",
")",
"out_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"\"htseq-count\"",
")",
"safe_makedir",
"(",
"out_dir",
")",
"count_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"out_dir",
",",
"dd",
".",
"get_sample_name",
"(",
"data",
")",
")",
"+",
"\".counts\"",
"summary_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"out_dir",
",",
"dd",
".",
"get_sample_name",
"(",
"data",
")",
")",
"+",
"\".counts.summary\"",
"if",
"file_exists",
"(",
"count_file",
")",
":",
"return",
"count_file",
"featureCounts",
"=",
"config_utils",
".",
"get_program",
"(",
"\"featureCounts\"",
",",
"dd",
".",
"get_config",
"(",
"data",
")",
")",
"paired_flag",
"=",
"_paired_flag",
"(",
"in_bam",
")",
"strand_flag",
"=",
"_strand_flag",
"(",
"data",
")",
"filtered_bam",
"=",
"bam",
".",
"filter_primary",
"(",
"sorted_bam",
",",
"data",
")",
"cmd",
"=",
"(",
"\"{featureCounts} -a {gtf_file} -o {tx_count_file} -s {strand_flag} \"",
"\"{paired_flag} {filtered_bam}\"",
")",
"message",
"=",
"(",
"\"Count reads in {tx_count_file} mapping to {gtf_file} using \"",
"\"featureCounts\"",
")",
"with",
"file_transaction",
"(",
"data",
",",
"[",
"count_file",
",",
"summary_file",
"]",
")",
"as",
"tx_files",
":",
"tx_count_file",
",",
"tx_summary_file",
"=",
"tx_files",
"do",
".",
"run",
"(",
"cmd",
".",
"format",
"(",
"*",
"*",
"locals",
"(",
")",
")",
",",
"message",
".",
"format",
"(",
"*",
"*",
"locals",
"(",
")",
")",
")",
"fixed_count_file",
"=",
"_format_count_file",
"(",
"count_file",
",",
"data",
")",
"fixed_summary_file",
"=",
"_change_sample_name",
"(",
"summary_file",
",",
"dd",
".",
"get_sample_name",
"(",
"data",
")",
",",
"data",
"=",
"data",
")",
"shutil",
".",
"move",
"(",
"fixed_count_file",
",",
"count_file",
")",
"shutil",
".",
"move",
"(",
"fixed_summary_file",
",",
"summary_file",
")",
"return",
"count_file"
] | 44.25 | 21.55 |
def report(usaf):
    """Build and save a 20-year annual GHI report plot for a USAF station.

    Sums monthly irradiance totals for 1991-2010, compares them against
    the station's TMY3 annual total, fits a cubic trendline plus a
    two-year rolling average, and writes ``<usaf>_annual_GHI.pdf``.

    NOTE(review): Python 2 only (uses ``print`` statements).
    """
    fig = plt.figure()
    ax = fig.add_subplot(111)
    station_info = geo.station_info(usaf)
    y = {}
    # Annual totals keyed by year; 20 years hard-coded below must match
    # this range.
    for i in range(1991, 2011):
        monthData = monthly(usaf, i)
        t = sum(monthData)
        y[i] = t
        print t
    tmy3tot = tmy3.total(usaf)
    average = sum([v for k, v in y.items()])/20.
    s = sorted(y.items(), key=lambda t: t[1])  # ascending by annual total
    o = sorted(y.items(), key=lambda t: t[0])  # ascending by year
    twohigh = s[-1][1] + s[-2][1]  # sum of the two best years
    twolow = s[0][1] + s[1][1]     # sum of the two worst years
    # Fractional deviation of the two-year extremes from the 20-year mean.
    mintol = 1-twolow/2./average
    plustol = twohigh/2./average-1
    # Legend text summarizing the statistics above.
    txt = ""
    txt += "%s\n" % station_info['Site Name']
    txt += 'TMY3/hist: %s/' % int(round(tmy3tot))
    txt += '%s\n' % int(round(average))
    txt += "high/low av: %s/" % int(round(twohigh/2.))
    txt += "%s\n" % int(round(twolow/2.))
    txt += "+%s/-%s%% " % (round(plustol*100, 0), round(mintol*100, 0))
    txt += "(-%s%% of TMY3)" % round((1-twolow/2./tmy3tot)*100, 0)
    print txt
    x = np.array([k for k, v in o])  # years, in order
    y = np.array([v for k, v in o])  # annual totals (rebinds the dict name)
    rx = x[1:]
    # Two-year rolling average, positioned at the trailing year.
    ry = [(v + y[i+1])/2 for i, v in enumerate(y[:-1])]
    fit = pylab.polyfit(x, y, 3)  # cubic trendline coefficients
    fit_fn = pylab.poly1d(fit)
    f = interp1d(x, y, kind='cubic')
    f2 = interp1d(rx, ry, kind='cubic')
    xnew = np.linspace(min(x), max(x), 200)
    x2 = np.linspace(min(rx), max(rx), 200)
    # ax.plot(x,y)
    ax.plot(xnew, f(xnew), label="Annual GHI")
    ax.plot(xnew, fit_fn(xnew), label='trendline')
    ax.plot(x2, f2(x2), label='2 Year Ave')
    # Dashed horizontal reference at the TMY3 annual total.
    ax.plot([min(x), max(x)], [tmy3tot, tmy3tot], linestyle='--')
    leg = plt.legend(title=txt, loc=4, fancybox=True)
    leg.get_frame().set_alpha(0.5)
    # fig.text(min(x),min(y)-min(y)*.1,txt)
    # fig.text(.1,.1,txt)
    plt.tight_layout()
    fig.savefig('%s_annual_GHI.pdf' % (usaf), format='pdf') | [
"def",
"report",
"(",
"usaf",
")",
":",
"fig",
"=",
"plt",
".",
"figure",
"(",
")",
"ax",
"=",
"fig",
".",
"add_subplot",
"(",
"111",
")",
"station_info",
"=",
"geo",
".",
"station_info",
"(",
"usaf",
")",
"y",
"=",
"{",
"}",
"for",
"i",
"in",
"range",
"(",
"1991",
",",
"2011",
")",
":",
"monthData",
"=",
"monthly",
"(",
"usaf",
",",
"i",
")",
"t",
"=",
"sum",
"(",
"monthData",
")",
"y",
"[",
"i",
"]",
"=",
"t",
"print",
"t",
"tmy3tot",
"=",
"tmy3",
".",
"total",
"(",
"usaf",
")",
"average",
"=",
"sum",
"(",
"[",
"v",
"for",
"k",
",",
"v",
"in",
"y",
".",
"items",
"(",
")",
"]",
")",
"/",
"20.",
"s",
"=",
"sorted",
"(",
"y",
".",
"items",
"(",
")",
",",
"key",
"=",
"lambda",
"t",
":",
"t",
"[",
"1",
"]",
")",
"o",
"=",
"sorted",
"(",
"y",
".",
"items",
"(",
")",
",",
"key",
"=",
"lambda",
"t",
":",
"t",
"[",
"0",
"]",
")",
"twohigh",
"=",
"s",
"[",
"-",
"1",
"]",
"[",
"1",
"]",
"+",
"s",
"[",
"-",
"2",
"]",
"[",
"1",
"]",
"twolow",
"=",
"s",
"[",
"0",
"]",
"[",
"1",
"]",
"+",
"s",
"[",
"1",
"]",
"[",
"1",
"]",
"mintol",
"=",
"1",
"-",
"twolow",
"/",
"2.",
"/",
"average",
"plustol",
"=",
"twohigh",
"/",
"2.",
"/",
"average",
"-",
"1",
"txt",
"=",
"\"\"",
"txt",
"+=",
"\"%s\\n\"",
"%",
"station_info",
"[",
"'Site Name'",
"]",
"txt",
"+=",
"'TMY3/hist: %s/'",
"%",
"int",
"(",
"round",
"(",
"tmy3tot",
")",
")",
"txt",
"+=",
"'%s\\n'",
"%",
"int",
"(",
"round",
"(",
"average",
")",
")",
"txt",
"+=",
"\"high/low av: %s/\"",
"%",
"int",
"(",
"round",
"(",
"twohigh",
"/",
"2.",
")",
")",
"txt",
"+=",
"\"%s\\n\"",
"%",
"int",
"(",
"round",
"(",
"twolow",
"/",
"2.",
")",
")",
"txt",
"+=",
"\"+%s/-%s%% \"",
"%",
"(",
"round",
"(",
"plustol",
"*",
"100",
",",
"0",
")",
",",
"round",
"(",
"mintol",
"*",
"100",
",",
"0",
")",
")",
"txt",
"+=",
"\"(-%s%% of TMY3)\"",
"%",
"round",
"(",
"(",
"1",
"-",
"twolow",
"/",
"2.",
"/",
"tmy3tot",
")",
"*",
"100",
",",
"0",
")",
"print",
"txt",
"x",
"=",
"np",
".",
"array",
"(",
"[",
"k",
"for",
"k",
",",
"v",
"in",
"o",
"]",
")",
"y",
"=",
"np",
".",
"array",
"(",
"[",
"v",
"for",
"k",
",",
"v",
"in",
"o",
"]",
")",
"rx",
"=",
"x",
"[",
"1",
":",
"]",
"ry",
"=",
"[",
"(",
"v",
"+",
"y",
"[",
"i",
"+",
"1",
"]",
")",
"/",
"2",
"for",
"i",
",",
"v",
"in",
"enumerate",
"(",
"y",
"[",
":",
"-",
"1",
"]",
")",
"]",
"fit",
"=",
"pylab",
".",
"polyfit",
"(",
"x",
",",
"y",
",",
"3",
")",
"fit_fn",
"=",
"pylab",
".",
"poly1d",
"(",
"fit",
")",
"f",
"=",
"interp1d",
"(",
"x",
",",
"y",
",",
"kind",
"=",
"'cubic'",
")",
"f2",
"=",
"interp1d",
"(",
"rx",
",",
"ry",
",",
"kind",
"=",
"'cubic'",
")",
"xnew",
"=",
"np",
".",
"linspace",
"(",
"min",
"(",
"x",
")",
",",
"max",
"(",
"x",
")",
",",
"200",
")",
"x2",
"=",
"np",
".",
"linspace",
"(",
"min",
"(",
"rx",
")",
",",
"max",
"(",
"rx",
")",
",",
"200",
")",
"# ax.plot(x,y)",
"ax",
".",
"plot",
"(",
"xnew",
",",
"f",
"(",
"xnew",
")",
",",
"label",
"=",
"\"Annual GHI\"",
")",
"ax",
".",
"plot",
"(",
"xnew",
",",
"fit_fn",
"(",
"xnew",
")",
",",
"label",
"=",
"'trendline'",
")",
"ax",
".",
"plot",
"(",
"x2",
",",
"f2",
"(",
"x2",
")",
",",
"label",
"=",
"'2 Year Ave'",
")",
"ax",
".",
"plot",
"(",
"[",
"min",
"(",
"x",
")",
",",
"max",
"(",
"x",
")",
"]",
",",
"[",
"tmy3tot",
",",
"tmy3tot",
"]",
",",
"linestyle",
"=",
"'--'",
")",
"leg",
"=",
"plt",
".",
"legend",
"(",
"title",
"=",
"txt",
",",
"loc",
"=",
"4",
",",
"fancybox",
"=",
"True",
")",
"leg",
".",
"get_frame",
"(",
")",
".",
"set_alpha",
"(",
"0.5",
")",
"# fig.text(min(x),min(y)-min(y)*.1,txt)",
"# fig.text(.1,.1,txt)",
"plt",
".",
"tight_layout",
"(",
")",
"fig",
".",
"savefig",
"(",
"'%s_annual_GHI.pdf'",
"%",
"(",
"usaf",
")",
",",
"format",
"=",
"'pdf'",
")"
] | 35.78 | 12.4 |
def to_file(self, filename):
    """
    Write the ANTsImage to file

    Args
    ----
    filename : string
        filepath to which the image will be written
    """
    # Expand ~ so the native writer receives an absolute-ish path.
    target = os.path.expanduser(filename)
    writer = utils.get_lib_fn('toFile%s' % self._libsuffix)
    writer(self.pointer, target)
"def",
"to_file",
"(",
"self",
",",
"filename",
")",
":",
"filename",
"=",
"os",
".",
"path",
".",
"expanduser",
"(",
"filename",
")",
"libfn",
"=",
"utils",
".",
"get_lib_fn",
"(",
"'toFile%s'",
"%",
"self",
".",
"_libsuffix",
")",
"libfn",
"(",
"self",
".",
"pointer",
",",
"filename",
")"
] | 27.75 | 14.416667 |
def gender(word, pos=NOUN):
    """ Returns the gender (MASCULINE, FEMININE or NEUTER) for nouns.
        Uses suffix rules first, then a majority vote; returns None for
        words that are not nouns or that match no rule.
    """
    w = word.lower()
    if pos != NOUN:
        return None
    # Default suffix rules, checked in fixed priority order.
    for suffixes, g in ((gender_masculine, MASCULINE),
                        (gender_feminine, FEMININE),
                        (gender_neuter, NEUTER)):
        if w.endswith(suffixes):
            return g
    # Fall back to the majority-vote suffix statistics.
    for g, suffixes in gender_majority_vote.items():
        if w.endswith(suffixes):
            return g
"def",
"gender",
"(",
"word",
",",
"pos",
"=",
"NOUN",
")",
":",
"w",
"=",
"word",
".",
"lower",
"(",
")",
"if",
"pos",
"==",
"NOUN",
":",
"# Default rules (baseline = 32%).",
"if",
"w",
".",
"endswith",
"(",
"gender_masculine",
")",
":",
"return",
"MASCULINE",
"if",
"w",
".",
"endswith",
"(",
"gender_feminine",
")",
":",
"return",
"FEMININE",
"if",
"w",
".",
"endswith",
"(",
"gender_neuter",
")",
":",
"return",
"NEUTER",
"# Majority vote.",
"for",
"g",
"in",
"gender_majority_vote",
":",
"if",
"w",
".",
"endswith",
"(",
"gender_majority_vote",
"[",
"g",
"]",
")",
":",
"return",
"g"
] | 33.882353 | 9.058824 |
def target_sequence(self):
    # type: () -> SeqRecord
    """Get the target sequence in the vector.

    The target sequence is the part of the plasmid that is kept (not
    discarded) during the assembly, i.e. everything except the
    placeholder sequence.
    """
    # Pick the regex groups that bracket the placeholder, depending on
    # which side the enzyme leaves its overhang.
    if self.cutter.is_3overhang():
        first, last = 2, 3
    else:
        first, last = 1, 2
    start = self._match.span(first)[0]
    end = self._match.span(last)[1]
    # Rotate the circular record to `start`, then take the target span.
    return add_as_source(self.record, (self.record << start)[end - start :])
"def",
"target_sequence",
"(",
"self",
")",
":",
"# type: () -> SeqRecord",
"if",
"self",
".",
"cutter",
".",
"is_3overhang",
"(",
")",
":",
"start",
",",
"end",
"=",
"self",
".",
"_match",
".",
"span",
"(",
"2",
")",
"[",
"0",
"]",
",",
"self",
".",
"_match",
".",
"span",
"(",
"3",
")",
"[",
"1",
"]",
"else",
":",
"start",
",",
"end",
"=",
"self",
".",
"_match",
".",
"span",
"(",
"1",
")",
"[",
"0",
"]",
",",
"self",
".",
"_match",
".",
"span",
"(",
"2",
")",
"[",
"1",
"]",
"return",
"add_as_source",
"(",
"self",
".",
"record",
",",
"(",
"self",
".",
"record",
"<<",
"start",
")",
"[",
"end",
"-",
"start",
":",
"]",
")"
] | 44.916667 | 21.916667 |
def add_static_url(self, url_path, directory, endpoint=None, roles=None):
    """Add a new url rule for static files.

    :param url_path: subpath from application static url path. No heading
        or trailing slash.
    :param directory: directory to serve content from.
    :param endpoint: flask endpoint name for this url rule.
    :param roles: roles allowed to access the rule.

    Example::

        app.add_static_url('myplugin',
                           '/path/to/myplugin/resources',
                           endpoint='myplugin_static')

    With default setup it will serve content from directory
    `/path/to/myplugin/resources` from url `http://.../static/myplugin`
    """
    rule = "/".join((self.static_url_path, url_path, "<path:filename>"))
    view = partial(send_file_from_directory, directory=directory)
    self.add_url_rule(rule, endpoint=endpoint, view_func=view, roles=roles)
    # Static assets are public: allow anonymous access on this endpoint.
    self.add_access_controller(
        endpoint, allow_access_for_roles(Anonymous), endpoint=True
    )
"def",
"add_static_url",
"(",
"self",
",",
"url_path",
",",
"directory",
",",
"endpoint",
"=",
"None",
",",
"roles",
"=",
"None",
")",
":",
"url_path",
"=",
"self",
".",
"static_url_path",
"+",
"\"/\"",
"+",
"url_path",
"+",
"\"/<path:filename>\"",
"self",
".",
"add_url_rule",
"(",
"url_path",
",",
"endpoint",
"=",
"endpoint",
",",
"view_func",
"=",
"partial",
"(",
"send_file_from_directory",
",",
"directory",
"=",
"directory",
")",
",",
"roles",
"=",
"roles",
",",
")",
"self",
".",
"add_access_controller",
"(",
"endpoint",
",",
"allow_access_for_roles",
"(",
"Anonymous",
")",
",",
"endpoint",
"=",
"True",
")"
] | 38.888889 | 23.333333 |
def _handle_properties(self, stmt: Statement, sctx: SchemaContext) -> None:
"""Handle **bit** statements."""
nextpos = 0
for bst in stmt.find_all("bit"):
if not sctx.schema_data.if_features(bst, sctx.text_mid):
continue
label = bst.argument
pst = bst.find1("position")
if pst:
pos = int(pst.argument)
self.bit[label] = pos
if pos > nextpos:
nextpos = pos
else:
self.bit[label] = nextpos
nextpos += 1 | [
"def",
"_handle_properties",
"(",
"self",
",",
"stmt",
":",
"Statement",
",",
"sctx",
":",
"SchemaContext",
")",
"->",
"None",
":",
"nextpos",
"=",
"0",
"for",
"bst",
"in",
"stmt",
".",
"find_all",
"(",
"\"bit\"",
")",
":",
"if",
"not",
"sctx",
".",
"schema_data",
".",
"if_features",
"(",
"bst",
",",
"sctx",
".",
"text_mid",
")",
":",
"continue",
"label",
"=",
"bst",
".",
"argument",
"pst",
"=",
"bst",
".",
"find1",
"(",
"\"position\"",
")",
"if",
"pst",
":",
"pos",
"=",
"int",
"(",
"pst",
".",
"argument",
")",
"self",
".",
"bit",
"[",
"label",
"]",
"=",
"pos",
"if",
"pos",
">",
"nextpos",
":",
"nextpos",
"=",
"pos",
"else",
":",
"self",
".",
"bit",
"[",
"label",
"]",
"=",
"nextpos",
"nextpos",
"+=",
"1"
] | 36.25 | 11.75 |
def add_to_rc(self, content):
    """
    Append *content*, terminated by a newline, to the rc script.

    Raises DirectoryException unless the directory was initialized with
    rewrite_config; lazily opens the rc file handle on first use.
    """
    if not self.rewrite_config:
        msg = "Error! Directory was not intialized w/ rewrite_config."
        raise DirectoryException(msg)
    if not self.rc_file:
        handle = self.__get_rc_handle(self.root_dir)
        self.rc_path, self.rc_file = handle
    self.rc_file.write(content + '\n')
"def",
"add_to_rc",
"(",
"self",
",",
"content",
")",
":",
"if",
"not",
"self",
".",
"rewrite_config",
":",
"raise",
"DirectoryException",
"(",
"\"Error! Directory was not intialized w/ rewrite_config.\"",
")",
"if",
"not",
"self",
".",
"rc_file",
":",
"self",
".",
"rc_path",
",",
"self",
".",
"rc_file",
"=",
"self",
".",
"__get_rc_handle",
"(",
"self",
".",
"root_dir",
")",
"self",
".",
"rc_file",
".",
"write",
"(",
"content",
"+",
"'\\n'",
")"
] | 40.333333 | 13.666667 |
def _check_for_changes(entity_type, ret, existing, modified):
'''
take an existing entity and a modified entity and check for changes.
'''
ret['result'] = True
#were there any changes? generation always changes, remove it.
if isinstance(existing, dict) and isinstance(modified, dict):
if 'generation' in modified['content'].keys():
del modified['content']['generation']
if 'generation' in existing['content'].keys():
del existing['content']['generation']
if modified['content'] == existing['content']:
ret['comment'] = '{entity_type} is currently enforced to the desired state. No changes made.'.format(entity_type=entity_type)
else:
ret['comment'] = '{entity_type} was enforced to the desired state. Note: Only parameters specified ' \
'were enforced. See changes for details.'.format(entity_type=entity_type)
ret['changes']['old'] = existing['content']
ret['changes']['new'] = modified['content']
else:
if modified == existing:
ret['comment'] = '{entity_type} is currently enforced to the desired state. No changes made.'.format(entity_type=entity_type)
else:
ret['comment'] = '{entity_type} was enforced to the desired state. Note: Only parameters specified ' \
'were enforced. See changes for details.'.format(entity_type=entity_type)
ret['changes']['old'] = existing
ret['changes']['new'] = modified
return ret | [
"def",
"_check_for_changes",
"(",
"entity_type",
",",
"ret",
",",
"existing",
",",
"modified",
")",
":",
"ret",
"[",
"'result'",
"]",
"=",
"True",
"#were there any changes? generation always changes, remove it.",
"if",
"isinstance",
"(",
"existing",
",",
"dict",
")",
"and",
"isinstance",
"(",
"modified",
",",
"dict",
")",
":",
"if",
"'generation'",
"in",
"modified",
"[",
"'content'",
"]",
".",
"keys",
"(",
")",
":",
"del",
"modified",
"[",
"'content'",
"]",
"[",
"'generation'",
"]",
"if",
"'generation'",
"in",
"existing",
"[",
"'content'",
"]",
".",
"keys",
"(",
")",
":",
"del",
"existing",
"[",
"'content'",
"]",
"[",
"'generation'",
"]",
"if",
"modified",
"[",
"'content'",
"]",
"==",
"existing",
"[",
"'content'",
"]",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"'{entity_type} is currently enforced to the desired state. No changes made.'",
".",
"format",
"(",
"entity_type",
"=",
"entity_type",
")",
"else",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"'{entity_type} was enforced to the desired state. Note: Only parameters specified '",
"'were enforced. See changes for details.'",
".",
"format",
"(",
"entity_type",
"=",
"entity_type",
")",
"ret",
"[",
"'changes'",
"]",
"[",
"'old'",
"]",
"=",
"existing",
"[",
"'content'",
"]",
"ret",
"[",
"'changes'",
"]",
"[",
"'new'",
"]",
"=",
"modified",
"[",
"'content'",
"]",
"else",
":",
"if",
"modified",
"==",
"existing",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"'{entity_type} is currently enforced to the desired state. No changes made.'",
".",
"format",
"(",
"entity_type",
"=",
"entity_type",
")",
"else",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"'{entity_type} was enforced to the desired state. Note: Only parameters specified '",
"'were enforced. See changes for details.'",
".",
"format",
"(",
"entity_type",
"=",
"entity_type",
")",
"ret",
"[",
"'changes'",
"]",
"[",
"'old'",
"]",
"=",
"existing",
"ret",
"[",
"'changes'",
"]",
"[",
"'new'",
"]",
"=",
"modified",
"return",
"ret"
] | 45.617647 | 33.911765 |
def get_user_policy(self, user_name, policy_name):
    """
    Retrieves the specified policy document for the specified user.

    :type user_name: string
    :param user_name: The name of the user the policy is associated with.

    :type policy_name: string
    :param policy_name: The policy document to get.
    """
    return self.get_response('GetUserPolicy',
                             {'UserName': user_name,
                              'PolicyName': policy_name},
                             verb='POST')
"def",
"get_user_policy",
"(",
"self",
",",
"user_name",
",",
"policy_name",
")",
":",
"params",
"=",
"{",
"'UserName'",
":",
"user_name",
",",
"'PolicyName'",
":",
"policy_name",
"}",
"return",
"self",
".",
"get_response",
"(",
"'GetUserPolicy'",
",",
"params",
",",
"verb",
"=",
"'POST'",
")"
] | 35.928571 | 18.357143 |
def plt2xyz(fname):
    """Convert a Compass plot file to a tab-separated XYZ pointcloud."""
    survey = CompassPltParser(fname).parse()
    # Files with a UTM zone store coordinates in feet; convert to meters.
    to_meters = bool(survey.utm_zone)
    for segment in survey:
        for command in segment:
            if command.cmd != 'd':
                continue
            x, y, z = command.x, command.y, command.z
            if to_meters:
                x, y, z = x * FT_TO_M, y * FT_TO_M, z * FT_TO_M
            print('%.3f\t%.3f\t%.3f' % (x, y, z))
"def",
"plt2xyz",
"(",
"fname",
")",
":",
"parser",
"=",
"CompassPltParser",
"(",
"fname",
")",
"plt",
"=",
"parser",
".",
"parse",
"(",
")",
"for",
"segment",
"in",
"plt",
":",
"for",
"command",
"in",
"segment",
":",
"if",
"command",
".",
"cmd",
"==",
"'d'",
":",
"if",
"plt",
".",
"utm_zone",
":",
"x",
",",
"y",
",",
"z",
"=",
"command",
".",
"x",
"*",
"FT_TO_M",
",",
"command",
".",
"y",
"*",
"FT_TO_M",
",",
"command",
".",
"z",
"*",
"FT_TO_M",
"else",
":",
"x",
",",
"y",
",",
"z",
"=",
"command",
".",
"x",
",",
"command",
".",
"y",
",",
"command",
".",
"z",
"print",
"(",
"'%.3f\\t%.3f\\t%.3f'",
"%",
"(",
"x",
",",
"y",
",",
"z",
")",
")"
] | 29.769231 | 17.769231 |
def download_url(self, project, file_name, run=None, entity=None):
        """Generate download urls

        Args:
            project (str): The project to download
            file_name (str): The name of the file to download
            run (str, optional): The run to upload to
            entity (str, optional): The entity to scope this project to. Defaults to wandb models

        Returns:
            A dict of extensions and urls, or None when the file is unknown
            or has never been uploaded (no ``updatedAt`` timestamp)

                { "url": "https://weights.url", "updatedAt": '2013-04-26T22:22:23.832Z', 'md5': 'mZFLkyvTelC5g8XnyQrpOw==' }

        """
        query = gql('''
        query Model($name: String!, $fileName: String!, $entity: String!, $run: String!) {
            model(name: $name, entityName: $entity) {
                bucket(name: $run) {
                    files(names: [$fileName]) {
                        edges {
                            node {
                                name
                                url
                                md5
                                updatedAt
                            }
                        }
                    }
                }
            }
        }
        ''')
        # Fall back to the configured defaults when run/entity are omitted.
        query_result = self.gql(query, variable_values={
            'name': project, 'run': run or self.settings('run'), 'fileName': file_name,
            'entity': entity or self.settings('entity')})
        files = self._flatten_edges(query_result['model']['bucket']['files'])
        # updatedAt is absent for files that were requested but never stored.
        return files[0] if len(files) > 0 and files[0].get('updatedAt') else None | [
"def",
"download_url",
"(",
"self",
",",
"project",
",",
"file_name",
",",
"run",
"=",
"None",
",",
"entity",
"=",
"None",
")",
":",
"query",
"=",
"gql",
"(",
"'''\n query Model($name: String!, $fileName: String!, $entity: String!, $run: String!) {\n model(name: $name, entityName: $entity) {\n bucket(name: $run) {\n files(names: [$fileName]) {\n edges {\n node {\n name\n url\n md5\n updatedAt\n }\n }\n }\n }\n }\n }\n '''",
")",
"query_result",
"=",
"self",
".",
"gql",
"(",
"query",
",",
"variable_values",
"=",
"{",
"'name'",
":",
"project",
",",
"'run'",
":",
"run",
"or",
"self",
".",
"settings",
"(",
"'run'",
")",
",",
"'fileName'",
":",
"file_name",
",",
"'entity'",
":",
"entity",
"or",
"self",
".",
"settings",
"(",
"'entity'",
")",
"}",
")",
"files",
"=",
"self",
".",
"_flatten_edges",
"(",
"query_result",
"[",
"'model'",
"]",
"[",
"'bucket'",
"]",
"[",
"'files'",
"]",
")",
"return",
"files",
"[",
"0",
"]",
"if",
"len",
"(",
"files",
")",
">",
"0",
"and",
"files",
"[",
"0",
"]",
".",
"get",
"(",
"'updatedAt'",
")",
"else",
"None"
] | 39.789474 | 22.578947 |
def _tempfile(filename):
"""
Create a NamedTemporaryFile instance to be passed to atomic_writer
"""
return tempfile.NamedTemporaryFile(mode='w',
dir=os.path.dirname(filename),
prefix=os.path.basename(filename),
suffix=os.fsencode('.tmp'),
delete=False) | [
"def",
"_tempfile",
"(",
"filename",
")",
":",
"return",
"tempfile",
".",
"NamedTemporaryFile",
"(",
"mode",
"=",
"'w'",
",",
"dir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"filename",
")",
",",
"prefix",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"filename",
")",
",",
"suffix",
"=",
"os",
".",
"fsencode",
"(",
"'.tmp'",
")",
",",
"delete",
"=",
"False",
")"
] | 46.222222 | 17.111111 |
def get_thumbnail(original, size, **options):
    """
    Creates or gets an already created thumbnail for the given image with the given size and
    options.

    :param original: File-path, url or base64-encoded string of the image that you want an
                     thumbnail.
    :param size: String with the wanted thumbnail size. On the form: ``200x200``, ``200`` or
                 ``x200``.

    :param crop: Crop settings, should be ``center``, ``top``, ``right``, ``bottom``, ``left``.
    :param force: If set to ``True`` the thumbnail will be created even if it exists before.
    :param quality: Overrides ``THUMBNAIL_QUALITY``, will set the quality used by the backend while
                    saving the thumbnail.
    :param scale_up: Overrides ``THUMBNAIL_SCALE_UP``, if set to ``True`` the image will be scaled
                     up if necessary.
    :param colormode: Overrides ``THUMBNAIL_COLORMODE``, The default colormode for thumbnails.
                      Supports all values supported by pillow. In other engines there is a best
                      effort translation from pillow modes to the modes supported by the current
                      engine.
    :param format: Overrides the format the thumbnail will be saved in. This will override both the
                   detected file type as well as the one specified in ``THUMBNAIL_FALLBACK_FORMAT``.
    :return: A Thumbnail object
    """
    engine = get_engine()
    cache = get_cache_backend()
    original = SourceFile(original)
    crop = options.get('crop', None)
    options = engine.evaluate_options(options)
    thumbnail_name = generate_filename(original, size, crop)
    if settings.THUMBNAIL_DUMMY:
        # Dummy mode: return a placeholder without touching storage or cache.
        engine = DummyEngine()
        return engine.get_thumbnail(thumbnail_name, engine.parse_size(size), crop, options)
    cached = cache.get(thumbnail_name)
    # force bypasses both the cache lookup and the existence check below.
    force = options is not None and 'force' in options and options['force']
    if not force and cached:
        return cached
    thumbnail = Thumbnail(thumbnail_name, engine.get_format(original, options))
    if force or not thumbnail.exists:
        size = engine.parse_size(size)
        thumbnail.image = engine.get_thumbnail(original, size, crop, options)
        thumbnail.save(options)
        # Also render each configured alternative (e.g. retina) resolution.
        for resolution in settings.THUMBNAIL_ALTERNATIVE_RESOLUTIONS:
            resolution_size = engine.calculate_alternative_resolution_size(resolution, size)
            image = engine.get_thumbnail(original, resolution_size, crop, options)
            thumbnail.save_alternative_resolution(resolution, image, options)
    # Re-cache even when served from disk so later calls hit the cache.
    cache.set(thumbnail)
    return thumbnail | [
"def",
"get_thumbnail",
"(",
"original",
",",
"size",
",",
"*",
"*",
"options",
")",
":",
"engine",
"=",
"get_engine",
"(",
")",
"cache",
"=",
"get_cache_backend",
"(",
")",
"original",
"=",
"SourceFile",
"(",
"original",
")",
"crop",
"=",
"options",
".",
"get",
"(",
"'crop'",
",",
"None",
")",
"options",
"=",
"engine",
".",
"evaluate_options",
"(",
"options",
")",
"thumbnail_name",
"=",
"generate_filename",
"(",
"original",
",",
"size",
",",
"crop",
")",
"if",
"settings",
".",
"THUMBNAIL_DUMMY",
":",
"engine",
"=",
"DummyEngine",
"(",
")",
"return",
"engine",
".",
"get_thumbnail",
"(",
"thumbnail_name",
",",
"engine",
".",
"parse_size",
"(",
"size",
")",
",",
"crop",
",",
"options",
")",
"cached",
"=",
"cache",
".",
"get",
"(",
"thumbnail_name",
")",
"force",
"=",
"options",
"is",
"not",
"None",
"and",
"'force'",
"in",
"options",
"and",
"options",
"[",
"'force'",
"]",
"if",
"not",
"force",
"and",
"cached",
":",
"return",
"cached",
"thumbnail",
"=",
"Thumbnail",
"(",
"thumbnail_name",
",",
"engine",
".",
"get_format",
"(",
"original",
",",
"options",
")",
")",
"if",
"force",
"or",
"not",
"thumbnail",
".",
"exists",
":",
"size",
"=",
"engine",
".",
"parse_size",
"(",
"size",
")",
"thumbnail",
".",
"image",
"=",
"engine",
".",
"get_thumbnail",
"(",
"original",
",",
"size",
",",
"crop",
",",
"options",
")",
"thumbnail",
".",
"save",
"(",
"options",
")",
"for",
"resolution",
"in",
"settings",
".",
"THUMBNAIL_ALTERNATIVE_RESOLUTIONS",
":",
"resolution_size",
"=",
"engine",
".",
"calculate_alternative_resolution_size",
"(",
"resolution",
",",
"size",
")",
"image",
"=",
"engine",
".",
"get_thumbnail",
"(",
"original",
",",
"resolution_size",
",",
"crop",
",",
"options",
")",
"thumbnail",
".",
"save_alternative_resolution",
"(",
"resolution",
",",
"image",
",",
"options",
")",
"cache",
".",
"set",
"(",
"thumbnail",
")",
"return",
"thumbnail"
] | 46.945455 | 28.8 |
def transform_system(principal_vec, principal_default, other_vecs,
                     matrix=None):
    """Transform vectors with either ``matrix`` or based on ``principal_vec``.

    If ``matrix`` is given, ``principal_vec`` and every entry of
    ``other_vecs`` are multiplied by it and ``principal_default`` is
    ignored. Otherwise, the rotation (without the dilation) taking
    ``principal_default`` to ``principal_vec`` is computed and applied
    to ``other_vecs``, while ``principal_vec`` itself is passed through
    unchanged.

    **Note:** All vectors must have the same shape, which must also
    match the shape of ``matrix`` when it is provided.

    Parameters
    ----------
    principal_vec : `array-like`, shape ``(ndim,)``
        Vector that defines the transformation if ``matrix`` is not
        provided.
    principal_default : `array-like`, shape ``(ndim,)``
        Default value for ``principal_vec``. The deviation from this
        determines the transformation. Unused when ``matrix`` is given.
    other_vecs : sequence of ``None`` or `array-like`'s with shape ``(ndim,)``
        The other vectors that should be transformed. ``None`` entries
        are just appended as-is.
    matrix : `array-like`, shape ``(ndim, ndim)``, optional
        Explicit transformation matrix to be applied to the vectors.
        It is allowed to include a constant scaling but shouldn't have
        strongly varying directional scaling (bad condition).

    Returns
    -------
    transformed_vecs : tuple of `numpy.ndarray`, shape ``(ndim,)``
        The (transformed) ``principal_vec`` followed by the transformed
        ``other_vecs``; length is ``len(other_vecs) + 1``.
    """
    principal_vec = np.asarray(principal_vec, dtype=float)
    ndim = principal_vec.shape[0]
    out = []

    if matrix is None:
        principal_default = np.asarray(principal_default, dtype=float)
        norm_new = np.linalg.norm(principal_vec)
        norm_default = np.linalg.norm(principal_default)
        # Separate into dilation and rotation; the dilation is only used
        # for the comparison below, never in the final matrix.
        if norm_default == 0.0 and norm_new != 0.0:
            raise ValueError('no transformation from {} to {}'
                             ''.format(principal_default, principal_vec))
        elif norm_new == 0.0 and norm_default != 0.0:
            raise ValueError('transformation from {} to {} is singular'
                             ''.format(principal_default, principal_vec))
        elif norm_new == 0.0 and norm_default == 0.0:
            scaling = 1.0
        else:
            scaling = norm_new / norm_default

        if np.allclose(principal_vec, scaling * principal_default):
            # Pure dilation -- no rotation necessary
            matrix = np.eye(ndim)
        else:
            matrix = rotation_matrix_from_to(principal_default,
                                             principal_vec)
        # The principal vector goes straight through in this mode
        out.append(principal_vec)
    else:
        matrix = np.asarray(matrix, dtype=float)
        if matrix.shape != (ndim, ndim):
            raise ValueError('matrix shape must be {}, got {}'
                             ''.format((ndim, ndim), matrix.shape))
        # Reject badly conditioned matrices via the singular value ratio
        svals = np.linalg.svd(matrix, compute_uv=False)
        condition = np.inf if 0.0 in svals else svals[0] / svals[-1]
        if condition > 1e6:
            raise np.linalg.LinAlgError(
                'matrix is badly conditioned: condition number is {}'
                ''.format(condition))
        out.append(matrix.dot(principal_vec))

    out.extend(None if vec is None else matrix.dot(vec)
               for vec in other_vecs)
    return tuple(out)
"def",
"transform_system",
"(",
"principal_vec",
",",
"principal_default",
",",
"other_vecs",
",",
"matrix",
"=",
"None",
")",
":",
"transformed_vecs",
"=",
"[",
"]",
"principal_vec",
"=",
"np",
".",
"asarray",
"(",
"principal_vec",
",",
"dtype",
"=",
"float",
")",
"ndim",
"=",
"principal_vec",
".",
"shape",
"[",
"0",
"]",
"if",
"matrix",
"is",
"None",
":",
"# Separate into dilation and rotation. The dilation is only used",
"# for comparison, not in the final matrix.",
"principal_default",
"=",
"np",
".",
"asarray",
"(",
"principal_default",
",",
"dtype",
"=",
"float",
")",
"pr_norm",
"=",
"np",
".",
"linalg",
".",
"norm",
"(",
"principal_vec",
")",
"pr_default_norm",
"=",
"np",
".",
"linalg",
".",
"norm",
"(",
"principal_default",
")",
"if",
"pr_default_norm",
"==",
"0.0",
"and",
"pr_norm",
"!=",
"0.0",
":",
"raise",
"ValueError",
"(",
"'no transformation from {} to {}'",
"''",
".",
"format",
"(",
"principal_default",
",",
"principal_vec",
")",
")",
"elif",
"pr_norm",
"==",
"0.0",
"and",
"pr_default_norm",
"!=",
"0.0",
":",
"raise",
"ValueError",
"(",
"'transformation from {} to {} is singular'",
"''",
".",
"format",
"(",
"principal_default",
",",
"principal_vec",
")",
")",
"elif",
"pr_norm",
"==",
"0.0",
"and",
"pr_default_norm",
"==",
"0.0",
":",
"dilation",
"=",
"1.0",
"else",
":",
"dilation",
"=",
"(",
"np",
".",
"linalg",
".",
"norm",
"(",
"principal_vec",
")",
"/",
"np",
".",
"linalg",
".",
"norm",
"(",
"principal_default",
")",
")",
"# Determine the rotation part",
"if",
"np",
".",
"allclose",
"(",
"principal_vec",
",",
"dilation",
"*",
"principal_default",
")",
":",
"# Dilation only",
"matrix",
"=",
"np",
".",
"eye",
"(",
"ndim",
")",
"else",
":",
"matrix",
"=",
"rotation_matrix_from_to",
"(",
"principal_default",
",",
"principal_vec",
")",
"# This one goes straight in",
"transformed_vecs",
".",
"append",
"(",
"principal_vec",
")",
"else",
":",
"matrix",
"=",
"np",
".",
"asarray",
"(",
"matrix",
",",
"dtype",
"=",
"float",
")",
"if",
"matrix",
".",
"shape",
"!=",
"(",
"ndim",
",",
"ndim",
")",
":",
"raise",
"ValueError",
"(",
"'matrix shape must be {}, got {}'",
"''",
".",
"format",
"(",
"(",
"ndim",
",",
"ndim",
")",
",",
"matrix",
".",
"shape",
")",
")",
"# Check matrix condition",
"svals",
"=",
"np",
".",
"linalg",
".",
"svd",
"(",
"matrix",
",",
"compute_uv",
"=",
"False",
")",
"condition",
"=",
"np",
".",
"inf",
"if",
"0.0",
"in",
"svals",
"else",
"svals",
"[",
"0",
"]",
"/",
"svals",
"[",
"-",
"1",
"]",
"if",
"condition",
">",
"1e6",
":",
"raise",
"np",
".",
"linalg",
".",
"LinAlgError",
"(",
"'matrix is badly conditioned: condition number is {}'",
"''",
".",
"format",
"(",
"condition",
")",
")",
"transformed_vecs",
".",
"append",
"(",
"matrix",
".",
"dot",
"(",
"principal_vec",
")",
")",
"for",
"vec",
"in",
"other_vecs",
":",
"if",
"vec",
"is",
"None",
":",
"transformed_vecs",
".",
"append",
"(",
"None",
")",
"else",
":",
"transformed_vecs",
".",
"append",
"(",
"matrix",
".",
"dot",
"(",
"vec",
")",
")",
"return",
"tuple",
"(",
"transformed_vecs",
")"
] | 40.773196 | 21.938144 |
def to_tf_matrix(expression_matrix,
                 gene_names,
                 tf_names):
    """
    Select the transcription-factor columns of an expression matrix.

    :param expression_matrix: numpy matrix. Rows are observations and
        columns are genes.
    :param gene_names: a list of gene names, one per column of
        expression_matrix (same order).
    :param tf_names: a list of transcription factor names; should be a
        subset of gene_names.
    :return: tuple of:
        0: the predictor sub-matrix (only TF columns, original order kept).
        1: the gene names corresponding to the columns of that sub-matrix.
    """
    columns = []
    names = []
    for position, gene in enumerate(gene_names):
        if gene in tf_names:
            columns.append(position)
            names.append(gene)
    return expression_matrix[:, columns], names
"def",
"to_tf_matrix",
"(",
"expression_matrix",
",",
"gene_names",
",",
"tf_names",
")",
":",
"tuples",
"=",
"[",
"(",
"index",
",",
"gene",
")",
"for",
"index",
",",
"gene",
"in",
"enumerate",
"(",
"gene_names",
")",
"if",
"gene",
"in",
"tf_names",
"]",
"tf_indices",
"=",
"[",
"t",
"[",
"0",
"]",
"for",
"t",
"in",
"tuples",
"]",
"tf_matrix_names",
"=",
"[",
"t",
"[",
"1",
"]",
"for",
"t",
"in",
"tuples",
"]",
"return",
"expression_matrix",
"[",
":",
",",
"tf_indices",
"]",
",",
"tf_matrix_names"
] | 45.722222 | 28.166667 |
def update_distant_reference(self, ref):
    """Validate ``ref`` and push the update to Zotero.

    Existing fields not present in ``ref`` are left unmodified on the
    remote side.
    """
    data = ref["data"]
    self.validate_reference_data(data)
    self._zotero_lib.update_item(ref)
"def",
"update_distant_reference",
"(",
"self",
",",
"ref",
")",
":",
"self",
".",
"validate_reference_data",
"(",
"ref",
"[",
"\"data\"",
"]",
")",
"self",
".",
"_zotero_lib",
".",
"update_item",
"(",
"ref",
")"
] | 36.571429 | 10 |
def gen_sites(path):
    """Seek sites by path: yield a Site for each directory (two levels
    deep) that passes Site's own validation; invalid ones are skipped."""
    for directory, _, _ in walklevel(path, 2):
        try:
            yield Site(directory)
        except AssertionError:
            # Not a valid site directory -- ignore it
            pass
continue | [
"def",
"gen_sites",
"(",
"path",
")",
":",
"for",
"root",
",",
"_",
",",
"_",
"in",
"walklevel",
"(",
"path",
",",
"2",
")",
":",
"try",
":",
"yield",
"Site",
"(",
"root",
")",
"except",
"AssertionError",
":",
"continue"
] | 22.25 | 18 |
def attrdict(prev, attr_names):
    """attrdict pipe: extract attribute values of each object into a dict.

    ``attr_names`` can be a list or a dict:

    - list: names that are not valid attributes of the object are simply
      excluded from the yielded dict.
    - dict: if a key is not an attribute of the object, the dict's value
      for that key is used as the fallback in the yielded dict.

    :param prev: The previous iterator of pipe.
    :type prev: Pipe
    :param attr_names: The list or dict of attribute names
    :type attr_names: str of list or dict
    :returns: generator
    """
    if isinstance(attr_names, dict):
        for item in prev:
            yield {
                key: getattr(item, key) if hasattr(item, key) else fallback
                for key, fallback in attr_names.items()
            }
    else:
        for item in prev:
            yield {
                key: getattr(item, key)
                for key in attr_names
                if hasattr(item, key)
            }
"def",
"attrdict",
"(",
"prev",
",",
"attr_names",
")",
":",
"if",
"isinstance",
"(",
"attr_names",
",",
"dict",
")",
":",
"for",
"obj",
"in",
"prev",
":",
"attr_values",
"=",
"dict",
"(",
")",
"for",
"name",
"in",
"attr_names",
".",
"keys",
"(",
")",
":",
"if",
"hasattr",
"(",
"obj",
",",
"name",
")",
":",
"attr_values",
"[",
"name",
"]",
"=",
"getattr",
"(",
"obj",
",",
"name",
")",
"else",
":",
"attr_values",
"[",
"name",
"]",
"=",
"attr_names",
"[",
"name",
"]",
"yield",
"attr_values",
"else",
":",
"for",
"obj",
"in",
"prev",
":",
"attr_values",
"=",
"dict",
"(",
")",
"for",
"name",
"in",
"attr_names",
":",
"if",
"hasattr",
"(",
"obj",
",",
"name",
")",
":",
"attr_values",
"[",
"name",
"]",
"=",
"getattr",
"(",
"obj",
",",
"name",
")",
"yield",
"attr_values"
] | 35.787879 | 16.363636 |
def set_rectangle(self, rectangle):
    """
    Set the rectangle for the selection.

    :param rectangle: new extent to display, or None to clear.
    :return: True if the selection changed, False if it was already
        equal to ``rectangle``.
    """
    if rectangle == self.rectangle():
        # Nothing to do -- same selection as before
        return False
    if rectangle is None:
        self.reset()
        return True
    self.start_point = QgsPointXY(
        rectangle.xMinimum(), rectangle.yMinimum())
    self.end_point = QgsPointXY(
        rectangle.xMaximum(), rectangle.yMaximum())
    self.show_rectangle(self.start_point, self.end_point)
    return True
"def",
"set_rectangle",
"(",
"self",
",",
"rectangle",
")",
":",
"if",
"rectangle",
"==",
"self",
".",
"rectangle",
"(",
")",
":",
"return",
"False",
"if",
"rectangle",
"is",
"None",
":",
"self",
".",
"reset",
"(",
")",
"else",
":",
"self",
".",
"start_point",
"=",
"QgsPointXY",
"(",
"rectangle",
".",
"xMinimum",
"(",
")",
",",
"rectangle",
".",
"yMinimum",
"(",
")",
")",
"self",
".",
"end_point",
"=",
"QgsPointXY",
"(",
"rectangle",
".",
"xMaximum",
"(",
")",
",",
"rectangle",
".",
"yMaximum",
"(",
")",
")",
"self",
".",
"show_rectangle",
"(",
"self",
".",
"start_point",
",",
"self",
".",
"end_point",
")",
"return",
"True"
] | 30.944444 | 13.611111 |
def image_groups_download(self, image_group_id):
    """Get data file for image group with given identifier.

    Parameters
    ----------
    image_group_id : string
        Unique image group identifier

    Returns
    -------
    FileInfo
        Information about the image group archive file on disk, or None
        if the identifier is unknown
    """
    # Look the group up first; an unknown id yields None
    group = self.image_groups_get(image_group_id)
    if group is None:
        return None
    # Reference and information for the file the group was created from
    return FileInfo(
        group.data_file,
        group.properties[datastore.PROPERTY_MIMETYPE],
        group.properties[datastore.PROPERTY_FILENAME],
    )
"def",
"image_groups_download",
"(",
"self",
",",
"image_group_id",
")",
":",
"# Retrieve image group to ensure that it exist",
"img_grp",
"=",
"self",
".",
"image_groups_get",
"(",
"image_group_id",
")",
"if",
"img_grp",
"is",
"None",
":",
"# Return None if image group is unknown",
"return",
"None",
"else",
":",
"# Reference and information for file image group was created from",
"return",
"FileInfo",
"(",
"img_grp",
".",
"data_file",
",",
"img_grp",
".",
"properties",
"[",
"datastore",
".",
"PROPERTY_MIMETYPE",
"]",
",",
"img_grp",
".",
"properties",
"[",
"datastore",
".",
"PROPERTY_FILENAME",
"]",
")"
] | 34 | 18.538462 |
def query_int_attribute(self, target, display_mask, attr):
    """Return the value of an integer attribute, or None if the reply's
    flags indicate the attribute is unavailable."""
    reply = NVCtrlQueryAttributeReplyRequest(
        display=self.display,
        opcode=self.display.get_extension_major(extname),
        target_id=target.id(),
        target_type=target.type(),
        display_mask=display_mask,
        attr=attr,
    )
    if reply._data.get('flags'):
        return int(reply._data.get('value'))
    return None
"def",
"query_int_attribute",
"(",
"self",
",",
"target",
",",
"display_mask",
",",
"attr",
")",
":",
"reply",
"=",
"NVCtrlQueryAttributeReplyRequest",
"(",
"display",
"=",
"self",
".",
"display",
",",
"opcode",
"=",
"self",
".",
"display",
".",
"get_extension_major",
"(",
"extname",
")",
",",
"target_id",
"=",
"target",
".",
"id",
"(",
")",
",",
"target_type",
"=",
"target",
".",
"type",
"(",
")",
",",
"display_mask",
"=",
"display_mask",
",",
"attr",
"=",
"attr",
")",
"if",
"not",
"reply",
".",
"_data",
".",
"get",
"(",
"'flags'",
")",
":",
"return",
"None",
"return",
"int",
"(",
"reply",
".",
"_data",
".",
"get",
"(",
"'value'",
")",
")"
] | 57 | 20.636364 |
def decode(self, binSequence):
    """Decode a binary sequence and return the corresponding string.

    Each integer in the sequence is tested against the bit masks in
    ``self.forma`` (indexed through ``self.charToBin``); every character
    whose mask bit is set is emitted, multiple matches being joined
    with '/'.

    :param binSequence: an iterable of integers, or a sequence whose
        first element is itself such an iterable (it is unwrapped).
    :raises KeyError: if a value matches no known character mask.
    """
    # ``binSequence`` may be wrapped in an outer sequence; unwrap when
    # its first element is iterable.
    # Bug fix: ``except TypeError, te`` is Python-2-only syntax (and the
    # bound name was unused); ``except TypeError`` works on 2.6+ and 3.x.
    try:
        bin_seq = iter(binSequence[0])
    except TypeError:
        bin_seq = binSequence
    decoded = ''
    for value in bin_seq:
        matches = ''
        for char in self.charToBin:
            # '&' binds tighter than '>', parentheses added for clarity
            if (value & self.forma[self.charToBin[char]]) > 0:
                matches += char + '/'
        if matches == '':
            # Fixed typo in the original message ("unkowom")
            raise KeyError('Key %d unknown, bad format' % value)
        decoded += matches[:-1]
    return decoded
"def",
"decode",
"(",
"self",
",",
"binSequence",
")",
":",
"try",
":",
"binSeq",
"=",
"iter",
"(",
"binSequence",
"[",
"0",
"]",
")",
"except",
"TypeError",
",",
"te",
":",
"binSeq",
"=",
"binSequence",
"ret",
"=",
"''",
"for",
"b",
"in",
"binSeq",
":",
"ch",
"=",
"''",
"for",
"c",
"in",
"self",
".",
"charToBin",
":",
"if",
"b",
"&",
"self",
".",
"forma",
"[",
"self",
".",
"charToBin",
"[",
"c",
"]",
"]",
">",
"0",
":",
"ch",
"+=",
"c",
"+",
"'/'",
"if",
"ch",
"==",
"''",
":",
"raise",
"KeyError",
"(",
"'Key %d unkowom, bad format'",
"%",
"b",
")",
"ret",
"+=",
"ch",
"[",
":",
"-",
"1",
"]",
"return",
"ret"
] | 22.222222 | 20.444444 |
def pause(self):
    """Pauses a playing animation. A subsequent call to play will continue where it left off."""
    # Only a PLAYING animation has anything to do; STOPPED and PAUSED
    # states are left untouched.
    if self.state == PygAnimation.PLAYING:
        # Remember how far we got so play() can resume from here.
        self.elapsedAtPause = self.elapsed
        self.state = PygAnimation.PAUSED
"def",
"pause",
"(",
"self",
")",
":",
"if",
"self",
".",
"state",
"==",
"PygAnimation",
".",
"PLAYING",
":",
"self",
".",
"elapsedAtPause",
"=",
"self",
".",
"elapsed",
"# only change state if it was playing\r",
"self",
".",
"state",
"=",
"PygAnimation",
".",
"PAUSED",
"elif",
"self",
".",
"state",
"==",
"PygAnimation",
".",
"STOPPED",
":",
"pass",
"# nothing to do\r",
"elif",
"self",
".",
"state",
"==",
"PygAnimation",
".",
"PAUSED",
":",
"pass"
] | 38.083333 | 14.75 |
def mark(self, scope='process'):
    """Set up the profiler state to record operator.

    Parameters
    ----------
    scope : string, optional
        Indicates what scope the marker should refer to.
        Can be 'global', 'process', 'thread', 'task', and 'marker'.
        Default is `process`.
    """
    handle = self.domain.handle
    check_call(_LIB.MXProfileSetMarker(handle, c_str(self.name), c_str(scope)))
"def",
"mark",
"(",
"self",
",",
"scope",
"=",
"'process'",
")",
":",
"check_call",
"(",
"_LIB",
".",
"MXProfileSetMarker",
"(",
"self",
".",
"domain",
".",
"handle",
",",
"c_str",
"(",
"self",
".",
"name",
")",
",",
"c_str",
"(",
"scope",
")",
")",
")"
] | 38.454545 | 19.090909 |
def write_word_at(self, index: int, value: Union[int, BitVec, bool, Bool]) -> None:
    """Write a 32-byte word to memory at the specified index.

    Concrete values (ints/bools) are stored as 32 raw bytes in one slice
    assignment; symbolic values (z3 BitVec/Bool) fall back to storing 32
    byte-sized Extract expressions, one memory cell at a time.

    :param index: start index to write to (word occupies index..index+31)
    :param value: the value to write to memory
    """
    try:
        # Attempt to concretize value: bools become the 32-byte big-endian
        # encoding of 0/1, ints go through the project's converter.
        if isinstance(value, bool):
            _bytes = (
                int(1).to_bytes(32, byteorder="big")
                if value
                else int(0).to_bytes(32, byteorder="big")
            )
        else:
            _bytes = util.concrete_int_to_bytes(value)
        assert len(_bytes) == 32
        self[index : index + 32] = list(bytearray(_bytes))
    except (Z3Exception, AttributeError):  # BitVector or BoolRef
        # Symbolic path: a Bool is first lifted to a 256-bit 0/1 BitVec.
        value = cast(Union[BitVec, Bool], value)
        if isinstance(value, Bool):
            value_to_write = If(
                value,
                symbol_factory.BitVecVal(1, 256),
                symbol_factory.BitVecVal(0, 256),
            )
        else:
            value_to_write = value
        assert value_to_write.size() == 256
        # Store big-endian: bit range [i+7, i] lands at index + 31 - i//8.
        for i in range(0, value_to_write.size(), 8):
            self[index + 31 - (i // 8)] = Extract(i + 7, i, value_to_write)
"def",
"write_word_at",
"(",
"self",
",",
"index",
":",
"int",
",",
"value",
":",
"Union",
"[",
"int",
",",
"BitVec",
",",
"bool",
",",
"Bool",
"]",
")",
"->",
"None",
":",
"try",
":",
"# Attempt to concretize value",
"if",
"isinstance",
"(",
"value",
",",
"bool",
")",
":",
"_bytes",
"=",
"(",
"int",
"(",
"1",
")",
".",
"to_bytes",
"(",
"32",
",",
"byteorder",
"=",
"\"big\"",
")",
"if",
"value",
"else",
"int",
"(",
"0",
")",
".",
"to_bytes",
"(",
"32",
",",
"byteorder",
"=",
"\"big\"",
")",
")",
"else",
":",
"_bytes",
"=",
"util",
".",
"concrete_int_to_bytes",
"(",
"value",
")",
"assert",
"len",
"(",
"_bytes",
")",
"==",
"32",
"self",
"[",
"index",
":",
"index",
"+",
"32",
"]",
"=",
"list",
"(",
"bytearray",
"(",
"_bytes",
")",
")",
"except",
"(",
"Z3Exception",
",",
"AttributeError",
")",
":",
"# BitVector or BoolRef",
"value",
"=",
"cast",
"(",
"Union",
"[",
"BitVec",
",",
"Bool",
"]",
",",
"value",
")",
"if",
"isinstance",
"(",
"value",
",",
"Bool",
")",
":",
"value_to_write",
"=",
"If",
"(",
"value",
",",
"symbol_factory",
".",
"BitVecVal",
"(",
"1",
",",
"256",
")",
",",
"symbol_factory",
".",
"BitVecVal",
"(",
"0",
",",
"256",
")",
",",
")",
"else",
":",
"value_to_write",
"=",
"value",
"assert",
"value_to_write",
".",
"size",
"(",
")",
"==",
"256",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"value_to_write",
".",
"size",
"(",
")",
",",
"8",
")",
":",
"self",
"[",
"index",
"+",
"31",
"-",
"(",
"i",
"//",
"8",
")",
"]",
"=",
"Extract",
"(",
"i",
"+",
"7",
",",
"i",
",",
"value_to_write",
")"
] | 40.125 | 16.03125 |
def change_last_focused_widget(self, old, now):
    """Keep track of the last (and previously) focused widget."""
    if now is not None:
        self.last_focused_widget = now
    elif QApplication.activeWindow() is not None:
        # Focus left the application: re-focus the active window and
        # record whichever widget ends up holding focus.
        QApplication.activeWindow().setFocus()
        self.last_focused_widget = QApplication.focusWidget()
    self.previous_focused_widget = old
"def",
"change_last_focused_widget",
"(",
"self",
",",
"old",
",",
"now",
")",
":",
"if",
"(",
"now",
"is",
"None",
"and",
"QApplication",
".",
"activeWindow",
"(",
")",
"is",
"not",
"None",
")",
":",
"QApplication",
".",
"activeWindow",
"(",
")",
".",
"setFocus",
"(",
")",
"self",
".",
"last_focused_widget",
"=",
"QApplication",
".",
"focusWidget",
"(",
")",
"elif",
"now",
"is",
"not",
"None",
":",
"self",
".",
"last_focused_widget",
"=",
"now",
"self",
".",
"previous_focused_widget",
"=",
"old"
] | 45.555556 | 14.444444 |
def from_requirement(cls, provider, requirement, parent):
    """Build an instance from a single requirement.

    :raises NoVersionsAvailable: if the provider finds no candidates.
    """
    matches = provider.find_matches(requirement)
    if not matches:
        raise NoVersionsAvailable(requirement, parent)
    info = RequirementInformation(requirement, parent)
    return cls(candidates=matches, information=[info])
"def",
"from_requirement",
"(",
"cls",
",",
"provider",
",",
"requirement",
",",
"parent",
")",
":",
"candidates",
"=",
"provider",
".",
"find_matches",
"(",
"requirement",
")",
"if",
"not",
"candidates",
":",
"raise",
"NoVersionsAvailable",
"(",
"requirement",
",",
"parent",
")",
"return",
"cls",
"(",
"candidates",
"=",
"candidates",
",",
"information",
"=",
"[",
"RequirementInformation",
"(",
"requirement",
",",
"parent",
")",
"]",
",",
")"
] | 38.7 | 15.2 |
def delete_entry(sender, instance, **kwargs):
    """
    Signal handler that deletes the Entry corresponding to ``instance``.

    :param sender: the sending class.
    :param instance: the instance being deleted.
    """
    # Imported lazily to avoid a circular import at module load time.
    from ..models import Entry
    entry = Entry.objects.get_for_model(instance)[0]
    entry.delete()
"def",
"delete_entry",
"(",
"sender",
",",
"instance",
",",
"*",
"*",
"kwargs",
")",
":",
"from",
".",
".",
"models",
"import",
"Entry",
"Entry",
".",
"objects",
".",
"get_for_model",
"(",
"instance",
")",
"[",
"0",
"]",
".",
"delete",
"(",
")"
] | 29 | 14.2 |
def get_compile_mode(node):
    """
    Get the mode for `compile` of a given node. If the node is not a `mod`
    node (`Expression`, `Module` etc.) a `TypeError` is thrown.
    """
    if not isinstance(node, mod):
        raise TypeError('expected mod node, got %r' % node.__class__.__name__)
    # compile() only accepts 'exec', 'eval' and 'single'. Bug fix: the
    # original default was 'expr', which compile() rejects; any mod
    # subtype other than Expression/Interactive (i.e. Module) must
    # compile with 'exec'.
    return {
        Expression: 'eval',
        Interactive: 'single'
    }.get(node.__class__, 'exec')
"def",
"get_compile_mode",
"(",
"node",
")",
":",
"if",
"not",
"isinstance",
"(",
"node",
",",
"mod",
")",
":",
"raise",
"TypeError",
"(",
"'expected mod node, got %r'",
"%",
"node",
".",
"__class__",
".",
"__name__",
")",
"return",
"{",
"Expression",
":",
"'eval'",
",",
"Interactive",
":",
"'single'",
"}",
".",
"get",
"(",
"node",
".",
"__class__",
",",
"'expr'",
")"
] | 35.545455 | 15.909091 |
def _refined_glimpse_sensor(self, x_t, l_p):
    """
    Extract a 7x7 glimpse patch from the image around the focus point.

    Parameters:
        x_t - 28x28 image (symbolic theano tensor)
        l_p - 2x1 focus vector, presumably in [-1, 1] image coordinates
    Returns:
        7x7 patch of x_t
        # NOTE(review): the docstring originally claimed 7*14 -- that
        # matches the commented-out concatenation with a downsampled
        # glimpse below; with it disabled the result is 7x7. Confirm.
    """
    # Turn l_p into the top-left corner of the 7x7 crop window:
    # scale from [-1, 1] to pixel space, then shift by half the window.
    l_p = l_p * 14 + 14 - 4
    l_p = T.cast(T.round(l_p), "int32")
    # Clamp the corner into [0, 20] so the 7x7 window stays inside 28x28
    # (elementwise arithmetic used instead of clip on the int tensor).
    l_p = l_p * (l_p >= 0)
    l_p = l_p * (l_p < 21) + (l_p >= 21) * 20
    glimpse_1 = x_t[l_p[0]: l_p[0] + 7][:, l_p[1]: l_p[1] + 7]
    # glimpse_2 = theano.tensor.signal.downsample.max_pool_2d(x_t, (4,4))
    # return T.concatenate([glimpse_1, glimpse_2])
    return glimpse_1
"def",
"_refined_glimpse_sensor",
"(",
"self",
",",
"x_t",
",",
"l_p",
")",
":",
"# Turn l_p to the left-top point of rectangle",
"l_p",
"=",
"l_p",
"*",
"14",
"+",
"14",
"-",
"4",
"l_p",
"=",
"T",
".",
"cast",
"(",
"T",
".",
"round",
"(",
"l_p",
")",
",",
"\"int32\"",
")",
"l_p",
"=",
"l_p",
"*",
"(",
"l_p",
">=",
"0",
")",
"l_p",
"=",
"l_p",
"*",
"(",
"l_p",
"<",
"21",
")",
"+",
"(",
"l_p",
">=",
"21",
")",
"*",
"20",
"glimpse_1",
"=",
"x_t",
"[",
"l_p",
"[",
"0",
"]",
":",
"l_p",
"[",
"0",
"]",
"+",
"7",
"]",
"[",
":",
",",
"l_p",
"[",
"1",
"]",
":",
"l_p",
"[",
"1",
"]",
"+",
"7",
"]",
"# glimpse_2 = theano.tensor.signal.downsample.max_pool_2d(x_t, (4,4))",
"# return T.concatenate([glimpse_1, glimpse_2])",
"return",
"glimpse_1"
] | 34.111111 | 14.444444 |
def update(self, path, node):
    '''Update the dict with a new color using a 'path' through the dict.

    You can either pass an existing path e.g. 'Scaffold.mutations' to
    override a color or part of the hierarchy, or you can add a new leaf
    node or dict.

    :param path: dot-separated path into the color scheme.
    :param node: replacement value -- a color string or a nested dict.
    :raises Exception: if an intermediate path component does not exist.
    '''
    assert(type(path) == type(self.name))
    assert(type(node) == type(self.name) or type(node) == type(predefined))
    d = self.color_scheme
    tokens = path.split('.')
    for t in tokens[:-1]:
        d = d.get(t)
        # 'is None' instead of '== None'; bug fix below: the original
        # message never applied the '%' operand, so the placeholder was
        # left unfilled in the raised exception.
        if d is None:
            raise Exception("Path '%s' not found." % path)
    d[tokens[-1]] = node
"def",
"update",
"(",
"self",
",",
"path",
",",
"node",
")",
":",
"assert",
"(",
"type",
"(",
"path",
")",
"==",
"type",
"(",
"self",
".",
"name",
")",
")",
"assert",
"(",
"type",
"(",
"node",
")",
"==",
"type",
"(",
"self",
".",
"name",
")",
"or",
"type",
"(",
"node",
")",
"==",
"type",
"(",
"predefined",
")",
")",
"d",
"=",
"self",
".",
"color_scheme",
"tokens",
"=",
"path",
".",
"split",
"(",
"'.'",
")",
"for",
"t",
"in",
"tokens",
"[",
":",
"-",
"1",
"]",
":",
"d",
"=",
"d",
".",
"get",
"(",
"t",
")",
"if",
"d",
"==",
"None",
":",
"raise",
"Exception",
"(",
"\"Path '%s' not found.\"",
")",
"d",
"[",
"tokens",
"[",
"-",
"1",
"]",
"]",
"=",
"node"
] | 47.076923 | 26.153846 |
def _process_hdu (self, hdu):
    """Process one HDU, skipping 'EVENTS'.

    The load order is hacked a bit so that t0 and mjd0 are obtained in
    _process_main() instead (see original note).
    """
    if hdu.name != 'EVENTS':
        super (Events, self)._process_hdu (hdu)
"def",
"_process_hdu",
"(",
"self",
",",
"hdu",
")",
":",
"if",
"hdu",
".",
"name",
"==",
"'EVENTS'",
":",
"pass",
"else",
":",
"super",
"(",
"Events",
",",
"self",
")",
".",
"_process_hdu",
"(",
"hdu",
")"
] | 31.857143 | 23.285714 |
def page(self, number):
    """
    Return a Page object for the given 1-based page number.

    :raises EmptyPage: if the page has no results, unless it is page 1
        and empty first pages are allowed.
    """
    number = self.validate_number(number)
    start = (number - 1) * self.per_page
    end = start + self.per_page
    items = self.object_list[start:end]
    # Emptiness check lives here (moved from validate_number): only an
    # allowed empty first page may be empty.
    if not items and not (number == 1 and self.allow_empty_first_page):
        raise EmptyPage('That page contains no results')
    return InfinitePage(items, number, self)
"def",
"page",
"(",
"self",
",",
"number",
")",
":",
"number",
"=",
"self",
".",
"validate_number",
"(",
"number",
")",
"bottom",
"=",
"(",
"number",
"-",
"1",
")",
"*",
"self",
".",
"per_page",
"top",
"=",
"bottom",
"+",
"self",
".",
"per_page",
"page_items",
"=",
"self",
".",
"object_list",
"[",
"bottom",
":",
"top",
"]",
"# check moved from validate_number",
"if",
"not",
"page_items",
":",
"if",
"number",
"==",
"1",
"and",
"self",
".",
"allow_empty_first_page",
":",
"pass",
"else",
":",
"raise",
"EmptyPage",
"(",
"'That page contains no results'",
")",
"return",
"InfinitePage",
"(",
"page_items",
",",
"number",
",",
"self",
")"
] | 37.666667 | 11.933333 |
def search_index_path(self, index=None, **options):
    """
    Builds a Yokozuna search index URL.

    :param index: optional name of a yz index
    :type index: string
    :param options: optional list of additional arguments
    :type options: dict
    :rtype: URL string
    :raises RiakError: if this Riak node does not support Yokozuna search.
    """
    if not self.yz_wm_index:
        raise RiakError("Yokozuna search is unsupported by this Riak node")
    if index:
        # Bug fix: the quoted result was previously discarded, so the
        # index name was never actually URL-escaped in the path.
        index = quote_plus(index)
    return mkpath(self.yz_wm_index, "index", index, **options)
"def",
"search_index_path",
"(",
"self",
",",
"index",
"=",
"None",
",",
"*",
"*",
"options",
")",
":",
"if",
"not",
"self",
".",
"yz_wm_index",
":",
"raise",
"RiakError",
"(",
"\"Yokozuna search is unsupported by this Riak node\"",
")",
"if",
"index",
":",
"quote_plus",
"(",
"index",
")",
"return",
"mkpath",
"(",
"self",
".",
"yz_wm_index",
",",
"\"index\"",
",",
"index",
",",
"*",
"*",
"options",
")"
] | 35.066667 | 15.6 |
def weighted_sum(groupe, var):
    '''
    Compute the sum of the column ``var`` weighted by the group's
    'pondmen' column (household weights).
    '''
    values = groupe[var]
    poids = groupe['pondmen']
    return (values * poids).sum()
"def",
"weighted_sum",
"(",
"groupe",
",",
"var",
")",
":",
"data",
"=",
"groupe",
"[",
"var",
"]",
"weights",
"=",
"groupe",
"[",
"'pondmen'",
"]",
"return",
"(",
"data",
"*",
"weights",
")",
".",
"sum",
"(",
")"
] | 28.571429 | 20 |
def launchd(
    state, host, name,
    running=True, restarted=False, command=None,
):
    '''
    Manage the state of launchd managed services.

    + name: name of the service to manage
    + running: whether the service should be running
    + restarted: whether the service should be restarted
    + command: accepted for API symmetry but not supported by launchctl
    '''
    yield _handle_service_control(
        name, host.fact.launchd_status,
        'launchctl {1} {0}',
        # launchctl supports no restart/reload/custom command
        running, None, None, None,
    )
    # launchctl has no one-shot restart; emulate it with stop + start,
    # but only if the service is currently up.
    currently_up = host.fact.launchd_status.get(name, None)
    if restarted and currently_up:
        yield 'launchctl stop {0}'.format(name)
        yield 'launchctl start {0}'.format(name)
"def",
"launchd",
"(",
"state",
",",
"host",
",",
"name",
",",
"running",
"=",
"True",
",",
"restarted",
"=",
"False",
",",
"command",
"=",
"None",
",",
")",
":",
"yield",
"_handle_service_control",
"(",
"name",
",",
"host",
".",
"fact",
".",
"launchd_status",
",",
"'launchctl {1} {0}'",
",",
"# No support for restart/reload/command",
"running",
",",
"None",
",",
"None",
",",
"None",
",",
")",
"# No restart command, so just stop/start",
"is_running",
"=",
"host",
".",
"fact",
".",
"launchd_status",
".",
"get",
"(",
"name",
",",
"None",
")",
"if",
"restarted",
"and",
"is_running",
":",
"yield",
"'launchctl stop {0}'",
".",
"format",
"(",
"name",
")",
"yield",
"'launchctl start {0}'",
".",
"format",
"(",
"name",
")"
] | 34.37037 | 19.407407 |
def calculate(self, T, method):
    r'''Method to calculate low-pressure gas thermal conductivity at
    temperature `T` with a given method.

    This method has no exception handling; see `T_dependent_property`
    for that.

    Parameters
    ----------
    T : float
        Temperature of the gas, [K]
    method : str
        Name of the method to use

    Returns
    -------
    kg : float
        Thermal conductivity of the gas at T and a low pressure, [W/m/K]
    '''
    # Several correlations need Cvgm (molar heat capacity) and/or mug
    # (gas viscosity). Both may be stored either as plain floats or as
    # T-dependent callables, hence the hasattr(..., '__call__') checks
    # before each use.
    if method == GHARAGHEIZI_G:
        kg = Gharagheizi_gas(T, self.MW, self.Tb, self.Pc, self.omega)
    elif method == DIPPR_9B:
        Cvgm = self.Cvgm(T) if hasattr(self.Cvgm, '__call__') else self.Cvgm
        mug = self.mug(T) if hasattr(self.mug, '__call__') else self.mug
        kg = DIPPR9B(T, self.MW, Cvgm, mug, self.Tc)
    elif method == CHUNG:
        Cvgm = self.Cvgm(T) if hasattr(self.Cvgm, '__call__') else self.Cvgm
        mug = self.mug(T) if hasattr(self.mug, '__call__') else self.mug
        kg = Chung(T, self.MW, self.Tc, self.omega, Cvgm, mug)
    elif method == ELI_HANLEY:
        Cvgm = self.Cvgm(T) if hasattr(self.Cvgm, '__call__') else self.Cvgm
        kg = eli_hanley(T, self.MW, self.Tc, self.Vc, self.Zc, self.omega, Cvgm)
    elif method == EUCKEN_MOD:
        Cvgm = self.Cvgm(T) if hasattr(self.Cvgm, '__call__') else self.Cvgm
        mug = self.mug(T) if hasattr(self.mug, '__call__') else self.mug
        kg = Eucken_modified(self.MW, Cvgm, mug)
    elif method == EUCKEN:
        Cvgm = self.Cvgm(T) if hasattr(self.Cvgm, '__call__') else self.Cvgm
        mug = self.mug(T) if hasattr(self.mug, '__call__') else self.mug
        kg = Eucken(self.MW, Cvgm, mug)
    elif method == DIPPR_PERRY_8E:
        kg = EQ102(T, *self.Perrys2_314_coeffs)
    elif method == VDI_PPDS:
        # VDI PPDS stores plain polynomial coefficients evaluated by Horner
        kg = horner(self.VDI_PPDS_coeffs, T)
    elif method == BAHADORI_G:
        kg = Bahadori_gas(T, self.MW)
    elif method == COOLPROP:
        kg = CoolProp_T_dependent_property(T, self.CASRN, 'L', 'g')
    elif method in self.tabular_data:
        # User-supplied tabular data: interpolate between points
        kg = self.interpolate(T, method)
    # NOTE(review): a method matching none of the branches leaves ``kg``
    # unbound and this return raises NameError -- presumably unreachable
    # for valid method names; confirm against the caller's validation.
    return kg
"def",
"calculate",
"(",
"self",
",",
"T",
",",
"method",
")",
":",
"if",
"method",
"==",
"GHARAGHEIZI_G",
":",
"kg",
"=",
"Gharagheizi_gas",
"(",
"T",
",",
"self",
".",
"MW",
",",
"self",
".",
"Tb",
",",
"self",
".",
"Pc",
",",
"self",
".",
"omega",
")",
"elif",
"method",
"==",
"DIPPR_9B",
":",
"Cvgm",
"=",
"self",
".",
"Cvgm",
"(",
"T",
")",
"if",
"hasattr",
"(",
"self",
".",
"Cvgm",
",",
"'__call__'",
")",
"else",
"self",
".",
"Cvgm",
"mug",
"=",
"self",
".",
"mug",
"(",
"T",
")",
"if",
"hasattr",
"(",
"self",
".",
"mug",
",",
"'__call__'",
")",
"else",
"self",
".",
"mug",
"kg",
"=",
"DIPPR9B",
"(",
"T",
",",
"self",
".",
"MW",
",",
"Cvgm",
",",
"mug",
",",
"self",
".",
"Tc",
")",
"elif",
"method",
"==",
"CHUNG",
":",
"Cvgm",
"=",
"self",
".",
"Cvgm",
"(",
"T",
")",
"if",
"hasattr",
"(",
"self",
".",
"Cvgm",
",",
"'__call__'",
")",
"else",
"self",
".",
"Cvgm",
"mug",
"=",
"self",
".",
"mug",
"(",
"T",
")",
"if",
"hasattr",
"(",
"self",
".",
"mug",
",",
"'__call__'",
")",
"else",
"self",
".",
"mug",
"kg",
"=",
"Chung",
"(",
"T",
",",
"self",
".",
"MW",
",",
"self",
".",
"Tc",
",",
"self",
".",
"omega",
",",
"Cvgm",
",",
"mug",
")",
"elif",
"method",
"==",
"ELI_HANLEY",
":",
"Cvgm",
"=",
"self",
".",
"Cvgm",
"(",
"T",
")",
"if",
"hasattr",
"(",
"self",
".",
"Cvgm",
",",
"'__call__'",
")",
"else",
"self",
".",
"Cvgm",
"kg",
"=",
"eli_hanley",
"(",
"T",
",",
"self",
".",
"MW",
",",
"self",
".",
"Tc",
",",
"self",
".",
"Vc",
",",
"self",
".",
"Zc",
",",
"self",
".",
"omega",
",",
"Cvgm",
")",
"elif",
"method",
"==",
"EUCKEN_MOD",
":",
"Cvgm",
"=",
"self",
".",
"Cvgm",
"(",
"T",
")",
"if",
"hasattr",
"(",
"self",
".",
"Cvgm",
",",
"'__call__'",
")",
"else",
"self",
".",
"Cvgm",
"mug",
"=",
"self",
".",
"mug",
"(",
"T",
")",
"if",
"hasattr",
"(",
"self",
".",
"mug",
",",
"'__call__'",
")",
"else",
"self",
".",
"mug",
"kg",
"=",
"Eucken_modified",
"(",
"self",
".",
"MW",
",",
"Cvgm",
",",
"mug",
")",
"elif",
"method",
"==",
"EUCKEN",
":",
"Cvgm",
"=",
"self",
".",
"Cvgm",
"(",
"T",
")",
"if",
"hasattr",
"(",
"self",
".",
"Cvgm",
",",
"'__call__'",
")",
"else",
"self",
".",
"Cvgm",
"mug",
"=",
"self",
".",
"mug",
"(",
"T",
")",
"if",
"hasattr",
"(",
"self",
".",
"mug",
",",
"'__call__'",
")",
"else",
"self",
".",
"mug",
"kg",
"=",
"Eucken",
"(",
"self",
".",
"MW",
",",
"Cvgm",
",",
"mug",
")",
"elif",
"method",
"==",
"DIPPR_PERRY_8E",
":",
"kg",
"=",
"EQ102",
"(",
"T",
",",
"*",
"self",
".",
"Perrys2_314_coeffs",
")",
"elif",
"method",
"==",
"VDI_PPDS",
":",
"kg",
"=",
"horner",
"(",
"self",
".",
"VDI_PPDS_coeffs",
",",
"T",
")",
"elif",
"method",
"==",
"BAHADORI_G",
":",
"kg",
"=",
"Bahadori_gas",
"(",
"T",
",",
"self",
".",
"MW",
")",
"elif",
"method",
"==",
"COOLPROP",
":",
"kg",
"=",
"CoolProp_T_dependent_property",
"(",
"T",
",",
"self",
".",
"CASRN",
",",
"'L'",
",",
"'g'",
")",
"elif",
"method",
"in",
"self",
".",
"tabular_data",
":",
"kg",
"=",
"self",
".",
"interpolate",
"(",
"T",
",",
"method",
")",
"return",
"kg"
] | 43.980392 | 21.117647 |
def reference_pix_from_wcs_imgs(imgs, pixref, origin=1):
    """Compute reference pixels between frames using WCS information.

    The sky world coordinates are computed on *pixref* using
    the WCS of the first frame in the sequence. Then, the
    pixel coordinates of the reference sky world-coordinates
    are computed for the rest of the frames.

    Parameters
    ----------
    imgs : sequence of HDUList-like objects
        Frames; ``imgs[0]`` provides the reference WCS.
    pixref : tuple
        Pixel coordinates of the reference point in the first frame.
    origin : int, optional
        Origin convention passed to the WCS conversions (1 = FITS).

    Returns
    -------
    list
        The position of the reference pixel in each image; the first
        entry is *pixref* itself.
    """
    result = [pixref]
    refimg = imgs[0]
    wcsh = wcs.WCS(refimg[0].header)
    skyref = wcsh.wcs_pix2world([pixref], origin)
    # The original enumerated imgs[1:] but never used the index.
    for img in imgs[1:]:
        wcsh = wcs.WCS(img[0].header)
        pixval = wcsh.wcs_world2pix(skyref, origin)
        result.append(tuple(pixval[0]))
    return result
"def",
"reference_pix_from_wcs_imgs",
"(",
"imgs",
",",
"pixref",
",",
"origin",
"=",
"1",
")",
":",
"result",
"=",
"[",
"]",
"refimg",
"=",
"imgs",
"[",
"0",
"]",
"wcsh",
"=",
"wcs",
".",
"WCS",
"(",
"refimg",
"[",
"0",
"]",
".",
"header",
")",
"skyref",
"=",
"wcsh",
".",
"wcs_pix2world",
"(",
"[",
"pixref",
"]",
",",
"origin",
")",
"result",
".",
"append",
"(",
"pixref",
")",
"for",
"idx",
",",
"img",
"in",
"enumerate",
"(",
"imgs",
"[",
"1",
":",
"]",
")",
":",
"wcsh",
"=",
"wcs",
".",
"WCS",
"(",
"img",
"[",
"0",
"]",
".",
"header",
")",
"pixval",
"=",
"wcsh",
".",
"wcs_world2pix",
"(",
"skyref",
",",
"origin",
")",
"result",
".",
"append",
"(",
"tuple",
"(",
"pixval",
"[",
"0",
"]",
")",
")",
"return",
"result"
] | 28.333333 | 20.62963 |
def validate_checksum(filename, md5sum):
    """Compare the md5 checksum of a file against an expected value.

    If the filename `foo` is not found, a gzipped file named `foo.gz`
    is tried instead; the checksum is calculated for the unzipped data.

    Args:
        filename (str): Path for the file to be checksummed.
        md5sum (str): The expected hex checksum.

    Returns:
        None

    Raises:
        ValueError: if the calculated and expected checksums differ.
    """
    resolved = match_filename(filename)
    actual_hash = file_md5(filename=resolved)
    if actual_hash == md5sum:
        return
    raise ValueError('md5 checksums are inconsistent: {}'.format(resolved))
"def",
"validate_checksum",
"(",
"filename",
",",
"md5sum",
")",
":",
"filename",
"=",
"match_filename",
"(",
"filename",
")",
"md5_hash",
"=",
"file_md5",
"(",
"filename",
"=",
"filename",
")",
"if",
"md5_hash",
"!=",
"md5sum",
":",
"raise",
"ValueError",
"(",
"'md5 checksums are inconsistent: {}'",
".",
"format",
"(",
"filename",
")",
")"
] | 36.736842 | 20.842105 |
def readchunk(self):
    """Reads a chunk at a time. If the current position is within a
    chunk the remainder of the chunk is returned.

    Raises CorruptGridFile if a chunk yields no data even though the
    file position indicates more bytes remain.
    """
    # Bytes buffered from a previous partial read of the current chunk.
    received = len(self.__buffer)
    chunk_data = EMPTY
    chunk_size = int(self.chunk_size)
    if received > 0:
        # Serve the buffered remainder of the current chunk first.
        chunk_data = self.__buffer
    elif self.__position < int(self.length):
        # Index of the chunk that contains the current file position.
        chunk_number = int((received + self.__position) / chunk_size)
        if self.__chunk_iter is None:
            # Lazily create the chunk cursor, starting at chunk_number.
            self.__chunk_iter = _GridOutChunkIterator(
                self, self.__chunks, self._session, chunk_number)
        chunk = self.__chunk_iter.next()
        # Drop the bytes of this chunk that were already consumed.
        chunk_data = chunk["data"][self.__position % chunk_size:]
        if not chunk_data:
            raise CorruptGridFile("truncated chunk")
    self.__position += len(chunk_data)
    self.__buffer = EMPTY
    return chunk_data
"def",
"readchunk",
"(",
"self",
")",
":",
"received",
"=",
"len",
"(",
"self",
".",
"__buffer",
")",
"chunk_data",
"=",
"EMPTY",
"chunk_size",
"=",
"int",
"(",
"self",
".",
"chunk_size",
")",
"if",
"received",
">",
"0",
":",
"chunk_data",
"=",
"self",
".",
"__buffer",
"elif",
"self",
".",
"__position",
"<",
"int",
"(",
"self",
".",
"length",
")",
":",
"chunk_number",
"=",
"int",
"(",
"(",
"received",
"+",
"self",
".",
"__position",
")",
"/",
"chunk_size",
")",
"if",
"self",
".",
"__chunk_iter",
"is",
"None",
":",
"self",
".",
"__chunk_iter",
"=",
"_GridOutChunkIterator",
"(",
"self",
",",
"self",
".",
"__chunks",
",",
"self",
".",
"_session",
",",
"chunk_number",
")",
"chunk",
"=",
"self",
".",
"__chunk_iter",
".",
"next",
"(",
")",
"chunk_data",
"=",
"chunk",
"[",
"\"data\"",
"]",
"[",
"self",
".",
"__position",
"%",
"chunk_size",
":",
"]",
"if",
"not",
"chunk_data",
":",
"raise",
"CorruptGridFile",
"(",
"\"truncated chunk\"",
")",
"self",
".",
"__position",
"+=",
"len",
"(",
"chunk_data",
")",
"self",
".",
"__buffer",
"=",
"EMPTY",
"return",
"chunk_data"
] | 36.2 | 16.2 |
def replace(self, key):
    """Selects a different image to be shown.

    Parameters:
        | key - a key in the original dictionary to specify which image to show

    Raises:
        KeyError: if *key* is not present in the collection of images.
    """
    if key not in self.imagesDict:
        # Carry the diagnostic in the exception itself instead of
        # printing to stdout and raising a bare KeyError.
        raise KeyError(
            'The key {!r} was not found in the collection of images '
            'dictionary'.format(key))
    self.originalImage = self.imagesDict[key]
    self.image = self.originalImage.copy()
    # Set the rect of the image to appropriate values - using the current image
    # then scale and rotate
    self.rect = self.image.get_rect()
    self.rect.x = self.loc[0]
    self.rect.y = self.loc[1]
    self.scale(self.percent, self.scaleFromCenter)
    self.rotate(self.angle)
"def",
"replace",
"(",
"self",
",",
"key",
")",
":",
"if",
"not",
"(",
"key",
"in",
"self",
".",
"imagesDict",
")",
":",
"print",
"(",
"'The key'",
",",
"key",
",",
"'was not found in the collection of images dictionary'",
")",
"raise",
"KeyError",
"self",
".",
"originalImage",
"=",
"self",
".",
"imagesDict",
"[",
"key",
"]",
"self",
".",
"image",
"=",
"self",
".",
"originalImage",
".",
"copy",
"(",
")",
"# Set the rect of the image to appropriate values - using the current image\r",
"# then scale and rotate\r",
"self",
".",
"rect",
"=",
"self",
".",
"image",
".",
"get_rect",
"(",
")",
"self",
".",
"rect",
".",
"x",
"=",
"self",
".",
"loc",
"[",
"0",
"]",
"self",
".",
"rect",
".",
"y",
"=",
"self",
".",
"loc",
"[",
"1",
"]",
"self",
".",
"scale",
"(",
"self",
".",
"percent",
",",
"self",
".",
"scaleFromCenter",
")",
"self",
".",
"rotate",
"(",
"self",
".",
"angle",
")"
] | 36.238095 | 19.380952 |
def _parse_broadcast(self, msg):
    """
    Given a broadcast message, returns the message that was broadcast.
    """
    # Strip the broadcast prefix, pull out the typed payload, then
    # unescape it before returning.
    payload = msg[self.broadcast_prefix_len:]
    return self._unescape(self._get_type(payload))
"def",
"_parse_broadcast",
"(",
"self",
",",
"msg",
")",
":",
"# get message, remove surrounding quotes, and unescape",
"return",
"self",
".",
"_unescape",
"(",
"self",
".",
"_get_type",
"(",
"msg",
"[",
"self",
".",
"broadcast_prefix_len",
":",
"]",
")",
")"
] | 44.5 | 16.833333 |
def _strict_date(self, lean):
    """
    Return a `time.struct_time` representation of the date.
    """
    # Date triple (year, month, day) padded out with the empty
    # time-of-day and extras fields expected by struct_time.
    date_fields = (
        self._precise_year(lean),
        self._precise_month(lean),
        self._precise_day(lean),
    )
    return struct_time(
        date_fields + tuple(TIME_EMPTY_TIME) + tuple(TIME_EMPTY_EXTRAS)
    )
"def",
"_strict_date",
"(",
"self",
",",
"lean",
")",
":",
"return",
"struct_time",
"(",
"(",
"self",
".",
"_precise_year",
"(",
"lean",
")",
",",
"self",
".",
"_precise_month",
"(",
"lean",
")",
",",
"self",
".",
"_precise_day",
"(",
"lean",
")",
",",
")",
"+",
"tuple",
"(",
"TIME_EMPTY_TIME",
")",
"+",
"tuple",
"(",
"TIME_EMPTY_EXTRAS",
")",
")"
] | 31.909091 | 12.090909 |
def disable(self, clear_cache=True):
    """
    Disable the cache and clear its contents

    :param clear_cache: clear the cache contents as well as disabling (defaults to True)
    """
    # Lazy %-style logging args: the message is only formatted when the
    # DEBUG level is actually enabled.
    logger.debug('disable(clear_cache=%s)', clear_cache)
    if clear_cache:
        self.clear()
    self.options.enabled = False
    logger.info('cache disabled')
"def",
"disable",
"(",
"self",
",",
"clear_cache",
"=",
"True",
")",
":",
"logger",
".",
"debug",
"(",
"'disable(clear_cache={})'",
".",
"format",
"(",
"clear_cache",
")",
")",
"if",
"clear_cache",
":",
"self",
".",
"clear",
"(",
")",
"self",
".",
"options",
".",
"enabled",
"=",
"False",
"logger",
".",
"info",
"(",
"'cache disabled'",
")"
] | 35 | 15.545455 |
def plot_conf(fignum, s, datablock, pars, new):
    """
    plots directions and confidence ellipses

    Parameters
    ----------
    fignum : matplotlib figure number
    s : str
        Plot title.
    datablock : list of dicts
        Each record must carry 'dec' and 'inc' keys.
    pars : sequence
        pars[0]/pars[1] are the mean declination/inclination; the
        remaining entries describe the ellipse (passed to plot_ell).
    new : int
        1 to draw a fresh equal-area net before plotting.
    """
    # make the stereonet
    if new == 1:
        plot_net(fignum)
    #
    # plot the data
    #
    DIblock = []
    for plotrec in datablock:
        DIblock.append((float(plotrec["dec"]), float(plotrec["inc"])))
    if len(DIblock) > 0:
        plot_di(fignum, DIblock)  # plot directed lines
    #
    # put on the mean direction
    #
    x, y = [], []
    XY = pmag.dimap(float(pars[0]), float(pars[1]))
    x.append(XY[0])
    y.append(XY[1])
    plt.figure(num=fignum)
    if new == 1:
        plt.scatter(x, y, marker='d', s=80, c='r')
    else:
        # BUGFIX: the original wrote float(pars[1] > 0), comparing the
        # raw (possibly string) value to 0 before converting.  Convert
        # first, then test the inclination's sign.
        if float(pars[1]) > 0:
            plt.scatter(x, y, marker='^', s=100, c='r')
        else:
            plt.scatter(x, y, marker='^', s=100, c='y')
    plt.title(s)
    #
    # plot the ellipse
    #
    plot_ell(fignum, pars, 'r-,', 0, 1)
"def",
"plot_conf",
"(",
"fignum",
",",
"s",
",",
"datablock",
",",
"pars",
",",
"new",
")",
":",
"# make the stereonet",
"if",
"new",
"==",
"1",
":",
"plot_net",
"(",
"fignum",
")",
"#",
"# plot the data",
"#",
"DIblock",
"=",
"[",
"]",
"for",
"plotrec",
"in",
"datablock",
":",
"DIblock",
".",
"append",
"(",
"(",
"float",
"(",
"plotrec",
"[",
"\"dec\"",
"]",
")",
",",
"float",
"(",
"plotrec",
"[",
"\"inc\"",
"]",
")",
")",
")",
"if",
"len",
"(",
"DIblock",
")",
">",
"0",
":",
"plot_di",
"(",
"fignum",
",",
"DIblock",
")",
"# plot directed lines",
"#",
"# put on the mean direction",
"#",
"x",
",",
"y",
"=",
"[",
"]",
",",
"[",
"]",
"XY",
"=",
"pmag",
".",
"dimap",
"(",
"float",
"(",
"pars",
"[",
"0",
"]",
")",
",",
"float",
"(",
"pars",
"[",
"1",
"]",
")",
")",
"x",
".",
"append",
"(",
"XY",
"[",
"0",
"]",
")",
"y",
".",
"append",
"(",
"XY",
"[",
"1",
"]",
")",
"plt",
".",
"figure",
"(",
"num",
"=",
"fignum",
")",
"if",
"new",
"==",
"1",
":",
"plt",
".",
"scatter",
"(",
"x",
",",
"y",
",",
"marker",
"=",
"'d'",
",",
"s",
"=",
"80",
",",
"c",
"=",
"'r'",
")",
"else",
":",
"if",
"float",
"(",
"pars",
"[",
"1",
"]",
">",
"0",
")",
":",
"plt",
".",
"scatter",
"(",
"x",
",",
"y",
",",
"marker",
"=",
"'^'",
",",
"s",
"=",
"100",
",",
"c",
"=",
"'r'",
")",
"else",
":",
"plt",
".",
"scatter",
"(",
"x",
",",
"y",
",",
"marker",
"=",
"'^'",
",",
"s",
"=",
"100",
",",
"c",
"=",
"'y'",
")",
"plt",
".",
"title",
"(",
"s",
")",
"#",
"# plot the ellipse",
"#",
"plot_ell",
"(",
"fignum",
",",
"pars",
",",
"'r-,'",
",",
"0",
",",
"1",
")"
] | 24.057143 | 20.171429 |
def list_items(item, details=False, group_by='UUID'):
    '''
    Return a list of a specific type of item. The following items are available:
        vms
        runningvms
        ostypes
        hostdvds
        hostfloppies
        intnets
        bridgedifs
        hostonlyifs
        natnets
        dhcpservers
        hostinfo
        hostcpuids
        hddbackends
        hdds
        dvds
        floppies
        usbhost
        usbfilters
        systemproperties
        extpacks
        groups
        webcams
        screenshotformats
    CLI Example:
    .. code-block:: bash
        salt 'hypervisor' vboxmanage.items <item>
        salt 'hypervisor' vboxmanage.items <item> details=True
        salt 'hypervisor' vboxmanage.items <item> details=True group_by=Name
    Some items do not display well, or at all, unless ``details`` is set to
    ``True``. By default, items are grouped by the ``UUID`` field, but not all
    items contain that field. In those cases, another field must be specified.
    '''
    types = (
        'vms', 'runningvms', 'ostypes', 'hostdvds', 'hostfloppies', 'intnets',
        'bridgedifs', 'hostonlyifs', 'natnets', 'dhcpservers', 'hostinfo',
        'hostcpuids', 'hddbackends', 'hdds', 'dvds', 'floppies', 'usbhost',
        'usbfilters', 'systemproperties', 'extpacks', 'groups', 'webcams',
        'screenshotformats'
    )
    if item not in types:
        raise CommandExecutionError(
            'Item must be one of: {0}.'.format(', '.join(types))
        )
    flag = ''
    if details is True:
        flag = ' -l'
    ret = {}
    tmp_id = None
    tmp_dict = {}
    cmd = '{0} list{1} {2}'.format(vboxcmd(), flag, item)
    for line in salt.modules.cmdmod.run(cmd).splitlines():
        if not line.strip():
            continue
        comps = line.split(':')
        if not comps:
            continue
        if tmp_id is not None:
            ret[tmp_id] = tmp_dict
        line_val = ':'.join(comps[1:]).strip()
        if comps[0] == group_by:
            # A new group starts: remember its id and begin a fresh dict.
            tmp_id = line_val
            tmp_dict = {}
        tmp_dict[comps[0]] = line_val
    # BUGFIX: flush the final group.  If the last parsed line was the
    # ``group_by`` line of a new group, the loop never got another
    # iteration in which to store the fresh tmp_dict into ``ret``.
    if tmp_id is not None:
        ret[tmp_id] = tmp_dict
    return ret
"def",
"list_items",
"(",
"item",
",",
"details",
"=",
"False",
",",
"group_by",
"=",
"'UUID'",
")",
":",
"types",
"=",
"(",
"'vms'",
",",
"'runningvms'",
",",
"'ostypes'",
",",
"'hostdvds'",
",",
"'hostfloppies'",
",",
"'intnets'",
",",
"'bridgedifs'",
",",
"'hostonlyifs'",
",",
"'natnets'",
",",
"'dhcpservers'",
",",
"'hostinfo'",
",",
"'hostcpuids'",
",",
"'hddbackends'",
",",
"'hdds'",
",",
"'dvds'",
",",
"'floppies'",
",",
"'usbhost'",
",",
"'usbfilters'",
",",
"'systemproperties'",
",",
"'extpacks'",
",",
"'groups'",
",",
"'webcams'",
",",
"'screenshotformats'",
")",
"if",
"item",
"not",
"in",
"types",
":",
"raise",
"CommandExecutionError",
"(",
"'Item must be one of: {0}.'",
".",
"format",
"(",
"', '",
".",
"join",
"(",
"types",
")",
")",
")",
"flag",
"=",
"''",
"if",
"details",
"is",
"True",
":",
"flag",
"=",
"' -l'",
"ret",
"=",
"{",
"}",
"tmp_id",
"=",
"None",
"tmp_dict",
"=",
"{",
"}",
"cmd",
"=",
"'{0} list{1} {2}'",
".",
"format",
"(",
"vboxcmd",
"(",
")",
",",
"flag",
",",
"item",
")",
"for",
"line",
"in",
"salt",
".",
"modules",
".",
"cmdmod",
".",
"run",
"(",
"cmd",
")",
".",
"splitlines",
"(",
")",
":",
"if",
"not",
"line",
".",
"strip",
"(",
")",
":",
"continue",
"comps",
"=",
"line",
".",
"split",
"(",
"':'",
")",
"if",
"not",
"comps",
":",
"continue",
"if",
"tmp_id",
"is",
"not",
"None",
":",
"ret",
"[",
"tmp_id",
"]",
"=",
"tmp_dict",
"line_val",
"=",
"':'",
".",
"join",
"(",
"comps",
"[",
"1",
":",
"]",
")",
".",
"strip",
"(",
")",
"if",
"comps",
"[",
"0",
"]",
"==",
"group_by",
":",
"tmp_id",
"=",
"line_val",
"tmp_dict",
"=",
"{",
"}",
"tmp_dict",
"[",
"comps",
"[",
"0",
"]",
"]",
"=",
"line_val",
"return",
"ret"
] | 27.4 | 24.253333 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.