content
stringlengths 35
762k
| sha1
stringlengths 40
40
| id
int64 0
3.66M
|
|---|---|---|
def find_possible_words(word: str, dictionary: list) -> list:
    """Return every dictionary entry that could be formed from ``word``.

    An entry qualifies when it starts with the first character of ``word``,
    ends with its last character, and consists only of characters that
    appear somewhere in ``word``.
    """
    first, last = word[0], word[-1]
    return [
        entry
        for entry in dictionary
        if entry.startswith(first)
        and entry.endswith(last)
        and all(ch in word for ch in entry)
    ]
|
a3e63e6b6b9d8de3ca718cfc8e031bbc34630d50
| 3,637,267
|
def diag_multidim_gaussian_log_likelihood(z_u, mean_u, logvar_u, varmin):
    """Log-likelihood under a multidimensional Gaussian distribution with
    diagonal covariance.

    Returns the log-likelihood for the multidim distribution, obtained by
    summing the per-dimension log-likelihoods over axis 0 (dimensions are
    independent under a diagonal covariance).

    NOTE(review): relies on a sibling `diag_gaussian_log_likelihood`;
    `varmin` is presumably a variance floor passed straight through —
    confirm against that helper.
    """
    return np.sum(diag_gaussian_log_likelihood(z_u, mean_u, logvar_u, varmin), axis=0)
|
010b1f510a74b2af29fe0cc94a2c36bc9c980778
| 3,637,268
|
def get_method(java_object, method_name):
    """Retrieves a reference to the method of an object.
    This function is useful when `auto_field=true` and an instance field has
    the same name as a method. The full signature of the method is not
    required: it is determined when the method is called.
    :param java_object: the instance containing the method
    :param method_name: the name of the method to retrieve
    :return: a JavaMember proxy for the named method
    """
    # Build the member proxy directly from the object's gateway internals,
    # bypassing normal attribute lookup (which a shadowing field would win).
    return JavaMember(
        method_name, java_object, java_object._target_id,
        java_object._gateway_client)
|
9678cf38bfcf8dd34d6ccbad9b689709e9ab3dc5
| 3,637,270
|
def set_up_cube(
    zero_point_indices=((0, 0, 7, 7),),
    num_time_points=1,
    num_grid_points=16,
    num_realization_points=1,
):
    """Set up a cube with equal intervals along the x and y axis.

    :param zero_point_indices: tuples of (realization, time, y, x) indices
        whose data values are set to 0; 3-tuples are padded with a leading
        realization index of 0.
    :param num_time_points: number of points on the time dimension
    :param num_grid_points: number of points along each spatial axis
    :param num_realization_points: number of ensemble realizations
    :return: a precipitation_amount cube of ones with the selected points
        zeroed out.
    """
    # Normalise 3-element indices to 4 elements by prepending realization 0.
    zero_point_indices = list(zero_point_indices)
    for index, indices in enumerate(zero_point_indices):
        if len(indices) == 3:
            indices = (0,) + indices
        zero_point_indices[index] = indices
    zero_point_indices = tuple(zero_point_indices)
    # All-ones data, then zero out the requested points.
    data = np.ones(
        (num_realization_points, num_time_points, num_grid_points, num_grid_points),
        dtype=np.float32,
    )
    for indices in zero_point_indices:
        realization_index, time_index, lat_index, lon_index = indices
        data[realization_index][time_index][lat_index][lon_index] = 0
    cube = Cube(data, standard_name="precipitation_amount", units="kg m^-2")
    cube.add_dim_coord(
        DimCoord(range(num_realization_points), standard_name="realization"), 0
    )
    tunit = Unit("hours since 1970-01-01 00:00:00", "gregorian")
    # NOTE(review): 402192.5 hours since the epoch is a fixed reference
    # time (~late 2015); consecutive time points are one hour apart.
    time_points = [402192.5 + _ for _ in range(num_time_points)]
    cube.add_dim_coord(DimCoord(time_points, standard_name="time", units=tunit), 1)
    # Equally spaced projection coordinates, 2 km apart.
    step_size = 2000
    y_points = np.arange(0.0, step_size * num_grid_points, step_size, dtype=np.float32)
    cube.add_dim_coord(
        DimCoord(
            y_points,
            "projection_y_coordinate",
            units="m",
            coord_system=STANDARD_GRID_CCRS,
        ),
        2,
    )
    # x axis is offset by -50 km relative to the y axis.
    x_points = np.arange(
        -50000.0, (step_size * num_grid_points) - 50000, step_size, dtype=np.float32
    )
    cube.add_dim_coord(
        DimCoord(
            x_points,
            "projection_x_coordinate",
            units="m",
            coord_system=STANDARD_GRID_CCRS,
        ),
        3,
    )
    return cube
|
d19380e0cc7471178887ea006fa4f367461dac4d
| 3,637,272
|
def messageBox(self, title, text, icon=QMessageBox.Information):
    """
    Working on generic message box.

    Shows a modal message box with the given title, text and icon, using
    the parent widget's font, and returns QMessageBox.Ok regardless of
    how the dialog was dismissed.
    """
    m = QMessageBox(self)
    m.setWindowTitle(title)
    m.setText(text)
    m.setIcon(icon)
    # yesButton = m.addButton('Yes', QMessageBox.ButtonRole.YesRole)
    # noButton = m.addButton('No', QMessageBox.ButtonRole.NoRole)
    m.setDefaultButton(QMessageBox.Ok)
    m.setFont(self.font())
    m.exec_()  # blocks until the dialog is closed
    # NOTE(review): the return value does not reflect which button was
    # pressed — confirm callers do not rely on it.
    return QMessageBox.Ok
|
7bec2d2f0ca1366382d5bfc800c23af424d9a3d3
| 3,637,273
|
def binary_seg_loss(loss):
    """
    Select the binary segmentation loss to use by name.

    :param loss: the type of loss to use; 'focal' selects focal loss,
        anything else falls back to binary cross-entropy
    :return: an instantiated Keras-compatible loss object
    """
    if loss != 'focal':
        return tf.keras.losses.BinaryCrossentropy()
    return BinaryFocalLoss()
|
9d94a7e406a2fa1a12ba970731c7ce25b2408b21
| 3,637,274
|
def get_config_file():
    """Return the loaded config file if one exists.

    Searches well-known directories and filenames for a YAML config; if
    none can be loaded, creates a default config in the user's home
    directory and returns its parsed content.
    """
    # config will be created here if we can't find one
    new_config_path = os.path.expanduser('~/dagobahd.yml')
    config_dirs = ['/etc',
                   os.path.expanduser('~/dagobah/dagobah/daemon/')]
    config_filenames = ['dagobahd.yml',
                       'dagobahd.yaml',
                       '.dagobahd.yml',
                       '.dagobahd.yaml']
    for directory in config_dirs:
        for filename in config_filenames:
            path = os.path.join(directory, filename)
            try:
                if os.path.isfile(path):
                    # safe_load: never execute arbitrary YAML tags from disk
                    with open(path) as to_load:
                        config = yaml.safe_load(to_load.read())
                    replace_nones(config)
                    return config
            except Exception:
                # Best-effort scan: an unreadable/broken candidate must not
                # abort the search, but record it instead of silently passing.
                logging.debug('Could not load config from %s', path,
                              exc_info=True)
    # if we made it to here, need to create a config file
    # double up on notifications here to make sure first-time user sees it
    print('Creating new config file in home directory')
    logging.info('Creating new config file in home directory')
    with open(new_config_path, 'w') as new_config:
        new_config.write(return_standard_conf())
    with open(new_config_path, 'r') as new_config:
        config = yaml.safe_load(new_config.read())
    replace_nones(config)
    return config
|
4a5009d5d6f5a4be6d953d7bc9150c033a56f187
| 3,637,275
|
def bostock_cat_colors(color_sets=("set3",)):
    """
    Get almost as many categorical colors as you please.
    Get more than one of the color brewer sets with ['set1', 'set2'].

    Parameters
    ----------
    color_sets : sequence of str
        color sets to return; valid options are
        (set1, set2, set3, pastel1, pastel2, paired, dark, accent, category10)

    Returns
    -------
    categorical_colors : list
        list of strings (e.g. ["#e41a1c",...])

    Examples
    --------
    >>> bostock_cat_colors(['set3'])[:5]
    ['#8dd3c7', '#ffffb3', '#bebada', '#fb8072', '#80b1d3']
    >>> bostock_cat_colors(['category10'])[:5]
    ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd']

    Notes
    -----
    list of hex colors can be found here:
    https://observablehq.com/@d3/color-schemes
    """
    # Default is an immutable tuple (mutable-default pitfall fixed); any
    # sequence of set names still works, so callers are unaffected.
    bostock = \
        {"set1": ["#e41a1c", "#377eb8", "#4daf4a", "#984ea3",
                  "#ff7f00", "#ffff33", "#a65628", "#f781bf",
                  "#999999"],
         "set2": ["#66c2a5", "#fc8d62", "#8da0cb", "#e78ac3",
                  "#a6d854", "#ffd92f", "#e5c494", "#b3b3b3"],
         "set3": ["#8dd3c7", "#ffffb3", "#bebada", "#fb8072",
                  "#80b1d3", "#fdb462", "#b3de69", "#fccde5",
                  "#d9d9d9", "#bc80bd", "#ccebc5", "#ffed6f"],
         "pastel1": ["#fbb4ae", "#b3cde3", "#ccebc5", "#decbe4",
                     "#fed9a6", "#ffffcc", "#e5d8bd", "#fddaec",
                     "#f2f2f2"],
         "pastel2": ["#b3e2cd", "#fdcdac", "#cbd5e8", "#f4cae4",
                     "#e6f5c9", "#fff2ae", "#f1e2cc", "#cccccc"],
         "paired": ["#a6cee3", "#1f78b4", "#b2df8a", "#33a02c",
                    "#fb9a99", "#e31a1c", "#fdbf6f", "#ff7f00",
                    "#cab2d6", "#6a3d9a", "#ffff99", "#b15928"],
         "dark": ["#1b9e77", "#d95f02", "#7570b3", "#e7298a",
                  "#66a61e", "#e6ab02", "#a6761d", "#666666"],
         "accent": ["#7fc97f", "#beaed4", "#fdc086", "#ffff99",
                    "#386cb0", "#f0027f", "#bf5b17", "#666666"],
         "category10": ["#1f77b4", "#ff7f0e", "#2ca02c", "#d62728",
                        "#9467bd", "#8c564b", "#e377c2", "#7f7f7f",
                        "#bcbd22", "#17becf"]
         }
    # Flatten the requested sets in order, in a single comprehension.
    return [color for set_name in color_sets for color in bostock[set_name]]
|
d01a2c833c3ee4ab1a196184ec4aecdb6cfc97a0
| 3,637,276
|
def bbpssw_gates_and_measurement_bob(q1, q2):
    """
    Performs the gates and measurements for Bob's side of the BBPSSW protocol

    :param q1: Bob's qubit from the first entangled pair
    :param q2: Bob's qubit from the second entangled pair
    :return: Integer 0/1 indicating Bob's measurement outcome
    """
    # CNOT with q1 as control and q2 as target, then measure the target.
    q1.cnot(q2)
    return q2.measure()
|
71e981a99065ea2b0d76a2ebaacebdf04b53488a
| 3,637,277
|
from typing import Tuple
def fiber_array(
    n: int = 8,
    pitch: float = 127.0,
    core_diameter: float = 10,
    cladding_diameter: float = 125,
    layer_core: Tuple[int, int] = gf.LAYER.WG,
    layer_cladding: Tuple[int, int] = gf.LAYER.WGCLAD,
) -> Component:
    """Returns a fiber array

    :param n: number of fibers
    :param pitch: center-to-center fiber spacing
    :param core_diameter: diameter of each fiber core
    :param cladding_diameter: diameter of each fiber cladding
    :param layer_core: layer for the core circles
    :param layer_cladding: layer for the cladding circles

    .. code::
        pitch
         <->
        _________
       |         | lid
       | o o o o |
       |         | base
       |_________|
          length
    """
    c = Component()
    for i in range(n):
        # One core circle and one cladding circle per fiber, shifted along x.
        core = c.add_ref(circle(radius=core_diameter / 2, layer=layer_core))
        cladding = c.add_ref(circle(radius=cladding_diameter / 2, layer=layer_cladding))
        core.movex(i * pitch)
        cladding.movex(i * pitch)
        # NOTE(review): the port is added without an explicit position or
        # layer, so it presumably sits at the component origin rather than
        # on fiber i — confirm against the Component.add_port defaults.
        c.add_port(name=f"F{i}", width=core_diameter, orientation=0)
    return c
|
88bf1536788313c99f6b3c56ce8633db8cc30b8b
| 3,637,278
|
def delete(movie_id):
    """
    deletes the movie from the database
    :param movie_id: id to delete
    :return: index file
    """
    # NOTE(review): Query.get returns None for an unknown id, which would
    # make db_session.delete fail — confirm callers guarantee the id exists.
    movie_to_delete_id = Movie.query.get(movie_id)
    db_session.delete(movie_to_delete_id)
    db_session.commit()
    # Redirect back to the index page after deletion.
    return redirect(url_for('home'))
|
cfaade7b63e4d4413b7593a667fe62d5de90eaad
| 3,637,279
|
import torch
def one_vector_block_diagonal(num_blocks: int, vector_length: int) -> Tensor:
    """Computes a block diagonal matrix with column vectors of ones as blocks.
    Associated with the mathematical symbol :math:`E`.

    Example:
        ::
            one_vector_block_diagonal(3, 2) == tensor([
                [1., 0., 0.],
                [1., 0., 0.],
                [0., 1., 0.],
                [0., 1., 0.],
                [0., 0., 1.],
                [0., 0., 1.]]).

    Args:
        num_blocks: number of columns.
        vector_length: number of ones in each matrix diagonal block.

    Returns:
        ``(n * vector_length, n)`` 0-1 tensor.
    """
    # Tiling the identity row-wise and folding back yields each identity
    # row repeated `vector_length` times, i.e. column-vector-of-ones blocks.
    identity = torch.eye(num_blocks)
    tiled = identity.repeat(1, vector_length)
    return tiled.reshape(num_blocks * vector_length, num_blocks)
|
babe3e8178f3d9cde9150909fbf890ae17830730
| 3,637,280
|
def get_cli_args():
    """
    Build the wrapper's CLI parser and parse the command line.

    :return: dict mapping argument names to parsed values (note: ``vars``
        is applied, so this is a dict, not an argparse.Namespace)
    """
    # Start from the main pipeline's argument names, minus the ones this
    # wrapper redefines below with its own semantics.
    args = get_main_pipeline_arg_names().difference({
        'output', 'ses', 'subject', 'task', WRAPPER_LOC[2:].replace('-', '_')
    })
    tasks = ('SST', 'MID', 'nback')
    parser = get_pipeline_cli_argparser(arg_names=args)
    parser.add_argument('-all-events', '--all-events', type=valid_readable_dir,
                        help=('Valid path to an existing directory which has '
                              '1 folder per subject, with the folder structure '
                              '(--all-events)/(subject ID)/(session name)/'
                              'level-1/events.'))
    parser.add_argument('-all-outputs', '--all-outputs', type=valid_output_dir,
                        help=('Valid path to your output directory root. In '
                              'other words, the "--output" argument for each '
                              'command in the --script-list file will be '
                              'subject- and session-specific subdirectories '
                              'of this --all-outputs directory.'))
    parser.add_argument('-output', '--output', type=valid_output_dir, required=False)
    parser.add_argument('-script', '--script', type=valid_readable_file)
    parser.add_argument('-script-list', '--script-list', required=True)
    parser.add_argument('-slurm', '--slurm', action='store_true')
    parser.add_argument('-sourcedata', '--sourcedata', type=valid_readable_dir)
    parser.add_argument('-tasks', '--tasks', nargs='+', default=tasks)  # choices=tasks,
    parser.add_argument(WRAPPER_LOC, type=valid_readable_dir,
                        default=SCRIPT_DIR)  # , dest='loc')
    return vars(parser.parse_args())
|
d728c1254ffcfdeb2ba883681dd997d578084e72
| 3,637,281
|
from google.cloud import securitycenter
def list_all_assets(organization_id):
    """Demonstrate listing and printing all assets."""
    i = 0
    # [START securitycenter_list_all_assets]
    client = securitycenter.SecurityCenterClient()
    # organization_id is the numeric ID of the organization.
    # organization_id = "1234567777"
    org_name = "organizations/{org_id}".format(org_id=organization_id)
    # Call the API and print results.
    asset_iterator = client.list_assets(request={"parent": org_name})
    for i, asset_result in enumerate(asset_iterator):
        print(i, asset_result)
    # [END securitycenter_list_all_assets]
    # NOTE(review): this returns the index of the LAST asset (count - 1),
    # and 0 both for zero assets and for exactly one — confirm callers
    # treat it only as a rough indicator, not an exact count.
    return i
|
882672c91e8a698730532e1e7801aba0d5ec7d05
| 3,637,282
|
def _0_to_empty_str(dataframe: pd.DataFrame, column_data_type: dict):
"""
데이터가 str인 column에 들어있는 0을 '' 로 바꾸어 준다.
column_data_type 에서 value가 'str' 인 column 만 바꾸어 준다.
"""
for column, datatype in column_data_type.items():
if datatype == "str":
dataframe[column].replace("0", "", inplace=True)
return dataframe
|
2453b53c0e7a0067772f37d9d8c370b8accb933c
| 3,637,283
|
def _predict(rel):
    """
    Predicts the betrayal probabilities and returns them as an inference.Output object.

    :param rel: the relationship/record to score (passed straight through)
    """
    # Thin wrapper delegating to the shared inference module.
    return inference.predict(rel)
|
c3b0489bef0723012f1de336dad5f61c40d77c7e
| 3,637,284
|
from datetime import datetime
def evaluate_exams(request, exam_id):
    """
    Request-Methods :POST
    Request-Headers : Authorization Token
    Request-Body: Student-Solution -> JSON
    Response: "student_name" -> str,
              "teacher_name" -> str,
              "batch" -> str,
              "marks" -> str,
              "exam_start_date_time" -> str,
              "total_marks" -> str,
              "grade" -> str

    NOTE(review): the response payload below actually uses the key
    "score" (a number), not "marks" — confirm which is the API contract.
    """
    student_solutions = request.data.get("student_solutions")
    student = check_token_and_get_student(request)
    # The exam must exist and belong to the requesting student's batch.
    if not Exam.objects.filter(Q(id=exam_id) & Q(batch=student.batch)).exists():
        raise NotFoundException("Exam not found")
    exam = Exam.objects.get(Q(id=exam_id) & Q(batch=student.batch))
    # Reconstruct the start time by subtracting the exam duration from now
    # (assumes exam_period.hour holds the duration in hours — confirm).
    exam_start_date_time = datetime.now(tz=timezone.utc) - timedelta(
        hours=exam.exam_period.hour
    )
    # One submission per (exam, student).
    if Result.objects.filter(Q(exam=exam) & Q(student=student)).exists():
        raise AlreadyExistsException("Already Submitted the exam")
    score = evaluate_exam_score(exam.questions_and_solutions, student_solutions)[
        "total_score"
    ]
    grade = evaluate_exam_grade(score, exam.total_marks)
    result = Result(
        exam_start_date_time=(exam_start_date_time),
        exam=exam,
        student=student,
        teacher=exam.teacher,
        student_solutions=student_solutions,
        total_marks=float(exam.total_marks),
        score=score,
        grade=grade,
    )
    result.save()
    response = {
        "student_name": student.name,
        "teacher_name": exam.teacher.name,
        "batch": exam.batch,
        "score": score,
        "exam_start_date_time": (exam_start_date_time),
        "total_marks": float(exam.total_marks),
        "grade": grade,
    }
    return JsonResponse(data=response, status=200)
|
688cf2cd43991c98475fb7921ad64fccd0ea2b36
| 3,637,285
|
def _accumulated_penalty_energy_fw(energy_to_track, penalty_matrix, parallel):
    """Calculates accumulated penalty in forward direction (t=0...end).

    `energy_to_track`: squared abs time-frequency transform
    `penalty_matrix`: pre-calculated penalty for all potential jumps between
                      two frequencies
    `parallel`: choose the parallel accumulation kernel over the serial one

    # Returns:
        `penalized_energy`: new energy with added forward penalty
        `ridge_idxs`: calculated initial ridge with only forward penalty
    """
    # Work on a copy so the caller's energy array is left untouched.
    penalized_energy = energy_to_track.copy()
    # Select the (sibling) in-place accumulation kernel.
    fn = (__accumulated_penalty_energy_fwp if parallel else
          __accumulated_penalty_energy_fw)
    fn(penalized_energy, penalty_matrix)
    # Initial ridge: frequency index of minimal penalized energy per time step.
    ridge_idxs = np.unravel_index(np.argmin(penalized_energy, axis=0),
                                  penalized_energy.shape)[1]
    return penalized_energy, ridge_idxs
|
cc05de06ab53a9dcf7937df8bc6c5613a649b01c
| 3,637,286
|
def rot90(m, k=1, axis=2):
    """Rotate an array k*90 degrees in the counter-clockwise direction
    around the given axis.

    This differs from np's rot90 because it's 3D: the chosen axis is
    swapped into position 2, the leading plane is rotated, and the axis
    is swapped back.
    """
    swapped = np.swapaxes(m, 2, axis)
    rotated = np.rot90(swapped, k)
    return np.swapaxes(rotated, 2, axis)
|
40bb5c4406e8f7a1f4f6019c56d1a734bee0eac6
| 3,637,287
|
def get_pads(onnx_node):  # type: (NodeWrapper) -> Tuple[int, int, int]
    """
    Get padding values for the operation described by an ONNX node.

    If `auto_pad` attribute is specified as SAME_UPPER or SAME_LOWER, or VALID values are
    calculated. Otherwise values are taken from the `pads` attribute.

    `pads` value should follow [x1_begin, x2_begin..., x1_end, x2_end,...]

    :param onnx_node: wrapped ONNX node for Conv or Pool operation
    :return: tuple of numbers of pixels to pad (height, width, depth)
    """
    auto_pad = onnx_node.get_attribute_value('auto_pad')
    pads = onnx_node.get_attribute_value('pads', ())  # Padding along each axis
    kernel_shape = onnx_node.get_attribute_value('kernel_shape')
    # Attribute 'auto_pad' is deprecated, but is currently used by CNTK
    if auto_pad:
        if auto_pad == 'VALID':
            pads = [0, 0] * len(kernel_shape)
        else:
            # SAME_UPPER or SAME_LOWER mean pad the input so that the output size match the input.
            # In case of odd number add the extra padding at the end for SAME_UPPER and at the
            # beginning for SAME_LOWER.
            def pad_value(kernel_dim):  # type: (int) -> float
                return (kernel_dim - 1.0) / 2.0
            pads_starts = [floor(pad_value(dim)) if auto_pad == 'SAME_UPPER' else
                           ceil(pad_value(dim)) for dim in kernel_shape]
            pads_ends = [ceil(pad_value(dim)) if auto_pad == 'SAME_UPPER' else
                         floor(pad_value(dim)) for dim in kernel_shape]
            pads = pads_starts + pads_ends
    verify_symmetric_padding(onnx_node, pads)
    # Unpack per-axis pads into (height, width, depth); begin/end halves are
    # assumed symmetric (verified above), so only the begins are kept.
    pad_h, pad_w, pad_d = 0, 0, 0
    if pads and len(pads) == 2:  # ONNX input axes NCHW
        pad_h, pad_w = pads
    if pads and len(pads) == 3:  # ONNX input axes NCHWD
        pad_h, pad_w, pad_d = pads
    if pads and len(pads) == 4:  # ONNX input axes NCHW
        pad_h, pad_w, _, _ = pads
    elif pads and len(pads) == 6:  # ONNX input axes NCHWD
        pad_h, pad_w, pad_d, _, _, _ = pads
    return pad_h, pad_w, pad_d
|
9199129f59c3f459dfbad209427f4dcb8b5863e7
| 3,637,288
|
def from_dict(transforms):
    """Deserializes the transformations stored in a dict.

    Supports deserialization of Streams only.

    Parameters
    ----------
    transforms : dict
        Transforms; expected to hold a single top-level key (the registered
        class name) mapping to that transform's parameters.

    Returns
    -------
    out : solt.core.Stream
        An instance of solt.core.Stream.
    """
    if not isinstance(transforms, dict):
        raise TypeError("Transforms must be a dict!")
    for t in transforms:
        # Recursively deserialize nested transform lists / streams first.
        if "transforms" in transforms[t]:
            transforms[t]["transforms"] = [from_dict(x) for x in transforms[t]["transforms"]]
        if "affine_transforms" in transforms[t]:
            transforms[t]["affine_transforms"] = from_dict(transforms[t]["affine_transforms"])
        if t in Serializable.registry:
            cls = Serializable.registry[t]
        else:
            raise ValueError(f"Could not find {t} in the registry!")
        # NOTE(review): returns inside the loop, so only the first key of
        # `transforms` is ever deserialized — confirm single-key input.
        return cls(**transforms[t])
|
bf09deac48819306a7fef9b98cd68775f3d9bcbd
| 3,637,290
|
def create_mp_pool(nproc=None):
    """Creates a multiprocessing pool of processes.

    Arguments
    ---------
    nproc : int, optional
        number of processors to use. Defaults to number of available CPUs
        minus 2.
    """
    n_cpu = pathos.multiprocessing.cpu_count()
    if nproc is None:
        # Leave two CPUs free for the rest of the system.
        # NOTE(review): on machines with <= 2 CPUs this yields nproc <= 0 —
        # confirm ProcessingPool tolerates that, or guard upstream.
        nproc = n_cpu - 2
    assert nproc <= n_cpu, \
        f'Cannot allocate more processes than existing CPUs: {nproc} > {n_cpu}'
    return ProcessingPool(nproc)
|
37b750fb961535eada1924f524a4ec851ad7d613
| 3,637,291
|
def subpixel_edges(img, threshold, iters, order):
    """
    Detects subpixel features for each pixel belonging to an edge in `img`.

    The subpixel edge detection used the method published in the following paper:
    "Accurate Subpixel Edge Location Based on Partial Area Effect"
    http://www.sciencedirect.com/science/article/pii/S0262885612001850

    Parameters
    ----------
    img: ndarray
        A grayscale image.
    threshold: int or float
        Specifies the minimum difference of intensity at both
        sides of a pixel to be considered as an edge.
    iters: int
        Specifies how many smoothing iterations are needed
        to find the final edges:
          0:  Oriented to noise free images. No previous smoothing on
              the image. The detection is applied on the original
              image values (section 3 of the paper).
          1:  Oriented to low-noise images. The detection is applied
              on the image previously smoothed by a 3x3 mask
              (default) (sections 4 and 5 of the paper)
          >1: Oriented to high-noise images. Several stages of
              smoothing + detection + synthetic image creation are
              applied (section 6 of the paper). A few iterations are
              normally enough.
    order: int
        Specifies the order of the edges to find:
          1: first order edges (straight lines)
          2: second order edges (default)

    Returns
    -------
    An instance of EdgePixel
    """
    if iters == 0:
        return main_iter0(img, threshold, iters, order)
    elif iters == 1:
        return main_iter1(img, threshold, iters, order)
    elif iters > 1:
        # Each stage refines edges on the synthetic image produced by the
        # previous one; the final stage's EdgePixel result is returned.
        for iterN in range(iters):
            ep, img = main_iterN(img, threshold, iters, order)
        return ep
    # NOTE(review): a negative `iters` falls through every branch and
    # implicitly returns None — confirm callers never pass one.
|
546a8d1aedd1c53a329ce7a6e600307cf85b70a4
| 3,637,292
|
import numbers
import numpy
def arrays(hyperchunks, array_count):
    """Iterate over the arrays in a set of hyperchunks.

    Yields one `Array` helper object per concrete array index, resolving
    integer / Ellipsis / slice array specifications against `array_count`.
    """
    class Attribute(object):
        """One attribute expression together with its hyperslices."""
        def __init__(self, expression, hyperslices):
            self._expression = expression
            self._hyperslices = hyperslices
        @property
        def expression(self):
            return self._expression
        @property
        def hyperslice_count(self):
            return 0 if self._hyperslices is None else len(self._hyperslices)
        def hyperslices(self):
            """Iterate over the hyperslices in a hyperchunk."""
            if self._hyperslices is not None:
                for hyperslice in self._hyperslices:
                    yield tuple(hyperslice)
    class Array(object):
        """One resolved array index plus its attribute/order/hyperslice spec."""
        def __init__(self, index, attributes, order, hyperslices):
            self._index = index
            self._attributes = attributes
            self._order = order
            self._hyperslices = hyperslices
        @property
        def index(self):
            return self._index
        @property
        def attribute_count(self):
            return 0 if self._attributes is None else len(self._attributes)
        @property
        def order(self):
            return self._order
        def attributes(self, attribute_count):
            """Iterate over the attributes in a hyperchunk.

            Integer / Ellipsis specs are normalised to slices (negative
            integers count from the end), then expanded one index at a time.
            """
            if self._attributes is not None:
                for attributes in self._attributes:
                    if isinstance(attributes, (numbers.Integral, type(Ellipsis), slice)):
                        if isinstance(attributes, numbers.Integral):
                            if attributes < 0:
                                attributes = slice(attribute_count + attributes, attribute_count + attributes + 1)
                            else:
                                attributes = slice(attributes, attributes + 1)
                        elif isinstance(attributes, type(Ellipsis)):
                            attributes = slice(0, attribute_count)
                        start, stop, step = attributes.indices(attribute_count)
                        for index in numpy.arange(start, stop, step):
                            yield Attribute(slycat.hyperchunks.grammar.AttributeIndex(index), self._hyperslices)
                    else:
                        # Non-index expression (e.g. a computed attribute).
                        yield Attribute(attributes, self._hyperslices)
    for hyperchunk in hyperchunks:
        # NOTE: the loop variable shadows the enclosing function name.
        for arrays in hyperchunk.arrays:
            if isinstance(arrays, (numbers.Integral, type(Ellipsis), slice)):
                # Same normalisation as for attributes, against array_count.
                if isinstance(arrays, numbers.Integral):
                    if arrays < 0:
                        arrays = slice(array_count + arrays, array_count + arrays + 1)
                    else:
                        arrays = slice(arrays, arrays + 1)
                elif isinstance(arrays, type(Ellipsis)):
                    arrays = slice(0, array_count)
                start, stop, step = arrays.indices(array_count)
                for index in numpy.arange(start, stop, step):
                    yield Array(index, hyperchunk.attributes, hyperchunk.order, hyperchunk.hyperslices)
            else:
                cherrypy.log.error("hyperchunks.__init__.py", "Unexpected array: %r" % arrays)
                raise ValueError("Unexpected array: %r" % arrays)
|
0b4e8833b1dd0f7cf90ed1fc97dbc77d76e29e17
| 3,637,293
|
from typing import Union
import torch
import types
def ne(x: Union[DNDarray, float, int], y: Union[DNDarray, float, int]) -> DNDarray:
    """
    Returns a :class:`~heat.core.dndarray.DNDarray` containing the results of element-wise rich comparison of non-equality between values from two operands, commutative.
    Takes the first and second operand (scalar or :class:`~heat.core.dndarray.DNDarray`) whose elements are to be
    compared as argument.

    Parameters
    ----------
    x: DNDarray or scalar
        The first operand involved in the comparison
    y: DNDarray or scalar
        The second operand involved in the comparison

    Examples
    ---------
    >>> import heat as ht
    >>> x = ht.float32([[1, 2],[3, 4]])
    >>> ht.ne(x, 3.0)
    DNDarray([[ True,  True],
              [False,  True]], dtype=ht.bool, device=cpu:0, split=None)
    >>> y = ht.float32([[2, 2], [2, 2]])
    >>> ht.ne(x, y)
    DNDarray([[ True, False],
              [ True,  True]], dtype=ht.bool, device=cpu:0, split=None)
    """
    # Element-wise torch.ne through the generic binary-op machinery.
    res = _operations.__binary_op(torch.ne, x, y)
    # Coerce any non-bool result to ht.bool while preserving the
    # distribution metadata (shape, split, device, comm, balance).
    if res.dtype != types.bool:
        res = dndarray.DNDarray(
            res.larray.type(torch.bool),
            res.gshape,
            types.bool,
            res.split,
            res.device,
            res.comm,
            res.balanced,
        )
    return res
|
f948f586781fb6c841a576b19defe2dff388469b
| 3,637,294
|
def _quote_embedded_quotes(text):
"""
Replace any embedded quotes with two quotes.
:param text: the text to quote
:return: the quoted text
"""
result = text
if '\'' in text:
result = result.replace('\'', '\'\'')
if '"' in text:
result = result.replace('"', '""')
return result
|
71231e590e025c2ceb7b2dd4fde4465a9ff61a4c
| 3,637,295
|
def exp2(x):
    """Calculate 2**x"""
    return pow(2, x)
|
d76d1e344e79ebb05d38a2e7e6ef36b6f367e85b
| 3,637,296
|
import json
from typing import Generator
def play():
    """Play page.

    Looks up the player's ticket via the 'ticket_name' cookie, creates a
    fresh ticket when none matches the current game, renders the play
    template and (re)sets the cookie.
    """
    ticket_name = request.cookies.get('ticket_name')
    ticket = None
    game = get_game()
    new_ticket = True
    if ticket_name:
        ticket = Ticket.get_by_name(ticket_name)
        # A ticket from a previous game forces creation of a new one.
        # NOTE(review): if the cookie names a ticket that no longer exists,
        # `ticket` stays None and `new_ticket` becomes falsy, so the render
        # below would fail on `ticket.data` — confirm this cannot happen.
        new_ticket = ticket and ticket.game != game.id
    if new_ticket:
        ticket = Ticket.create(
            name=get_name(),
            game=game.id,
            data=json.dumps(Generator().get_ticket())
        )
    resp = make_response(render_template("public/play.html", card=ticket, data=json.loads(ticket.data)))
    resp.set_cookie('ticket_name', ticket.name)
    return resp
|
75dd74a843c60d7eea2f1f2ffd08febbabbf5d41
| 3,637,297
|
def count_search_results(idx, typ, query, date_range, exclude_distributions,
                         exclude_article_types):
    """Count the number of results for a query.

    Builds the Elasticsearch query body from the given filters and runs a
    count against the given index/doc-type.
    """
    body = create_query(query, date_range, exclude_distributions,
                        exclude_article_types)
    return _es().count(index=idx, doc_type=typ, body=body)
|
b53742010645fc363abca8ddad5a15c7268ff49b
| 3,637,298
|
def ifft(data: np.ndarray) -> np.ndarray:
    """
    Perform inverse discrete Fast Fourier transform of data by conjugating signal.

    Uses the identity ifft(x) = conj(fft(conj(x))) (up to the sibling
    `fft`'s normalisation convention) so the forward transform can be
    reused.

    Arguments:
        data: frequency data to be transformed (np.array, shape=(n,), dtype='float64')
    Return:
        result: Inverse transformed data
    """
    # Fixed: dropped the unused `n = len(data)` local and the needless
    # intermediate variable.
    return np.conjugate(fft(np.conjugate(data)))
|
540ed47b2c7c4085609a9f94dba469c2b3a32d7a
| 3,637,299
|
def archived_minute(dataSet, year, month, day, hour, minute):
    """
    Input: a dataset and specific minute
    Output: a list of ride details at that minute or -1 if no ride during that minute
    """
    # Keys look like "M/D/YYYY HH:MM:00" — hour and minute are zero-padded,
    # month and day are not.
    key = "{}/{}/{} {:02d}:{:02d}:00".format(month, day, year, hour, minute)
    return dataSet.get(key, -1)
|
e550cb8ae5fbcfcc2a0b718dc2e4f3372f100015
| 3,637,300
|
def Rq(theta, vect):
    """Returns a 3x3 matrix representing a rotation of angle theta about vect
    axis (Rodrigues' rotation formula).

    Parameters
    ----------
    theta: float, rotation angle in radian
    vect: list of float or array, vector about which the rotation happens
    """
    # Antisymmetric cross-product matrix of `vect`.
    Q = np.matrix([[0.0, -vect[2], vect[1]],
                   [vect[2], 0.0, -vect[0]],
                   [-vect[1], vect[0], 0.0]])
    identity = np.matrix(np.identity(3))
    # R = I + sin(theta)*Q + (1 - cos(theta))*Q^2
    return identity + np.sin(theta) * Q + (1 - np.cos(theta)) * Q ** 2
|
069ef6568df170179a728b59dfb6a03dca0fbb75
| 3,637,301
|
def ExpsMaintPol():
    """Maintenance expense per policy"""
    # Looks up the maintenance-expense assumption matching this cell's
    # product, policy type and generation. NOTE(review): `asmp`, `prod`,
    # `polt` and `gen` are free variables presumably injected by the
    # enclosing model space (modelx-style) — confirm.
    return asmp.ExpsMaintPol.match(prod, polt, gen).value
|
778d77d860378deeceee33e20a996e667430f851
| 3,637,302
|
def inputRead(c, inps):
    """
    Reads the tokens in the input channels (Queues) given by the list inps
    using the token rates defined by the list c.

    Parameters
    ----------
    c : [int]
        List of token consumption rates.
    inps : [Queue]
        List of channels.

    Returns
    ----------
    inputs : [List]
        One list of consumed tokens per channel, in channel order.
    """
    if len(c) != len(inps):
        raise Exception("Token consumption list and Queue list have different sizes")
    # Consume `rate` tokens from each channel.
    return [[channel.get() for _ in range(rate)]
            for rate, channel in zip(c, inps)]
|
ea70548f7da4fae66fe5196734bbf39deb255537
| 3,637,303
|
def _split_schema_abstract(s):
    """
    split the schema abstract into fields

    Fields are separated by spaces, but spaces inside (possibly nested)
    brackets do not split. Raises ValueError on mismatched brackets.

    >>> _split_schema_abstract("a b c")
    ['a', 'b', 'c']
    >>> _split_schema_abstract("a(a b)")
    ['a(a b)']
    >>> _split_schema_abstract("a b[] c{a b}")
    ['a', 'b[]', 'c{a b}']
    >>> _split_schema_abstract(" ")
    []
    """
    r = []         # completed fields
    w = ''         # field currently being accumulated
    brackets = []  # stack of currently-open bracket characters
    for c in s:
        # A space only terminates a field when no bracket is open.
        if c == ' ' and not brackets:
            if w:
                r.append(w)
                w = ''
        else:
            w += c
            # _BRACKETS (module-level, not visible here) presumably maps
            # each opening bracket to its closing partner — confirm.
            if c in _BRACKETS:
                brackets.append(c)
            elif c in _BRACKETS.values():
                if not brackets or c != _BRACKETS[brackets.pop()]:
                    raise ValueError("unexpected " + c)
    if brackets:
        raise ValueError("brackets not closed: %s" % brackets)
    if w:
        r.append(w)
    return r
|
ba1fba44979074b34adf87173a1277c212bd93e8
| 3,637,304
|
def myfn(n):
    """Print "hello world!" n times.

    Fixed: the original recursive version never terminated for n < 1
    (infinite recursion until RecursionError); this iterative version
    simply prints nothing for n <= 0. The original docstring also claimed
    a one-second pause between prints, but no sleep was ever performed —
    that claim is dropped rather than silently adding a sleep.
    """
    for _ in range(n):
        print("hello world!")
|
4405e8b4c591c435d43156283c0d8e2aa9860055
| 3,637,305
|
def name(ea, **flags):
    """Return the name defined at the address specified by `ea`.
    If `flags` is specified, then use the specified value as the flags.

    Returns None when no (real) name exists at the address.
    """
    # Clamp/normalise the address into the database range.
    ea = interface.address.inside(ea)
    # figure out what default flags to use
    fn = idaapi.get_func(ea)
    # figure out which name function to call
    if idaapi.__version__ < 6.8:
        # if get_true_name is going to return the function's name instead of a real one, then leave it as unnamed.
        if fn and interface.range.start(fn) == ea and not flags:
            return None
        aname = idaapi.get_true_name(ea) or idaapi.get_true_name(ea, ea)
    else:
        # Modern API: honour an explicit 'flags' kwarg, default to GN_LOCAL.
        aname = idaapi.get_ea_name(ea, flags.get('flags', idaapi.GN_LOCAL))
    # return the name at the specified address or not
    return utils.string.of(aname) or None
|
7c0d938f5f4112f08749e1f412403d0da7ebf4d1
| 3,637,306
|
def estimate_distance(
    row: pd.DataFrame,
    agent_x: float,
    agent_y: float
):
    """
    Side function to estimate the Euclidean distance from AGENT to the
    other vehicles. This function should be applied by row.

    Args:
        row: (pd.DataFrame)
        agent_x: (float) x coordinate of agent
        agent_y: (float) y coordinate of agent

    Returns:
        (pd.DataFrame) the same row, with a "distance" entry added
    """
    delta_x = row["center_x"] - agent_x
    delta_y = row["center_y"] - agent_y
    row["distance"] = np.sqrt(delta_x ** 2 + delta_y ** 2)
    return row
|
c6d3dd9dbdcdde06baea95c8e0e56794d80aa0de
| 3,637,307
|
from .plot_methods import plot_spinpol_bands
from .bokeh_plots import bokeh_spinpol_bands
def spinpol_bands(kpath, eigenvalues_up, eigenvalues_dn, backend=None, data=None, **kwargs):
    """
    Plot the provided data for a bandstructure (spin-polarized)

    Non-weighted, weighted, as a line plot or scatter plot,
    color-mapped or fixed colors are all possible options

    :param kpath: data for the kpoints path (flattened to 1D)
    :param eigenvalues_up: data for the eigenvalues for spin-up
    :param eigenvalues_dn: data for the eigenvalues for spin-down
    :param data: source for the data of the plot (optional) (pandas Dataframe for example)
    :param backend: name of the backend to use (uses a default if None is given)

    Kwargs are passed on to the backend plotting functions:
        - ``matplotlib``: :py:func:`~masci_tools.vis.plot_methods.plot_spinpol_bands()`
        - ``bokeh``: :py:func:`~masci_tools.vis.bokeh_plots.bokeh_spinpol_bands()`

    :returns: Figure object for the used plotting backend
    """
    # Dispatch table: backend enum value -> concrete plotting function.
    plot_funcs = {PlotBackend.mpl: plot_spinpol_bands, PlotBackend.bokeh: bokeh_spinpol_bands}
    backend = PlotBackend.from_str(backend)  # None resolves to the default
    return plot_funcs[backend](kpath, eigenvalues_up, eigenvalues_dn, data=data, **kwargs)
|
60df91e2a06b4ff2e943efebc4ff936fb55164dd
| 3,637,308
|
def valid_config_and_get_dates():
    """
    Validate the configuration file's parameters and return the configured
    vaccine reservation dates.

    :return: list of valid reservation dates (must be >= today)
    :raises Exception: when the cookie or dates are missing/invalid
    """
    if config.global_config.getConfigSection("cookie") == "":
        raise Exception("请先配置登陆后的 cookie,查看方式请查看 README.MD")
    if config.global_config.getConfigSection("date") == "":
        raise Exception("请先配置登陆后的 预约日期")
    valid_dates = get_dates()
    if len(valid_dates) == 0:
        raise Exception("预约日期未配置或配置不正确(预约日期需要大于等于今天),请重新配置预约日期")
    return valid_dates
|
42310c1fa60331e2ae238f4c6ab5ab23940536b3
| 3,637,309
|
import requests
def check_internet_connection():
    """Checks if there is a working internet connection.

    :return: True when a GET to a well-known URL succeeds within the
        timeout, False on any request failure (connection error, timeout,
        redirect loop, ...).
    """
    url = 'http://www.google.com/'
    timeout = 5
    try:
        requests.get(url, timeout=timeout)
        return True
    except requests.exceptions.RequestException:
        # Fixed: the original caught only ConnectionError, so a Timeout
        # propagated out of a function whose whole point is a yes/no check;
        # it also bound the exception to an unused `e` and had an
        # unreachable trailing `return False`.
        return False
|
5f587e6077377196d2c89b39f5be5d6a2747e093
| 3,637,310
|
def wall_filter(points, img):
    """
    Filters away points that are inside walls. Works by checking where the refractive index is not 1.
    """
    # Keep only points whose pixel has refractive index exactly 1.
    in_free_space = img[points[:, 0], points[:, 1]] == 1
    return points[in_free_space]
|
05a34602e8a555eb1f1739f5de910a71514a92ae
| 3,637,311
|
import requests
def navigateResults(results):
    """Navigate all links, returning a list containing the urls and corresponding pages.

    results:[String] - List with relative links to be visited (appended to BASE_ADDR)
    Return: {list}[{list}[{String}url, {String}content]]
    """
    global BASE_ADDR
    ret = []
    for i in results:
        # SECURITY(review): verify=False disables TLS certificate checks —
        # acceptable only if the target host is trusted/internal; confirm.
        page = requests.get("%s%s" % (BASE_ADDR, i), verify=False)
        ret.append(["%s%s" % (BASE_ADDR, i), page.text])
    return ret
|
566da85528c7af46b29076c2b96eaf90f083becc
| 3,637,312
|
def _get_matching_signature(oper, args):
    """ Search the first operation signature matched by a list of arguments

    Args:
        oper: Operation where searching signature
        args: Candidate list of argument expressions

    Returns:
        Matching signature, None if not found
    """
    # Search corresponding signature: first match in declaration order wins.
    return next((s for s in oper.signatures if _is_matching_arguments(s, args)), None)
|
21efb39c19f664ba0d79bcdbd1ab042bcaafffce
| 3,637,313
|
def format_size(num: int) -> str:
    """Format byte-sizes.
    :param num: Size given as number of bytes.
    .. seealso:: http://stackoverflow.com/a/1094933
    """
    # Scale down by 1024 until the value fits the current unit; anything
    # beyond GB falls through to TB.
    for unit in ('bytes', 'KB', 'MB', 'GB'):
        if -1024.0 < num < 1024.0:
            return f"{num:3.1f}{unit}"
        num /= 1024.0
    return f"{num:3.1f}TB"
|
349649fc1ee6069b2cfba1ac8aa3745aafc9c7fc
| 3,637,314
|
def hub_payload(hub):
    """Build a BrowseMedia response payload for a hub.

    Hubs attached to a library section get a section-scoped content id;
    server-level hubs get a ``server:``-scoped one.
    """
    if hasattr(hub, "librarySectionID"):
        media_content_id = f"{HUB_PREFIX}{hub.librarySectionID}:{hub.hubIdentifier}"
    else:
        media_content_id = f"{HUB_PREFIX}server:{hub.hubIdentifier}"
    return BrowseMedia(
        title=hub.title,
        media_class=MEDIA_CLASS_DIRECTORY,
        media_content_id=PLEX_URI_SCHEME + media_content_id,
        media_content_type=hub.type,
        can_play=False,
        can_expand=True,
    )
|
e446ea607db6a665c94fab774ef46db70e1ed76f
| 3,637,315
|
def roq_transform(pressure, loading):
    """Apply the Rouquerol transform: loading scaled by (1 - pressure)."""
    complement = 1 - pressure
    return loading * complement
|
b69d83579cdb904cc7e3625a371e1f6c0573e44b
| 3,637,316
|
def _cms_inmem(file_names):
    """
    Computes per-channel mean and standard deviation in an offline fashion.
    This is possible only when the whole dataset can be allocated in memory.
    Parameters
    ----------
    file_names: List of String
        List of file names of the dataset.
        NOTE(review): the use of ``file_names.size`` below implies this is
        actually a numpy array of paths, not a plain list — confirm callers.
    Returns
    -------
    mean : np.ndarray
        Per-channel (R, G, B) mean, rescaled to [0, 1].
    std : np.ndarray
        Per-channel (R, G, B) standard deviation, rescaled to [0, 1].
    """
    # Pre-allocate one array of shape (n_files, H, W, 3), using the first
    # image to fix H and W.
    # NOTE(review): assumes every image has the same dimensions as the first
    # one — confirm against the dataset.
    img = np.zeros([file_names.size] + list(np.array(Image.open(file_names[0]).convert('RGB')).shape))
    # Load all samples
    for i, sample in enumerate(file_names):
        img[i] = np.array(Image.open(sample).convert('RGB'))
    # Statistics over the whole dataset per channel, rescaled from [0, 255].
    mean = np.array([np.mean(img[:, :, :, 0]), np.mean(img[:, :, :, 1]), np.mean(img[:, :, :, 2])]) / 255.0
    std = np.array([np.std(img[:, :, :, 0]), np.std(img[:, :, :, 1]), np.std(img[:, :, :, 2])]) / 255.0
    return mean, std
|
88f856bbe33ec0e819b81dc7f5ad5930db45beea
| 3,637,317
|
def ddphi_spherical_zm (dd, ps_zm, r_e, lat, time_chunk=None ):
    """
    Calculate the meridional (latitude) gradient in a spherical system.
    Takes and returns xarray.DataArrays.
    inputs:
        dd          data xarray.DataArray with (latitude, time, level) or (latitude, time), or combinations there off
        ps_zm       xr.DataArray, surface pressure with dimensions matching dd (no copying to
                    additional dimensions needed). 2nd dimension should be latitude if more than 2 dims.
                    May be None, in which case no pressure weighting is applied.
        r_e         earth radius used in the spherical gradient
        lat         np.array, latitude values in degree, same size as dd.latitude
        time_chunk  optional chunk size for the time dimension
    returns:
        xr.DataArray, same dimensions as dd
    """
    # Rechunk dd so each dimension is a single chunk (optionally chunking time).
    rechunk_dic = dict()
    for k in dd.dims:
        rechunk_dic[k] = dd[k].size
    if time_chunk is not None:
        rechunk_dic['time'] = time_chunk
    dd = dd.chunk(rechunk_dic)
    lat_radiens = lat * np.pi / 180.0
    cos_phi = np.cos(lat_radiens)
    if ps_zm is None:
        print('no ps weight lat gradient')
        # dummy field of ones so the same expression shape works unweighted
        ps_dummy = dd.isel(level=1) * 0 + 1
        grad_matrix = ps_dummy * r_e * cos_phi**2 * dd
    else:
        print('ps weight lat gradient')
        # Rechunk ps_zm the same way as dd.
        # Bug fix: the chunk sizes were previously taken from an undefined
        # variable (`uzm_vzm_rep`); they must come from ps_zm itself.
        rechunk_dic = dict()
        for k in ps_zm.dims:
            rechunk_dic[k] = ps_zm[k].size
        if time_chunk is not None:
            rechunk_dic['time'] = time_chunk
        ps_zm = ps_zm.chunk(rechunk_dic)
        grad_matrix = ps_zm * r_e * cos_phi**2 * dd
    # The latitude axis must be the 2nd dimension for the gradient below;
    # transpose once if needed, otherwise the coordinate order is unsupported.
    if lat.size != grad_matrix.shape[1]:
        grad_matrix = grad_matrix.T
    if lat.size != grad_matrix.shape[1]:
        raise ValueError('the 2nd dimension it not the same size as the latitude. make sure the input arrays as the cooriantes like (time, latitude, level) or (time, latitude)')
    # Differentiate along latitude; normalization uses the mean latitude step
    # (in radians, via the 4.0 factor retained from the original derivation).
    grad_matrix_dphi = - grad_matrix.differentiate('latitude', edge_order=2)/(4.0*lat_radiens.diff('latitude').mean())
    # Undo the weighting applied before differentiation.
    if ps_zm is None:
        factor = r_e**2 * cos_phi**2
    else:
        factor = ps_zm * r_e**2 * cos_phi**2
    dd_return = grad_matrix_dphi / factor
    return dd_return
|
5cae237e3a1dc9646a0a288a6164880c482a82be
| 3,637,318
|
def precompute_dgmatrix(set_gm_minmax, res=0.1, adopt=True):
    """Precompute the MODIT grid matrix for normalized gammaL.

    Args:
        set_gm_minmax: set of gm min/max for different parameters,
            shape [Nsample, Nlayers, 2] where the last axis is (min, max).
        res: grid resolution; res=0.1 (default) means one grid point per digit.
        adopt: if True, the grid end points coincide with the min and max of x,
            in which case the grid width is not exactly ``res``.

    Returns:
        grid for DIT, shape (Nlayer, NDITgrid)
    """
    set_gm_minmax = np.array(set_gm_minmax)
    log_mins = np.min(set_gm_minmax[:, :, 0], axis=0)
    log_maxs = np.max(set_gm_minmax[:, :, 1], axis=0)
    widest_span = np.max(log_maxs - log_mins)
    n_grid = (widest_span / res).astype(int) + 2
    grids = []
    for log_min, log_max in zip(log_mins, log_maxs):
        if adopt:
            grids.append(np.logspace(log_min, log_max, n_grid))
        else:
            grids.append(np.logspace(log_min, log_min + (n_grid - 1) * res, n_grid))
    return np.array(grids)
|
b007c4ec9f1aec9af364abc40ed903e9db66482c
| 3,637,320
|
def get_image_urls(ids):
    """Map a sequence of ids to their corresponding image URLs."""
    urls = []
    for identifier in ids:
        urls.append(f"http://127.0.0.1:8000/{identifier}")
    return urls
|
a70cd4eea39ea277c82ccffac2e9b7d68dd7c801
| 3,637,321
|
def partition(n: int) -> int:
    """Pure Python partition function (number of integer partitions of n),
    ported to Python from SageMath.

    Uses Euler's pentagonal-number recurrence; results are memoized in the
    module-level ``_p`` cache. Reference implementation (A000041, Peter
    Luschny):

        @CachedFunction
        def A000041(n):
            if n == 0: return 1
            S = 0; J = n-1; k = 2
            while 0 <= J:
                T = A000041(J)
                S = S+T if is_odd(k//2) else S-T
                J -= k if is_odd(k) else k//2
                k += 1
            return S
    """
    if n in _p:
        return _p[n]
    if not n:
        return 1
    total, j, k = EMPTY_SUM, dec(n), 2
    while j >= 0:
        term = partition(j)
        # add when k//2 is odd, subtract otherwise (alternating signs of
        # the pentagonal recurrence)
        total = total + term if k // 2 % 2 else total - term
        j -= k if k % 2 else k // 2
        k += 1
    _p[n] = total
    return total
|
e30279029e8fd3ec012020fcf9e088822577e3d3
| 3,637,322
|
def port_to_host_int(port: int) -> int:
    """Convert a port number from network byte order to host byte order.

    Args:
        port (int): the network-byte-order (big endian) port to convert.
    Returns:
        int: the port in host byte order.
    """
    host_order_port = ntohs(port)
    return host_order_port
|
69506efe702826ca55e013cfe7f064c61994beb9
| 3,637,323
|
def inv_median(a):
    """
    Return the reciprocal of the median of array ``a``.

    Useful as the `scale` argument of ccdproc.combine when combining
    flat frames. See CCD Data Reduction Guide Sect. 4.3.1.
    """
    median_value = np.median(a)
    return 1 / median_value
|
3612b6b334781c44677c3be91d876b69ec6cd6d3
| 3,637,324
|
def mpdisted(dask_client, T_A, T_B, m, percentage=0.05, k=None, normalize=True):
    """
    Compute the z-normalized matrix profile distance (MPdist) between two
    time series with a distributed dask cluster.

    MPdist treats two time series as similar when they share many
    subsequences, regardless of the order in which those subsequences match.
    It concatenates and sorts the output of an AB-join and a BA-join and
    reports the ``k``-th smallest value. MPdist is a measure, not a metric
    (it does not obey the triangle inequality), but it is highly scalable.

    Parameters
    ----------
    dask_client : client
        A Dask Distributed client connected to a Dask scheduler and workers.
        Setting up a cluster is beyond the scope of this library; see the
        Dask Distributed documentation.
    T_A : ndarray
        The first time series or sequence.
    T_B : ndarray
        The second time series or sequence.
    m : int
        Window size.
    percentage : float, default 0.05
        Fraction (0.0-1.0) of distances used to report `mpdist`.
        Ignored when `k` is not `None`.
    k : int
        The `k`-th value of the concatenated matrix profiles to return.
        When given, `percentage` is ignored.
    normalize : bool, default True
        When True, subsequences are z-normalized before distance
        computation. Otherwise this function is re-routed to its
        non-normalized equivalent by the `@core.non_normalized` decorator.

    Returns
    -------
    MPdist : float
        The matrix profile distance.

    Notes
    -----
    `DOI: 10.1109/ICDM.2018.00119 \
    <https://www.cs.ucr.edu/~eamonn/MPdist_Expanded.pdf>`__, Section III.
    """
    mpdist = _mpdist(
        T_A, T_B, m, percentage, k, dask_client=dask_client, mp_func=stumped
    )
    return mpdist
|
5ba6455228901e528909a63764a5a9d4fbdef2b3
| 3,637,325
|
def naive_pipeline_2() -> Pipeline:
    """Build a 7-step-horizon pipeline around NaiveModel(2) with no transforms."""
    model = NaiveModel(2)
    return Pipeline(model=model, transforms=[], horizon=7)
|
a14cef26c6970260435ddd5a17762e8dfa98e51a
| 3,637,326
|
import torch
def caption_image_batch(encoder, decoder, images, word_map, device, max_length):
    """
    Greedily decode captions for a batch of images.

    At each step the single highest-scoring word is chosen for every
    sequence (greedy decoding, not beam search despite the historical
    docstring mention of beam size).

    :param encoder: encoder model producing image feature maps
    :param decoder: decoder model with attention (must expose embedding,
        attention, f_beta, sigmoid, decode_step, fc, init_hidden_state)
    :param images: batch of input images
    :param word_map: dict mapping words to integer ids; must contain
        '<start>' and '<end>'
    :param device: torch device the decoder embedding input is moved to
    :param max_length: decoding stops after this many steps even when some
        sequences have not produced '<end>'
    :return: list with the decoded length (index of the first '<end>' token)
        for each sequence in the batch
    """
    # Encode
    encoder_out = encoder(images)  # (1, enc_image_size, enc_image_size, encoder_dim)
    batch_size = encoder_out.size(0)
    encoder_dim = encoder_out.size(3)
    # Flatten encoding
    encoder_out = encoder_out.view(batch_size, -1, encoder_dim)  # (1, num_pixels, encoder_dim)
    # Tensor to store top k previous words at each step; now they're just <start>
    k_prev_words = torch.LongTensor([[word_map['<start>']]] * batch_size)  # (k, 1)
    # Tensor to store top k sequences; now they're just <start>
    seqs = k_prev_words  # (k, 1)
    # Lists to store completed sequences, their alphas and scores
    complete_seqs = set()
    # Start decoding
    step = 1
    h, c = decoder.init_hidden_state(encoder_out)
    # s is a number less than or equal to k, because sequences are removed from this process once they hit <end>
    while len(complete_seqs) < batch_size:
        embeddings = decoder.embedding(k_prev_words.to(device)).squeeze(1)  # (s, embed_dim)
        awe, alpha = decoder.attention(encoder_out, h)  # (s, encoder_dim), (s, num_pixels)
        # Attention gating: scale the attention-weighted encoding by a
        # learned sigmoid gate before feeding it to the LSTM cell.
        gate = decoder.sigmoid(decoder.f_beta(h))  # gating scalar, (s, encoder_dim)
        awe = gate * awe
        h, c = decoder.decode_step(torch.cat([embeddings, awe], dim=1), (h, c))  # (s, decoder_dim)
        scores = decoder.fc(h)  # (s, vocab_size)
        # Greedy choice: take the argmax word for every sequence.
        _, next_word_inds = scores.max(1)
        next_word_inds = next_word_inds.cpu()
        # Add new words to sequences, alphas
        seqs = torch.cat([seqs, next_word_inds.unsqueeze(1)], dim=1)  # (s, step+1)
        # Which sequences are incomplete (didn't reach <end>)?
        incomplete_inds = [ind for ind, next_word in enumerate(next_word_inds) if
                           next_word != word_map['<end>']]
        complete_inds = set(range(batch_size)) - set(incomplete_inds)
        complete_seqs.update(complete_inds)
        k_prev_words = next_word_inds.unsqueeze(1)
        # Break if things have been going on too long
        if step > max_length:
            break
        step += 1
    # Append a final <end> column so every sequence is guaranteed to contain
    # <end>, making the index() lookup below safe even after a forced break.
    k_end_words = torch.LongTensor([[word_map['<end>']]] * batch_size)  # (k, 1)
    seqs = torch.cat([seqs, k_end_words], dim=1)  # (s, step+1)
    seq_length = [s.tolist().index(word_map['<end>']) for s in seqs]
    return seq_length
|
192def399d6b05947df7bac06e90836771a22dda
| 3,637,327
|
def wasserstein_distance(p, q, C):
    """Compute the Wasserstein distance between two discrete distributions
    by solving the optimal-transport linear program.

    p.shape=(m,)  source distribution (must be normalized to sum to 1)
    q.shape=(n,)  target distribution (must be normalized to sum to 1)
    C.shape=(m,n) cost matrix

    Returns the optimal transport cost (the LP objective value).
    """
    p = np.array(p)
    q = np.array(q)
    # Equality constraints on the flattened transport plan:
    # each row of the plan sums to p[i], each column sums to q[j].
    A_eq = []
    for i in range(len(p)):
        A = np.zeros_like(C)
        A[i, :] = 1.0
        A_eq.append(A.reshape((-1,)))
    for j in range(len(q)):
        A = np.zeros_like(C)
        A[:, j] = 1.0
        A_eq.append(A.reshape((-1,)))
    A_eq = np.array(A_eq)
    b_eq = np.concatenate([p, q], axis=0)
    C = np.array(C).reshape((-1,))
    # Drop the last (redundant) constraint: since sum(p) == sum(q) == 1 the
    # full constraint set is rank-deficient.
    # Bug fix: the "interior-point" method (and its cholesky/sym_pos options)
    # was removed from SciPy 1.11; the default "highs" solver is used instead.
    return optimize.linprog(
        c=C,
        A_eq=A_eq[:-1],
        b_eq=b_eq[:-1],
        method="highs",
    ).fun
|
3a1e1836f5ec9975b30dafff06d8a4eaee90e482
| 3,637,328
|
def median_cutoff_points(ventricular_rate, ponset, toffset):
    """Calculate the median cutoff start and end points.

    NaN onsets/offsets fall back to the full 0-600 range; the start point
    gets a small safety margin, and the end point is derived from the
    average RR interval when the ventricular rate is known.
    """
    start = 0 if np.isnan(ponset) else int(ponset)
    stop = 600 if np.isnan(toffset) else int(toffset)
    # limit the onset and offset to the 0-600 range, taking a margin of
    # 10ms (5*2) on the start index
    margin = 5
    start = max(start - margin, 0)
    stop = min(stop, 600)
    if np.isnan(ventricular_rate):
        end = 600
    else:
        # average number of points between the QRS complexes
        rr_interval = (1 * 60 * 1000 / 2) / ventricular_rate
        # a beat roughly ends at the P-wave onset plus one average beat
        end = min(start + margin + rr_interval, 600)
        # never end before the GE-measured T-wave offset
        end = max(end, stop)
    if not np.isinf(end):
        end = int(end)
    return start, end
|
28de4a6ec172a052cb3a819d21ae8371ba68a469
| 3,637,329
|
def make_colormap(color_palette, N=256, gamma=1.0):
    """
    Create a linear colormap from a color palette.

    Parameters
    ----------
    color_palette : str, list, or dict
        A color string, list of color strings, or color palette dict
    N : int, default 256
        Number of RGB quantization levels in the resulting colormap.
    gamma : float, default 1.0
        Gamma correction applied to the linear segments.

    Returns
    -------
    cmap : LinearSegmentedColormap
        A colormap object based on color_palette using linear segments.
    """
    colors = extract_palette(color_palette)
    rgb = map(hex2rgb, colors)
    # Bug fix: the caller-supplied ``gamma`` was previously ignored
    # (hard-coded to 1.0), making the parameter a silent no-op.
    return LinearSegmentedColormap.from_list('custom', list(rgb),
                                             N=N, gamma=gamma)
|
78fee496532616d3d4a8d7bee6ee5ec7637750f5
| 3,637,330
|
def frac_correct(t):
    """Return the fraction of correct trials in ``t`` and its confidence interval.

    Outcomes must be binary (0/1); this is asserted before averaging.
    """
    assert np.all(t.outcome.values < 2)
    fraction = t.outcome.mean()
    interval = confidence(fraction, len(t))
    return fraction, interval
|
82ed3ae7e6e9a34933ff7c07a2ca54faf563b235
| 3,637,331
|
def run_location(tokens, description):
    """Identifies the indices of matching text in the lines.

    Arguments:
        tokens (list): A list of strings, serialized from the GUI; tokens[0]
            selects 'before' (start indices) or 'after' (end indices) and
            tokens[1] is the text to match.
        description (CourseDescription): The course to be matched against.
    Returns:
        list: List of list of index positions (empty for unknown tokens[0]).
    """
    indices = run_text(tokens[1], description)
    if tokens[0] == 'before':
        return [[start for start, _ in line_indices] for line_indices in indices]
    if tokens[0] == 'after':
        return [[end for _, end in line_indices] for line_indices in indices]
    return []
|
b2f7867319620f8a46c07727494cc8dbf85d5084
| 3,637,332
|
def domean(data, start, end, calculation_type):
    """
    Gets average direction using Fisher or principal component analysis (line
    or plane) methods
    Parameters
    ----------
    data : nest list of data: [[treatment,dec,inc,int,quality],...]
        quality ('g'ood / 'b'ad) is appended as 'g' when missing
    start : step being used as start of fit (often temperature minimum)
    end : step being used as end of fit (often temperature maximum)
    calculation_type : string describing type of calculation to be made
    'DE-BFL' (line), 'DE-BFL-A' (line-anchored), 'DE-BFL-O' (line-with-origin),
    'DE-BFP' (plane), 'DE-FM' (Fisher mean)
    Returns
    -------
    mpars : dictionary with the keys "specimen_n","measurement_step_min",
    "measurement_step_max","specimen_mad","specimen_dec","specimen_inc"
    """
    mpars = {}
    datablock = []
    start0, end0 = start, end
    # indata = [rec.append('g') if len(rec)<6 else rec for rec in indata] #
    # this statement doesn't work!
    # Normalize every record to 6 fields by appending a 'g' (good) quality flag.
    indata = []
    for rec in data:
        if len(rec) < 6:
            rec.append('g')
        indata.append(rec)
    if indata[start0][5] == 'b':
        print("Can't select 'bad' point as start for PCA")
    # Drop 'b' (bad) records and shift the start/end indices accordingly so
    # they keep pointing at the same measurements in the filtered datablock.
    flags = [x[5] for x in indata]
    bad_before_start = flags[:start0].count('b')
    bad_in_mean = flags[start0:end0 + 1].count('b')
    start = start0 - bad_before_start
    end = end0 - bad_before_start - bad_in_mean
    datablock = [x for x in indata if x[5] == 'g']
    if indata[start0] != datablock[start]:
        print('problem removing bad data in pmag.domean start of datablock shifted:\noriginal: %d\nafter removal: %d' % (
            start0, indata.index(datablock[start])))
    if indata[end0] != datablock[end]:
        print('problem removing bad data in pmag.domean end of datablock shifted:\noriginal: %d\nafter removal: %d' % (
            end0, indata.index(datablock[end])))
    mpars["calculation_type"] = calculation_type
    rad = old_div(np.pi, 180.)
    # Clamp the fit interval to the available (good) data.
    if end > len(datablock) - 1 or end < start:
        end = len(datablock) - 1
    control, data, X, Nrec = [], [], [], float(end - start + 1)
    cm = [0., 0., 0.]
    #
    # get cartesian coordinates
    #
    fdata = []
    for k in range(start, end + 1):
        if calculation_type == 'DE-BFL' or calculation_type == 'DE-BFL-A' or calculation_type == 'DE-BFL-O':  # best-fit line
            data = [datablock[k][1], datablock[k][2], datablock[k][3]]
        else:
            data = [datablock[k][1], datablock[k][2], 1.0]  # unit weight
        fdata.append(data)
        cart = dir2cart(data)
        X.append(cart)
    if calculation_type == 'DE-BFL-O':  # include origin as point
        X.append([0., 0., 0.])
        # pass
    if calculation_type == 'DE-FM':  # for fisher means
        fpars = fisher_mean(fdata)
        mpars["specimen_direction_type"] = 'l'
        mpars["specimen_dec"] = fpars["dec"]
        mpars["specimen_inc"] = fpars["inc"]
        mpars["specimen_alpha95"] = fpars["alpha95"]
        mpars["specimen_n"] = fpars["n"]
        mpars["specimen_r"] = fpars["r"]
        mpars["measurement_step_min"] = indata[start0][0]
        mpars["measurement_step_max"] = indata[end0][0]
        mpars["center_of_mass"] = cm
        mpars["specimen_dang"] = -1
        return mpars
    #
    # get center of mass for principal components (DE-BFL or DE-BFP)
    #
    for cart in X:
        for l in range(3):
            cm[l] += old_div(cart[l], Nrec)
    mpars["center_of_mass"] = cm
    #
    # transform to center of mass (if best-fit line)
    #
    if calculation_type != 'DE-BFP':
        mpars["specimen_direction_type"] = 'l'
    if calculation_type == 'DE-BFL' or calculation_type == 'DE-BFL-O':  # not for planes or anchored lines
        for k in range(len(X)):
            for l in range(3):
                X[k][l] = X[k][l] - cm[l]
    else:
        mpars["specimen_direction_type"] = 'p'
    #
    # put in T matrix
    #
    T = np.array(Tmatrix(X))
    #
    # get sorted evals/evects
    #
    t, V = tauV(T)
    if t == []:
        mpars["specimen_direction_type"] = "Error"
        print("Error in calculation")
        return mpars
    v1, v3 = V[0], V[2]
    if t[2] < 0:
        t[2] = 0  # make positive
    if calculation_type == 'DE-BFL-A':
        # Anchored line: the direction is the vector mean of the raw data.
        Dir, R = vector_mean(fdata)
        mpars["specimen_direction_type"] = 'l'
        mpars["specimen_dec"] = Dir[0]
        mpars["specimen_inc"] = Dir[1]
        mpars["specimen_n"] = len(fdata)
        mpars["measurement_step_min"] = indata[start0][0]
        mpars["measurement_step_max"] = indata[end0][0]
        mpars["center_of_mass"] = cm
        s1 = np.sqrt(t[0])
        MAD = old_div(np.arctan(old_div(np.sqrt(t[1] + t[2]), s1)), rad)
        if np.iscomplexobj(MAD):
            MAD = MAD.real
        # I think this is how it is done - i never anchor the "PCA" - check
        mpars["specimen_mad"] = MAD
        return mpars
    if calculation_type != 'DE-BFP':
        #
        # get control vector for principal component direction
        #
        rec = [datablock[start][1], datablock[start][2], datablock[start][3]]
        P1 = dir2cart(rec)
        rec = [datablock[end][1], datablock[end][2], datablock[end][3]]
        P2 = dir2cart(rec)
        #
        # get right direction along principal component
        ##
        for k in range(3):
            control.append(P1[k] - P2[k])
        # changed by rshaar
        # control is taken as the center of mass
        # control=cm
        # Flip the principal eigenvector if it points away from the
        # first-to-last measurement direction (dot product clamped to [-1, 1]).
        dot = 0
        for k in range(3):
            dot += v1[k] * control[k]
        if dot < -1:
            dot = -1
        if dot > 1:
            dot = 1
        if np.arccos(dot) > old_div(np.pi, 2.):
            for k in range(3):
                v1[k] = -v1[k]
    # get right direction along principal component
    #
    s1 = np.sqrt(t[0])
    Dir = cart2dir(v1)
    # MAD (maximum angular deviation) from the eigenvalue ratios.
    MAD = old_div(np.arctan(old_div(np.sqrt(t[1] + t[2]), s1)), rad)
    if np.iscomplexobj(MAD):
        MAD = MAD.real
    if calculation_type == "DE-BFP":
        # Plane fit: use the minimum eigenvector and the planar MAD formula.
        Dir = cart2dir(v3)
        MAD = old_div(
            np.arctan(np.sqrt(old_div(t[2], t[1]) + old_div(t[2], t[0]))), rad)
        if np.iscomplexobj(MAD):
            MAD = MAD.real
    #
    # get angle with center of mass
    #
    CMdir = cart2dir(cm)
    Dirp = [Dir[0], Dir[1], 1.]
    dang = angle(CMdir, Dirp)
    mpars["specimen_dec"] = Dir[0]
    mpars["specimen_inc"] = Dir[1]
    mpars["specimen_mad"] = MAD
    # mpars["specimen_n"]=int(Nrec)
    mpars["specimen_n"] = len(X)
    mpars["specimen_dang"] = dang[0]
    mpars["measurement_step_min"] = indata[start0][0]
    mpars["measurement_step_max"] = indata[end0][0]
    return mpars
|
09e3539e4705995e8721976412977e13575add19
| 3,637,334
|
def download_data(dataset: str):
    """
    Downloads a dataset as a .csv file.

    :param dataset: The name of the dataset to download.
    """
    data_folder = app.config['DATA_FOLDER']
    return send_from_directory(data_folder, dataset, as_attachment=True)
|
cc4cd03f38ecfbfc3b602a2ed323482203ef319f
| 3,637,335
|
def _split_features_target(feature_matrix, problem_name):
    """Split the features and labels.

    Args:
        feature_matrix (pd.DataFrame):
            a dataframe consisting of both feature values and target values.
        problem_name (str):
            the name of the problem.

    Returns:
        tuple:
            features (pd.DataFrame) and target (pd.Series).
    """
    features = feature_matrix.copy().reset_index(drop=True)
    # Drop a column literally named after the problem, when present.
    if problem_name.lower() in features.columns:
        features.pop(problem_name.lower())
    # Popping the target column from the copy yields the label series and
    # leaves only feature columns behind.
    # (Removed a redundant no-op `features = features` self-assignment.)
    target = features.pop(TARGET_NAME[problem_name])
    return features, target
|
363dd1a9956f97c3c3520a074cca3c7c57f1517f
| 3,637,336
|
def descope_queue_name(scoped_name):
    """Descope Queue name with '.'.

    Returns the queue name from the scoped name,
    which is of the form project-id.queue-name.
    """
    # Split on the first '.' only, so queue names that themselves contain
    # dots are returned intact instead of being truncated.
    return scoped_name.split('.', 1)[1]
|
24de78d12399e0894f495cd5c472b10c2315e4af
| 3,637,337
|
from IPython.display import Image
def movie(function, movie_name="movie.gif", play_range=None,
          loop=0, optimize=True, duration=100, embed=False, mp4=True):
    """
    Make a movie from a function.

    function has signature: function(index) and should return
    a PIL.Image.

    NOTE(review): play_range must be a tuple usable as range(*play_range);
    the default of None raises a TypeError — confirm callers always pass it.
    """
    # Render one frame per index in the playback range.
    frames = []
    for index in range(*play_range):
        frames.append(function(index))
    if frames:
        # First frame saves the file; remaining frames are appended as an
        # animated GIF. `duration` is the per-frame display time in ms.
        frames[0].save(movie_name, save_all=True, append_images=frames[1:],
                       optimize=optimize, loop=loop, duration=duration)
    if mp4 is False:
        # Return an IPython Image pointing at the GIF on disk.
        return Image(url=movie_name, embed=embed)
    else:
        # gif2mp4 is defined elsewhere in this project; presumably converts
        # the saved GIF to an mp4 for display — confirm.
        return gif2mp4(movie_name)
|
ac1705c4dae278a58af4f745d676c20570639567
| 3,637,339
|
def get_rounded_reward_2(duration: float) -> float:
    """Helper to compute the reward for *duration*, rounded to two decimals.

    :param duration: unrounded duration
    :return: reward rounded to two decimal points
    """
    reward = get_reward(duration)
    return round(reward, 2)
|
ad1e97788504e6b4173853dbd7e82e9b407f7d0b
| 3,637,340
|
def fitness_order(order):
    """Fitness of a city ordering: total length of the closed tour.

    Results are memoized in the module-level ``cache`` keyed by str(order).
    """
    cache_key = str(order)
    if cache_key in cache:
        return cache[cache_key]
    # Sum distances along consecutive cities, then close the loop back to
    # the start.
    total = 0
    for city_a, city_b in zip(order, order[1:]):
        total += distance_map[(city_a, city_b)]
    total += distance_map[(order[0], order[-1])]
    cache[cache_key] = total
    return total
|
e2af48535bf2219ec4cb6ddf0972c7dcc7ef363a
| 3,637,341
|
from ba import _lang
from typing import Optional
def get_human_readable_user_scripts_path() -> str:
    """Return a human readable location of user-scripts.

    This is NOT a valid filesystem path; may be something like "(SD Card)".
    Falls back to '<Not Available>' when no user script directory exists.
    """
    app = _ba.app
    path: Optional[str] = app.python_directory_user
    if path is None:
        return '<Not Available>'
    # On newer versions of android, the user's external storage dir is probably
    # only visible to the user's processes and thus not really useful printed
    # in its entirety; lets print it as <External Storage>/myfilepath.
    if app.platform == 'android':
        ext_storage_path: Optional[str] = (
            _ba.android_get_external_storage_path())
        if (ext_storage_path is not None
                and app.python_directory_user.startswith(ext_storage_path)):
            # Replace the storage prefix with a localized
            # '<External Storage>' label and keep the relative remainder.
            path = ('<' +
                    _lang.Lstr(resource='externalStorageText').evaluate() +
                    '>' + app.python_directory_user[len(ext_storage_path):])
    return path
|
f5d78fed6db03947f1bb4135391aeeb7a130031c
| 3,637,342
|
def rotated_positive_orthogonal_basis(
    angle_x=np.pi / 3, angle_y=np.pi / 4, angle_z=np.pi / 5
):
    """Get a rotated orthogonal basis.

    If X, Y, Z are the rotation matrices of the passed angles, the resulting
    basis is Z * Y * X.

    Parameters
    ----------
    angle_x :
        Rotation angle around the x-axis (Default value = np.pi / 3)
    angle_y :
        Rotation angle around the y-axis (Default value = np.pi / 4)
    angle_z :
        Rotation angle around the z-axis (Default value = np.pi / 5)

    Returns
    -------
    np.ndarray
        Rotated orthogonal basis
    """
    # rotate axes to produce a more general test case
    rot_x = tf.rotation_matrix_x(angle_x)
    rot_y = tf.rotation_matrix_y(angle_y)
    rot_z = tf.rotation_matrix_z(angle_z)
    combined = np.matmul(rot_z, np.matmul(rot_y, rot_x))
    return combined
|
8848d1d11f3e83727ed90957f012869f27991285
| 3,637,343
|
import array
def create_sequences(tokenizer, max_length, descriptions, photos_features, vocab_size):
    """
    Build one set of LSTM training inputs from caption lists and image features.

    Args:
        tokenizer: keras.preprocessing.text.Tokenizer mapping words to integers
        max_length: length of the longest caption in the training set
        descriptions: dict, keyed by image name (without the .jpg suffix);
            each value is a list of alternative captions for that image
        photos_features: dict, keyed by image name (without the .jpg suffix);
            each value is a numpy array of image features
        vocab_size: number of words in the training vocabulary

    Returns:
        tuple of three numpy arrays:
            1. image features — one row per generated (prefix, next-word) pair
            2. caption prefixes, zero-padded to max_length
            3. one-hot encoded next word for each prefix (width = vocab_size)

    Example:
        from pickle import load
        tokenizer = load(open('tokenizer.pkl', 'rb'))
        max_length = 6
        descriptions = {'1235345': ['startseq one bird on tree endseq',
                                    'startseq red bird on tree endseq'],
                        '1234546': ['startseq one boy play water endseq',
                                    'startseq one boy run across water endseq']}
        photo_features = {'1235345': [0.434, 0.534, 0.212, 0.98],
                          '1234546': [0.534, 0.634, 0.712, 0.28]}
        vocab_size = 7378
        create_sequences(tokenizer, max_length, descriptions,
                         photo_features, vocab_size)
        # -> (features of shape (21, 4),
        #     padded prefixes of shape (21, 6),
        #     one-hot next words of shape (21, 7378))
    """
    # NOTE(review): `array` here must be numpy's array function; the
    # module-level `import array` (the stdlib array module) would not be
    # callable — confirm the intended import at file level.
    X1, X2, y = list(), list(), list()
    for key, desc_list in descriptions.items():
        for desc in desc_list:
            seq = tokenizer.texts_to_sequences([desc])[0]
            # Every prefix seq[:i] predicts the following word seq[i].
            for i in range(1, len(seq)):
                in_seq, out_seq = seq[:i], seq[i]
                # pad in_seq so its length equals max_length
                in_seq = pad_sequences([in_seq], maxlen = max_length)[0]
                out_seq = to_categorical([out_seq], num_classes = vocab_size)[0]
                X1.append(photos_features[key][0])
                X2.append(in_seq)
                y.append(out_seq)
    return array(X1), array(X2), array(y)
|
31cffba7fdce229bd264e87291e57ed386498f3f
| 3,637,344
|
def avatar_uri(instance, filename):
    """upload_to handler for Channel.avatar."""
    channel_name = instance.name
    return generate_filepath(filename, channel_name, "_avatar", "channel")
|
f880f49055fbdc30f6a045f2f7916c077d22452c
| 3,637,345
|
def readMoveBaseGoalsFromFile(poses_file):
    """Read and return MoveBaseGoals for the robot-station and patrol-poses.

    If the contents of the file do not obey the syntax rules of
    _readPosesFromFile(), or if no patrol-poses were found, an IOError
    exception is raised.

    Returns:
        tuple: (list of patrol MoveBaseGoals, station MoveBaseGoal or
        None when no station pose is present in the file).
    """
    patrol_poses, station_pose = _readPosesFromFile(poses_file)
    _assertNumPatrolPoses(patrol_poses)
    patrol_goals = [_createMoveBaseGoalFromPose(x) for x in patrol_poses]
    # `is None` instead of `== None`: identity is the correct (and safe)
    # way to test for a missing station pose.
    station_goal = None if station_pose is None else _createMoveBaseGoalFromPose(station_pose)
    return patrol_goals, station_goal
|
09085a9bc569e78050f3f10552aa160e9a9324bb
| 3,637,346
|
import configparser
def get_config(section=None):
    """Load the repository-local config file.

    Returns None when nothing was read (only the implicit DEFAULT section
    exists), the requested section when ``section`` is given, otherwise
    the full ConfigParser object.
    """
    parser = configparser.ConfigParser()
    parser.read(get_repo_dir() + 'config.ini')
    if len(parser) == 1:
        # only the implicit DEFAULT section -> no config file was found
        return None
    if section is not None:
        return parser[section]
    return parser
|
934dfad58fd674b58ccb4ddfeb9d62edbaba6e84
| 3,637,347
|
def get_parent(running_list, i, this_type, parent_type):
    """Get the description of an industry group's parent.

    OSHA industry descriptions are provided in ordered lists;
    this function identifies the parent industry group based
    on information provided by the groups preceding it.

    Raises:
        ValueError: when the preceding group is neither a parent type
        nor the same granular type.
    """
    prior = running_list[i - 1]
    if clean_desc(prior.full_desc)[1] == parent_type:
        # If the type of the previous group is a parent type then set the
        # parent description to previous element's description
        parent_desc = str(prior.full_desc)
    elif clean_desc(prior.full_desc)[1] == this_type:
        # Else if the previous group is the more granular type then set the
        # parent description to previous element's parent description
        parent_desc = str(prior.parent_desc)
    else:
        # Bug fix: concatenating a str with the non-str `prior` object raised
        # a TypeError here instead of the intended ValueError.
        err_msg = 'Unexpected code type: ' + str(prior)
        raise ValueError(err_msg)
    return parent_desc
|
f307900a6220f4d6f14ed0c1924d8f03a40a8a1d
| 3,637,349
|
import torch
def rebalance_binary_class(label, mask=None, base_w=1.0):
    """Binary-class rebalancing: weight positives by the inverse class ratio.

    Returns the positive-class fraction (clamped to at least 1e-2) and a
    per-element weight tensor where positives get (1 - f) / f and
    negatives get 1.
    """
    n_elements = torch.prod(torch.tensor(label.size()).float())
    weight_factor = label.float().sum() / n_elements
    # avoid a blow-up of the positive weight when positives are (nearly) absent
    weight_factor = torch.clamp(weight_factor, min=1e-2)
    alpha = 1.0
    weight = alpha * label * (1 - weight_factor) / weight_factor + (1 - label)
    return weight_factor, weight
|
5adf3a21e4cc4b9e7bf129ecf31cfe37ab7a305a
| 3,637,350
|
def from_base(num_base: int, dec: int) -> float:
    """Convert a base-unit amount (e.g. wei) to whole units (e.g. ETH)."""
    scale = 10 ** dec
    return float(num_base / scale)
|
447a07b3e282e5104f8dcd50639c658f3013ec7a
| 3,637,351
|
def make_auth_header(auth_token):
    """Build the authorization headers for Auth0-authenticated endpoints.

    Args:
        auth_token (dict): a dict obtained from the Auth0 domain oauth
            endpoint, containing the signed JWT (JSON Web Token), its
            expiry, the scopes granted, and the token type.

    Returns:
        headers (dict): headers carrying the token information needed to
        talk to Auth0-authentication-required endpoints.
    """
    return {
        "Content-type": "application/json",
        "Authorization": "{token_type} {access_token}".format(
            token_type=auth_token['token_type'],
            access_token=auth_token['access_token'],
        ),
    }
|
e7c9b93cfbda876668068fb871d3abaf06157204
| 3,637,352
|
from typing import Tuple
def _drop_additional_columns(
    pdf: PandasDataFrame,
    column_names: Tuple,
    additional_columns: Tuple,
) -> PandasDataFrame:
    """Drop from *pdf* every column whose flag in *additional_columns* is truthy."""
    # DataFrame.drop requires a list of column labels, not a generator.
    flagged = [name for name, is_extra in zip(column_names, additional_columns) if is_extra]
    return pdf.drop(columns=flagged)
|
86a41647ae9bed9a18b428610f73af36105702b8
| 3,637,355
|
def get_cc3d(mask, top=1):
    """ Keep only the top-K largest connected components of a mask
    (26-connected neighborhood).

    :param mask: input binary mask (cast to uint8 before labeling)
    :param top: top K connected components to keep
    :return: relabeled mask where the kept components are numbered 1..top
        and everything else is 0, or the string 'invalid' when fewer than
        ``top`` components exist.
        NOTE(review): the string sentinel 'invalid' forces callers to
        type-check the return value — confirm this contract.
    """
    msk = connected_components(mask.astype('uint8'))
    # Component label 0 is background; drop it from the statistics.
    indices, counts = np.unique(msk, return_counts=True)
    indices = indices[1:]
    counts = counts[1:]
    if len(counts) >= top:
        # print(f'Found {len(counts)} connected components')
        pass
    else:
        return 'invalid'
    # Labels of the `top` largest components (argpartition avoids a full sort).
    labels = indices[np.argpartition(counts, -top)[-top:]]
    # Relabel through an offset of 500 so new labels cannot collide with
    # any original component labels still present in msk.
    for i in range(top):
        msk[msk == labels[i]] = 501+i
    mn = 501
    mx = 501 + top - 1
    msk[msk < mn] = 500
    msk[msk > mx] = 500
    # Remove the offset: kept components become 1..top, the rest become 0.
    msk = msk - 500
    return msk
|
e91d5f2617ba526ebc27eaca93d8fed7e0145d0b
| 3,637,356
|
def dscp_class(bits_0_2, bit_3, bit_4):
    """
    Derive the DSCP class name from its bit fields.

    Bits 0-2 decide the major class; bits 3-4 decide the drop precedence.
    :param bits_0_2: int: decimal value of bits 0-2
    :param bit_3: int: value of bit 3
    :param bit_4: int: value of bit 4
    :return: DSCP class name
    """
    drop_precedence = (bit_3 << 1) + bit_4
    if drop_precedence == 0:
        # class selector (no drop precedence bits set)
        return "cs{}".format(bits_0_2)
    if (bits_0_2, drop_precedence) == (5, 3):
        # expedited forwarding
        return "ef"
    # assured forwarding
    return "af{}{}".format(bits_0_2, drop_precedence)
|
79e9881e413a5fcbbbaab110e7b3346a2dbcaa53
| 3,637,357
|
def load_data(impaths_all, test=False):
    """
    Load images together with their corresponding masks and segmentations.

    :param impaths_all: paths of the images to be loaded
    :param test: Boolean, True when loading the test split (the manual
        segmentation files follow a different naming pattern)
    :return: numpy arrays of images, masks (..., 1) and segmentations (..., 1)
    """
    images, masks, segmentations = [], [], []
    for im_path in impaths_all:
        # Load as numpy array and normalize between 0 and 1
        images.append(np.array(Image.open(im_path)) / 255.)
        mask_path = im_path.replace('images', 'mask').replace('.png', '_mask.gif')
        masks.append(np.array(Image.open(mask_path)) / 255.)
        if test:
            seg_path = im_path.replace('images', '1st_manual').replace('test.png', 'manual1.gif')
        else:
            seg_path = im_path.replace('images', '1st_manual').replace('training.png', 'manual1.gif')
        segmentations.append(np.array(Image.open(seg_path)) / 255.)
    return np.array(images), np.expand_dims(np.array(masks), axis=-1), np.expand_dims(np.array(segmentations), axis=-1)
|
b1d8f1b135eab0f122370aaa98f8cbbe6f2f1be7
| 3,637,358
|
def f_assert_seq0_gte_seq1(value_list):
    """Assert that the first element of ``value_list`` is >= the second.

    Returns ``value_list`` unchanged on success; raises
    ``FeatureProcessError`` otherwise.
    """
    first, second = value_list[0], value_list[1]
    # Keep the `not >=` form: it differs from `<` for NaN operands.
    if not first >= second:
        raise FeatureProcessError('%s f_assert_seq0_gte_seq1 Error' % value_list)
    return value_list
|
225e60a565ffa81ec373dea7f8097ee6619a8b01
| 3,637,359
|
from typing import List
def decorate_diff_with_color(contents: List[str]) -> str:
    """Wrap each diff line of ``contents`` in ANSI color escape codes.

    Mutates ``contents`` in place and returns the colorized lines joined
    with newlines. File headers are bold white, hunk headers cyan,
    additions green, removals red; other lines pass through unchanged.
    """
    for idx, raw in enumerate(contents):
        if raw.startswith(("+++", "---")):
            contents[idx] = f"\033[1;37m{raw}\033[0m"  # bold white, reset
        elif raw.startswith("@@"):
            contents[idx] = f"\033[36m{raw}\033[0m"  # cyan, reset
        elif raw.startswith("+"):
            contents[idx] = f"\033[32m{raw}\033[0m"  # green, reset
        elif raw.startswith("-"):
            contents[idx] = f"\033[31m{raw}\033[0m"  # red, reset
    return '\n'.join(contents)
|
55821622b1e7d7f545fa4ad34abebd108f27528d
| 3,637,362
|
from typing import Dict
def _combine_multipliers(first: Dict[Text, float],
second: Dict[Text, float]) -> Dict[Text, float]:
"""Combines operation weight multiplier dicts. Modifies the first dict."""
for name in second:
first[name] = first.get(name, 1.0) * second[name]
return first
|
e9db49daad0463e42a9231a2459fac5b4d14e181
| 3,637,363
|
def scale_to_one(iterable):
    """
    Rescale a collection of numbers proportionally so the largest
    value becomes 1.
    Example:
    >>> scale_to_one([5, 4, 3, 2, 1])
    [1.0, 0.8, 0.6, 0.4, 0.2]
    """
    peak = max(iterable)
    return [value / peak for value in iterable]
|
92cfc7ef586ecfea4300aeedabe2410a247610f7
| 3,637,364
|
def insecure(path):
    """Find an insecure path, at or above this one.

    Walks from ``path`` up through its parent paths (via
    ``search_parent_paths``) and hands ``insecure_inode`` to ``first``
    as the selection criterion.

    :param path: filesystem path to start from
    :return: whatever ``first`` yields for the first insecure entry --
        presumably the matching path, or ``None`` when no parent is
        insecure; confirm against the ``first`` helper's contract.
    """
    return first(search_parent_paths(path), insecure_inode)
|
685e66392f2adf8c9447e5cacf883de68c3bbe2d
| 3,637,366
|
import gc
def n_feature_influence(estimators, n_train, n_test, n_features, percentile):
    """
    Estimate influence of the number of features on prediction time.
    Parameters
    ----------
    estimators : dict of (name (str), estimator) to benchmark
    n_train : nber of training instances (int)
    n_test : nber of testing instances (int)
    n_features : list of feature-space dimensionality to test (int)
    percentile : percentile at which to measure the speed (int [0-100])
    Returns:
    --------
    percentiles : dict(estimator_name,
                       dict(n_features, percentile_perf_in_us))
    """
    results = defaultdict(defaultdict)
    for dim in n_features:
        print("benchmarking with %d features" % dim)
        X_train, y_train, X_test, y_test = generate_dataset(n_train, n_test, dim)
        for name, est in estimators.items():
            est.fit(X_train, y_train)
            # Collect garbage now so GC pauses do not skew the timings.
            gc.collect()
            runtimes = bulk_benchmark_estimator(est, X_test, 30, False)
            # Convert seconds to microseconds at the requested percentile.
            results[name][dim] = 1e6 * np.percentile(runtimes, percentile)
    return results
|
16d386385f4fde04670e37e76abe13b8c22a487c
| 3,637,367
|
def make_author_list(res):
    """Build a list of "Family, Given" author names from a citation record.

    :param res: dict-like citation record; ``res['author']`` is expected
        to hold dicts with ``'family'`` and ``'given'`` keys.
    :return: list of cleaned, capitalized "Family, Given" strings, or
        ``["Unknown Authors"]`` when the record has no ``'author'`` key.
    """
    try:
        r = [", ".join([clean_txt(x['family']).capitalize(), clean_txt(x['given']).capitalize()]) for x in res['author']]
    except KeyError:
        # Fix: the warning previously said 'Unknown Author' while the
        # placeholder actually inserted is 'Unknown Authors'.
        print("No 'author' key, using 'Unknown Authors'. You should edit the markdown file to change the name and citationkey.")
        r = ["Unknown Authors"]
    return r
|
e1459514928d87a3e688b6957437452d02e50987
| 3,637,368
|
def backproject_points_np(p, fx=None, fy=None, cx=None, cy=None, K=None):
    """Project 3D camera-frame points to pixel coordinates (pinhole model).

    NOTE(review): despite the name, this is a forward projection
    (3D -> 2D), not a backprojection -- confirm against callers.

    :param p: array of shape (nr_points, 3) holding (x, y, z) points
    :param fx, fy: focal lengths; ignored when K is given
    :param cx, cy: principal point; ignored when K is given
    :param K: optional 3x3 intrinsic matrix; overrides fx/fy/cx/cy
    :return: array of shape (nr_points, 2) holding (v, u) pixel coords
    """
    if K is not None:  # idiomatic form of the original `not K is None`
        fx = K[0, 0]
        fy = K[1, 1]
        cx = K[0, 2]
        cy = K[1, 2]
    # Perspective divide, then scale and offset by the intrinsics.
    u = ((p[:, 0] / p[:, 2]) * fx) + cx
    v = ((p[:, 1] / p[:, 2]) * fy) + cy
    return np.stack([v, u]).T
|
a06031de2f03d6e80ae149024a5bb2bea96f6b76
| 3,637,369
|
def recover(D, gamma=None):
    """Recover low-rank and sparse part, using Alg. 4 of [2].
    Note: gamma is lambda in Alg. 4.
    Parameters
    ---------
    D : numpy ndarray, shape (N, D)
        Input data matrix.
    gamma : float, default = None
        Weight on sparse component. If 'None', then gamma = 1/sqrt(max(D, N))
        as shown in [1] to be the optimal choice under a set of suitable
        assumptions.
    Returns
    -------
    LL : numpy array, shape (N, D)
        Low-rank part of data
    SP : numpy array, shape (N, D)
        Sparse part of data
    n_iter : int
        Number of iterations until convergence.
    """
    n, m = D.shape
    if gamma is None:
        gamma = 1/np.sqrt(np.amax([n, m]))
    # the following lines implement line 1 of Alg. 4
    Y = np.sign(D)
    l2n = np.linalg.norm(Y, ord=2)
    l2i = np.linalg.norm(np.asarray(Y).ravel(), ord=np.inf)
    dual_norm = np.amax([l2n, l2i])
    Y = Y/dual_norm
    # line 4 of Alg. 4
    A_hat = np.zeros(D.shape)
    E_hat = np.zeros(D.shape)
    D_fro = np.linalg.norm(D, ord='fro')
    # cf. section "Choosing Parameters" of [2]
    proj_tol = 1e-06*D_fro
    term_tol = 1e-07
    iter_max = 1e+03
    num_svd = 0  # track # of SVD calls
    m = 0.5/l2n  # \mu in Alg. 4 (NOTE: shadows the column count read above)
    r = 6  # \rho in Alg. 4
    sv = 5
    svp = sv
    k = 0
    converged = False
    while not converged:
        primal_converged = False
        sv = sv+np.round(n*0.1)
        primal_iter = 0
        while not primal_converged:
            # implement line 10 in Alg. 4 (soft-threshold the residual)
            T_tmp = D-A_hat+1/m*Y
            E_tmp = (np.maximum(T_tmp-gamma/m, 0) +
                     np.minimum(T_tmp+gamma/m, 0))
            # line 7 of Alg. 4
            U, S, V = np.linalg.svd(D-E_tmp+1/m*Y, full_matrices=False)
            # line 8 of Alg. 4
            svp = len(np.where(S > 1/m)[0])
            if svp < sv:
                sv = np.amin([svp+1, n])
            else:
                sv = np.amin([svp + np.round(0.05*n), n])
            # Fix: use plain ndarray matmul instead of the deprecated
            # np.mat / np.matrix types; numerically identical, but
            # returns ndarrays to the caller.
            A_tmp = U[:, 0:svp] @ np.diag(S[0:svp]-1/m) @ V[0:svp, :]
            # check convergence of inner optimization
            if (np.linalg.norm(A_hat-A_tmp, ord='fro') < proj_tol and
                    np.linalg.norm(E_hat-E_tmp, ord='fro') < proj_tol):
                primal_converged = True
            A_hat = A_tmp
            E_hat = E_tmp
            primal_iter = primal_iter+1
            num_svd = num_svd+1
        # line 13 of Alg. 4
        Z = D-A_hat-E_hat
        Y = Y+m*Z
        m = r*m
        # evaluate stopping criteria
        stop_crit = np.linalg.norm(Z, 'fro')/D_fro
        if stop_crit < term_tol:
            converged = True
        # some information about the iteration
        non_zero = len(np.where(np.asarray(np.abs(E_hat)).ravel() > 0)[0])
        message = ["[iter: %.4d]" % k,
                   "#svd=%.4d" % num_svd,
                   "rank(P)=%.4d" % svp,
                   "|C|_0=%.4d" % non_zero,
                   "crit=%.4g" % stop_crit]
        # Fix: Python 3 print function -- the original used the Python 2
        # `print` statement, a SyntaxError under Python 3.
        print(' '.join(message))
        k = k+1
        # handle non-convergence
        if not converged and k > iter_max:
            warnings.warn("terminate after max. iter.", UserWarning)
            converged = True
    return (A_hat, E_hat, k)
|
83a269006a7cc6c48b1db520c813929886eedf7b
| 3,637,371
|
def get_stylesheet():
    """Return the URL of the configured Pygments theme stylesheet."""
    static_url = core_config['ASSETS_URL']
    theme = module_config['PYGMENTS_THEME']
    return f"{static_url}/code_pygments/css/{theme}.css"
|
e5f766a414d6fac32b361dfbb54da929bff4458d
| 3,637,373
|
def create_volume(ctxt,
                  host='test_host',
                  display_name='test_volume',
                  display_description='this is a test volume',
                  status='available',
                  migration_status=None,
                  size=1,
                  availability_zone='fake_az',
                  volume_type_id=None,
                  replication_status='disabled',
                  replication_extended_status=None,
                  replication_driver_data=None,
                  consistencygroup_id=None,
                  **kwargs):
    """Create a volume object in the DB."""
    vol = {
        'size': size,
        'host': host,
        'user_id': ctxt.user_id,
        'project_id': ctxt.project_id,
        'status': status,
        'migration_status': migration_status,
        'display_name': display_name,
        'display_description': display_description,
        'attach_status': 'detached',
        'availability_zone': availability_zone,
    }
    if consistencygroup_id:
        vol['consistencygroup_id'] = consistencygroup_id
    if volume_type_id:
        vol['volume_type_id'] = volume_type_id
    # Extra keyword arguments are passed straight through to the DB row.
    vol.update(kwargs)
    # Replication fields are set after kwargs so they cannot be overridden.
    vol['replication_status'] = replication_status
    vol['replication_extended_status'] = replication_extended_status
    vol['replication_driver_data'] = replication_driver_data
    return db.volume_create(ctxt, vol)
|
b638e98ab88f0e65c37503dee80bbec2250aab0e
| 3,637,374
|
def duration_of_treatment_30():
    """
    Real Name: b'duration of treatment 30'
    Original Eqn: b'10'
    Units: b'Day'
    Limits: (None, None)
    Type: constant
    b''

    PySD-style generated model element: a constant that always returns
    the fixed treatment duration of 10 days.
    """
    return 10
|
510b8e114007c9e64f866c98bfce9d2d86fa7bfe
| 3,637,375
|
def _read_spot_index(g_dim):
    """Prompt for a single grid index until it lies in [0, g_dim)."""
    print('Input as grid index ranging from 0 to', g_dim-1)
    spot_idx = int(input())
    while (spot_idx < 0) or (spot_idx >= g_dim):
        print('Invalid input!')
        print('Input as grid index ranging from 0 to', g_dim-1)
        spot_idx = int(input())
    return spot_idx
def input_pkgidx(g_dim):
    """
    Interactively read the parking-spot indices from the user.

    Prompts until the user gives a valid number of spots (strictly
    fewer than g_dim) and, for each spot, a unique grid index in
    [0, g_dim).

    :param g_dim: total number of grid cells (int)
    :return: sorted 1*pk_dim np.array of unique spot indices, where
        pk_dim is the number of spots
    """
    # Fix: np.int was removed in NumPy >= 1.24; use the builtin int.
    pk_dim = int(input('Please specify the num of parking spots:'))
    while pk_dim >= g_dim:
        print('Too many parking spots!')
        pk_dim = int(input('Please specify the num of parking spots:'))
    pk_g_idx = -np.ones(pk_dim, dtype=int)
    for idx in range(pk_dim):
        spot_idx = _read_spot_index(g_dim)
        # Re-prompt on duplicates, re-validating the range each time.
        while spot_idx in pk_g_idx:
            print('Repeated input!')
            spot_idx = _read_spot_index(g_dim)
        pk_g_idx[idx] = spot_idx
    pk_g_idx.sort()
    return pk_g_idx
|
acba8fdbeb1d0fdcb67d28b64fa313489fd597df
| 3,637,376
|
def get_total_count(data):
    """
    Extract the total record count from a Salesforce SOQL query response.
    :param dict data: data from the Salesforce API
    :rtype: int
    """
    total = data['totalSize']
    return total
|
7cb8696c36449425fbcfa944f1f057d063972888
| 3,637,377
|
def _check_hex(dummy_option, opt, value):
"""
Checks if a value is given in a decimal integer of hexadecimal reppresentation.
Returns the converted value or rises an exception on error.
"""
try:
if value.lower().startswith("0x"):
return int(value, 16)
else:
return int(value)
except ValueError:
raise OptionValueError(
"option {0:s}: invalid integer or hexadecimal value: {1:s}.".format(opt, value))
|
6acf63f42b79ba30a55fc1666bdd2c1f65124fd3
| 3,637,378
|
def get_challenge():
    """Return the ChallengeSetting object, using the cache when enabled."""
    challenge = cache_mgr.get_cache('challenge')
    if challenge:
        return challenge
    challenge, _ = ChallengeSetting.objects.get_or_create(pk=1)
    # Normalize the WattDepot URL so it never ends with '/'.
    if challenge.wattdepot_server_url:
        challenge.wattdepot_server_url = challenge.wattdepot_server_url.rstrip('/')
    # create the admin
    create_admin_user()
    cache_mgr.set_cache('challenge', challenge, 2592000)
    return challenge
|
f874b90624bbd9c4d4fc5e18efd6363d461fab0f
| 3,637,379
|
async def get_user_from_event(event):
    """ Get the user from argument or replied message. """
    # First token is the user reference; the optional remainder is extra text.
    args = event.pattern_match.group(1).split(" ", 1)
    extra = None
    if event.reply_to_msg_id:
        # Replied message: take its sender; the whole argument becomes `extra`.
        previous_message = await event.get_reply_message()
        user_obj = await event.client.get_entity(previous_message.sender_id)
        extra = event.pattern_match.group(1)
    elif args:
        user = args[0]
        if len(args) == 2:
            extra = args[1]
        if user.isnumeric():
            user = int(user)
        if not user:
            # NOTE(review): this bare `return` yields None, while the normal
            # path returns a (user_obj, extra) tuple -- callers must handle both.
            await event.edit("`Pass the user's username, id or reply!`")
            return
        if event.message.entities is not None:
            # An inline mention entity carries the target's numeric id directly.
            probable_user_mention_entity = event.message.entities[0]
            if isinstance(probable_user_mention_entity, MessageEntityMentionName):
                user_id = probable_user_mention_entity.user_id
                user_obj = await event.client.get_entity(user_id)
                # NOTE(review): returns the bare entity here (no `extra`),
                # unlike the tuple returned below -- confirm callers expect this.
                return user_obj
        try:
            user_obj = await event.client.get_entity(user)
        except (TypeError, ValueError) as err:
            await event.edit(str(err))
            return None
    return user_obj, extra
|
4cd659637603e9910aa5fb00e855767ae8252c2e
| 3,637,380
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.