| content (string, lengths 35–762k) | sha1 (string, length 40) | id (int64, 0–3.66M) |
|---|---|---|
import tensorflow as tf
def input_fn(is_training, data_dir, batch_size, num_epochs=1):
    """Input function using the tf.data input pipeline for the CIFAR-10 dataset.
Args:
is_training: A boolean denoting whether the input is for training.
data_dir: The directory containing the input data.
batch_size: The number of samples per batch.
num_epochs: The number of epochs to repeat the dataset.
Returns:
A tuple of images and labels.
"""
dataset = tf.data.Dataset.from_tensor_slices(get_filenames(is_training, data_dir))
dataset = dataset.flat_map(tf.data.TFRecordDataset)
    if is_training:
        # When choosing shuffle buffer sizes, larger sizes result in better
        # randomness, while smaller sizes have better performance. CIFAR-10 is
        # a relatively small dataset, so the full epoch could be shuffled;
        # here a modest buffer of 500 records is used for speed.
        dataset = dataset.shuffle(buffer_size=500)
dataset = dataset.map(parse_record)
dataset = dataset.map(lambda image, label: preprocess_image(image, label, is_training))
dataset = dataset.prefetch(batch_size)
# We call repeat after shuffling, rather than before, to prevent separate
# epochs from blending together.
dataset = dataset.repeat(num_epochs)
dataset = dataset.batch(batch_size)
iterator = dataset.make_one_shot_iterator()
images, labels = iterator.get_next()
return images, labels
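# Hedged usage sketch: get_filenames, parse_record, and preprocess_image are
# helpers assumed to live in the surrounding CIFAR-10 script, so this comment
# only illustrates the intended call pattern, not a standalone run.
# images, labels = input_fn(is_training=True, data_dir='/tmp/cifar10_data',
#                           batch_size=128, num_epochs=10)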
|
94875b205df2d67993dd658f33fbf3be917cf701
| 3,645,657
|
def num_list(to_parse):
"""
Creates list from its string representation
Arguments:
to_parse {string} -- String representation of list, can include 'None' or internal lists, represented by separation with '#'
Returns:
list[int] -- List represented in to_parse
"""
if len(to_parse) == 2:
return []
inter = to_parse[1:-1]
inter = [x.strip() for x in inter.split(',')]
result = []
for n in inter:
if n == "None":
result.append(None)
elif "#" in n:
result.append([int(x) for x in n.split("#")])
else:
result.append(int(n))
return result
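# Example round-trips, including None and a '#'-separated inner list:
assert num_list("[]") == []
assert num_list("[1, None, 2#3]") == [1, None, [2, 3]]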
|
b444554e37434b5ae42ebc913bcc0f9b99c65ce9
| 3,645,659
|
def total_scatter_matrix(data):
"""
    Total sum of squares (TSS): sum of squared distances of the points around the barycentre.
    Reference: Clustering Indices, Bernard Desgraupes (April 2013)
"""
X = np.array(data.T.copy(), dtype=np.float64)
for feature_i in range(data.shape[1]):
X[feature_i] = X[feature_i] - np.mean(X[feature_i])
T = np.dot(X, X.T)
return T
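# Quick sanity check: with n samples, T should equal n times the biased feature
# covariance matrix, since each feature row is mean-centred above (a derived
# property, verified against the code, not part of the original):
import numpy as np
data = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 0.0]])
assert np.allclose(total_scatter_matrix(data), len(data) * np.cov(data.T, ddof=0))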
|
829bfdbf838d087517465e7173e480796e52cf8e
| 3,645,661
|
def save_camera_zip(camera_id, year, month, file_path=None):
"""
Download a camera ZIP archive.
:param camera_id: int, camera ID
:param year: int, year
:param month: int, month
:param file_path: str, optional, path to save file
    :return: bool, True once the download completes
"""
# Setup file name
file_name = "{0:04d}.{1:02d}.zip".format(year, month)
if file_path is None:
file_path = "./{0}".format(file_name)
# Download
save_buffer(get_zip_url(camera_id, year, month), file_path)
return True
|
41e4ef0f5f412850266de2d136da719adae08e04
| 3,645,662
|
def read_user(line):
    """ line (str) -> dict """
    pieces = line.split()
return {
'first': pieces[0],
'last': pieces[1],
'username': pieces[5],
'custID': pieces[3],
'password': pieces[7],
'rank': 0,
'total': 0
}
|
fcb24a2b791f0df8f40ea4080cdabe83d51fe068
| 3,645,663
|
import calendar
def lweekdate(weekday, year, month, nextDay=0):
"""
Usage
lastDate = lweekdate(weekday, year, month, nextDay)
Notes
Date of last occurrence of weekday in month
returns the serial date number for the last occurrence of Weekday in the given
year and month and in a week that also contains NextDay.
Weekday Weekday whose date you seek. Enter as an integer from 1 through 7:
1 Sunday
2 Monday
3 Tuesday
4 Wednesday
5 Thursday
6 Friday
7 Saturday
Year Year. Enter as a four-digit integer.
Month Month. Enter as an integer from 1 through 12.
Not Implemented
NextDay (Optional) Weekday that must occur after Weekday in the same week.
Enter as an integer from 0 through 7, where 0 = ignore (default) and 1 through 7
are the same as for Weekday.
Any input can contain multiple values, but if so, all other inputs must contain
the same number of values or a single value that applies to all.
See Also
Use the function datestr to convert serial date numbers to formatted date strings.
Examples
"""
assert weekday in range(1,8), "weekday must be in range(1,8)"
assert month in range(1,13), "month must be in range(1,13)"
assert year in range(0, 10000), "year must be in range(0,10000)"
    assert nextDay in range(0,8), "nextDay must be in range(0,8)"
    # calendar.monthcalendar weeks run Monday..Sunday, while this function
    # numbers weekdays 1=Sunday..7=Saturday, so remap the column index
    col = (weekday - 2) % 7
    day = calendar.monthcalendar(year, month)[-1][col]
    if day == 0:
        day = calendar.monthcalendar(year, month)[-2][col]
return datenum(year, month, day)
|
60367db7223f104260e2b7d757b367d6388d222b
| 3,645,664
|
import geopandas as gpd
from shapely.geometry import Polygon
def multi_polygon_gdf(basic_polygon):
"""
A GeoDataFrame containing the basic polygon geometry.
Returns
-------
GeoDataFrame containing the basic_polygon polygon.
"""
poly_a = Polygon([(3, 5), (2, 3.25), (5.25, 6), (2.25, 2), (2, 2)])
gdf = gpd.GeoDataFrame(
[1, 2],
geometry=[poly_a.buffer(0), basic_polygon.buffer(0)],
crs="epsg:4326",
)
multi_gdf = gpd.GeoDataFrame(
geometry=gpd.GeoSeries(gdf.unary_union), crs="epsg:4326"
)
return multi_gdf
|
9acfa76ca3a51603d96e1388dc7c7a1178ec3fa1
| 3,645,665
|
import traceback
def get_or_create_pull(github_repo, title, body, head, base, *, none_if_no_commit=False):
"""Try to create the PR. If the PR exists, try to find it instead. Raises otherwise.
You should always use the complete head syntax "org:branch", since the syntax is required
in case of listing.
if "none_if_no_commit" is set, return None instead of raising exception if the problem
is that head and base are the same.
"""
try: # Try to create or get a PR
return github_repo.create_pull(
title=title,
body=body,
head=head,
base=base
)
except GithubException as err:
err_message = err.data['errors'][0].get('message', '')
if err.status == 422 and err_message.startswith('A pull request already exists'):
_LOGGER.info('PR already exists, get this PR')
return list(github_repo.get_pulls(
head=head,
base=base
))[0]
elif none_if_no_commit and err.status == 422 and err_message.startswith('No commits between'):
_LOGGER.info('No PR possible since head %s and base %s are the same',
head,
base)
return None
else:
_LOGGER.warning("Unable to create PR:\n%s", err.data)
raise
    except Exception:
response = traceback.format_exc()
_LOGGER.warning("Unable to create PR:\n%s", response)
raise
|
3968fc99c006c45e20eabce1329d95247ad855c8
| 3,645,666
|
import json
from collections import OrderedDict
def get_board_properties(board, board_path):
"""parses the board file returns the properties of the board specified"""
with open(helper.linux_path(board_path)) as f:
board_data = json.load(f, object_pairs_hook=OrderedDict)
return board_data[board]
|
e5fa5542c540c643ecf8b57314e227d14e193a56
| 3,645,667
|
def get_repository_username(repo_url):
"""
Returns the repository username
:return: (str) Repository owner username
"""
repo_path = _get_repo_path(repo_url)
return repo_path[0]
|
008e67435c11e4fbb12ca19149e795dd50c12526
| 3,645,670
|
from IPython.config import Application
import logging
def get_logger():
"""Grab the global logger instance.
If a global IPython Application is instantiated, grab its logger.
Otherwise, grab the root logger.
"""
global _logger
if _logger is None:
if Application.initialized():
_logger = Application.instance().log
else:
logging.basicConfig()
_logger = logging.getLogger()
return _logger
|
717487ac1c94c09ab7831e405255283aea4570a5
| 3,645,671
|
import pandas as pd
def process_y(y_train: pd.Series, max_mult=20, large_sampsize=50000):
"""
Drop missing values, downsample the negative class
if sample size is large and there is significant class imbalance
"""
# Remove missing labels
ytr = y_train.dropna()
# The code below assumes the negative class is over-represented.
assert ytr.mean() < 0.5
# If there are too many negative samples, downsample
if len(ytr) > large_sampsize:
label_counts = ytr.value_counts()
max_neg = max(label_counts.loc[1.0] * max_mult, large_sampsize)
y_neg = ytr[ytr == 0.0]
y_pos = ytr[ytr == 1.0]
new_y = pd.concat(
[y_neg.sample(frac=1.0, replace=False).iloc[:max_neg], y_pos]
).sample(frac=1.0, replace=False)
return new_y
else:
return ytr
|
2f36ba3bce93d47f944784f83fd731b9aa315acc
| 3,645,672
|
def _distance(y1, y2):
"""1D distance calculator"""
inner = (y2 - y1) ** 2
d = np.sqrt(inner)
return d
|
696c5ccbe720301d22d9b142e9a5d5f3c507b738
| 3,645,673
|
def create_container(context, values):
"""Create a new container.
:param context: The security context
:param values: A dict containing several items used to identify
and track the container, and several dicts which are
passed
into the Drivers when managing this container. For
example:
::
{
'uuid': uuidutils.generate_uuid(),
'name': 'example',
'type': 'virt'
}
:returns: A container.
"""
return _get_dbdriver_instance().create_container(context, values)
|
b9047467a0a96c1b08bc92b4e74399e0a413ba45
| 3,645,674
|
from typing import Callable, Dict
from google.protobuf.json_format import MessageToDict
# Field and create_model are assumed to come from pydantic, based on usage below
from pydantic import Field, create_model
def build_model_from_pb(name: str, pb_model: Callable):
"""
Build model from protobuf message.
:param name: Name of the model.
:param pb_model: protobuf message.
:return: Model.
"""
dp = MessageToDict(pb_model(), including_default_value_fields=True)
all_fields = {
k: (name if k in ('chunks', 'matches') else type(v), Field(default=v))
for k, v in dp.items()
}
if pb_model == QueryLangProto:
all_fields['parameters'] = (Dict, Field(default={}))
return create_model(name, **all_fields)
|
a1de965b13b6cbbe33a08a52561f699042dd93f8
| 3,645,675
|
def create_proof_of_time_pietrzak(discriminant, x, iterations, int_size_bits):
"""
Returns a serialized proof blob.
"""
delta = 8
powers_to_calculate = proof_pietrzak.cache_indeces_for_count(iterations)
powers = iterate_squarings(x, powers_to_calculate)
y = powers[iterations]
proof = proof_pietrzak.generate_proof(x, iterations, delta, y, powers,
x.identity(), generate_r_value, int_size_bits)
return y.serialize(), serialize_proof(proof)
|
d31ac6eadcc3ce155d682e2cef4b392561a1412b
| 3,645,676
|
import numpy as np
from osgeo import gdal
def resample_dataset ( fname, x_factor, y_factor, method="mean", \
data_min=-1000, data_max=10000 ):
"""This function resamples a GDAL dataset (single band) by a factor of
(``x_factor``, ``y_factor``) in x and y. By default, the only method used
is to calculate the mean. The ``data_min`` and ``data_max`` parameters are
used to mask out pixels in value"""
QA_OK = np.array([0, 1, 4, 12, 8, 64, 512, 2048] )# VI OK
# Table in http://gis.cri.fmach.it/modis-ndvi-evi/
# First open the NDVI file
fname = 'HDF4_EOS:EOS_GRID:"%s":' % fname + \
'MOD_Grid_monthly_CMG_VI:CMG 0.05 Deg Monthly NDVI'
gdal_data = gdal.Open ( fname )
# Get raster sizes
nx = gdal_data.RasterXSize
ny = gdal_data.RasterYSize
# Calculate output raster size
    # Integer division so the reshape below gets integer dimensions
    nnx = nx // x_factor
    nny = ny // y_factor
# Reshape the raster data...
B = np.reshape ( gdal_data.ReadAsArray(), ( nny, y_factor, nnx, x_factor ) )
# Now open QA file
fname = fname.replace ("NDVI", "VI Quality" )
gdal_data = gdal.Open ( fname )
qa = gdal_data.ReadAsArray()
# Check what goes through QA
    qa_pass = np.logical_or.reduce([qa==x for x in QA_OK ])
    # Reshape the QA flags to match B, and mask the pixels that FAIL QA
    # (masking qa_pass directly would hide the good pixels)
    qa_fail = ~np.reshape(qa_pass, (nny, y_factor, nnx, x_factor))
    B = np.ma.array(B, mask=qa_fail)
# Re-jiggle the dimensions so we can easily average over then
C = np.transpose ( B, (0, 2, 1, 3 ) )
if method == "mean":
reduced_raster = np.mean ( np.mean ( C, axis=-1), axis=-1 )
else:
        raise NotImplementedError("Only mean reduction is supported for now")
return reduced_raster
|
f32465711d7dae3a8e7350676cb0e90f084bf5c5
| 3,645,677
|
def int2bytes(number, fill_size=None, chunk_size=None, overflow=False):
"""
Convert an unsigned integer to bytes (base-256 representation)::
Does not preserve leading zeros if you don't specify a chunk size or
fill size.
.. NOTE:
You must not specify both fill_size and chunk_size. Only one
of them is allowed.
:param number:
Integer value
:param fill_size:
If the optional fill size is given the length of the resulting
byte string is expected to be the fill size and will be padded
with prefix zero bytes to satisfy that length.
:param chunk_size:
If optional chunk size is given and greater than zero, pad the front of
the byte string with binary zeros so that the length is a multiple of
``chunk_size``.
:param overflow:
``False`` (default). If this is ``True``, no ``OverflowError``
will be raised when the fill_size is shorter than the length
of the generated byte sequence. Instead the byte sequence will
be returned as is.
:returns:
Raw bytes (base-256 representation).
    :raises:
        ``OverflowError`` when ``fill_size`` is given, ``overflow`` is left
        ``False`` (the default), and the number needs more bytes than fit
        into the fill size.
"""
if number < 0:
raise ValueError("Number must be an unsigned integer: %d" % number)
if fill_size and chunk_size:
raise ValueError("You can either fill or pad chunks, but not both")
    # Ensure the value is an integer: the bitwise AND raises TypeError early
    # for non-integer inputs.
    number & 1
raw_bytes = b('')
# Pack the integer one machine word at a time into bytes.
num = number
word_bits, _, max_uint, pack_type = get_word_alignment(num)
pack_format = ">%s" % pack_type
while num > 0:
raw_bytes = pack(pack_format, num & max_uint) + raw_bytes
num >>= word_bits
# Obtain the index of the first non-zero byte.
zero_leading = bytes_leading(raw_bytes)
if number == 0:
raw_bytes = ZERO_BYTE
# De-padding.
raw_bytes = raw_bytes[zero_leading:]
length = len(raw_bytes)
if fill_size and fill_size > 0:
if not overflow and length > fill_size:
raise OverflowError(
"Need %d bytes for number, but fill size is %d" %
(length, fill_size)
)
raw_bytes = raw_bytes.rjust(fill_size, ZERO_BYTE)
elif chunk_size and chunk_size > 0:
remainder = length % chunk_size
if remainder:
padding_size = chunk_size - remainder
raw_bytes = raw_bytes.rjust(length + padding_size, ZERO_BYTE)
return raw_bytes
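# A minimal standard-library reference for sanity-checking the unpadded and
# fill_size behaviour above (an illustrative sketch, not the original helper;
# it ignores chunk_size and the overflow flag):
def _int2bytes_ref(number, fill_size=None):
    if number < 0:
        raise ValueError("Number must be an unsigned integer: %d" % number)
    length = max(1, (number.bit_length() + 7) // 8)
    if fill_size:
        if length > fill_size:
            raise OverflowError(
                "Need %d bytes for number, but fill size is %d" % (length, fill_size))
        length = fill_size
    return number.to_bytes(length, "big")
assert _int2bytes_ref(0xFFEE) == b"\xff\xee"
assert _int2bytes_ref(0xFFEE, fill_size=4) == b"\x00\x00\xff\xee"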
|
091764ffeb9a15036b484380750f04496db36da1
| 3,645,678
|
def compFirstFivePowOf2(iset={0, 1, 2, 3, 4}):
"""
task 0.5.6
a comprehension over the given set whose value is the set consisting
of the first five powers of two, starting with 2**0
"""
return {2**x for x in iset}
|
a7b04ab6b127ef5ee7fdd3598b1569e171fd009e
| 3,645,679
|
import types
import pandas
def sdc_pandas_dataframe_getitem(self, idx):
"""
Intel Scalable Dataframe Compiler User Guide
********************************************
Pandas API: pandas.DataFrame.getitem
Get data from a DataFrame by indexer.
Limitations
-----------
Supported ``key`` can be one of the following:
* String literal, e.g. :obj:`df['A']`
* A slice, e.g. :obj:`df[2:5]`
* A tuple of string, e.g. :obj:`df[('A', 'B')]`
    * An array of booleans, e.g. :obj:`df[[True, False]]`
    * A series of booleans, e.g. :obj:`df[series([True, False])]`
Supported getting a column through getting attribute.
Examples
--------
.. literalinclude:: ../../../examples/dataframe/getitem/df_getitem_attr.py
:language: python
:lines: 37-
:caption: Getting Pandas DataFrame column through getting attribute.
:name: ex_dataframe_getitem
.. command-output:: python ./dataframe/getitem/df_getitem_attr.py
:cwd: ../../../examples
.. literalinclude:: ../../../examples/dataframe/getitem/df_getitem.py
:language: python
:lines: 37-
:caption: Getting Pandas DataFrame column where key is a string.
:name: ex_dataframe_getitem
.. command-output:: python ./dataframe/getitem/df_getitem.py
:cwd: ../../../examples
.. literalinclude:: ../../../examples/dataframe/getitem/df_getitem_slice.py
:language: python
:lines: 34-
:caption: Getting slice of Pandas DataFrame.
:name: ex_dataframe_getitem
.. command-output:: python ./dataframe/getitem/df_getitem_slice.py
:cwd: ../../../examples
.. literalinclude:: ../../../examples/dataframe/getitem/df_getitem_tuple.py
:language: python
:lines: 37-
:caption: Getting Pandas DataFrame elements where key is a tuple of strings.
:name: ex_dataframe_getitem
.. command-output:: python ./dataframe/getitem/df_getitem_tuple.py
:cwd: ../../../examples
.. literalinclude:: ../../../examples/dataframe/getitem/df_getitem_array.py
:language: python
:lines: 34-
:caption: Getting Pandas DataFrame elements where key is an array of booleans.
:name: ex_dataframe_getitem
.. command-output:: python ./dataframe/getitem/df_getitem_array.py
:cwd: ../../../examples
.. literalinclude:: ../../../examples/dataframe/getitem/df_getitem_series.py
:language: python
:lines: 34-
:caption: Getting Pandas DataFrame elements where key is series of booleans.
:name: ex_dataframe_getitem
.. command-output:: python ./dataframe/getitem/df_getitem_series.py
:cwd: ../../../examples
.. seealso::
:ref:`Series.getitem <pandas.Series.getitem>`
Get value(s) of Series by key.
:ref:`Series.setitem <pandas.Series.setitem>`
Set value to Series by index
:ref:`Series.loc <pandas.Series.loc>`
Access a group of rows and columns by label(s) or a boolean array.
:ref:`Series.iloc <pandas.Series.iloc>`
Purely integer-location based indexing for selection by position.
:ref:`Series.at <pandas.Series.at>`
Access a single value for a row/column label pair.
:ref:`Series.iat <pandas.Series.iat>`
Access a single value for a row/column pair by integer position.
:ref:`DataFrame.setitem <pandas.DataFrame.setitem>`
Set value to DataFrame by index
:ref:`DataFrame.loc <pandas.DataFrame.loc>`
Access a group of rows and columns by label(s) or a boolean array.
:ref:`DataFrame.iloc <pandas.DataFrame.iloc>`
Purely integer-location based indexing for selection by position.
:ref:`DataFrame.at <pandas.DataFrame.at>`
Access a single value for a row/column label pair.
:ref:`DataFrame.iat <pandas.DataFrame.iat>`
Access a single value for a row/column pair by integer position.
Intel Scalable Dataframe Compiler Developer Guide
*************************************************
Pandas DataFrame method :meth:`pandas.DataFrame.getitem` implementation.
.. only:: developer
Test: python -m sdc.runtests -k sdc.tests.test_dataframe.TestDataFrame.test_df_getitem*
"""
ty_checker = TypeChecker('Operator getitem().')
if not isinstance(self, DataFrameType):
return None
if isinstance(idx, types.StringLiteral):
col_loc = self.column_loc.get(idx.literal_value)
if col_loc is None:
key_error = True
else:
type_id, col_id = col_loc.type_id, col_loc.col_id
key_error = False
def _df_getitem_str_literal_idx_impl(self, idx):
if key_error == False: # noqa
data = self._data[type_id][col_id]
return pandas.Series(data, index=self._index, name=idx)
else:
raise KeyError('Column is not in the DataFrame')
return _df_getitem_str_literal_idx_impl
if isinstance(idx, types.UnicodeType):
def _df_getitem_unicode_idx_impl(self, idx):
# http://numba.pydata.org/numba-doc/dev/developer/literal.html#specifying-for-literal-typing
# literally raises special exception to call getitem with literal idx value got from unicode
return literally(idx)
return _df_getitem_unicode_idx_impl
if isinstance(idx, types.Tuple):
if all([isinstance(item, types.StringLiteral) for item in idx]):
return gen_df_getitem_tuple_idx_impl(self, idx)
if isinstance(idx, types.SliceType):
return gen_df_getitem_slice_idx_impl(self, idx)
if isinstance(idx, SeriesType) and isinstance(idx.dtype, types.Boolean):
self_index_is_none = isinstance(self.index, types.NoneType)
idx_index_is_none = isinstance(idx.index, types.NoneType)
if self_index_is_none and not idx_index_is_none:
if not check_index_is_numeric(idx):
ty_checker.raise_exc(idx.index.dtype, 'number', 'idx.index.dtype')
if not self_index_is_none and idx_index_is_none:
if not check_index_is_numeric(self):
ty_checker.raise_exc(idx.index.dtype, self.index.dtype, 'idx.index.dtype')
if not self_index_is_none and not idx_index_is_none:
if not check_types_comparable(self.index, idx.index):
ty_checker.raise_exc(idx.index.dtype, self.index.dtype, 'idx.index.dtype')
return gen_df_getitem_bool_series_idx_impl(self, idx)
if isinstance(idx, types.Array) and isinstance(idx.dtype, types.Boolean):
return gen_df_getitem_bool_array_idx_impl(self, idx)
ty_checker = TypeChecker('Operator getitem().')
expected_types = 'str, tuple(str), slice, series(bool), array(bool)'
ty_checker.raise_exc(idx, expected_types, 'idx')
|
90b335db7327da883561665909a2b335437efc83
| 3,645,680
|
def display_finds_meta(r):
"""A list of urls in r is displayed as HTML"""
    rows = ["<tr><td><img src='{row}'></td><td><a href='{meta}' target='_blank'>{meta}</a></td></tr>".format(row=row, meta=row) for row in r]
return HTML("""<html><head></head>
<body>
<table>
{rows}
</table>
</body>
</html>
""".format(rows=' '.join(rows)))
|
bd769bc4b0b6d4d55ec721e02c623f30d5eb5e1f
| 3,645,681
|
def __pairwise__(iterable):
""" Converts a list of elements in a list of pairs like:
list -> (list[0], list[1]), (list[2], list[3]), (list[4], list[5]), ...
:param iterable: Input list.
:return: List of pairs of the given list elements.
"""
a = iter(iterable)
return zip(a, a)
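# Example (note that zip silently drops a trailing element with no partner):
assert list(__pairwise__([1, 2, 3, 4])) == [(1, 2), (3, 4)]
assert list(__pairwise__([1, 2, 3, 4, 5])) == [(1, 2), (3, 4)]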
|
59eae23e0e6f9ccba528f9632caf77fe28698c5b
| 3,645,682
|
def _generateTriangleSequence():
"""
Generates list of elements following sequence of triangle numbers.
Returns:
sequenceElements - List of elements following the sequence.
"""
sequenceElements = []
totalCharactersInNewSequence = 0
total = 1
currentAddend = 2
while totalCharactersInNewSequence <= _MAX_NUMBER_OF_CHARACTERS_TO_PRINT:
currentSequenceMember = str(total)
sequenceElements.append(currentSequenceMember)
totalCharactersInNewSequence += len(currentSequenceMember)
total += currentAddend
currentAddend += 1
return sequenceElements
|
453b5e672a4817281f5e4ba51ca3ea426fcdb3d2
| 3,645,683
|
import math
import numpy as np
def normalize_neurons_range(neurons, standard_diagonal_line: float):
"""
:param neurons: should be refined.
:param standard_diagonal_line: pre-defined standard length of diagonal line of xoy plate
:return: neurons, width_scale, [width_span, height_span, z_span]
    width_scale: the width and height spans differ across volumes, so scaling the width
        turns an elliptical shell into a circular one, which simplifies counting on the xoy plate.
"""
regions = np.array([re for res in neurons.copy() for re in res], dtype = np.float32) # [x, y, z]
width, height = np.max(regions[:, 0]) - np.min(regions[:, 0]), np.max(regions[:, 1]) - np.min(regions[:, 1])
scale = standard_diagonal_line / math.sqrt(width * width + height * height)
neurons = [[[p[0] * scale, p[1] * scale, p[2] * scale] for p in pp] for pp in neurons] # for knn feature
width_scale = height / width
width_span = width * width_scale * scale
height_span = height * scale
z_span = (np.max(regions[:, 2]) - np.min(regions[:, 2])) * scale
return neurons, width_scale, [width_span, height_span, z_span]
|
1709b68054aa10ccd9d065b04d809e2df4d3a8e2
| 3,645,684
|
def distort_image(image):
"""Perform random distortions to the given 4D image and return result"""
# Switch to 3D as that's what these operations require
slices = tf.unpack(image)
output = []
# Perform pixel-wise distortions
for image in slices:
image = tf.image.random_flip_left_right(image)
image = tf.image.random_saturation(image, .2, 2.)
image += tf.truncated_normal(image.get_shape(), stddev=.05)
image = tf.image.random_contrast(image, .85, 1.15)
image = tf.image.random_brightness(image, .3)
output.append(image)
# Go back to 4D
image = tf.pack(output)
return image
|
70db49a2a3dfe31c0b511824342a95ad5da30430
| 3,645,685
|
from io import StringIO
def tablebyname(filehandle, header):
"""fast extraction of the table using the header to identify the table
This function reads only one table from the HTML file. This is in contrast to `results.readhtml.titletable` that will read all the tables into memory and allows you to interactively look thru them. The function `results.readhtml.titletable` can be very slow on large HTML files.
    This function is useful when you know which table you are looking for. It looks for the title line that is in bold just before the table. Some tables don't have such a title in bold, and this function will not work for them.
Parameters
----------
    filehandle : file like object
A file handle to the E+ HTML table file
header: str
This is the title of the table you are looking for
Returns
-------
titleandtable : (str, list)
- (title, table)
- title = previous item with a <b> tag
- table = rows -> [[cell1, cell2, ..], [cell1, cell2, ..], ..]
"""
htmlheader = f"<b>{header}</b><br><br>"
    thetable = None
    with filehandle:
        for line in filehandle:
            line = _decodeline(line)
            if line.strip() == htmlheader:
                justtable = getnexttable(filehandle)
                thetable = f"{htmlheader}\n{justtable}"
                break
    if thetable is None:
        # header never found -> no such table in the file
        return None
    filehandle = StringIO(thetable)
    htables = readhtml.titletable(filehandle)
    try:
        return list(htables[0])
    except IndexError:
        return None
|
ba1c228843f631b0441fc69c66b0d9ae7acbf813
| 3,645,686
|
def BuildDataset():
"""Create the dataset"""
# Get the dataset keeping the first two features
iris = load_iris()
x = iris["data"][:,:2]
y = iris["target"]
# Standardize and keep only classes 0 and 1
x = (x - x.mean(axis=0)) / x.std(axis=0)
i0 = np.where(y == 0)[0]
i1 = np.where(y == 1)[0]
x = np.vstack((x[i0],x[i1]))
# Train and test data
xtrn = np.vstack((x[:35],x[50:85]))
ytrn = np.array([0]*35 + [1]*35)
xtst = np.vstack((x[35:50],x[85:]))
ytst = np.array([0]*15+[1]*15)
idx = np.argsort(np.random.random(70))
xtrn = xtrn[idx]
ytrn = ytrn[idx]
idx = np.argsort(np.random.random(30))
xtst = xtst[idx]
ytst = ytst[idx]
y_train = np.zeros((len(ytrn),2))
for i in range(len(ytrn)):
if (ytrn[i] == 1):
y_train[i,:] = [0,1]
else:
y_train[i,:] = [1,0]
y_test = np.zeros((len(ytst),2))
for i in range(len(ytst)):
if (ytst[i] == 1):
y_test[i,:] = [0,1]
else:
y_test[i,:] = [1,0]
return (xtrn.reshape((xtrn.shape[0],1,2)), y_train,
xtst.reshape((xtst.shape[0],1,2)), y_test)
|
f6b3cc216262899880f048dd6d4823596d111c1a
| 3,645,687
|
def read_tile(file, config):
"""Read a codex-specific 5D image tile"""
# When saving tiles in ImageJ compatible format, any unit length
# dimensions are lost so when reading them back out, it is simplest
# to conform to 5D convention by reshaping if necessary
    # NumPy multi-axis indexing requires a tuple, not a list
    slices = tuple(None if dim == 1 else slice(None) for dim in config.tile_dims)
    return imread(file)[slices]
|
3e0e61d8fa5ac497377c02c90a03bcbd176752ab
| 3,645,688
|
def rmsd(V, W):
""" Calculate Root-mean-square deviation from two sets of vectors V and W. """
D = len(V[0])
N = len(V)
rmsd = 0.0
for v, w in zip(V, W):
rmsd += sum([(v[i] - w[i]) ** 2.0 for i in range(D)])
return np.sqrt(rmsd / N)
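# Worked example: only the second point deviates, by 1.0 in one dimension,
# so RMSD = sqrt((0 + 1) / 2):
import numpy as np
V = [[0.0, 0.0], [1.0, 0.0]]
W = [[0.0, 0.0], [1.0, 1.0]]
assert abs(rmsd(V, W) - np.sqrt(0.5)) < 1e-12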
|
dc537f1cc742f7c4c5231af4c45d470e582be623
| 3,645,689
|
def partition(f, xs):
"""
partition :: (a -> Bool) -> [a] -> ([a], [a])
The partition function takes a predicate a list and returns the pair of
lists of elements which do and do not satisfy the predicate.
"""
yes, no = [], []
for item in xs:
if f(item):
yes.append(item)
else:
no.append(item)
    # The original returned L[yes], L[no]; given the Haskell-style signature,
    # `L` was most likely a lazy-list constructor (e.g. from the hask library)
    # whose import was mangled into a spurious `from re import L` during
    # extraction. Plain lists are a safe equivalent here.
    return yes, no
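# Example, using the plain-list return above:
evens, odds = partition(lambda x: x % 2 == 0, [1, 2, 3, 4])
assert (evens, odds) == ([2, 4], [1, 3])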
|
7fde3557fa9d1e3bdf232885dda360a6695dabc0
| 3,645,690
|
from math import pi, sin
import numpy as np
import scikits.bvp_solver as bvp_solver
from matplotlib import pyplot as plt
def bvp2_check():
"""
Using scikits.bvp_solver to solve the bvp
y'' + y' + sin y = 0, y(0) = y(2*pi) = 0
y0 = y, y1 = y'
y0' = y1, y1' = y'' = -sin(y0) - y1
"""
lbc, rbc = .1, .1
def function1(x , y):
return np.array([y[1] , -sin(y[0]) -y[1] ])
def boundary_conditions(ya,yb):
return (np.array([ya[0] - lbc]), #evaluate the difference between the temperature of the hot stream on the
#left and the required boundary condition
np.array([yb[0] - rbc]))#evaluate the difference between the temperature of the cold stream on the
#right and the required boundary condition
problem = bvp_solver.ProblemDefinition(num_ODE = 2,
num_parameters = 0,
num_left_boundary_conditions = 1,
boundary_points = (0, 2.*pi),
function = function1,
boundary_conditions = boundary_conditions)
guess = np.linspace(0.,2.*pi, 10)
guess = np.array([.1-np.sin(2*guess),np.sin(2*guess)])
# plt.plot(guess,np.sin(guess))
# plt.show()
solution = bvp_solver.solve(problem,
solution_guess = guess)
#
A = np.linspace(0.,2.*pi, 200)
T = solution(A)
plt.plot(A, T[0,:],'-k',linewidth=2.0)
plt.show()
plt.clf()
N = 150
x = (2.*np.pi/N)*np.arange(1,N+1).reshape(N,1)
    print(x.shape)
    print(solution(x)[0,:].shape)
plt.plot(x,solution(x)[0,:])
plt.show()
# np.save('sol',solution(x)[0,:])
return
|
6fe48b76945d3c322c21938049ab74099d022c7d
| 3,645,691
|
import zarr
def get_zarr_store(file_path):
"""Get the storage type
"""
ZARR_STORE_MAP = {"lmdb": zarr.LMDBStore,
"zip": zarr.ZipStore,
"dbm": zarr.DBMStore,
"default": zarr.DirectoryStore}
suffix, subsuffix = get_subsuffix(file_path)
    # Also guard against subsuffix being None, which would raise a KeyError below
    if suffix != 'zarr' or subsuffix is None or subsuffix not in ZARR_STORE_MAP:
        return ZARR_STORE_MAP['default'](file_path)
else:
return ZARR_STORE_MAP[subsuffix](file_path)
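# Hedged usage notes, assuming the helper get_subsuffix("f.lmdb.zarr") returns
# ("zarr", "lmdb") and get_subsuffix("f.zarr") returns ("zarr", None):
# get_zarr_store("f.zarr")      -> zarr.DirectoryStore("f.zarr")
# get_zarr_store("f.lmdb.zarr") -> zarr.LMDBStore("f.lmdb.zarr")
# get_zarr_store("f.zip.zarr")  -> zarr.ZipStore("f.zip.zarr")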
|
ecc17168d56bd9cc725a2db51914cddd098aa1af
| 3,645,692
|
def convert_string(inpt):
"""Return string value from input lit_input
>>> convert_string(1)
'1'
"""
if PY2:
return str(inpt).decode()
else:
return str(inpt)
|
a88ee436726a6587e673fb71673c771851b83cea
| 3,645,693
|
import numpy as np
def get_ipc_kernel(imdark, tint, boxsize=5, nchans=4, bg_remove=True,
hotcut=[5000,50000], calc_ppc=False,
same_scan_direction=False, reverse_scan_direction=False):
""" Derive IPC/PPC Convolution Kernels
Find the IPC and PPC kernels used to convolve detector pixel data.
Finds all hot pixels within hotcut parameters and measures the
average relative flux within adjacent pixels.
    Parameters
    ==========
    imdark : ndarray
        Dark current image; multiplied by ``tint`` below to get counts
        (description inferred from the code).
    tint : float
        Integration time used to scale ``imdark``.
    Keyword Parameters
    ==================
boxsize : int
Size of the box. Should be odd, but if even, then adds +1.
bg_remove : bool
Remove the average dark current values for each hot pixel cut-out.
Only works if boxsize>3.
hotcut : array-like
        Min and max values of hot pixels (above bg and bias) to consider.
calc_ppc : bool
Calculate and return post-pixel coupling?
same_scan_direction : bool
Are all the output channels read in the same direction?
By default fast-scan readout direction is ``[-->,<--,-->,<--]``
If ``same_scan_direction``, then all ``-->``
reverse_scan_direction : bool
If ``reverse_scan_direction``, then ``[<--,-->,<--,-->]`` or all ``<--``
"""
ny, nx = imdark.shape
chsize = int(ny / nchans)
imtemp = imdark * tint
boxhalf = int(boxsize/2)
boxsize = int(2*boxhalf + 1)
distmin = np.ceil(np.sqrt(2.0) * boxhalf)
# Get rid of pixels around border
pixmask = ((imtemp>hotcut[0]) & (imtemp<hotcut[1]))
pixmask[0:4+boxhalf, :] = False
pixmask[-4-boxhalf:, :] = False
pixmask[:, 0:4+boxhalf] = False
pixmask[:, -4-boxhalf:] = False
# Ignore borders between amplifiers
for ch in range(1, nchans):
x1 = ch*chsize - boxhalf
x2 = x1 + 2*boxhalf
pixmask[:, x1:x2] = False
indy, indx = np.where(pixmask)
nhot = len(indy)
if nhot < 2:
print("No hot pixels found!")
return None
# Only want isolated pixels
# Get distances for every pixel
# If too close, then set equal to 0
for i in range(nhot):
d = np.sqrt((indx-indx[i])**2 + (indy-indy[i])**2)
ind_close = np.where((d>0) & (d<distmin))[0]
if len(ind_close)>0: pixmask[indy[i], indx[i]] = 0
indy, indx = np.where(pixmask)
nhot = len(indy)
if nhot < 2:
print("No hot pixels found!")
return None
# Stack all hot pixels in a cube
hot_all = []
for iy, ix in zip(indy, indx):
x1, y1 = np.array([ix,iy]) - boxhalf
x2, y2 = np.array([x1,y1]) + boxsize
sub = imtemp[y1:y2, x1:x2]
# Flip channels along x-axis for PPC
if calc_ppc:
            # Determine whether this pixel lies in an even or odd output
            # channel (index 0); the original looped over even channels and
            # kept only the last comparison, which was a bug
            even = (ix // chsize) % 2 == 0
if same_scan_direction:
flip = True if reverse_scan_direction else False
elif even:
flip = True if reverse_scan_direction else False
else:
flip = False if reverse_scan_direction else True
if flip: sub = sub[:,::-1]
hot_all.append(sub)
hot_all = np.array(hot_all)
# Remove average dark current values
if boxsize>3 and bg_remove==True:
for im in hot_all:
im -= np.median([im[0,:], im[:,0], im[-1,:], im[:,-1]])
# Normalize by sum in 3x3 region
norm_all = hot_all.copy()
for im in norm_all:
im /= im[boxhalf-1:boxhalf+2, boxhalf-1:boxhalf+2].sum()
# Take average of normalized stack
ipc_im_avg = np.median(norm_all, axis=0)
# ipc_im_sig = robust.medabsdev(norm_all, axis=0)
corner_val = (ipc_im_avg[boxhalf-1,boxhalf-1] +
ipc_im_avg[boxhalf+1,boxhalf+1] +
ipc_im_avg[boxhalf+1,boxhalf-1] +
ipc_im_avg[boxhalf-1,boxhalf+1]) / 4
if corner_val<0: corner_val = 0
# Determine post-pixel coupling value?
if calc_ppc:
ipc_val = (ipc_im_avg[boxhalf-1,boxhalf] + \
ipc_im_avg[boxhalf,boxhalf-1] + \
ipc_im_avg[boxhalf+1,boxhalf]) / 3
if ipc_val<0: ipc_val = 0
ppc_val = ipc_im_avg[boxhalf,boxhalf+1] - ipc_val
if ppc_val<0: ppc_val = 0
k_ipc = np.array([[corner_val, ipc_val, corner_val],
[ipc_val, 1-4*ipc_val, ipc_val],
[corner_val, ipc_val, corner_val]])
k_ppc = np.zeros([3,3])
k_ppc[1,1] = 1 - ppc_val
k_ppc[1,2] = ppc_val
return (k_ipc, k_ppc)
# Just determine IPC
else:
ipc_val = (ipc_im_avg[boxhalf-1,boxhalf] +
ipc_im_avg[boxhalf,boxhalf-1] +
ipc_im_avg[boxhalf,boxhalf+1] +
ipc_im_avg[boxhalf+1,boxhalf]) / 4
if ipc_val<0: ipc_val = 0
kernel = np.array([[corner_val, ipc_val, corner_val],
[ipc_val, 1-4*ipc_val, ipc_val],
[corner_val, ipc_val, corner_val]])
return kernel
|
4646ccc138d7f625941e9bc43382e1c5ef57e5c5
| 3,645,694
|
import datetime
def get_description():
""" Return a dict describing how to call this plotter """
desc = dict()
desc['data'] = True
desc['cache'] = 86400
desc['description'] = """This plot presents the trailing X number of days
temperature or precipitation departure from long term average. You can
express this departure either in Absolute Departure or as a Standard
Deviation. The Standard Deviation option along with precipitation is
typically called the "Standardized Precipitation Index".
<p>The plot also contains an underlay with the weekly US Drought Monitor
that is valid for the station location. If you plot a climate district
station, you get the US Drought Monitor valid for the district centroid.
If you plot a statewide average, you get no USDM included.
"""
today = datetime.date.today()
sts = today - datetime.timedelta(days=720)
desc['arguments'] = [
dict(type='station', name='station', default='IA0200',
label='Select Station:', network='IACLIMATE'),
dict(type='int', name='p1', default=31,
label='First Period of Days'),
dict(type='int', name='p2', default=91,
label='Second Period of Days'),
dict(type='int', name='p3', default=365,
label='Third Period of Days'),
dict(type='date', name='sdate', default=sts.strftime("%Y/%m/%d"),
min='1893/01/01',
label='Start Date of Plot'),
dict(type='date', name='edate', default=today.strftime("%Y/%m/%d"),
min='1893/01/01',
label='End Date of Plot'),
dict(type='select', name='pvar', default='precip', options=PDICT,
label='Which variable to plot?'),
dict(type='select', name='how', default='diff', options=PDICT2,
label='How to Express Departure?'),
]
return desc
|
04d19dde79c25bfc3cc606cd1a2b09ecd8a6408b
| 3,645,695
|
def SpringH(z,m,k):
""" with shapes (bs,2nd)"""
D = z.shape[-1] # of ODE dims, 2*num_particles*space_dim
q = z[:,:D//2].reshape(*m.shape,-1)
p = z[:,D//2:].reshape(*m.shape,-1)
return EuclideanK(p,m) + SpringV(q,k)
|
0895d44933a0390a65da42d596fcf1e822f0f93c
| 3,645,696
|
def write_sushi_input_files(lhafile):
""" Add SusHi-related blocks to LHA file """
outfiles = {}
    for higgsname, higgstype in {'H': 12, 'A': 21}.items():
lha = LHA(lhafile)
sushi = Block('SUSHI', comment='SusHi specific')
sushi.add(Entry([1, 2], comment='Select 2HDM'))
sushi.add(Entry([2, higgstype], comment='h / H / A'))
sushi.add(Entry([3, 0], comment='p-p collisions'))
sushi.add(Entry([4, 13000], comment='E_cm'))
sushi.add(Entry([5, 2], comment='ggH at NNLO'))
sushi.add(Entry([6, 2], comment='bbH at NNLO'))
sushi.add(Entry([7, 2], comment='SM EW content'))
sushi.add(Entry([19, 1], comment='Verbosity'))
sushi.add(Entry([20, 0], comment='All processes'))
lha.add_block(sushi)
thdm = Block('2HDM', '2HDM parameters')
#thdm.add(Entry([1], comment='Type I'))
#thdm.add(Entry([2], comment='Type II'))
thdm.add(Entry([4], comment='Type IV'))
lha.add_block(thdm)
distrib = Block('DISTRIB', comment='Kinematic requirements')
distrib.add(Entry([1, 0], comment='Sigma total'))
distrib.add(Entry([2, 0], comment='Disable pT cut'))
#distrib.add(Entry([21, GENER_SETTINGS['higgs_pt_min']], comment='Min higgs pT'))
distrib.add(Entry([3, 0], comment='Disable eta cut'))
#distrib.add(Entry([32, GENER_SETTINGS['higgs_eta_max']], comment='Max eta'))
distrib.add(Entry([4, 1], comment='Use eta, not y'))
lha.add_block(distrib)
pdfspec = Block('PDFSPEC')
pdfspec.add(Entry([1, 'MMHT2014lo68cl.LHgrid'], comment='Name of pdf (lo)'))
pdfspec.add(Entry([2, 'MMHT2014nlo68cl.LHgrid'], comment='Name of pdf (nlo)'))
pdfspec.add(Entry([3, 'MMHT2014nnlo68cl.LHgrid'], comment='Name of pdf (nnlo)'))
pdfspec.add(Entry([4, 'MMHT2014nnlo68cl.LHgrid'], comment='Name of pdf (n3lo)'))
pdfspec.add(Entry([10, 0], comment='Set number'))
lha.add_block(pdfspec)
lha.get_block('SMINPUTS').add(Entry([8, 1.275], comment='m_c'))
# Write output
suffix = '_%s_sushi.in' % higgsname
outname = lhafile.replace('.lha', suffix)
lha.write(outname)
outfiles[higgsname] = outname
return outfiles
|
f2f88c79d19de05109748c5c839550bfab905581
| 3,645,697
|
import functools
from functools import wraps
import pytest
# hookwrapper lets this yield-style hook wrap pytest's collection phase; the
# decorator appears to have been stripped during extraction.
@pytest.hookimpl(hookwrapper=True)
def pytest_collection(session): # pylint: disable=unused-argument
"""Monkey patch lru_cache, before any module imports occur."""
# Gotta hold on to this before we patch it away
old_lru_cache = functools.lru_cache
@wraps(functools.lru_cache)
def lru_cache_wrapper(*args, **kwargs):
"""Wrap lru_cache decorator, to track which functions are decorated."""
# Apply lru_cache params (maxsize, typed)
decorated_function = old_lru_cache(*args, **kwargs)
# Mimicking lru_cache: https://github.com/python/cpython/blob/v3.7.2/Lib/functools.py#L476-L478
@wraps(decorated_function)
def decorating_function(user_function):
"""Wraps the user function, which is what everyone is actually using. Including us."""
wrapper = decorated_function(user_function)
CACHED_FUNCTIONS.append(wrapper)
return wrapper
return decorating_function
# Monkey patch the wrapped lru_cache decorator
functools.lru_cache = lru_cache_wrapper
yield
# Be a good citizen and undo our monkeying
functools.lru_cache = old_lru_cache
|
56f218c06c1d8cc64d884e94f503ae51be135c7f
| 3,645,698
|
import numpy as np
def __graph_laplacian(mtx):
    """ Compute the graph Laplacian of the matrix.
    .. math::
        L = D - W, \\quad D_{ii} = \\sum_{j} W_{ij}
    """
L = np.diag(np.sum(mtx, 0)) - mtx
return L
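# Small check on a 2-node graph: L = D - W = [[1, -1], [-1, 1]], and every
# row of a graph Laplacian sums to zero.
W = np.array([[0.0, 1.0], [1.0, 0.0]])
L = __graph_laplacian(W)
assert np.allclose(L, np.array([[1.0, -1.0], [-1.0, 1.0]]))
assert np.allclose(L.sum(axis=1), 0.0)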
|
54f7fd0a359863bcf31ca9800c30e9eadf32ba8f
| 3,645,699
|
import numpy as np
def moon_illumination(phase: float) -> float:
"""Calculate the percentage of the moon that is illuminated.
Currently this value increases approximately linearly in time from new moon
to full, and then linearly back down until the next new moon.
Args:
phase: float
The phase angle of the Moon, in degrees.
Returns:
        illumination: float
The percentage of the Moon that is illuminated.
"""
return 100 * (1 - np.abs(phase - 180) / 180)
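# Spot checks for the piecewise-linear model above:
assert moon_illumination(0.0) == 0.0      # new moon
assert moon_illumination(90.0) == 50.0    # first/last quarter
assert moon_illumination(180.0) == 100.0  # full moon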
|
c40a3a6cb4de6da1fd64a188c99892afe3d385d7
| 3,645,700
|
def convex_hull_mask_iou(points_uv, im_shape, gt_hull_mask):
"""Computes masks by calculating a convex hull from points. Creates two masks (if possible),
one for the estimated foreground pixels and one for the estimated background pixels.
Args:
points_uv: (2, N) Points in u, v coordinates
im_shape: image shape [image_height, im_width]
gt_hull_mask: mask created by calculating convex hull
Returns:
best_iou: best mask iou calculated from the calculated hull masks and the ground truth hull
mask
"""
im_height, im_width = im_shape
# Segment the points into background and foreground
if len(set(points_uv[0])) > 1:
thresh = filters.threshold_li(points_uv[0])
pred_seg_1 = points_uv[0] > thresh
pred_seg_2 = points_uv[0] < thresh
segs = [pred_seg_1, pred_seg_2]
else:
# There is only one unique point so a threshold cannot be made
segs = [np.full(points_uv[0].shape, True, dtype=bool)]
mask_list = []
# Loop over both segments since it is uncertain which segment is foreground or background
for seg in segs:
# Obtain the coordinates of the pixels
pred_u = np.int32(points_uv[0][seg])
pred_v = np.int32(points_uv[1][seg])
# Remove duplicate coordinates by forming a set
coords = set(zip(pred_u, pred_v))
# Convex hull calculation requires a numpy array
coords = np.array(list(coords))
# Need at least 3 points to create convex hull
if len(coords) < 3:
continue
        # Points must not all share an x or y coordinate (the original comment
        # said "a single line", but only axis-aligned lines are caught here)
        elif any(np.all(coords == coords[0, :], axis=0)):
continue
else:
hull = ConvexHull(coords)
img = Image.new('L', (im_width, im_height), 0)
vertices = list(zip(coords[hull.vertices, 0], coords[hull.vertices, 1]))
ImageDraw.Draw(img).polygon(vertices, outline=1, fill=1)
mask = np.array(img)
mask_list.append(mask)
best_iou = 0
for mask in mask_list:
iou = evaluation.mask_iou(mask, gt_hull_mask)
if iou > best_iou:
best_iou = iou
return best_iou
|
09cb5cf8f7721a7430ab3825e0e6ddbbb0966be6
| 3,645,701
|
from typing import Callable
from typing import Awaitable
import inspect
def load_callback(module: ModuleType, event: Event) -> Callable[..., Awaitable[None]]:
"""
Load the callback function from the handler module
"""
callback = getattr(module, "handler")
if not inspect.iscoroutinefunction(callback):
raise TypeError(
f"expected 'coroutine function' for 'handler', got {type(callback).__name__!r}"
)
signature = inspect.signature(callback)
params = dict(signature.parameters)
# Construct the model from the callback for manual events
if isinstance(event, ManualEvent):
expect_returns(signature, None, Response, allow_unannotated=True)
event.model = build_model_from_params(params)
# Ensure the signature is passed the same parameters as the event sends
elif isinstance(event, AutomatedEvent):
expect_returns(signature, None, allow_unannotated=True)
# Get the model parameters
model_signature = inspect.signature(event.input_validator)
model_params = dict(model_signature.parameters)
validate_automated_signature(params, model_params)
return callback
|
e961adaae0c7f4ad5abe228ef677b3b61288d531
| 3,645,703
|
def molmer_sorensen(theta, N=None, targets=[0, 1]):
"""
Quantum object of a Mølmer–Sørensen gate.
Parameters
----------
theta: float
The duration of the interaction pulse.
N: int
Number of qubits in the system.
    targets: list of int
        The indices of the target qubits.
Returns
-------
molmer_sorensen_gate: :class:`qutip.Qobj`
Quantum object representation of the Mølmer–Sørensen gate.
"""
if targets != [0, 1] and N is None:
N = 2
if N is not None:
return expand_operator(molmer_sorensen(theta), N, targets=targets)
return Qobj(
[
[np.cos(theta/2.), 0, 0, -1.j*np.sin(theta/2.)],
[0, np.cos(theta/2.), -1.j*np.sin(theta/2.), 0],
[0, -1.j*np.sin(theta/2.), np.cos(theta/2.), 0],
[-1.j*np.sin(theta/2.), 0, 0, np.cos(theta/2.)]
],
dims=[[2, 2], [2, 2]])
|
8b5e7bc221c4f785bd8747a5b04d4a9299ebeefc
| 3,645,705
|
import math
def get_pixel_dist(pixel, red, green, blue):
"""
Returns the color distance between pixel and mean RGB value
Input:
pixel (Pixel): pixel with RGB values to be compared
red (int): average red value across all images
green (int): average green value across all images
blue (int): average blue value across all images
Returns:
        dist (float): color distance between red, green, and blue pixel values
"""
color_distance = math.sqrt((pixel.red - red)**2 + (pixel.green - green)**2 + (pixel.blue - blue)**2)
return color_distance
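# Worked example; Pixel can be any object with red/green/blue attributes, so
# SimpleNamespace stands in for the real Pixel class here:
from types import SimpleNamespace
px = SimpleNamespace(red=13, green=24, blue=30)
# deviations of (3, 4, 0) from a mean colour of (10, 20, 30) -> sqrt(9 + 16) = 5
assert get_pixel_dist(px, 10, 20, 30) == 5.0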
|
9ad0a30090e735daac4c7d470ea40e7d4dc0010f
| 3,645,706
|
def structure_pmu(array: np.ndarray) -> np.ndarray:
"""Helper function to convert 4 column array into structured array
representing 4-momenta of particles.
Parameters
----------
array : numpy ndarray of floats, with shape (num particles, 4)
The 4-momenta of the particles, arranged in columns.
Columns must be in order (x, y, z, e).
See also
--------
    structure_pmu_components : structured array from separate 1d arrays
of momentum components.
Notes
-----
As the data-type of the input needs to be recast, the output is
a copy of the original data, not a view on it. Therefore it uses
additional memory, so later changes to the original will not
affect the returned array, and vice versa.
"""
if array.dtype != _types.pmu:
struc_array = array.astype(_types.pmu[0][1])
struc_array = struc_array.view(dtype=_types.pmu, type=np.ndarray)
struc_pmu = struc_array.copy().squeeze()
else:
struc_pmu = array
return struc_pmu
|
519173b131d4120f940022b567faef018be2f2ed
| 3,645,707
|
from urllib.parse import parse_qs, urlsplit
def url_query_parameter(url, parameter, default=None, keep_blank_values=0):
"""Return the value of a url parameter, given the url and parameter name
General case:
>>> import w3lib.url
>>> w3lib.url.url_query_parameter("product.html?id=200&foo=bar", "id")
'200'
>>>
Return a default value if the parameter is not found:
>>> w3lib.url.url_query_parameter("product.html?id=200&foo=bar", "notthere", "mydefault")
'mydefault'
>>>
Returns None if `keep_blank_values` not set or 0 (default):
>>> w3lib.url.url_query_parameter("product.html?id=", "id")
>>>
Returns an empty string if `keep_blank_values` set to 1:
>>> w3lib.url.url_query_parameter("product.html?id=", "id", keep_blank_values=1)
''
>>>
"""
queryparams = parse_qs(
urlsplit(str(url))[3],
keep_blank_values=keep_blank_values
)
return queryparams.get(parameter, [default])[0]
|
d2ed39b6d6054baa9f8be90dfe1e1c8a06e47746
| 3,645,709
|
def read_ground_stations_extended(filename_ground_stations_extended):
"""
Reads ground stations from the input file.
    :param filename_ground_stations_extended: Filename of extended ground stations (typically /path/to/ground_stations.txt)
:return: List of ground stations
"""
ground_stations_extended = []
gid = 0
with open(filename_ground_stations_extended, 'r') as f:
for line in f:
split = line.split(',')
if len(split) != 8:
raise ValueError("Extended ground station file has 8 columns: " + line)
if int(split[0]) != gid:
raise ValueError("Ground station id must increment each line")
ground_station_basic = {
"gid": gid,
"name": split[1],
"latitude_degrees_str": split[2],
"longitude_degrees_str": split[3],
"elevation_m_float": float(split[4]),
"cartesian_x": float(split[5]),
"cartesian_y": float(split[6]),
"cartesian_z": float(split[7]),
}
ground_stations_extended.append(ground_station_basic)
gid += 1
return ground_stations_extended
|
2492dc8d5c55f124696aafbec11d74e609c3f397
| 3,645,710
|
import uuid
def shortPrescID():
"""Create R2 (short format) Prescription ID
Build the prescription ID and add the required checkdigit.
Checkdigit is selected from the PRESCRIPTION_CHECKDIGIT_VALUES constant
"""
_PRESC_CHECKDIGIT_VALUES = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ+'
hexString = str(uuid.uuid1()).replace('-', '').upper()
prescriptionID = hexString[:6] + '-Z' + hexString[6:11] + '-' + hexString[12:17]
prscID = prescriptionID.replace('-', '')
prscIDLength = len(prscID)
runningTotal = 0
for stringPosition in range(prscIDLength):
runningTotal = runningTotal + int(prscID[stringPosition], 36) * (2 ** (prscIDLength - stringPosition))
checkValue = (38 - runningTotal % 37) % 37
checkValue = _PRESC_CHECKDIGIT_VALUES[checkValue]
prescriptionID += checkValue
return prescriptionID
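# A companion verifier mirroring the mod-37 arithmetic above (an illustrative
# sketch, not part of the original module; it assumes the final character of
# the ID is the check digit):
def verify_short_presc_id(presc_id):
    values = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ+'
    body = presc_id[:-1].replace('-', '')
    total = sum(int(ch, 36) * (2 ** (len(body) - pos))
                for pos, ch in enumerate(body))
    return presc_id[-1] == values[(38 - total % 37) % 37]
assert verify_short_presc_id(shortPrescID())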
|
db491d3fe299adfbcd6f202eb46bc4669f829613
| 3,645,712
|
import numpy as np
def rmse(predictions, verbose=True):
"""Compute RMSE (Root Mean Squared Error).
.. math::
        \\text{RMSE} = \\sqrt{\\frac{1}{|\\hat{R}|} \\sum_{\\hat{r}_{ui} \\in
        \\hat{R}}(r_{ui} - \\hat{r}_{ui})^2}.
Args:
predictions (:obj:`list` of :obj:`Prediction\
<surprise.prediction_algorithms.predictions.Prediction>`):
A list of predictions, as returned by the :meth:`test()
<surprise.prediction_algorithms.algo_base.AlgoBase.test>` method.
verbose: If True, will print computed value. Default is ``True``.
Returns:
The Root Mean Squared Error of predictions.
Raises:
ValueError: When ``predictions`` is empty.
"""
if not predictions:
raise ValueError('Prediction list is empty.')
mse = np.mean([float((true_r - est)**2)
for (_, _, true_r, est, _) in predictions])
rmse_ = np.sqrt(mse)
if verbose:
print('RMSE: {0:1.4f}'.format(rmse_))
return rmse_
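# Worked example, assuming each prediction unpacks as
# (uid, iid, true_r, est, details), like Surprise's Prediction namedtuple:
# errors of 0.5 and -0.5 -> RMSE = sqrt((0.25 + 0.25) / 2) = 0.5
preds = [('u1', 'i1', 4.0, 3.5, None), ('u1', 'i2', 2.0, 2.5, None)]
assert abs(rmse(preds, verbose=False) - 0.5) < 1e-12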
|
2898f98fba50d71ef20e66b9654e1d539531f17b
| 3,645,713
|
import ast
def get_module_docstring(path):
"""get a .py file docstring, without actually executing the file"""
with open(path) as f:
return ast.get_docstring(ast.parse(f.read()))
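# Quick demonstration using a throwaway module file:
import os
import tempfile
with tempfile.NamedTemporaryFile('w', suffix='.py', delete=False) as tmp:
    tmp.write('"""A module docstring."""\nx = 1\n')
try:
    assert get_module_docstring(tmp.name) == 'A module docstring.'
finally:
    os.remove(tmp.name)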
|
e253372bfb6f65907a5461332d14c414c2370c66
| 3,645,714
|
import httplib2
import oauth2client.client
def get_authenticate_kwargs(oauth_credentials=None, http_=None):
"""Returns a dictionary with keyword arguments for use with discovery
Prioritizes oauth_credentials or a http client provided by the user
If none provided, falls back to default credentials provided by google's command line
utilities. If that also fails, tries using httplib2.Http()
Used by `gcs.GCSClient` and `bigquery.BigQueryClient` to initiate the API Client
"""
if oauth_credentials:
authenticate_kwargs = {
"credentials": oauth_credentials
}
elif http_:
authenticate_kwargs = {
"http": http_
}
else:
# neither http_ or credentials provided
try:
# try default credentials
oauth_credentials = oauth2client.client.GoogleCredentials.get_application_default()
authenticate_kwargs = {
"credentials": oauth_credentials
}
        # ApplicationDefaultCredentialsError lives on oauth2client.client,
        # not on GoogleCredentials
        except oauth2client.client.ApplicationDefaultCredentialsError:
# try http using httplib2
authenticate_kwargs = {
"http": httplib2.Http()
}
return authenticate_kwargs
|
da3cef34a51fe1bc74cb8ce221e9610160f0f176
| 3,645,715
|
from PIL import Image
# `T` is torchvision's transforms module; the extracted `from re import T` was spurious
import torchvision.transforms as T
def get_transforms(size=128, mobilenet=False):
"""
Gets all the torchvision transforms we will be applying to the dataset.
"""
# These are the transformations that we will do to our dataset
# For X transforms, let's do some of the usual suspects and convert to tensor.
# Don't forget to normalize to [0.0, 1.0], FP32
# and don't forget to resize to the same size every time.
x_transforms = [
T.Resize((size, size)),
T.RandomApply([
T.RandomAffine(degrees=20, translate=(0.1, 0.1)),
T.RandomHorizontalFlip(p=0.5),
T.RandomRotation(degrees=(-30, 30)),
T.RandomVerticalFlip(p=0.5),
], p=0.5),
T.ColorJitter(brightness=0.5),
T.ToTensor(), # Converts to FP32 [0.0, 1.0], Tensor type
]
# Pretrained MobileNetV2 requires normalizing like this:
if mobilenet:
x_transforms.append(T.Normalize(mean=MOBILENET_MEAN, std=MOBILENET_STD))
    # For Y transforms, we need to make sure that we do the same thing to the ground truth,
    # since we are trying to recreate the image. (Caveat: RandomApply here draws its own
    # randomness, independent of x_transforms, so image/label augmentations only stay in
    # sync if a shared seed is set per sample.)
    y_transforms = [
T.Resize((size, size), interpolation=Image.NEAREST), # Make sure we don't corrupt the labels
T.RandomApply([
T.RandomAffine(degrees=20, translate=(0.1, 0.1)),
T.RandomHorizontalFlip(p=0.5),
T.RandomRotation(degrees=(-30, 30)),
T.RandomVerticalFlip(p=0.5),
], p=0.5),
]
return x_transforms, y_transforms
|
ddacbf265ba12ac259ec35ef57798688a3e36f02
| 3,645,716
|
def transform(f, a, b, c, d):
"""
Transform a given function linearly.
    If f(x) is the original function, and a, b, c, and d are the parameters in
    order, then the return value is the function
    F(x) = a*f(c*x + d) + b
"""
return lambda x: a * f(c * x + d) + b
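# Example: scale sin by 2 and shift it up by 1.
import math
F = transform(math.sin, 2, 1, 1, 0)  # F(x) = 2*sin(x) + 1
assert abs(F(0.0) - 1.0) < 1e-12
assert abs(F(math.pi / 2) - 3.0) < 1e-12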
|
a47b3f4f3dc1e3ed5ddb6155bcd67b8297c298ed
| 3,645,717
|
def delete_rules(request):
"""
Deletes the rules with the given primary key.
"""
if request.method == 'POST':
rules_id = strip_tags(request.POST['post_id'])
post = HouseRules.objects.get(pk=rules_id)
post.filepath.delete() # Delete actual file
post.delete()
return redirect('archive-rules')
|
e6be9d39dfe07d17fdb18634b422262917fbe6eb
| 3,645,718
|
def display_word(word, secret_word, word_to_guess):
"""Function to edit the word to display and the word to guess (word to display
is the test word with its colored letter and the word to guess is the word
with spaces in it, for each missing letter).
    Args:
        word (str): the input word
        secret_word (str): the secret word that the user has to find
        word_to_guess (str): the word with spaces for each missing letter
    Returns:
        tuple(str, str): the word to display and the updated word to guess
    """
word_to_display = ""
indexes = []
# We need to do the dictio at each input because we need to edit it for
# each test word. It will be needed to not display several yellow letter
# when there should be only one.
dictio = letters_dict(secret_word)
# For each letter in the word
for letter_index in range(len(word)):
word_letter = word[letter_index]
# If the letter is the same at the same place in the secret_word
if word_letter==secret_word[letter_index]:
# Colors the letter in green
word_to_display += colored(word_letter, "green")
# Adds the index to a list
indexes.append(letter_index)
dictio[word_letter] -= 1
# If the letter is not the same at the same place in the secret word
# but is in the word anyway
elif word_letter in secret_word:
if dictio[word_letter]>0:
# Colors the letter in yellow and substract 1 to the dictionary
# of letters, if it's not 0
word_to_display += colored(word_letter, "yellow")
dictio[word_letter] -= 1
else:
# If there's 0 for the letter in the dictionary, it's because we
# already encountered them all, so we don't color it
word_to_display += word_letter
else:
word_to_display += word_letter
    # Transforms the word to guess into a list, in which each letter is one element
word_to_guess_list = list(word_to_guess)
for index in range(len(secret_word)):
if index in indexes:
# If the user have found a letter, replaces the space (_) by it
word_to_guess_list[index] = secret_word[index]
# Reforms the word
word_to_guess = "".join(word_to_guess_list)
return word_to_display, word_to_guess
|
e55ef943d5e3d837ca1698ba1e2e65d9062b16f0
| 3,645,719
|
def get_config_cache(course_pk: int) -> dict:
    """Fetch the config from the cache. If it is not cached yet, build a new cache entry from the Config model, store it, and return the config."""
cache_key = f"course-config-{course_pk}"
cached_config = cache.get(cache_key, None)
if cached_config is None:
config = Config.objects.filter(course_id=course_pk).first()
cached_config = set_config_from_instance(config)
return cached_config
|
a155ce5354d8ec00eab0da42c919ac15eac43bb4
| 3,645,720
|
import functools
import logging
def log_command(func):
    """
    Logging decorator for logging bot commands and info
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
slack, command, event = args
user = slack.user_info(event["user"])
log_line = 'USER: %s | CHANNEL ID: %s | COMMAND: %s | TEXT: %s'
command_info = log_line % (user["user"]["name"],
event["channel"],
command,
event["text"])
logging.info(command_info)
command = func(*args, **kwargs)
return command
    return wrapper
|
8ab4f36ff6c01a3799061f532d0c25ec04d725e8
| 3,645,721
|
import copy
def calc_stats(scores_summ, curr_lines, curr_idx, CI=0.95, ext_test=None,
stats="mean", shuffle=False):
"""
calc_stats(scores_summ, curr_lines, curr_idx)
Calculates statistics on scores from runs with specific analysis criteria
and records them in the summary scores dataframe.
Required args:
- scores_summ (pd DataFrame): DataFrame containing scores summary
- curr_lines (pd DataFrame) : DataFrame lines corresponding to specific
analysis criteria
- curr_idx (int) : Current row in the scores summary
DataFrame
Optional args:
- CI (num) : Confidence interval around which to collect
percentile values
default: 0.95
        - ext_test (str) : Name of extra test set, if any (None if none)
default: None
- stats (str) : stats to take, i.e., "mean" or "median"
default: "mean"
- shuffle (bool) : If True, data is for shuffled, and will be averaged
across runs before taking stats
default: False
Returns:
- scores_summ (pd DataFrame): Updated DataFrame containing scores, as
well as epoch_n, runs_total, runs_nan
summaries
"""
scores_summ = copy.deepcopy(scores_summ)
# score labels to perform statistics on
sc_labs = ["epoch_n"] + logreg_util.get_sc_labs(
True, ext_test_name=ext_test)
# avoids accidental nuisance dropping by pandas
curr_lines["epoch_n"] = curr_lines["epoch_n"].astype(float)
if shuffle: # group runs and take mean or median across
scores_summ.loc[curr_idx, "mouse_n"] = -1
keep_lines = \
[col for col in curr_lines.columns if col in sc_labs] + ["run_n"]
grped_lines = curr_lines[keep_lines].groupby("run_n", as_index=False)
if stats == "mean":
curr_lines = grped_lines.mean() # automatically skips NaNs
elif stats == "median":
curr_lines = grped_lines.median() # automatically skips NaNs
else:
gen_util.accepted_values_error("stats", stats, ["mean", "median"])
# calculate n_runs (without nans and with)
scores_summ.loc[curr_idx, "runs_total"] = len(curr_lines)
scores_summ.loc[curr_idx, "runs_nan"] = curr_lines["epoch_n"].isna().sum()
# percentiles to record
ps, p_names = math_util.get_percentiles(CI)
for sc_lab in sc_labs:
if sc_lab in curr_lines.keys():
cols = []
vals = []
data = curr_lines[sc_lab].astype(float)
for stat in ["mean", "median"]:
cols.extend([stat])
vals.extend(
[math_util.mean_med(data, stats=stat, nanpol="omit")])
for error in ["std", "sem"]:
cols.extend([error])
vals.extend([math_util.error_stat(
data, stats="mean", error=error, nanpol="omit")])
# get 25th and 75th quartiles
cols.extend(["q25", "q75"])
vals.extend(math_util.error_stat(
data, stats="median", error="std", nanpol="omit"))
# get other percentiles (for CI)
cols.extend(p_names)
vals.extend(math_util.error_stat(
data, stats="median", error="std", nanpol="omit", qu=ps))
# get MAD
cols.extend(["mad"])
vals.extend([math_util.error_stat(
data, stats="median", error="sem", nanpol="omit")])
# plug in values
cols = [f"{sc_lab}_{name}" for name in cols]
gen_util.set_df_vals(scores_summ, curr_idx, cols, vals)
return scores_summ
|
ddaa5b5a2c70c25488f572ad894f7aa0bedc7189
| 3,645,723
|
import bottle
def report_date_time() -> str:
"""Return the report date requested as query parameter."""
report_date_string = dict(bottle.request.query).get("report_date")
return str(report_date_string).replace("Z", "+00:00") if report_date_string else iso_timestamp()
|
391db86e523c55f88c40c1bc8b9fb1ed6f3d97ff
| 3,645,724
|
import numpy as np
def assign_colour_label_data(catl):
"""
Assign colour label to data
Parameters
----------
catl: pandas Dataframe
Data catalog
Returns
---------
catl: pandas Dataframe
Data catalog with colour label assigned as new column
"""
logmstar_arr = catl.logmstar.values
u_r_arr = catl.modelu_rcorr.values
colour_label_arr = np.empty(len(catl), dtype='str')
for idx, value in enumerate(logmstar_arr):
# Divisions taken from Moffett et al. 2015 equation 1
if value <= 9.1:
if u_r_arr[idx] > 1.457:
colour_label = 'R'
else:
colour_label = 'B'
        elif 9.1 < value < 10.1:
divider = 0.24 * value - 0.7
if u_r_arr[idx] > divider:
colour_label = 'R'
else:
colour_label = 'B'
        elif value >= 10.1:
if u_r_arr[idx] > 1.7:
colour_label = 'R'
else:
colour_label = 'B'
colour_label_arr[idx] = colour_label
catl['colour_label'] = colour_label_arr
return catl
|
4f56cc4d4ac1ae722deffb92d63d5867a885fb0e
| 3,645,725
|
def get_policy(arn):
"""Get info about a policy."""
client = get_client("iam")
response = client.get_policy(PolicyArn=arn)
return response
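# Sketch of reading the response (keys follow boto3's IAM get_policy
# response shape; the ARN below is a placeholder and the call needs
# AWS credentials to succeed):
if __name__ == "__main__":
    response = get_policy("arn:aws:iam::aws:policy/ReadOnlyAccess")
    policy = response["Policy"]
    print(policy["PolicyName"], policy["DefaultVersionId"])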
|
c56d79bfd8cf744bbe010ad0d5dfbaeaa3d59e76
| 3,645,726
|
import shapefile
def _write_roadways(roadway_feature_class, condition):
    """Writes roadway feature class to STAMINA syntax
    Arguments:
        roadway_feature_class {String} -- Path to feature class
        condition {String} -- Existing, NoBuild, or Build. Determines fields to use from geospatial template
    Returns:
        [string] -- [roadways]
    """
    with shapefile.Reader(roadway_feature_class) as roadways:
        roadway_count = len(roadways.shapes())
        roadway_string = "2,{}\n".format(roadway_count)
        flds = validate_roadway_field(condition)
for row in roadways.shapeRecords():
road = row.record["road_name"]
speed = row.record["speed"]
auto = round(row.record[flds[0]], 0)
medium = round(row.record[flds[1]], 0)
heavy = round(row.record[flds[2]], 0)
roadway_string += "{}\n".format(road)
roadway_string += "CARS {} {}\n".format(auto, speed)
roadway_string += "MT {} {}\n".format(medium, speed)
roadway_string += "HT {} {}\n".format(heavy, speed)
roadway_string += _write_roadway_points(row.shape)
roadway_string += roadway_separator()
return roadway_string
|
0c82db17f3632a2c7023a6437a3f7e8221e667ba
| 3,645,728
|
from .math.backend import BACKENDS
def detect_backends() -> tuple:
    """
    Registers all available backends and returns them.
    This includes only backends for which the minimal requirements are fulfilled.
    Returns:
        `tuple` of `phi.math.backend.Backend`
    """
    # Importing a backend module registers it in BACKENDS as a side effect;
    # backends whose dependencies are missing are silently skipped.
    try:
        from .tf import TF_BACKEND  # noqa: F401
    except ImportError:
        pass
    try:
        from .torch import TORCH_BACKEND  # noqa: F401
    except ImportError:
        pass
    try:
        from .jax import JAX_BACKEND  # noqa: F401
    except ImportError:
        pass
    return tuple(BACKENDS)
|
4d7fb7c80e8a931a614549539b9e157223602d31
| 3,645,729
|
import random
import librosa
import numpy as np
def mix_audio(word_path=None,
bg_path=None,
word_vol=1.0,
bg_vol=1.0,
sample_time=1.0,
sample_rate=16000):
"""
Read in a wav file and background noise file. Resample and adjust volume as
necessary.
"""
# If no word file is given, just return random background noise
    if word_path is None:
waveform = [0] * int(sample_time * sample_rate)
fs = sample_rate
else:
# Open wav file, resample, mix to mono
waveform, fs = librosa.load(word_path, sr=sample_rate, mono=True)
# Pad 0s on the end if not long enough
if len(waveform) < sample_time * sample_rate:
waveform = np.append(waveform, np.zeros(int((sample_time *
sample_rate) - len(waveform))))
# Truncate if too long
waveform = waveform[:int(sample_time * sample_rate)]
# If no background noise is given, just return the waveform
    if bg_path is None:
return waveform
# Open background noise file
bg_waveform, fs = librosa.load(bg_path, sr=fs)
# Pick a random starting point in background file
max_end = len(bg_waveform) - int(sample_time * sample_rate)
start_point = random.randint(0, max_end)
end_point = start_point + int(sample_time * sample_rate)
    # Mix the two sound samples (scaled by their respective volumes)
    waveform = (0.5 * word_vol * np.asarray(waveform)
                + 0.5 * bg_vol * bg_waveform[start_point:end_point])
return waveform
|
fba93e1f0d13bab4b9a30fe2d849fa3b1cf99927
| 3,645,730
|
import numpy as np
def analytical_pulse_width(ekev):
"""
Estimate analytical_pulse_width (FWHM) from radiation energy (assumes symmetrical beam)
:param ekev: radiation energy [keV]
:return sig: Radiation pulse width (FWHM) [m]
"""
sig = np.log((7.4e03/ekev))*6
return sig/1e6
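# Quick numeric check, following directly from the formula above:
# at 9.2 keV, ln(7400 / 9.2) * 6 ~ 40.1 (micrometres), returned in
# metres as ~4.01e-05.
if __name__ == "__main__":
    print(analytical_pulse_width(9.2))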
|
c56f861d1a83147ff425de7760416e870e1a69d4
| 3,645,731
|
def progress_timeout(progress_bar):
"""
Update the progress of the timer on a timeout tick.
Parameters
----------
progress_bar : ProgressBar
The UI progress bar object
Returns
-------
bool
True if continuing timer, False if done.
"""
global time_remaining, time_total
time_remaining -= 1
new_val = 1 - (time_remaining / time_total)
if new_val >= 1:
progress_bar.pb.set_text("Coffee extraction done.")
play_endsound()
return False
progress_bar.pb.set_fraction(new_val)
progress_bar.pb.set_text("{0:.1f} % Brewed ({1:01d}:{2:02d} Remaining)"
.format(new_val * 100, time_remaining / 60, time_remaining % 60))
return True
|
1b7e4976a5d96b2ede671c413ff0a7702603c6d8
| 3,645,732
|
def socket_file(module_name):
"""
Get the absolute path to the socket file for the named module.
"""
module_name = realname(module_name)
return join(sockets_directory(), module_name + '.sock')
|
df92c1a23374296d96c6419f32cdffd55b6564cf
| 3,645,733
|
def postBuild(id: str):
"""Register a new build.
Args:
id: Identifier of Repository for which build is to be registered.
Returns:
build_id: Identifier of Build created.
"""
return register_builds(
id, request.headers["X-Project-Access-Token"], request.json
)
|
13b8aed703e9e5cf2191baaf98583374021fb494
| 3,645,734
|
def boundary(shape, n_size, n):
    """ Shape boundaries & their neighborhoods
    @param shape 2D_bool_numpy_array: True if pixel in shape
    @param n_size 2D_int_tuple: size of each neighborhood
    @param n callable: maps an index to the index window of its neighborhood
    @return {index: neighborhood}
        index: 2D_int_tuple = index of neighborhood center in shape
        neighborhood: 2D_bool_numpy_array of size n_size
    Boundaries are shape pixels inside the shape having 1 or more 4-neighbors
    outside the shape.
    """
return {i: shape[n(i)]
for i in np.ndindex(shape.shape)
if is_boundary_pixel(shape,i,n_size)}
|
619050d3dfff50ccea204538a4cabcd7ef2190ab
| 3,645,736
|
def centered_mols(self, labels, return_trans=False):
"""
Return the molecules translated at the origin with a corresponding cell
Parameters
----------
labels : int or list of ints
The labels of the atoms to select
    return_trans : bool
        If True, also return the translation vector (-centroid) applied to centre the molecules
Returns
-------
mol : Mol object
The selected molecules with their centroid at the origin
mod_cell : Mol object
The new confined cell corresponding to the now translated molecules
"""
mol, mod_cell = self.complete_mol(labels)
centro = mol.centroid()
mol.translate(-centro)
mod_cell.translate(-centro)
mod_cell = mod_cell.confined()
if return_trans:
return mol, mod_cell, -centro
else:
return mol, mod_cell
|
858fd2b43f0ac9eaca3db94108f9bec0dbf305c7
| 3,645,737
|
import torch
def binary_accuracy(output: torch.Tensor, target: torch.Tensor) -> float:
"""Computes the accuracy for binary classification"""
with torch.no_grad():
batch_size = target.size(0)
pred = (output >= 0.5).float().t().view(-1)
        correct = pred.eq(target.view(-1)).float().sum()
        correct.mul_(100.0 / batch_size)
    return correct.item()  # 0-dim tensor -> plain float, matching the annotation
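# Usage sketch: outputs in [0, 1] are thresholded at 0.5.
if __name__ == "__main__":
    out = torch.tensor([0.9, 0.2, 0.7, 0.4])
    tgt = torch.tensor([1.0, 0.0, 0.0, 0.0])
    print(binary_accuracy(out, tgt))  # 75.0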
|
306d7d0e85a617b8b4508f2dfbbbac1f5fb67bc5
| 3,645,738
|
import numpy as np
def prepare_config(config):
"""
Prepares a dictionary to be stored as a json.
Converts all numpy arrays to regular arrays
Args:
config: The config with numpy arrays
Returns:
The numpy free config
"""
c = {}
for key, value in config.items():
if isinstance(value, np.ndarray):
value = value.tolist()
c[key] = value
return c
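# Usage sketch: makes a config JSON-serializable by converting arrays.
if __name__ == "__main__":
    import json
    cfg = {"weights": np.array([1.0, 2.0]), "name": "run1"}
    print(json.dumps(prepare_config(cfg)))  # {"weights": [1.0, 2.0], "name": "run1"}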
|
4ad31fc20fab7e3f7a7de9f50b0431d8000df029
| 3,645,739
|
import json
def load_config(path='config.json'):
"""
    Loads configuration from config.json file.
Returns station mac address, interval, and units for data request
"""
# Open config JSON
with open(path) as f:
# Load JSON file to dictionary
config = json.load(f)
# Return mac address, interval, and units
return (config['station_max_address'], int(config['interval']), config['units'])
|
5522f023ed3293149613dcc2dc007e34d50f3fa8
| 3,645,740
|
import torch
def log_px_z(pred_logits, outcome):
"""
Returns Bernoulli log probability.
:param pred_logits: logits for outcome 1
:param outcome: datapoint
:return: log Bernoulli probability of outcome given logits in pred_logits
"""
pred = pred_logits.view(pred_logits.size(0), -1)
y = outcome.view(outcome.size(0), -1)
return -torch.sum(torch.max(pred, torch.tensor(0., device=pred.device)) - pred * y +
torch.log(1 + torch.exp(-torch.abs(pred))), 1)
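# Sanity-check sketch: the stable formula above is the negated, per-sample
# sum of PyTorch's binary_cross_entropy_with_logits.
if __name__ == "__main__":
    import torch.nn.functional as F
    logits = torch.randn(4, 10)
    y = torch.randint(0, 2, (4, 10)).float()
    ref = -F.binary_cross_entropy_with_logits(logits, y, reduction="none").sum(1)
    assert torch.allclose(log_px_z(logits, y), ref, atol=1e-6)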
|
6369d893cc9bfe5c3f642f819511798d01ae3ae9
| 3,645,741
|
import tensorflow as tf
def _sort_rows(matrix, num_rows):
"""Sort matrix rows by the last column.
Args:
matrix: a matrix of values (row,col).
num_rows: (int) number of sorted rows to return from the matrix.
Returns:
Tensor (num_rows, col) of the sorted matrix top K rows.
"""
tmatrix = tf.transpose(a=matrix, perm=[1, 0])
sorted_tmatrix = tf.nn.top_k(tmatrix, num_rows)[0]
return tf.transpose(a=sorted_tmatrix, perm=[1, 0])
|
e9e8fcb6275915e8a42798411c0712eb34bbbfe4
| 3,645,742
|
import functools
def partial_at(func, indices, *args):
"""Partial function application for arguments at given indices."""
@functools.wraps(func)
def wrapper(*fargs, **fkwargs):
nargs = len(args) + len(fargs)
iargs = iter(args)
ifargs = iter(fargs)
        # pull each position either from the frozen args (if its index was
        # fixed) or from the call-time args, preserving overall order
        posargs = (next((ifargs, iargs)[i in indices]) for i in range(nargs))
return func(*posargs, **fkwargs)
return wrapper
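# Usage sketch: freeze the arguments at positions 0 and 2; the caller
# supplies the remaining positions in order.
if __name__ == "__main__":
    def concat(a, b, c, d):
        return a + b + c + d

    fixed = partial_at(concat, {0, 2}, "A", "C")
    assert fixed("B", "D") == "ABCD"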
|
1b45e0bd8baea869d80c6b5963c6063f6b8fbdd4
| 3,645,743
|
import importlib
def try_load_module(module_name):
"""
Import a module by name, print the version info and file name.
Return None on failure.
"""
try:
mod = importlib.import_module(module_name)
        print(green("%s %s:" % (module_name, mod.__version__)), mod.__file__)
return mod
except ImportError:
        print(yellow("Could not find %s" % module_name))
return None
|
527c2fb3dbbb3ef8ee5800f492a727a2d565892d
| 3,645,744
|
def project_image(request, uid):
"""
GET request : return project image
PUT request : change project image
"""
project = Project.objects.filter(uid=uid).first()
imgpath = project.image.path if project.image else get_thumbnail()
if request.method == "PUT":
file_object = request.data.get("file")
imgpath = change_image(obj=project, file_object=file_object)
data = open(imgpath, "rb") .read()
return HttpResponse(content=data, content_type="image/jpeg")
|
f05db1026f41ab15eece1068fe182e0673e798e3
| 3,645,745
|
from typing import Optional
def validate(prefix: str, identifier: str) -> Optional[bool]:
"""Validate the identifier against the prefix's pattern, if it exists.
:param prefix: The prefix in the CURIE
:param identifier: The identifier in the CURIE
:return: Whether this identifier passes validation, after normalization
>>> validate("chebi", "1234")
True
>>> validate("chebi", "CHEBI:12345")
True
>>> validate("chebi", "CHEBI:ABCD")
False
"""
resource = get_resource(prefix)
if resource is None:
return None
return resource.validate_identifier(identifier)
|
bbdc0eef34a03670963354d0cdf6e414eaa2aa8d
| 3,645,746
|
import dgl
import scipy.sparse as sp
import scipy.sparse.linalg  # noqa: F401  (loaded so that sp.linalg is available)
import torch
def laplacian_positional_encoding(g, pos_enc_dim):
"""
Graph positional encoding v/ Laplacian eigenvectors
"""
# Laplacian
A = g.adjacency_matrix_scipy(return_edge_ids=False).astype(float)
N = sp.diags(dgl.backend.asnumpy(g.in_degrees()).clip(1) ** -0.5, dtype=float)
L = sp.eye(g.number_of_nodes()) - N * A * N
# Eigenvectors with scipy
#EigVal, EigVec = sp.linalg.eigs(L, k=pos_enc_dim+1, which='SR')
EigVal, EigVec = sp.linalg.eigs(L, k=pos_enc_dim+1, which='SR', tol=1e-2)
EigVec = EigVec[:, EigVal.argsort()] # increasing order
out = torch.from_numpy(EigVec[:,1:pos_enc_dim+1]).float()
return out
|
69b09e69f37fc870fa36510ef05172f35bfc0093
| 3,645,747
|
async def replace_chain():
""" replaces the current chain with the most recent and longest chain """
blockchain.replace_chain()
blockchain.is_chain_valid(chain=blockchain.chain)
    return {'message': 'chain has been updated and is valid',
'longest chain': blockchain.chain}
|
3ef0797ca582dbd2cb7ab47b09c847a4380215d5
| 3,645,748
|
import copy
import numpy as np
def ucb(bufferx,
objective_weights,
regression_models,
param_space,
scalarization_method,
objective_limits,
iteration_number,
model_type,
classification_model=None,
number_of_cpus=0):
"""
Multi-objective ucb acquisition function as detailed in https://arxiv.org/abs/1805.12168.
The mean and variance of the predictions are computed as defined by Hutter et al.: https://arxiv.org/pdf/1211.0906.pdf
:param bufferx: a list of tuples containing the points to predict and scalarize.
:param objective_weights: a list containing the weights for each objective.
:param regression_models: the surrogate models used to evaluate points.
:param param_space: a space object containing the search space.
:param scalarization_method: a string indicating which scalarization method to use.
    :param model_type: a string indicating the type of the surrogate models used.
:param objective_limits: a dictionary with estimated minimum and maximum values for each objective.
:param iteration_number: an integer for the current iteration number, used to compute the beta
:param classification_model: the surrogate model used to evaluate feasibility constraints
:param number_of_cpus: an integer for the number of cpus to be used in parallel.
:return: a list of scalarized values for each point in bufferx.
"""
beta = np.sqrt(0.125*np.log(2*iteration_number + 1))
augmentation_constant = 0.05
number_of_predictions = len(bufferx)
tmp_objective_limits = copy.deepcopy(objective_limits)
prediction_means, prediction_variances = models.compute_model_mean_and_uncertainty(bufferx, regression_models, model_type, param_space, var=True)
    if classification_model is not None:
classification_prediction_results = models.model_probabilities(bufferx, classification_model, param_space)
feasible_parameter = param_space.get_feasible_parameter()[0]
true_value_index = classification_model[feasible_parameter].classes_.tolist().index(True)
feasibility_indicator = classification_prediction_results[feasible_parameter][:,true_value_index]
else:
feasibility_indicator = [1]*number_of_predictions # if no classification model is used, then all points are feasible
# Compute scalarization
if (scalarization_method == "linear"):
scalarized_predictions = np.zeros(number_of_predictions)
beta_factor = 0
for objective in regression_models:
scalarized_predictions += objective_weights[objective]*prediction_means[objective]
beta_factor += objective_weights[objective]*prediction_variances[objective]
scalarized_predictions -= beta*np.sqrt(beta_factor)
scalarized_predictions = scalarized_predictions*feasibility_indicator
# The paper does not propose this, I applied their methodology to the original tchebyshev to get the approach below
# Important: since this was not proposed in the paper, their proofs and bounds for the modified_tchebyshev may not be valid here.
elif(scalarization_method == "tchebyshev"):
scalarized_predictions = np.zeros(number_of_predictions)
total_values = np.zeros(number_of_predictions)
for objective in regression_models:
scalarized_values = objective_weights[objective] * np.absolute(prediction_means[objective] - beta*np.sqrt(prediction_variances[objective]))
total_values += scalarized_values
scalarized_predictions = np.maximum(scalarized_values, scalarized_predictions)
scalarized_predictions += augmentation_constant*total_values
scalarized_predictions = scalarized_predictions*feasibility_indicator
elif(scalarization_method == "modified_tchebyshev"):
scalarized_predictions = np.full((number_of_predictions), float("inf"))
reciprocated_weights = reciprocate_weights(objective_weights)
for objective in regression_models:
scalarized_value = reciprocated_weights[objective] * (prediction_means[objective] - beta*np.sqrt(prediction_variances[objective]))
scalarized_predictions = np.minimum(scalarized_value, scalarized_predictions)
scalarized_predictions = scalarized_predictions*feasibility_indicator
scalarized_predictions = -scalarized_predictions # We will minimize later, but we want to maximize instead, so we invert the sign
else:
print("Error: unrecognized scalarization method:", scalarization_method)
raise SystemExit
return scalarized_predictions, tmp_objective_limits
|
4ec8615d979fb9c3ee7539cd5e161ee920bc1c3a
| 3,645,749
|
import numpy as np
from rdkit import DataStructs
def np_array_to_binary_vector(np_arr):
""" Converts a NumPy array to the RDKit ExplicitBitVector type. """
binary_vector = DataStructs.ExplicitBitVect(len(np_arr))
binary_vector.SetBitsFromList(np.where(np_arr)[0].tolist())
return binary_vector
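# Usage sketch (requires RDKit):
if __name__ == "__main__":
    bv = np_array_to_binary_vector(np.array([1, 0, 1, 1, 0]))
    print(bv.GetNumBits(), bv.GetNumOnBits())  # 5 3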
|
c1865c47cd1abb71fbb3d3ce1b9a9cc75e87f70a
| 3,645,750
|
def augment_features(data, feature_augmentation):
"""
Augment features for a given data matrix.
:param data: Data matrix.
:param feature_augmentation: Function applied to augment the features.
:return: Augmented data matrix.
"""
if data is not None and feature_augmentation is not None:
if isinstance(feature_augmentation, list):
for augmentation_function in feature_augmentation:
data = augmentation_function(data)
else:
data = feature_augmentation(data)
return data
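# Usage sketch with a hypothetical augmentation appending squared features:
if __name__ == "__main__":
    import numpy as np

    def add_squares(d):
        return np.hstack([d, d ** 2])

    X = np.array([[1.0, 2.0], [3.0, 4.0]])
    print(augment_features(X, [add_squares]).shape)  # (2, 4)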
|
687a7ff2a4b61131f5d95e1f7d6eb77d75bd6f06
| 3,645,751
|
def _get_data_from_empty_list(source, fields='*', first_row=0, count=-1, schema=None):
""" Helper function for _get_data that handles empty lists. """
fields = get_field_list(fields, schema)
return {'cols': _get_cols(fields, schema), 'rows': []}, 0
|
bd1c219ed2ef738cf403b984cccc4aa4cd96aa2f
| 3,645,752
|
def copy_keys_except(dic, *keys):
"""Return a copy of the dict without the specified items.
"""
ret = dic.copy()
for key in keys:
try:
del ret[key]
except KeyError:
pass
return ret
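# Usage sketch: missing keys are ignored and the input dict is untouched.
if __name__ == "__main__":
    src = {"a": 1, "b": 2, "c": 3}
    assert copy_keys_except(src, "b", "z") == {"a": 1, "c": 3}
    assert src == {"a": 1, "b": 2, "c": 3}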
|
b1e57db9dbacbc2a7c502c36082f40598a0f4b90
| 3,645,753
|
import random
import math
def get_params(img, scale, ratio):
"""Get parameters for ``crop`` for a random sized crop.
Args:
img (PIL Image): Image to be cropped.
scale (tuple): range of size of the origin size cropped
ratio (tuple): range of aspect ratio of the origin aspect ratio cropped
Returns:
tuple: params (i, j, h, w) to be passed to ``crop`` for a random
sized crop.
"""
area = img.size[0] * img.size[1]
for attempt in range(10):
target_area = random.uniform(*scale) * area
log_ratio = (math.log(ratio[0]), math.log(ratio[1]))
aspect_ratio = math.exp(random.uniform(*log_ratio))
w = int(round(math.sqrt(target_area * aspect_ratio)))
h = int(round(math.sqrt(target_area / aspect_ratio)))
if w <= img.size[0] and h <= img.size[1]:
i = random.randint(0, img.size[1] - h)
j = random.randint(0, img.size[0] - w)
return i, j, h, w
# Fallback to central crop
in_ratio = img.size[0] / img.size[1]
if in_ratio < min(ratio):
w = img.size[0]
h = int(round(w / min(ratio)))
elif in_ratio > max(ratio):
h = img.size[1]
w = int(round(h * max(ratio)))
else: # whole image
w = img.size[0]
h = img.size[1]
i = (img.size[1] - h) // 2
j = (img.size[0] - w) // 2
return i, j, h, w
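# Usage sketch with torchvision-style defaults (Pillow assumed available);
# PIL's crop box is (left, upper, right, lower), i.e. (j, i, j + w, i + h).
if __name__ == "__main__":
    from PIL import Image
    img = Image.new("RGB", (640, 480))
    i, j, h, w = get_params(img, scale=(0.08, 1.0), ratio=(3 / 4, 4 / 3))
    print(img.crop((j, i, j + w, i + h)).size)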
|
80838328fc9383731e1a853c8dc572228d1a4567
| 3,645,754
|
from typing import Any
async def start_time() -> Any:
"""
Returns the contest start time.
"""
return schemas.Timestamp(timestamp=settings.EVENT_START_TIME)
|
5613f6d8928c1d1ca49677e829617769a3e6f8c3
| 3,645,755
|
import numpy as np
def reshape(v, shape):
"""Implement `reshape`."""
return np.reshape(v, shape)
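# Usage sketch:
if __name__ == "__main__":
    print(reshape(np.arange(6), (2, 3)))  # [[0 1 2] [3 4 5]]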
|
249e17a4b503b3434c5ec0d3e14bef1208321e92
| 3,645,756
|
def generate_html_from_module(module):
"""
    Extracts a module's documentation from a module object into an HTML string
:param module: Module object type to extract documentation from
:return: String representation of an HTML file
"""
html_content = f"<html><head><title>{module.__name__} Doc</title></head><body><h1>Module {module.__name__}:</h1>"
html_content += f"Function {module.__doc__}"
for function in module.__dict__:
if callable(getattr(module, function)):
html_content += f"<h2>Function {function}:</h2>"
html_content += f"{getattr(module, function).__doc__}"
html_content += f"<h3>Annotations:</h3>"
for annotation in getattr(module, function).__annotations__.keys():
html_content += f"{annotation} <br>"
html_content += "</body></html>"
return html_content
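# Usage sketch: render the docs of the current module (all callables here
# are plain Python functions, so __annotations__ is always present):
if __name__ == "__main__":
    import sys
    html = generate_html_from_module(sys.modules[__name__])
    with open("doc.html", "w") as f:
        f.write(html)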
|
3e59931f3716dd3c50dfdda3ba17807b62f04c14
| 3,645,757
|
import tensorflow as tf

# Small positive constant guarding log(0)/sqrt(0); assumed to be defined
# at module level in the original source.
EPSILON = 1e-10

def _phi(r, order):
    """Coordinate-wise nonlinearity used to define the order of the
    interpolation.
    See https://en.wikipedia.org/wiki/Polyharmonic_spline for the definition.
    Args:
        r: input op
        order: interpolation order
    Returns:
        phi_k evaluated coordinate-wise on r, for k = order
    """
    # using EPSILON prevents log(0), sqrt(0), etc.
    # sqrt(0) is well-defined, but its gradient is not
with tf.name_scope("phi"):
if order == 1:
r = tf.maximum(r, EPSILON)
r = tf.sqrt(r)
return r
elif order == 2:
return 0.5 * r * tf.math.log(tf.maximum(r, EPSILON))
elif order == 4:
return 0.5 * tf.square(r) * tf.math.log(tf.maximum(r, EPSILON))
elif order % 2 == 0:
r = tf.maximum(r, EPSILON)
return 0.5 * tf.pow(r, 0.5 * order) * tf.math.log(r)
else:
r = tf.maximum(r, EPSILON)
return tf.pow(r, 0.5 * order)
|
b2270f17260e90b995c60b4bc0fb65f49be9c514
| 3,645,758
|
def updated_topology_description(topology_description, server_description):
"""Return an updated copy of a TopologyDescription.
:Parameters:
- `topology_description`: the current TopologyDescription
- `server_description`: a new ServerDescription that resulted from
a hello call
Called after attempting (successfully or not) to call hello on the
server at server_description.address. Does not modify topology_description.
"""
address = server_description.address
# These values will be updated, if necessary, to form the new
# TopologyDescription.
topology_type = topology_description.topology_type
set_name = topology_description.replica_set_name
max_set_version = topology_description.max_set_version
max_election_id = topology_description.max_election_id
server_type = server_description.server_type
# Don't mutate the original dict of server descriptions; copy it.
sds = topology_description.server_descriptions()
# Replace this server's description with the new one.
sds[address] = server_description
if topology_type == TOPOLOGY_TYPE.Single:
# Set server type to Unknown if replica set name does not match.
if (set_name is not None and
set_name != server_description.replica_set_name):
error = ConfigurationError(
"client is configured to connect to a replica set named "
"'%s' but this node belongs to a set named '%s'" % (
set_name, server_description.replica_set_name))
sds[address] = server_description.to_unknown(error=error)
# Single type never changes.
return TopologyDescription(
TOPOLOGY_TYPE.Single,
sds,
set_name,
max_set_version,
max_election_id,
topology_description._topology_settings)
if topology_type == TOPOLOGY_TYPE.Unknown:
if server_type in (SERVER_TYPE.Standalone, SERVER_TYPE.LoadBalancer):
if len(topology_description._topology_settings.seeds) == 1:
topology_type = TOPOLOGY_TYPE.Single
else:
# Remove standalone from Topology when given multiple seeds.
sds.pop(address)
elif server_type not in (SERVER_TYPE.Unknown, SERVER_TYPE.RSGhost):
topology_type = _SERVER_TYPE_TO_TOPOLOGY_TYPE[server_type]
if topology_type == TOPOLOGY_TYPE.Sharded:
if server_type not in (SERVER_TYPE.Mongos, SERVER_TYPE.Unknown):
sds.pop(address)
elif topology_type == TOPOLOGY_TYPE.ReplicaSetNoPrimary:
if server_type in (SERVER_TYPE.Standalone, SERVER_TYPE.Mongos):
sds.pop(address)
elif server_type == SERVER_TYPE.RSPrimary:
(topology_type,
set_name,
max_set_version,
max_election_id) = _update_rs_from_primary(sds,
set_name,
server_description,
max_set_version,
max_election_id)
elif server_type in (
SERVER_TYPE.RSSecondary,
SERVER_TYPE.RSArbiter,
SERVER_TYPE.RSOther):
topology_type, set_name = _update_rs_no_primary_from_member(
sds, set_name, server_description)
elif topology_type == TOPOLOGY_TYPE.ReplicaSetWithPrimary:
if server_type in (SERVER_TYPE.Standalone, SERVER_TYPE.Mongos):
sds.pop(address)
topology_type = _check_has_primary(sds)
elif server_type == SERVER_TYPE.RSPrimary:
(topology_type,
set_name,
max_set_version,
max_election_id) = _update_rs_from_primary(sds,
set_name,
server_description,
max_set_version,
max_election_id)
elif server_type in (
SERVER_TYPE.RSSecondary,
SERVER_TYPE.RSArbiter,
SERVER_TYPE.RSOther):
topology_type = _update_rs_with_primary_from_member(
sds, set_name, server_description)
else:
# Server type is Unknown or RSGhost: did we just lose the primary?
topology_type = _check_has_primary(sds)
# Return updated copy.
return TopologyDescription(topology_type,
sds,
set_name,
max_set_version,
max_election_id,
topology_description._topology_settings)
|
3fe6f527c8fdb177f608d5130c0ce239aef84c20
| 3,645,759
|
import cv2
import numpy
from scipy.spatial import ConvexHull
def target_mask(image, path, num_grid_corners):
    """
    Arguments:
        image: grayscale image of shape (N, M)
        path: pathlib.Path object for the image
        num_grid_corners: grid dimensions of the calibration target corners,
            as passed to the corner finder
    Returns: Boolean mask of shape (N, M), which is True for pixels that
        we think are on the calibration target.
    """
ret, corners = get_cached_corners(
image_path=path, gray=image, num_grid_corners=num_grid_corners
)
if ret:
# Take the hull to get the outer 2D shape
hull = ConvexHull(corners.squeeze())
points2d = hull.points[hull.vertices]
# Scale the points outward slightly
scale = 1.3
center = numpy.average(points2d, axis=0)
for i in range(len(points2d)):
points2d[i] = center + scale * (points2d[i] - center)
# Clip to edges, note corners are (axis1, axis0)
points2d[:, 0] = numpy.clip(points2d[:, 0], 0, image.shape[1] - 1)
points2d[:, 1] = numpy.clip(points2d[:, 1], 0, image.shape[0] - 1)
# Make a boolean mask
mask = numpy.zeros(image.shape[:2], dtype=numpy.int32)
mask = cv2.fillPoly(
mask, [points2d.reshape((-1, 1, 2)).astype(numpy.int32)], color=1.0
)
mask = mask.astype(bool)
else:
mask = numpy.ones(image.shape[:2], dtype=bool)
return mask
|
369a37b1cc5a49761413bac0a9b48a275bb76e59
| 3,645,761
|
from pathlib import Path
from pandas import DataFrame
def read_data(spec: dict) -> (dict, DataFrame):
"""Creates Pandas DataFrame by reading file at path.
Appropriate read_* pandas method will be called based
on the extension of the input file specified."""
path = spec['input']['file']
ext = Path(path).suffix
kwargs = build_kwargs_read(spec, ext)
return spec, read_funcs[ext](path, **kwargs)
|
22e18a0702261c23e322fe03687864d694ecba98
| 3,645,762
|
import textwrap
def bunk_choose(bot, update, user_data):
"""Removes keyboardMarkup sent in previous handler.
Stores the response (for Lectures/Practicals message sent in previous handler) in a ``user_data``
dictionary with the key `"stype"`.
``user_data`` is a user relative dictionary which holds data between different handlers/functions
in a ConversationHandler.
Selects the appropriate table (Lecture or Practical) based on ``stype`` value.
Checks if records exist in the table for a user and sends a warning message or proceeds
to list names of all subjects in the table.
Passes control to :py:func:`bunk_input`
:param bot: Telegram Bot object
:type bot: telegram.bot.Bot
:param update: Telegram Update object
:type update: telegram.update.Update
:param user_data: User data dictionary
:type user_data: dict
:return: ConversationHandler.END if no records else INPUT
:rtype: int
"""
user_data['type'] = update.message.text
chat_id = update.message.chat_id
stype = user_data['type']
reply_markup = ReplyKeyboardRemove()
reply_text = "{}\nChoose `Cancel` to exit.".format(stype)
bot.sendMessage(chat_id=chat_id, text=reply_text, reply_markup=reply_markup, parse_mode='markdown')
if stype == "Lectures":
subject_data = Lecture.query.filter(Lecture.chatID == chat_id).all()
else:
subject_data = Practical.query.filter(Practical.chatID == chat_id).all()
if not subject_data: #If list is empty
messageContent = textwrap.dedent("""
No records found!
Please use /attendance to pull your attendance from the website first.
""")
bot.sendMessage(chat_id=chat_id, text=messageContent)
return ConversationHandler.END
messageContent = ""
for digit, subject in enumerate(subject_data):
subject_name = subject.name
messageContent += "{digit}. {subject_name}\n".format(digit=digit+1, subject_name=subject_name)
keyboard = build_menu(subject_data, 3, footer_buttons='Cancel')
reply_markup = ReplyKeyboardMarkup(keyboard)
user_data['reply_markup'] = reply_markup
bot.sendMessage(chat_id=chat_id, text=messageContent, reply_markup=reply_markup)
return INPUT
|
11d1249ec1953cc38be80470a21ba95b694c1ed5
| 3,645,763
|
def module_of(obj):
"""Return the Module given object is contained within.
"""
if isinstance(obj, Module):
return obj
elif isinstance(obj, (Function, Class)):
return obj.module
elif isinstance(obj, Method):
return module_of(obj.klass)
elif isinstance(obj, TestCase):
return module_of(obj.parent)
else:
raise TypeError("Don't know how to find the module of %r" % obj)
|
02c69c72d46e8448f7cdf41e18582508b431e4e7
| 3,645,765
|
def day(date, atmos=atmos):
"""
Returns a dataframe of daily aggregated data
Parameters
    ----------
date: str
Format yyyy/mm/dd
"""
path = f"{get_day_folder_path(date)}{date.replace('/','')}_daily_agg.csv.gz"
return load_agg(path, atmos)
|
84f56d078b0aec9605a261ed26656d4771e0eb11
| 3,645,766
|
async def find_deck_position(hcapi: OT3API, mount: OT3Mount) -> float:
"""
Find the true position of the deck in this mount's frame of reference.
The deck nominal position in deck coordinates is 0 (that's part of the
definition of deck coordinates) but if we have not yet calibrated a
particular tool on a particular mount, then the z deck coordinate that
will cause a collision is not 0. This routine finds that value.
"""
z_offset_settings = hcapi.config.calibration.z_offset
await hcapi.home_z()
here = await hcapi.gantry_position(mount)
z_prep_point = Point(*z_offset_settings.point)
above_point = z_prep_point._replace(z=here.z)
await hcapi.move_to(mount, above_point)
deck_z = await hcapi.capacitive_probe(
mount, OT3Axis.by_mount(mount), z_prep_point.z, z_offset_settings.pass_settings
)
LOG.info(f"autocalibration: found deck at {deck_z}")
await hcapi.move_to(mount, z_prep_point + Point(0, 0, CAL_TRANSIT_HEIGHT))
return deck_z
|
f75c3d066c367853036adc1de138755a2a1ee29b
| 3,645,767
|
import random
def create_symbol_id(path_to_db: str) -> str:
"""
When creating a new symbol, need to ensure
that ID is not already used in the Physics Derivation Graph
Args:
path_to_db: filename of the SQL database containing
a JSON entry that returns a nested dictionary
Returns:
proposed_symbol_id
    Example:
        >>> create_symbol_id("pdg.db")
    """
# trace_id = str(random.randint(1000000, 9999999))
# logger.info("[trace start " + trace_id + "]")
dat = clib.read_db(path_to_db)
symbol_ids_in_use = list(dat["symbols"].keys())
found_valid_id = False
loop_count = 0
while not found_valid_id:
loop_count += 1
proposed_symbol_id = str(random.randint(1000, 9999)) # 4 digits
if proposed_symbol_id not in symbol_ids_in_use:
found_valid_id = True
if loop_count > 100000:
logger.error("too many; this seems unlikely")
raise Exception("this seems unlikely")
# logger.info("[trace end " + trace_id + "]")
return proposed_symbol_id
|
1394f7f348abd43ee1cbb4fed8119fda39341028
| 3,645,770
|
import re
def get_file_name(content_disposition: str, ) -> str:
"""Content-Disposition has the filename between the `"`. get it.
Args:
content_disposition: the content disposition from download header
Returns:
the file name
"""
if match := re.search(r'"(.*?)"', content_disposition):
file_name = match.group(1)
else:
file_name = demisto.uniqueFile()
return file_name
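# Usage sketch (the demisto fallback is only hit when no quoted name exists):
if __name__ == "__main__":
    print(get_file_name('attachment; filename="report.pdf"'))  # report.pdf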
|
f81c8ee80d341bf62b970565c062db348324905f
| 3,645,771
|