content
stringlengths 35
762k
| sha1
stringlengths 40
40
| id
int64 0
3.66M
|
|---|---|---|
def get_flavor(disk=None, min_disk=None, min_ram=None, name=None, ram=None, region=None, rx_tx_factor=None, swap=None, vcpus=None):
    """
    Data source: look up an available OpenStack flavor and return its ID.

    All parameters are optional filters that are forwarded to the
    'openstack:compute/getFlavor:getFlavor' invoke.
    """
    # Map the snake_case Python arguments onto the camelCase invoke keys.
    invoke_args = {
        'disk': disk,
        'minDisk': min_disk,
        'minRam': min_ram,
        'name': name,
        'ram': ram,
        'region': region,
        'rxTxFactor': rx_tx_factor,
        'swap': swap,
        'vcpus': vcpus,
    }
    invoke_result = pulumi.runtime.invoke('openstack:compute/getFlavor:getFlavor', invoke_args)
    return GetFlavorResult(
        is_public=invoke_result.get('isPublic'),
        region=invoke_result.get('region'),
        id=invoke_result.get('id'))
|
f6689712d4bde04ae43bb4c999b04df34b1db089
| 3,643,205
|
from typing import Tuple
def deal_hands(deck: Deck) -> Tuple[Deck, Deck, Deck, Deck]:
    """Deal *deck* into four hands, round-robin.

    Hand k receives every fourth card starting from position k, exactly as
    if the cards were dealt one at a time around a table of four players.
    """
    return tuple(deck[start::4] for start in range(4))
|
151def07061a23df6c80f2be6d9015e3efbd515e
| 3,643,206
|
import select
def add_new_publication_group(project):
    """
    Create a new publication_group
    POST data MUST be in JSON format
    POST data SHOULD contain the following:
    name: name for the group
    published: publication status for the group, 0 meaning unpublished

    Returns a JSON response: 201 with the created row on success,
    400 when no JSON body was provided, 500 on a database error.
    """
    # NOTE(review): `project` (presumably a URL path parameter) is never used
    # in this body -- confirm whether the group should be linked to it.
    request_data = request.get_json()
    if not request_data:
        return jsonify({"msg": "No data provided."}), 400
    groups = get_table("publication_group")
    connection = db_engine.connect()
    insert = groups.insert()
    # Missing "published" defaults to 0 (unpublished); missing "name" is NULL.
    new_group = {
        "name": request_data.get("name", None),
        "published": request_data.get("published", 0)
    }
    try:
        result = connection.execute(insert, **new_group)
        # Re-read the inserted row so the response echoes the stored values.
        # NOTE(review): `select` must be SQLAlchemy's select(); the stdlib
        # `import select` visible above would shadow it -- verify imports.
        new_row = select([groups]).where(groups.c.id == result.inserted_primary_key[0])
        new_row = dict(connection.execute(new_row).fetchone())
        result = {
            "msg": "Created new group with ID {}".format(result.inserted_primary_key[0]),
            "row": new_row
        }
        return jsonify(result), 201
    except Exception as e:
        result = {
            "msg": "Failed to create new group",
            "reason": str(e)
        }
        return jsonify(result), 500
    finally:
        # Always release the connection back to the engine's pool.
        connection.close()
|
0890b71f972149bd860768dfeb5a377bd4fd28b0
| 3,643,207
|
def test_gmres_against_graph_scipy(n, tensor_type, dtype, error, preconditioner, solve_method):
    """
    Feature: ALL TO ALL
    Description: test cases for [N x N] X [N X 1]
    Expectation: the result match scipy in graph

    Compares mindspore's sparse GMRES against scipy's on a random full-rank
    system, both as a direct call and wrapped in a graph-mode Cell.
    """
    if not _is_valid_platform(tensor_type):
        return
    # Input CSRTensor of gmres in mindspore graph mode is not supported, just ignored it.
    if tensor_type == "CSRTensor":
        return
    class TestNet(nn.Cell):
        # Thin Cell wrapper so gmres runs inside a graph-mode construct().
        def __init__(self, solve_method):
            super(TestNet, self).__init__()
            self.solve_method = solve_method
        def construct(self, a, b, x0, tol, restart, maxiter, m, atol):
            return msp.sparse.linalg.gmres(a, b, x0, tol=tol, restart=restart, maxiter=maxiter, M=m,
                                           atol=atol, solve_method=self.solve_method)
    # Fixed seed so the random system (and hence the comparison) is reproducible.
    onp.random.seed(0)
    a = create_full_rank_matrix((n, n), dtype)
    b = onp.random.rand(n).astype(dtype)
    x0 = onp.zeros_like(b).astype(dtype)
    m = _fetch_preconditioner(preconditioner, a)
    # Tolerance at machine epsilon for the chosen dtype.
    tol = float(onp.finfo(dtype=dtype).eps)
    atol = tol
    restart = n
    maxiter = None
    # Reference solution from scipy on the raw numpy arrays.
    scipy_output, _ = osp.sparse.linalg.gmres(a, b, x0, tol=tol, restart=restart, maxiter=maxiter, M=m, atol=atol)
    # Graph Mode
    context.set_context(mode=context.GRAPH_MODE)
    a = to_tensor((a, tensor_type))
    b = Tensor(b)
    x0 = Tensor(x0)
    m = to_tensor((m, tensor_type)) if m is not None else m
    # Not in graph's construct
    ms_output, _ = msp.sparse.linalg.gmres(a, b, x0, tol=tol, restart=restart, maxiter=maxiter,
                                           M=m, atol=atol)
    assert onp.allclose(scipy_output, ms_output.asnumpy(), rtol=error, atol=error)
    # With in graph's construct
    ms_net_output, _ = TestNet(solve_method)(a, b, x0, tol, restart, maxiter, m, atol)
    assert onp.allclose(scipy_output, ms_net_output.asnumpy(), rtol=error, atol=error)
|
47fde403d403138dd1746cab532f7c7cf9b2f5a3
| 3,643,208
|
def wtime() -> float:
    """Return the current wall-clock time (seconds) from the MPI runtime."""
    now = MPI.Wtime()
    return now
|
862b18fc688fd5e34ccfc7a2f986bdb2ceb98ed4
| 3,643,209
|
def survival_df(data, t_col="t", e_col="e", label_col="Y", exclude_col=None):
    """
    Transform original DataFrame to survival dataframe that would be used in model
    training or predicting.

    NOTE: *data* is modified in place (the label column is added to it)
    before the column subset is returned.

    Parameters
    ----------
    data: DataFrame
        Survival data to be transformed.
    t_col: str
        Column name of data indicating time.
    e_col: str
        Column name of data indicating events or status.
    label_col: str
        Name of new label in transformed survival data.
    exclude_col: list, optional
        Columns to be excluded. Defaults to excluding nothing.
    Returns
    -------
    DataFrame:
        Transformed survival data. Negative values in label are taken as right censored.
    """
    # Bug fix: the original used a mutable default argument (exclude_col=[]),
    # which is shared across calls; use a None sentinel instead.
    if exclude_col is None:
        exclude_col = []
    x_cols = [c for c in data.columns if c not in [t_col, e_col] + exclude_col]
    # Label is the event time; negative values are taken as right censored.
    data.loc[:, label_col] = data.loc[:, t_col]
    data.loc[data[e_col] == 0, label_col] = - data.loc[data[e_col] == 0, label_col]
    return data[x_cols + [label_col]]
|
8d35c27a75340d5c6535727e0e419fc0548d6094
| 3,643,210
|
from datetime import datetime
def get_date_today():
    """Return today's date as a YYYYMMDD string, e.g. '20201119'."""
    today = datetime.today()
    return today.strftime('%Y%m%d')
|
d5e69607dbf4b8c829cfe30ea0335f46c7d2512a
| 3,643,211
|
import json
def generate_api_key(request):
    """Handle an AJAX request by minting and returning a fresh API key as JSON."""
    token = ApiUser.objects.get_unique_key()
    payload = json.dumps({'token' : token})
    return HttpResponse(payload, content_type="application/javascript")
|
9e29a82c3a967d7a98537ec680a0a2bfe068a88c
| 3,643,213
|
def input_output_details(interpreter):
    """Fetch the input and output tensor details from *interpreter*.

    Returns a ``(input_details, output_details)`` tuple as reported by the
    interpreter's ``get_input_details`` / ``get_output_details`` methods.
    """
    return interpreter.get_input_details(), interpreter.get_output_details()
|
3024f2a6c91a533c3aff858ee3a1db11d360bb25
| 3,643,214
|
def charge_drone_battery(drone):
    """Advance one charging step for *drone* and mark it Active.

    The battery gains 5% per call while below 95%, and is clamped to 100%
    otherwise.  A charge-complete drone log and an HTTP API log are emitted
    on every call, and the drone status is set back to "Active".
    """
    level = float(drone["State"]["Battery"])
    # Charge in 5% increments; top up to 100% once at or above 95%.
    drone["State"]["Battery"] = level + 5 if level < 95 else 100
    dronelog = gen_DroneLog("Drone %s" % (
        str(drone["DroneID"])), "charging complete, returning to Active state")
    send_dronelog(dronelog)
    drone["State"]["Status"] = "Active"
    http_api_log = gen_HttpApiLog("Drone %s" % (
        str(drone["DroneID"])), "PUT DroneLog", "Controller")
    send_http_api_log(http_api_log)
    return drone
|
2f7d955a44310215883ac5bed57fb27463a66315
| 3,643,215
|
def expirations(self, symbol, useDatetime=True, block: bool = True):
    """Gets list of available expiration dates for a symbol.

    Calls the 'market/options/expirations.json' endpoint to get list of all
    exp_dates available for some given equity.

    Args:
        symbol: Specify the stock symbol against which to query
        useDatetime: Specify whether to return datetime objects, or strings
        block: Specify whether to block thread if request exceeds rate limit

    Returns:
        List of dates (datetime obj, or string)

    Raises:
        RateLimitException: If block=False, rate limit problems will be raised

    Example:

    .. code-block:: python

        a.expirations('spy')
        # [ datetime.datetime(2022, 3, 18, 0, 0), ... ]
        a.expirations('spy', useDatetime = False)
        # [ '2022-03-18', ... ]
    """
    # Build the request against this account's credentials, then attach the
    # datetime preference before executing it.
    req = Expirations(
        auth=self.auth, account_nbr=self.account_nbr, block=block, symbol=symbol
    )
    req.useDatetime = useDatetime
    return req.request()
|
24a33bdac8da42be9433400b32c16fa4fb860766
| 3,643,216
|
from re import T
def track(name, x, direction=None):
    """
    An identity function that registers hooks to
    track the value and gradient of the specified tensor.
    Here is an example of how to track an intermediate output ::
        input = ...
        conv1 = nnt.track('op', nnt.Conv2d(shape, 4, 3), 'all')
        conv2 = nnt.Conv2d(conv1.output_shape, 5, 3)
        intermediate = conv1(input)
        output = nnt.track('conv2_output', conv2(intermediate), 'all')
        loss = T.sum(output ** 2)
        loss.backward(retain_graph=True)
        d_inter = T.autograd.grad(loss, intermediate, retain_graph=True)
        d_out = T.autograd.grad(loss, output)
        tracked = nnt.eval_tracked_variables()
        testing.assert_allclose(tracked['conv2_output'], nnt.utils.to_numpy(output))
        testing.assert_allclose(np.stack(tracked['grad_conv2_output']), nnt.utils.to_numpy(d_out[0]))
        testing.assert_allclose(tracked['op'], nnt.utils.to_numpy(intermediate))
        for d_inter_, tracked_d_inter_ in zip(d_inter, tracked['grad_op_output']):
            testing.assert_allclose(tracked_d_inter_, nnt.utils.to_numpy(d_inter_))
    :param name:
        name of the tracked tensor.
    :param x:
        tensor or module to be tracked.
        If module, the output of the module will be tracked.
    :param direction:
        there are 4 options
        ``None``: tracks only value.
        ``'forward'``: tracks only value.
        ``'backward'``: tracks only gradient.
        ``'all'``: tracks both value and gradient.
        Default: ``None``.
    :return: `x`.
    """
    # NOTE(review): `T` here is the project's torch alias and _TRACKS/hooks
    # are module-level registries -- confirm against the enclosing module.
    assert isinstance(name, str), 'name must be a string, got %s' % type(name)
    assert isinstance(x, (T.nn.Module, T.Tensor)), 'x must be a Torch Module or Tensor, got %s' % type(x)
    assert direction in (
        'forward', 'backward', 'all', None), 'direction must be None, \'forward\', \'backward\', or \'all\''
    if isinstance(x, T.nn.Module):
        # Module: install hooks so values are captured on every forward/backward pass.
        if direction in ('forward', 'all', None):
            def _forward_hook(module, input, output):
                # Store a detached snapshot of the module's output under `name`.
                _TRACKS[name] = output.detach()
            hooks[name] = x.register_forward_hook(_forward_hook)
        if direction in ('backward', 'all'):
            def _backward_hook(module, grad_input, grad_output):
                _TRACKS['grad_' + name + '_output'] = tuple([grad_out.detach() for grad_out in grad_output])
            # NOTE(review): register_backward_hook is deprecated in newer PyTorch
            # (register_full_backward_hook) -- confirm the supported torch version.
            hooks['grad_' + name + '_output'] = x.register_backward_hook(_backward_hook)
    else:
        # Tensor: the value can be snapshotted immediately; gradients need a hook.
        if direction in ('forward', 'all', None):
            _TRACKS[name] = x.detach()
        if direction in ('backward', 'all'):
            def _hook(grad):
                _TRACKS['grad_' + name] = tuple([grad_.detach() for grad_ in grad])
            hooks['grad_' + name] = x.register_hook(_hook)
    return x
|
81ae80bc8b77c16d493befdb209fe648e9a07c96
| 3,643,217
|
from pathlib import Path
from typing import Callable
from datetime import datetime
def expected_l1_ls8_folder(
    l1_ls8_folder: Path,
    offset: Callable[[Path, str], str] = relative_offset,
    organisation="usgs.gov",
    collection="1",
    l1_collection="1",
    lineage=None,
):
    """
    Build the expected ODC dataset document for a Landsat-8 Level-1 scene.

    :param l1_ls8_folder: Folder containing the L1 scene (used to build paths)
    :param offset: Callable that turns (folder, filename) into a measurement path
    :param organisation: Producer organisation (org code is its first dot-part)
    :param collection: The collection of the current scene
    :param l1_collection: The collection of the original landsat l1 scene
    :param lineage: Optional lineage mapping; defaults to an empty dict
    :return: dict matching the schema at https://schemas.opendatacube.org/dataset
    """
    org_code = organisation.split(".")[0]
    product_name = f"{org_code}_ls8c_level1_{collection}"
    # Collection 2 scenes were reprocessed later with newer LPGS software and a
    # different quality band name; the constants below differ accordingly.
    if collection == "2":
        processing_datetime = datetime(2020, 9, 7, 19, 30, 5)
        cloud_cover = 93.28
        points_model = 125
        points_version = 5
        rmse_model_x = 4.525
        rmse_model_y = 5.917
        software_version = "LPGS_15.3.1c"
        uuid = "d9221c40-24c3-5356-ab22-4dcac2bf2d70"
        quality_tag = "QA_PIXEL"
    else:
        processing_datetime = datetime(2017, 4, 5, 11, 17, 36)
        cloud_cover = 93.22
        points_model = 66
        points_version = 4
        rmse_model_x = 4.593
        rmse_model_y = 5.817
        software_version = "LPGS_2.7.0"
        uuid = "a780754e-a884-58a7-9ac0-df518a67f59d"
        quality_tag = "BQA"
    # The processing date also appears in product filenames below.
    processing_date = processing_datetime.strftime("%Y%m%d")
    return {
        "$schema": "https://schemas.opendatacube.org/dataset",
        "id": uuid,
        "label": f"{product_name}-0-{processing_date}_090084_2016-01-21",
        "product": {
            "name": product_name,
            "href": f"https://collections.dea.ga.gov.au/product/{product_name}",
        },
        "properties": {
            "datetime": datetime(2016, 1, 21, 23, 50, 23, 54435),
            # The minor version comes from the processing date (as used in filenames to distinguish reprocesses).
            "odc:dataset_version": f"{collection}.0.{processing_date}",
            "odc:file_format": "GeoTIFF",
            "odc:processing_datetime": processing_datetime,
            "odc:producer": organisation,
            "odc:product_family": "level1",
            "odc:region_code": "090084",
            "eo:cloud_cover": cloud_cover,
            "eo:gsd": 15.0,
            "eo:instrument": "OLI_TIRS",
            "eo:platform": "landsat-8",
            "eo:sun_azimuth": 74.007_443_8,
            "eo:sun_elevation": 55.486_483,
            "landsat:collection_category": "T1",
            "landsat:collection_number": int(l1_collection),
            "landsat:data_type": "L1TP",
            "landsat:geometric_rmse_model_x": rmse_model_x,
            "landsat:geometric_rmse_model_y": rmse_model_y,
            "landsat:ground_control_points_model": points_model,
            "landsat:ground_control_points_version": points_version,
            "landsat:landsat_product_id": f"LC08_L1TP_090084_20160121_{processing_date}_0{l1_collection}_T1",
            "landsat:landsat_scene_id": "LC80900842016021LGN02",
            "landsat:processing_software_version": software_version,
            "landsat:station_id": "LGN",
            "landsat:wrs_path": 90,
            "landsat:wrs_row": 84,
        },
        "crs": "epsg:32655",
        # Scene footprint in the projected CRS (closed polygon ring).
        "geometry": {
            "coordinates": [
                [
                    [879307.5, -3776885.4340469087],
                    [879307.5, -3778240.713151076],
                    [839623.3108524992, -3938223.736900397],
                    [832105.7835592609, -3953107.5],
                    [831455.8296215904, -3953107.5],
                    [831453.7930575205, -3953115.0],
                    [819969.5411349908, -3953115.0],
                    [641985.0, -3906446.160824098],
                    [641985.0, -3889797.3351159613],
                    [685647.6920251067, -3717468.346156044],
                    [688909.3673333039, -3714585.0],
                    [708011.4230769231, -3714585.0],
                    [879315.0, -3761214.3020833335],
                    [879315.0, -3776857.8139976147],
                    [879307.5, -3776885.4340469087],
                ]
            ],
            "type": "Polygon",
        },
        # Pixel grids: the panchromatic band has its own (finer-origin) grid.
        "grids": {
            "default": {
                "shape": (60, 60),
                "transform": (
                    3955.5,
                    0.0,
                    641_985.0,
                    0.0,
                    -3975.500_000_000_000_5,
                    -3_714_585.0,
                    0.0,
                    0.0,
                    1.0,
                ),
            },
            "panchromatic": {
                "shape": (60, 60),
                "transform": (
                    3955.25,
                    0.0,
                    641_992.5,
                    0.0,
                    -3975.25,
                    -3_714_592.5,
                    0.0,
                    0.0,
                    1.0,
                ),
            },
        },
        # One entry per Landsat band; paths are built by the `offset` callable.
        "measurements": {
            "coastal_aerosol": {
                "path": offset(
                    l1_ls8_folder,
                    f"LC08_L1TP_090084_20160121_{processing_date}_0{l1_collection}_T1_B1.TIF",
                )
            },
            "blue": {
                "path": offset(
                    l1_ls8_folder,
                    f"LC08_L1TP_090084_20160121_{processing_date}_0{l1_collection}_T1_B2.TIF",
                )
            },
            "green": {
                "path": offset(
                    l1_ls8_folder,
                    f"LC08_L1TP_090084_20160121_{processing_date}_0{l1_collection}_T1_B3.TIF",
                )
            },
            "red": {
                "path": offset(
                    l1_ls8_folder,
                    f"LC08_L1TP_090084_20160121_{processing_date}_0{l1_collection}_T1_B4.TIF",
                )
            },
            "nir": {
                "path": offset(
                    l1_ls8_folder,
                    f"LC08_L1TP_090084_20160121_{processing_date}_0{l1_collection}_T1_B5.TIF",
                )
            },
            "swir_1": {
                "path": offset(
                    l1_ls8_folder,
                    f"LC08_L1TP_090084_20160121_{processing_date}_0{l1_collection}_T1_B6.TIF",
                )
            },
            "swir_2": {
                "path": offset(
                    l1_ls8_folder,
                    f"LC08_L1TP_090084_20160121_{processing_date}_0{l1_collection}_T1_B7.TIF",
                )
            },
            "panchromatic": {
                "grid": "panchromatic",
                "path": offset(
                    l1_ls8_folder,
                    f"LC08_L1TP_090084_20160121_{processing_date}_0{l1_collection}_T1_B8.TIF",
                ),
            },
            "cirrus": {
                "path": offset(
                    l1_ls8_folder,
                    f"LC08_L1TP_090084_20160121_{processing_date}_0{l1_collection}_T1_B9.TIF",
                )
            },
            "lwir_1": {
                "path": offset(
                    l1_ls8_folder,
                    f"LC08_L1TP_090084_20160121_{processing_date}_0{l1_collection}_T1_B10.TIF",
                )
            },
            "lwir_2": {
                "path": offset(
                    l1_ls8_folder,
                    f"LC08_L1TP_090084_20160121_{processing_date}_0{l1_collection}_T1_B11.TIF",
                )
            },
            "quality": {
                "path": offset(
                    l1_ls8_folder,
                    f"LC08_L1TP_090084_20160121_{processing_date}_0{l1_collection}_T1_{quality_tag}.TIF",
                )
            },
        },
        "accessories": {
            "metadata:landsat_mtl": {
                "path": f"LC08_L1TP_090084_20160121_{processing_date}_0{l1_collection}_T1_MTL.txt"
            }
        },
        "lineage": lineage or {},
    }
|
16b934d3ea6c4a3daee3bde2a37bd5a7a48856b9
| 3,643,218
|
def fetchPackageNames(graphJson):
    """Parses serialized graph and returns all package names it uses

    :param graphJson: Serialized graph
    :type graphJson: dict
    :rtype: set(str)
    """
    packages = set()

    def worker(graphData):
        for node in graphData["nodes"]:
            packages.add(node["package"])
            for inpJson in node["inputs"]:
                packages.add(inpJson['package'])
            # Bug fix: the original iterated node["inputs"] a second time here,
            # so packages referenced only by outputs were silently missed.
            for outJson in node["outputs"]:
                packages.add(outJson['package'])
            # Recurse into compound nodes that embed a subgraph.
            if "graphData" in node:
                worker(node["graphData"])
    worker(graphJson)
    return packages
|
ccac1cfa1305d5d318cf3e2e3ed85d00fff7e56b
| 3,643,219
|
def get_nc_BGrid_POP(grdfile, name='POP_NEP', \
                     xrange=(170,270), yrange=(240, 350)):
    """
    grd = get_nc_BGrid_POP(grdfile)

    Load Bgrid object for POP from netCDF file.

    :param grdfile: path to the POP grid netCDF file
    :param name: grid name passed through to BGrid_POP
    :param xrange, yrange: index sub-ranges passed through to BGrid_POP
    :return: BGrid_POP instance
    """
    nc = pycnal.io.Dataset(grdfile)
    try:
        # Slicing with [:] copies the data out of the file, so the dataset
        # can be closed before the grid object is built.
        lon_t = nc.variables['TLONG'][:]
        lat_t = nc.variables['TLAT'][:]
        lon_u = nc.variables['ULONG'][:]
        lat_u = nc.variables['ULAT'][:]
        angle = nc.variables['ANGLET'][:]
        h_t = nc.variables['HT'][:]
        h_u = nc.variables['HU'][:]
        z_t = nc.variables['z_t'][:]
        z_w_top = nc.variables['z_w_top'][:]
        z_w_bot = nc.variables['z_w_bot'][:]
    finally:
        # Fix a resource leak: the original never closed the dataset.
        nc.close()
    # Build the layer-interface depths: all layer tops plus the deepest bottom.
    z_w = np.zeros(z_t.size + 1)
    z_w[:-1] = z_w_top
    z_w[-1] = z_w_bot[-1]
    return BGrid_POP(lon_t, lat_t, lon_u, lat_u, angle, h_t, h_u, z_t, z_w, \
                     name, xrange, yrange)
|
3ce9eec34f3332d21fce1ceaca2862faa69443d1
| 3,643,220
|
def types_and_shorthands():
    """Mapping from type names in the json doc to the one-letter
    shorthands shown in the output of 'attr'.
    """
    shorthands = {
        'int': 'i',
        'uint': 'u',
        'bool': 'b',
        'decimal': 'd',
        'color': 'c',
        'string': 's',
        'regex': 'r',
        'SplitAlign': 'n',
        'LayoutAlgorithm': 'n',
        'font': 'f',
        'Rectangle': 'R',
        'WindowID': 'w',
    }
    return shorthands
|
39f364677a8e2ee1d459599ba2574a8a4f4cd49e
| 3,643,221
|
def to_region(obj):
    """Convert `obj` to an instance of Region.

    ``None`` and existing Region instances are passed through unchanged;
    anything else is unpacked into the Region constructor.
    """
    if obj is None or isinstance(obj, Region):
        return obj
    return Region(*obj)
|
da5412adcc182c97950465e3c4e3248be00f242b
| 3,643,223
|
import PIL
def create_textures():
    """Create one flat-color sprite texture per entry in the global colors list.

    TODO (from original author): SHOULD be able to add custom images here
    instead of the general colors.
    """
    textures = []
    for color in colors:
        img = PIL.Image.new('RGB', (WIDTH, HEIGHT), color)
        textures.append(arcade.Texture(str(color), image=img))
    return textures
|
2652812d96157fc6f7d502e6ca39f4c4eee32dea
| 3,643,224
|
import psutil
def check_if_process_is_running(process_name):
    """Return True if any running process name contains *process_name*
    (case-insensitive)."""
    target = process_name.lower()
    for proc in psutil.process_iter():
        try:
            if target in proc.name().lower():
                return True
        except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):
            # Process vanished or is inaccessible; skip it.
            continue
    return False
|
372cd2183b7250ce738157c986adb9a3abfdca84
| 3,643,225
|
import cProfile
import pstats
import io
from pstats import SortKey # type: ignore
import optparse
def exec_main_with_profiler(options: "optparse.Values") -> int:
    """Run exec_main under cProfile and print the 40 costliest entries.

    :param options: parsed command-line options forwarded to exec_main.
    :return: the exit code returned by exec_main.
    """
    profiler = cProfile.Profile()
    profiler.enable()
    ret = exec_main(options)
    profiler.disable()
    # Render the stats, sorted by total time, into a string buffer.
    buf = io.StringIO()
    stats = pstats.Stats(profiler, stream=buf).sort_stats(SortKey.TIME)
    stats.print_stats(40)
    print(buf.getvalue())
    return ret
|
8eefc801319d94081364218fc80500b710610f31
| 3,643,226
|
def put_text(image, text, point, scale, color, thickness):
    """Draws text in image.

    # Arguments
        image: Numpy array.
        text: String. Text to be drawn.
        point: Tuple of coordinates indicating the top corner of the text.
        scale: Float. Scale of text.
        color: Tuple of integers. RGB color coordinates.
        thickness: Integer. Thickness of the lines used for drawing text.

    # Returns
        Numpy array with shape ``[H, W, 3]``. Image with text.
    """
    # Unlike most cv2 drawing functions, putText returns the drawn image.
    drawn = cv2.putText(image, text, point, FONT, scale, color, thickness, LINE)
    return drawn
|
aeab690da16577e7eff27b515cf9b682110716e9
| 3,643,227
|
def CreateRootRelativePath(self, path):
    """
    Resolve *path* against the engine root node and return its absolute path.
    """
    node = self.engine_node.make_node(path)
    return node.abspath()
|
79053bb1bcb724e8ddf9bfc4b5b13b67be9227f0
| 3,643,229
|
import json
from typing import Mapping
def to_shape(shape_ser):
    """ Deserializes a shape into a Shapely object - can handle WKT, GeoJSON,
    Python dictionaries and Shapely types.

    :param shape_ser: WKT string, GeoJSON string, mapping, or Shapely geometry.
    :raises TypeError: if the input cannot be interpreted as a shape.
    :return: a Shapely geometry object.
    """
    if isinstance(shape_ser, str):
        try:
            # Redirecting stdout because there's a low level exception that
            # prints.
            # NOTE(review): this passes a path string; the stdlib
            # contextlib.redirect_stderr expects a file-like object --
            # confirm redirect_stderr is a project-local helper.
            with redirect_stderr("/dev/null"):
                shape_obj = wkt_loads(shape_ser)
        except WKTReadingError:
            # Not WKT -- fall back to parsing the string as GeoJSON.
            try:
                shape_obj = shape(json.loads(shape_ser))
            except Exception:
                raise TypeError(
                    "{} is not serializable to a shape.".format(str(shape_ser))
                )
    elif isinstance(shape_ser, Mapping):
        # GeoJSON-like mapping (e.g. a plain dict).
        shape_obj = shape(shape_ser)
    elif isinstance(
        shape_ser,
        (
            Point,
            MultiPoint,
            LineString,
            MultiLineString,
            Polygon,
            MultiPolygon,
        ),
    ):
        # Already a Shapely geometry; pass through unchanged.
        shape_obj = shape_ser
    else:
        raise TypeError(
            "{} is not serializable to a shape.".format(str(shape_ser))
        )
    return shape_obj
|
d9a0975696ee48d816d5b61e7cc57c0547ff5033
| 3,643,231
|
import inspect
def _from_module(module, object):
"""
Return true if the given object is defined in the given module.
"""
if module is None:
return True
elif inspect.getmodule(object) is not None:
return module is inspect.getmodule(object)
elif inspect.isfunction(object):
return module.__dict__ is object.func_globals
elif inspect.isclass(object):
return module.__name__ == object.__module__
elif hasattr(object, '__module__'):
return module.__name__ == object.__module__
elif isinstance(object, property):
return True # [XX] no way not be sure.
else:
raise ValueError("object must be a class or function")
|
564157a4eb10b887b6b56c82d17d74557c233104
| 3,643,232
|
def _escape_pgpass(txt):
"""
Escape a fragment of a PostgreSQL .pgpass file.
"""
return txt.replace('\\', '\\\\').replace(':', '\\:')
|
3926f683a2715ff1d41d8433b525793e8214f7a9
| 3,643,233
|
from typing import Dict
from typing import Set
from typing import List
import math
def gen_index(doc_term_map: Dict[PT.Word, Set[PT.IndexNum]],
              dependency_map: Dict[PT.IndexNum, Count],
              i: PT.IndexNum,
              words: List[PT.Word]
              ) -> PT.PkgIndex:
    """Generate package index by scoring each word / term.

    Each word in *words* is scored as
    log(term_frequency * inverse_document_frequency * dependency_frequency)
    for package *i*.
    """
    # Term frequency of each word within this package's word list.
    word_freq: Dict[PT.Word, Count] = utils.count_freq(words)
    # NOTE(review): this counts distinct terms in the map, not documents --
    # confirm this is the intended base for the inverse-document-frequency.
    total_docs = len(doc_term_map)
    pkg_index: PT.PkgIndex = dict()
    for word in word_freq:
        # Rarer terms across packages score higher.
        doc_inverse_freq = get_doc_inverse_freq(total_docs,
                                                len(doc_term_map[word]))
        # Weight by how often package `i` appears in the dependency map.
        dependency_freq = get_dependency_freq(i, dependency_map)
        pkg_index[word] = math.log(word_freq[word] *
                                   doc_inverse_freq *
                                   dependency_freq)
    return pkg_index
|
4d82498309019f9bdc55fec8b6917576b2b0ff22
| 3,643,235
|
from typing import OrderedDict
def arr_to_dict(arr, ref_dict):
    """
    Transform an array of data into a dictionary keyed by the same keys in
    ref_dict, with data divided into chunks of the same length as in ref_dict.

    Requires that the length of the array is the sum of the lengths of the
    arrays in each entry of ref_dict. The other dimensions of the input
    array and reference dict can differ.

    Arguments
    ---------
    arr : array
        Input array to be transformed into dictionary.
    ref_dict : dict
        Reference dictionary containing the keys used to construct the output
        dictionary.

    Returns
    -------
    out : dict
        Dictionary of values from arr keyed with keys from ref_dict.
    """
    total_len = sum(len(entry) for entry in ref_dict.values())
    assert len(arr) == total_len
    out = OrderedDict()
    start = 0
    for key, entry in ref_dict.items():
        stop = start + len(entry)
        out[key] = arr[start:stop]
        start = stop
    return out
|
55339447226cdd2adafe714fa12e144c6b38faa2
| 3,643,236
|
def test_makecpt_truncated_zlow_zhigh(position):
    """
    Check a static color palette table truncated to z-low / z-high and
    rendered as a colorbar at the given position.
    """
    figure = Figure()
    makecpt(cmap="rainbow", truncate=[0.15, 0.85], series=[0, 1000])
    figure.colorbar(cmap=True, frame=True, position=position)
    return figure
|
f615d425d6433a6e4bc6dc0d8d5e18f2a0aa60c7
| 3,643,237
|
def captured_stdout():
    """Capture sys.stdout inside a ``with`` block::

        with captured_stdout() as stdout:
            print("hello")
        self.assertEqual(stdout.getvalue(), "hello\n")
    """
    return captured_output("stdout")
|
9226c23c13ad86cc09f2c08ce1ffb44f324a1044
| 3,643,239
|
def login():
    """
    Handles user authentication for the sign-in form.

    The hash of the password the user entered is compared to the hash in the
    database.  On success the user_id is saved into a fresh session and the
    user is redirected to the requested page (or the chat page).  Banned
    users are shown the ban reason instead of being logged in.
    """
    form = SignInForm()
    banned = None
    reason = None
    if form.validate_on_submit():
        user_id = form.user_id.data
        password = form.password.data
        db = get_db()
        # Parameterized query -- user input is never interpolated into SQL.
        user = db.execute("""SELECT * FROM users
                          where user_id = ?;""", (user_id,)).fetchone()
        if user is None:
            # Fixed typo in the user-facing message ("Unkown" -> "Unknown").
            form.user_id.errors.append("Unknown user id")
        elif not check_password_hash(user["password"], password):
            form.password.errors.append("Incorrect password!")
        elif user["isBanned"] == 1:
            banned = "You have been banned"
            reason = user["bannedReason"]
        else:
            # Drop any previous session state before establishing the new login.
            session.clear()
            session["user_id"] = user_id
            next_page = request.args.get("next")
            if not next_page:
                next_page = url_for("chat")
            return redirect(next_page)
    return render_template("login.html", form=form, maincolor=g.maincolor, secondcolor=g.secondcolor, textcolor=g.textcolor, banned=banned, reason=reason)
|
d2cea08572c7b1461cda490f063009bc139c7c3a
| 3,643,240
|
def create_ipu_strategy(num_ipus,
                        fp_exceptions=False,
                        enable_recomputation=True,
                        min_remote_tensor_size=50000,
                        max_cross_replica_sum_buffer_size=10*1024*1024):
    """
    Creates an IPU config and returns an IPU strategy ready to run
    something on IPUs
    :param num_ipus: Int representing the number of IPUs required.
    :param fp_exceptions: Bool, if True floating point exceptions will
        be raised.
    :param enable_recomputation: Bool, if True recomputation will be
        enabled.
    :param min_remote_tensor_size: The minimum size (in bytes) a tensor
        must be in order to be considered for being stored in remote
        memory.
    :param max_cross_replica_sum_buffer_size: The maximum number of bytes
        that can be waiting before a cross replica sum op is scheduled.
        Represents an always-live vs not-always-live trade off. The
        default used here is effective for BERT.
    :return: An IPU strategy
    """
    ipu_config = ipu.config.IPUConfig()
    ipu_config.auto_select_ipus = num_ipus
    ipu_config.allow_recompute = enable_recomputation
    # One flag controls all four floating-point exception classes
    # (invalid op, divide-by-zero, overflow, NaN-on-overflow).
    ipu_config.floating_point_behaviour.inv = fp_exceptions
    ipu_config.floating_point_behaviour.div0 = fp_exceptions
    ipu_config.floating_point_behaviour.oflo = fp_exceptions
    ipu_config.floating_point_behaviour.nanoo = fp_exceptions
    ipu_config.optimizations.minimum_remote_tensor_size = min_remote_tensor_size
    ipu_config.optimizations.merge_infeed_io_copies = True
    ipu_config.optimizations.maximum_cross_replica_sum_buffer_size = max_cross_replica_sum_buffer_size
    # Attach to devices lazily, and allow remote buffers for off-chip storage.
    ipu_config.device_connection.type = ipu.config.DeviceConnectionType.ON_DEMAND
    ipu_config.device_connection.enable_remote_buffers = True
    # Applies the configuration to the IPU system (global side effect).
    ipu_config.configure_ipu_system()
    strategy = ipu.ipu_strategy.IPUStrategy()
    return strategy
|
2aaa76de946e43305cdb7e50b673e39a08e19a50
| 3,643,241
|
def run_pipeline(context, func, ast, func_signature,
                 pipeline=None, **kwargs):
    """
    Run a bunch of AST transformers and visitors on the AST.

    If no pipeline is supplied, a fresh one is created from the context.
    Returns the (pipeline, pipeline result) pair.
    """
    active = pipeline or context.numba_pipeline(context, func, ast,
                                                func_signature, **kwargs)
    return active, active.run_pipeline()
|
559d9c44ae143e49ff505fd76df0393bae56f012
| 3,643,242
|
from typing import Union
from typing import Optional
def convert_acc_data_to_g(
    data: Union[AccDataFrame, ImuDataFrame], inplace: Optional[bool] = False
) -> Optional[Union[AccDataFrame, ImuDataFrame]]:
    """Convert acceleration data from :math:`m/s^2` to g.

    All columns whose name contains "acc" are divided by 9.81.

    Parameters
    ----------
    data : :class:`~biopsykit.utils.datatype_helper.AccDataFrame` or \
        :class:`~biopsykit.utils.datatype_helper.ImuDataFrame`
        dataframe containing acceleration data.
    inplace : bool, optional
        whether to perform the operation inplace or not. Default: ``False``

    Returns
    -------
    converted dataframe, or ``None`` when ``inplace`` is ``True``.
    """
    frame = data if inplace else data.copy()
    acc_cols = frame.filter(like="acc").columns
    frame.loc[:, acc_cols] = frame.loc[:, acc_cols] / 9.81
    return None if inplace else frame
|
e9c60ebdc143cd8243fa7ec7ebde27f0ad5beceb
| 3,643,243
|
def replace_by_one_rule(specific_rule: dict, sentence: str):
    """
    Replace a sentence with the given specific replacement dict, then
    normalize some common phrases.

    :param specific_rule: A dict containing the replacement rule, where the keys are the words to use, the values will
    be replaced by the keys.
    :param sentence: A string to be replaced by the dict and given rule.
    :return: The string after replaced by the rules.
    """
    text = sentence.lower()
    for replacement, variants in specific_rule.items():
        for variant in variants:
            text = text.replace(variant, replacement)
    # Map the bare token 'be' to 'is' before phrase-level normalization.
    text = " ".join('is' if token == 'be' else token for token in text.split(' '))
    text = text.replace('(s)', '').replace('is at there', 'been there').replace('(es)', '')
    text = text.replace('is in there', 'been there').replace('is there', 'been there')
    return text.replace('possess', 'have')
|
31a5bd58ef77d76c968c353dd493ba3357d5b506
| 3,643,244
|
def get_os(platform):
    """
    Return the icon-name of the OS.

    @type platform: C{string}
    @param platform: A string that represents the platform of the
                     relay.
    @rtype: C{string}
    @return: The icon-name version of the OS of the relay.
    """
    if not platform:
        return 'NotAvailable'
    for os_name in __OS_LIST:
        if os_name in platform:
            # Distinguish the Windows Server variants from desktop Windows.
            if os_name == 'Windows' and 'Server' in platform:
                return 'WindowsServer'
            return os_name
    return 'NotAvailable'
|
1610c373076a8fd9b647dad22c5ff39732d14fa7
| 3,643,245
|
def get_loglikelihood_fn(dd_s, f_l=f_l, f_h=f_h, n_f=n_f):
    """
    Build a log-likelihood closure for the given signal system.

    x: parameter point (gamma_s, rho_6T, M_chirp in MSUN, log10(q))
    dd_s: signal system

    The defaults bind the module-level f_l / f_h / n_f values at function
    definition time; the returned closure evaluates the FFT-based
    log-likelihood on an n_f-point frequency grid over [f_l, f_h].
    """
    fs = jnp.linspace(f_l, f_h, n_f)
    # Padding used by the FFT-based match/likelihood computation.
    pad_low, pad_high = get_match_pads(fs)
    def _ll(x):
        # Unpack parameters into dark dress ones
        gamma_s, rho_6T, M_chirp_MSUN, log10_q = x
        M_chirp = M_chirp_MSUN * MSUN
        q = 10 ** log10_q
        rho_6 = rho_6T_to_rho6(rho_6T)
        # Cutoff frequency: ISCO of the primary mass.
        f_c = get_f_isco(get_m_1(M_chirp, q))
        # Template system shares phase/time/distance with the signal system.
        dd_h = DynamicDress(
            gamma_s, rho_6, M_chirp, q, dd_s.Phi_c, dd_s.tT_c, dd_s.dL, f_c
        )
        return loglikelihood_fft(dd_h, dd_s, fs, pad_low, pad_high)
    return _ll
|
da3843fd069a9c3a4646aa282c854bbd5557d74b
| 3,643,246
|
def to_module_name(field):
    """Convert a module path to the bracketed syntax used in
    https://github.com/brendangregg/FlameGraph

    Examples:
        [unknown] -> [unknown]
        /usr/bin/firefox -> [firefox]
    """
    if field == '[unknown]':
        return field
    basename = field.split('/')[-1]
    return '[{}]'.format(basename)
|
75e3fbb9a45710ea6dacecf5ecc34a5b9409606a
| 3,643,247
|
def ApplyMomentum(variable, accumulation, learning_rate, gradient, momentum, use_nesterov=False, gradient_scale=1.0):
    """Apply the momentum update rule via the backend kernel.

    Note the argument reordering: the backend expects
    (variable, gradient, accumulation, learning_rate, momentum).
    """
    return apply_momentum.apply_momentum(
        variable, gradient, accumulation, learning_rate, momentum,
        use_nesterov=use_nesterov, grad_scale=gradient_scale)
|
f86b923e707c7f98d55cbf23a7ac17040bf2929c
| 3,643,248
|
def init():
    """Sign the plugin on and return True if it loaded successfully."""
    g.plugin_signon(__name__)
    return True
|
74cc6395310d648b809b6df965700ca708581b5e
| 3,643,249
|
def _fftconvolve_14(in1, in2, int2_fft, mode="same"):
    """
    scipy routine scipy.signal.fftconvolve with kernel already fourier transformed

    ``int2_fft`` must be the rfftn of ``in2`` at the padded shape; ``in2``
    itself is only used for its shape and the valid-mode size arithmetic.

    NOTE(review): relies on private scipy ~0.14-era helpers re-exported via
    ``signaltools`` (_next_regular, _centered, asarray/array/rfftn/irfftn) --
    confirm the pinned scipy version.
    """
    in1 = signaltools.asarray(in1)
    in2 = signaltools.asarray(in2)
    if in1.ndim == in2.ndim == 0:  # scalar inputs
        return in1 * in2
    elif not in1.ndim == in2.ndim:
        raise ValueError("in1 and in2 should have the same dimensionality")
    elif in1.size == 0 or in2.size == 0:  # empty arrays
        return signaltools.array([])
    s1 = signaltools.array(in1.shape)
    s2 = signaltools.array(in2.shape)
    # Full linear-convolution output shape.
    shape = s1 + s2 - 1
    # Speed up FFT by padding to optimal size for FFTPACK
    fshape = [signaltools._next_regular(int(d)) for d in shape]
    fslice = tuple([slice(0, int(sz)) for sz in shape])
    # Pre-1.9 NumPy FFT routines are not threadsafe. For older NumPys, make
    # sure we only call rfftn/irfftn from one thread at a time.
    ret = signaltools.irfftn(signaltools.rfftn(in1, fshape) *
                             int2_fft, fshape)[fslice].copy()
    #np.fft.rfftn(in2, fshape)
    if mode == "full":
        return ret
    elif mode == "same":
        return signaltools._centered(ret, s1)
    elif mode == "valid":
        return signaltools._centered(ret, s1 - s2 + 1)
    else:
        raise ValueError("Acceptable mode flags are 'valid'," " 'same', or 'full'.")
|
a32d85bb24fe0d1e64b12cdae14391c0c8e9e111
| 3,643,250
|
def solve_step(previous_solution_space, phase_space_position, step_num):
    """
    Solves the differential equation across the full spectrum of trajectory angles and neutrino energies.

    :param previous_solution_space: solution to previous step of the differential equation
        across all angles and energies, includes phase space values in first 2 columns (ndarray)
    :param phase_space_position: which cosine / neutrino energy slice for which to compute the solution
        (int, row index of previous solution)
    :param step_num: step number in discretized radial distance away from the initial core (int)
    :return: (cosine, energy, solution) tuple
    """
    euler_solution = solve_fixed_energy_angle(
        previous_solution_space=previous_solution_space,
        phase_space_position=phase_space_position,
        step_num=step_num
    )
    # Echo back the phase-space coordinates (first two columns) with the result.
    row = previous_solution_space[phase_space_position]
    return row[0], row[1], euler_solution
|
6678a9482453f2a43942f04085c892e8484e75cc
| 3,643,251
|
def execute(*args, **kw):
    """Wrapper for ``Cursor#execute()`` on the default connection."""
    # Open a fresh cursor on the "default" connection and forward the call.
    cursor = _m.connection["default"].cursor()
    return cursor.execute(*args, **kw)
|
8bb11436046e479c580c48eb42d4cb2b37372945
| 3,643,253
|
def get_login_client():
    """
    Build a LinodeLoginClient using the credentials declared in this
    example project's ``config`` module.
    """
    client_id = config.client_id
    client_secret = config.client_secret
    return LinodeLoginClient(client_id, client_secret)
|
786d3d6981f81afa95ed7de62544896982b17c58
| 3,643,254
|
import re
def detect_ascii_slice(lines):
    # type: (List[str]) -> slice
    """
    Given a list of strings, return the slice most likely to extract the
    hex-byte columns from each line.

    Lines produced by libsnmp command output with the "-d" switch are
    prefixed with a 4-digit byte offset followed by a colon (for example
    ``"0000: 30 02 ..."``); in that case the offset prefix must be skipped.
    Decision is made from the first line only, matching the original intent.
    """
    for line in lines:
        # Bug fix: the regex result was previously computed but ignored and
        # a weaker `":" in line` test was used, which also triggered on
        # colons appearing anywhere in the payload. Use the anchored match.
        if re.match(r"^\d{4}:", line):
            # Skip the "NNNN: " offset prefix (6 characters).
            return slice(6, 56)
        else:
            return slice(0, 50)
    # No lines at all: keep (almost) everything.
    return slice(0, -1)
|
06cf3b9faa24f46aef37ae94495cbe129851bd7c
| 3,643,255
|
import torch
def inception_model_pytorch():
    """The InceptionBlocks model the WebGME folks provided as a test case for deepforge.

    Builds the model deterministically (seeded with 0), switches it to eval
    mode, and returns it.

    :return: an ``InceptionBlocks`` ``nn.Module`` instance in eval mode
    """
    # Bug fix: only `import torch` is in scope here, so the bare name `nn`
    # used below was unresolved. Bind it locally.
    from torch import nn

    class InceptionBlocks(nn.Module):
        """Stack of inception-style blocks over a 5-channel image input,
        concatenated with an auxiliary `ebv` vector before the dense head."""

        def __init__(self):
            super().__init__()
            # Pad right/bottom by one so stride-1 2x2 average pools keep size.
            self.asymmetric_pad = nn.ZeroPad2d((0, 1, 0, 1))
            # Stem
            self.conv2d = nn.Conv2d(in_channels=5, out_channels=64, kernel_size=(5, 5), padding=2, bias=True)
            self.prelu = nn.PReLU(init=0.0)
            self.averagepooling2d = nn.AvgPool2d((2, 2), stride=2, padding=0)
            # Inception block 1 branches
            self.conv2d2 = nn.Conv2d(in_channels=64, out_channels=48, kernel_size=(1, 1), padding=0, bias=True)
            self.prelu2 = nn.PReLU(init=0.0)
            self.conv2d3 = nn.Conv2d(in_channels=48, out_channels=64, kernel_size=(3, 3), padding=1, bias=True)
            self.prelu3 = nn.PReLU(init=0.0)
            self.conv2d4 = nn.Conv2d(in_channels=64, out_channels=48, kernel_size=(1, 1), padding=0, bias=True)
            self.prelu4 = nn.PReLU(init=0.0)
            self.averagepooling2d2 = nn.AvgPool2d((2, 2), stride=1)
            self.conv2d5 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=(1, 1), padding=0, bias=True)
            self.prelu5 = nn.PReLU(init=0.0)
            self.conv2d6 = nn.Conv2d(in_channels=64, out_channels=48, kernel_size=(1, 1), padding=0, bias=True)
            self.prelu6 = nn.PReLU(init=0.0)
            self.conv2d7 = nn.Conv2d(in_channels=48, out_channels=64, kernel_size=(1, 1), padding=0, bias=True)
            self.prelu7 = nn.PReLU(init=0.0)
            # Inception block 2 branches (input: 240 = 64+64+64+48 concat)
            self.conv2d8 = nn.Conv2d(in_channels=240, out_channels=64, kernel_size=(1, 1), padding=0, bias=True)
            self.conv2d9 = nn.Conv2d(in_channels=240, out_channels=92, kernel_size=(1, 1), padding=0, bias=True)
            self.conv2d10 = nn.Conv2d(in_channels=240, out_channels=64, kernel_size=(1, 1), padding=0, bias=True)
            self.prelu8 = nn.PReLU(init=0.0)
            self.conv2d11 = nn.Conv2d(in_channels=64, out_channels=92, kernel_size=(5, 5), padding=2, bias=True)
            self.prelu9 = nn.PReLU(init=0.0)
            self.prelu10 = nn.PReLU(init=0.0)
            self.averagepooling2d3 = nn.AvgPool2d((2, 2), stride=1, padding=0)
            self.conv2d12 = nn.Conv2d(in_channels=240, out_channels=64, kernel_size=(1, 1), padding=0, bias=True)
            self.prelu11 = nn.PReLU(init=0.0)
            self.conv2d13 = nn.Conv2d(in_channels=64, out_channels=92, kernel_size=(3, 3), padding=1, bias=True)
            self.prelu12 = nn.PReLU(init=0.0)
            self.prelu13 = nn.PReLU(init=0.0)
            self.averagepooling2d4 = nn.AvgPool2d((2, 2), stride=2, padding=0)
            # Inception block 3 branches (input: 340 channels)
            self.conv2d14 = nn.Conv2d(in_channels=340, out_channels=92, kernel_size=(1, 1), padding=0, bias=True)
            self.prelu14 = nn.PReLU(init=0.0)
            self.conv2d15 = nn.Conv2d(in_channels=92, out_channels=128, kernel_size=(5, 5), padding=2, bias=True)
            self.prelu15 = nn.PReLU(init=0.0)
            self.conv2d16 = nn.Conv2d(in_channels=340, out_channels=128, kernel_size=(1, 1), padding=0, bias=True)
            self.prelu16 = nn.PReLU(init=0.0)
            self.conv2d17 = nn.Conv2d(in_channels=340, out_channels=92, kernel_size=(1, 1), padding=0, bias=True)
            self.prelu17 = nn.PReLU(init=0.0)
            self.averagepooling2d5 = nn.AvgPool2d((2, 2), stride=1, padding=0)
            self.conv2d18 = nn.Conv2d(in_channels=340, out_channels=92, kernel_size=(1, 1), padding=0, bias=True)
            self.prelu18 = nn.PReLU(init=0.0)
            self.conv2d19 = nn.Conv2d(in_channels=92, out_channels=128, kernel_size=(3, 3), padding=1, bias=True)
            self.prelu19 = nn.PReLU(init=0.0)
            # Inception block 4 branches (input: 476 channels)
            self.conv2d20 = nn.Conv2d(in_channels=476, out_channels=92, kernel_size=(1, 1), padding=0, bias=True)
            self.prelu20 = nn.PReLU(init=0.0)
            self.conv2d21 = nn.Conv2d(in_channels=92, out_channels=128, kernel_size=(3, 3), padding=1, bias=True)
            self.prelu21 = nn.PReLU(init=0.0)
            self.conv2d22 = nn.Conv2d(in_channels=476, out_channels=92, kernel_size=(1, 1), padding=0, bias=True)
            self.prelu22 = nn.PReLU(init=0.0)
            self.averagepooling2d6 = nn.AvgPool2d((2, 2), stride=1, padding=0)
            self.conv2d23 = nn.Conv2d(in_channels=476, out_channels=92, kernel_size=(1, 1), padding=0, bias=True)
            self.prelu23 = nn.PReLU(init=0.0)
            self.conv2d24 = nn.Conv2d(in_channels=92, out_channels=128, kernel_size=(5, 5), padding=2, bias=True)
            self.prelu24 = nn.PReLU(init=0.0)
            self.conv2d25 = nn.Conv2d(in_channels=476, out_channels=128, kernel_size=(1, 1), padding=0, bias=True)
            self.prelu25 = nn.PReLU(init=0.0)
            self.averagepooling2d7 = nn.AvgPool2d((2, 2), stride=2, padding=0)
            # Final block branches (input: 476 channels)
            self.conv2d26 = nn.Conv2d(in_channels=476, out_channels=92, kernel_size=(1, 1), padding=0, bias=True)
            self.prelu26 = nn.PReLU(init=0.0)
            self.averagepooling2d8 = nn.AvgPool2d((2, 2), stride=1, padding=0)
            self.conv2d27 = nn.Conv2d(in_channels=476, out_channels=92, kernel_size=(1, 1), padding=0, bias=True)
            self.prelu27 = nn.PReLU(init=0.0)
            self.conv2d28 = nn.Conv2d(in_channels=92, out_channels=128, kernel_size=(3, 3), padding=1, bias=True)
            self.prelu28 = nn.PReLU(init=0.0)
            self.conv2d29 = nn.Conv2d(in_channels=476, out_channels=128, kernel_size=(1, 1), padding=0, bias=True)
            self.prelu29 = nn.PReLU(init=0.0)
            # Dense head; 22273 = flattened conv features + ebv vector length.
            self.dense = nn.Linear(22273, 1096, bias=True)
            self.prelu30 = nn.PReLU(init=0.0)
            self.dense2 = nn.Linear(1096, 1096, bias=True)
            self.prelu31 = nn.PReLU(init=0.0)
            self.dense3 = nn.Linear(1096, 180, bias=True)

        def forward(self, galaxy_images_output, ebv_output):
            # Stem
            conv2d_output = self.conv2d(galaxy_images_output)
            prelu_output = self.prelu(conv2d_output)
            averagepooling2d_output = self.averagepooling2d(prelu_output)
            # Block 1
            conv2d_output2 = self.conv2d2(averagepooling2d_output)
            prelu_output2 = self.prelu2(conv2d_output2)
            conv2d_output3 = self.conv2d3(prelu_output2)
            prelu_output3 = self.prelu3(conv2d_output3)
            conv2d_output4 = self.conv2d4(averagepooling2d_output)
            prelu_output4 = self.prelu4(conv2d_output4)
            prelu_output4 = self.asymmetric_pad(prelu_output4)
            averagepooling2d_output2 = self.averagepooling2d2(prelu_output4)
            conv2d_output5 = self.conv2d5(averagepooling2d_output)
            prelu_output5 = self.prelu5(conv2d_output5)
            conv2d_output6 = self.conv2d6(averagepooling2d_output)
            prelu_output6 = self.prelu6(conv2d_output6)
            conv2d_output7 = self.conv2d7(prelu_output6)
            prelu_output7 = self.prelu7(conv2d_output7)
            concatenate_output = torch.cat(
                (prelu_output5, prelu_output3, prelu_output7, averagepooling2d_output2),
                dim=1,
            )
            # Block 2
            conv2d_output8 = self.conv2d8(concatenate_output)
            conv2d_output9 = self.conv2d9(concatenate_output)
            conv2d_output10 = self.conv2d10(concatenate_output)
            prelu_output8 = self.prelu8(conv2d_output10)
            conv2d_output11 = self.conv2d11(prelu_output8)
            prelu_output9 = self.prelu9(conv2d_output11)
            prelu_output10 = self.prelu10(conv2d_output8)
            prelu_output10 = self.asymmetric_pad(prelu_output10)
            averagepooling2d_output3 = self.averagepooling2d3(prelu_output10)
            conv2d_output12 = self.conv2d12(concatenate_output)
            prelu_output11 = self.prelu11(conv2d_output12)
            conv2d_output13 = self.conv2d13(prelu_output11)
            prelu_output12 = self.prelu12(conv2d_output13)
            prelu_output13 = self.prelu13(conv2d_output9)
            concatenate_output2 = torch.cat(
                (
                    prelu_output13,
                    prelu_output12,
                    prelu_output9,
                    averagepooling2d_output3,
                ),
                dim=1,
            )
            # Block 3
            averagepooling2d_output4 = self.averagepooling2d4(concatenate_output2)
            conv2d_output14 = self.conv2d14(averagepooling2d_output4)
            prelu_output14 = self.prelu14(conv2d_output14)
            conv2d_output15 = self.conv2d15(prelu_output14)
            prelu_output15 = self.prelu15(conv2d_output15)
            conv2d_output16 = self.conv2d16(averagepooling2d_output4)
            prelu_output16 = self.prelu16(conv2d_output16)
            conv2d_output17 = self.conv2d17(averagepooling2d_output4)
            prelu_output17 = self.prelu17(conv2d_output17)
            prelu_output17 = self.asymmetric_pad(prelu_output17)
            averagepooling2d_output5 = self.averagepooling2d5(prelu_output17)
            conv2d_output18 = self.conv2d18(averagepooling2d_output4)
            prelu_output18 = self.prelu18(conv2d_output18)
            conv2d_output19 = self.conv2d19(prelu_output18)
            prelu_output19 = self.prelu19(conv2d_output19)
            concatenate_output3 = torch.cat(
                (
                    prelu_output16,
                    prelu_output19,
                    prelu_output15,
                    averagepooling2d_output5,
                ),
                dim=1,
            )
            # Block 4
            conv2d_output20 = self.conv2d20(concatenate_output3)
            prelu_output20 = self.prelu20(conv2d_output20)
            conv2d_output21 = self.conv2d21(prelu_output20)
            prelu_output21 = self.prelu21(conv2d_output21)
            conv2d_output22 = self.conv2d22(concatenate_output3)
            prelu_output22 = self.prelu22(conv2d_output22)
            prelu_output22 = self.asymmetric_pad(prelu_output22)
            averagepooling2d_output6 = self.averagepooling2d6(prelu_output22)
            conv2d_output23 = self.conv2d23(concatenate_output3)
            prelu_output23 = self.prelu23(conv2d_output23)
            conv2d_output24 = self.conv2d24(prelu_output23)
            prelu_output24 = self.prelu24(conv2d_output24)
            conv2d_output25 = self.conv2d25(concatenate_output3)
            prelu_output25 = self.prelu25(conv2d_output25)
            concatenate_output4 = torch.cat(
                (
                    prelu_output25,
                    prelu_output21,
                    prelu_output24,
                    averagepooling2d_output6,
                ),
                dim=1,
            )
            # Final block
            averagepooling2d_output7 = self.averagepooling2d7(concatenate_output4)
            conv2d_output26 = self.conv2d26(averagepooling2d_output7)
            prelu_output26 = self.prelu26(conv2d_output26)
            prelu_output26 = self.asymmetric_pad(prelu_output26)
            averagepooling2d_output8 = self.averagepooling2d8(prelu_output26)
            conv2d_output27 = self.conv2d27(averagepooling2d_output7)
            prelu_output27 = self.prelu27(conv2d_output27)
            conv2d_output28 = self.conv2d28(prelu_output27)
            prelu_output28 = self.prelu28(conv2d_output28)
            conv2d_output29 = self.conv2d29(averagepooling2d_output7)
            prelu_output29 = self.prelu29(conv2d_output29)
            concatenate_output5 = torch.cat(
                (prelu_output29, prelu_output28, averagepooling2d_output8), dim=1
            )
            # Head: flatten conv features, append ebv vector, run dense layers.
            flatten_output = torch.flatten(concatenate_output5)
            concatenate_output6 = torch.cat((flatten_output, ebv_output), dim=0)
            dense_output = self.dense(concatenate_output6)
            prelu_output30 = self.prelu30(dense_output)
            dense_output2 = self.dense2(prelu_output30)
            prelu_output31 = self.prelu31(dense_output2)
            dense_output3 = self.dense3(prelu_output31)
            return dense_output3

    torch.manual_seed(0)
    model = InceptionBlocks()
    model.eval()
    return model
|
c70e4ea71eb38ae0d81b6985076a0c1588758df2
| 3,643,256
|
def project_in_2D(K, camera_pose, mesh, resolution_px):
    """
    Project all 3D triangle vertices in the mesh into
    the 2D image of given resolution
    Parameters
    ----------
    K: ndarray
        Camera intrinsics matrix, 3x3
    camera_pose: ndarray
        Camera pose (inverse of extrinsics), 4x4
    mesh: ndarray
        Triangles to be projected in 2d, (Nx3x3)
    resolution_px: tuple
        Resolution of image in pixel
    Returns
    -------
    coords_projected_2D: ndarray
        Triangle vertices projected in 2D and clipped to
        image resolution
    """
    # NOTE(review): tf.matrix_transpose is the TF1-era name
    # (tf.linalg.matrix_transpose in TF2) -- confirm the TF version in use.
    resolution_x_px, resolution_y_px = resolution_px  # image resolution in pixels
    # Decompose camera pose into rotation and translation
    RT = camera_pose[:-1, :]  # remove homogeneous row
    R = RT[:, :-1]  # rotation matrix 3x3
    T = RT[:, -1:]  # translation vector 3x1
    # Invert the camera pose matrix to get the camera extrinsics
    # Due to the particular matrix geometry we can avoid raw inversion
    # (for a rigid transform, inverse rotation is the transpose).
    Rc = tf.matrix_transpose(R)
    Tc = tf.matmul(-Rc, T)
    RT = tf.concat([Rc, Tc], axis=-1)  # camera extrinsics
    # Correct reference system of extrinsics matrix
    # y is down: (to align to the actual pixel coordinates used in digital images)
    # right-handed: positive z look-at direction
    correction_factor = tf.constant(value=np.array([[1., 0., 0.],
                                                    [0., -1., 0.],
                                                    [0., 0., -1.]]), dtype=tf.float32)
    RT = tf.matmul(correction_factor, RT)
    # Compose whole camera projection matrix (3x4)
    P = tf.matmul(K, RT)
    # Flatten triangles to a plain list of 3D vertices (N*3, 3).
    mesh_flat = tf.reshape(mesh, shape=(-1, 3))
    len_mesh_flat = tf.shape(mesh_flat)[0]
    # Create constant tensor to store 3D model coordinates (homogeneous)
    coords_3d_h = tf.concat([mesh_flat, tf.ones(shape=(len_mesh_flat, 1), dtype=tf.float32)], axis=-1)  # n_triangles, 4
    coords_3d_h = tf.transpose(coords_3d_h, perm=[1, 0])  # 4, n_triangles
    # Project 3D vertices into 2D
    coords_projected_2D_h = tf.transpose(tf.matmul(P, coords_3d_h), perm=[1, 0])  # n_triangles, 3
    # Perspective divide; the epsilon guards against zero depth.
    coords_projected_2D = coords_projected_2D_h[:, :2] / (coords_projected_2D_h[:, 2:3] + 1e-8)
    # Clip indexes in image range (-1 marks off-screen on the low side)
    coords_projected_2D_x_clip = tf.clip_by_value(coords_projected_2D[:, 0:0 + 1],
                                                  clip_value_min=-1, clip_value_max=resolution_x_px)
    coords_projected_2D_y_clip = tf.clip_by_value(coords_projected_2D[:, 1:1 + 1],
                                                  clip_value_min=-1, clip_value_max=resolution_y_px)
    return tf.concat([coords_projected_2D_x_clip, coords_projected_2D_y_clip], axis=-1)
|
9615d940fe083853e0bc179b79e1a19b7f9304bf
| 3,643,259
|
from typing import Any
def forbidden(description: Any) -> APIGatewayProxyResult:
    """Return a response with FORBIDDEN status code."""
    # Wrap the description in the domain error and build the 403 payload.
    return _build_response(ForbiddenError(description), HTTPStatus.FORBIDDEN)
|
7b87e41081f1f7fa8f1e140a2c4d5ee597222193
| 3,643,260
|
from datetime import datetime
def str_2_datetime(p_str, fmt="%Y-%m-%d %H:%M:%S"):
    """Convert a string into a ``datetime``.

    :param p_str: the raw time string (an existing ``datetime`` is passed
        through unchanged)
    :param fmt: the time format used for parsing
    :rtype: datetime.datetime
    :raises TypeError: if ``p_str`` is neither ``str`` nor ``datetime``
    """
    # Bug fix: this module imports the *class* (`from datetime import
    # datetime`), so the previous `datetime.datetime` attribute lookups
    # raised AttributeError. Use the class name directly.
    if isinstance(p_str, datetime):
        # don't need to transform
        return p_str
    if not isinstance(p_str, str):
        raise TypeError("params `p_str` must be type of str")
    return datetime.strptime(p_str, fmt)
|
0fa86e0aebcf2c2ff53ceb26ae93ed762175ef03
| 3,643,261
|
def traitement(l):
    """Remove unused blank cards from the end of the hand.

    Trailing entries whose second field is the string ``'nan'`` are deleted
    in place; the (possibly emptied) list is returned.

    :param l: list of card entries, each indexable at position 1
    :return: the same list with trailing 'nan' cards removed
    """
    # Bug fix: guard against an empty list so an all-'nan' hand does not
    # raise IndexError once every card has been removed.
    while l and l[-1][1] == 'nan':
        del l[-1]
    return l
|
d21a7d493a35fc53195315da9b824b0ca3c8ba25
| 3,643,262
|
from datetime import datetime
import bisect
def group_frames_by_track_date(frames):
    """Classify frames by track and date.

    Returns a dict with:
      - ``hits``:       SLC id -> "<product_url>/<archive_filename>" (S3 URL preferred)
      - ``grouped``:    track number -> {day datetime -> sorted list of SLC ids}
      - ``dates``:      SLC id -> [start datetime, end datetime]
      - ``footprints``: SLC id -> ``location`` field of the frame
      - ``metadata``:   SLC id -> ``metadata`` field of the frame

    NOTE(review): relies on the module-level ``SLC_RE`` regex exposing the
    ``start_*`` / ``end_*`` named groups used below, and on each frame having
    the ``fields['partial'][0]`` Elasticsearch shape -- confirm upstream.
    """
    hits = {}
    grouped = {}
    dates = {}
    footprints = {}
    metadata = {}
    for h in frames:
        # Skip duplicate frame ids.
        if h['_id'] in hits: continue
        fields = h['fields']['partial'][0]
        #print("h['_id'] : %s" %h['_id'])
        # get product url; prefer S3
        prod_url = fields['urls'][0]
        if len(fields['urls']) > 1:
            for u in fields['urls']:
                if u.startswith('s3://'):
                    prod_url = u
                    break
        #print("prod_url : %s" %prod_url)
        hits[h['_id']] = "%s/%s" % (prod_url, fields['metadata']['archive_filename'])
        match = SLC_RE.search(h['_id'])
        #print("match : %s" %match)
        if not match:
            raise RuntimeError("Failed to recognize SLC ID %s." % h['_id'])
        # Day-granularity timestamp used as the grouping key.
        day_dt = datetime(int(match.group('start_year')),
                          int(match.group('start_month')),
                          int(match.group('start_day')),
                          0, 0, 0)
        #print("day_dt : %s " %day_dt)
        # Insert the SLC id in sorted order within its (track, day) bucket.
        bisect.insort(grouped.setdefault(fields['metadata']['trackNumber'], {}) \
                             .setdefault(day_dt, []), h['_id'])
        slc_start_dt = datetime(int(match.group('start_year')),
                                int(match.group('start_month')),
                                int(match.group('start_day')),
                                int(match.group('start_hour')),
                                int(match.group('start_min')),
                                int(match.group('start_sec')))
        #print("slc_start_dt : %s" %slc_start_dt)
        slc_end_dt = datetime(int(match.group('end_year')),
                              int(match.group('end_month')),
                              int(match.group('end_day')),
                              int(match.group('end_hour')),
                              int(match.group('end_min')),
                              int(match.group('end_sec')))
        #print("slc_end_dt : %s" %slc_end_dt)
        dates[h['_id']] = [ slc_start_dt, slc_end_dt ]
        footprints[h['_id']] = fields['location']
        metadata[h['_id']] = fields['metadata']
        #break
    #print("grouped : %s" %grouped)
    logger.info("grouped keys : %s" %grouped.keys())
    return {
        "hits": hits,
        "grouped": grouped,
        "dates": dates,
        "footprints": footprints,
        "metadata": metadata,
    }
|
327a6357c5ce8fc9a54d27107e2cb43424dd7630
| 3,643,265
|
def generate_violin_figure(dataframe, columns, ytitle, legend_title=None):
    """ Plot 2 columns of data as violin plot, grouped by block.
    :param dataframe: Variance of projections.
    :type dataframe: pandas.DataFrame
    :param columns: 2 columns for the negative and the positive side of the violins.
    :type columns: list
    :param ytitle: Title of Y-axis. What is being plotted? What are the units of the data?
    :type ytitle: str
    :param legend_title: What's the common denominator of the columns?
    :type legend_title: str
    :return: Figure object of graph.
    :rtype: plotly.graph_objs.Figure
    """
    legend = go.layout.Legend(
        xanchor='right',
        yanchor='top',
        orientation='v',
        title=legend_title,
    )
    fig = go.Figure()
    fig.layout.update(xaxis_title='Task',
                      yaxis_title=ytitle,
                      legend=legend,
                      margin=theme['graph_margins'])
    # Nothing to plot: return the empty, but fully styled, figure.
    if dataframe.empty:
        return fig
    # Make sure we plot only 2 columns, left and right.
    columns = columns[:2]
    # Split violin: columns[0] drawn on the negative (left) half,
    # columns[1] on the positive (right) half of each violin.
    sides = ('negative', 'positive')
    grouped = dataframe.groupby('task')
    for name, group_df in grouped:
        for i, col in enumerate(columns):
            # NOTE(review): `task_order` and `theme` are module-level;
            # task_order maps task names to numeric x positions.
            fig.add_trace(go.Violin(x=group_df['task'].map(task_order),
                                    y=group_df[col],
                                    legendgroup=col, scalegroup=col, name=col,
                                    side=sides[i],
                                    pointpos=i - 0.5,
                                    line_color=theme[col],
                                    text=[f"{col}<br />participant: {j['user']}<br />"
                                          f"block: {j['block']}<br />condition: {j['condition']}"
                                          for _, j in group_df.iterrows()],
                                    hoverinfo='y+text',
                                    spanmode='hard',
                                    showlegend=bool(name == dataframe['task'].unique()[0]),  # Only 1 legend.
                                    )
                          )
    # update characteristics shared by all traces
    fig.update_traces(meanline={'visible': True, 'color': 'dimgray'},
                      box={'visible': True, 'width': 0.5, 'line_color': 'dimgray'},
                      points='all',  # Show all points.
                      jitter=0.1,  # Add some jitter on points for better visibility.
                      scalemode='count')  # Scale violin plot area with total count.
    fig.update_layout(violingap=0, violingroupgap=0, violinmode='overlay', hovermode='closest')
    # Replace numeric x positions with the human-readable task labels.
    fig.update_xaxes(tickvals=task_order[dataframe['task'].unique()],
                     ticktext=task_order[dataframe['task'].unique()].index)
    fig.update_yaxes(zeroline=True, zerolinewidth=2, zerolinecolor='LightPink')
    return fig
|
23baa052cf835ba55a43ffa496d606cccadb0c5b
| 3,643,266
|
def measure_single(state, bit):
    """
    Measure a single qubit once and collapse the state accordingly.
    :param state: state tensor with one axis per qubit
    :param bit: index of the qubit to measure
    :return: (measured outcome 0/1, collapsed state)
    """
    num_axes = len(state.shape)
    # Sum |amplitude|^2 over every axis except the measured qubit's axis
    # (axes are ordered so qubit `bit` maps to axis n-1-bit).
    keep_axis = num_axes - 1 - bit
    reduce_axes = tuple(a for a in range(num_axes) if a != keep_axis)
    probs = np.sum(np.abs(state) ** 2, axis=reduce_axes)
    # Sample the measurement outcome from the marginal distribution.
    rnd = np.random.rand()
    out = 0 if rnd < probs[0] else 1
    prob = probs[out]
    # Projector onto the observed outcome, renormalized by 1/sqrt(prob).
    scale = 1.0 / np.sqrt(prob)
    if out == 0:
        matrix = np.array([[scale, 0.0],
                           [0.0, 0.0]], complex)
    else:
        matrix = np.array([[0.0, 0.0],
                           [0.0, scale]], complex)
    state = transfer_state(state, matrix, [bit])
    return out, state
|
ff2c18039e6900febaa731f9a3db9f16b797e18b
| 3,643,267
|
def anchor_to_offset(anchors, ground_truth):
    """Encodes the anchor regression predictions with the
    ground truth.
    Args:
        anchors: A numpy array of shape (N, 6) representing
            the generated anchors.
        ground_truth: A numpy array of shape (6,) containing
            the label boxes in the anchor format.
    Returns:
        anchor_offsets: A numpy array of shape (N, 6)
            encoded/normalized with the ground-truth, representing the
            offsets.
    """
    fc.check_anchor_format(anchors)
    anchors = np.asarray(anchors).reshape(-1, 6)
    ground_truth = np.reshape(ground_truth, (6,))
    centers = anchors[:, :3]
    dims = anchors[:, 3:]
    # t_gt = (gt_center - anchor_center) / anchor_dim, per axis x/y/z
    center_offsets = (ground_truth[:3] - centers) / dims
    # t_d_gt = log(gt_dim / anchor_dim), per dimension dx/dy/dz
    dim_offsets = np.log(ground_truth[3:] / dims)
    return np.concatenate((center_offsets, dim_offsets), axis=1)
|
3aced37f0838d2ab4f90ce0e212747111fc87876
| 3,643,268
|
def horizontal_flip(img_array):
    """Flip image horizontally."""
    # flipCode=1 mirrors around the vertical axis (left <-> right).
    return cv2.flip(img_array, 1)
|
7f53442b072127e5c02253aefabcc8e7bd422504
| 3,643,269
|
def chunker(file_path):
    """
    Read all non-blank lines from a file, stripped of surrounding whitespace.
    :param file_path: path of the file to read
    :return: list of stripped, non-empty lines
    """
    with open(file_path, 'r') as file_object:
        # Keep only lines that are non-empty after stripping.
        return [line.strip() for line in file_object if line.strip()]
|
a60b6f3cc7003955ae6acd8ac5e74574cdbd5976
| 3,643,270
|
def legalize_names(varnames):
    """returns a dictionary for conversion of variable names to legal
    parameter names.
    """
    # Single-pass character translation, equivalent to the chained replaces
    # '_' -> '__', then '$' -> '_', then '.' -> '_'.
    table = str.maketrans({"_": "__", "$": "_", ".": "_"})
    var_map = {}
    for var in varnames:
        new_name = var.translate(table)
        assert new_name not in var_map
        var_map[var] = new_name
    return var_map
|
ad8e9ef3394d4ac3cfa80198f488c1834bd227fc
| 3,643,272
|
def _IsUidUsed(uid):
    """Check if there is any process in the system running with the given user-id

    @type uid: integer
    @param uid: the user-id to be checked.

    """
    # pgrep exits 0 when at least one process matched, 1 when none did;
    # anything else signals a pgrep failure.
    result = utils.RunCmd([constants.PGREP, "-u", uid])
    if result.exit_code == 0:
        return True
    if result.exit_code == 1:
        return False
    raise errors.CommandError("Running pgrep failed. exit code: %s"
                              % result.exit_code)
|
8a4e529a98298ec4c2d9df30c6fc28a91c124edd
| 3,643,273
|
def mobilenetv3_large(data_channel):
    """
    Constructs a MobileNetV3-Large model

    :param data_channel: number of input channels forwarded to MobileNetV3
    :return: MobileNetV3 instance named ``BackboneName.MobileNetv3_large``
    """
    # Per-block configuration table passed to MobileNetV3; columns are:
    #   k  - kernel size
    #   t  - expansion (hidden) channel count
    #   c  - output channel count
    #   SE - 1 to enable squeeze-and-excite, 0 otherwise
    #   NL - nonlinearity selector flag (semantics defined by MobileNetV3;
    #        presumably ReLU vs h-swish -- confirm in that class)
    #   s  - stride
    cfgs = [
        # k, t, c, SE, NL, s
        [3, 16, 16, 0, 0, 1],
        [3, 64, 24, 0, 0, 2],
        [3, 72, 24, 0, 0, 1],
        [5, 72, 40, 1, 0, 2],
        [5, 120, 40, 1, 0, 1],
        [5, 120, 40, 1, 0, 1],
        [3, 240, 80, 0, 1, 2],
        [3, 200, 80, 0, 1, 1],
        [3, 184, 80, 0, 1, 1],
        [3, 184, 80, 0, 1, 1],
        [3, 480, 112, 1, 1, 1],
        [3, 672, 112, 1, 1, 1],
        [5, 672, 160, 1, 1, 1],
        [5, 672, 160, 1, 1, 2],
        [5, 960, 160, 1, 1, 1]
    ]
    model = MobileNetV3(cfgs, mode='large', data_channel=data_channel)
    model.set_name(BackboneName.MobileNetv3_large)
    return model
|
ec14d6628bf9e69f05a79f4207a32e00fbae1b8b
| 3,643,274
|
def ravel_lom_dims(tensor, name='ravel_lom_dims'):
    """Assumes LOM is in the last 3 dims; flattens them into a single axis."""
    leading_dims = tensor.shape_as_list()[:-3]
    return tf.reshape(tensor, leading_dims + [-1], name=name)
|
f0f56c4f747b4a40e63bddbb7d6dd3452044151f
| 3,643,275
|
def run_cv(cfg, df, horiz, freq, cv_start, cv_stride=1, dc_dict=None,
           metric="smape"):
    """Run a sliding-window temporal cross-validation (aka backtest) using a
    given forecasting function (`func`).

    :param cfg: ``(params, func)`` pair; ``params`` is a "|"-separated string
        whose first field is the model type, ``func`` is a forecasting
        callable invoked as ``func(y, horizon, freq, dc=...)``
    :param df: dataframe with a ``"demand"`` column holding the 1-D series;
        its index supplies the timestamps
    :param horiz: requested forecast horizon length
    :param freq: frequency string used to back-fill timestamps when the
        padded series is longer than ``df``
    :param cv_start: position of the first backtest origin in the series
    :param cv_stride: step between successive backtest origins
    :param dc_dict: mapping from series position to the ``dc`` value passed
        to ``func``; NOTE(review): indexed unconditionally below, so the
        ``None`` default would raise -- confirm callers always supply it
    :param metric: error metric name forwarded to ``calc_metrics``
    :return: single-row results dataframe with metrics, the backtest
        actuals/forecasts/timestamps and the final forecast ``yhat``
    """
    y = df["demand"].values
    # allow only 1D time-series arrays
    assert(y.ndim == 1)
    params, func = cfg
    # Left-pad a length-1 series with a 1 so there is some history to fit on.
    if len(y) == 1:
        y = np.pad(y, [1, 0], constant_values=1)
    # the cross-val horizon length may shrink depending on the length of
    # historical data; shrink the horizon if it is >= the timeseries
    if horiz >= len(y):
        cv_horiz = len(y) - 1
    else:
        cv_horiz = horiz
    if len(df) == len(y):
        ts = df.index
    else:
        # Padding above made y longer than df; synthesize the missing
        # leading timestamps at the given frequency.
        assert len(y) > len(df)
        diff = len(y) - len(df)
        ts = np.append(
            pd.date_range(end=df.index[0], freq=freq, periods=diff+1), df.index)
    # sliding window horizon actuals
    Y = sliding_window_view(y[cv_start:], cv_horiz)[::cv_stride,:]
    Ycv = []
    # | y | horiz |..............|
    #   | y | horiz |.............|
    #     | y | horiz |............|
    #     ::
    #             ::
    #             | y | horiz |
    for i in range(cv_start, len(y)-cv_horiz+1, cv_stride):
        yp = func(y[:i], cv_horiz, freq, dc=dc_dict[i])
        Ycv.append(yp)
    # keep the backtest forecasts at each cv_stride
    Ycv = np.vstack(Ycv)
    # keep the backtest forecast time indices
    Yts = sliding_window_view(ts[cv_start:], cv_horiz)[::cv_stride,:]
    assert Yts.shape == Y.shape
    assert Yts.shape == Ycv.shape
    assert not np.any(np.isnan(Ycv))
    assert Ycv.shape == Y.shape
    # calc. error metrics
    df_results = calc_metrics(Y, Ycv, metric)
    df_results.insert(0, "model_type", params.split("|")[0])
    df_results.insert(1, "params", params)
    # store the final backtest window actuals and predictions
    df_results["y_cv"] = [Y]
    df_results["yp_cv"] = [Ycv]
    df_results["ts_cv"] = [Yts]
    # generate the final forecast (1-dim)
    df_results["yhat"] = [func(y, horiz, freq, dc=dc_dict[len(y)-1])]
    return df_results
|
cb95657aeaa4cb74c6252d46029db88c6be18ddb
| 3,643,276
|
import pyarrow as pa
def ST_Area(geos):
    """
    Calculate the 2D Cartesian (planar) area of geometry.
    :type geos: Series(dtype: object)
    :param geos: Geometries in WKB form.
    :rtype: Series(dtype: float64)
    :return: The value that represents the area of geometry.
    :example:
      >>> import pandas
      >>> import arctern
      >>> data = ["POLYGON((0 0,1 0,1 1,0 1,0 0))", "POLYGON((0 0,0 8,8 8,8 0,0 0))"]
      >>> data = pandas.Series(data)
      >>> rst = arctern.ST_Area(arctern.ST_GeomFromText(data))
      >>> print(rst)
          0     1.0
          1    64.0
          dtype: float64
    """
    # Wrap the input as an Arrow binary array before dispatching to the
    # native ST_Area kernel.
    arr_geos = pa.array(geos, type='binary')
    return arctern_caller(arctern_core_.ST_Area, arr_geos)
|
ae69ec90c9e5c54c5f6afa468d6cb1212e64eaf4
| 3,643,277
|
from operator import matmul
def P_from_K_R_t(K, R, t):
    """Returns the 3x4 projection matrix P = K [R | t]."""
    # Work in float64 for numerical consistency, then compose K with [R | t].
    Rt = np.column_stack((R.astype(np.float64), t.astype(np.float64)))
    return K.astype(np.float64) @ Rt
|
0304ea513df81a67e653ba1e3516c39ec38f94ad
| 3,643,279
|
from typing import List
from typing import Union
from typing import Callable
from typing import Type
from typing import OrderedDict
from typing import Any
def multi_value_precondition(parameter_selector: List[Union[int, str]], predicate: Callable[..., bool],
                             exception_factory: Union[Type[BaseException], Callable[[OrderedDict], BaseException]]
                             =PreconditionViolatedError) -> Any:
    """
    This is a factory that will create a decorator for a method based on a parameter selector and a predicate. The
    decorator will cause the method to raise an Exception (PreConditionViolatedError) if the selected parameters do not
    satisfy the predicate.
    :param parameter_selector: a selector that indicates which parameters of the method should be checked. This may
    be ints for positional parameters or strings for keyword parameters. The parameter_selector will indicate some
    parameters, these will be passed (positionally in the listed order) to the predicate.
    :param predicate: a predicate that evaluates parameters (function that returns True or False)
    :param exception_factory: Either an Exception class or a Callable that can create the desired Exception (defaults
    to PreconditionViolatedError)
    :return: a decorator based on the passed parameter selector and predicate
    """
    def decorator(decorated_function):
        """
        This decorator adds a check to this function that one of its parameters matches a predicate
        :param decorated_function: The function to be decorated
        :return: The decorated function
        """
        # Validate the selector/factory once, at decoration time, so broken
        # usage fails immediately rather than on the first call.
        _signature = signature(decorated_function)
        _verify_decorator_correctness(_signature, parameter_selector, exception_factory)
        @wraps(decorated_function)
        def function_with_condition(*args, **kwargs):
            """
            a decorated function that checks parameter values of the original match a given predicate.
            If the parameters do not match, the original function is never called.
            :param args: The positional arguments for the original function
            :param kwargs: The keyword arguments for the original function
            :return: The result of the function if the parameters matched the predicate
            :raises: PreConditionViolatedError if the parameters of the function do not match the predicate
            """
            arguments = _get_bound_arguments(_signature, *args, **kwargs)
            # Ordered name -> value mapping of only the selected parameters;
            # its values are fed positionally to the predicate.
            selected_parameters = _get_key_value_pairs(arguments, parameter_selector)
            if not predicate(*selected_parameters.values()):
                if isinstance(exception_factory, type) and issubclass(exception_factory, BaseException):
                    # Exception *class*: build a descriptive message from the
                    # offending parameters and raise an instance of it.
                    parameter_descriptions = map(lambda key_value: _parameter_description(*key_value),
                                                 selected_parameters.items())
                    descriptions = ', '.join(parameter_descriptions).capitalize()
                    message = f"{descriptions} failed to pass precondition {predicate.__name__}"
                    raise exception_factory(message)
                elif isinstance(exception_factory, FunctionType):
                    # Factory *function*: let it build the exception itself.
                    # NOTE(review): FunctionType excludes e.g. functools.partial
                    # and built-in callables -- confirm that is intended.
                    raise exception_factory(selected_parameters)
                else:
                    raise MalformedDecoratorError(f'Incorrect type for exception_factory: {type(exception_factory)}')
            return decorated_function(*args, **kwargs)
        return function_with_condition
    return decorator
|
97632d8e77858e3d854a8f610fae772a8a6c3c5b
| 3,643,280
|
from operator import itemgetter
import json
def activity_list_retrieve_view(request):  # activityListRetrieve
    """
    Retrieve activity so we can populate the news page.

    Aggregates three kinds of entries into one list, each normalized to a
    common dict shape so the client can render them uniformly:
      1. ActivityNoticeSeed rows (NOTICE_FRIEND_ENDORSEMENTS_SEED)
      2. ActivityPost rows
      3. For each of the above, its tree of ActivityComment rows (one level
         of child comments under each top-level comment)
    The combined list is sorted newest-first by 'date_created'.

    :param request: HttpRequest; reads voter_device_id and an optional
        'activity_tidbit_we_vote_id_list[]' filter from GET parameters.
    :return: HttpResponse with JSON payload {status, success, activity_list}
    """
    status = ''
    activity_list = []
    activity_manager = ActivityManager()
    activity_notice_seed_list = []
    activity_post_list = []
    voter_device_id = get_voter_device_id(request)  # We standardize how we take in the voter_device_id
    voter_friend_we_vote_id_list = []
    voter_we_vote_id = ''
    activity_tidbit_we_vote_id_list = request.GET.getlist('activity_tidbit_we_vote_id_list[]')
    # Drop empty strings submitted by the client
    activity_tidbit_we_vote_id_list = list(filter(None, activity_tidbit_we_vote_id_list))
    if positive_value_exists(voter_device_id):
        voter_we_vote_id = fetch_voter_we_vote_id_from_voter_device_link(voter_device_id)
    if not positive_value_exists(voter_we_vote_id):
        # Without a voter we cannot scope the activity feed -- bail out early
        status += "RETRIEVE_ACTIVITY_LIST_MISSING_VOTER_WE_VOTE_ID "
        json_data = {
            'status': status,
            'success': False,
            'activity_list': activity_list,
        }
        return HttpResponse(json.dumps(json_data), content_type='application/json')
    # Retrieve the NOTICE_FRIEND_ENDORSEMENTS_SEED and the ActivityPost entries below
    results = activity_manager.retrieve_activity_notice_seed_list_for_recipient(
        recipient_voter_we_vote_id=voter_we_vote_id,
        kind_of_seed_list=[NOTICE_FRIEND_ENDORSEMENTS_SEED],
        limit_to_activity_tidbit_we_vote_id_list=activity_tidbit_we_vote_id_list)
    if results['success']:
        activity_notice_seed_list = results['activity_notice_seed_list']
        voter_friend_we_vote_id_list = results['voter_friend_we_vote_id_list']
    else:
        status += results['status']
        status += "RETRIEVE_ACTIVITY_LIST_FAILED "
    for activity_notice_seed in activity_notice_seed_list:
        new_positions_entered_count = 0
        position_name_list = []
        position_we_vote_id_list = []
        # In this scenario we want to return both friends and public values
        # Position names
        if positive_value_exists(activity_notice_seed.position_names_for_friends_serialized):
            position_name_list += json.loads(activity_notice_seed.position_names_for_friends_serialized)
        if positive_value_exists(activity_notice_seed.position_names_for_public_serialized):
            position_name_list += json.loads(activity_notice_seed.position_names_for_public_serialized)
        # Position we_vote_ids
        if positive_value_exists(activity_notice_seed.position_we_vote_ids_for_friends_serialized):
            position_we_vote_id_list += json.loads(activity_notice_seed.position_we_vote_ids_for_friends_serialized)
        if positive_value_exists(activity_notice_seed.position_we_vote_ids_for_public_serialized):
            position_we_vote_id_list += json.loads(activity_notice_seed.position_we_vote_ids_for_public_serialized)
        new_positions_entered_count += len(position_we_vote_id_list)
        if not positive_value_exists(activity_notice_seed.we_vote_id):
            # Saving presumably assigns a we_vote_id -- TODO confirm model save() behavior
            try:
                activity_notice_seed.save()
            except Exception as e:
                status += "COULD_NOT_UPDATE_SEED_WE_VOTE_ID: " + str(e) + ' '
        activity_notice_seed_dict = {
            'date_created':             activity_notice_seed.date_of_notice.strftime('%Y-%m-%d %H:%M:%S'),
            'date_last_changed':        activity_notice_seed.date_last_changed.strftime('%Y-%m-%d %H:%M:%S'),
            'date_of_notice':           activity_notice_seed.date_of_notice.strftime('%Y-%m-%d %H:%M:%S'),
            'id':                       activity_notice_seed.id,  # We normalize to generate activityTidbitKey
            'activity_notice_seed_id':  activity_notice_seed.id,
            'kind_of_activity':         "ACTIVITY_NOTICE_SEED",
            'kind_of_seed':             activity_notice_seed.kind_of_seed,
            'new_positions_entered_count': new_positions_entered_count,
            'position_name_list':       position_name_list,
            'position_we_vote_id_list': position_we_vote_id_list,
            'speaker_name':             activity_notice_seed.speaker_name,
            'speaker_organization_we_vote_id': activity_notice_seed.speaker_organization_we_vote_id,
            'speaker_voter_we_vote_id': activity_notice_seed.speaker_voter_we_vote_id,
            'speaker_profile_image_url_medium': activity_notice_seed.speaker_profile_image_url_medium,
            'speaker_profile_image_url_tiny': activity_notice_seed.speaker_profile_image_url_tiny,
            'speaker_twitter_handle':   activity_notice_seed.speaker_twitter_handle,
            'speaker_twitter_followers_count': activity_notice_seed.speaker_twitter_followers_count,
            'we_vote_id':               activity_notice_seed.we_vote_id,
        }
        activity_list.append(activity_notice_seed_dict)
    # ####################################################
    # Retrieve entries directly in the ActivityPost table
    results = activity_manager.retrieve_activity_post_list_for_recipient(
        recipient_voter_we_vote_id=voter_we_vote_id,
        limit_to_activity_tidbit_we_vote_id_list=activity_tidbit_we_vote_id_list,
        voter_friend_we_vote_id_list=voter_friend_we_vote_id_list)
    if results['success']:
        activity_post_list = results['activity_post_list']
    else:
        status += results['status']
        status += "RETRIEVE_ACTIVITY_POST_LIST_FAILED "
    for activity_post in activity_post_list:
        date_created_string = ''
        if activity_post.date_created:
            date_created_string = activity_post.date_created.strftime('%Y-%m-%d %H:%M:%S')
        if not positive_value_exists(activity_post.we_vote_id):
            try:
                activity_post.save()
            except Exception as e:
                status += "COULD_NOT_UPDATE_POST_WE_VOTE_ID: " + str(e) + ' '
        activity_post_dict = {
            'date_created':             date_created_string,
            'date_last_changed':        activity_post.date_last_changed.strftime('%Y-%m-%d %H:%M:%S'),
            'date_of_notice':           date_created_string,
            'id':                       activity_post.id,  # We normalize to generate activityTidbitKey
            'activity_post_id':         activity_post.id,
            'kind_of_activity':         'ACTIVITY_POST',
            'kind_of_seed':             '',
            'new_positions_entered_count': 0,
            'position_name_list':       [],
            'position_we_vote_id_list': [],
            'speaker_name':             activity_post.speaker_name,
            'speaker_organization_we_vote_id': activity_post.speaker_organization_we_vote_id,
            'speaker_voter_we_vote_id': activity_post.speaker_voter_we_vote_id,
            'speaker_profile_image_url_medium': activity_post.speaker_profile_image_url_medium,
            'speaker_profile_image_url_tiny': activity_post.speaker_profile_image_url_tiny,
            'speaker_twitter_handle':   activity_post.speaker_twitter_handle,
            'speaker_twitter_followers_count': activity_post.speaker_twitter_followers_count,
            'statement_text':           activity_post.statement_text,
            'visibility_is_public':     activity_post.visibility_is_public,
            'we_vote_id':               activity_post.we_vote_id,
        }
        activity_list.append(activity_post_dict)
    # Now cycle through these activities and retrieve all related comments
    activity_list_with_comments = []
    for activity_tidbit_dict in activity_list:
        results = activity_manager.retrieve_activity_comment_list(
            parent_we_vote_id=activity_tidbit_dict['we_vote_id'])
        activity_comment_list = []
        if results['success']:
            activity_comment_object_list = results['activity_comment_list']
            for activity_comment in activity_comment_object_list:
                # Retrieve the Child comments
                child_results = activity_manager.retrieve_activity_comment_list(
                    parent_comment_we_vote_id=activity_comment.we_vote_id)
                child_comment_list = []
                # Bug fix: this previously checked results['success'] (the parent
                # query), silently appending child lists even when the child
                # query failed -- and skipping them if the parent list ever
                # carried success=False. Check the child query's own result.
                if child_results['success']:
                    child_comment_object_list = child_results['activity_comment_list']
                    for child_comment in child_comment_object_list:
                        child_comment_dict = {
                            'date_created':             child_comment.date_created.strftime('%Y-%m-%d %H:%M:%S'),
                            'date_last_changed':        child_comment.date_last_changed.strftime('%Y-%m-%d %H:%M:%S'),
                            'commenter_name':           child_comment.commenter_name,
                            'commenter_organization_we_vote_id': child_comment.commenter_organization_we_vote_id,
                            'commenter_voter_we_vote_id': child_comment.commenter_voter_we_vote_id,
                            'commenter_profile_image_url_medium': child_comment.commenter_profile_image_url_medium,
                            'commenter_profile_image_url_tiny': child_comment.commenter_profile_image_url_tiny,
                            'commenter_twitter_handle': child_comment.commenter_twitter_handle,
                            'commenter_twitter_followers_count': child_comment.commenter_twitter_followers_count,
                            'parent_we_vote_id':        child_comment.parent_we_vote_id,
                            'parent_comment_we_vote_id': child_comment.parent_comment_we_vote_id,
                            'statement_text':           child_comment.statement_text,
                            'visibility_is_public':     child_comment.visibility_is_public,
                            'we_vote_id':               child_comment.we_vote_id,
                        }
                        child_comment_list.append(child_comment_dict)
                activity_comment_dict = {
                    'comment_list':             child_comment_list,
                    'date_created':             activity_comment.date_created.strftime('%Y-%m-%d %H:%M:%S'),
                    'date_last_changed':        activity_comment.date_last_changed.strftime('%Y-%m-%d %H:%M:%S'),
                    'commenter_name':           activity_comment.commenter_name,
                    'commenter_organization_we_vote_id': activity_comment.commenter_organization_we_vote_id,
                    'commenter_voter_we_vote_id': activity_comment.commenter_voter_we_vote_id,
                    'commenter_profile_image_url_medium': activity_comment.commenter_profile_image_url_medium,
                    'commenter_profile_image_url_tiny': activity_comment.commenter_profile_image_url_tiny,
                    'commenter_twitter_handle': activity_comment.commenter_twitter_handle,
                    'commenter_twitter_followers_count': activity_comment.commenter_twitter_followers_count,
                    'parent_we_vote_id':        activity_comment.parent_we_vote_id,
                    'parent_comment_we_vote_id': activity_comment.parent_comment_we_vote_id,
                    'statement_text':           activity_comment.statement_text,
                    'visibility_is_public':     activity_comment.visibility_is_public,
                    'we_vote_id':               activity_comment.we_vote_id,
                }
                activity_comment_list.append(activity_comment_dict)
        activity_tidbit_dict['activity_comment_list'] = activity_comment_list
        activity_list_with_comments.append(activity_tidbit_dict)
    # Order entries in the activity_list by "date_created"
    activity_list_ordered = sorted(activity_list_with_comments, key=itemgetter('date_created'), reverse=True)
    json_data = {
        'status': status,
        'success': True,
        'activity_list': activity_list_ordered,
    }
    return HttpResponse(json.dumps(json_data), content_type='application/json')
|
4ab7d7e2cd5c0a3c812b6ec1729efda19fb87ced
| 3,643,281
|
import http
from typing import Optional
def edit_action(
    request: http.HttpRequest,
    pk: int,
    workflow: Optional[models.Workflow] = None,
    action: Optional[models.Action] = None,
) -> http.HttpResponse:
    """Dispatch to the action-specific edit view.

    :param request: Request object
    :param pk: Action PK (unused here; the decorator resolves it into action)
    :param workflow: Workflow being processed
    :param action: Action being edited (set by the decorator)
    :return: HTML response produced by the process factory
    """
    del pk  # already resolved into the action object by the decorator
    factory = services.ACTION_PROCESS_FACTORY
    return factory.process_edit_request(request, workflow, action)
|
43f128dfe2abd47c1c6cf78d667343d9086aeb98
| 3,643,283
|
from typing import Any
from typing import Set
from typing import KeysView
def to_set(data: Any) -> Set[Any]:
    """Wrap arbitrary hashable data in a set. A single None becomes the empty set.

    ```python
    x = fe.util.to_set(None)  # set()
    x = fe.util.to_set([None])  # {None}
    x = fe.util.to_set(7)  # {7}
    x = fe.util.to_set([7, 8])  # {7,8}
    x = fe.util.to_set({7})  # {7}
    x = fe.util.to_set((7))  # {7}
    ```

    Args:
        data: Input data, within or without a python container. The `data` must be hashable.

    Returns:
        The input `data` but inside a set instead of whatever other container type used to hold it.
    """
    if data is None:
        return set()
    if isinstance(data, set):
        return data  # already a set; hand back the same object
    if isinstance(data, (tuple, list, KeysView)):
        return set(data)
    return {data}
|
df2649d0b7c7c2323984edd3eeea76eff0eab4d2
| 3,643,284
|
def Pei92(wavelength, Av, z, Rv=-99.0, ext_law="smc", Xcut=False):
    """
    Extinction laws from Pei 1992 article

    Parameters
    ----------
    wavelength: `array` or `float`
        wavelength in angstroms (observer frame)
    Av: `float`
        amount of extinction in the V band
    z: `float`
        redshift
    Rv: `float`, optional, default: -99.
        selective attenuation Rv = Av / E(B-V)
        if -99. use the per-law default values from the article
        if a float is given, use this value instead
    ext_law: `str`
        type of extinction law to use.
        Choices: mw, lmc, smc
    Xcut: `boolean`, optional, default: False
        Whether to set attenuation to 0 for wavelength below 700 angstrom
        Useful when coupling with X-ray data

    Returns
    -------
    [Alambda_over_Av, Trans_dust]
    Alambda_over_Av : `array`
        attenuation as a function of wavelength normalised by Av
        (attenuation in V band)
    Trans_dust: `array`
        transmission through dust as a function of wavelength

    Raises
    ------
    ValueError
        if ext_law is not one of 'mw', 'lmc', 'smc'
    """
    # Convert observed angstroms to rest-frame microns
    wvl = wavelength * 1e-4 / (1 + z)
    if ext_law.lower() == "smc":
        if Rv == -99.:
            Rv = 2.93
        a = [185, 27, 0.005, 0.010, 0.012, 0.03]
        wvl_ = [0.042, 0.08, 0.22, 9.7, 18, 25]
        b = [90, 5.50, -1.95, -1.95, -1.80, 0.0]
        n = [2.0, 4.0, 2.0, 2.0, 2.0, 2.0]
    elif ext_law.lower() == "lmc":
        if Rv == -99.:
            Rv = 3.16
        a = [175, 19, 0.023, 0.005, 0.006, 0.02]
        wvl_ = [0.046, 0.08, 0.22, 9.7, 18, 25]
        b = [90, 5.5, -1.95, -1.95, -1.8, 0.0]
        n = [2.0, 4.5, 2.0, 2.0, 2.0, 2.0]
    elif ext_law.lower() == "mw":
        if Rv == -99.:
            Rv = 3.08
        a = [165, 14, 0.045, 0.002, 0.002, 0.012]
        wvl_ = [0.046, 0.08, 0.22, 9.7, 18, 25]
        b = [90, 4.0, -1.95, -1.95, -1.8, 0.0]
        n = [2.0, 6.5, 2.0, 2.0, 2.0, 2.0]
    else:
        # Previously an unknown law fell through and crashed later with an
        # opaque NameError; fail fast with a clear message instead.
        raise ValueError(
            "Unknown ext_law '%s'; expected 'mw', 'lmc' or 'smc'" % ext_law)

    # Sum of the 6 Drude-like terms of Pei92 (eq. 20)
    sums = np.zeros(len(wvl))
    for i in range(len(a)):
        sums += a[i] / ((wvl / wvl_[i]) ** n[i] + (wvl_[i] / wvl) ** n[i] + b[i])
    # Need to check whether extrapolation is needed
    # outside the range defined in Pei92

    # convert Alambda_over_Ab to Alambda_over_Av
    Alambda_over_Av = (1.0 / Rv + 1.0) * sums

    # Applied a cut for wavelength below 700 angstrom
    # Useful when coupling with Xray data
    if Xcut:
        w = np.where(wvl < 0.07)
        Alambda_over_Av[w] = 0

    # Return optical depth due to dust reddening in funtion of wavelength
    Tau_dust = Av * Alambda_over_Av / 1.086
    Trans_dust = np.exp(-Tau_dust)
    # Clamp to the physical range [0, 1]
    Trans_dust[Trans_dust < 0] = 0
    Trans_dust[Trans_dust > 1] = 1

    return [Alambda_over_Av, Trans_dust]
|
9b0b9690f548319ffed7fcc964dc0e651828371f
| 3,643,285
|
import mimetypes
def get_mimetype(path):
    """
    Guess a file's mimetype from its path; None when unknown.
    """
    return mimetypes.guess_type(path)[0]
|
7677259fcdf052f9647fe41e4b4cb71d83ea50cd
| 3,643,287
|
import select
async def read_clients_epics(
    client_id: int = None, session: Session = Depends(get_session)
):
    """Return (client id, client name, epic name) rows for one client."""
    query = (
        select(Client.id, Client.name, Epic.name)
        .select_from(Client)
        .join(Epic)
        .where(Client.id == client_id)
    )
    return session.exec(query).all()
|
e5af5d2776a941cde83ea341143732bcdb67da2a
| 3,643,288
|
def id_number_checksum(gd):
    """
    Calculate a Swedish ID number check digit via the Luhn algorithm.

    gd is a mapping with 'year', 'month', 'day' and 'serial' digit strings.
    """
    digits = gd['year'] + gd['month'] + gd['day'] + gd['serial']
    total = 0
    for position, ch in enumerate(digits):
        # Letter? It's an interimspersonnummer; substitute the letter with 1.
        if ch.isalpha():
            ch = 1
        product = (2 if position % 2 == 0 else 1) * int(ch)
        if product > 9:
            # products are at most 18, so digit sum == tens digit + ones digit
            product = product // 10 + product % 10
        total += product
    if total % 10 == 0:
        return 0
    return ((total // 10) + 1) * 10 - total
|
bbf0a9fa7f6ed2c2bfc414173fd2ac9e9c1d8835
| 3,643,289
|
def date_loss_l1(pred,
                 target_min,
                 target_max,
                 mask):
    """L1 loss of a date prediction against the interval [target_min, target_max].

    Zero when the prediction falls inside the interval; otherwise the
    distance to the nearest bound. Entries with mask == 0 contribute nothing.
    """
    pred = jnp.squeeze(pred, 0)
    # Penalty below the lower bound plus penalty above the upper bound;
    # at most one of the two terms is non-zero per element.
    undershoot = jnp.maximum(target_min - pred, 0)
    overshoot = jnp.maximum(pred - target_max, 0)
    return (undershoot + overshoot) * mask.astype(pred.dtype)
|
12f0d5a1f7efbb8d51501c4d3fe41d192528010d
| 3,643,290
|
def new_single_genres(genres, val):
    """Collapse multi-genre entries in ``genres`` down to a single genre id.

    Entries with one genre are converted to int in place. For entries listing
    several genres, the surviving genre is chosen by the example count read
    from column 1 of fma_metadata/genres.csv:
      val == "high": keep the genre with the highest count
      val == "low":  keep the genre with the lowest count

    Args:
        genres: pandas Series of genre strings such as "[21, 12]"
            (mutated in place, index by index) -- assumed Series since
            .items() / .shape / .astype are used; TODO confirm caller type.
        val: "high" or "low" selection strategy. NOTE(review): any other
            value leaves multi-genre entries unconverted (still strings).

    Returns:
        The same Series, cast to int dtype.
    """
    genres_file = "fma_metadata/genres.csv"
    reference_genres = pd.read_csv(genres_file)
    # Column 1 holds the per-genre example counts, column 0 the genre ids
    reference_tracks = reference_genres.iloc[:, 1]
    reference_genres = reference_genres.iloc[:, 0]
    for index, genre in genres.items():
        split = genre.split(",")
        if len(split) == 1:
            # Single genre: strip the surrounding brackets and cast
            new_genre = split[0]
            new_genre = new_genre.strip("[]")
            genres[index] = int(new_genre)
        elif len(split) > 1:
            # Multiple genres: map each candidate to its example count
            new_genre = [int(item.strip(" [ ] ")) for item in split]
            count = {}
            for indices, value in reference_genres.items():
                if value in new_genre:
                    count[value] = reference_tracks[indices]
            # Sort candidates ascending by count (dicts preserve insert order)
            counts = {k: v for k, v in sorted(count.items(), key=lambda item: item[1])}
            if val == "high":
                genres[index] = int(list(counts.keys())[-1])
            elif val == "low":
                genres[index] = int(list(counts.keys())[0])
    print("The shape of genres after single is:{}".format(genres.shape))
    genres = genres.astype('int')
    return genres
|
0cabcb93548007b3cd87dc40740da5d0f4614867
| 3,643,291
|
def roparameter(cosphi, hist, s_cosphi=0.25):
    """
    Compute the orientation parameter xi from an angle histogram.

    Parameters
    ----------
    cosphi : array of cos(phi) bin centers
    hist : histogram counts, same shape as cosphi
    s_cosphi : half-width defining the parallel / perpendicular selections

    Returns
    -------
    xi : (N_para - N_perp) / (N_para + N_perp)
    """
    abs_cos = np.abs(cosphi)
    perp_sel = (abs_cos > 1. - s_cosphi).nonzero()
    para_sel = (abs_cos < s_cosphi).nonzero()
    n_para = np.sum(hist[para_sel])
    n_perp = np.sum(hist[perp_sel])
    return (n_para - n_perp) / float(n_para + n_perp)
|
c9f30db90482600c50a9369139fc75c046d57e40
| 3,643,293
|
def polyfill_bbox(
    min_lng, max_lng, min_lat, max_lat, min_resolution=0, max_resolution=30
):
    """Cover a planar bounding box with compact S2 cells between two levels.

    Returns the covering as a string of the form '[id1,id2,...]' with
    signed 64-bit cell ids.
    """
    check_valid_polyfill_resolution(min_resolution, max_resolution)

    coverer = s2sphere.RegionCoverer()
    coverer.min_level = min_resolution
    coverer.max_level = max_resolution

    region = s2sphere.LatLngRect(
        s2sphere.LatLng(radians(min_lat), radians(min_lng)),
        s2sphere.LatLng(radians(max_lat), radians(max_lng)),
    )

    # Convert each unsigned cell id to its signed representation
    id_strings = [
        str(int(uint64_to_int64(cell.id())))
        for cell in coverer.get_covering(region)
    ]
    return '[' + ','.join(id_strings) + ']'
|
d6c2cb0d3f0d7a9eea05a456beda96a1a646e306
| 3,643,295
|
from lvmspec.qa import qalib
import copy
def qa_skysub(param, frame, skymodel, quick_look=False):
    """Calculate QA on SkySubtraction.

    Note: Pixels rejected in generating the SkyModel (as above), are
    not rejected in the stats calculated here. Would need to carry
    along current_ivar to do so.

    Args:
        param : dict of QA parameters : see qa_frame.init_skysub for example.
            Must contain '<CHANNEL>_CONT' wavelength-range entries when
            quick_look is False.
        frame : lvmspec.Frame object; should have been flat fielded.
            frame.meta['CAMERA'][0] is used as the channel letter.
        skymodel : lvmspec.SkyModel object
        quick_look : bool, optional
            If True, do QuickLook specific QA (or avoid some)

    Returns:
        qadict: dict of QA outputs.
            Need to record simple Python objects for yaml (str, float, int).
    """
    #- QAs
    #- first subtract sky to get the sky subtracted frame. This is only for QA. Pipeline does it separately.
    tempframe=copy.deepcopy(frame) #- make a copy so as to propagate frame unaffected so that downstream pipeline uses it.
    subtract_sky(tempframe,skymodel) #- Note: sky subtract is done to get residuals. As part of pipeline it is done in fluxcalib stage
    # Sky residuals first
    qadict = qalib.sky_resid(param, tempframe, skymodel, quick_look=quick_look)
    # Sky continuum
    if not quick_look: # Sky continuum is measured after flat fielding in QuickLook
        channel = frame.meta['CAMERA'][0]
        wrange1, wrange2 = param[channel.upper()+'_CONT']
        skyfiber, contfiberlow, contfiberhigh, meancontfiber, skycont = qalib.sky_continuum(frame,wrange1,wrange2)
        qadict["SKYFIBERID"] = skyfiber.tolist()
        qadict["SKYCONT"] = skycont
        qadict["SKYCONT_FIBER"] = meancontfiber
    if quick_look: # The following can be a *large* dict
        qadict_snr = qalib.SignalVsNoise(tempframe,param)
        qadict.update(qadict_snr)
    return qadict
|
3b53f99ae4936fa6870dc8020d677ffddcb2d4ef
| 3,643,296
|
def _code_to_symbol(code):
    """
    Build the exchange-prefixed symbol for a stock code.

    Known index labels map through ct.INDEX_LIST; a 6-digit code is
    prefixed 'sh' when it starts with 5/6/9, otherwise 'sz'; anything
    else yields ''.
    """
    if code in ct.INDEX_LABELS:
        return ct.INDEX_LIST[code]
    if len(code) != 6:
        return ''
    prefix = 'sh' if code[:1] in ['5', '6', '9'] else 'sz'
    return '%s%s' % (prefix, code)
|
4b783adad975246c9d6722f6eeeb95a2388d1823
| 3,643,297
|
def gesv(a, b):
    """Solve a linear matrix equation using cusolverDn<t>getr[fs]().

    Computes the solution to a system of linear equation ``ax = b``
    via LU factorization with partial pivoting on the GPU.

    Args:
        a (cupy.ndarray): The matrix with dimension ``(M, M)``.
        b (cupy.ndarray): The matrix with dimension ``(M)`` or ``(M, K)``.

    Returns:
        cupy.ndarray:
            The matrix with dimension ``(M)`` or ``(M, K)``.

    Raises:
        ValueError: on wrong dimensionality, non-square ``a``, shape
            mismatch, or an unsupported dtype.
    """
    if a.ndim != 2:
        raise ValueError('a.ndim must be 2 (actual: {})'.format(a.ndim))
    if b.ndim not in (1, 2):
        raise ValueError('b.ndim must be 1 or 2 (actual: {})'.format(b.ndim))
    if a.shape[0] != a.shape[1]:
        raise ValueError('a must be a square matrix.')
    if a.shape[0] != b.shape[0]:
        raise ValueError('shape mismatch (a: {}, b: {}).'.
                         format(a.shape, b.shape))
    # Promote to at least float32 and map the dtype onto the cusolver
    # routine prefix (s/d/c/z).
    dtype = _numpy.promote_types(a.dtype.char, 'f')
    if dtype == 'f':
        t = 's'
    elif dtype == 'd':
        t = 'd'
    elif dtype == 'F':
        t = 'c'
    elif dtype == 'D':
        t = 'z'
    else:
        raise ValueError('unsupported dtype (actual:{})'.format(a.dtype))
    helper = getattr(_cusolver, t + 'getrf_bufferSize')
    getrf = getattr(_cusolver, t + 'getrf')
    getrs = getattr(_cusolver, t + 'getrs')
    n = b.shape[0]
    nrhs = b.shape[1] if b.ndim == 2 else 1
    a_data_ptr = a.data.ptr
    b_data_ptr = b.data.ptr
    # getrf/getrs overwrite their inputs; asfortranarray may return the
    # original array unchanged (same data pointer), so copy explicitly in
    # that case to avoid clobbering the caller's arrays.
    a = _cupy.asfortranarray(a, dtype=dtype)
    b = _cupy.asfortranarray(b, dtype=dtype)
    if a.data.ptr == a_data_ptr:
        a = a.copy()
    if b.data.ptr == b_data_ptr:
        b = b.copy()
    handle = _device.get_cusolver_handle()
    # Pivot indices and status flag written by the cusolver routines
    dipiv = _cupy.empty(n, dtype=_numpy.int32)
    dinfo = _cupy.empty(1, dtype=_numpy.int32)
    lwork = helper(handle, n, n, a.data.ptr, n)
    dwork = _cupy.empty(lwork, dtype=a.dtype)
    # LU factorization (A = L * U)
    getrf(handle, n, n, a.data.ptr, n, dwork.data.ptr, dipiv.data.ptr,
          dinfo.data.ptr)
    _cupy.linalg._util._check_cusolver_dev_info_if_synchronization_allowed(
        getrf, dinfo)
    # Solves Ax = b (solution written in place into b)
    getrs(handle, _cublas.CUBLAS_OP_N, n, nrhs, a.data.ptr, n,
          dipiv.data.ptr, b.data.ptr, n, dinfo.data.ptr)
    _cupy.linalg._util._check_cusolver_dev_info_if_synchronization_allowed(
        getrs, dinfo)
    return b
|
333f06bd8f91bdfde5526c80894c284580074bb5
| 3,643,298
|
def del_none(d):
    """
    Delete dict keys with None values, and empty lists, recursively.

    Mutates ``d`` in place and returns it. Other falsy values
    (0, '', False, empty dict) are kept.
    """
    # Iterate over a snapshot: deleting from a dict while iterating its
    # live items() view raises RuntimeError on Python 3.
    for key, value in list(d.items()):
        if value is None or (isinstance(value, list) and len(value) == 0):
            del d[key]
        elif isinstance(value, dict):
            del_none(value)
    return d
|
46cf9e331c633f5f69b980f3b10c96306d3478c2
| 3,643,299
|
def aes_block(ciphertext, key):
    """Decrypt one 16-byte ciphertext block with AES in ECB mode.

    Keyword arguments:
    ciphertext -- the 16-byte string to be decrypted
    key -- the byte string key

    Raises ValueError when ciphertext is not exactly one AES block.
    """
    if len(ciphertext) != 16:
        raise ValueError("The ciphertext can only be one block (16 bytes).")
    cipher = Cipher(
        algorithms.AES(key), modes.ECB(), backend=default_backend()
    )
    decryptor = cipher.decryptor()
    return decryptor.update(ciphertext) + decryptor.finalize()
|
b0b894bcf860c92b46235ce45b8fd6c8c045b1ca
| 3,643,300
|
import logging
def getLogger(*args, **kwargs):
    """
    Wrapper around ``logging.getLogger`` that respects `overrideLogLevel <#setOverrideLogLevel>`_.
    """
    logger = logging.getLogger(*args, **kwargs)
    if _overrideLogLevel is None:
        return logger
    # An override is active: clear this logger's own level so it defers
    # to its ancestors (where the override is applied).
    logger.setLevel(logging.NOTSET)
    return logger
|
f4ae90925e8bd20a63997e2e5e04924aeeafbcaa
| 3,643,301
|
def split_metadata_string(text, chunk_length=None):
    """Split string by length.

    Split text to chunks by entered length; the final chunk may be shorter.

    Example:
        ```python
        text = "ABCDEFGHIJKLM"
        result = split_metadata_string(text, 3)
        print(result)
        >>> ['ABC', 'DEF', 'GHI', 'JKL', 'M']
        ```

    Args:
        text (str): Text that will be split into chunks.
        chunk_length (int): Single chunk size. Default chunk_length is
            set to global variable `TVPAINT_CHUNK_LENGTH`.

    Returns:
        list: List of string chunks; empty list when `text` is empty.
    """
    if chunk_length is None:
        chunk_length = TVPAINT_CHUNK_LENGTH
    # Slice off chunk_length characters at a time; slicing past the end of
    # the string simply yields the shorter tail chunk.
    return [
        text[start:start + chunk_length]
        for start in range(0, len(text), chunk_length)
    ]
|
c2e97aa768f64f02ef1a691dfadce3dd9fe5538a
| 3,643,302
|
def load_kimmel_data(root_data_path,
                     flag_size_factor=True, total_ct_per_cell=1e4,
                     flag_log1p=True):
    """Load normalized data from Kimmel et al, GR, 2019.

    1. Size factor normalization to counts per cell (total_ct_per_cell)
    2. log(x+1) transform

    Args:
        root_data_path (str): directory that contains ./Kimmel_GR_2019_data
            with kidney.h5ad, lung.h5ad and spleen.h5ad inside.
        flag_size_factor (bool): apply per-cell count normalization.
        total_ct_per_cell (float): target counts per cell after normalization.
        flag_log1p (bool): apply log(x+1) transform.

    Returns:
        adata (AnnData): Combined data for kidney, lung, and spleen, with
            'tissue', 'sex', 'age', 'age_old' and 'age_num' obs columns added.
    """
    # Load filtered data
    file_path=root_data_path+'/Kimmel_GR_2019_data'
    adata_kidney = read_h5ad(file_path + '/kidney.h5ad')
    adata_lung = read_h5ad(file_path + '/lung.h5ad')
    adata_spleen = read_h5ad(file_path + '/spleen.h5ad')
    # Size factor normalization
    if flag_size_factor == True:
        sc.pp.normalize_per_cell(adata_kidney, counts_per_cell_after=total_ct_per_cell)
        sc.pp.normalize_per_cell(adata_lung, counts_per_cell_after=total_ct_per_cell)
        sc.pp.normalize_per_cell(adata_spleen, counts_per_cell_after=total_ct_per_cell)
    # log(x+1) transform
    if flag_log1p == True:
        sc.pp.log1p(adata_kidney)
        sc.pp.log1p(adata_lung)
        sc.pp.log1p(adata_spleen)
    # Combine data; join='inner' keeps only genes present in all three tissues
    adata = adata_kidney.concatenate(adata_lung, adata_spleen,
                                     batch_key='batch_combine', join='inner')
    # batch_combine 0/1/2 corresponds to concatenation order kidney/lung/spleen
    adata.obs['tissue'] = ''
    adata.obs.loc[adata.obs['batch_combine']=='0', 'tissue'] = 'Kidney'
    adata.obs.loc[adata.obs['batch_combine']=='1', 'tissue'] = 'Lung'
    adata.obs.loc[adata.obs['batch_combine']=='2', 'tissue'] = 'Spleen'
    adata.obs['sex'] = 'male'
    # Recode the original young/old labels as ages in months
    adata.obs['age_old'] = adata.obs['age'].values.copy()
    adata.obs['age'] = ['7m' if x=='young' else '22m' for x in adata.obs['age_old']]
    adata.obs['age_num'] = [7 if x=='young' else 22 for x in adata.obs['age_old']]
    return adata
|
324f5779c180811db0b9316125553f7089d5a34b
| 3,643,303
|
import requests
def get_coupon_page() -> bytes:
    """
    Gets the coupon page HTML.

    Returns the raw response body, or None when the request fails.
    """
    try:
        response = requests.get(COUPONESE_DOMINOS_URL)
        return response.content
    except RequestException as e:
        # e.response is None for connection-level failures (timeouts, DNS),
        # so guard before dereferencing or the error handler itself raises.
        if e.response is not None:
            bot.logger.error(e.response.content)
        else:
            bot.logger.error(str(e))
        return None
|
919cd65ec9e4f0af7b06a79c8aa962f164fb7af6
| 3,643,304
|
def get_program_similarity(fingerprint_a, fingerprint_b):
    """Find similarity between fingerprint of two programs.

    A fingerprint is a subset of k-gram hashes generated from program. Each of
    the k-gram hashes is formed by hashing a substring of length K and hence
    fingerprint is indirectly based on substrings of a program. Fingerprint acts
    as identity of the program and can be used to compare two programs.

    Args:
        fingerprint_a: list((int, int)). Fingerprint of first program. First
            integer stores the fingerprint hash value and 2nd integer stores
            location in the program where fingerprint is present.
        fingerprint_b: list((int, int)). Fingerprint of second program.

    Returns:
        float. Similarity between first and second program.
    """
    # Only the hash values matter for similarity; locations are dropped.
    hashes_a = [hash_value for hash_value, _ in fingerprint_a]
    hashes_b = [hash_value for hash_value, _ in fingerprint_b]
    return calc_jaccard_index(hashes_a, hashes_b)
|
c3cc3def2d17657c266e09ce5b05da773e1f6f1a
| 3,643,305
|
def linear_transformation(x, y_min, y_max):
    """
    Linearly map the range of x onto [y_min, y_max].

    x : the range to be transformed
    y_min, y_max : lower and upper boundaries for the range into which x
                   is transformed to

    Returns y = f(x), f(x) = m * x + b as a float ndarray. When all values
    of x are equal there is no range to stretch, so every element maps to
    y_min (this also fixes the previous NaN output for an all-zero x, where
    the old `x_min * 1.0001` guard still produced a zero denominator).
    """
    x = np.asarray(x, dtype=float)
    x_min = x.min()
    x_max = x.max()
    if x_min == x_max:
        # Degenerate range: the old multiplicative fudge collapsed the
        # formula to y_min anyway (and to NaN when x_min == 0).
        return np.full_like(x, y_min)
    return y_min + (y_max - y_min) / (x_max - x_min) * (x - x_min)
|
ddd6da6b006888a43711dc391948ffce96bd0a81
| 3,643,308
|
import random
def resize_image(image, desired_width=768, desired_height=384, random_pad=False):
    """Resizes an image to desired_width x desired_height, keeping the aspect
    ratio unchanged and padding the remainder.

    Args:
        image: PIL-style image exposing .size == (w, h) -- presumably a
            PIL.Image; TODO confirm against callers.
        desired_width: output width in pixels.
        desired_height: output height in pixels.
        random_pad: if True, distribute the padding randomly instead of
            centering the image.

    Returns:
        image: the resized, padded image
        window: (x1, y1, x2, y2) coordinates of the actual image content
            inside the padded output (the x2, y2 pixels are not included).
        scale: the scale factor used to resize the image
        padding: padding added to the image [left, top, right, bottom]
    """
    # Default window (x1, y1, x2, y2) and default scale == 1.
    w, h = image.size
    width_scale = desired_width / w
    height_scale = desired_height / h
    # Use the smaller scale so the scaled image fits inside the target box
    scale = min(width_scale, height_scale)
    # Resize image using bilinear interpolation
    if scale != 1:
        image = functional.resize(image, (round(h * scale), round(w * scale)))
    w, h = image.size
    y_pad = desired_height - h
    x_pad = desired_width - w
    # Either split padding evenly (centered) or place it at a random offset
    top_pad = random.randint(0, y_pad) if random_pad else y_pad // 2
    left_pad = random.randint(0, x_pad) if random_pad else x_pad // 2
    padding = (left_pad, top_pad, x_pad - left_pad, y_pad - top_pad)
    assert all([x >= 0 for x in padding])
    image = functional.pad(image, padding)
    window = [left_pad, top_pad, w + left_pad, h + top_pad]
    return image, window, scale, padding
|
9744bb52c58e1049c8cbd6ce9e1f1864f64ac3c5
| 3,643,309
|
def get_state(*names):
    """
    Return a list of the values of the given state keys.

    Parameters
    ----------
    *names : *str
        List of name of state values to retrieve

    Returns
    -------
    [any, ...]
        List of value matching the requested state property names
    """
    app = get_app_instance()
    return [app.get(name) for name in names]
|
8f863bdb9f578eb0e12731d1b752f197d4476a2c
| 3,643,310
|
def ver_datos_basicos(request, anexo_id):
    """
    Display the basic data of an annex (read-only view).
    """
    anexo = __get_anexo(request, anexo_id)
    # NOTE(review): `parts` is never used below. get_cue_parts() may have
    # side effects, so it is left in place -- confirm and remove if pure.
    parts = anexo.get_cue_parts()
    return my_render(request, 'registro/anexo/ver_datos.html', {
        'template': 'registro/anexo/ver_datos_basicos.html',
        'anexo': anexo,
        'page_title': 'Datos básicos',
        'actual_page': 'datos_basicos',
        'configuracion_solapas': ConfiguracionSolapasAnexo.get_instance(),
        'datos_verificados': anexo.get_verificacion_datos().get_datos_verificados()
    })
|
63cb5222cad1fa702dd5bd2fc7a14c38f4b71d65
| 3,643,311
|
def fill_sections(source, sections):
    """
    >>> fill_sections(\
    ' /* Begin User Code Section: foobar *//* End User Code Section: foobar */', {'foobar': 'barbaz'})
    ' /* Begin User Code Section: foobar */\\n    barbaz\\n    /* End User Code Section: foobar */'
    """
    def substitute(match):
        # Group 1 captures the leading indentation, group 2 the section name
        section_name = match[2]
        body = create_section(section_name, sections.get(section_name, '') + '\n')
        return indent(body, len(match[1]))

    return fill_section.sub(substitute, source)
|
6a76826f45aa0880039e70ad6bb41aa93442976b
| 3,643,312
|
def CNOT(n):
    """CNOT gate on 2-Qubit system with control qubit = 0 and target qubit = 1"""
    gate = np.copy(I4)
    # Swapping rows 2 and 3 of the identity flips the target qubit when
    # the control qubit is |1> (|10> <-> |11>).
    gate[[2, 3]] = gate[[3, 2]]
    return gate.dot(n)
|
af72004c9dd6f4a970e95d1da48a9d3776bd730b
| 3,643,313
|
import tokenize
def parse(s):
    """Parse a single string. This is just a convenience function."""
    tokens = tokenize(s)
    return pogo(parseSingleExpression(tokens, identity_cont))
|
9a7a2f4b2afd1daf22e6d2258e13ac9d13d380b3
| 3,643,314
|
def link_match_check(row):
    """
    Return True when the row's link already exists in the database.

    ``row`` is assumed to be a BeautifulSoup-style element whose first
    <a> tag holds the link -- row.a is None when absent, making the
    subscript raise TypeError.
    """
    try:
        row_link = row.a["href"]
    except TypeError:
        # No <a> tag in this row: nothing to match against.
        return False
    # Bug fix: the old loop returned after comparing only the FIRST Post,
    # so any link beyond the first row was never detected as a duplicate.
    return any(row_link == post.link for post in Post.objects.all())
|
2d55554248791b8edb5ec6080bc4c4f152a6a23a
| 3,643,315
|
def merge_s2_threshold(log_area, gap_thresholds):
    """Return gap threshold for log_area of the merged S2
    with linear interpolation given the points in gap_thresholds
    :param log_area: Log 10 area of the merged S2
    :param gap_thresholds: tuple (n, 2) of fix points for interpolation
    """
    for idx, (area_hi, gap_hi) in enumerate(gap_thresholds):
        if log_area >= area_hi:
            continue
        if idx == 0:
            # Below the first fix point: clamp to its gap value
            return gap_hi
        area_lo, gap_lo = gap_thresholds[idx - 1]
        slope = (gap_hi - gap_lo) / (area_hi - area_lo)
        return gap_lo + slope * (log_area - area_lo)
    # Above the last fix point: clamp to the final gap value
    return gap_thresholds[-1][1]
|
36dd06c8af828e3dc2ef5f1048046039feaa6c21
| 3,643,316
|
def rename_indatabet_cols(df_orig):
    """Return a deep copy of ``df_orig`` with Indatabet odds columns renamed
    to their camelCase equivalents; the input frame is left untouched.
    """
    odds_cols = {'odds_awin_pinn': 'awinOddsPinnIndatabet',
                 'odds_draw_pinn': 'drawOddsPinnIndatabet',
                 'odds_hwin_pinn': 'hwinOddsPinnIndatabet',
                 'odds_awin_bet365': 'awinOddsBet365Indatabet',
                 'odds_draw_bet365': 'drawOddsBet365Indatabet',
                 'odds_hwin_bet365': 'hwinOddsBet365Indatabet',
                 'odds_ftgoalso2.5_bet365': 'ftGoalsO2.5OddsBet365Indatabet',
                 'odds_ftgoalsu2.5_bet365': 'ftGoalsU2.5OddsBet365Indatabet',
                 'odds_ftgoalso2.5_pinn': 'ftGoalsO2.5OddsPinnIndatabet',
                 'odds_ftgoalsu2.5_pinn': 'ftGoalsU2.5OddsPinnIndatabet'}
    return df_orig.copy(deep=True).rename(columns=odds_cols)
|
a07e7c9757e1b207528f7b7fda63e06a1dced47a
| 3,643,317
|
import urllib
def get_market_updates(symbols, special_tags):
    """
    Get current yahoo quote.

    'special_tags' is a list of tags. More info about tags can be found at
    http://www.gummy-stuff.org/Yahoo-data.htm
    Returns a DataFrame indexed by symbol, or None when the download fails.

    NOTE(review): special_tags is mutated in place ('s' may be inserted at
    the front), which leaks back to the caller's list -- confirm intended.
    """
    # Accept a single symbol string, a plain iterable, or a pandas Series
    if isinstance(symbols, str):
        sym_list = symbols
    elif not isinstance(symbols, pd.Series):
        symbols = pd.Series(symbols)
        sym_list = str.join('+', symbols)
    else:
        sym_list = str.join('+', symbols)
    # Symbol must be in the special_tags for now
    if not 's' in special_tags:
        special_tags.insert(0, 's')
    request = ''.join(special_tags) # code request string
    special_tag_names = [settings.YAHOO_SYMBOL_TAGS[x] for x in special_tags]
    header = special_tag_names
    # One empty column list per requested tag, keyed by human-readable name
    data = dict(list(zip(
        list(special_tag_names), [[] for i in range(len(special_tags))]
    )))
    urlStr = 'http://finance.yahoo.com/d/quotes.csv?s=%s&f=%s' % (
        sym_list, request)
    try:
        lines = urllib.request.urlopen(urlStr).readlines()
    except Exception as e:
        s = "Failed to download:\n{0}".format(e)
        print(s)
        return None
    # Parse the CSV response: percentages and quoted strings get stripped,
    # everything else is coerced to float (NaN on failure)
    for line in lines:
        fields = line.decode('utf-8').strip().split(',')
        for i, field in enumerate(fields):
            if field[-2:] == '%"':
                data[header[i]].append(float(field.strip('"%')))
            elif field[0] == '"':
                data[header[i]].append(field.strip('"'))
            else:
                try:
                    data[header[i]].append(float(field))
                except ValueError:
                    data[header[i]].append(np.nan)
    # The 'Symbol' column becomes the index rather than a data column
    idx = data.pop('Symbol')
    return pd.DataFrame(data, index=idx)
|
d3dd970ef513a131147cc687cb9ad2076ee0b0ff
| 3,643,318
|
def HLRBRep_SurfaceTool_Torus(*args):
    """
    Delegates to the underlying SWIG binding.

    :param S:
    :type S: Standard_Address
    :rtype: gp_Torus
    """
    torus = _HLRBRep.HLRBRep_SurfaceTool_Torus(*args)
    return torus
|
46aa63882557b1a2e13cb245f81fcf9871903a18
| 3,643,319
|
def load_augmentation_class():
    """
    Load and return the user augmentation class named by AUTH.USER_AUGMENTOR.

    Similar in spirit to django.contrib.auth.load_backend.

    Raises:
        ImproperlyConfigured: if the configured dotted path cannot be read,
            imported, or resolved to an attribute on its module.
    """
    # Pre-initialize so the error message below cannot itself raise
    # NameError when AUTH.USER_AUGMENTOR.get() is what failed.
    class_name = None
    try:
        class_name = AUTH.USER_AUGMENTOR.get()
        # Split "pkg.module.ClassName" into module path and attribute name.
        i = class_name.rfind('.')
        module, attr = class_name[:i], class_name[i + 1:]
        mod = import_module(module)
        klass = getattr(mod, attr)
        LOG.info("Augmenting users with class: %s" % (klass,))
        return klass
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are not swallowed and converted into a configuration error.
        LOG.exception('failed to augment class')
        raise ImproperlyConfigured("Could not find user_augmentation_class: %s" % (class_name,))
|
16f737a2687e0b2e5002982adcafef9c32c82e36
| 3,643,320
|
def FieldTypeFor(descriptor, field_desc, nullable):
  """Returns the Javascript type for a given field descriptor.
  Args:
    descriptor: The descriptor module from the protobuf package, e.g.
        google.protobuf.descriptor.
    field_desc: A field descriptor for a particular field in a message.
    nullable: Whether or not the value may be null.
  Returns:
    The Javascript type for the given field descriptor.
  """
  fd = descriptor.FieldDescriptor
  # Scalar protobuf types map directly to Javascript primitives.
  scalar_types = {
      fd.TYPE_DOUBLE: 'number',
      fd.TYPE_INT32: 'number',
      fd.TYPE_BOOL: 'boolean',
      fd.TYPE_STRING: 'string',
  }
  if field_desc.type in scalar_types:
    element_type = scalar_types[field_desc.type]
  elif field_desc.type == fd.TYPE_ENUM:
    element_type = field_desc.enum_type.full_name
  elif field_desc.type == fd.TYPE_MESSAGE:
    element_type = field_desc.message_type.full_name
  else:
    # Preserve the original behavior of an unhandled type: a KeyError.
    raise KeyError(field_desc.type)
  # However, if the field is actually a reference to a tagspec name (string),
  # make it a number instead as we'll be replacing this with the tagspec id.
  if field_desc.full_name in TAG_NAME_REFERENCE_FIELD:
    element_type = 'number'
  if field_desc.label == fd.LABEL_REPEATED:
    array_type = 'Array<!%s>' % element_type
    return array_type if nullable else '!' + array_type
  return '?' + element_type if nullable else element_type
|
0e2c2d48dc22d209053d06fda354e4df9912144a
| 3,643,321
|
def unadmin(bot, input):
    """Removes person from admins list, owner only"""
    # Only the bot owner may demote admins.
    if not input.owner:
        return False
    target = input.group(2)
    bot.config.set_del('admins', target.lower())
    bot.reply("Unadmin'd {0}".format(target))
|
1a74ab0a3d3d1b41dd6d1f065b71a48557af84ed
| 3,643,322
|
def sim_beta_ratio(table, threshold, prior_strength, hyperparam, N,
                   return_bayes=False):
    """
    Calculates simulated ratios of match probabilities using Beta
    posteriors and returns the corresponding means, standard deviations,
    95% credible intervals, posterior parameters, and (optionally) a
    Bayes factor.
    Parameters
    ------------
    table : 2x2 numpy array
        corresponds to contingency table,
        for example,
                   False  True
            GroupA   5      4
            GroupB   3      4
        contains frequency counts: [[5, 4], [3, 4]]
    threshold : float
        value the continuous variable was split on; used to centre the
        informative priors
    prior_strength : string from {'weak', 'strong', 'uniform'}
        prior distribution to be 'informative'/'noninformative'/'uniform';
        any other value selects user-specified hyperparameters taken
        from `hyperparam`
    hyperparam : sequence of 4 numbers (alpha1, beta1, alpha2, beta2)
        prior hyperparameters; only consulted when `prior_strength` is
        not one of the three named options
    N : int
        number of posterior samples to draw for each simulation
    return_bayes : bool, optional
        if True, append a Bayes factor for the null region [0.8, 1.25]
        to the returned list
    Returns
    ------------
    list : [mean(p1/p2), mean(p2/p1), std(p1/p2), std(p2/p1),
            2.5th-pctile(p1/p2), 2.5th-pctile(p2/p1),
            97.5th-pctile(p1/p2), 97.5th-pctile(p2/p1),
            (post_alpha1, post_beta1), (post_alpha2, post_beta2)]
        with the Bayes factor appended when return_bayes is True.
    """
    # store array of total counts in table by category
    category_counts = table.sum(axis=1, dtype=float)
    # store array of number of matches by categories
    match_counts = table[:, 1]
    # set hyperparameters according to threshold and sample size
    if prior_strength == 'weak':
        # weakly informative prior, has standard deviation
        # of 0.1 at alpha / (alpha + beta) = 0.5
        # coefficient 24 is empirically derived for best smoothing at small N
        alpha1, beta1 = (1 - threshold) * 24., threshold * 24.
        alpha2, beta2 = (1 - threshold) * 24., threshold * 24.
    elif prior_strength == 'strong':
        # strong prior: equivalent to observing an 'idealized' dataset
        # of the same size as each group
        alpha1 = round((1 - threshold) * category_counts[0])
        beta1 = round(threshold * category_counts[0])
        alpha2 = round((1 - threshold) * category_counts[1])
        beta2 = round(threshold * category_counts[1])
    elif prior_strength == 'uniform':
        # uniform (flat) prior
        alpha1, beta1 = 1, 1
        alpha2, beta2 = 1, 1
    else:
        # user specified hyperparameters
        alpha1, beta1, alpha2, beta2 = hyperparam
    # Beta-Binomial conjugate update: posterior = prior + observed counts
    post_alpha1 = alpha1 + match_counts[0]
    post_beta1 = beta1 + category_counts[0] - match_counts[0]
    post_alpha2 = alpha2 + match_counts[1]
    post_beta2 = beta2 + category_counts[1] - match_counts[1]
    # draw posterior samples of matching probabilities (order of the two
    # calls is kept fixed so seeded runs remain reproducible)
    p1 = np.random.beta(post_alpha1, post_beta1, N)
    p2 = np.random.beta(post_alpha2, post_beta2, N)
    # posterior draw of ratios in both directions
    p1p2 = p1 / p2
    p2p1 = p2 / p1
    sim_beta_ratio_metrics = [np.mean(p1p2), np.mean(p2p1),
                              np.std(p1p2), np.std(p2p1),
                              np.percentile(p1p2, 2.5),
                              np.percentile(p2p1, 2.5),
                              np.percentile(p1p2, 97.5),
                              np.percentile(p2p1, 97.5),
                              (post_alpha1, post_beta1),
                              (post_alpha2, post_beta2)]
    if return_bayes:
        # Bayes factor: fraction of posterior ratios in the practical
        # equivalence range [0.8, 1.25] versus outside it.
        post_prob_null = np.sum((p1p2 >= 0.8) & (p1p2 <= 1.25)) / float(N)
        # NOTE: yields inf (with a runtime warning) if every draw falls
        # in the null range -- unchanged from the original behavior.
        bayes_factor = post_prob_null / (1 - post_prob_null)
        sim_beta_ratio_metrics.append(bayes_factor)
    return sim_beta_ratio_metrics
|
01e61719dbecb89e40bdf2688578d493c951591c
| 3,643,323
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.