| content (string, 35–762k chars) | sha1 (string, 40 chars) | id (int64, 0–3.66M) |
|---|---|---|
def isinteger(x):
    """
    Determine if a string can be converted to an integer.
    """
    try:
        int(x)
    except ValueError:
        return False
    else:
        return True
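# Usage sketch:
assert isinteger("42") is True
assert isinteger("4.2") is False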
|
180cea2f61733ada26b20ff046ae26deffa5d396
| 3,640,639
|
from typing import List, Tuple
def unmarshal_tools_pcr_values(
buf: bytes, selections: TPML_PCR_SELECTION
) -> Tuple[int, List[bytes]]:
"""Unmarshal PCR digests from tpm2_quote using the values format.
Args:
buf (bytes): content of tpm2_quote PCR output.
selections (TPML_PCR_SELECTION): The selected PCRs.
Returns:
A tuple of the number of bytes consumed from buf and a list of digests.
"""
    trs = []
    for sel in selections:
        digsize = _get_digest_size(sel.hash)
        # pcrSelect is a byte array whose bit i selects PCR i; reversing the
        # bytes lets the whole bitmap be read as one big-endian integer.
        pb = bytes(reversed(bytes(sel.pcrSelect)))
        pi = int.from_bytes(pb, "big")
        for i in range(0, sel.sizeofSelect * 8):
            if pi & (1 << i):
                trs.append(digsize)
n = 0
digs = list()
for s in trs:
dig = buf[:s]
n += s
digs.append(dig)
buf = buf[s:]
return n, digs
|
3a5b9dd36ca787026bb9bade4b5e5cc175add9e9
| 3,640,640
|
def new_topic(request):
    """Add a new topic."""
    if request.method != 'POST':
        # No data submitted; create a blank form.
        form = TopicForm()
    else:
        # POST data submitted; process the data.
        form = TopicForm(request.POST)
        if form.is_valid():
            new_topic = form.save(commit=False)
            new_topic.owner = request.user
            new_topic.save()
            return HttpResponseRedirect(reverse('learning_logs:topics'))
    context = {'form': form}
    return render(request, 'learning_logs/new_topic.html', context)
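# Hypothetical URL wiring for this view (module paths assumed, not from the source):
# from django.urls import path
# urlpatterns = [path('new_topic/', new_topic, name='new_topic')]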
|
8d41cb1926d809742e89ec7e79ef7bd1ed14443c
| 3,640,641
|
from typing import Any, List, Optional, Union
def address(lst: Union[List[Any], str], dim: Optional[int] = None) -> Address:
"""
Similar to :meth:`Address.fromList`, except the name is shorter, and
the dimension is inferred if possible. Otherwise, an exception is thrown.
Here are some examples:
>>> address('*')
Address(*, 0)
>>> address([['*'], [], ['*', '*']])
Address([[*][][**]], 2)
"""
def dimension(k: Any) -> Optional[int]:
"""
Tries to infer the dimension.
"""
if k == []:
return None
elif k == '*':
return 0
elif isinstance(k, list):
i = None # type: Optional[int]
for a in k:
j = dimension(a)
if i is None:
i = j
                elif j is not None and i != j:  # Contradictory dim inferences
return None
if i is None:
return None
else:
return i + 1
else:
raise NotImplementedError("[Address from list] Incompatible type: "
"a list representation of an address "
"(LA) for short, is either the string "
"'*', or a list of LA")
if isinstance(lst, str):
if lst == '*':
return Address.epsilon(0)
else:
raise DerivationError(
"Address from list",
"The following expression does not represent an address: "
"{lst}",
lst=lst)
elif dim is not None:
return Address.fromList(lst, dim)
d = dimension(lst)
if d is None:
raise DerivationError("Address from list",
"Cannot infer dimension of list {lst}",
lst=lst)
else:
return Address.fromList(lst, d)
|
ef9e21bb3ef98b12c8ad02c75ccf2cbf6552fd44
| 3,640,642
|
import pandas as pd
def create_scenario_dataframes_geco(scenario):
    """
    Reads the GECO dataset and creates a dataframe for the given scenario
    """
    # `io` is assumed to be a module-level mapping of configuration paths,
    # e.g. io = {"scenario_geco_path": "..."}; the original `import io`
    # shadowed it with the (non-subscriptable) stdlib module.
    df_sc = pd.read_csv(io["scenario_geco_path"])
df_sc_europe = df_sc.loc[df_sc["Country"] == "EU28"]
df_scenario = df_sc_europe.loc[df_sc_europe["Scenario"] == scenario]
return df_scenario
|
8a17d452feeb506bc2c2bf61a91e8473f2097649
| 3,640,644
|
def escape_env_var(varname):
"""
Convert a string to a form suitable for use as an environment variable.
The result will be all uppercase, and will have all invalid characters
replaced by an underscore.
The result will match the following regex: [a-zA-Z_][a-zA-Z0-9_]*
Example:
"my.private.registry/cat/image" will become
"MY_PRIVATE_REGISTRY_CAT_IMAGE"
"""
varname = list(varname.upper())
if not varname[0].isalpha():
varname[0] = "_"
for i, c in enumerate(varname):
if not c.isalnum() and c != "_":
varname[i] = "_"
return "".join(varname)
|
c1e57ff3b9648e93a540202f00d0325f91bccde1
| 3,640,645
|
import numpy as np
def rms(signal):
"""
rms(signal)
Measures root mean square of a signal
Parameters
----------
signal : 1D numpy array
"""
return np.sqrt(np.mean(np.square(signal)))
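# Usage sketch: the RMS of a full-period sine wave is ~1/sqrt(2).
t = np.linspace(0, 2 * np.pi, 10_000, endpoint=False)
print(rms(np.sin(t)))  # ~0.7071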
|
6643ad464b4048ad71c7fb115e97b42d58a84a9c
| 3,640,646
|
def get_config(
config_path, trained: bool = False, runner="d2go.runner.GeneralizedRCNNRunner"
):
"""
Returns a config object for a model in model zoo.
Args:
config_path (str): config file name relative to d2go's "configs/"
directory, e.g., "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml"
trained (bool): If True, will set ``MODEL.WEIGHTS`` to trained model zoo weights.
If False, the checkpoint specified in the config file's ``MODEL.WEIGHTS`` is used
instead; this will typically (though not always) initialize a subset of weights using
an ImageNet pre-trained model, while randomly initializing the other weights.
Returns:
CfgNode: a config object
"""
cfg_file = get_config_file(config_path)
runner = create_runner(runner)
cfg = runner.get_default_cfg()
cfg.merge_from_file(cfg_file)
if trained:
cfg.MODEL.WEIGHTS = get_checkpoint_url(config_path)
return cfg
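# Usage sketch, reusing the config path named in the docstring (requires d2go):
# cfg = get_config("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml", trained=True)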
|
e6d2b57bcadd833d625bd0a291fbb8de9d333624
| 3,640,648
|
from typing import Optional
# Assumed imports; `defaults`, `AgentSprite`, and `BoxDrape` are project-specific.
import labmaze
import numpy as np
from pycolab import ascii_art
from pycolab.engine import Engine
def make_game(
width: int = defaults.WIDTH,
height: int = defaults.HEIGHT,
max_rooms: int = defaults.MAX_ROOMS,
seed: Optional[int] = defaults.SEED,
slippery_coefficient: float = defaults.SLIPPERY_COEFFICIENT,
default_reward: float = defaults.DEFAULT_REWARD,
goal_reward: float = defaults.GOAL_REWARD,
catastrophe_reward: float = defaults.CATASTROPHE_REWARD,
) -> Engine:
"""Builds a gridworld `pycolab` game.
Args:
Returns:
A `pycolab` game.
"""
maze = labmaze.RandomMaze(
width=width,
height=height,
max_rooms=max_rooms,
random_seed=seed,
spawns_per_room=1,
spawn_token="P",
objects_per_room=1,
object_token="G",
)
# Keep only one agent position.
agent_positions = np.asarray(np.where(maze.entity_layer == "P"))
I_p = np.random.choice(agent_positions.shape[-1])
maze.entity_layer[maze.entity_layer == "P"] = " "
maze.entity_layer[tuple(agent_positions[:, I_p])] = "P"
# Keep only one goal.
goal_positions = np.asarray(np.where(maze.entity_layer == "G"))
I_g, I_c = np.random.choice(goal_positions.shape[-1], size=2, replace=False)
maze.entity_layer[maze.entity_layer == "G"] = " "
maze.entity_layer[tuple(goal_positions[:, I_g])] = "G"
maze.entity_layer[tuple(goal_positions[:, I_c])] = "C"
art = str(maze.entity_layer).split("\n")[:-1]
sprites = {
"P":
ascii_art.Partial(
AgentSprite,
default_reward=default_reward,
slippery_coefficient=slippery_coefficient,
seed=seed,
)
}
drapes = {
"G":
ascii_art.Partial(
BoxDrape,
reward=goal_reward,
terminal=True,
),
"C":
ascii_art.Partial(
BoxDrape,
reward=catastrophe_reward,
terminal=True,
)
}
return ascii_art.ascii_art_to_game(
art,
what_lies_beneath=" ",
sprites=sprites,
drapes=drapes,
)
|
908c772cdc3af5a891bce8c169d744e335da6e61
| 3,640,649
|
import numpy as np
def weighted_var(x, weights=None):
    """Unbiased weighted variance (sample variance) for the components of x.
The weights are assumed to be non random (reliability weights).
Parameters
----------
x : np.ndarray
1d or 2d with observations in rows
weights : np.ndarray or None
1d array of weights. None defaults to standard variance.
Returns
-------
s2 : np.array
1d vector of component variances
References
----------
[1] https://en.wikipedia.org/wiki/Weighted_arithmetic_mean#Weighted_sample_variance
"""
if weights is None:
weights = np.ones(len(x))
V_1 = np.sum(weights)
V_2 = np.sum(weights ** 2)
xbar = np.average(x, weights=weights, axis=0)
numerator = weights.dot((x - xbar) ** 2)
s2 = numerator / (V_1 - (V_2 / V_1))
return s2
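# Usage sketch: with uniform weights this reduces to np.var(x, ddof=1).
x = np.array([1.0, 2.0, 4.0, 8.0])
assert np.isclose(weighted_var(x), np.var(x, ddof=1))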
|
2166b214351da22117bf395fda950f1c79ccf0d1
| 3,640,650
|
def start_detailed_result_worker_route():
"""
    Add a detailed result worker if one does not already exist
:return: JSON
"""
# check if worker already exist
if check_worker_result(RABBITMQ_DETAILED_RESULT_QUEUE_NAME) == env.HTML_STATUS.OK.value:
return jsonify(status=env.HTML_STATUS.OK.value)
if 'db_name' in request.json:
db_name = request.json["db_name"]
else:
        return jsonify(status=env.HTML_STATUS.ERROR.value, message="No database selected")
Process(target=start_result_worker, args=(RABBITMQ_DETAILED_RESULT_QUEUE_NAME,
DB_DETAILED_RESULT_COLLECTION_NAME, db_name)).start()
return jsonify(status=env.HTML_STATUS.OK.value, detailed_result_worker=check_worker_result(RABBITMQ_DETAILED_RESULT_QUEUE_NAME))
|
567595574a09ce99f3feb11988bed0ba403b04cd
| 3,640,651
|
import numpy as np
def _find_next_pickup_item(not_visited_neighbors, array_of_edges_from_node):
    """
    Args:
        not_visited_neighbors: Indices of neighboring nodes not yet visited.
        array_of_edges_from_node: Edge weights from the current node.
    Returns:
        The index of the cheapest not-yet-visited neighbor.
    """
# last node in visited_nodes is where the traveling salesman is.
cheapest_path = np.argmin(
array_of_edges_from_node[not_visited_neighbors])
return not_visited_neighbors[cheapest_path]
|
06dc80e09d5b87cbc94558bee53677c887844b4b
| 3,640,652
|
def deformable_conv(input,
offset,
mask,
num_filters,
filter_size,
stride=1,
padding=0,
dilation=1,
groups=None,
deformable_groups=None,
im2col_step=None,
param_attr=None,
bias_attr=None,
modulated=True,
name=None):
"""
:api_attr: Static Graph
**Deformable Convolution op**
Compute 2-D deformable convolution on 4-D input.
Given input image x, output feature map y, the deformable convolution operation can be expressed as follow:
Deformable Convolution v2:
.. math::
y(p) = \sum_{k=1}^{K}{w_k * x(p + p_k + \Delta p_k) * \Delta m_k}
Deformable Convolution v1:
.. math::
y(p) = \sum_{k=1}^{K}{w_k * x(p + p_k + \Delta p_k)}
    Where :math:`\Delta p_k` and :math:`\Delta m_k` are the learnable offset and modulation scalar for the k-th location,
    and :math:`\Delta m_k` is 1 in deformable convolution v1. Please refer to `Deformable ConvNets v2: More Deformable, Better Results
<https://arxiv.org/abs/1811.11168v2>`_ and `Deformable Convolutional Networks <https://arxiv.org/abs/1703.06211>`_.
Example:
- Input:
Input shape: :math:`(N, C_{in}, H_{in}, W_{in})`
Filter shape: :math:`(C_{out}, C_{in}, H_f, W_f)`
Offset shape: :math:`(N, 2 * deformable\_groups * H_f * H_w, H_{in}, W_{in})`
Mask shape: :math:`(N, deformable\_groups * H_f * H_w, H_{in}, W_{in})`
- Output:
Output shape: :math:`(N, C_{out}, H_{out}, W_{out})`
Where
.. math::
H_{out}&= \\frac{(H_{in} + 2 * paddings[0] - (dilations[0] * (H_f - 1) + 1))}{strides[0]} + 1 \\\\
W_{out}&= \\frac{(W_{in} + 2 * paddings[1] - (dilations[1] * (W_f - 1) + 1))}{strides[1]} + 1
Args:
input (Variable): The input image with [N, C, H, W] format. A Tensor with type
float32, float64.
offset (Variable): The input coordinate offset of deformable convolution layer.
A Tensor with type float32, float64.
        mask (Variable, Optional): The input mask of deformable convolution layer.
            A Tensor with type float32, float64. It should be None when you use
            deformable convolution v1.
        num_filters(int): The number of filters. It is the same as the output
            image channel.
filter_size (int|tuple): The filter size. If filter_size is a tuple,
it must contain two integers, (filter_size_H, filter_size_W).
Otherwise, the filter will be a square.
stride (int|tuple): The stride size. If stride is a tuple, it must
contain two integers, (stride_H, stride_W). Otherwise, the
stride_H = stride_W = stride. Default: stride = 1.
padding (int|tuple): The padding size. If padding is a tuple, it must
contain two integers, (padding_H, padding_W). Otherwise, the
padding_H = padding_W = padding. Default: padding = 0.
dilation (int|tuple): The dilation size. If dilation is a tuple, it must
contain two integers, (dilation_H, dilation_W). Otherwise, the
dilation_H = dilation_W = dilation. Default: dilation = 1.
groups (int): The groups number of the deformable conv layer. According to
grouped convolution in Alex Krizhevsky's Deep CNN paper: when group=2,
the first half of the filters is only connected to the first half
of the input channels, while the second half of the filters is only
connected to the second half of the input channels. Default: groups=1.
deformable_groups (int): The number of deformable group partitions.
Default: deformable_groups = 1.
        im2col_step (int): Maximum number of images per im2col computation;
            The total batch size should be divisible by this value or smaller
            than this value; if you face out of memory problem, you can try
            to use a smaller value here.
Default: im2col_step = 64.
param_attr (ParamAttr, Optional): The parameter attribute for learnable parameters/weights
of deformable conv. If it is set to None or one attribute of ParamAttr,
deformable conv will create ParamAttr as param_attr.
If the Initializer of the param_attr is not set, the parameter is
initialized with :math:`Normal(0.0, std)`, and the
:math:`std` is :math:`(\\frac{2.0 }{filter\_elem\_num})^{0.5}`. Default: None.
bias_attr (ParamAttr|bool, Optional): The parameter attribute for the bias of
deformable conv layer. If it is set to False, no bias will be added
to the output units. If it is set to None or one attribute of ParamAttr, conv2d
will create ParamAttr as bias_attr. If the Initializer of the bias_attr
is not set, the bias is initialized zero. Default: None.
        modulated (bool): Selects between deformable convolution v1 and v2; \
            v2 (modulated) is used when True. Default: True.
name(str, Optional): For details, please refer to :ref:`api_guide_Name`.
Generally, no setting is required. Default: None.
Returns:
Variable: The tensor variable storing the deformable convolution \
result. A Tensor with type float32, float64.
Raises:
ValueError: If the shapes of input, filter_size, stride, padding and
groups mismatch.
Examples:
.. code-block:: python
#deformable conv v2:
import paddle.fluid as fluid
C_in, H_in, W_in = 3, 32, 32
filter_size, deformable_groups = 3, 1
data = fluid.data(name='data', shape=[None, C_in, H_in, W_in], dtype='float32')
offset = fluid.data(name='offset', shape=[None, 2*deformable_groups*filter_size**2, H_in, W_in], dtype='float32')
mask = fluid.data(name='mask', shape=[None, deformable_groups*filter_size**2, H_in, W_in], dtype='float32')
out = fluid.layers.deformable_conv(input=data, offset=offset, mask=mask,
num_filters=2, filter_size=filter_size, padding=1, modulated=True)
#deformable conv v1:
import paddle.fluid as fluid
C_in, H_in, W_in = 3, 32, 32
filter_size, deformable_groups = 3, 1
data = fluid.data(name='data', shape=[None, C_in, H_in, W_in], dtype='float32')
offset = fluid.data(name='offset', shape=[None, 2*deformable_groups*filter_size**2, H_in, W_in], dtype='float32')
out = fluid.layers.deformable_conv(input=data, offset=offset, mask=None,
num_filters=2, filter_size=filter_size, padding=1, modulated=False)
"""
check_variable_and_dtype(input, "input", ['float32', 'float64'],
'deformable_conv')
check_variable_and_dtype(offset, "offset", ['float32', 'float64'],
'deformable_conv')
check_type(mask, 'mask', (Variable, type(None)), 'deformable_conv')
num_channels = input.shape[1]
assert param_attr is not False, "param_attr should not be False here."
helper = LayerHelper('deformable_conv', **locals())
dtype = helper.input_dtype()
if not isinstance(input, Variable):
raise TypeError("Input of deformable_conv must be Variable")
if not isinstance(offset, Variable):
raise TypeError("Input Offset of deformable_conv must be Variable")
if groups is None:
num_filter_channels = num_channels
else:
if num_channels % groups != 0:
raise ValueError("num_channels must be divisible by groups.")
num_filter_channels = num_channels // groups
filter_size = utils.convert_to_list(filter_size, 2, 'filter_size')
stride = utils.convert_to_list(stride, 2, 'stride')
padding = utils.convert_to_list(padding, 2, 'padding')
dilation = utils.convert_to_list(dilation, 2, 'dilation')
input_shape = input.shape
filter_shape = [num_filters, int(num_filter_channels)] + filter_size
def _get_default_param_initializer():
filter_elem_num = filter_size[0] * filter_size[1] * num_channels
std = (2.0 / filter_elem_num)**0.5
return Normal(0.0, std, 0)
filter_param = helper.create_parameter(
attr=helper.param_attr,
shape=filter_shape,
dtype=dtype,
default_initializer=_get_default_param_initializer())
pre_bias = helper.create_variable_for_type_inference(dtype)
if modulated:
helper.append_op(
type='deformable_conv',
inputs={
'Input': input,
'Filter': filter_param,
'Offset': offset,
'Mask': mask,
},
outputs={"Output": pre_bias},
attrs={
'strides': stride,
'paddings': padding,
'dilations': dilation,
'groups': groups,
'deformable_groups': deformable_groups,
'im2col_step': im2col_step,
})
else:
helper.append_op(
type='deformable_conv_v1',
inputs={
'Input': input,
'Filter': filter_param,
'Offset': offset,
},
outputs={"Output": pre_bias},
attrs={
'strides': stride,
'paddings': padding,
'dilations': dilation,
'groups': groups,
'deformable_groups': deformable_groups,
'im2col_step': im2col_step,
})
output = helper.append_bias_op(pre_bias, dim_start=1, dim_end=2)
return output
|
0dce5c2333a0a3dcaa568a85f3a6dec1536d2cfb
| 3,640,653
|
def ieee():
"""IEEE fixture."""
return t.EUI64.deserialize(b"ieeeaddr")[0]
|
b00b13bb16c74bc96e52ad067dc0c523f5b5a249
| 3,640,654
|
def is_in(a_list):
    """Returns a *function* that checks whether its argument is in the list.
    Avoids re-evaluating the list at every comparison."""
def check(arg): return arg in a_list
return check
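# Usage sketch:
is_small = is_in([1, 2, 3])
assert list(filter(is_small, [0, 1, 2, 5])) == [1, 2]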
|
34afbc269c164f0e095b1cbbf4e9576bafc7a9e1
| 3,640,655
|
def get_log_record_extra_fields(record):
"""Taken from `common` repo logging module"""
# The list contains all the attributes listed in
# http://docs.python.org/library/logging.html#logrecord-attributes
    skip_list = (
        'args', 'asctime', 'created', 'exc_info', 'exc_text', 'filename',
        'funcName', 'id', 'levelname', 'levelno', 'lineno', 'module',
        'msecs', 'message', 'msg', 'name', 'pathname', 'process',
        'processName', 'relativeCreated', 'thread', 'threadName', 'extra',
        'stack_info', 'exc_type', 'exc_msg')
easy_types = (str, bool, dict, float, int, list, type(None))
fields = {}
for key, value in record.__dict__.items():
if key not in skip_list:
if isinstance(value, easy_types):
fields[key] = value
else:
fields[key] = repr(value)
return fields
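# Usage sketch: attributes attached via `extra` show up as extra fields.
import logging
rec = logging.LogRecord("demo", logging.INFO, __file__, 1, "msg", None, None)
rec.request_id = "abc-123"
print(get_log_record_extra_fields(rec))  # includes {'request_id': 'abc-123'}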
|
95fe6a74cd169c14ac32728f0bb1d16a2aa9e874
| 3,640,656
|
def ldap_is_intromember(member):
    """
    :param member: A CSHMember instance
    :return: True if the member belongs to the 'intromembers' group
    """
    return _ldap_is_member_of_group(member, 'intromembers')
|
d858afac4870cacc18be79a0b2d6d7d51dd33e07
| 3,640,657
|
def details(request, slug):
"""
Show product set
"""
productset = get_object_or_404(models.ProductSet, slug=slug)
context = {}
response = []
variant_instances = productset.variant_instances()
signals.product_view.send(
sender=type(productset), instances=variant_instances,
request=request, response=response, extra_context=context)
if len(response) == 1:
return response[0]
elif len(response) > 1:
        raise ValueError("Multiple responses returned.")
context['variants'] = variant_instances
context['productset'] = productset
return direct_to_template(request,
'satchless/productset/details.html',
context)
|
9ac9b3f975a6501cfb94dd5d545c29c63a47a125
| 3,640,658
|
def applies(platform_string, to='current'):
""" Returns True if the given platform string applies to the platform
specified by 'to'."""
def _parse_component(component):
component = component.strip()
parts = component.split("-")
if len(parts) == 1:
if parts[0] in VALID_PLATFORMS_FILTER:
return parts[0], None
elif parts[0] in _ARCHBITS_TO_ARCH:
return "all", parts[0]
else:
raise ValueError(
"Invalid filter string: '{}'".format(component)
)
elif len(parts) == 2:
if (
parts[0] not in VALID_PLATFORMS_FILTER
or parts[1] not in _ARCHBITS_TO_ARCH
):
raise ValueError(
"Invalid filter string: '{}'".format(component)
)
return parts[0], parts[1]
else:
raise ValueError(
"Invalid filter string: '{}'".format(component)
)
    def _are_compatible(short_left, short_right):
        return (
            short_left == short_right
            or (short_left == "rh" and short_right.startswith("rh"))
            or (short_right == "rh" and short_left.startswith("rh"))
            or short_left == "all"
        )
if isinstance(to, str):
if to == 'current':
full = EPDPlatform.from_running_system()
to_platform = full.platform_name
to_arch_bits = full.arch_bits
elif '-' in to:
full = EPDPlatform.from_epd_string(to)
to_platform = full.platform_name
to_arch_bits = full.arch_bits
else:
if not (to in PLATFORM_NAMES or to == 'rh'):
raise ValueError("Invalid 'to' argument: {0!r}".format(to))
to_platform = to
to_arch_bits = None
else:
to_platform = to.platform_name
to_arch_bits = to.arch_bits
conditions = []
platform_string = platform_string.strip()
if platform_string.startswith("!"):
invert = True
platform_string = platform_string[1:]
else:
invert = False
    platform_strings = platform_string.split(",")
for platform_string in platform_strings:
short, bits = _parse_component(platform_string)
if _are_compatible(short, to_platform):
if bits is None:
conditions.append(True)
else:
conditions.append(bits == to_arch_bits or to_arch_bits is None)
else:
conditions.append(False)
if invert:
return not any(conditions)
else:
return any(conditions)
|
4692fb0d302948e07a1b2586f614dfcfa5618503
| 3,640,659
|
import threading
# Module-level lock registry; assumed to exist alongside this helper in the source.
_CLASS_LOCKS_LOCK = threading.Lock()
_CLASS_LOCKS = {}
def _GetClassLock(cls):
"""Returns the lock associated with the class."""
with _CLASS_LOCKS_LOCK:
if cls not in _CLASS_LOCKS:
_CLASS_LOCKS[cls] = threading.Lock()
return _CLASS_LOCKS[cls]
|
98b034a0984431a752801407bfbc5e5694ad44ae
| 3,640,660
|
from apysc._expression import event_handler_scope
def _get_expression_table_name() -> TableName:
"""
    Get an expression table name. The returned value depends on whether
    the current scope is an event handler's scope.
Returns
-------
table_name : str
Target expression table name.
"""
event_handler_scope_count: int = \
event_handler_scope.get_current_event_handler_scope_count()
if event_handler_scope_count == 0:
return TableName.EXPRESSION_NORMAL
return TableName.EXPRESSION_HANDLER
|
61d3207d51264e876a472cbd2eea43de730508ac
| 3,640,661
|
import gc
import time
import joblib
import numpy as np
def make_inference(input_data, model):
    """
    input_data is assumed to be a pandas dataframe, and model uses the standard sklearn API with .predict
    """
    # `m` is assumed to be the project's model-utilities module, providing
    # calc_NIR_V() and the `features` list.
    input_data['NIR_V'] = m.calc_NIR_V(input_data)
input_data = input_data.replace([np.nan, np.inf, -np.inf, None], np.nan)
input_data = input_data.dropna(subset=m.features)
gc.collect()
print(f'predicting on {len(input_data)} records')
t0 = time.time()
with joblib.parallel_backend('threading', n_jobs=8):
model.n_jobs = 8
input_data['biomass'] = model.predict(input_data)
t1 = time.time()
print(f'took {round(t1-t0)} seconds')
return input_data[['x', 'y', 'biomass']]
|
472c6eaadf0f9dd705e8600ccb4939d67f387a0e
| 3,640,663
|
def property_elements(rconn, redisserver, name, device):
"""Returns a list of dictionaries of element attributes for the given property and device
each dictionary will be set in the list in order of label
:param rconn: A redis connection
:type rconn: redis.client.Redis
:param redisserver: The redis server parameters
:type redisserver: namedtuple
:param name: The property name
:type name: String
:param device: The device name
:type device: String
:return: A list of element attributes dictionaries.
:rtype: List
"""
element_name_list = elements(rconn, redisserver, name, device)
if not element_name_list:
return []
element_dictionary_list = list( elements_dict(rconn, redisserver, elementname, name, device) for elementname in element_name_list )
# sort element_dictionary_list by label
element_dictionary_list.sort(key=_split_element_labels)
return element_dictionary_list
|
70ee3de0a18a84e9f21df341c685c1380a4ab164
| 3,640,664
|
def _dtype(a, b=None):
"""Utility for getting a dtype"""
return getattr(a, 'dtype', getattr(b, 'dtype', None))
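# Usage sketch (assuming NumPy inputs):
import numpy as np
assert _dtype(np.ones(2)) == np.float64
assert _dtype(object(), np.zeros(3, dtype=np.int32)) == np.int32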
|
c553851231f0c4be544e5f93738b43fa98e65176
| 3,640,665
|
# Assumed imports for this snippet: an lxml/ElementTree parser plus pandas and numpy.
from lxml import etree
from numpy import nan
from pandas import DataFrame, to_datetime
def parse_garmin_tcx(filename):
""" Parses tcx activity file from Garmin Connect to Pandas DataFrame object
Args: filename (str) - tcx file
Returns: a tuple of id(str) and data(DataFrame)
DF columns=['time'(datetime.time), 'distance, m'(float), 'HR'(int),
'cadence'(int), 'speed, m/s'(int)]
"""
tree = etree.parse(str(filename))
# set namespaces for garmin tcx file
ns = {'ns0': '{http://www.garmin.com/xmlschemas/TrainingCenterDatabase/v2}',
'ns3': '{http://www.garmin.com/xmlschemas/ActivityExtension/v2}'}
id = to_datetime(tree.find('.//' + ns['ns0'] + 'Id').text).date()
trackpoints = tree.findall('.//' + ns['ns0'] + 'Trackpoint')
data = DataFrame(columns='time,distance,HR,speed,cadence,latitude,longitude,altitude'.split(','))
for n, trackpoint in enumerate(trackpoints):
data.loc[n, 'time'] = trackpoint.find('.//' + ns['ns0'] + 'Time').text
data.loc[n, 'distance'] = float(trackpoint.find('.//' + ns['ns0'] + 'DistanceMeters').text)
data.loc[n, 'altitude'] = float(trackpoint.find('.//' + ns['ns0'] + 'AltitudeMeters').text)
data.loc[n, 'HR'] = int(trackpoint.find('.//' + ns['ns0'] + 'HeartRateBpm/').text)
        # .find() returns None for a missing element, so narrow the bare excepts.
        try:
            data.loc[n, 'latitude'] = float(trackpoint.find('.//' + ns['ns0'] + 'LatitudeDegrees').text)
        except (AttributeError, TypeError, ValueError):
            data.loc[n, 'latitude'] = nan
        try:
            data.loc[n, 'longitude'] = float(trackpoint.find('.//' + ns['ns0'] + 'LongitudeDegrees').text)
        except (AttributeError, TypeError, ValueError):
            data.loc[n, 'longitude'] = nan
        try:
            data.loc[n, 'speed'] = float(trackpoint.find('.//' + ns['ns3'] + 'Speed').text)
        except (AttributeError, TypeError, ValueError):
            data.loc[n, 'speed'] = nan
        try:
            data.loc[n, 'cadence'] = int(trackpoint.find('.//' + ns['ns3'] + 'RunCadence').text) * 2
        except (AttributeError, TypeError, ValueError):
            data.loc[n, 'cadence'] = nan
data.loc[:,'time'] = to_datetime(data['time'])
return (id, data)
|
bc8052850b9aa9fdab82de2814d38ae62aa298c6
| 3,640,666
|
def get_decay_fn(initial_val, final_val, start, stop):
"""
Returns function handle to use in torch.optim.lr_scheduler.LambdaLR.
The returned function supplies the multiplier to decay a value linearly.
"""
assert stop > start
def decay_fn(counter):
if counter <= start:
return 1
if counter >= stop:
return final_val / initial_val
time_range = stop - start
return 1 - (counter - start) * (1 - final_val / initial_val) / time_range
    # Note: exact float comparisons; (final_val / initial_val) * initial_val
    # may not round-trip exactly for some value combinations.
    assert decay_fn(start) * initial_val == initial_val
    assert decay_fn(stop) * initial_val == final_val
return decay_fn
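# Usage sketch: linear decay from 1.0 to 0.1 between steps 10 and 20.
fn = get_decay_fn(1.0, 0.1, start=10, stop=20)
print(fn(15))  # 0.55, halfway through the decay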
|
d84c0f0305d239834429d83ba4bd5c6d6e945b69
| 3,640,667
|
from typing import Optional
async def is_logged(jwt_cookie: Optional[str] = Cookie(None, alias=config.login.jwt_cookie_name)):
"""
    Check whether the user is logged in
"""
result = False
if jwt_cookie:
try:
token = jwt.decode(
jwt_cookie,
smart_text(orjson.dumps(config.secret_key)),
algorithms=[config.login.jwt_algorithm],
audience="auth",
)
result = isinstance(token, dict) and "sub" in token
except JWTError:
pass
return JSONResponse(result, status_code=200)
|
e6e3ed4003dc6b60f3b118a98b0fbfb3bcb3b60a
| 3,640,668
|
from typing import List
def cached_query_molecules(
client_address: str, molecule_ids: List[str]
) -> List[QCMolecule]:
"""A cached version of ``FractalClient.query_molecules``.
Args:
client_address: The address of the running QCFractal instance to query.
molecule_ids: The ids of the molecules to query.
Returns:
The returned molecules.
"""
return _cached_client_query(
client_address,
molecule_ids,
"query_molecules",
_molecule_cache,
)
|
33d8c336daba7a79ba66d8823ba93f35fa37c351
| 3,640,669
|
def _domain_to_json(domain):
"""Translates a Domain object into a JSON dict."""
result = {}
# Domain names and bounds are not populated yet
if isinstance(domain, sch.IntDomain):
result['ints'] = {
'min': str(domain.min_value),
'max': str(domain.max_value),
'isCategorical': domain.is_categorical,
'vocabularyFile': domain.vocabulary_file
}
elif isinstance(domain, sch.FloatDomain):
result['floats'] = {}
elif isinstance(domain, sch.StringDomain):
result['strings'] = {}
elif isinstance(domain, sch.BoolDomain):
result['bools'] = {}
return result
|
c1d9d860ea1735feacfb7349f4516634e217ea5b
| 3,640,670
|
def draw_point(state, x, y, col=COLORS["WHITE"], symb="▓"):
"""returns a state with a placed point"""
state[y][x] = renderObject(symb, col)
return state
|
64b500fdacda30b0506397d554e8ce6d3b7b4a66
| 3,640,671
|
def _vars_to_add(new_query_variables, current_query_variables):
"""
Return list of dicts representing Query Variables not yet persisted
Keyword Parameters:
new_query_variables -- Dict, representing a new inventory of Query
Variables, to be associated with a DWSupport Query
current_query_variables -- Dict, representing the Query Variables
currently associated with the 'new_query_variables' Query mapped
by tuple(table_name, column_name)
>>> from pprint import pprint
>>> test_new_vars = { 'great_fact': ['measure_a', 'measure_b']
... ,'useful_dim': ['field_one']
... ,'occasionally_useful_dim': ['field_two']}
>>> persisted_vars = { ('great_fact', 'measure_a'): object() #fake
... ,('useful_dim', 'field_one'): object()#objects
... ,('useful_dim', 'field_two'): object()}
>>> out = _vars_to_add(test_new_vars, persisted_vars)
>>> pprint(out) # check detected additions
{'great_fact': ['measure_b'], 'occasionally_useful_dim': ['field_two']}
"""
additional_fields_by_table_name = {} # Values to return
# detect additions
for new_variable_table_name, table_columns in new_query_variables.items():
for column_name in table_columns:
key = (new_variable_table_name, column_name) #table+column tuple
if key not in current_query_variables:
# New Query Variable - add variable name to table's list
table_variables = additional_fields_by_table_name.setdefault(
new_variable_table_name
,list()) #default to new, empty list (if none exists yet)
table_variables.append(column_name)
return additional_fields_by_table_name
|
fd5ea2209b374ab9987a05c139ba1f28805f3eff
| 3,640,672
|
def Ak(Y2d, H, k):
"""
Calculate Ak for Sk(x)
Parameters
----------
    Y2d : list
        list of second-derivative values of y
H : list
list of h values from spline
k : int
index from Y2d and H
Returns
-------
float
Ak from cubic spline
"""
return (Y2d[k] - Y2d[k - 1]) / (6 * H[k - 1])
|
baea453b9c7b023b78c1827dc23bacbd8fd6b057
| 3,640,673
|
def cycle_list_next(vlist, current_val):
    """Return the element of *vlist* that follows *current_val*, wrapping
    around to the beginning at the end of the list.
    """
return vlist[(vlist.index(current_val) + 1) % len(vlist)]
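# Usage sketch:
assert cycle_list_next(['a', 'b', 'c'], 'b') == 'c'
assert cycle_list_next(['a', 'b', 'c'], 'c') == 'a'  # wraps around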
|
48e2ac31178f51f981eb6a27ecf2b35d44b893b4
| 3,640,674
|
def _cal_hap_stats(gt, hap, pos, src_variants, src_hom_variants, src_het_variants, sample_size):
"""
Description:
Helper function for calculating statistics for a haplotype.
Arguments:
gt allel.GenotypeArray: Genotype data for all the haplotypes within the same window of the haplotype to be analyzed.
hap allel.GenotypeVector: Genotype data for the haplotype to be analyzed.
pos list: List containing positions of variants on the haplotype.
src_variants list: List containing positions of variants on the individual from the source population.
src_hom_variants list: List containing positions of homozygous variants on the individual from the source population.
src_het_variants list: List containing positions of heterozygous variants on the individual from the source population.
sample_size int: Number of individuals analyzed.
Returns:
hap_variants_num int: Number of SNPs with derived alleles on the haplotype.
hap_site_num int: Number of SNPs with derived alleles either on the haplotype or the source genomes.
hap_match_src_allele_num int: Number of SNPs with derived alleles both on the haplotype and the source genomes.
        hap_sfs float: Average number of derived variants per site per haplotype.
        hap_match_pct float: Match percent of the haplotype.
"""
if hap is None: return 'NA', 'NA', 'NA', 'NA', 'NA'
else:
hap_variants = pos[np.equal(hap, 1)]
hap_variants_num = len(hap_variants)
# Assume the alternative allele is the derived allele
hap_shared_src_hom_site_num = len(np.intersect1d(hap_variants, src_hom_variants))
hap_shared_src_het_site_num = len(np.intersect1d(hap_variants, src_het_variants))
hap_site_num = len(np.union1d(hap_variants, src_variants))
hap_match_src_allele_num = hap_shared_src_hom_site_num + 0.5*hap_shared_src_het_site_num
hap_shared_src_site_num = hap_shared_src_hom_site_num + hap_shared_src_het_site_num
if hap_site_num != 0: hap_match_pct = round(hap_match_src_allele_num/hap_site_num, 6)
else: hap_match_pct = 'NA'
hap_sfs = np.sum(np.sum(gt[hap == 1], axis=2), axis=1)
if hap_sfs.size != 0:
hap_sfs_mean = np.mean(hap_sfs)
# See https://stackoverflow.com/questions/10825926/python-3-x-rounding-behavior
#if not np.isnan(sfs_mean): sfs_mean = int(round(sfs_mean))
#if not np.isnan(hap_sfs_mean): hap_sfs = int(int(py2round(hap_sfs_mean))/10*108)
#if not np.isnan(hap_sfs_mean): hap_sfs = int(py2round(hap_sfs_mean))/(2*sample_size)
if not np.isnan(hap_sfs_mean): hap_sfs = round(hap_sfs_mean/(2*sample_size), 6)
else:
hap_sfs = np.nan
return hap_variants_num, hap_site_num, hap_match_src_allele_num, hap_sfs, hap_match_pct
|
10c3105fe582078d1f24cd740600ccf3c6863407
| 3,640,675
|
import json
from os.path import isfile
def read_cfg(file):
    """Read configuration file and return a list of (start, end) tuples."""
    result = []
    if isfile(file):
with open(file) as f:
cfg = json.load(f)
for entry in cfg:
if "start" in entry:
                entry_filter = (entry["start"], entry.get("end", None))
                result.append(entry_filter)
return result
|
bb9c20b03e95f45708eab17313bc446cc1540308
| 3,640,676
|
import numpy as np
import scipy.optimize
def least_l2_affine(
source: np.ndarray, target: np.ndarray, shift: bool = True, scale: bool = True
) -> AffineParameters:
"""Finds the squared-error minimizing affine transform.
Args:
source: a 1D array consisting of the reward to transform.
target: a 1D array consisting of the target to match.
shift: affine includes constant shift.
scale: affine includes rescale.
Returns:
(shift, scale) such that (scale * reward + shift) has minimal squared-error from target.
Raises:
ValueError if source or target are not 1D arrays, or if neither shift or scale are True.
"""
if source.ndim != 1:
raise ValueError("source must be vector.")
if target.ndim != 1:
raise ValueError("target must be vector.")
if not (shift or scale):
raise ValueError("At least one of shift and scale must be True.")
a_vals = []
if shift:
# Positive and negative constant.
# The shift will be the sum of the coefficients of these terms.
a_vals += [np.ones_like(source), -np.ones_like(source)]
if scale:
a_vals += [source]
a_vals = np.stack(a_vals, axis=1)
# Find x such that a_vals.dot(x) has least-squared error from target, where x >= 0.
coefs, _ = scipy.optimize.nnls(a_vals, target)
shift_param = 0.0
scale_idx = 0
if shift:
shift_param = coefs[0] - coefs[1]
scale_idx = 2
scale_param = 1.0
if scale:
scale_param = coefs[scale_idx]
return AffineParameters(shift=shift_param, scale=scale_param)
|
5a6d6d69400327c30d21ae205cab88fd95d856d6
| 3,640,677
|
def mark_item_as_read(
client: EWSClient, item_ids, operation="read", target_mailbox=None
):
"""
Marks item as read
:param client: EWS Client
:param item_ids: items ids to mark as read
:param (Optional) operation: operation to execute
:param (Optional) target_mailbox: target mailbox
:return: Output tuple
"""
marked_items = []
item_ids = argToList(item_ids)
items = client.get_items_from_mailbox(target_mailbox, item_ids)
items = [x for x in items if isinstance(x, Message)]
for item in items:
item.is_read = operation == "read"
item.save()
marked_items.append(
{
ITEM_ID: item.id,
MESSAGE_ID: item.message_id,
ACTION: "marked-as-{}".format(operation),
}
)
readable_output = tableToMarkdown(
f"Marked items ({operation} marked operation)", marked_items
)
output = {CONTEXT_UPDATE_EWS_ITEM: marked_items}
return readable_output, output, marked_items
|
80b3fc0b47a9a0044538a2862433a50d5ad36edb
| 3,640,678
|
import math
import numpy as np
def AIC_score(y_true, y_pred, model=None, df=None):
    """ calculate Akaike Information Criterion (AIC)
    Input:
        y_true: actual values
        y_pred: predicted values
        model (optional): predictive model
        df (optional): degrees of freedom of model
    One of model or df is required
    """
if df is None and model is None:
raise ValueError('You need to provide either model or df')
n = len(y_pred)
p = len(model.coef_) + 1 if df is None else df
resid = np.array(y_true) - np.array(y_pred)
sse = np.sum(resid ** 2)
constant = n + n * np.log(2 * np.pi)
return n * math.log(sse / n) + constant + 2 * (p + 1)
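# Usage sketch with an explicit df:
y_true = [3.1, 2.9, 4.2]
y_pred = [3.0, 3.0, 4.0]
print(AIC_score(y_true, y_pred, df=2))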
|
6b59dea007f414b0bdc6b972434cb1c2def40bb2
| 3,640,679
|
import cv2
import numpy as np
import xarray as xr
def to_rgb(data, output=None, vmin=None, vmax=None, pmin=2, pmax=98,
           categorical=False, mask=None, size=None, cmap=None):
"""Turn some data into a numpy array representing an RGB image.
Parameters
----------
data : list of DataArray
output : str
file path
vmin : float or list of float
minimum value, or list of values per channel (default: None).
vmax : float or list of float
maximum value, or list of values per channel (default: None).
pmin : float
lowest percentile to plot (default: 2). Ignored if vmin is passed.
    pmax : float
        highest percentile to plot (default: 98). Ignored if vmax is passed.
    categorical : bool
        if True, colorize the first channel as categorical data (default: False).
    mask : np.ndarray, optional
        boolean mask; pixels where the mask is False are set to 0.
    size : tuple, optional
        output (height, width); either entry may be None to keep the aspect ratio.
    cmap : str, optional
        color map to apply to single-channel images.
Returns
-------
np.ndarray or None
Returns the generate RGB image if output is None, else returns None.
"""
if isinstance(data, list):
n_channels = len(data)
elif isinstance(data, xr.DataArray) or isinstance(data, np.ndarray):
n_channels = 1
data = [data]
else:
raise ValueError("`data` must be a DataArray or list of DataArrays")
values = [np.asarray(d) for d in data]
shape = data[0].shape + (n_channels,)
if vmin is not None:
if isinstance(vmin, (int, float)):
vmin = [vmin] * n_channels
if vmax is not None:
if isinstance(vmax, (int, float)):
vmax = [vmax] * n_channels
if categorical:
colored = colorize(values[0], nan_vals=[0])
else:
im = np.empty(shape)
for i in range(n_channels):
channel = values[i]
# Stretch
if vmin is not None:
minval = vmin[i]
else:
minval = np.percentile(channel, pmin)
if vmax is not None:
maxval = vmax[i]
else:
maxval = np.percentile(channel, pmax)
if maxval > minval:
channel = (channel - minval) / (maxval - minval) * 255
im[:, :, i] = channel
im = np.clip(im, 0, 255).astype(np.uint8)
if n_channels == 1:
colored = cv2.cvtColor(im[:, :, 0], cv2.COLOR_GRAY2BGR)
if cmap is not None:
# colored is now in BGR
colored = cv2.applyColorMap(colored, _cmap_from_str(cmap))
else:
# im is in RGB
colored = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)
# if output is not None:
# colored = cv2.cvtColor(colored, cv2.COLOR_RGB2BGR)
if mask is not None:
colored[~mask] = 0
if size is not None:
if size[0] is None:
size = (int(colored.shape[0] * size[1] / colored.shape[1]),
size[1])
elif size[1] is None:
size = (size[0],
int(colored.shape[1] * size[0] / colored.shape[0]))
colored = cv2.resize(colored, (size[1], size[0]))
if output is None:
return cv2.cvtColor(colored, cv2.COLOR_BGR2RGB)
else:
cv2.imwrite(output, colored)
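# Usage sketch: stretch one random channel to an 8-bit RGB image.
arr = np.random.rand(32, 32)
rgb = to_rgb(arr)
print(rgb.shape, rgb.dtype)  # (32, 32, 3) uint8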
|
477241dd890b78d7bbf56d3095b42f106af694a7
| 3,640,680
|
import requests
def id_convert(values, idtype=None):
"""
Get data from the id converter API.
https://www.ncbi.nlm.nih.gov/pmc/tools/id-converter-api/
"""
base = 'http://www.pubmedcentral.nih.gov/utils/idconv/v1.0/'
params = {
'ids': values,
'format': 'json',
}
if idtype is not None:
params['idtype'] = idtype
resp = requests.get(base, params=params)
raw = resp.json()
records = raw.get('records')
if records is None:
return None
status = records[0].get('status')
if status == u"error":
return None
return raw['records'][0]
|
a60698fb20ba94445bbd06384b8523e92bfb91a3
| 3,640,681
|
import base64
def authenticate_user():
"""Authenticate user"""
username = request.form['username']
password = request.form['password']
user = User.query.filter_by(username=username, password=password).first()
if user is not None:
ma_schema = UserSchema()
user_data = ma_schema.dump(user)
user_data['id'] = user.pk
user_data['token'] = base64.b64encode(bytes(user.token, 'utf-8')).decode("utf-8")
del user_data['pk']
return jsonify(user_data)
else:
return jsonify({"message":"Invalid credentials"}),404
|
8e15a3bddf4700c1b207798e3162e3fcef0e7d79
| 3,640,682
|
import typing
import inspect
import warnings
def map_signature(
r_func: SignatureTranslatedFunction,
is_method: bool = False,
map_default: typing.Optional[
typing.Callable[[rinterface.Sexp], typing.Any]
] = _map_default_value
) -> typing.Tuple[inspect.Signature, typing.Optional[int]]:
"""
    Map the signature of an R function to the signature of a Python function.
While mapping the signature, it will report the eventual presence of
an R ellipsis.
Args:
r_func (SignatureTranslatedFunction): an R function
is_method (bool): Whether the function should be treated as a method
(adds a `self` param to the signature if so).
map_default (function): Function to map default values in the Python
signature. No mapping to default values is done if None.
Returns:
A tuple (inspect.Signature, int or None).
"""
params = []
r_ellipsis = None
if is_method:
params.append(inspect.Parameter('self',
inspect.Parameter.POSITIONAL_ONLY))
r_params = r_func.formals()
rev_prm_transl = {v: k for k, v in r_func._prm_translate.items()}
if r_params.names is not rinterface.NULL:
for i, (name, default_orig) in enumerate(zip(r_params.names, r_params)):
if default_orig == '...':
r_ellipsis = i
                warnings.warn('The R ellipsis is not yet well supported.')
transl_name = rev_prm_transl.get(name)
default_orig = default_orig[0]
if map_default and not rinterface.MissingArg.rsame(default_orig):
default_mapped = map_default(default_orig)
else:
default_mapped = inspect.Parameter.empty
prm = inspect.Parameter(
transl_name if transl_name else name,
inspect.Parameter.POSITIONAL_OR_KEYWORD,
default=default_mapped
)
params.append(prm)
return (inspect.Signature(params), r_ellipsis)
|
e0655bab739b59b0fad94772654a70ce4e6f84fd
| 3,640,683
|
import numpy as np
def get_random(X):
"""Get a random sample from X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Returns
-------
array-like, shape (1, n_features)
"""
size = len(X)
idx = np.random.choice(range(size))
return X[idx]
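# Usage sketch:
X = np.arange(10).reshape(5, 2)
print(get_random(X))  # one random row, e.g. [4 5]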
|
e493b68ae5b7263786a1a447a0cff78d1deeba24
| 3,640,684
|
def get_secondary_connections(network, user):
"""
Finds all the secondary connections (i.e. connections of connections)
of a given user.
Arguments:
network: the gamer network data structure.
user: a string containing the name of the user.
Returns:
A list containing the secondary connections (connections of connections).
- If the user is not in the network, returns None.
- If a user has no primary connections to begin with,
returns an empty list.
NOTE:
It is OK if a user's list of secondary connections includes the user
himself/herself. It is also OK if the list contains a user's primary
connection that is a secondary connection as well.
"""
if user not in network:
return None
if network[user][0] == []:
return []
return [person
for group in
[network[connection][0] for connection in network[user][0]]
for person in group]
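# Usage sketch; the network is assumed to map each user to [connections, games]:
network = {'A': [['B'], []], 'B': [['C'], []], 'C': [[], []]}
assert get_secondary_connections(network, 'A') == ['C']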
|
4e53f6e43f2fb132932381370efa4b3a3cd4793c
| 3,640,686
|
def get_regression_function(model, model_code):
    """
    Method which returns the prediction function for a trained regression model
    :param model: trained model object
    :param model_code: model identifier (unused here)
    :return: regression predictor function
    """
return model.predict
|
fca4a0767b1e741952534baf59ac07cece2c9342
| 3,640,687
|
def beam_motion_banding_filter(img, padding=20):
"""
:param img: numpy.array.
2d projection image or sinogram. The left and right side of the image should be
empty. So that `padding` on the left and right will be used to create an beam motion
banding image and be normalized from the original image.
:param padding: int.
The size of on the left and right empty area to be used to find the average value
where there is no object.
:return img_new: numpy.array
Smoothed image.
"""
nx = img.shape[1]
mean_left = img[:, 0:padding].mean(axis=1)
mean_right = img[:, -padding:].mean(axis=1)
mean_middle = (mean_left + mean_right) / 2
slope = (mean_right - mean_left) / (nx - padding)
# Make an image with only bandings.
img_banding = img * 0.0
for i in range(img_banding.shape[1]): # iterate cols
img_banding[:, i] = mean_middle + (i - nx / 2) * slope
# Subtract the banding from the original.
img_new = img-img_banding
return img_new
|
5191c1f3022711459ce81cfbf0c4d6c6fb7dcd41
| 3,640,688
|
def log(session):
"""Clear nicos log handler content"""
handler = session.testhandler
handler.clear()
return handler
|
086e362c8195b917c826fc8b20d3095210ac82fd
| 3,640,689
|
import numpy as np
import SimpleITK as sitk
def calculate_dvh(dose_grid, label, bins=1001):
"""Calculates a dose-volume histogram
Args:
dose_grid (SimpleITK.Image): The dose grid.
label (SimpleITK.Image): The (binary) label defining a structure.
bins (int | list | np.ndarray, optional): Passed to np.histogram,
can be an int (number of bins), or a list (specifying bin edges). Defaults to 1001.
Returns:
bins (numpy.ndarray): The points of the dose bins
values (numpy.ndarray): The DVH values
"""
if dose_grid.GetSize() != label.GetSize():
print("Dose grid size does not match label, automatically resampling.")
dose_grid = sitk.Resample(dose_grid, label)
dose_arr = sitk.GetArrayViewFromImage(dose_grid)
label_arr = sitk.GetArrayViewFromImage(label)
dose_vals = dose_arr[np.where(label_arr)]
counts, bin_edges = np.histogram(dose_vals, bins=bins)
# Get mid-points of bins
bins = (bin_edges[1:] + bin_edges[:-1]) / 2.0
# Calculate the actual DVH values
values = np.cumsum(counts[::-1])[::-1]
values = values / values.max()
return bins, values
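# Usage sketch with synthetic images (values illustrative):
dose = sitk.GetImageFromArray(np.random.rand(10, 10, 10) * 60)
structure = sitk.GetImageFromArray((np.random.rand(10, 10, 10) > 0.5).astype(np.uint8))
bins, values = calculate_dvh(dose, structure)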
|
007c7eb9c2ddca9809ac2c86f7bf6d34ed14d41b
| 3,640,691
|
import numpy as np
import pandas as pd
def dataframe_with_new_calendar(df: pd.DataFrame, new_calendar: pd.DatetimeIndex):
"""
Returns a new DataFrame where the row data are based on the new calendar (similar to Excel's VLOOKUP with
approximate match)
:param df: DataFrame
:param new_calendar: DatetimeIndex
:return: DataFrame
"""
# find the position in the old calendar that closest represents the new calendar dates
original_calendar = df.index
date_index_list = np.searchsorted(original_calendar, new_calendar, side='right')
date_index_list = [d_i - 1 for d_i in date_index_list if d_i > 0]
data_for_new_calendar = df.to_numpy()[date_index_list, :]
# in case the first dates in the new calendar are before the first available date in the DataFrame, add nans to the
# first rows
if data_for_new_calendar.shape[0] != len(new_calendar):
num_missing_rows = len(new_calendar) - data_for_new_calendar.shape[0]
nan_array = np.empty((num_missing_rows, data_for_new_calendar.shape[1]))
nan_array[:] = np.nan
# add the data after the nan rows
data_for_new_calendar = np.vstack([nan_array, data_for_new_calendar])
return pd.DataFrame(data=data_for_new_calendar, index=new_calendar, columns=df.columns)
|
4f5b39494080f3eae9083c78d6dd1666c1945e35
| 3,640,693
|
def get_language_titles():
""" Extract language and title from input file. """
    language_titles = {}
    with open("resources/events/%s.tsv" % args.event) as f:
        input_file = f.readlines()
    for line in sorted(input_file):
try:
language, title = line.split('\t')[0], line.split('\t')[1].strip()
except IndexError:
language, title = line.split(',')[0], line.split(',')[1].strip()
if args.language:
if language != args.language: continue
if language == "lang": continue
if language.startswith("%"): continue # languages with % in front of them can't be scraped.
language_titles[language] = title
return language_titles
|
dedfa8720194aef1b27c7762041692625c2955e7
| 3,640,694
|
def _find_additional_age_entities(request, responder):
"""
If the user has a query such as 'list all employees under 30', the notion of age is
implicit rather than explicit in the form of an age entity. Hence, this function is
beneficial in capturing the existence such implicit entities.
Returns a true/false depending on the existence or lack of the combination of
numerical entities and comparators, thereby indicating an implicit age entitiy or
lack of it, respectively.
"""
try:
comparator_entity = [e for e in request.entities if e['type'] == 'comparator'][0]
num_entity = [float(e['value'][0]['value'])
for e in request.entities
if e['type'] == 'sys_number']
# if any token in the text query is numeric that was missed by the num_entity,
# add it to the list
for i in request.text.split():
try:
num_entity.append(float(i))
except ValueError:
continue
except (IndexError, ValueError):
comparator_entity = []
num_entity = []
    return bool(comparator_entity and num_entity)
|
971bc0805c607134b6947e0d61ebab6f217c6961
| 3,640,695
|
def merge_local_and_remote_resources(resources_local, service_sync_type, service_id, session):
"""
Main function to sync resources with remote server.
"""
if not get_last_sync(service_id, session):
return resources_local
remote_resources = _query_remote_resources_in_database(service_id, session=session)
max_depth = SYNC_SERVICES_TYPES[service_sync_type]("", "").max_depth
merged_resources = _merge_resources(resources_local, remote_resources, max_depth)
_sort_resources(merged_resources)
return merged_resources
|
1809caa17c3a8a32a5a3236b313c575ec939c0d8
| 3,640,696
|
def alertmanager():
"""
to test this:
$ curl -H "Content-Type: application/json" -d '[{"labels":{"alertname":"test-alert"}}]' 172.17.0.2:9093/api/v1/alerts
or
$ curl -H "Content-Type: application/json" -d '{"alerts":[{"labels":{"alertname":"test-alert"}}]}' 127.0.0.1:5000/alertmanager
"""
    alert_json = request.get_json()
    with open(alertfile, 'a') as f:
        for alert in alert_json["alerts"]:
            f.write(alert["labels"]["alertname"])
            f.write('\n')
    return "HTTP 200 received"
|
1e204bd6dce8368c3401cb7e13ea062abebafd71
| 3,640,697
|
def RefundablePayrollTaxCredit(was_plus_sey_p, was_plus_sey_s,
RPTC_c, RPTC_rt,
rptc_p, rptc_s, rptc):
"""
Computes refundable payroll tax credit amounts.
"""
rptc_p = min(was_plus_sey_p * RPTC_rt, RPTC_c)
rptc_s = min(was_plus_sey_s * RPTC_rt, RPTC_c)
rptc = rptc_p + rptc_s
return (rptc_p, rptc_s, rptc)
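# Usage sketch: a 10% credit capped at 500 per person.
print(RefundablePayrollTaxCredit(4000., 6000., 500., 0.10, 0., 0., 0.))
# -> (400.0, 500.0, 900.0)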
|
e282139921045fe8e286abbde6bb4ae44151a50d
| 3,640,699
|
import numpy as np
from scipy.linalg import eigvals  # assumed import; numpy.linalg.eigvals also works
def is_stable(A, domain='z'):
    """Determines if a linear state-space model is stable from the eigenvalues of `A`
    Parameters
    ----------
    A : ndarray(n,n)
        state matrix
    domain : str, optional {'z', 's'}
        'z' for discrete-time, 's' for continuous-time state-space models
    Returns
    -------
    bool
    """
if domain == 'z': # discrete-time
# Unstable if at least one pole outside unit circle
if any(abs(eigvals(A)) > 1):
return False
elif domain == 's': # continuous-time
# Unstable if at least one pole in right-half plane
if any(np.real(eigvals(A)) > 0):
return False
else:
raise ValueError(f"{domain} wrong. Use 's' or 'z'")
return True
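# Usage sketch:
A = np.array([[0.5, 0.0], [0.0, 0.9]])
assert is_stable(A, domain='z')        # eigenvalues inside the unit circle
assert not is_stable(A * 3, domain='z')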
|
8b073fa021b0f50363d4f5f1a7bf3722a62ae71b
| 3,640,700
|
def email_sent_ipn(path: str) -> tuple:
    """
    **email_sent_ipn**
    Delivery IPN (webhook) handler for Mailgun
    :param path: event type (delivered, clicks, opens, failure, spam, unsubscribe)
    :return: OK, 200
    """
# NOTE: Delivered ipn will end up here
if path == "delivered":
pass
elif path == "clicks":
pass
elif path == "opens":
pass
elif path == "failure":
pass
elif path == "spam":
pass
elif path == "unsubscribe":
pass
return "OK", 200
|
4bbfed4f86916ddc2b68ade0c8739e25a562bbda
| 3,640,701
|
import json
def load_posts_view(request):
"""Load posts view, handles asynchronous queries to retrieve more posts.
"""
if request.method == 'GET':
results, start = get_more_posts(request.GET)
json_result = json.dumps({'posts': results,
'start': start
})
return HttpResponse(json_result, mimetype='application/json')
else:
return HttpResponse('', mimetype='application/json')
|
832f2a04b23eb78ad25ad7db2d3cabfdaa61b075
| 3,640,703
|
def create_dataset(m, timestep, var='all', chunks=(10, 300, 300)):
"""
Create xarray Dataset from binary model data
for one time step. This also incorporates all model
grid information and dimensions, regardless of the variable selected.
Parameters
----------
m : LLCRegion
Model class generated with LLCRegion()
var : str, optional
Variable to be read. Defaults to 'all', but only one variable,
e.g. 'v', or a list of variabbles, e.g. ['t', 'v']
can be selected here instead.
chunks : tuple, optional
Chunk size for dask. Defaults to (10, 300, 300)
Returns
-------
ds : xarray Dataset
Dataset
"""
    if var == 'all':  # 'is' comparison with a string literal is unreliable
vars = _model_variables
else:
vars = {k: _model_variables[k] for k in var}
# vars = {var: _model_variables[var]}
# reduce xc/yc, xg/yg to 1d vector
lon, lat = _reduce_2d_coords(m.xc, m.yc)
xc, yc = _reduce_2d_coords(m.xc, m.yc)
xg, yg = _reduce_2d_coords(m.xg, m.yg)
# calculate Zu, Zl, Zp1 (combination of Zu, Zl)
tmp = m.drf
tmp = np.insert(tmp, 0, 0)
Zp1 = np.cumsum(tmp)
Zl = Zp1[0:-1]
Zu = Zp1[1::]
# calculate drc
drc = np.diff(m.z)
drc = np.insert(drc, 0, m.z[0])
drc = np.append(drc, Zp1[-1]-m.z[-1])
# generate xarray dataset with only grid information first
ds = xr.Dataset(coords={'xc': (['xc'], xc, {'axis': 'X'}),
'yc': (['yc'], yc, {'axis': 'Y'}),
'lon': (['xc'], xc, {'axis': 'X'}),
'lat': (['yc'], yc, {'axis': 'Y'}),
'dxc': (['yc', 'xg'], m.dxc),
                            'dyc': (['yg', 'xc'], m.dxc),  # note: uses m.dxc; m.dyc may have been intended
'xg': (['xg'], xg, {'axis': 'X', 'c_grid_axis_shift': -0.5}),
'yg': (['yg'], yg, {'axis': 'Y', 'c_grid_axis_shift': -0.5}),
'dxg': (['yg', 'xc'], m.dxg),
'dyg': (['yc', 'xg'], m.dyg),
'dxv': (['yg', 'xg'], m.dxv),
'dyu': (['yg', 'xg'], m.dyu),
                            'z': (['z'], m.z, {'axis': 'Z'}),
'zl': (['zl'], Zl, {'axis': 'Z', 'c_grid_axis_shift': -0.5}),
'zu': (['zu'], Zu, {'axis': 'Z', 'c_grid_axis_shift': +0.5}),
'zp1': (['zp1'], Zp1, {'axis': 'Z', 'c_grid_axis_shift': (-0.5,0.5)}),
'drc': (['zp1'], drc, {'axis': 'Z'}),
'drf': (['z'], m.drf, {'axis': 'Z'}),
'ra': (['yc', 'xc'], m.rac),
'raz': (['yg', 'xg'], m.raz),
'depth': (['yc', 'xc'], m.hb),
'hfacc': (['z', 'yc', 'xc'], m.hfacc),
'hfacw': (['z', 'yc', 'xg'], m.hfacw),
'hfacs': (['z', 'yg', 'xc'], m.hfacs)})
# define dictionary that will hold dask arrays
d = {}
# read all variables into a dict with dask arrays
for k, v in vars.items():
filename = m.data_dir+'{}/{:010d}_{}'.format(v, timestep, v)+\
'_10609.6859.1_936.1062.90'
# account for funky V file names
if v=='V':
exist = _check_file_exists(filename, verbose=False)
if ~exist:
filename = m.data_dir+'{}/{:010d}_{}'.format(v, timestep, v)+\
'_10609.6858.1_936.1062.90_Neg'
exist = _check_file_exists(filename)
d[k] = da.from_delayed(delayed(m.load_3d_data)(filename), (m.Nz, m.Nlat, m.Nlon), m.dtype)
d[k] = d[k].rechunk(chunks)
for k, v in d.items():
ds[k] = (_grid_association[k], v)
del d
# add 2d variables
    if var == 'all':
vars2d = _model_2dvariables
d = {}
for k, v in vars2d.items():
filename = m.data_dir+'{}/{:010d}_{}'.format(v, timestep, v)+\
'_10609.6859.1_936.1062.1'
exist = _check_file_exists(filename)
d[k] = da.from_delayed(delayed(m.load_2d_data)(filename), (m.Nlat, m.Nlon), m.dtype)
d[k] = d[k].rechunk(chunks[1:])
for k, v in d.items():
ds[k] = (_grid_association[k], v)
del d
return ds
|
02dd6a4e7ff520e5ae65c7a3a9e3bd2b92d58629
| 3,640,704
|
def mummer_cmds_four(path_file_four):
"""Example MUMmer commands (four files)."""
return MUMmerExample(
path_file_four,
[
"nucmer --mum -p nucmer_output/file1_vs_file2 file1.fna file2.fna",
"nucmer --mum -p nucmer_output/file1_vs_file3 file1.fna file3.fna",
"nucmer --mum -p nucmer_output/file1_vs_file4 file1.fna file4.fna",
"nucmer --mum -p nucmer_output/file2_vs_file3 file2.fna file3.fna",
"nucmer --mum -p nucmer_output/file2_vs_file4 file2.fna file4.fna",
"nucmer --mum -p nucmer_output/file3_vs_file4 file3.fna file4.fna",
],
[
(
"delta_filter_wrapper.py delta-filter -1 "
"nucmer_output/file1_vs_file2.delta "
"nucmer_output/file1_vs_file2.filter"
),
(
"delta_filter_wrapper.py delta-filter -1 "
"nucmer_output/file1_vs_file3.delta "
"nucmer_output/file1_vs_file3.filter"
),
(
"delta_filter_wrapper.py delta-filter -1 "
"nucmer_output/file1_vs_file4.delta "
"nucmer_output/file1_vs_file4.filter"
),
(
"delta_filter_wrapper.py delta-filter -1 "
"nucmer_output/file2_vs_file3.delta "
"nucmer_output/file2_vs_file3.filter"
),
(
"delta_filter_wrapper.py delta-filter -1 "
"nucmer_output/file2_vs_file4.delta "
"nucmer_output/file2_vs_file4.filter"
),
(
"delta_filter_wrapper.py delta-filter -1 "
"nucmer_output/file3_vs_file4.delta "
"nucmer_output/file3_vs_file4.filter"
),
],
)
|
65262a16f47b952796f79dcb9bba37c5dcbaed0b
| 3,640,706
|
def Exponweibull(a=1, c=1, scale=1, shift=0):
"""
    Exponentiated Weibull distribution.
Args:
a (float, Dist) : First shape parameter
c (float, Dist) : Second shape parameter
scale (float, Dist) : Scaling parameter
shift (float, Dist) : Location parameter
"""
dist = cores.exponweibull(a, c)*scale + shift
dist.addattr(str="Exponweibull(%s,%s,%s,%s)"%(a, c, scale,shift))
return dist
|
64871830101df96f8148ef6ee0b8735813793306
| 3,640,707
|
def authed_request_for_id(gplus_id, request):
"""Adds the proper access credentials for the specified user and then makes an HTTP request."""
# Helper method to make retry easier
def make_request(retry=True):
token = get_access_token_for_id(gplus_id)
request.headers['Authorization'] = 'Bearer %s' % token
prepared_request = request.prepare()
response = session.send(prepared_request, timeout=GOOGLE_API_TIMEOUT)
if response.status_code == 401:
# Our access token is invalid. If this is the first failure,
# try forcing a refresh of the access token.
if retry:
Cache.delete(ACCESS_TOKEN_CACHE_KEY_TEMPLATE % gplus_id)
return make_request(retry=False)
return response
response = make_request()
if response.status_code == 403:
# Typically used to indicate that Google is rate-limiting the API call
        raise UnavailableException('API 403 response: %r' % response.json(), 503)
elif response.status_code == 401:
raise UnavailableException('Invalid access token.', 401)
elif response.status_code != 200:
raise UnavailableException(
'Unknown API error (code=%d): %r' % (response.status_code, response.json()), 502)
return response
|
f727cc818fd3d5b70fba80b00dfb09cf1f182275
| 3,640,708
|
def _bool_method_SERIES(op, name, str_rep):
"""
Wrapper function for Series arithmetic operations, to avoid
code duplication.
"""
def na_op(x, y):
try:
result = op(x, y)
except TypeError:
if isinstance(y, list):
y = lib.list_to_object_array(y)
if isinstance(y, (np.ndarray, pd.Series)):
if (is_bool_dtype(x.dtype) and is_bool_dtype(y.dtype)):
result = op(x, y) # when would this be hit?
else:
x = com._ensure_object(x)
y = com._ensure_object(y)
result = lib.vec_binop(x, y, op)
else:
try:
# let null fall thru
if not isnull(y):
y = bool(y)
result = lib.scalar_binop(x, y, op)
except:
raise TypeError("cannot compare a dtyped [{0}] array with "
"a scalar of type [{1}]".format(
x.dtype, type(y).__name__))
return result
def wrapper(self, other):
is_self_int_dtype = is_integer_dtype(self.dtype)
fill_int = lambda x: x.fillna(0)
fill_bool = lambda x: x.fillna(False).astype(bool)
if isinstance(other, pd.Series):
name = _maybe_match_name(self, other)
other = other.reindex_like(self)
is_other_int_dtype = is_integer_dtype(other.dtype)
other = fill_int(other) if is_other_int_dtype else fill_bool(other)
filler = fill_int if is_self_int_dtype and is_other_int_dtype else fill_bool
return filler(self._constructor(na_op(self.values, other.values),
index=self.index,
name=name))
elif isinstance(other, pd.DataFrame):
return NotImplemented
else:
# scalars, list, tuple, np.array
filler = fill_int if is_self_int_dtype and is_integer_dtype(np.asarray(other)) else fill_bool
return filler(self._constructor(na_op(self.values, other),
index=self.index)).__finalize__(self)
return wrapper
|
d6dec673d9a0f8384c3510bdda449f8e4157c96e
| 3,640,709
|
def get_playlist_by_id(playlist_id):
""" Returns a playlist by playlist id """
return Playlist.query.filter(Playlist.playlist_id == playlist_id).first()
|
28fd295a5d096b1da40391193e6333cc48b14ea2
| 3,640,710
|
def section_cfield(xs, x_a, c_field, rmax = 60e3):
"""
extract a section of a sound speed transcet for use in xmission calculation
"""
x_i = np.bitwise_and(x_a >= xs, x_a <= xs + rmax)
return x_a[x_i], c_field[:, x_i]
|
c4c213293f7aee7735a9a6209f671aae6d8e3989
| 3,640,711
|
def shared_dropout(shape, use_noise, trng, value):
"""
Shared dropout mask (pervasive dropout)
:param shape:
:param use_noise:
:param trng:
:param value:
:return:
"""
return tensor.switch(use_noise,
trng.binomial(shape, p=value, n=1,
dtype=floatX),
theano.shared(np.float32(value)))
|
51373285b3c708cedd1ebf2a613237deaa7b6dab
| 3,640,712
|
def setup_flow_assembler(gb, method, data_key=None, coupler=None):
"""Setup a standard assembler for the flow problem for a given grid bucket.
The assembler will be set up with primary variable name 'pressure' on the
GridBucket nodes, and mortar_flux for the mortar variables.
Parameters:
gb: GridBucket.
method (EllipticDiscretization).
data_key (str, optional): Keyword used to identify data dictionary for
node and edge discretization.
        coupler (EllipticInterfaceLaw, optional): Defaults to RobinCoupling.
    Returns:
        Assembler, ready to discretize and assemble the problem.
        block_info (np.ndarray): per-block metadata (dim, is_mortar, first dof,
            neighboring subdomain blocks for mortar variables).
"""
if data_key is None:
data_key = "flow"
if coupler is None:
coupler = pp.RobinCoupling(data_key, method)
if isinstance(method, pp.MVEM) or isinstance(method, pp.RT0):
mixed_form = True
else:
mixed_form = False
for g, d in gb:
if mixed_form:
d[pp.PRIMARY_VARIABLES] = {"pressure": {"cells": 1, "faces": 1}}
else:
d[pp.PRIMARY_VARIABLES] = {"pressure": {"cells": 1}}
d[pp.DISCRETIZATION] = {"pressure": {"diffusive": method}}
for e, d in gb.edges():
g1, g2 = gb.nodes_of_edge(e)
d[pp.PRIMARY_VARIABLES] = {"mortar_flux": {"cells": 1}}
d[pp.COUPLING_DISCRETIZATION] = {
"lambda": {
g1: ("pressure", "diffusive"),
g2: ("pressure", "diffusive"),
e: ("mortar_flux", coupler),
}
}
d[pp.DISCRETIZATION_MATRICES] = {"flow": {}}
assembler = pp.Assembler(gb)
num_blocks = assembler.full_dof.size
block_info = np.zeros((num_blocks, 5))
block_start = np.hstack((0, np.cumsum(assembler.full_dof)))
# map from grids to block dof index. Will be unique, since there is a single
# dof per subdomain
subdom_block_map = {}
for (g, var), ind in assembler.block_dof.items():
is_mortar = 0
if var == "mortar_flux":
is_mortar = 1
dim = g[0].dim
else:
dim = g.dim
subdom_block_map[g] = ind
        block_info[ind, :3] = np.array([dim, is_mortar, block_start[ind]], dtype=int)
# Second loop over the blocks. This time, we will fill in the two last
# columns, on neighboring subdomains.
for (g, var), ind in assembler.block_dof.items():
if var == "mortar_flux":
block_info[ind, 3] = subdom_block_map[g[0]]
block_info[ind, 4] = subdom_block_map[g[1]]
else:
block_info[ind, 3:] = np.array([-1, -1])
return assembler, block_info
|
6e1baaf91e06679ef760932f6ae27e0c606e4f21
| 3,640,713
|
def get_article(article_id: str, db: Session = Depends(deps.get_db),
current_user: schemas.UserVerify = Depends(
deps.get_current_user)) -> JSONResponse:
""" Return Single Article"""
data = crud_articles.get_article(article_id=article_id, db=db)
if data is None:
return JSONResponse(status_code=500,
content={"message": "No Records Found"})
json_compatible_item_data = jsonable_encoder(data)
return JSONResponse(status_code=200, content=json_compatible_item_data)
|
e78af6052b112c5da5811a0c92fe462743bb5c7e
| 3,640,714
|
def _get_sp_instance():
"""Create an spotify auth_manager and check whether the current user has
a token (has been authorized already). If the user has a token, then they
are authenticated -- return their spotipy instance. If the user does not have
a token, then they are not authenticated -- raise an exception
"""
auth_manager = _get_auth_manager()
if auth_manager.get_cached_token():
return spotipy.Spotify(auth_manager=auth_manager)
else:
raise SpotifyUserAuthFailure(get_auth_url(show_dialog=True))
|
b2117c709169192626efdf2b699a9a1c2c501ecc
| 3,640,715
|
def get_func_global(op_type, dtype):
"""Generate function for global address space
Used as `generator(op_type, dtype)`.
"""
op = getattr(dppy.atomic, op_type)
def f(a):
op(a, 0, 1)
return f
|
72816c78bc36f7aa630551ae161fa0870acefe36
| 3,640,716
|
def klucb(x, d, div, upperbound, lowerbound=-float("inf"), precision=1e-6):
"""The generic klUCB index computation.
Input args.:
x,
d,
div:
KL divergence to be used.
upperbound,
lowerbound=-float('inf'),
precision=1e-6,
"""
l = max(x, lowerbound)
u = upperbound
while u - l > precision:
m = (l + u) / 2
if div(x, m) > d:
u = m
else:
l = m
return (l + u) / 2
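# --- Editor's hedged usage sketch (not part of the original snippet) ---
# klucb only performs the bisection; the caller supplies the divergence.
# The Bernoulli KL below is one standard choice; kl_bernoulli and the
# constants in the call are illustrative assumptions, not from the source.
import math

def kl_bernoulli(p, q, eps=1e-15):
    """KL(Bernoulli(p) || Bernoulli(q)), clipped for numerical safety."""
    p = min(max(p, eps), 1.0 - eps)
    q = min(max(q, eps), 1.0 - eps)
    return p * math.log(p / q) + (1.0 - p) * math.log((1.0 - p) / (1.0 - q))

# Index for an arm with empirical mean 0.3 after 10 pulls at time t=100:
index = klucb(x=0.3, d=math.log(100) / 10, div=kl_bernoulli, upperbound=1.0)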
|
82aa51e248568d201e0d9d5621bf043532df8572
| 3,640,717
|
def convert_pk_to_index(pk_tuples, indices):
"""
For a list of tuples with elements referring to pk's of indices,
convert pks to 0-index values corresponding to order of queryset
:param pk_tuples: list of tuples [(row_pk, col_pk), ... ]
:param indices: list of querysets
:return: list of tuples [(row_idx, col_idx), ... ]
"""
output_tuples = []
maps = [pk_index_map(idx) for idx in indices]
for pk_tuple in pk_tuples:
try:
idxs = tuple(maps[axis][pk] for axis, pk in enumerate(pk_tuple))
output_tuples.append(idxs)
except KeyError:
# pk may not be in index scope which is fine
pass
return output_tuples
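# --- Editor's hedged sketch (assumption, not from the source) ---
# pk_index_map is not defined in this snippet; a minimal version that maps
# each object's pk to its 0-based position in the queryset could be:
def pk_index_map(queryset):
    return {obj.pk: i for i, obj in enumerate(queryset)}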
|
81837ded50d4cd086b9330ea5c709fb3bd93ca0f
| 3,640,718
|
from typing import Union
def device_path_to_str(path: Union[bytes, str]) -> str:
"""
Converts a device path as returned by the fido2 library to a string.
Typically, the path already is a string. Only on Windows, a bytes object
using an ANSI encoding is used instead. We use the ISO 8859-1 encoding to
decode the string which should work for all systems.
"""
if isinstance(path, bytes):
return path.decode("iso-8859-1", errors="ignore")
else:
return path
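# --- Editor's hedged usage sketch ---
# Bytes paths (the Windows case) decode transparently; str paths pass through.
assert device_path_to_str(b"\\\\?\\hid#vid_1050") == "\\\\?\\hid#vid_1050"
assert device_path_to_str("/dev/hidraw0") == "/dev/hidraw0"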
|
76d0d3d50e978d998ef68e0c509c3933f94778d9
| 3,640,719
|
def empirical(X):
    """Compute empirical covariance as baseline estimator.
    """
    print("Empirical")
    n_samples = X.shape[0]
    cov = np.dot(X.T, X) / n_samples
    return cov, np.linalg.inv(cov)
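# --- Editor's hedged usage sketch (assumes numpy is imported as np) ---
rng = np.random.default_rng(0)
X = rng.standard_normal((500, 4))
cov, precision = empirical(X)
assert cov.shape == (4, 4) and precision.shape == (4, 4)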
|
67c8c1f42590ee6c8d56f5f1e53253c5eff74376
| 3,640,720
|
def emitir_extrato(contas, numero_conta, movimentacoes, data_inicial):
"""
Retorna todas as movimentações de <movimentacoes> feitas pela conta
com o <numero_conta> a partir da <data_inicial>
"""
historico_movimentacoes = []
if numero_conta in contas:
minhas_movimentacoes = movimentacoes_da_conta(numero_conta, movimentacoes)
inicial = -1
        # Determine the date from which the statement will be issued
for i, movimentacao in enumerate(minhas_movimentacoes):
data_movimentacao = movimentacao[5]
            # Check which date is more recent: the statement's start date or
            # the date of the movement in question
if verificar_data_mais_recente(data_inicial, data_movimentacao):
continue
inicial = i
break
        # If any movement exists after the requested date, keep all of those
        # movements to return
if(inicial >= 0):
historico_movimentacoes = minhas_movimentacoes[inicial:]
return historico_movimentacoes
else:
return 0
|
0caa46aaed0ccfa506f8caa9b82625649d116ce1
| 3,640,721
|
def wavelength_to_energy(wavelength):
"""
Converts wavelength (A) to photon energy (keV)
"""
return 12.39842/wavelength
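# --- Editor's hedged worked example ---
# 12.39842 is h*c expressed in keV*Angstrom, so Cu K-alpha radiation at
# 1.5406 A corresponds to roughly 8.05 keV:
assert abs(wavelength_to_energy(1.5406) - 8.048) < 1e-2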
|
4e2d11f2de8ed4890df5d885801cd492644817d8
| 3,640,722
|
import hashlib
import os
def calculate_hash_512(filepath, verbose):
    """
    SHA512 Hash Digest
    """
    if verbose:
        print('Calculating hash...')
    sha512_hash = hashlib.sha512()
    with open(filepath, 'rb') as f:
        statinfo = os.stat(filepath)
        block_size = 100 * (2**20)  # Magic number: 100 * 1MB blocks
        nb_blocks = (statinfo.st_size // block_size) + 1
        cnt_blocks = 0
        while True:
            block = f.read(block_size)
            if not block:
                break
            sha512_hash.update(block)
            cnt_blocks = cnt_blocks + 1
            progress = 100 * cnt_blocks // nb_blocks
            if verbose:
                draw_progress_bar(progress)
    return sha512_hash.digest()
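# --- Editor's hedged usage sketch ---
# With verbose=False the undefined draw_progress_bar helper is never called;
# hashing this file should yield a 64-byte SHA-512 digest.
digest = calculate_hash_512(__file__, verbose=False)
assert len(digest) == 64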
|
4bf153275d9791112f39d3629e9cc94f54177dc4
| 3,640,723
|
def _crop_after_rotation(im, angle, xres, yres, surroundings):
"""Crop image to the bounding box of bite's surroundings.
Arguments:
im: PIL.Image, rotated map part
angle: by which the map has been rotated, in degrees (counterclockwise)
xres: width of one tile in pixels
yres: height of one tile in pixels
surroundings: shapely.geometry.polygon.Polygon
"""
#before rotation
x1, y1, x2, y2 = surroundings.bounds
old_bb_upper_left = Point(x1, y1)
old_bb_upper_right = Point(x2, y1)
old_bb_bottom_left = Point(x1, y2)
old_bb_center = ((x1+x2)/2, (y1+y2)/2)
#shapely y-axis goes upwards
shapely_angle = -angle
#after rotation
x1, y1, x2, y2 = affinity.rotate(surroundings, shapely_angle, origin=old_bb_center).bounds
crop_upper_left = Point(x1, y1)
crop_width = x2 - x1
crop_height = y2 - y1
#points where old bounding box of surroundings (i.e. the old image) touches
#its bounding box after rotation
tl = None #touch at the left side of the new bounding box
tt = None #touch at the top side of the new bounding box
if angle > 0:
tl = affinity.rotate(old_bb_upper_left, shapely_angle, origin=old_bb_center)
tt = affinity.rotate(old_bb_upper_right, shapely_angle, origin=old_bb_center)
else:
tl = affinity.rotate(old_bb_bottom_left, shapely_angle, origin=old_bb_center)
tt = affinity.rotate(old_bb_upper_left, shapely_angle, origin=old_bb_center)
    #upper left corner of the new bounding box
new_bb_upper_left = Point(tl.x, tt.y)
#from these we get b: upper left corner of the crop area relative to new_bb_upper_left
b = (crop_upper_left.x - new_bb_upper_left.x, crop_upper_left.y - new_bb_upper_left.y)
#crop rectangle in pixels relative to new_bb_upper_left
crop_box = [int(x) for x in [
b[0] * xres,
b[1] * yres,
(b[0] + crop_width) * xres,
(b[1] + crop_height) * yres
]]
cropped = im.crop(box=crop_box)
cropped.load()
return cropped
|
eeeda2c5c8d868e813a67584c72561560409e1b3
| 3,640,724
|
from copy import copy
def get_custom_scorer(metric, gib=True, needs_proba=False, needs_threshold=False):
"""Get a scorer from a str, func or scorer.
Scorers used by ATOM have a name attribute.
Parameters
----------
metric: str, func or scorer
Name, metric or scorer to get ATOM's scorer from.
gib: bool, optional (default=True)
        Whether the metric is a score function or a loss function,
i.e. if True, a higher score is better and if False, lower is
better. Is ignored if the metric is a string or a scorer.
needs_proba: bool, optional (default=False)
Whether the metric function requires probability estimates of
a classifier. Is ignored if the metric is a string or a scorer.
needs_threshold: bool, optional (default=False)
Whether the metric function takes a continuous decision
certainty. Is ignored if the metric is a string or a scorer.
Returns
-------
scorer: scorer
Custom sklearn scorer with name attribute.
"""
# Copies are needed to not alter SCORERS
if isinstance(metric, str):
metric = metric.lower()
if metric in SCORERS:
scorer = copy(SCORERS[metric])
scorer.name = metric
elif metric in SCORERS_ACRONYMS:
scorer = copy(SCORERS[SCORERS_ACRONYMS[metric]])
scorer.name = SCORERS_ACRONYMS[metric]
elif metric in CUSTOM_SCORERS:
scorer = make_scorer(copy(CUSTOM_SCORERS[metric]))
scorer.name = scorer._score_func.__name__
else:
raise ValueError(
"Unknown value for the metric parameter, got "
f"{metric}. Choose from: {', '.join(SCORERS)}."
)
elif hasattr(metric, "_score_func"): # Scoring is a scorer
scorer = copy(metric)
# Some scorers use default kwargs
default_kwargs = ("precision", "recall", "f1", "jaccard")
if any(name in scorer._score_func.__name__ for name in default_kwargs):
if not scorer._kwargs:
scorer._kwargs = {"average": "binary"}
for key, value in SCORERS.items():
if scorer.__dict__ == value.__dict__:
scorer.name = key
break
else: # Scoring is a function with signature metric(y, y_pred)
scorer = make_scorer(
score_func=metric,
greater_is_better=gib,
needs_proba=needs_proba,
needs_threshold=needs_threshold,
)
scorer.name = scorer._score_func.__name__
return scorer
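# --- Editor's hedged usage sketch ---
# Fetch a scorer by its sklearn name, or wrap a plain metric(y, y_pred)
# function; my_accuracy is an illustrative assumption, not from the source.
f1 = get_custom_scorer("f1")

def my_accuracy(y, y_pred):
    return float(sum(a == b for a, b in zip(y, y_pred))) / len(y)

acc = get_custom_scorer(my_accuracy)  # gib defaults to True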
|
a698798302ec0ed7ad469b76d6893e85e669905e
| 3,640,725
|
def julian_day(t='now'):
"""
Wrap a UTC -> JD conversion from astropy.
"""
return Time(parse_time(t)).jd
|
fa2f0d707798227e8e7f67b21cf2e4dc42308093
| 3,640,726
|
from collections import Counter
def add_stop_words(dataframe: pd.DataFrame,
k_words: int) -> list:
"""
    Get the list of stop words that occur most frequently in the document
    :param dataframe:
    :param k_words: number of the most frequently repeated unique words
:return:
"""
split_words = dataframe['text'].values
split_words = " ".join(split_words)
split_words = split_words.split()
_counter = Counter(split_words).most_common(k_words)
n_words = [i[0] for i in _counter]
return list(set(n_words))
|
3ca7fbe1221b55e2a072d49f01c553af1786ca8f
| 3,640,727
|
import torch
def get_batch(data_iterator):
"""Build the batch."""
# Items and their type.
keys = ['text', 'types', 'labels', 'is_random', 'loss_mask', 'padding_mask']
datatype = torch.int64
# Broadcast data.
data = next(data_iterator) if data_iterator is not None else None
data_b = mpu.broadcast_data(keys, data, datatype)
# Unpack.
tokens = data_b['text'].long()
types = data_b['types'].long()
sentence_order = data_b['is_random'].long()
loss_mask = data_b['loss_mask'].float()
lm_labels = data_b['labels'].long()
padding_mask = data_b['padding_mask'].long()
return tokens, types, sentence_order, loss_mask, lm_labels, padding_mask
|
fad3b181c685e3e57fa185c1eb790517536527ec
| 3,640,728
|
def pool(sparkdf, start_column, end_column, var):
"""
Generate pools and calculate maximum var unpooled.
:param sparkdf: Input Spark dataframe.
:param start_column: Start time column name.
:param end_column: End time column name.
:param var: Variable for which to calculate metric.
:return: A Spark dataframe with pools (sizes and counts).
:return: Maximum active metric for var.
"""
starts_dict, ends_dict, starts_sorted, ends_sorted = sorted_dicts(sparkdf, start_column, end_column, var)
    size_groups = {s: {'current': 0, 'max': 0} for s in [r[0] for r in sparkdf.select(var).distinct().collect()]}
active = {'current': 0, 'max': 0}
start_index, end_index = 0, 0
while start_index < len(starts_sorted) or end_index < len(ends_sorted):
start, end = None, ends_sorted[end_index]
if start_index < len(starts_sorted):
start = starts_sorted[start_index]
if start is None or start > end:
group = size_groups[ends_dict[end]]
group['current'] -= 1
active['current'] -= ends_dict[end]
end_index += 1
else:
group = size_groups[starts_dict[start]]
group['current'] += 1
if group['current'] > group['max']:
group['max'] = group['current']
active['current'] += starts_dict[start]
if active['current'] > active['max']:
active['max'] = active['current']
start_index += 1
pool_counts = [{var: int(s), 'count': int(size_groups[s]['max'])} for s in size_groups.keys()]
max_unpooled = active['max']
return pool_counts, max_unpooled
|
913094bebc6f91ad023d83186084d858a7332531
| 3,640,730
|
def jwt_response_payload_handler(token, user=None, request=None):
"""
自定义jwt返回
def jwt_response_payload_handler(token, user=None, request=None):
return {
'token': token,
'user': UserSerializer(user, context={'request': request}).data
}
:param token:
:param user:
:param request:
:return:
"""
return {
'token': token,
'user': UserSerializer(user, context={'request': request}).data
}
|
972f4cbd39d9bd049fcd7a99bfc168e6c825572a
| 3,640,731
|
import requests
def query_yelp_lookup(biz_id):
""" Lookup resturant using id """
headers = {'Authorization': ('Bearer '
'w5JFtwCUKq05GlSpm8cKo51dBYDQ6r9tyzo-qRsKt4wDyB5'
'_ro6gW5gnG9hS6bvnNHNxOQLHfw7o_9S1e86nkvgcU7DQI_'
'sM6GVt9rqcq_rRYKtagQrexuH0zsU0WXYx')}
url = 'https://api.yelp.com/v3/businesses/' + biz_id
query = requests.get(url, headers=headers)
return query.json()
|
ab2087d42833f0092229870ab3208a24bd041b95
| 3,640,732
|
def dashboard(request, condition='recent'):
"""Dashboard"""
post_count = settings.DASHBOARD_POST_COUNT
comment_count = settings.DASHBOARD_COMMENT_COUNT
if condition == 'recent':
order = '-id'
elif condition == 'view':
order = '-view_count'
elif condition == 'like':
order = '-like_count'
elif condition == 'comment':
order = '-comment_count'
else:
return error_page(request)
posts = Blog.objects.filter(status='1normal').order_by(order)[:post_count]
comments = Comment.objects.filter(
status='1normal').order_by('-id')[:comment_count]
total_posts = Blog.objects.filter(status='1normal').count()
total_comments = Comment.objects.filter(status='1normal').count()
total_spams = Comment.objects.filter(status='7spam').count()
total_users = User.objects.count()
return render(
request,
"blogs/dashboard.html",
{
'posts': posts,
'comments': comments,
'condition': condition,
'total_posts': total_posts,
'total_comments': total_comments,
'total_spams': total_spams,
'total_users': total_users,
}
)
|
fc5422bf580a4608e921b7d59caf7f0ea58a50fd
| 3,640,733
|
def read_manifest(path):
"""Read dictionary of workflows from the Packal manifest.xml file."""
workflows = {}
tree = ET.parse(path)
root = tree.getroot()
for workflow in root:
data = {"packal": True}
for child in workflow:
if child.tag == "short":
data["description"] = child.text.strip()
else:
data[child.tag] = child.text.strip() if child.text else None
# print(child.tag, ':', child.text)
data["author_url"] = packal_user_url(data["author"])
if "bundle" in data:
workflows[data["bundle"]] = data
return workflows
|
8f91126f4a48c0b1af357487ffe791ba790c7745
| 3,640,734
|
def _load_v1_txt(path):
"""Parses a SIF V1 text file, returning numpy arrays.
Args:
path: string containing the path to the ASCII file.
Returns:
      A tuple of 6 elements:
constants: A numpy array of shape (element_count). The constant
associated with each SIF element.
centers: A numpy array of shape (element_count, 3). The centers of the
SIF elements.
radii: A numpy array of shape (element_count, 3). The axis-aligned
radii of the gaussian falloffs.
rotations: A numpy array of shape (element_count, 3). The euler-angle
rotations of the SIF elements.
symmetry_count: An integer. The number of elements which are left-right
symmetric.
features: A numpy array of shape (element_count, implicit_len). The LDIF
neural features, if they are present.
"""
lines = file_util.readlines(path)
if lines[0] != 'SIF':
raise ValueError(f'Could not parse {path} as a sif txt. First line was {lines[0]}')
shape_count, version, implicit_len = [int(x) for x in lines[1].split(' ')]
version += 1
if version != 1:
raise ValueError(f'This function can only parse v1 files. This version: {version}.')
symmetry_count = 0
last_was_symmetric = True
constants = []
centers = []
radii = []
rotations = []
features = []
for row in lines[2:]:
elts = row.split(' ')
if len(elts) != 11 + implicit_len:
raise ValueError('Failed to parse the following row with '
f'implicit_len {implicit_len}: {row}')
explicit_params = np.array([float(x) for x in elts[:10]], dtype=np.float32)
is_symmetric = bool(int(elts[10]))
if is_symmetric:
symmetry_count += 1
if not last_was_symmetric:
raise ValueError(f'File not supported by parser: row {row} is '
'symmetric but follows an asymmetric element.')
constants.append(explicit_params[0])
centers.append(explicit_params[1:4])
radii.append(explicit_params[4:7])
rotations.append(explicit_params[7:10])
if implicit_len > 0:
implicit_params = np.array([float(x) for x in elts[11:]], dtype=np.float32)
features.append(implicit_params)
constants = np.stack(constants)
centers = np.stack(centers)
radii = np.stack(radii)
rotations = np.stack(rotations)
features = np.stack(features) if features else None
# Radii have their sqrt stored for GAPS:
radii = radii * radii
return constants, centers, radii, rotations, symmetry_count, features
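# --- Editor's hedged sketch of the expected file layout ---
# Read off the parser above (an inference from the code, not documentation):
# line 1: the literal header 'SIF'
# line 2: '<shape_count> <stored_version> <implicit_len>', where the stored
#         version is 0 for v1 files (the parser adds 1 before checking)
# then one row per element with 11 + implicit_len space-separated fields:
#   constant, center xyz, sqrt-radii xyz, euler rotations xyz,
#   symmetry flag (0/1), then implicit_len neural features.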
|
9f7ea3f0059ef3688cc962e9836a558debebf80f
| 3,640,735
|
def split_model_name(model):
"""
Split model names by _
Takes into account packages with _ and processor types with _
"""
model = model[:-3].replace('.', '_')
# sort by key length so that nertagger is checked before tagger, for example
for processor in sorted(ending_to_processor.keys(), key=lambda x: -len(x)):
if model.endswith(processor):
model = model[:-(len(processor)+1)]
processor = ending_to_processor[processor]
break
else:
raise AssertionError(f"Could not find a processor type in {model}")
lang, package = model.split('_', 1)
return lang, package, processor
|
305a70899eb8eb3c5beca4c7e7403010a008a80d
| 3,640,736
|
from .divine1983 import JupiterD4Field
from .distributions import DG83Distribution
from .integrate import FormalRTIntegrator
from .synchrotron import NeuroSynchrotronCalculator
def dg83_setup(
ghz = 95,
lat_of_cen = 10,
cml = 20,
n_alpha = 10,
n_E = 10,
E0 = 0.1,
E1 = 10.,
nn_dir = None,
no_synch = False,
):
"""Create and return a VanAllenSetup object prepared to use the Divine &
Garrett 1983 model of Jupiter's magnetic field and plasma.
ghz
The observing frequency, in GHz.
lat_of_cen
The body's latitude-of-center, in degrees.
cml
The body's central meridian longitude, in degrees.
n_alpha
Number of pitch angles to sample when deriving p/k distribution parameters.
n_E
Number of energies to sample when deriving p/k distribution parameters.
E0
Low end of energy sampling regime, in MeV.
E1
High end of energy sampling regime, in MeV.
nn_dir
The directory with the neural-network data used to generate synchrotron
radiative transfer coefficients.
no_synch
        If true, ignore `nn_dir` and do not load synchrotron computation info.
Makes things faster if you just want to evaluate the DG83 model and not
actually do any radiative transfer.
"""
lat_of_cen *= astutil.D2R
cml *= astutil.D2R
o2b = ObserverToBodycentric(lat_of_cen, cml)
bfield = JupiterD4Field()
distrib = DG83Distribution()
distrib.n_alpha = n_alpha
distrib.n_E = n_E
distrib.E0 = E0
distrib.E1 = E1
ray_tracer = FormalRayTracer()
ray_tracer.ne0_cutoff = 1e-6
rad_trans = FormalRTIntegrator()
if no_synch:
synch_calc = None
else:
synch_calc = NeuroSynchrotronCalculator(nn_dir=nn_dir)
return VanAllenSetup(o2b, bfield, distrib, ray_tracer, synch_calc,
rad_trans, cgs.rjup, ghz * 1e9)
|
94aea682df900d600e922ff560109255e2b69ac7
| 3,640,737
|
def compute() -> int:
"""
Returns the sum of all numbers whose
sum of the factorials of all digits
add up to the number itself.
>>> compute()
40730
"""
return sum(
num
for num in range(3, 7 * factorial(9) + 1)
if sum_of_digit_factorial(num) == num
)
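# --- Editor's hedged sketch of the helper this snippet assumes ---
# sum_of_digit_factorial is not defined here; a minimal version consistent
# with the docstring (digit-factorial sums, Project Euler style) could be:
from math import factorial

def sum_of_digit_factorial(n: int) -> int:
    """Sum of the factorials of the digits of n."""
    return sum(factorial(int(digit)) for digit in str(n))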
|
c2460158eb7d32142b4f59801bdc307a0ba1d4ff
| 3,640,738
|
import heapq
def dijkstra(matrix, start=None, end=None):
    """
    Implementation of Dijkstra's algorithm to find the (s,t)-shortest path between top-left and bottom-right nodes
    on an nxn grid graph (with 8-neighbourhood).
    NOTE: This is a vertex variant of the problem, i.e. nodes carry weights, not edges.
    :param matrix (np.ndarray [grid_dim, grid_dim]): Matrix of node-costs.
    :return: matrix (np.ndarray [grid_dim, grid_dim]), indicator matrix of nodes on the shortest path.
    """
    if start is None:
        start = (0, 0)
    def neighbors_func(pos):
        pos = np.array(pos)
        neighbors = get_neighbor_pattern(dim=2)
        for off in neighbors:
            new_pos = pos + off
            if np.all(new_pos >= 0) and np.all(new_pos < matrix.shape):
                yield new_pos
    costs = np.full_like(matrix, 1.0e10)
    costs[start] = matrix[start]
    priority_queue = [(matrix[start], start)]
    certain = set()
    transitions = dict()
    while priority_queue:
        _, (cur_x, cur_y) = heapq.heappop(priority_queue)
        if (cur_x, cur_y) in certain:
            continue
        for x, y in neighbors_func((cur_x, cur_y)):
            if (x, y) not in certain:
                if matrix[x][y] + costs[cur_x][cur_y] < costs[x][y]:
                    costs[x][y] = matrix[x][y] + costs[cur_x][cur_y]
                    heapq.heappush(priority_queue, (costs[x][y], (x, y)))
                    transitions[(x, y)] = (cur_x, cur_y)
        certain.add((cur_x, cur_y))
    if end is None:
        return transitions
    # retrieve the path by walking back from the end node
    cur_x, cur_y = end
    on_path = np.zeros_like(matrix)
    on_path[end] = 1
    while (cur_x, cur_y) != start:
        cur_x, cur_y = transitions[(cur_x, cur_y)]
        on_path[cur_x, cur_y] = 1.0
    return on_path
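# --- Editor's hedged sketch of the missing helper ---
# get_neighbor_pattern is not defined in this snippet; an 8-neighbourhood
# offset generator consistent with its use above could be the following
# (an assumption; also assumes numpy imported as np, as in the function):
from itertools import product

def get_neighbor_pattern(dim=2):
    return [np.array(off) for off in product((-1, 0, 1), repeat=dim)
            if any(off)]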
|
96338e6c65e1ff88025971361e2b36c0f1efe2af
| 3,640,739
|
def is_finally_visible_segm(*args):
"""is_finally_visible_segm(segment_t s) -> bool"""
return _idaapi.is_finally_visible_segm(*args)
|
9050bd583208824859e71e84f02169237b3ac9f2
| 3,640,740
|
def get_undisbursed_principal(loan):
"""Gets undisbursed principal"""
principal = frappe.get_value("Microfinance Loan", loan, "loan_principal")
if not principal:
raise frappe.DoesNotExistError("Loan: {} not found".format(loan))
return principal - get_disbursed(loan)
|
7829b93eb1e6298e8640290c94b2b2aacb0de8bd
| 3,640,742
|
def northing_and_easting(dictionary):
"""
Retrieve and return the northing and easting strings to be used as
dictionary keys
Parameters
----------
dictionary : dict
Returns
-------
northing, easting : tuple
"""
    if not ('x' in dictionary and 'y' in dictionary):
northing = 'latitude'
easting = 'longitude'
else:
northing = 'x'
easting = 'y'
return northing, easting
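# --- Editor's hedged usage sketch (after the boolean fix above) ---
assert northing_and_easting({'x': 1, 'y': 2}) == ('x', 'y')
assert northing_and_easting({'latitude': 0.0, 'longitude': 0.0}) == ('latitude', 'longitude')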
|
2f41d8b681d27f6ef29265c1945591ea18bba79f
| 3,640,743
|
import math
def affine(p, scale, theta, offset):
""" Scale, rotate and translate point """
return arcpy.Point((p.X * math.cos(theta) - p.Y * math.sin(theta)) * scale.X + offset.X,
(p.X * math.sin(theta) + p.Y * math.cos(theta)) * scale.Y + offset.Y)
|
2d1cd34ed94ee0c4e7ecbb786510c0165b9fca9d
| 3,640,746
|
def GetMarkedPos(slot):
"""
Get marked position
    @param slot: slot number in the range 1..1024; if the specified value
                 is <= 0, IDA will ask the user to select a slot.
@return: BADADDR - the slot doesn't contain a marked address
otherwise returns the marked address
"""
curloc = idaapi.curloc()
intp = idaapi.int_pointer()
intp.assign(slot)
return curloc.markedpos(intp)
|
2c6fc7bac4a389c0cafd119fbef537e135b7f745
| 3,640,747
|
def elslib_CylinderParameters(*args):
"""
* parametrization P (U, V) = Location + V * ZDirection + Radius * (Cos(U) * XDirection + Sin (U) * YDirection)
:param Pos:
:type Pos: gp_Ax3
:param Radius:
:type Radius: float
:param P:
:type P: gp_Pnt
:param U:
:type U: float &
:param V:
:type V: float &
:rtype: void
"""
return _ElSLib.elslib_CylinderParameters(*args)
|
5fa697d09866747be2ef98b1b913b7aeb59fcf79
| 3,640,748
|
def totaled_no_review_url(cc, sql_time_specification): # pragma: no cover
"""Counts the number of commits with no review url in a given timeframe
Args:
cc(cursor)
sql_time_specification(str): a sql command to limit the dates of the
returned results
Return:
count(int): a count of all commits with no review_url
    results(list): a list of lists with all commits that have no review url,
      in the format [review_url, git_timestamp, git_subject, git_hash]
"""
cc.execute("""SELECT git_commit.review_url, git_commit.timestamp,
git_commit.subject, git_commit.hash
FROM git_commit
WHERE git_commit.review_url = ''
AND %s""" % sql_time_specification)
result = cc.fetchall()
count = len(result)
formatted_data = []
for data in result:
subject = data[2]
formatted_data.append([data[0], data[1].strftime("%Y-%m-%d %H:%M:%S"),
subject.replace('-', ' '), data[3]])
results = sorted(formatted_data, key=lambda x: x[1], reverse=True)
return count, results
|
027f49b13316ecb36eed3e7dde880848b261e3b4
| 3,640,749
|
import warnings
def is_sat(formula, solver_name=None, logic=None, portfolio=None):
""" Returns whether a formula is satisfiable.
:param formula: The formula to check satisfiability
:type formula: FNode
:param solver_name: Specify the name of the solver to be used
:type solver_name: string
:param logic: Specify the logic that is going to be used
:param portfolio: A list of solver names to perform portfolio solving.
:type portfolio: An iterable of solver names
:returns: Whether the formula is SAT or UNSAT.
:rtype: bool
"""
env = get_env()
if formula not in env.formula_manager:
warnings.warn("Warning: Contextualizing formula during is_sat")
formula = env.formula_manager.normalize(formula)
return env.factory.is_sat(formula,
solver_name=solver_name,
logic=logic,
portfolio=portfolio)
|
9121747de68aa531c7c7e9c9f683cd1f1518e54b
| 3,640,750
|
import math
def bounds(*tile):
"""Returns the bounding box of a tile
Parameters
----------
tile : Tile or tuple
May be be either an instance of Tile or 3 ints (X, Y, Z).
Returns
-------
LngLatBbox
"""
tile = _parse_tile_arg(*tile)
xtile, ytile, zoom = tile
Z2 = math.pow(2, zoom)
ul_lon_deg = xtile / Z2 * 360.0 - 180.0
ul_lat_rad = math.atan(math.sinh(math.pi * (1 - 2 * ytile / Z2)))
ul_lat_deg = math.degrees(ul_lat_rad)
lr_lon_deg = (xtile + 1) / Z2 * 360.0 - 180.0
lr_lat_rad = math.atan(math.sinh(math.pi * (1 - 2 * (ytile + 1) / Z2)))
lr_lat_deg = math.degrees(lr_lat_rad)
return LngLatBbox(ul_lon_deg, lr_lat_deg, lr_lon_deg, ul_lat_deg)
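# --- Editor's hedged worked example ---
# Tile (0, 0) at zoom 1 is the north-west quadrant of the web-mercator
# world: longitudes [-180, 0] and latitudes [0, ~85.05].
west, south, east, north = bounds(0, 0, 1)
assert (west, east, south) == (-180.0, 0.0, 0.0)
assert 85.0 < north < 85.1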
|
ed2eb5865d21033029ddfcdd133663c9d222687d
| 3,640,751
|