content
stringlengths 35
762k
| sha1
stringlengths 40
40
| id
int64 0
3.66M
|
|---|---|---|
def interpret_go_point(s, size):
    """Convert a raw SGF Go Point, Move, or Stone value to coordinates.
    s -- 8-bit string
    size -- board size (int)
    Returns a pair (row, col), or None for a pass.
    Raises ValueError if the string is malformed or the coordinates are out of
    range.
    Only supports board sizes up to 26.
    The returned coordinates are in the GTP coordinate system (as in the rest
    of gomill), where (0, 0) is the lower left.
    """
    # An empty value, or "tt" on boards up to 19x19, denotes a pass.
    if s == b"" or (s == b"tt" and size <= 19):
        return None
    # Unpacking raises ValueError for anything other than exactly two items.
    col_s, row_s = s
    # SGF columns run left-to-right from 'a'; SGF rows run top-to-bottom,
    # so flip them into the bottom-left-origin GTP system.
    col = _bytestring_ord(col_s) - 97    # 97 == ord("a")
    row = size - 1 - (_bytestring_ord(row_s) - 97)
    if not (0 <= col < size):
        raise ValueError
    if not (0 <= row < size):
        raise ValueError
    return row, col
|
6b15b141e9fe5fc4195133f24925672522cdcb35
| 3,638,218
|
def get_domain_name_for(host_string):
    """
    Replaces namespace:serviceName syntax with serviceName.namespace one,
    appending default as namespace if None exists
    """
    # Fall back to the "default" namespace when none is given.
    if ":" not in host_string:
        host_string = "default:" + host_string
    # "ns:svc" -> ["ns", "svc"] -> "svc.ns"
    parts = host_string.split(":")
    parts.reverse()
    return ".".join(parts)
|
6084e299f31d9c2eb922783d0488e9672051443f
| 3,638,219
|
def bbox_classify(bboxes, possible_k):
    """bbox: x, y, w, h
    return: best kmeans score anchor classes [(w1, h1), (w2, h2), ...]
    """
    # Only the width/height pair of each box matters for anchor clustering.
    sizes = [box[2:4] for box in bboxes]
    return anchors_classify(sizes, possible_k)
|
5387c1441c94f4af0633b9cf73b0e5e53ce1bc9b
| 3,638,220
|
def cleanFAAText(origText):
    """Take FAA text message and trim whitespace from end.
    FAA text messages have all sorts of trailing whitespace
    issues. We split the message into lines, remove all right
    trailing whitespace from each line, then recombine them
    into a uniform version with no trailing whitespace.
    The final line will not have a newline character at the
    end.
    Args:
        origText (str): Message text as it comes from the FAA.
    Returns:
        str: Cleaned up text as described above.
    """
    # Right-strip every line, then strip any trailing blank lines/whitespace
    # left over after the join.
    stripped = [line.rstrip() for line in origText.split('\n')]
    return '\n'.join(stripped).rstrip()
|
ea9882e24c60acaa35cae97f8e95acb48f5fd2a6
| 3,638,221
|
def LoadModel(gd_file, ckpt_file):
    """Load the model from GraphDef and Checkpoint.
    Args: gd_file: GraphDef proto text file. ckpt_file: TensorFlow Checkpoint file.
    Returns: TensorFlow session and tensors dict."""
    # NOTE(review): this is TF1-style code (tf.gfile, tf.GraphDef, tf.Session);
    # it will not run unmodified under TF2 without tf.compat.v1.
    with tf.Graph().as_default():
        #class FastGFile: File I/O wrappers without thread locking.
        with tf.gfile.FastGFile(gd_file, 'r') as f:
            # Py 2: s = f.read().decode()
            s = f.read()
            # Serialized version of Graph
            gd = tf.GraphDef()
            # Merges an ASCII representation of a protocol message into a message.
            text_format.Merge(s, gd)
        tf.logging.info('Recovering Graph %s', gd_file)
        t = {}
        # Import the graph and bind the named output tensors into a dict.
        # The dict keys intentionally mirror the tensor names, except
        # 'all_embs' <- 'all_embs_out:0' and 'softmax_weights' <- 'Reshape_3:0'.
        [t['states_init'], t['lstm/lstm_0/control_dependency'],
         t['lstm/lstm_1/control_dependency'], t['softmax_out'], t['class_ids_out'],
         t['class_weights_out'], t['log_perplexity_out'], t['inputs_in'],
         t['targets_in'], t['target_weights_in'], t['char_inputs_in'],
         t['all_embs'], t['softmax_weights'], t['global_step']
         ] = tf.import_graph_def(gd, {}, ['states_init',
                                          'lstm/lstm_0/control_dependency:0',
                                          'lstm/lstm_1/control_dependency:0',
                                          'softmax_out:0',
                                          'class_ids_out:0',
                                          'class_weights_out:0',
                                          'log_perplexity_out:0',
                                          'inputs_in:0',
                                          'targets_in:0',
                                          'target_weights_in:0',
                                          'char_inputs_in:0',
                                          'all_embs_out:0',
                                          'Reshape_3:0',
                                          'global_step:0'], name='')
        sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
        # Restore the checkpoint by feeding its path to the saver's Const input.
        sess.run('save/restore_all', {'save/Const:0': ckpt_file})
        sess.run(t['states_init'])
    return sess, t
|
08089910da145141df8446c1aab9d697b15a3aa6
| 3,638,222
|
from bs4 import BeautifulSoup
import re
def get_additional_rent(offer_markup):
    """ Searches for additional rental costs
    :param offer_markup:
    :type offer_markup: str
    :return: Additional rent
    :rtype: int
    """
    soup = BeautifulSoup(offer_markup, "html.parser")
    for item in soup.find_all(class_="item"):
        if "Czynsz" in item.text:
            # Concatenate every digit group, e.g. "1 200 zl" -> 1200.
            digits = re.findall(r'\d+', item.text)
            return int("".join(digits))
    return
|
8836beda16e21fe214344d647de9260195afa6a7
| 3,638,223
|
def make_known_disease_variants_filter(sample_ids_list=None):
    """ Function for retrieving known disease variants by presence in Clinvar and Cosmic."""
    # A variant counts as "known" when it has a non-benign ClinVar record
    # or any Cosmic identifier.
    clinvar_clause = {
        "$and": [
            {"clinvar.rcv.accession": {"$exists": True}},
            {"clinvar.rcv.clinical_significance": {"$nin": ["Benign", "Likely benign"]}},
        ]
    }
    cosmic_clause = {"cosmic.cosmic_id": {"$exists": True}}
    result = {"$or": [clinvar_clause, cosmic_clause]}
    if sample_ids_list is not None:
        result = _append_sample_id_constraint_if_needed([result], sample_ids_list)
    return result
|
288e5a0daa254016f9c1e1ee8e3106ea532008ec
| 3,638,224
|
import multiprocessing
def sharedArray(dtype, dims):
    """Create a numpy array backed by an unlocked shared-memory buffer."""
    n_items = int(np.prod(dims))
    raw = multiprocessing.Array(dtype, n_items, lock=False)
    # Wrap the shared buffer without copying, then shape it.
    return np.frombuffer(raw, dtype=dtype).reshape(dims)
|
e01b20f0f21386dd2ec8e1952547fbc9fc15cb65
| 3,638,225
|
def _read_hyperparameters(idx, hist):
"""Read hyperparameters as a dictionary from the specified history dataset."""
return hist.iloc[idx, 2:].to_dict()
|
b2a036a739ec3e45c61289655714d9b59b2f5490
| 3,638,226
|
def parse_time(date_time, time_zone):
    """Return the number of seconds between now and the scheduled time.

    :param date_time: parseable date/time string for the scheduled moment
    :param time_zone: time zone name used both for "now" and for parsing
    :return: whole seconds until the scheduled time
    :raises ScheduleError: if the scheduled time is not in the future
    """
    now = pendulum.now(time_zone)
    update = pendulum.parse(date_time, tz=time_zone)
    # If a time zone is not specified, it will be set to local.
    # When passing only time information the date will default to today.
    # The time will be set to 00:00:00 if it's not specified.
    # A future date is needed.
    # FIX: use total_seconds() rather than `.seconds` -- the latter is only
    # the seconds component of the delta (always in [0, 86399] for a
    # timedelta), so it ignores whole days and can never be negative, which
    # broke both the past-date check and schedules more than a day away.
    secs = int((update - now).total_seconds())
    if secs < 0:
        raise ScheduleError(ScheduleError.pastDateError)
    return secs
|
5ca2f5dad85e3492bd9909808990aaef0587343a
| 3,638,229
|
def upper_bounds_max_ppr_target(adj, alpha, fragile, local_budget, target):
    """
    Computes the upper bound for x_target for any teleport vector.
    Parameters
    ----------
    adj : sp.spmatrix, shape [n, n]
        Sparse adjacency matrix.
    alpha : float
        (1-alpha) teleport[v] is the probability to teleport to node v.
    fragile : np.ndarray, shape [?, 2]
        Fragile edges that are under our control.
    local_budget : np.ndarray, shape [n]
        Maximum number of local flips per node.
    target : int
        Target node.
    Returns
    -------
    upper_bounds: np.ndarray, shape [n]
        Computed upper bounds.
    """
    n = adj.shape[0]
    # Indicator vector for the target node, used both as reward and teleport.
    z = np.zeros(n)
    z[target] = 1
    # Find the worst-case configuration of the fragile edges via policy iteration.
    opt_fragile, _ = policy_iteration(adj=adj, alpha=alpha, fragile=fragile, local_budget=local_budget,
                                      reward=z, teleport=z)
    adj_flipped = flip_edges(adj, opt_fragile)
    # gets one column from the PPR matrix
    # corresponds to the PageRank score value of target for any teleport vector (any row)
    # (I - alpha * D^-1 A) x = z, solved iteratively with GMRES.
    pre_inv = sp.eye(n) - alpha * sp.diags(1 / adj_flipped.sum(1).A1) @ adj_flipped
    ppr = (1 - alpha) * gmres(pre_inv, z)[0]
    # Correct for the degree change introduced by flipping fragile edges.
    correction = correction_term(adj, opt_fragile, fragile)
    upper_bounds = ppr / correction
    return upper_bounds
|
5bab951605ad5181e2fb696836219167dd78a30e
| 3,638,231
|
def cramers_corrected_stat(contingency_table):
    """
    Computes corrected Cramer's V statistic for categorial-categorial association
    """
    try:
        chi2 = chi2_contingency(contingency_table)[0]
    except ValueError:
        return np.NaN
    n = contingency_table.sum().sum()
    r, k = contingency_table.shape
    # Bias-corrected phi^2 and dimensions (Bergsma-Wicher correction).
    phi2_corrected = max(0, chi2 / n - (k - 1) * (r - 1) / (n - 1))
    r_corrected = r - (r - 1) ** 2 / (n - 1)
    k_corrected = k - (k - 1) ** 2 / (n - 1)
    denominator = min(k_corrected - 1, r_corrected - 1)
    return (phi2_corrected / denominator) ** 0.5
|
89581fbcc306afdf34dac8cb30d3e7b316a47f48
| 3,638,233
|
def frame_comps_from_set(frame_set):
    """
    A `set` of all component names every defined within any frame class in
    this `TransformGraph`.
    Broken out of the class so this can be called on a temporary frame set to
    validate new additions to the transform graph before actually adding them.
    """
    # Flatten every representation mapping of every frame class into one set
    # of component (frame attribute) names.
    return {
        rep_map.framename
        for frame_cls in frame_set
        for mappings in frame_cls._frame_specific_representation_info.values()
        for rep_map in mappings
    }
|
525ea19b78cb2a360165085720d42df58aa72500
| 3,638,234
|
def collection_tail(path_string):
    """Walk the path, return the tail collection.

    Resolves `path_string` against Blender's collection hierarchy and returns
    the deepest collection it names, or None when any segment is missing.
    The final segment is only descended into when the path is explicitly
    terminated or it actually names a child collection -- otherwise it is
    presumed to be a Blender Object and the walk stops one level up.
    """
    # pylint: disable=consider-using-enumerate
    coll = None
    parts = extract_path(path_string)
    if parts:
        try:
            last_i = len(parts) - 1
            # Root segment must name a top-level collection.
            coll = bpy.data.collections[parts[0]]
            for i in range(1, len(parts)):
                if i != last_i or \
                        is_path_terminated(path_string) or \
                        coll.children.get(parts[i]):
                    coll = coll.children[parts[i]]  # Collection
                else:
                    break  # Blender Object
        except KeyError:
            # A named segment does not exist in the hierarchy.
            return None
    return coll
|
9a9d4e594c654b35f15870d33bc24314f1c48e5c
| 3,638,236
|
from pyadlml.dataset.devices import most_prominent_categorical_values
def create_raw(df_dev, most_likely_values=None):
    """
    Pivot a device event log into a wide "raw" representation, one column
    per device, forward-filling boolean/categorical gaps.

    return df:
    | time  | dev_1 | ....  | dev_n |
    --------------------------------
    | ts1   |  1    | ....  | 0     |

    most_likely_values : optional precomputed most-prominent state per
        categorical device; computed from df_dev when omitted.
    """
    df_dev = df_dev.copy()
    # One row per timestamp, one column per device.
    df = df_dev.pivot(index=TIME, columns=DEVICE, values=VAL)
    df = df.reset_index()
    dev_dtypes = _infer_types(df)
    dev_cat = dev_dtypes['categorical']
    dev_bool = dev_dtypes['boolean']
    dev_num = dev_dtypes['numerical']
    # set the first element for each boolean device to the opposite value of the
    # first occurrence
    for dev in dev_bool:
        fvi = df[dev].first_valid_index()
        if fvi != 0:
            value = df[dev].iloc[fvi]
            df.loc[0, dev] = not value
    # set the first element of each categorical device to the most likely value
    if len(dev_cat) != 0:
        if most_likely_values is None:
            tmp = df_dev[df_dev[DEVICE].isin(dev_cat)]
            most_likely_values = most_prominent_categorical_values(tmp)
        mlv = most_likely_values.set_index(DEVICE)
        for dev in dev_cat:
            new_val = mlv.loc[dev]['ml_state']
            df.loc[0, dev] = new_val
    df_num = df[dev_num]
    df_cat_bool = df[dev_bool + dev_cat]
    # fill from start to end NaNs with the preceeding correct value
    df_cat_bool = df_cat_bool.ffill()
    # Numerical columns are intentionally NOT forward-filled.
    df = pd.concat([df[TIME], df_num, df_cat_bool], axis=1)
    return df
|
e6659e70bf91876a3cbfcc98aaa71e4e97837a7f
| 3,638,237
|
def p1_marker_loc(p1_input, board_list, player1):
    """Take the location of the marker for Player 1.

    :param p1_input: initial (possibly invalid) position value
    :param board_list: current board; indices 1-9 hold markers or " "
    :param player1: Player 1's marker symbol, used only in the final message
    :return: a validated position (1-9) whose board cell is free
    """
    # Keep asking until the input is a number in 1-9 AND that spot is free.
    # (The `or` short-circuits, so board_list is only indexed for in-range values.)
    while p1_input not in range(1, 10) or board_list[p1_input] != " ":
        try:
            p1_input = int(
                input("Player 1: Where would you like to place the marker (1 - 9)? ")
            )
            # FIX: check the range BEFORE indexing board_list -- the original
            # indexed first, so a numeric entry like 42 raised an IndexError.
            if p1_input in range(1, 10) and board_list[p1_input] != " ":
                # Warn player 1 that the spot is taken and loop for new input.
                print(
                    "There is already a marker there, please choose another location."
                )
                input("Press Enter to continue. ")
                print()
                continue
        except ValueError:
            print("This is not a number, please try again!")
            print()
    print(f"Player 1 is placing {player1} in position {p1_input}.")
    # return the variable to reassign it locally on the game_logic() function
    return p1_input
|
ea8cfd35e56d7e34efa7319667f1a655b597cf39
| 3,638,238
|
def chord(tones, dur, phrasing="", articulation="", ornamentation="", dynamics="", markup="", markdown="", prefix="", suffix=""):
    """ Returns a list containing a single Point that prints as a chord with the specified tones and duration. """
    # Normalize the tones into a flat list before building the Point.
    chord_tones = flatten([tonify(tones)])
    point = Point(chord_tones, dur, phrasing, articulation, ornamentation, dynamics, markup, markdown, prefix, suffix)
    return [point]
|
b6fc7ba5c7e8541eeea540a869b1697c15c5ea47
| 3,638,239
|
def build_gem_graph():
    """Builds a gem graph, F4,1.
    Ref: http://mathworld.wolfram.com/GemGraph.html"""
    # Start from C5 and connect node 1 to the two nodes it is not yet
    # adjacent to, producing the fan graph F4,1.
    graph = build_5_cycle_graph()
    for neighbor in (3, 4):
        graph.new_edge(1, neighbor)
    return graph
|
4979ae5643ca44d6fb5eadd4fff18489fd3b5629
| 3,638,240
|
import select
async def get_forecasts_by_user_year_epic(
    user_id, epic_id, year, month, session: Session = Depends(get_session)
):
    """Get forecast by user, epic, year, month.

    Returns all matching (id, month, year, days) rows for the given
    user/epic/year/month combination.

    NOTE(review): `select` here must be sqlmodel/sqlalchemy's `select`; the
    stdlib `import select` visible above would shadow it and make this call
    fail -- confirm the real import at the top of the module.
    """
    statement = (
        select(Forecast.id, Forecast.month, Forecast.year, Forecast.days)
        .where(Forecast.user_id == user_id)
        .where(Forecast.epic_id == epic_id)
        .where(Forecast.year == year)
        .where(Forecast.month == month)
    )
    results = session.exec(statement).all()
    return results
|
655863588ece0800d220386282d620d7296fc8a2
| 3,638,241
|
def face_xyz_to_uv(face, p):
    """(face, XYZ) to UV
    see :cpp:func:`S2::FaceXYZtoUV`
    """
    # The point must lie on the correct side of this face's plane:
    # positive along the axis for faces 0-2, negative for faces 3-5.
    axis = face if face < 3 else face - 3
    coord = p[axis]
    if (face < 3 and coord <= 0) or (face >= 3 and coord >= 0):
        return False, 0, 0
    u, v = valid_face_xyz_to_uv(face, p)
    return True, u, v
|
3483f918ed511c8fdf3c43e147c6cc605633754b
| 3,638,242
|
import re
def cleanupString(string, replacewith="_", regex="([^A-Za-z0-9])"):
    """Replace every character not matched as alphanumeric by `regex`."""
    # Please don't use the logging system here. The logging system
    # needs this method, using the logging system here would
    # introduce a circular dependency. Be careful not to call other
    # functions that use the logging system.
    cleaned = re.sub(regex, replacewith, string)
    return cleaned
|
b327879a345a4236b871f824937997f6bd43d55b
| 3,638,243
|
import socket
import json
def send_message(data, header_size=8):
    """Send `data` as JSON over the communicator UNIX socket.

    The message is framed as a big-endian `header_size`-byte length prefix
    followed by the JSON payload; the response is read back and checked.

    :param data: JSON-serializable payload to send
    :param header_size: number of bytes used for the length prefix
    :raises RuntimeError: when no response or a non-OK response is received
    :raises ValueError: when the response lacks the 'type_data' key
    """
    @_retry()
    def _connect(socket_path):
        """Connect socket."""
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        sock.connect(socket_path)
        sock.settimeout(SOCKET_TIMEOUT)
        return sock

    def _check_response(response):
        if response is None:
            raise RuntimeError(
                "No response received when sending message: {}.".format(data)
            )
        if "type_data" not in response:
            raise ValueError(
                "Response {} does not contain key 'type_data'.".format(response)
            )
        return response["type_data"] == "OK"

    # FIX: connect OUTSIDE the try block. In the original, a failure inside
    # _connect left `sock` unbound, so `finally: sock.close()` raised
    # NameError and masked the real connection error.
    sock = _connect(str(COMMUNICATOR_SOCKET))
    try:
        message = json.dumps(data).encode()
        message_length = len(message).to_bytes(header_size, byteorder="big")
        sock.sendall(message_length)
        sock.sendall(message)
        response = _receive_data(sock)
        if not _check_response(response):
            # FIX: corrected "respone" typo in the log message.
            logger.error("Error in response to %s: %s.", data, response)
            raise RuntimeError("Wrong response received, terminating processing.")
    finally:
        sock.close()
|
bbe9c11e5b29ac2b0f0d2d9dd357f806a682156b
| 3,638,244
|
from typing import Optional
from typing import Set
from typing import Literal
from typing import Any
def _get_mapping_keys_in_condition(
    condition: Expression, column_name: str
) -> Optional[Set[str]]:
    """
    Finds the top level conditions that include filter based on the arrayJoin.
    This is meant to be used to find the keys the query is filtering the arrayJoin
    on.
    We can only apply the arrayFilter optimization to arrayJoin conditions
    that are not in OR with other columns. To simplify the problem, we only
    consider those conditions that are included in the first level of the query:
    [['tagskey' '=' 'a'],['col' '=' 'b'],['col2' '=' 'c']] works
    [[['tagskey' '=' 'a'], ['col2' '=' 'b']], ['tagskey' '=' 'c']] does not
    If we encounter an OR condition we return None, which means we cannot
    safely apply the optimization. Empty set means we did not find any
    suitable arrayJoin for optimization in this condition but that does
    not disqualify the whole query in the way the OR condition does.
    """
    keys_found = set()
    # Split the top-level condition on AND so each clause can be inspected
    # independently.
    conditions = get_first_level_and_conditions(condition)
    for c in conditions:
        # Any top-level OR makes the optimization unsafe: bail out entirely.
        if is_binary_condition(c, BooleanFunctions.OR):
            return None
        # Case 1: equality against the arrayJoin column, e.g. tags_key = 'a'.
        match = FunctionCall(
            String(ConditionFunctions.EQ),
            (array_join_pattern(column_name), Literal(Param("key", Any(str)))),
        ).match(c)
        if match is not None:
            keys_found.add(match.string("key"))
        # Case 2: IN condition against the arrayJoin column,
        # e.g. tags_key IN ('a', 'b'); collect every string literal in the tuple.
        match = is_in_condition_pattern(array_join_pattern(column_name)).match(c)
        if match is not None:
            function = match.expression("tuple")
            assert isinstance(function, FunctionCallExpr)
            keys_found |= {
                lit.value
                for lit in function.parameters
                if isinstance(lit, LiteralExpr) and isinstance(lit.value, str)
            }
    return keys_found
|
7d890e4b68aeba9caca30e5a140214072c781a66
| 3,638,245
|
import requests
from bs4 import BeautifulSoup
def make_request(method, url, **kwargs):
    """Make HTTP request, raising an exception if it fails.
    """
    # Resolve e.g. "get"/"post" to the matching requests function.
    http_call = getattr(requests, method)
    response = http_call(url, **kwargs)
    # raise an exception if request is not successful
    if response.status_code != requests.codes.ok:
        response.raise_for_status()
    return BeautifulSoup(response.text)
|
1f47b178b66efe31fd78a4affc76a87d5be428bc
| 3,638,246
|
def hold(source):
    """Place the active call on the source phone on hold"""
    message = "Holding call on {0}".format(source.Name)
    print(message)
    return operation(source, 'Hold')
|
297cef77a3630bf3b9cab6256547ec43a5ba797c
| 3,638,247
|
import math
def make_grid(batch, grid_height=None, zoom=1, old_buffer=None, border_size=1):
    """Creates a grid out an image batch.
    Args:
      batch: numpy array of shape [batch_size, height, width, n_channels]. The
        data can either be float in [0, 1] or int in [0, 255]. If the data has
        only 1 channel it will be converted to a grey 3 channel image.
      grid_height: optional int, number of rows to have. If not given, it is
        set so that the output is a square. If -1, then tiling will only be
        vertical.
      zoom: optional int, how much to zoom the input. Default is no zoom.
      old_buffer: Buffer to write grid into if possible. If not set, or if shape
        doesn't match, we create a new buffer.
      border_size: int specifying the white spacing between the images.
    Returns:
      A numpy array corresponding to the full grid, with 3 channels and values
      in the [0, 255] range.
    Raises:
      ValueError: if the n_channels is not one of [1, 3].
    """
    batch_size, height, width, n_channels = batch.shape
    # Work out the grid dimensions: square by default, single column for -1.
    if grid_height is None:
        side = int(math.ceil(math.sqrt(batch_size)))
        grid_height, grid_width = side, side
    elif grid_height == -1:
        grid_height, grid_width = batch_size, 1
    else:
        grid_width = int(math.ceil(batch_size / grid_height))
    # Grey images are replicated into 3 channels.
    if n_channels == 1:
        batch = np.tile(batch, (1, 1, 1, 3))
        n_channels = 3
    if n_channels != 3:
        raise ValueError('Image batch must have either 1 or 3 channels, but '
                         'was {}'.format(n_channels))
    # We create the numpy buffer if we don't have an old buffer or if the size
    # has changed.
    shape = (height * grid_height + border_size * (grid_height - 1),
             width * grid_width + border_size * (grid_width - 1),
             n_channels)
    if old_buffer is not None and old_buffer.shape == shape:
        buf = old_buffer
    else:
        buf = np.full(shape, 255, dtype=np.uint8)
    # Float batches are assumed to be in [0, 1] and are scaled to [0, 255].
    scale = 1 if np.issubdtype(batch.dtype, np.integer) else 255
    for idx in range(batch_size):
        row, col = divmod(idx, grid_width)
        top = row * (height + border_size)
        left = col * (width + border_size)
        tile = np.clip(scale * batch[idx], 0, 255).astype(np.uint8)
        buf[top:top + height, left:left + width, :] = tile
    if zoom > 1:
        buf = buf.repeat(zoom, axis=0).repeat(zoom, axis=1)
    return buf
|
72bbcebd121b13bce31d760b9d8890966155b603
| 3,638,248
|
def _get_patterns_map(resolver, default_args=None):
    """
    Cribbed from http://www.djangosnippets.org/snippets/1153/
    Recursively generates a map of
    (pattern name or path to view function) -> (view function, default args)

    NOTE(review): relies on the pre-2.0 Django URL classes (RegexURLResolver,
    RegexURLPattern); modern Django renamed these to URLResolver/URLPattern.
    """
    patterns_map = {}
    if default_args is None:
        default_args = {}
    for pattern in resolver.url_patterns:
        # Accumulate default kwargs down the include() chain.
        pattern_args = default_args.copy()
        if isinstance(pattern, RegexURLResolver):
            # Nested resolver (an include()): recurse with its default kwargs.
            pattern_args.update(pattern.default_kwargs)
            patterns_map.update(_get_patterns_map(pattern, pattern_args))
        else:
            pattern_args.update(pattern.default_args)
            if pattern.name is not None:
                patterns_map[pattern.name] = (pattern.callback, pattern_args)
            # HACK: Accessing private attribute of RegexURLPattern
            callback_str = getattr(pattern, '_callback_str', None)
            if callback_str is not None:
                # Also index by dotted view path when available.
                patterns_map[pattern._callback_str] = (pattern.callback, pattern_args)
    return patterns_map
|
21f149773457b075ba984b028d2c44ac41f09a6a
| 3,638,249
|
def encoder_package_to_options(encoder_package, post_url=None,
                               extra_numerics=None,
                               extra_categoricals=None,
                               omitted_fields=None):
    """
    Build a form "options" dict (fields + form attributes) from a one-hot
    encoder package.

    :param encoder_package: one hot encoder package
    :param post_url: url to send form data to on submission
        default is ''
        for testing purposes, you may use PUBLIC and it will use
        "http://httpbin.org/post" which prints the result
        this is not secure so don't do that with sensitive data
    :param extra_numerics: additional numeric fields to include
        (normalized by process_extras)
    :param extra_categoricals: additional categorical fields mapped to
        their level labels
    :param omitted_fields: fields to leave out of the form entirely
    :return: options dict with "form", "helper" and "fields" entries
    """
    extra_numerics, extra_categoricals, omitted_fields = process_extras(extra_numerics,
                                                                       extra_categoricals,
                                                                       omitted_fields)
    if post_url is None:
        post_url = ''
    if post_url == 'PUBLIC':
        post_url = "http://httpbin.org/post"
    fields = {}
    # Numeric fields render as plain text inputs.
    numeric_cols = encoder_package['numeric_cols'] + list(extra_numerics.keys())
    for field in numeric_cols:
        if field in omitted_fields:
            continue
        fields[field] = {
            "size": 20
        }
    encoder_dicts = encoder_package['one_hot_encoder_dicts']
    for field, value_dicts in encoder_dicts.items():
        if field in omitted_fields:
            continue
        # Order the levels by their encoder index so labels line up with
        # the one-hot encoding, then append the "unknown" sentinel level.
        values = sorted(value_dicts.items(), key=lambda x: x[1])
        levels = [v[0] for v in values]
        n_levels = len(levels)
        levels = levels + [unknown_level_value]
        if n_levels < LEVELS_MAX_FOR_DROP_DOWN:
            fields[field] = {
                "type": "select",
                "optionLabels": levels,
                "sort": False}
        else:
            # Too many levels for a drop-down; fall back to a text input.
            fields[field] = {"size": 20}
    # Extra categoricals always render as drop-downs with the given labels.
    for field, levels in extra_categoricals.items():
        fields[field] = {
            "type": "select",
            "optionLabels": levels,
            "sort": False
        }
    options = {
        "form": {
            "attributes": {
                "action": post_url,
                "method": "post"
            },
            "buttons": {
                "submit": {}
            }
        },
        "helper": "Hit submit to update the prediction",
        "fields": fields}
    return options
|
1286aefef87b547d7a09db8fec3b50f7082e64f8
| 3,638,250
|
def get_subset_values(request, pk):
    """Return the numerical values of a subset as a formatted list.

    Values are ordered by qualifier so that all primary (y) values precede
    any secondary (x) values; the response pairs them up positionally.
    """
    values = models.NumericalValue.objects.filter(
        datapoint__subset__pk=pk).select_related(
        'error').select_related('upperbound').order_by(
        'qualifier', 'datapoint__pk')
    total_len = len(values)
    y_len = total_len
    # With both x- and y-values, the y-values make up half the list.
    # NOTE(review): values.last() is None for an empty subset, which would
    # raise AttributeError here -- confirm callers guarantee non-empty data.
    if values.last().qualifier == models.NumericalValue.SECONDARY:
        y_len = int(y_len/2)
    response = []
    for i in range(y_len):
        response.append({'y': values[i].formatted()})
    # Attach the matching x-value (same rank in the second half) to each y.
    for i in range(y_len, total_len):
        response[i-y_len]['x'] = values[i].formatted()
    return JsonResponse(response, safe=False)
|
1bc34a534a56a7f75742f455aad5575224ce976f
| 3,638,251
|
import time
def timestamp(format_key: str) -> str:
    """Return the current local time formatted according to `format_key`.

    Args:
        format_key: one of 'format_day', 'format_now' or 'unix_now'.

    Usage:
        timestamp('format_day')
    """
    formats = {
        'default':
            {
                'format_day': '%Y-%m-%d',
                'format_now': '%Y-%m-%d-%H_%M_%S',
                'unix_now': '%Y-%m-%d %H:%M:%S',
            }
    }
    pattern = formats['default'][format_key]
    return time.strftime(pattern, time.localtime(time.time()))
|
dab77afb630193d45fbc5b07c08fd82c3dfa3050
| 3,638,252
|
def _save_conn_form(
    request: HttpRequest,
    form: SQLConnectionForm,
    template_name: str,
) -> JsonResponse:
    """Save the connection provided in the form.
    :param request: HTTP request
    :param form: form object with the collected information
    :param template_name: To render the response
    :return: AJAX response
    """
    # Type of event to record: an existing instance id means this is an edit.
    if form.instance.id:
        event_type = Log.SQL_CONNECTION_EDIT
        is_add = False
    else:
        event_type = Log.SQL_CONNECTION_CREATE
        is_add = True
    # If it is a POST and it is correct
    if request.method == 'POST' and form.is_valid():
        # Nothing changed: close the modal without redirecting.
        if not form.has_changed():
            return JsonResponse({'html_redirect': None})
        conn = form.save()
        # Log the event (password is masked, never stored in the log).
        Log.objects.register(
            request.user,
            event_type,
            None,
            {
                'name': conn.name,
                'description': conn.description_text,
                'conn_type': conn.conn_type,
                'conn_driver': conn.conn_driver,
                'db_user': conn.db_user,
                'db_passwd': _('<PROTECTED>') if conn.db_password else '',
                'db_host': conn.db_host,
                'db_port': conn.db_port,
                'db_name': conn.db_name,
                'db_table': conn.db_table,
            },
        )
        return JsonResponse({'html_redirect': ''})
    # Request is a GET (or an invalid POST): re-render the form.
    return JsonResponse({
        'html_form': render_to_string(
            template_name,
            {
                'form': form,
                'id': form.instance.id,
                'add': is_add},
            request=request,
        ),
    })
|
ee2639e1ab354b6ca722e35167bf6ab7cc57b351
| 3,638,253
|
def client() -> GivEnergyClient:
    """Supply a client with a mocked modbus client."""
    return GivEnergyClient(host='foo')
|
9d419927ebcb5a39df27e92e3a378cd5448acf1e
| 3,638,254
|
def test_bus(test_system):
    """Create the test system."""
    # Run a load flow first so the bus quantities are populated.
    test_system.run_load_flow()
    bus = test_system.buses["bus3"]
    return bus
|
fea4880446059171dae5d6fffc24bdc98eede5cd
| 3,638,255
|
def mask_target(y_true, bbox_true, mask_true, mask_regress, proposal, assign = cls_assign, sampling_count = 256, positive_ratio = 0.25, mean = [0., 0., 0., 0.], std = [0.1, 0.1, 0.2, 0.2], method = "bilinear"):
    """
    Build (mask_true, mask_pred) training targets for a mask head.

    y_true = label #(padded_num_true, 1 or num_class)
    bbox_true = [[x1, y1, x2, y2], ...] #(padded_num_true, bbox)
    mask_true = mask #(padded_num_true, h, w)
    mask_regress = mask regress #(num_proposals, h, w, num_class)
    proposal = [[x1, y1, x2, y2], ...] #(num_proposals, bbox)
    mask_true = targeted mask true #(sampling_count, h, w)
    mask_pred = targeted mask regress #(sampling_count, h, w)

    NOTE(review): `mean` and `std` are accepted but never used in this body --
    presumably kept for signature parity with a bbox-target sibling; they are
    also mutable default arguments, harmless while unused but worth fixing.
    """
    # crop_and_resize needs a trailing channel axis on the ground-truth masks.
    if tf.keras.backend.ndim(mask_true) == 3:
        mask_true = tf.expand_dims(mask_true, axis = -1)
    pred_count = tf.shape(proposal)[0]
    # Drop zero-padded ground-truth rows (all-zero boxes).
    valid_true_indices = tf.where(tf.reduce_max(tf.cast(0 < bbox_true, tf.int32), axis = -1))
    y_true = tf.gather_nd(y_true, valid_true_indices)
    bbox_true = tf.gather_nd(bbox_true, valid_true_indices)
    # Drop zero-padded proposals likewise.
    valid_pred_indices = tf.where(tf.reduce_max(tf.cast(0 < proposal, tf.int32), axis = -1))
    proposal = tf.gather_nd(proposal, valid_pred_indices)
    mask_true = tf.gather_nd(mask_true, valid_true_indices)
    mask_regress = tf.gather_nd(mask_regress, valid_pred_indices)
    # Match proposals to ground truth (positive/negative assignment).
    true_indices, positive_indices, negative_indices = assign(bbox_true, proposal)
    if isinstance(sampling_count, int) and 0 < sampling_count:
        # Randomly subsample positives to sampling_count * positive_ratio, then
        # pick negatives so the positive:negative ratio is preserved.
        positive_count = tf.cast(sampling_count * positive_ratio, tf.int32)
        indices = tf.range(tf.shape(positive_indices)[0])
        indices = tf.random.shuffle(indices)[:positive_count]
        positive_indices = tf.gather(positive_indices, indices)
        true_indices = tf.gather(true_indices, indices)
        positive_count = tf.cast(tf.shape(positive_indices)[0], tf.float32)
        negative_count = tf.cast(1 / positive_ratio * positive_count - positive_count, tf.int32)
        negative_indices = tf.random.shuffle(negative_indices)[:negative_count]
    else:
        sampling_count = pred_count
    pred_indices = tf.concat([positive_indices, negative_indices], axis = 0)
    y_true = tf.gather(y_true, true_indices)
    proposal = tf.gather(proposal, positive_indices)
    mask_true = tf.gather(mask_true, true_indices)
    mask_pred = tf.gather(mask_regress, positive_indices)
    n_class = tf.shape(y_true)[-1]
    if tf.keras.backend.int_shape(true_indices)[0] != 0:
        # Per-sample class label: the raw label column, or argmax over one-hot.
        label = tf.cond(tf.equal(n_class, 1), true_fn = lambda: y_true, false_fn = lambda: tf.expand_dims(tf.cast(tf.argmax(y_true, axis = -1), y_true.dtype), axis = -1))
        indices = tf.stack([tf.range(tf.shape(label)[0]), tf.cast(label[:, 0], tf.int32)], axis = -1)
        if mask_true is not None and mask_regress is not None:
            # crop_and_resize expects boxes in [y1, x1, y2, x2] order.
            x1, y1, x2, y2 = tf.split(proposal, 4, axis = -1)
            mask_bbox = tf.concat([y1, x1, y2, x2], axis = -1)
            mask_shape = tf.shape(mask_pred)
            mask_true = tf.image.crop_and_resize(image = tf.cast(mask_true, mask_pred.dtype), boxes = mask_bbox, box_indices = tf.range(0, tf.cast(positive_count, tf.int32)), crop_size = mask_shape[1:3], method = method)
            mask_true = mask_true[..., 0]
            # Binarize the interpolated ground-truth crops.
            mask_true = tf.clip_by_value(tf.round(mask_true), 0., 1.)
            # Select the predicted mask channel matching each sample's class.
            mask_pred = tf.transpose(mask_pred, [0, 3, 1, 2])
            mask_pred = tf.gather_nd(mask_pred, indices)
    else:
        # No positives at all: emit zero-filled targets.
        mask_pred = mask_pred[..., 0]
        mask_true = tf.zeros_like(mask_pred, dtype = mask_pred.dtype)
    # Pad with zeros so the outputs always have `sampling_count` rows.
    negative_count = tf.shape(negative_indices)[0]
    pad_count = tf.maximum(sampling_count - tf.shape(pred_indices)[0], 0)
    mask_true = tf.pad(mask_true, [[0, negative_count + pad_count], [0, 0], [0, 0]])
    mask_pred = tf.pad(mask_pred, [[0, negative_count + pad_count], [0, 0], [0, 0]])
    return mask_true, mask_pred
|
b161178716d890721a7f3cd0bfd61fdcc3efffb4
| 3,638,256
|
from rowgenerators.exceptions import DownloadError
def display_context(doc):
    """Create a Jinja context for display.

    Converts a metatab document into a plain-dict context: normalizes the
    documentation/contacts/resources/references sections, loads any inline
    README markdown, and extracts schema, distribution and image info.
    """
    # Make a naive dictionary conversion
    context = {s.name.lower(): s.as_dict() for s in doc if s.name.lower() != 'schema'}
    mandatory_sections = ['documentation', 'contacts']
    # Remove section names
    deletes = []
    for k, v in context.items():
        try:
            del v['@value']
        except KeyError:
            pass  # Doesn't have the value
        except TypeError:
            # Is actually completely empty, and has a scalar value. Delete and re-create
            deletes.append(k)
        if isinstance(v, str):  # Shouldn't ever happen, but who knows ?
            deletes.append(k)
    for d in deletes:
        try:
            del context[d]
        except KeyError:
            # Fails in TravisCI, no idea why.
            pass
    # Guarantee the sections the templates always reference.
    for ms in mandatory_sections:
        if ms not in context:
            context[ms] = {}
    # Load inline documentation
    inline = ''
    for d in context.get('documentation', {}).get('documentation', []):
        try:
            u = parse_app_url(d['url'])
        except TypeError:
            continue
        if u.target_format == 'md':  # The README.md file
            inline = ''
            if u.proto == 'file':
                # File really ought to be relative
                t = doc.package_url.join_target(u).get_resource().get_target()
            else:
                try:
                    t = u.get_resource().get_target()
                except DownloadError as e:
                    raise e
            try:
                with open(t.fspath) as f:
                    inline += f.read()
            except FileNotFoundError:
                pass
            del d['title']  # Will cause it to be ignored in next section
    # Strip off the leading title, if it exists, because it will be re-applied
    # by the templates
    lines = inline.strip().splitlines()
    if lines and lines[0].startswith('# '):
        lines = lines[1:]
    context['inline_doc'] = '\n'.join(lines)
    # Convert doc section
    doc_links = {}
    images = {}
    for term_name, terms in context['documentation'].items():
        if term_name == 'note':
            context['notes'] = terms
        elif terms:
            for i, term in enumerate(terms):
                try:
                    if term_name == 'image':
                        images[term['title']] = term
                    else:
                        doc_links[term['title']] = term
                except AttributeError:  # A scalar
                    pass  # There should not be any scalars in the documentation section
                except KeyError:
                    pass  # ignore entries without titles
                except TypeError:
                    pass  # Also probably a ascalar
    context['doc_links'] = doc_links
    context['images'] = images
    del context['documentation']
    #
    # Update contacts
    origin = None
    for term_name, terms in context['contacts'].items():
        if isinstance(terms, dict):
            origin = terms  # Origin is a scalar in roort, must be converted to sequence here
        else:
            for t in terms:
                try:
                    t.update(process_contacts_html(t))
                except AttributeError:
                    pass  # Probably got a scalar
    if origin:
        origin.update(process_contacts_html(origin))
        context['contacts']['origin'] = [origin]
    # For resources and references, convert scalars into lists of dicts, which are the
    # default for Datafiles and References.
    for section in ('references', 'resources'):
        if section not in context:
            context[section] = {}
        for term_key, term_vals in context[section].items():
            if isinstance(term_vals, dict):
                if '@value' in term_vals:
                    term_vals['url'] = term_vals['@value']
                    del term_vals['@value']
                new_term_vals = [term_vals]
            elif isinstance(term_vals, list):
                # Already a sequence: leave as-is.
                new_term_vals = None
            else:
                new_term_vals = [{'url': term_vals, 'name': term_vals}]
            if new_term_vals:
                context[section][term_key] = new_term_vals
    # Add in other properties to the resources
    for term in context.get('resources', {}).get('datafile', []):
        r = doc.resource(term['name'])
        if r is not None:
            term['isgeo'] = r.isgeo
    context['distributions'] = {}
    for dist in doc.find('Root.Distribution'):
        context['distributions'][dist.type] = dist.value
    if doc.find('Root.Giturl'):
        context['distributions']['source'] = doc.get_value('Root.Giturl')
    # Convert doc schema section into {table_name: [column dicts]}.
    context['schema'] = {}
    if 'Schema' in doc:
        for t in doc['Schema'].find('Root.Table'):
            context['schema'][t.name] = []
            for c in t.find('Table.Column'):
                context['schema'][t.name].append(c.as_dict())
    return context
|
53d455448b37a1236e640a66436525fa9369e575
| 3,638,257
|
import torch
def _get_triplet_mask(labels: torch.Tensor) -> torch.BoolTensor:
"""Return a 3D mask where mask[a, p, n] is True if the triplet (a, p, n) is valid.
A triplet (i, j, k) is valid if:
- i, j, k are distinct
- labels[i] == labels[j] and labels[i] != labels[k]
Args:
labels (torch.Tensor): `Tensor` with shape [batch_size]
Returns:
torch.BoolTensor: `Tensor` with shape [batch_size]
"""
# Check that i, j and k are distinct
indices = torch.logical_not(torch.eye(labels.size(0)).bool()).to(labels.device)
i_not_equal_j = indices.unsqueeze(2)
i_not_equal_k = indices.unsqueeze(1)
j_not_equal_k = indices.unsqueeze(0)
distinct_indices = (i_not_equal_j & i_not_equal_k) & j_not_equal_k
label_equal = labels.unsqueeze(0) == labels.unsqueeze(1)
i_equal_j = label_equal.unsqueeze(2)
i_equal_k = label_equal.unsqueeze(1)
valid_labels = ~i_equal_k & i_equal_j
return valid_labels & distinct_indices
|
91e4e88507979bacde12c4c2dd9725b4d52e0e90
| 3,638,258
|
import re
def analyse_registration_output(output_string):
    """Parse the registration command output and return an appropriate error.

    Args:
        output_string: raw text emitted by the registration CLI.

    Returns:
        tuple (exit_code, message): exit_code is 0 on success and 1 on
        failure; message is a human-readable description.
    """
    parse_error = "ERROR:Unable to parse error message:" + output_string
    success = 0
    fail = 1
    # Table of known CLI result codes (single digit) -> messages.
    code_messages = {
        '0': "CLI_0: Authentication error",
        '1': "CLI_1: Error reading file references from the properties file",
        '2': "CLI_2: Invalid user input",
        '3': "CLI_3: No input files to process",
        '4': "CLI_4: Failed to process collection",
        '5': "CLI_5: Failed to process data file",
    }
    # Raw strings avoid invalid-escape warnings; search returns None on no match
    # (the original used a bare except around an AttributeError from `.groupdict`).
    status_match = re.search(r"Status\s*:\s*(?P<status>[A-Z]+)", output_string)
    if status_match is None:
        return fail, parse_error
    status = status_match.groupdict()['status']
    if status == "COMPLETED":
        return success, "Successful registration"
    if status == "FAILED":
        code_match = re.search(r"Result Code\s*:\s*CLI_(?P<code>[0-9])",
                               output_string)
        if code_match is None:
            return fail, parse_error
        code = code_match.groupdict()['code']
        return fail, code_messages.get(code, "Unknown error")
    # Status present but neither FAILED nor COMPLETED.
    return fail, parse_error
|
e6e90b9a55a8631bcb1c0963c943b08df82f03f4
| 3,638,259
|
import random
def randomrandrange(x, y=None):
    """Return a randomly selected element from range(start, stop).

    Equivalent to choice(range(start, stop)) without building a range object.

    Args:
        x: stop when ``y`` is None, otherwise start.
        y: optional stop.

    Returns:
        int: a random integer in the half-open range.
    """
    # Bug fix: the original tested isinstance(y, NoneType), but NoneType was
    # never defined/imported, raising NameError at call time.
    if y is None:
        return random.randrange(x)  # nosec
    return random.randrange(x, y)  # nosec
|
5c6304f20e6e1ddcfda931278defdc0c8867553f
| 3,638,260
|
from typing import Callable
def int_domains(ecoords: np.ndarray, qpos: np.ndarray,
                qweight: np.ndarray, dshpfnc: Callable):
    """
    Return the measure (length, area or volume in 1d, 2d and 3d) of
    several domains, computed by Gaussian quadrature of the Jacobian
    determinant at each quadrature point.
    """
    num_elements = ecoords.shape[0]
    measures = np.zeros(num_elements, dtype=ecoords.dtype)
    num_gauss = len(qweight)
    for iG in prange(num_gauss):
        # Shape-function derivatives at this quadrature point.
        dshp = dshpfnc(qpos[iG])
        for iE in prange(num_elements):
            jacobian = ecoords[iE].T @ dshp
            measures[iE] += qweight[iG] * np.linalg.det(jacobian)
    return measures
|
64ebe6dea6b86b4d391064b100a159c9641dcdde
| 3,638,262
|
def pg_conn(postgresql):
    """Runs the sqitch plan and loads seed data before returning db connection.

    Creates the ``users`` and ``blogs`` tables on the supplied connection,
    then bulk-loads fixture rows from USERS_DATA_PATH and BLOGS_DATA_PATH
    via psycopg2's ``copy_from``. Returns the same connection object.
    """
    # The connection is used as a context manager so the DDL/seed work is
    # committed as one transaction.
    with postgresql:
        # Loads data from blogdb fixture data
        with postgresql.cursor() as cur:
            cur.execute(
                """
                create table users (
                    userid serial not null primary key,
                    username varchar(32) not null,
                    firstname varchar(255) not null,
                    lastname varchar(255) not null
                );"""
            )
            cur.execute(
                """
                create table blogs (
                    blogid serial not null primary key,
                    userid integer not null references users(userid),
                    title varchar(255) not null,
                    content text not null,
                    published date not null default CURRENT_DATE
                );"""
            )
        with postgresql.cursor() as cur:
            # CSV fixtures are streamed straight into the tables; the serial
            # primary keys are omitted so PostgreSQL assigns them.
            with USERS_DATA_PATH.open() as fp:
                cur.copy_from(fp, "users", sep=",", columns=["username", "firstname", "lastname"])
            with BLOGS_DATA_PATH.open() as fp:
                cur.copy_from(
                    fp, "blogs", sep=",", columns=["userid", "title", "content", "published"]
                )
    return postgresql
|
df3245eecad1c8f0fd1228ff8f3bf8a57701dfef
| 3,638,263
|
def partial_with_hound_context(hound, func, *args, **kwargs):
    """
    Return a partially bound function.

    Propagates the currently active hound reason (if any), which is useful
    for capturing the contextual reason when queueing a background action.
    When ``hound`` is None the function is bound directly with no context.
    """
    if hound is None:
        return partial(func, *args, **kwargs)
    current_reason = hound.get_current_reason()
    return partial(
        call_with_context,
        partial(hound.with_reason, current_reason),
        func,
        *args,
        _context_callable=True,
        **kwargs
    )
|
e2547f3c59ac4168e0961db7216903c7fdca16af
| 3,638,264
|
def rssfeed_edit(request, feed, ret_path):
    """Edit the properties of an RSS feed (Django view).

    Renders a tabbed edit form on GET; on a valid POST saves the changed
    values and redirects to ``ret_path``.
    """
    def save_values(feed, old, new):
        """Persist only the RSS feed values that actually changed."""
        has_changed = False
        key = 'title'
        if old[key] != new[key]:
            feed.title = encode_html(new[key])
            has_changed = True
        key = 'text'
        if old[key] != new[key]:
            feed.description = encode_html(new[key])
            has_changed = True
        key = 'url_more'
        if old[key] != new[key]:
            feed.link = new[key]
            has_changed = True
        key = 'section'
        if old[key] != new[key]:
            feed.general_mode = new[key]
            has_changed = True
        if has_changed:
            # Only touch the modification timestamp when something changed.
            feed.last_modified = get_last_modified()
            feed.save()
    class DmsItemForm(forms.Form):
        """Fields of the feed edit form."""
        title = forms.CharField(max_length=240,
                     widget=forms.TextInput(attrs={'size':60}) )
        text = forms.CharField(max_length=180,
                     widget=forms.TextInput(attrs={'size':60}) )
        url_more = forms.CharField(required=False, max_length=200,
                     widget=forms.TextInput(attrs={'size':60}) )
        section = forms.ChoiceField(choices=get_global_choices(),
                                    widget=forms.RadioSelect() )
    # Initial form values taken from the current feed state.
    data_init = {
        'title' : decode_html(feed.title),
        'text' : remove_link_icons(feed.description),
        'url_more' : feed.link,
        'section' : feed.general_mode,
    }
    app_name = 'rssfeed'
    if request.method == 'POST' :
        data = request.POST.copy()
    else :
        data = data_init
    f = DmsItemForm(data)
    my_title = _(u'RSS-Feed ändern')
    tabs = [ ('tab_base', [ 'title', 'text', 'url_more', 'section', ]), ]
    content = get_tabbed_form(tabs, help_form, app_name, f)
    if request.method == 'POST' and not f.errors :
        save_values(feed, data_init, f.data)
        return HttpResponseRedirect(ret_path)
    else:
        # Strip the last two segments off the request path to find the
        # parent container of the feed (e.g. /a/b/feed/edit/ -> /a/).
        path = request.path
        n_pos = path[:-1].rfind('/')
        path = path[:n_pos]
        n_pos = path.rfind('/')
        path = path[:n_pos+1]
        item_container = get_item_container(path, '')
        vars = get_item_vars_edit(request, item_container, app_name, my_title, content, f)
        return render_to_response ( 'app/base_edit.html', vars )
|
0643c6ca976d448bf3faf5539e90e59ea7d06bd7
| 3,638,265
|
def disassemble_pretty(self, addr=None, insns=1,
                       arch=None, mode=None):
    """
    Wrapper around disassemble that formats the result as text,
    one "0x<addr>:\\t<mnemonic>\\t<operands>\\n" line per instruction.
    """
    lines = [
        "0x%x:\t%s\t%s\n" % (ins.address, ins.mnemonic, ins.op_str)
        for ins in self.disassemble(addr, insns, arch, mode)
    ]
    return "".join(lines)
|
39bddf246b880decbc84015ef20c5664f88d917e
| 3,638,266
|
import torch
def overlay_boxes(image, predictions):
    """
    Draw the predicted bounding boxes on top of the image.

    Arguments:
        image (np.ndarray): an image as returned by OpenCV
        predictions (BoxList): the result of the computation by the model.
            It should contain the field `labels`.
    """
    labels = predictions.get_field("labels")
    palette = compute_colors_for_labels(labels).tolist()
    for bbox, rgb in zip(predictions.bbox, palette):
        bbox = bbox.to(torch.int64)
        corner_min = tuple(bbox[:2].tolist())
        corner_max = tuple(bbox[2:].tolist())
        # Thickness-2 rectangle in the label's color.
        image = cv2.rectangle(image, corner_min, corner_max, tuple(rgb), 2)
    return image
|
99905ae0206d285fa878b0063f227a9152600fad
| 3,638,268
|
def convert_tilt_convention(iconfig, old_convention,
                            new_convention):
    """
    convert the tilt angles from an old convention to a new convention

    This should work for both configs with statuses and without

    Each convention is an ``(axes_order, extrinsic)`` pair; the detector
    tilt arrays in ``iconfig`` are rewritten in place. If the new
    convention has no axes/extrinsic set, tilts are left as exponential-map
    (angle * axis) vectors.
    """
    if new_convention == old_convention:
        return
    def _get_tilt_array(data):
        # This works for both a config with statuses, and without
        if isinstance(data, dict):
            return data.get('value')
        return data
    def _set_tilt_array(data, val):
        # This works for both a config with statuses, and without
        if isinstance(data, dict):
            data['value'] = val
        else:
            # Mutate the list in place so references held elsewhere see it.
            data.clear()
            data.extend(val)
    old_axes, old_extrinsic = old_convention
    new_axes, new_extrinsic = new_convention
    det_keys = iconfig['detectors'].keys()
    if old_axes is not None and old_extrinsic is not None:
        # First, convert these to the matrix invariants
        rme = RotMatEuler(np.zeros(3), old_axes, old_extrinsic)
        for key in det_keys:
            tilts = iconfig['detectors'][key]['transform']['tilt']
            rme.angles = np.array(_get_tilt_array(tilts))
            phi, n = angleAxisOfRotMat(rme.rmat)
            # Store as exponential-map vector (angle * unit axis).
            _set_tilt_array(tilts, (phi * n.flatten()).tolist())
    if new_axes is None or new_extrinsic is None:
        # We are done
        return
    # Update to the new mapping
    rme = RotMatEuler(np.zeros(3), new_axes, new_extrinsic)
    for key in det_keys:
        tilts = iconfig['detectors'][key]['transform']['tilt']
        tilt = np.array(_get_tilt_array(tilts))
        rme.rmat = makeRotMatOfExpMap(tilt)
        # Use np.ndarray.tolist() to convert back to native python types
        _set_tilt_array(tilts, np.array(rme.angles).tolist())
|
a24126a20453cf7a7c42a74e71618643215ad5c9
| 3,638,269
|
def _type_of_plot(orientation, n_var, i, j):
"""internal helper function for determining plot type in a corner plot
Parameters
----------
orientation : str
the orientation
options: 'lower left', 'lower right', 'upper left', 'upper right'
i, j : int
the row, column index
Returns
-------
plot type : str
'remove' : do not show this plot
'same' : the axes are the same
'compare' : compare the two different axes
"""
if orientation == "lower left":
if j > i:
return i, j, "remove"
elif j == i:
return i, j, "same"
else: # j < i
return i, j, "compare"
elif orientation == "lower right":
raise ValueError("not yet supported orientation")
# if i + j < n_var - 1:
# return i, j, 'remove'
# elif i + j == n_var - 1:
# return i, j, 'same'
# else: # j < i
# return i, j, 'compare'
elif orientation == "upper left":
raise ValueError("not yet supported orientation")
# if i + j < n_var - 1:
# return i, j, 'compare'
# elif i + j == n_var - 1:
# return i, j, 'same'
# else: # j < i
# return i, j, 'remove'
elif orientation == "upper right":
raise ValueError("not yet supported orientation")
# if j < i:
# return i, j, 'remove'
# elif j == i:
# return i, j, 'same'
# else: # j < i
# return i, j, 'compare'
else:
raise ValueError("not supported orientation")
|
9629af21f1995ccd1b582d4f9a7b1ecf2c621c84
| 3,638,270
|
def t2_function(t, M_0, T2, p):
    """Calculate stretched or un-stretched (p=1) exponential T2 curve

    .. math::
        f(t) = M_{0} e^{(-2(t/T_{2})^{p}}

    Args:
        t (array): time series
        M_0 (float): initial amplitude, see equation
        T2 (float): T2 value
        p (float): stretching exponent, see equation

    Returns:
        array: T2 curve
    """
    decay_exponent = -2.0 * (t / T2) ** p
    return M_0 * _np.exp(decay_exponent)
|
be4dabf4436832ca3dde9289610070ad41a3632b
| 3,638,271
|
def ry(phi):
    """Return the rotation matrix for an angle ``phi`` around the y-axis.

    Args:
        phi: scalar angle in radians, or a 1-D ``np.ndarray`` of angles.

    Returns:
        np.ndarray: shape (3, 3) for a scalar, shape (3, 3, n) for an
        array of n angles (angles stacked along the last axis).
    """
    # isinstance is the idiomatic type test (type(...) == ... misses subclasses).
    if isinstance(phi, np.ndarray):
        cos_phi = np.cos(phi)
        sin_phi = np.sin(phi)
        zeros = np.zeros(len(phi))
        ones = np.ones(len(phi))
        row1 = np.stack((cos_phi, zeros, sin_phi), axis=0)
        row2 = np.stack((zeros, ones, zeros), axis=0)
        row3 = np.stack((-sin_phi, zeros, cos_phi), axis=0)
        return np.stack((row1, row2, row3), axis=0)
    return np.array(([np.cos(phi), 0, np.sin(phi)],
                     [0, 1, 0],
                     [-np.sin(phi), 0, np.cos(phi)]))
|
06a22e478a0912ac3aeba53ba5b565690b94e652
| 3,638,272
|
def hashed_embedding_lookup_sparse(params,
                                   sparse_values,
                                   dimension,
                                   combiner="mean",
                                   default_value=None,
                                   name=None):
  """Looks up embeddings of a sparse feature using parameter hashing.

  See `tf.contrib.layers.hashed_embedding_lookup` for embedding with hashing.

  Args:
    params: A `Tensor` or `list` of `Tensors`.
      Each tensor must be of rank 1 with fully-defined shape.
    sparse_values: A 2-D `SparseTensor` containing the values to be embedded.
      Some rows may be empty.
    dimension: Embedding dimension
    combiner: A string specifying how to combine embedding results for each
        entry. Currently "mean", "sqrtn" and "sum" are supported, with "mean"
        the default.
    default_value: The value to use for an entry with no features.
    name: An optional name for this op.

  Returns:
    Dense tensor with shape [N, dimension] with N the number of rows in
      sparse_values.

  Raises:
    TypeError: If sparse_values is not a SparseTensor.
    ValueError: If combiner is not one of {"mean", "sqrtn", "sum"}.
  """
  if not isinstance(params, list):
    params = [params]
  if not isinstance(sparse_values, ops.SparseTensor):
    raise TypeError("sparse_values must be SparseTensor")
  with ops.op_scope(params + [sparse_values], name,
                    "hashed_sparse_embedding_lookup") as scope:
    # Fill in the empty rows.
    if default_value is None:
      # Random default values to reduce the risk of collision.
      if sparse_values.dtype == dtypes.string:
        default_value = "6ZxWzWOHxZ"
      else:
        default_value = 1288896567
    sparse_values, _ = sparse_ops.sparse_fill_empty_rows(
        sparse_values, default_value)
    # The first index column identifies which output row each value belongs to.
    segment_ids = sparse_values.indices[:, 0]
    if segment_ids.dtype != dtypes.int32:
      segment_ids = math_ops.cast(segment_ids, dtypes.int32)
    values = sparse_values.values
    # Deduplicate values so each distinct value is embedded only once;
    # `idx` maps the original positions back into the unique set.
    values, idx = array_ops.unique(values)
    embeddings = hashed_embedding_lookup(params, values, dimension)
    # Combine per-row embeddings according to the requested reduction.
    if combiner == "sum":
      embeddings = math_ops.sparse_segment_sum(embeddings, idx, segment_ids,
                                               name=scope)
    elif combiner == "mean":
      embeddings = math_ops.sparse_segment_mean(embeddings, idx, segment_ids,
                                                name=scope)
    elif combiner == "sqrtn":
      embeddings = math_ops.sparse_segment_sqrt_n(embeddings, idx, segment_ids,
                                                  name=scope)
    else:
      raise ValueError("Combiner must be one of 'mean', 'sqrtn' or 'sum'.")
    return embeddings
|
e7b4e803d04336e1d0a88d4051473b895a422f08
| 3,638,273
|
def DelfFeaturePostProcessing(boxes, descriptors, use_pca, pca_parameters=None):
  """Post-process DELF features extracted from an input image.

  Args:
    boxes: [N, 4] float array which denotes the selected receptive box. N is
      the number of final feature points which pass through keypoint selection
      and NMS steps.
    descriptors: [N, input_dim] float array.
    use_pca: Whether to use PCA.
    pca_parameters: Only used if `use_pca` is True. Dict containing PCA
      parameter tensors, with keys 'mean', 'matrix', 'dim', 'use_whitening',
      'variances'.

  Returns:
    locations: [N, 2] float array which denotes the selected keypoint
      locations.
    final_descriptors: [N, output_dim] float array with DELF descriptors after
      normalization and (possibly) PCA/whitening.
  """
  # Feature locations are the centers of the receptive boxes.
  keypoint_locations = CalculateKeypointCenters(boxes)
  processed_descriptors = PostProcessDescriptors(descriptors, use_pca,
                                                 pca_parameters)
  return keypoint_locations, processed_descriptors
|
dbd55fa19085179fae3f6695c3fb529666c4550d
| 3,638,274
|
def render_field(field, **kwargs):
    """Render a single form field using the configured Bootstrap field renderer."""
    renderer = get_field_renderer(**kwargs)(field, **kwargs)
    return renderer.render()
|
35a5586991072ba4772df48f5b2b649b1c2d62fd
| 3,638,275
|
def bisection(a, b, poly, tolerance):
    """
    Assume that poly(a) <= 0 and poly(b) >= 0.
    Narrow [a, b] until abs(b-a) <= tolerance while keeping poly(a) <= 0
    and poly(b) >= 0, then return the midpoint.

    :param a: poly(a) <= 0
    :param b: poly(b) >= 0
    :param poly: polynomial coefficients, low order first
    :param tolerance: greater than 0
    :return: an approximate root of the polynomial
    :raises ValueError: if the sign preconditions do not hold
    """
    # ValueError is more precise than the bare Exception the original raised
    # (and remains catchable by any caller catching Exception).
    if evaluate(a, poly) > 0:
        raise ValueError("poly(a) must be <= 0")
    if evaluate(b, poly) < 0:
        raise ValueError("poly(b) must be >= 0")
    # Iterative halving avoids hitting the recursion limit for tiny tolerances.
    mid = (a + b) / 2
    while abs(b - a) > tolerance:
        if evaluate(mid, poly) <= 0:
            a = mid  # root lies in [mid, b]
        else:
            b = mid  # root lies in [a, mid]
        mid = (a + b) / 2
    return mid
|
9ff1961a95a63af587c9469dd2f987657f1661a9
| 3,638,276
|
def decrypt_message(key, message):
    """Return *message* decrypted with *key*.

    Thin wrapper around translate_message() in 'decrypt' mode.
    """
    return translate_message(key, message, 'decrypt')
|
74b590d493b21928880e43e5f8ae55acd8265bb2
| 3,638,277
|
def IOU(a_wh, b_wh):
    """
    Intersection over Union of two boxes given only by their sizes
    (i.e. boxes assumed to share a common corner, as for anchor matching).

    Args:
        a_wh: (width, height) of box A
        b_wh: (width, height) of box B

    Returns float.
    """
    width_a, height_a = a_wh
    width_b, height_b = b_wh
    # Overlap of corner-aligned boxes is the product of the smaller extents.
    intersection = min(width_a, width_b) * min(height_a, height_b)
    union = width_a * height_a + width_b * height_b - intersection
    return intersection / union
|
92580147eac219d77e6c8a38875c5ee809783790
| 3,638,278
|
import base64
def decode_image(img_b64):
    """Decode a base64-encoded image into an RGB numpy array.

    https://jdhao.github.io/2020/03/17/base64_opencv_pil_image_conversion/
    """
    raw_bytes = base64.b64decode(img_b64)
    byte_array = np.frombuffer(raw_bytes, dtype=np.uint8)
    # OpenCV decodes to BGR; convert to RGB before returning.
    bgr = cv2.imdecode(byte_array, flags=cv2.IMREAD_COLOR)
    return cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
|
22547d43fe1a20032ee095f3fe16d5550a4f08c8
| 3,638,279
|
from datetime import datetime
def date_from_string(date_str, format_str):
    """Parse *date_str* according to *format_str* and return a datetime.date."""
    parsed = datetime.strptime(date_str, format_str)
    return parsed.date()
|
7ba2fa5652264c62e2a6711210a39613cf565e37
| 3,638,280
|
import re
def fix_sensor_name(name):
    """Cleanup sensor name, returns str.

    Splits "chip-bus-addr" style names into "Chip (Bus Addr)", title-cases,
    fixes well-known acronyms and spaces digits from letters.
    """
    # Bug fix: re.sub's 4th *positional* argument is `count`, not `flags`.
    # Passing re.IGNORECASE (== 2) positionally silently limited every
    # substitution to 2 replacements and never enabled case-insensitivity.
    name = re.sub(r'^(\w+)-(\w+)-(\w+)', r'\1 (\2 \3)', name,
                  flags=re.IGNORECASE)
    name = name.title()
    name = name.replace('Acpi', 'ACPI')
    name = name.replace('ACPItz', 'ACPI TZ')
    name = name.replace('Coretemp', 'CoreTemp')
    name = name.replace('Cpu', 'CPU')
    name = name.replace('Id ', 'ID ')
    name = name.replace('Isa ', 'ISA ')
    name = name.replace('Pci ', 'PCI ')
    name = name.replace('Smc', 'SMC')
    name = re.sub(r'(\D+)(\d+)', r'\1 \2', name, flags=re.IGNORECASE)
    name = re.sub(r'^K (\d+)Temp', r'AMD K\1 Temps', name, flags=re.IGNORECASE)
    name = re.sub(r'T(ccd\s+\d+|ctl|die)', r'CPU (T\1)', name,
                  flags=re.IGNORECASE)
    # Collapse any double spaces introduced by the digit-spacing rule.
    name = re.sub(r'\s+', ' ', name)
    return name
|
6a346ece5f03c60a2b5d23d5a66c52735aef2939
| 3,638,281
|
def get_relevant_coordinates(path='coords_and_angles.csv'):
    """Return the pixel each lidar ray hits when shot through the near plane.

    Args:
        path: CSV file to load. Defaults to the previously hard-coded
            'coords_and_angles.csv', so existing callers are unaffected.

    Returns:
        list of two ndarrays: the loaded columns split in half
        (coordinates, angles), as produced by ``np.hsplit(..., 2)``.
    """
    coords_and_angles = np.genfromtxt(path, delimiter=',')
    return np.hsplit(coords_and_angles, 2)
|
ad814528122777c99aab13652dfb708282993374
| 3,638,282
|
def _expand_host_port_user(lst):
"""
Input: list containing hostnames, (host, port)-tuples or (host, port, user)-tuples.
Output: list of (host, port, user)-tuples.
"""
def expand(v):
if isinstance(v, basestring):
return (v, None, None)
elif len(v) == 1:
return (v[0], None, None)
elif len(v) == 2:
return (v[0], v[1], None)
return v
return [expand(x) for x in lst]
|
82cfc80f916ef739fc50d8d79a5e19b4aa4a8fa6
| 3,638,283
|
def noise(line, wl=11):
    """Return the per-sample noise estimate after smoothing.

    The signal estimate is the smoothed/trimmed line; the noise is the
    magnitude of the residual at each sample.
    """
    smoothed = smooth_and_trim(line, window_len=wl)
    residual = line - smoothed
    return np.sqrt(residual ** 2)
|
009f05d1eeabf4d0218d78b6c41ff4877f66a5f5
| 3,638,284
|
from typing import Optional
from typing import Iterator
from typing import Tuple
import itertools
import tqdm
import torch
def _evaluate(
    limit_batches: Optional[int],
    train_pipeline: TrainPipelineSparseDist,
    iterator: Iterator[Batch],
    next_iterator: Iterator[Batch],
    stage: str,
) -> Tuple[float, float]:
    """
    Evaluates model. Computes and prints metrics including AUROC and Accuracy. Helper
    function for train_val_test.

    Args:
        limit_batches (Optional[int]): number of batches.
        train_pipeline (TrainPipelineSparseDist): pipelined model.
        iterator (Iterator[Batch]): Iterator used for val/test batches.
        next_iterator (Iterator[Batch]): Iterator used for the next phase (either train
            if there are more epochs to train on or test if all epochs are complete).
            Used to queue up the next TRAIN_PIPELINE_STAGES - 1 batches before
            train_val_test switches to the next phase. This is done so that when the
            next phase starts, the first output train_pipeline generates an output for
            is the 1st batch for that phase.
        stage (str): "val" or "test".

    Returns:
        Tuple[float, float]: auroc and accuracy result
    """
    model = train_pipeline._model
    # Eval mode: disables dropout / uses running batch-norm statistics.
    model.eval()
    device = train_pipeline._device
    if limit_batches is not None:
        # Reserve the buffered batches consumed from next_iterator below.
        limit_batches -= TRAIN_PIPELINE_STAGES - 1

    # Because TrainPipelineSparseDist buffer batches internally, we load in
    # TRAIN_PIPELINE_STAGES - 1 batches from the next_iterator into the buffers so that
    # when train_val_test switches to the next phase, train_pipeline will start
    # producing results for the TRAIN_PIPELINE_STAGES - 1 buffered batches (as opposed
    # to the last TRAIN_PIPELINE_STAGES - 1 batches from iterator).
    combined_iterator = itertools.chain(
        iterator
        if limit_batches is None
        else itertools.islice(iterator, limit_batches),
        itertools.islice(next_iterator, TRAIN_PIPELINE_STAGES - 1),
    )
    auroc = metrics.AUROC(compute_on_step=False).to(device)
    accuracy = metrics.Accuracy(compute_on_step=False).to(device)

    # Infinite iterator instead of while-loop to leverage tqdm progress bar.
    for _ in tqdm(iter(int, 1), desc=f"Evaluating {stage} set"):
        try:
            _loss, logits, labels = train_pipeline.progress(combined_iterator)
            # Logits -> probabilities for the metric updates.
            preds = torch.sigmoid(logits)
            auroc(preds, labels)
            accuracy(preds, labels)
        except StopIteration:
            break
    auroc_result = auroc.compute().item()
    accuracy_result = accuracy.compute().item()
    # NOTE(review): assumes torch.distributed is initialized; only rank 0 prints.
    if dist.get_rank() == 0:
        print(f"AUROC over {stage} set: {auroc_result}.")
        print(f"Accuracy over {stage} set: {accuracy_result}.")
    return auroc_result, accuracy_result
|
f0550b60c3d53192acb9ddd2d5057ade118fa79d
| 3,638,285
|
import re
def check_pre_release(tag_name):
    """
    Check the given tag to determine if it is a release tag, that is, whether it
    is of the form rX.Y.Z. Tags that do not match (e.g., because they are
    suffixed with someting like -beta# or -rc#) are considered pre-release tags.

    Note that this assumes that the tag name has been validated to ensure that
    it starts with something like rX.Y.Z and nothing else.

    Returns True for a pre-release tag, False for a release tag.
    """
    # Bug fix: the pattern must be anchored at the end as well. Without the
    # trailing anchor, `re.match` accepted any rX.Y.Z *prefix*, so suffixed
    # tags like r1.2.3-beta1 were wrongly classified as releases.
    release_re = re.compile(r'^r[0-9]+\.[0-9]+\.[0-9]+$')
    return release_re.match(tag_name) is None
|
8e24a0a61bfa6fe84e936f004b4228467d724616
| 3,638,286
|
def _get_target_connection_details(target_connection_string):
"""
Returns a tuple with the raw connection details for the target machine extracted from the connection string provided
in the application arguments. It is a specialized parser of that string.
:param target_connection_string: the connection string provided in the arguments for the application.
:return: A tuple in the form of (user, password, host, port) if a password is present in the connection string or
(user, host, port) if a password is not present
"""
password = None
connection_string_format_error = 'Invalid connection string provided. Expected: user[/password]@host[:port]'
if '@' not in target_connection_string:
raise TypeError(connection_string_format_error)
connection_string_parts = target_connection_string.split('@')
if len(connection_string_parts) != 2:
raise TypeError(connection_string_parts)
authentication_part = connection_string_parts[0]
target_part = connection_string_parts[1]
if '/' in authentication_part:
auth_parts = authentication_part.split('/')
if len(auth_parts) != 2:
raise TypeError(connection_string_format_error)
user, password = auth_parts
else:
user = authentication_part
if ':' in target_part:
conn_parts = target_part.split(':')
if len(conn_parts) != 2:
raise TypeError(connection_string_format_error)
host, port = conn_parts
try:
port = int(port)
except ValueError:
raise TypeError(connection_string_format_error)
else:
host = target_part
port = 22
if not len(user) or not len(host):
raise TypeError(connection_string_format_error)
if password:
return user, password, host, int(port)
else:
return user, host, int(port)
|
5e6ee870c0e196f54950f26ee6e551476688dce9
| 3,638,287
|
def async_setup_platform(hass, config, async_add_devices, discovery_info=None):
    """Set up an Arlo IP sensor.

    Creates one ArloSensor per monitored condition: the special
    'total_cameras' condition gets a single hub-level sensor; every other
    condition gets one sensor per camera. Returns False when the shared
    Arlo data object has not been set up yet.
    """
    arlo = hass.data.get(DATA_ARLO)
    if not arlo:
        # Arlo component not initialized -- nothing to add.
        return False
    sensors = []
    for sensor_type in config.get(CONF_MONITORED_CONDITIONS):
        if sensor_type == 'total_cameras':
            sensors.append(ArloSensor(hass,
                                      SENSOR_TYPES[sensor_type][0],
                                      arlo,
                                      sensor_type))
        else:
            for camera in arlo.cameras:
                # Entity name combines the condition label and camera name.
                name = '{0} {1}'.format(SENSOR_TYPES[sensor_type][0],
                                        camera.name)
                sensors.append(ArloSensor(hass, name, camera, sensor_type))
    async_add_devices(sensors, True)
    return True
|
875ddac74d1e1d8dd10136214f8487d750094e61
| 3,638,288
|
from .tfr import _compute_tfr
def tfr_array_multitaper(epoch_data, sfreq, freqs, n_cycles=7.0,
                         zero_mean=True, time_bandwidth=None, use_fft=True,
                         decim=1, output='complex', n_jobs=1,
                         verbose=None):
    """Compute Time-Frequency Representation (TFR) using DPSS tapers.

    Same computation as `~mne.time_frequency.tfr_multitaper`, but operates on
    :class:`NumPy arrays <numpy.ndarray>` instead of `~mne.Epochs` objects.

    Parameters
    ----------
    epoch_data : array of shape (n_epochs, n_channels, n_times)
        The epochs.
    sfreq : float | int
        Sampling frequency of the data.
    freqs : array-like of float, shape (n_freqs,)
        The frequencies.
    n_cycles : float | array of float
        Number of cycles in the wavelet. Fixed number or one per
        frequency. Defaults to 7.0.
    zero_mean : bool
        If True, make sure the wavelets have a mean of zero. Defaults to True.
    time_bandwidth : float
        If None, will be set to 4.0 (3 tapers). Time x (Full) Bandwidth
        product. The number of good tapers (low-bias) is chosen automatically
        based on this to equal floor(time_bandwidth - 1). Defaults to None.
    use_fft : bool
        Use the FFT for convolutions or not. Defaults to True.
    decim : int | slice
        To reduce memory usage, decimation factor after time-frequency
        decomposition. Defaults to 1.
        If `int`, returns tfr[..., ::decim].
        If `slice`, returns tfr[..., decim].

        .. note::
            Decimation may create aliasing artifacts, yet decimation
            is done after the convolutions.

    output : str, default 'complex'

        * 'complex' : single trial complex.
        * 'power' : single trial power.
        * 'phase' : single trial phase.
        * 'avg_power' : average of single trial power.
        * 'itc' : inter-trial coherence.
        * 'avg_power_itc' : average of single trial power and inter-trial
          coherence across trials.
    %(n_jobs)s
        The number of epochs to process at the same time. The parallelization
        is implemented across channels. Defaults to 1.
    %(verbose)s

    Returns
    -------
    out : array
        Time frequency transform of epoch_data. If output is in ['complex',
        'phase', 'power'], then shape of out is (n_epochs, n_chans, n_freqs,
        n_times), else it is (n_chans, n_freqs, n_times). If output is
        'avg_power_itc', the real values code for 'avg_power' and the
        imaginary values code for the 'itc': out = avg_power + i * itc.

    See Also
    --------
    mne.time_frequency.tfr_multitaper
    mne.time_frequency.tfr_morlet
    mne.time_frequency.tfr_array_morlet
    mne.time_frequency.tfr_stockwell
    mne.time_frequency.tfr_array_stockwell

    Notes
    -----
    .. versionadded:: 0.14.0
    """
    # Thin wrapper: delegate to the generic TFR engine with the
    # multitaper method selected.
    return _compute_tfr(epoch_data, freqs, sfreq=sfreq,
                        method='multitaper', n_cycles=n_cycles,
                        zero_mean=zero_mean, time_bandwidth=time_bandwidth,
                        use_fft=use_fft, decim=decim, output=output,
                        n_jobs=n_jobs, verbose=verbose)
|
28a6f998fdaa9acde77a521b5e0c5c51a4709887
| 3,638,289
|
import logging
def card(id: int):
    """
    Show the selected card data (by id).

    Returns the matching card dict, or the string "Card not found." when
    no card has the given id.
    """
    found = next((c for c in cards["cards"] if c["id"] == id), None)
    logging.info("card")
    if found is not None:
        return found
    return "Card not found."
|
8a26ea6add0d3ebe539b8a3c0c5dcbf0a458e923
| 3,638,290
|
def build_norm_layer(cfg, num_features, postfix=""):
    """Build a normalization layer.

    Args:
        cfg (dict): must contain:
            type (str): identifies the norm layer type ("BN", "BN1d", "GN").
            layer args: extra kwargs forwarded to the layer constructor.
            requires_grad (bool): [optional] whether to stop gradient updates.
        num_features (int): number of channels from input.
        postfix (int, str): appended to the norm abbreviation to create a
            named layer.

    Returns:
        name (str): abbreviation + postfix
        layer (nn.Module): created norm layer
    """
    # format: layer_type: (abbreviation, module)
    norm_cfg = {
        "BN": ("bn", nn.BatchNorm2d),
        "BN1d": ("bn1d", nn.BatchNorm1d),
        "GN": ("gn", nn.GroupNorm),
    }
    assert isinstance(cfg, dict) and "type" in cfg
    options = dict(cfg)
    layer_type = options.pop("type")
    if layer_type not in norm_cfg:
        raise KeyError("Unrecognized norm type {}".format(layer_type))
    abbr, norm_cls = norm_cfg[layer_type]
    if norm_cls is None:
        raise NotImplementedError
    assert isinstance(postfix, (int, str))
    name = abbr + str(postfix)
    requires_grad = options.pop("requires_grad", True)
    options.setdefault("eps", 1e-5)
    if layer_type == "GN":
        # GroupNorm takes channels as a keyword and requires num_groups.
        assert "num_groups" in options
        layer = norm_cls(num_channels=num_features, **options)
    else:
        layer = norm_cls(num_features, **options)
    for param in layer.parameters():
        param.requires_grad = requires_grad
    return name, layer
|
ef57209bfbd9ead48585ef478a0c74d74127f42f
| 3,638,291
|
def scalarProd(v, w):
    """Return the product of the component sums of two 2-tuples.

    Computes (v[0] + v[1]) * (w[0] + w[1]) as a numpy scalar array.

    NOTE(review): despite the name this is NOT a dot product; the original
    docstring ("sum of 2 vectors", "distance") is also inconsistent with the
    computation — confirm the intended formula with the author.

    Params: A 2 tuple point (V)
            another 2 tuple point (W)
    returns: np.ndarray scalar with the product of the component sums.
    """
    # Bug fix: the original referenced undefined names x and y instead of the
    # parameters v and w, raising NameError on every call.
    v_sum = v[0] + v[1]
    w_sum = w[0] + w[1]
    return np.array(v_sum * w_sum)
|
604750efbef53dfb21468fcc7d4f41bd07af502d
| 3,638,293
|
def sample_summary(df, extra_values=None, params=SummaryParams()):
    """
    Returns table showing statistical summary from the sample parameters:
    mean, std, mode, hpdi.

    Parameters
    ------------
    df : Panda's dataframe
        Contains parameter sample values: each column is a parameter.
    extra_values : Panda's dataframe
        Additional values to be shown for parameters. Indexes are
        parameter names, and columns contain additional values to
        be shown in summary.
    params : SummaryParams
        Supplies the HPDI probabilities/labels used for the interval columns.

    Returns
    -------
    Panda's dataframe
        Panda's dataframe containing the summary for all parameters.
    str
        text of the summary table
    """
    # NOTE(review): params=SummaryParams() is a mutable default argument;
    # safe only if SummaryParams is effectively immutable — confirm.
    rows = []
    for column in df:
        values = df[column].to_numpy()
        mean = df[column].mean()
        std = df[column].std()
        mode = get_mode(df[column])
        summary_values = [column, mean, std, mode]
        for i, probability in enumerate(params.hpdis):
            hpdi_value = hpdi(values, probability=probability)
            if i == 0:
                # For the first interval, calculate upper and
                # lower uncertainties
                uncert_plus = hpdi_value[1] - mode
                uncert_minus = mode - hpdi_value[0]
                summary_values.append(uncert_plus)
                summary_values.append(uncert_minus)
            summary_values.append(hpdi_value[0])
            summary_values.append(hpdi_value[1])
        if extra_values is not None:
            # Add extra columns
            summary_values += extra_values.loc[column].values.tolist()
        rows.append(summary_values)
    headers = ['Name', 'Mean', 'Std', 'Mode', '+', '-']
    # One CI- / CI+ column pair per requested HPDI probability.
    for hpdi_percent in params.hpdi_percent():
        headers.append(f'{hpdi_percent}CI-')
        headers.append(f'{hpdi_percent}CI+')
    if extra_values is not None:
        headers += extra_values.columns.values.tolist()
    formats = [".2f"] * len(headers)
    if 'N_Eff' in headers:
        # Effective sample size is shown without decimals.
        formats[headers.index('N_Eff')] = ".0f"
    table = tabulate(rows, headers=headers, floatfmt=formats, tablefmt="pipe")
    df_summary = pd.DataFrame(rows, columns=headers, index=df.columns.values)
    # The parameter name is already the index; drop the redundant column.
    df_summary.drop('Name', axis=1, inplace=True)
    return df_summary, table
|
0fcedfb54f7a72c3811f7cb4c5df559b4d313383
| 3,638,294
|
from .gnat import GNAT
def classFactory(iface):  # pylint: disable=invalid-name
    """Load GNAT class from file GNAT.

    QGIS plugin entry point: QGIS calls this factory to instantiate
    the plugin object.

    :param iface: A QGIS interface instance.
    :type iface: QgsInterface
    """
    return GNAT(iface)
|
54036f9fa18d901426d45771409a48ab803302ef
| 3,638,295
|
from aiida.common.hashing import get_random_string
def get_quicksetup_password(ctx, param, value):  # pylint: disable=unused-argument
    """Determine the password to be used as default for the Postgres connection in `verdi quicksetup`.

    If a value is explicitly passed, that value is returned. Otherwise the
    existing profiles are scanned for one whose database username matches the
    one in the click context; its password is reused. If no such profile
    exists, a random 16-character password is generated.

    :param ctx: click context which should contain the contextual parameters
    :return: the password
    """
    if value is not None:
        return value
    username = ctx.params['db_username']
    config = get_config()
    # Reuse the password of an existing profile with the same DB user.
    for existing_profile in config.profiles:
        if existing_profile.storage_config['database_username'] == username:
            return existing_profile.storage_config['database_password']
    return get_random_string(16)
|
6ec0a8548bc632bdf008ba3e8e8b8d2bfdd5244b
| 3,638,296
|
from typing import List
def convert(day_input: List[str]) -> List[List[str]]:
    """Break down the input into a list of hex-grid directions for each tile.

    Tokens end in 'e' or 'w'; a preceding 'n' or 's' is fused with it
    (e.g. "nwwswee" -> ['nw', 'w', 'sw', 'e', 'e']).
    """
    def tokenize(line: str) -> List[str]:
        tokens = []
        pending = ''
        for ch in line:
            if ch in ('e', 'w'):
                tokens.append(pending + ch)
                pending = ''
            else:
                pending = ch
        return tokens

    return [tokenize(line) for line in day_input]
|
fd1d683e69dbff8411cecdaa184355f2311d3e8a
| 3,638,297
|
import codecs
def read(filepath):
    """Return the full contents of *filepath* decoded as UTF-8."""
    with codecs.open(filepath, encoding='utf-8') as handle:
        return handle.read()
|
bff53fbb9b1ebe85c6a1fa690d28d6b6bec71f84
| 3,638,298
|
def trend_indicator(trend, style):
    """Get the trend indicator arrow and its corresponding RGBA color.

    A trend of exactly 0.00042 (sentinel) or NaN yields '?' with a fully
    transparent color. Otherwise the trend is clamped to [-1, 1]; red when
    rising is "bad" for the given style, green otherwise, with opacity
    proportional to trend squared.
    """
    if trend == 0.00042 or np.isnan(trend):
        return '?', (0, 0, 0, 0)
    arrows = ('→', '↗', '↑', '↓', '↘')
    clamped = min(max(trend, -1), 1)  # limit the trend
    alpha = clamped * clamped
    rising_is_bad = (clamped > 0) != ("_up" in style)
    color = (1, 0, 0, alpha) if rising_is_bad else (0, 1, 0, alpha)
    return arrows[round(clamped * 2)], color
|
009e95e45c3ba6f4e459f024c09511a7952053e4
| 3,638,299
|
from typing import Callable
from re import T
from typing import Iterable
def space(fn: Callable[[State], T], verbose: bool=False) -> Iterable[T]:
    """
    Return an iterable that yields the values produced by ``fn`` while
    fully exhausting the state space.

    During iteration ``fn`` is called repeatedly with a
    :class:`~exhaust.State` instance as its only argument.

    :param fn: The function to generate values from.
    :param verbose: If True, print the state of the generator.
    """
    iterable = SpaceIterable(fn, verbose=verbose)
    return iterable
|
b901a3936b6e1020db123bce9f72b600117f5825
| 3,638,300
|
import json
def get_menu_as_json(menu):
    """Build a tree-like JSON structure from the top menu.

    Covers the top menu items, their children and their grandchildren.
    """
    def serialize(item, depth):
        # Serialize one item; recurse into children while depth remains.
        data = get_menu_item_as_dict(item)
        if depth > 0:
            data["child_items"] = [
                serialize(child, depth - 1) for child in item.children.all()
            ]
        return data

    top_items = menu.items.filter(parent=None)
    # Depth 2: top item -> children -> grandchildren (grandchildren carry
    # no "child_items" key, matching the flat leaf representation).
    return json.dumps([serialize(item, 2) for item in top_items])
|
f191d883f44b5cbed729ebcee7670ba99e28d941
| 3,638,301
|
import numpy
def vortex_contribution_normal(panels):
    """
    Builds the vortex contribution matrix for the normal velocity.

    Parameters
    ----------
    panels: 1D array of Panel objects
        List of panels.

    Returns
    -------
    A: 2D Numpy array of floats
        Vortex contribution matrix.
    """
    A = numpy.empty((panels.size, panels.size), dtype=float)
    # vortex contribution on a panel from itself is zero
    numpy.fill_diagonal(A, 0.0)
    # vortex contribution on a panel from every other panel
    for i, panel_i in enumerate(panels):
        for j, panel_j in enumerate(panels):
            if i != j:
                A[i, j] = -0.5/numpy.pi*integral(panel_i.xc, panel_i.yc,
                                                 panel_j,
                                                 numpy.sin(panel_i.beta),
                                                 -numpy.cos(panel_i.beta))
    # NOTE: removed leftover debug `print(A)` that dumped the full matrix
    # to stdout on every call.
    return A
|
e5089509646be80307210cad528357d3f85774e9
| 3,638,302
|
def app():
    """Required by pytest-tornado's http_server fixture."""
    application = tornado.web.Application()
    return application
|
556ac2b69eaca3d8c4f934fba0deea820ab4e1ff
| 3,638,304
|
import inspect
def is_bound_builtin_method(meth):
    """Helper returning a truthy value if meth is a bound built-in method."""
    if not inspect.isbuiltin(meth):
        return False
    receiver = getattr(meth, '__self__', None)
    if receiver is None:
        return False
    # Truthy result is the receiver's class (mirrors the original chained-and).
    return getattr(receiver, '__class__', None)
|
a7a45f0f519119d795e91723657a1333eb6714e4
| 3,638,305
|
def normalize(adj):
    """Row-normalize a (sparse) matrix so each row sums to one."""
    row_sums = np.array(adj.sum(1))
    inv = np.power(row_sums, -1).flatten()
    # Zero-sum rows would give inf; keep them as all-zero rows instead.
    inv[np.isinf(inv)] = 0.
    return np.diag(inv).dot(adj)
|
c342890befeddd3db01403914e80b9e89dc4f20d
| 3,638,306
|
def get_recommendation_and_prediction_from_text(input_text, num_feats=10):
    """
    Gets a score and recommendations that can be displayed in the Flask app.

    :param input_text: input string
    :param num_feats: number of features to suggest recommendations for
    :return: HTML string with the current score and the recommendations
    """
    global MODEL
    features = get_features_from_input_text(input_text)
    # Probability of the positive class for this single sample.
    positive_probability = MODEL.predict_proba([features])[0][1]
    print("explaining")
    explanation = EXPLAINER.explain_instance(
        features, MODEL.predict_proba, num_features=num_feats, labels=(1,)
    )
    print("explaining done")
    recommendations = get_recommendation_string_from_parsed_exps(
        parse_explanations(explanation.as_list())
    )
    template = """
    Current score (0 is worst, 1 is best):
    <br/>
    %s
    <br/>
    <br/>
    Recommendations (ordered by importance):
    <br/>
    <br/>
    %s
    """
    return template % (positive_probability, recommendations)
|
6f5737c5ac293a3e33fed7a95119c30c72fafa1e
| 3,638,307
|
def set_title(title, uid='master'):
    """
    Sets a new title of the window identified by ``uid``.
    """
    try:
        _webview_ready.wait(5)
        result = gui.set_title(title, uid)
    except KeyError:
        # No window registered under this uid.
        raise Exception('Cannot call function: No webview exists with uid: {}'.format(uid))
    except NameError:
        # The gui backend was never initialised.
        raise Exception('Create a web view window first, before invoking this function')
    else:
        return result
|
e2ad0fd3673ab2ad0966527b394e9afdf8e2a531
| 3,638,308
|
def FK42FK5MatrixOLDATTEMPT():
    """
    ----------------------------------------------------------------------
    Experimental.
    Create matrix to precess from an epoch in FK4 to an epoch in FK5
    So epoch1 is Besselian and epoch2 is Julian
    1) Do an epoch transformation in FK4 from input epoch to
       1984 January 1d 0h
    2) Apply a zero point correction for the right ascension
       w.r.t. B1950. The formula is:
       E = E0 + E1*(jd-jd1950)/Cb
       E0 = 0.525; E1 = 1.275 and Cb = the length of the tropical
       century (ES 3.59 p 182) = 36524.21987817305
       For the correction at 1984,1,1 the ES lists 0.06390s which is
       0.06390*15=0.9585"
       This function calculated E = 0.958494476885" which agrees with the
       literature.
    3) Transform in FK5 from 1984 January 1d 0h to epoch2
    Note that we do not use the adopted values for the precession angles,
    but use the Woolward and Clemence expressions to calculate the angles.
    These are one digit more accurate than the adopted values.
    ----------------------------------------------------------------------
    """
    # Epoch transformation from B1950 to 1984, 1,1 in FK4
    jd = JD(1984,1,1)
    epoch1984 = JD2epochBessel(jd)
    M1 = BMatrixEpoch12Epoch2(1950.0, epoch1984)
    # Equinox correction to the right ascension
    jd1950 = epochBessel2JD(1950.0)
    E0 = 0.525; E1 = 1.275    # seconds of arc (see docstring formula)
    Cb = 36524.21987817305 # In days = length of the tropical century
    E = E0 + E1*(jd-jd1950)/Cb
    E /= 3600.0 # From seconds of arc to degree
    M2 = rotZ(-E) # The correction is positive so we have to rotate
                  # around the z-axis in the negative direction.
    # Epoch transformation from 1984,1,1 to J2000
    # NOTE(review): epoch1984 is rebound here — the same JD re-expressed as
    # a *Julian* epoch for the FK5 leg of the transformation.
    epoch1984 = JD2epochJulian(jd)
    M3 = JMatrixEpoch12Epoch2(epoch1984, 2000.0)
    # Combined matrix, applied right-to-left: FK4 step, equinox correction,
    # FK5 step. Assumes the matrix type implements '*' as matrix
    # multiplication — TODO confirm.
    return M3*M2*M1
|
bbf98f3073fda4a248190e417332d72645faf5c1
| 3,638,309
|
def _lg_undirected(G, selfloops=False, create_using=None):
    """Return the line graph L of the (multi)graph G.

    Every edge of G becomes a node of L, labelled as a sorted tuple
    (u, v), or (u, v, key) for multigraphs. Two nodes of L are adjacent
    when the corresponding edges of G share an endpoint.

    Parameters
    ----------
    G : graph
        An undirected graph or multigraph.
    selfloops : bool
        If `True`, self-loops are included in the line graph. If `False`,
        they are excluded.
    create_using : None
        A graph instance used to populate the line graph.

    Notes
    -----
    The standard algorithm for line graphs of undirected graphs does not
    produce self-loops.
    """
    L = G.__class__() if create_using is None else create_using

    # Graph-specific accessors for edges and canonically sorted node labels.
    get_edges = _edge_func(G)
    sorted_node = _node_func(G)

    # With self-loops an edge pairs with itself (offset 0); otherwise skip it.
    offset = 0 if selfloops else 1

    edge_set = set()
    for u in G:
        # Label each incident edge as a sorted tuple of original nodes.
        incident = [sorted_node(*edge) for edge in get_edges(u)]
        if len(incident) == 1:
            # A pendant edge becomes an isolated node in L.
            L.add_node(incident[0])
        # Connect every pair of edges sharing u. Storing edges of L in
        # canonical form in a set prevents duplicates — especially
        # important for multigraphs.
        for idx, a in enumerate(incident):
            edge_set.update(_sorted_edge(a, b) for b in incident[idx + offset:])
    L.add_edges_from(edge_set)
    return L
|
172fbe2e1d2ec425c3b37c97429df67d789f2c9c
| 3,638,310
|
def get_utxo_provider_client(utxo_provider, config_file):
    """
    Get or instantiate our blockchain UTXO provider's client.
    Return None if we were unable to connect.

    :param utxo_provider: name of the UTXO provider to connect to
    :param config_file: path to the configuration file holding provider options
    """
    utxo_opts = default_utxo_provider_opts( utxo_provider, config_file )
    try:
        utxo_provider = connect_utxo_provider( utxo_opts )
        return utxo_provider
    except Exception as e:
        # Fixed: `except Exception, e` is Python-2-only syntax and a
        # SyntaxError on Python 3; the `as` form works on 2.6+ and 3.x.
        log.exception(e)
        return None
|
79d72221f707f36bdb07a57b634a57bb42942b2e
| 3,638,311
|
from typing import Any, Dict, Optional
def metadata(
    sceneid: str,
    pmin: float = 2.0,
    pmax: float = 98.0,
    hist_options: Optional[Dict] = None,
    **kwargs: Any,
) -> Dict:
    """
    Return band bounds and statistics.

    Attributes
    ----------
    sceneid : str
        CBERS sceneid.
    pmin : int, optional, (default: 2)
        Histogram minimum cut.
    pmax : int, optional, (default: 98)
        Histogram maximum cut.
    hist_options : dict, optional
        Options to forward to numpy.histogram function.
        e.g: {bins=20, range=(0, 1000)}
    kwargs : optional
        These are passed to 'rio_tiler.reader.preview'

    Returns
    -------
    out : dict
        Dictionary with bounds and bands statistics.
    """
    # Fixed: `hist_options: Dict = {}` was a mutable default argument;
    # default to None and substitute an empty dict per call instead.
    if hist_options is None:
        hist_options = {}
    scene_params = cbers_parser(sceneid)
    cbers_prefix = "{scheme}://{bucket}/{prefix}/{scene}".format(**scene_params)
    bands = scene_params["bands"]
    addresses = [f"{cbers_prefix}_BAND{band}.tif" for band in bands]
    responses = reader.multi_metadata(
        addresses,
        indexes=[1],
        nodata=0,
        percentiles=(pmin, pmax),
        hist_options=hist_options,
        **kwargs,
    )
    info: Dict[str, Any] = dict(sceneid=sceneid)
    info["instrument"] = scene_params["instrument"]
    info["band_descriptions"] = [(ix + 1, b) for ix, b in enumerate(bands)]
    # Bounds are taken from the instrument's reference band.
    info["bounds"] = [
        r["bounds"]
        for b, r in zip(bands, responses)
        if b == scene_params["reference_band"]
    ][0]
    info["statistics"] = {b: d["statistics"][1] for b, d in zip(bands, responses)}
    return info
|
c3b5203ddbec575f791bef1fb6689088dfa666a2
| 3,638,312
|
def size_to_string(volume_size):
    # type: (int) -> str
    """
    Convert a volume size to string format to pass into Kubernetes.

    Args:
        volume_size: The size of the volume in bytes.

    Returns:
        The size of the volume as a passable string to Kubernetes
        (Gi/Mi/Ki suffixed, largest fitting unit).
    """
    if volume_size >= Gi:
        shift, suffix = 30, 'Gi'
    elif volume_size >= Mi:
        shift, suffix = 20, 'Mi'
    else:
        shift, suffix = 10, 'Ki'
    return '{}{}'.format(volume_size >> shift, suffix)
|
b1b30f4a383d29951d12189180271a9752e5ba61
| 3,638,313
|
def argToDic(arg):
    """
    Converts a parameter sequence into a dict.

    Args:
        arg (string): specified simulation parameters."""
    params = dict()
    tokens = arg.split("_")
    # A leading token without '=' names the simulation mode.
    params["mode"] = "" if "=" in tokens[0] else tokens.pop(0)
    # parse key/value arguments such as "M=2"
    for token in tokens:
        parts = token.split("=")
        value = parseValue(parts[1])
        # exception: iteration counts are forced to int
        if "IT" in parts[0]:
            value = int(value)
        params[parts[0]] = value
    return params
|
173284e8ee45d9e61d786be33d6d6df60e0f9389
| 3,638,314
|
def geth2hforplayer(matches, name):
    """Get all head-to-head records of the given player.

    Returns a list of [opponent, wins, losses] sorted by wins then losses,
    or '' when the player appears in no match.
    """
    involved = matches[(matches['winner_name'] == name) | (matches['loser_name'] == name)]
    records = {}
    for _, row in involved.iterrows():
        if row['winner_name'] == name:
            opponent, outcome = row['loser_name'], 'w'
        elif row['loser_name'] == name:
            opponent, outcome = row['winner_name'], 'l'
        else:
            continue
        if opponent not in records:
            records[opponent] = {'w': 0, 'l': 0}
        records[opponent][outcome] += 1
    # Flatten to [opponent, wins, losses] rows.
    table = [[opponent, tally['w'], tally['l']] for opponent, tally in records.items()]
    if not table:
        return ''
    # Sort by wins and then by losses.
    return sorted(table, key=itemgetter(1, 2))
|
5bcf3e520085acd00e607cad386708b490937e9f
| 3,638,316
|
import random
def backtracking_solver(
    starting_event: Event,
    **kwargs) -> FiniteSequence:
    """Compose a melodic sequence based upon the
    domain and constraints given.

    starting_event: Event dictates the starting pitch.
    All subsequent events will be of similar duration.
    constraints - list of constraint functions
    (see composerstoolkit.composers.constraints)
    heuristics - list of heuristics (weight maps)
    that can be used to provide a rough shape to the line
    (see composerstoolkit.composers.heuristics)
    n_events - the number of notes of the desired target
    sequence. (Default 1)

    Raises InputViolatesConstraints when the starting event already
    violates a constraint, and AllRoutesExhausted when backtracking
    reaches the starting event with no remaining options.
    """
    opts = {
        "constraints": [],
        "heuristics": [],
        "n_events": 1
    }
    opts.update(kwargs)
    constraints = opts["constraints"]
    heuristics = opts["heuristics"]
    n_events = opts["n_events"]
    tick = 0  # index of the event slot currently being filled
    seq = FiniteSequence([starting_event])
    use_weights = len(heuristics) > 0
    if n_events == 1:
        # NOTE(review): this wraps an existing FiniteSequence in another
        # FiniteSequence — confirm the constructor accepts that.
        return FiniteSequence(seq)
    # The starting event itself must satisfy every constraint.
    results = set()
    for constraint in constraints:
        results.update([constraint(seq)])
    if results != {True}:
        raise InputViolatesConstraints("Unable to solve!")
    # Candidate pitches for the next event; reset after every accepted step
    # and after every backtrack.
    choices = list(range(NOTE_MIN, NOTE_MAX))
    dead_paths = []  # prefixes proven unsolvable, never re-entered
    while tick < n_events-1:
        if use_weights:
            # Start from uniform weights and let each heuristic reshape them.
            weights= [1.0 for i in range(len(choices))]
            for heuristic in heuristics:
                weights = heuristic(tick, choices, weights)
        try:
            if use_weights:
                note = Event([random.choices(choices, weights)[0]], starting_event.duration)
            else:
                note = Event([random.choice(choices)], starting_event.duration)
        except IndexError:
            # this was thrown because we ran out of choices (we have reached a dead-end)
            # Backtrack: remember the failed prefix, drop the last event.
            dead_paths.append(seq[:])
            seq = seq[:-1]
            tick = tick -1
            choices = list(range(NOTE_MIN, NOTE_MAX))
            if tick == 0:
                raise AllRoutesExhausted("Unable to solve!")
            continue
        # Evaluate all constraints against the sequence extended by `note`.
        context = FiniteSequence(seq.events[:])
        context.events.append(note)
        results = set()
        for constraint in constraints:
            results.update([constraint(context)])
        candidate = seq[:]
        candidate.events.append(note)
        if results == {True} and candidate not in dead_paths:
            seq.events.append(note)
            tick = tick + 1
            choices = list(range(NOTE_MIN, NOTE_MAX))
        else:
            #this choice was bad, so we must exclude it
            choices.remove(note.pitches[-1])
    return seq
|
86f33615a2bb72e0f656ba7e021ab3f49dcc79e2
| 3,638,317
|
def jdos(bs, f, i, occs, energies, kweights, gaussian_width, spin=Spin.up):
    """Return the joint density of states for the i -> f transition.

    Args:
        bs: bandstructure object.
        f: final band index.
        i: initial band index.
        occs: occupancies over all bands.
        energies: energy mesh (eV).
        kweights: k-point weights.
        gaussian_width: width of gaussian plot.
        spin: Which spin channel to include.

    Returns:
        Cumulative JDOS value for a specific i->f transition, with
        consideration of partial occupancy and spin polarisation.
    """
    result = np.zeros(len(energies))
    num_kpoints = len(bs.bands[spin][i])
    for k in range(num_kpoints):
        e_initial = bs.bands[spin][i][k]
        e_final = bs.bands[spin][f][k]
        occ_initial = occs[i][k]
        occ_final = occs[f][k]
        # Net transition weight: forward (i occupied, f empty) minus
        # reverse (f occupied, i empty), scaled by the k-point weight.
        weight = kweights[k] * (
            (occ_initial * (1 - occ_final)) - (occ_final * (1 - occ_initial))
        )
        result += weight * gaussian(
            energies, gaussian_width, center=e_final - e_initial
        )
    return result
|
adc2a9c6c91da91b02c0ed9823016b3f256625fb
| 3,638,318
|
def findConstantMetrics(inpath):
    """
    Simple function that checks which metrics in a dictionary (read from a CSV) are constant and which change over time.
    As a reference, the first record read from the file is used.

    :param inpath: The path to the CSV file that must be analyzed
    :return: The list of metrics (keys) that are constant in the file
    """
    # Fixed: use a context manager so the file handle is released even when
    # an unexpected exception escapes mid-read (the original only closed it
    # on its two anticipated exit paths).
    with open(inpath, 'r') as infile:
        reader = DictReader(infile)
        try:
            reference = next(reader)
        except (StopIteration, IOError):
            # Empty file / header-only file: nothing is constant.
            return []
        while True:
            try:
                row = next(reader)
            except (StopIteration, IOError):
                # End of file (or a read error) terminates the scan,
                # preserving the original best-effort behaviour.
                break
            # Drop every metric whose value differs from the reference row.
            for key in [k for k in reference if row[k] != reference[k]]:
                reference.pop(key)
        return list(reference.keys())
|
0faefe77cfea5e1d74d2bb0dda33ed622ce87f02
| 3,638,319
|
def scoreGold(playerList, iconCount, highScore):
    """Update each players' score based on the amount of gold that they have collected.

    Args:
        playerList: A list of all PlayerSprite objects in the game.
        iconCount: A list of integers representing how many times each player has gained points from the
            scoreGold function this level.
        highScore: An integer showing the current high score.

    Returns:
        looping: A boolean indicating if scoreLevel should call this function again.
        iconCount: A list of integers representing how many times each player has gained points from the
            scoreGold function this level.
        scoreText: A list of the current scores for each of the players.
        iconCountText: A list of text objects representing each player's iconCount value.
        highScore: An integer showing the current high score.
    """
    scoreText = []
    iconCountText = []
    checkQuitGame()
    checkPauseGameWithInput(playerList)
    if any(p.goldCollectedCount > 0 for p in playerList):
        playSound("count_points.wav")
    # Each call awards 100 points per player until that player's iconCount
    # catches up with the gold they collected this level.
    for idx, p in enumerate(playerList):
        if p.goldCollectedCount > iconCount[idx]:
            p.score += 100
            iconCount[idx] += 1
        scoreText.append(c.FONT.render("{:06d}PTS.".format(p.score % 1000000), False, c.WHITE))
        iconCountText.append(c.FONT.render("+{:02d}".format(iconCount[idx] % 100), False, c.WHITE))
    highScore = compareHighScore(playerList, highScore)
    # Keep looping until every player's iconCount matches their collected gold.
    finished = all(iconCount[idx] == p.goldCollectedCount for idx, p in enumerate(playerList))
    return (not finished), iconCount, scoreText, iconCountText, highScore
|
255b4ee987a6ac4a5274ad5ae7b5bf6698840407
| 3,638,320
|
def image_2d_transformer(pretrained=False, **kwargs):
    """
    Modified copy from timm.

    DeiT base model @ 384x384 from paper (https://arxiv.org/abs/2012.12877).
    ImageNet-1k weights from https://github.com/facebookresearch/deit.
    """
    # ViT-Base geometry; caller kwargs are merged into the config.
    model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs)
    return _create_transformer_2d('vit_deit_base_patch16_384', pretrained=pretrained, **model_args)
|
c5891105446ffc4fac5f19f73b5f401bfc769827
| 3,638,321
|
import torch
def create_fourier_heatmap_from_error_matrix(
    error_matrix: torch.Tensor,
) -> torch.Tensor:
    """Create Fourier Heat Map from error matrix (about quadrant 1 and 4).

    Note:
        The Fourier Heat Map is symmetric about the origin, so the left
        half is obtained by a point reflection of the right half.

    Args:
        error_matrix (torch.Tensor): Error matrix of size (H, H/2+1) for
            quadrants 1 and 4, where H is the image height.

    Returns:
        torch.Tensor: The full Fourier Heat Map assembled from the matrix.
    """
    assert len(error_matrix.size()) == 2
    assert error_matrix.size(0) == 2 * (error_matrix.size(1) - 1)
    right_half = error_matrix[1:, :-1]
    # Point reflection about the origin gives the mirrored left half.
    left_half = torch.flip(right_half, (0, 1))
    return torch.cat([left_half[:, :-1], right_half], dim=1)
|
25a4a4e2aa2ffda317f28d85c3798682fd72c466
| 3,638,322
|
def get_state_name(state):
    """Maps a mongod node state id to a human readable string."""
    try:
        return REPLSET_MEMBER_STATES[state][0]
    except KeyError:
        return 'UNKNOWN'
|
ddfbfa53c05941747ebedc242baa8e29bddf6771
| 3,638,324
|
def compute_resilience(ugraph, attack_order):
    """
    Dispatch to the union-find or BFS resilience implementation,
    depending on the USE_UF flag.

    :param ugraph:
    :param attack_order:
    :return:
    """
    solver = uf.compute_resilience_uf if USE_UF else bfs_visited.compute_resilience
    return solver(ugraph, attack_order)
|
db623ae30b20a076ff8e0f45fb84a9bb24fa414a
| 3,638,326
|
def NumericalFlux(b, r, c):
    """Compute the flux by numerical integration of the surface integral.

    b -- occultor centre offset along y; negative values are folded positive.
    r -- occultor radius; must satisfy r <= 1 (the occulted disc has unit
         radius, as set by the sqrt(1 - x**2) limb below).
    c -- four limb-darkening coefficients applied to mu**0.5, mu, mu**1.5
         and mu**2 terms.
    Returns the visible flux normalized to 1 for no occultation.
    """
    # I'm only coding up a specific case here
    assert r <= 1, "Invalid range."
    if b < 0:
        b = np.abs(b)
    # No occ
    if b >= 1 + r:
        return 1
    # Get points of intersection
    if b > 1 - r:
        # Partial overlap: (xi, yi) are the limb/occultor intersection points.
        yi = (1. + b ** 2 - r ** 2) / (2. * b)
        xi = (1. / (2. * b)) * np.sqrt(4 * b ** 2 - (1 + b ** 2 - r ** 2) ** 2)
    else:
        # Occultor fully inside the disc: no intersection with the limb.
        yi = np.inf
        xi = r
    # Specific intensity map
    def I(y, x):
        mu = np.sqrt(1 - x ** 2 - y ** 2)
        return 1 - c[0] * (1 - mu ** 0.5) - c[1] * (1 - mu) - c[2] * (1 - mu ** 1.5) - c[3] * (1 - mu ** 2)
    # Total flux over the upper half disc, doubled by symmetry.
    total, _ = dblquad(I, -1, 1, lambda x: 0, lambda x: np.sqrt(1 - x ** 2), epsabs=1e-12, epsrel=1e-12)
    total *= 2
    # Lower integration limit
    # NOTE(review): the three branches of y1 currently return the identical
    # expression; the "tricky" geometry is handled by the extra integral
    # further down instead.
    def y1(x):
        if yi <= b:
            # Lower occultor boundary
            return b - np.sqrt(r ** 2 - x ** 2)
        elif b <= 1 - r:
            # Lower occultor boundary
            return b - np.sqrt(r ** 2 - x ** 2)
        else:
            # Tricky: we need to do this in two parts
            return b - np.sqrt(r ** 2 - x ** 2)
    # Upper integration limit
    def y2(x):
        if yi <= b:
            # Upper occulted boundary
            return np.sqrt(1 - x ** 2)
        elif b <= 1 - r:
            # Upper occultor boundary
            return b + np.sqrt(r ** 2 - x ** 2)
        else:
            # Tricky: we need to do this in two parts
            return np.sqrt(1 - x ** 2)
    # Occulted flux over the central strip |x| <= xi.
    flux, _ = dblquad(I, -xi, xi, y1, y2, epsabs=1e-12, epsrel=1e-12)
    # Do we need to solve an additional integral?
    if not (yi <= b) and not (b <= 1 - r):
        # y1/y2 are deliberately rebound here to the occultor boundaries for
        # the outer lobe (-r <= x <= -xi); the factor 2 covers both sides.
        def y1(x):
            return b - np.sqrt(r ** 2 - x ** 2)
        def y2(x):
            return b + np.sqrt(r ** 2 - x ** 2)
        additional_flux, _ = dblquad(I, -r, -xi, y1, y2,
                                     epsabs=1e-12, epsrel=1e-12)
        flux += 2 * additional_flux
    return (total - flux) / total
|
c2e5918702dfd99f7710adf29eb2e8d668cb1cc0
| 3,638,327
|
from typing import Container
def build_volume_from(volume_from_spec):
    """
    volume_from can be either a service or a container. We want to return the
    container.id and format it into a string complete with the mode.
    """
    source = volume_from_spec.source
    if isinstance(source, Service):
        existing = source.containers(stopped=True)
        if existing:
            return "{}:{}".format(existing[0].id, volume_from_spec.mode)
        # No container exists for this service yet: create one on the fly.
        return "{}:{}".format(source.create_container().id, volume_from_spec.mode)
    if isinstance(source, Container):
        return "{}:{}".format(source.id, volume_from_spec.mode)
|
ee5b997ea1832aa490501da3556faa52c611ada9
| 3,638,328
|
def generate_peripheral(csr, name, **kwargs):
    """ Generates definition of a peripheral.

    Args:
        csr (dict): LiteX configuration
        name (string): name of the peripheral
        kwargs (dict): additional parameters, including
                       'model' and 'properties'

    Returns:
        string: repl definition of the peripheral
    """
    descriptor = get_descriptor(csr, name)

    model = kwargs['model']
    # A 32-bit CSR bus may require a dedicated model variant.
    if csr['constants']['config_csr_data_width'] == 32 and 'model_CSR32' in kwargs:
        model = kwargs['model_CSR32']

    lines = ['\n{}: {} @ {}\n'.format(
        kwargs.get('name', name),
        model,
        generate_sysbus_registration(descriptor))]

    ignored = kwargs.get('ignored_constants')
    for constant, val in descriptor['constants'].items():
        if ignored is not None and constant in ignored:
            continue
        if constant == 'interrupt':
            lines.append('    -> cpu@{}\n'.format(val))
        else:
            lines.append('    {}: {}\n'.format(constant, val))

    for prop, val in kwargs.get('properties', {}).items():
        lines.append('    {}: {}\n'.format(prop, val(csr)))

    for prop, val in kwargs.get('interrupts', {}).items():
        lines.append('    {} -> {}\n'.format(prop, val()))

    return ''.join(lines)
|
154428b153b804c23eb9b2a99380e987402c9fb4
| 3,638,329
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.