| content (stringlengths 35 – 762k) | sha1 (stringlengths 40) | id (int64 0 – 3.66M) |
|---|---|---|
import math
def calc_val_resize_value(input_image_size=(224, 224),
resize_inv_factor=0.875):
"""
Calculate image resize value for validation subset.
Parameters:
----------
    input_image_size : tuple of 2 int
        Spatial size of the expected input image.
    resize_inv_factor : float
        Inverted resize (crop) factor.
Returns:
-------
int
Resize value.
"""
if isinstance(input_image_size, int):
input_image_size = (input_image_size, input_image_size)
resize_value = int(math.ceil(float(input_image_size[0]) / resize_inv_factor))
return resize_value
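
# A quick sanity check of the formula above: the standard ImageNet validation
# settings give the usual 256 -> 224 pipeline, since ceil(224 / 0.875) = 256.
assert calc_val_resize_value((224, 224), 0.875) == 256
assert calc_val_resize_value(160) == 183  # an int input is promoted to a square tuple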
|
5a8bcb77d849e62ef5ecfad74f5a3470ab4cfe59
| 3,648,341
|
import requests
def fetch_http(url, location):
"""
    Return a `Response` object built by fetching the content at an HTTP/HTTPS `url`,
    saving the content to a file at `location`.
"""
r = requests.get(url)
with open(location, 'wb') as f:
f.write(r.content)
content_type = r.headers.get('content-type')
size = r.headers.get('content-length')
size = int(size) if size else None
resp = Response(location=location, content_type=content_type, size=size, url=url)
return resp
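
# fetch_http assumes a Response container defined elsewhere in its module; a
# minimal stand-in with just the fields used above (hypothetical, for illustration):
from collections import namedtuple
Response = namedtuple('Response', ['location', 'content_type', 'size', 'url'])
# resp = fetch_http('https://example.com/file.bin', '/tmp/file.bin')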
|
b1229bec9c09528f5fb9dcdd14ee2cc6678410c4
| 3,648,342
|
import numpy as np
import scipy.sparse as sp
def mat_normalize(mx):
    """Symmetrically normalize a sparse matrix: D^-1/2 * mx * D^-1/2."""
rowsum = np.array(mx.sum(1))
r_inv = np.power(rowsum, -0.5).flatten()
r_inv[np.isinf(r_inv)] = 0.
r_mat_inv = sp.diags(r_inv)
mx = r_mat_inv.dot(mx).dot(r_mat_inv)
return mx
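
# A minimal usage sketch: symmetric normalization D^-1/2 * A * D^-1/2 of a small
# adjacency matrix; with unit row sums the transform is the identity.
# A = sp.csr_matrix(np.array([[0., 1.], [1., 0.]]))
# mat_normalize(A).toarray()  # -> [[0., 1.], [1., 0.]]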
|
9caeaf660e7a11b7db558248deb3097e9cca2f57
| 3,648,343
|
def user_syntax_error(e, source_code):
"""Returns a representation of the syntax error for human consumption.
This is only meant for small user-provided strings. For input files,
prefer the regular Python format.
Args:
e: The SyntaxError object.
source_code: The source code.
Returns:
A multi-line error message, where the first line is the summary, and the
following lines explain the error in more detail.
"""
summary = 'Failed to parse Python-like source code ({msg}).'.format(
msg=e.msg or '<unknown reason>')
if e.text is None:
# Only output the source code.
return '\n'.join([summary, _indent(source_code)])
# Alternatively, we could use the middle two lines from
# traceback.format_exception_only(SyntaxError, e), but it isn't clear that
# this is an improvement in terms of maintainability. (e.g. we do not then
# control the indent, and if the format changes in the future the output
# becomes nonsense).
error_information = '\n'.join([
e.text.rstrip('\r\n'), # \n is added by ast.parse but not exec/eval.
' ' * (e.offset - 1) + '^', # note: offset is 1-based.
])
if '\n' in source_code:
return '\n'.join([
summary,
'',
'Source:',
_indent(source_code),
'',
'Location:',
_indent(error_information),
])
else:
return '\n'.join([summary, _indent(error_information)])
|
79272de37844b043656a98d796913769e89ebb17
| 3,648,345
|
import re
def check_comment(comment, changed):
""" Check the commit comment and return True if the comment is
acceptable and False if it is not."""
sections = re.match(COMMIT_PATTERN, comment)
if sections is None:
print(f"The comment \"{comment}\" is not in the recognised format.")
else:
indicator = sections.group(1)
if indicator == "M":
# Allow modification comments to have practically any format
return True
elif indicator == "A" or indicator == "P":
if not changed:
print(
"You have indicated that you have added or removed a rule, but no changes were initially noted by "
"the repository.")
else:
address = sections.group(4)
if not valid_url(address):
print("Unrecognised address \"{address}\".".format(address=address))
else:
# The user has changed the subscription and has written a suitable comment
# message with a valid address
return True
print()
return False
|
6ad96bb465e2079895ad87d35b4bc7a000312eaf
| 3,648,346
|
def GetBankTaskSummary(bank_task):
""" Summarizes the bank task
params: bank_task = value of the object of type bank_task_t
returns: String with summary of the type.
"""
format_str = "{0: <#020x} {1: <16d} {2: <#020x} {3: <16d} {4: <16d} {5: <16d} {6: <16d} {7: <16d}"
out_string = format_str.format(bank_task, bank_task.bt_proc_persona.pid, bank_task.bt_ledger, unsigned(bank_task.bt_elem.be_refs), unsigned(bank_task.bt_elem.be_made), bank_task.bt_proc_persona.persona_id, bank_task.bt_proc_persona.uid, bank_task.bt_proc_persona.gid)
#if DEVELOPMENT
format_str = "{0: <#020x} {1: <20s}"
if hasattr(bank_task.bt_elem, 'be_task'):
out_string += " " + format_str.format(bank_task.bt_elem.be_task, GetProcNameForTask(bank_task.bt_elem.be_task))
#endif
return out_string
|
afbc3b4e8707428dc951d5d199441923e477ac0c
| 3,648,347
|
import numpy as np
def angle(o1, o2):
"""
Find the angles between two DICOM orientation vectors
"""
o1 = np.array(o1)
o2 = np.array(o2)
o1a = o1[0:3]
o1b = o1[3:6]
o2a = o2[0:3]
o2b = o2[3:6]
norm_a = np.linalg.norm(o1a) * np.linalg.norm(o2a)
norm_b = np.linalg.norm(o1b) * np.linalg.norm(o2b)
dot_a = np.dot(o1a,o2a) / norm_a
dot_b = np.dot(o1b,o2b) / norm_b
if dot_a > 1.0 and dot_a - 1.0 <= np.finfo(dot_a.dtype).eps:
dot_a = 1.0
if dot_b > 1.0 and dot_b - 1.0 <= np.finfo(dot_b.dtype).eps:
dot_b = 1.0
angle_a = np.arccos(dot_a) * (180.0 / np.pi)
angle_b = np.arccos(dot_b) * (180.0 / np.pi)
return (angle_a, angle_b)
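
# A minimal usage sketch: two identical axial DICOM ImageOrientationPatient
# vectors are 0 degrees apart on both the row and column axes.
# angle([1, 0, 0, 0, 1, 0], [1, 0, 0, 0, 1, 0])  # -> (0.0, 0.0)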
|
db6211f4067339b7740eb52cc3f101f6ef69f08c
| 3,648,348
|
def get_project_id_v3(user_section='user'):
"""Returns a project ID."""
r = authenticate_v3_config(user_section, scoped=True)
return r.json()["token"]["project"]["id"]
|
7cbb004609e3623d6a5d4bbf45766ea753027f5c
| 3,648,349
|
def get_platform_arches(pkgs_info, pkg_name):
"""."""
package_info = get_package_info(pkgs_info, pkg_name)
platforms_info = package_info.get('platforms', {})
platform_arches = platforms_info.get('arches', [])
return platform_arches
|
d6da2a95592f1ecf1e89935dfecef84fe2ee9313
| 3,648,350
|
from typing import Optional
def unformat_number(new_str: str, old_str: Optional[str], type_: str) -> str:
"""Undoes some of the locale formatting to ensure float(x) works."""
ret_ = new_str
if old_str is not None:
if type_ in ("int", "uint"):
new_str = new_str.replace(",", "")
new_str = new_str.replace(".", "")
ret_ = new_str
else:
end_comma = False
if new_str.endswith(",") or new_str.endswith("."):
            # If it ends in a comma or period, remember it
end_comma = True
ret_ = new_str.replace(",", "")
ret_ = ret_.replace(".", "")
if end_comma:
ret_ = ret_ + "."
# else:
# comma_pos = old_str.find(".")
# if comma_pos > -1:
print("Desformateando", new_str, ret_)
# else:
# pos_comma = old_str.find(".")
# if pos_comma > -1:
# if pos_comma > new_str.find("."):
# new_str = new_str.replace(".", "")
# ret_ = new_str[0:pos_comma] + "." + new_str[pos_comma:]
# print("l2", ret_)
return ret_
|
419698cf46c1f6d3620dbb8c6178f0ba387ef360
| 3,648,352
|
def is_triplet(tiles):
"""
Checks if the tiles form a triplet.
"""
return len(tiles) == 3 and are_all_equal(tiles)
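
# is_triplet relies on an are_all_equal helper defined elsewhere; a minimal
# stand-in consistent with its use here:
def are_all_equal(tiles):
    """Return True when every tile in the sequence is identical."""
    return all(t == tiles[0] for t in tiles)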
|
a0223a0fde80c147de7e255db0a5563424d1a427
| 3,648,354
|
def cli(ctx, comment, metadata=""):
"""Add a canned comment
Output:
    A dictionary containing the canned comment description
"""
return ctx.gi.cannedcomments.add_comment(comment, metadata=metadata)
|
bacfab650aac1a1785a61756a7cbf84aab7df77a
| 3,648,355
|
from math import ceil
def get_latest_slot_for_resources(latest, task, schedule_set):
"""
Finds the latest opportunity that a task may be executed
:param latest: type int
A maximum bound on the latest point where a task may be executed
:param task: type DAGSubTask
The task to obtain the latest starting slot for
:param schedule_set: type list
List of occupied time slots of the resources used for the task
:return: type int
The latest slot where task may begin
"""
# Obtain set of occupied slots across resources
occupied_slots = set()
for rs in schedule_set:
occupied_slots |= set(rs)
# Filter ones that are earlier than latest opportunity
occupied_slots = list(filter(lambda s: s <= latest, list(sorted(occupied_slots))))
# Settle for latest if nothing else found
if not occupied_slots or occupied_slots[-1] < latest:
return latest
else:
occupied_ranges = list(reversed(list(to_ranges(occupied_slots))))
for (s1, e1), (s2, e2) in zip(occupied_ranges, occupied_ranges[1:]):
if s1 - e2 >= task.c:
return e2 + 1
return occupied_ranges[-1][0] - ceil(task.c)
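
# The to_ranges helper used above is defined elsewhere; the standard recipe below
# (an assumed implementation) collapses sorted integers into (start, end) runs:
from itertools import groupby
def to_ranges(iterable):
    """Yield (start, end) tuples for runs of consecutive integers."""
    for _, group in groupby(enumerate(iterable), lambda pair: pair[1] - pair[0]):
        group = list(group)
        yield group[0][1], group[-1][1]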
|
455f852152877c856d49882facabdd6faabad175
| 3,648,356
|
import requests
def _get_text(url: dict):
    """
    Get the text from a message url
    Args:
        url: dict carrying the REST call URL under the 'messageUrl' key
    Returns:
        response: Request response
    """
response = requests.get(url["messageUrl"].split("?")[0])
return response
|
fe711167748ca6b0201da7501f3b38cc8af8651d
| 3,648,357
|
def divideFacet(aFacet):
"""Will always return four facets, given one, rectangle or triangle."""
# Important: For all facets, first vertex built is always the most south-then-west, going counter-clockwise thereafter.
if len(aFacet) == 5:
# This is a triangle facet.
orient = aFacet[4] # get the string expressing this triangle's orientation
# Cases, each needing subdivision:
# ______ ___ ___
# |\ /| \ / /\ | / \ | ^
# | \ / | \ / / \ | / \ | N
# |__\ /__| \/ /____\ |/ \|
#
# up up down up down down -- orientations, as "u" or "d" in code below.
# Find the geodetic bisectors of the three sides, store in sequence using edges defined
        # by aFacet vertex indices: [0]&[1] , [1]&[2] , [2]&[3]
newVerts = []
for i in range(3):
if aFacet[i][0] == aFacet[i+1][0] or aFacet[i][1] == aFacet[i+1][1]:
newVerts.append(GetMidpoint(aFacet[i], aFacet[i+1]))
else:
newLat = (aFacet[i][0] + aFacet[i+1][0]) / 2
newLon1, newLon2 = findCrossedMeridiansByLatitude(aFacet[i], aFacet[i + 1], newLat)
newLon = lonCheck(newLon1, newLon2, aFacet[i][1], aFacet[i+1][1])
newVert = (newLat, newLon)
newVerts.append(newVert)
if orient == "u":
# In the case of up facets, there will be one "top" facet
# and 3 "bottom" facets after subdivision; we build them in the sequence inside the triangles:
#
# 2
# /\ Outside the triangle, a number is the index of the vertex in aFacet,
# / 1\ and a number with an asterisk is the index of the vertex in newVerts.
# 2* /____\ 1*
# /\ 0 /\
# /2 \ /3 \
# /____\/____\
# 0or3 0* 1
newFacet0 = [newVerts[0], newVerts[1], newVerts[2], newVerts[0], "d"]
newFacet1 = [newVerts[2], newVerts[1], aFacet[2], newVerts[2], "u"]
newFacet2 = [aFacet[0], newVerts[0], newVerts[2], aFacet[0], "u"]
newFacet3 = [newVerts[0], aFacet[1], newVerts[1], newVerts[0], "u"]
if orient == "d":
# In the case of down facets, there will be three "top" facets
# and 1 "bottom" facet after subdivision; we build them in the sequence inside the triangles:
#
# 2_____1*_____1
# \ 2 /\ 3 /
# \ / 0\ / Outside the triangle, a number is the index of the vertex in aFacet,
# \/____\/ and a number with an asterisk is the index of the vertex in newVerts.
# 2*\ 1 /0*
# \ /
# \/
# 0or3
newFacet0 = [newVerts[2], newVerts[0], newVerts[1], newVerts[2], "u"]
newFacet1 = [aFacet[0], newVerts[0], newVerts[2], aFacet[0], "d"]
newFacet2 = [newVerts[2], newVerts[1], aFacet[2], newVerts[2], "d"]
newFacet3 = [newVerts[0], aFacet[1], newVerts[1], newVerts[0], "d"]
if len(aFacet) == 6:
# This is a rectangle facet.
northBoolean = aFacet[5] # true for north, false for south
if northBoolean:
# North pole rectangular facet.
# Build new facets in the sequence inside the polygons:
# 3..........2 <-- North Pole
# | |
# | 1 | Outside the polys, a number is the index of the vertex in aFacet,
# | | and a number with an asterisk is the index of the vertex in newVerts.
# | |
# 2*|--------|1* /\
# |\ /| on globe /__\
# | \ 0 / | -------> /\ /\
# | \ / | /__\/__\
# | 2 \/ 3 |
# 0or4''''''''''1
# 0*
newVerts = []
for i in range(4):
if i != 2:
                    # on i == 2 we're going across the north pole - don't need that midpoint.
if aFacet[i][0] == aFacet[i+1][0] or aFacet[i][1] == aFacet[i+1][1]:
newVerts.append(GetMidpoint(aFacet[i], aFacet[i+1]))
else:
newLat = (aFacet[i][0] + aFacet[i+1][0])/2
newLon1, newLon2 = findCrossedMeridiansByLatitude(aFacet[i], aFacet[i + 1], newLat)
newLon = lonCheck(newLon1, newLon2, aFacet[i][1], aFacet[i+1][1])
newVert = (newLat, newLon)
newVerts.append(newVert)
newFacet0 = [newVerts[0], newVerts[1], newVerts[2], newVerts[0], "d"] # triangle
newFacet1 = [newVerts[2], newVerts[1], aFacet[2], aFacet[3], newVerts[2], True] # rectangle
newFacet2 = [aFacet[0], newVerts[0], newVerts[2], aFacet[0], "u"] # triangle
newFacet3 = [newVerts[0], aFacet[1], newVerts[1], newVerts[0], "u"] # triangle
else:
# South pole rectangular facet
# 1*
# 3..........2
# | 2 /\ 3 | Outside the polys, a number is the index of the vertex in aFacet,
# | / \ | and a number with an asterisk is the index of the vertex in newVerts.
# | / 0 \ |
# |/ \| ________
# 2*|--------|0* \ /\ /
# | | on globe \/__\/
# | 1 | -------> \ /
# | | \/
# | |
# 0or4'''''''''1 <-- South Pole
newVerts = []
for i in range(4):
if i != 0:
                    # on i == 0 we're going across the south pole - don't need that midpoint
if aFacet[i][0] == aFacet[i+1][0] or aFacet[i][1] == aFacet[i+1][1]:
newVerts.append(GetMidpoint(aFacet[i], aFacet[i+1]))
else:
newLat = (aFacet[i][0] + aFacet[i+1][0])/2
newLon1, newLon2 = findCrossedMeridiansByLatitude(aFacet[i], aFacet[i + 1], newLat)
newLon = lonCheck(newLon1, newLon2, aFacet[i][1], aFacet[i+1][1])
newVert = newLat, newLon
newVerts.append(newVert)
newFacet0 = [newVerts[2], newVerts[0], newVerts[1], newVerts[2], "u"] # triangle
newFacet1 = [aFacet[0], aFacet[1], newVerts[0], newVerts[2], aFacet[0], False] # rectangle
newFacet2 = [newVerts[2], newVerts[1], aFacet[3], newVerts[2], "d"] # triangle
newFacet3 = [newVerts[1], newVerts[0], aFacet[2], newVerts[1], "d"] # triangle
# In all cases, return the four facets made in a list
return [newFacet0, newFacet1, newFacet2, newFacet3]
|
2e5891cb0ab7d23746ca18201be0f7360acc76b4
| 3,648,358
|
from mpmath import mp
def compute_g(n):
"""g_k from DLMF 5.11.3/5.11.5"""
a = compute_a(2*n)
g = []
for k in range(n):
g.append(mp.sqrt(2)*mp.rf(0.5, k)*a[2*k])
return g
|
86aeb38e4ecec67f539586b0a96aa95b396d0639
| 3,648,359
|
import numpy as np
import matplotlib.pyplot as plt
def get_colden(theta_xy, theta_xz, theta_yz, n_sample_factor=1.0,
directory=None, file_name='save.npy', quick=False,
gridrate=0.5, shift=[0, 0, 0], draw=False, save=False, verbose=False):
"""
Rotate gas into arbitrary direction
"""
if gridrate < 2**(-7):
boxsize=10**2
elif gridrate < 2**(-6):
boxsize=3*10**2
else:
boxsize = 10**4
    n_points = int(boxsize * n_sample_factor)  # size must be an int for np.random.randint
    x = np.random.randint(1000, size=n_points)
    y = np.random.randint(1000, size=n_points)
    z = np.random.randint(1000, size=n_points)
gridsize = 1000 * gridrate #### notice that gridsize is a half of box's side length
x, y, z = x - 500, y - 500, z - 500
x, y = rotation(x, y, theta_xy)
x, z = rotation(x, z, theta_xz)
y, z = rotation(y, z, theta_yz)
x, y, z = x + shift[0], y + shift[1], z + shift[2]
dsort = np.where((np.sqrt(np.square(x) + np.square(y)) < gridsize * np.sqrt(2))
& (abs(x) <= gridsize) & (abs(y) <= gridsize))
if draw:
plt.show()
else:
pass
z_sort = np.where( abs(z) <= gridsize )[0]
X_zsorted = x[z_sort]
Y_zsorted = y[z_sort]
min_xshift = min(X_zsorted)/2/gridsize
max_xshift = max(X_zsorted)/2/gridsize
min_yshift = min(Y_zsorted)/2/gridsize
max_yshift = max(Y_zsorted)/2/gridsize
min_xshi, min_yshi, min_zshi = -1000*np.sqrt(3)/gridsize/2/2,-1000*np.sqrt(3)/gridsize/2/2,-1000*np.sqrt(3)/gridsize/2/2
max_xshi, max_yshi, max_zshi = 1000*np.sqrt(3)/gridsize/2/2, 1000*np.sqrt(3)/gridsize/2/2, 1000*np.sqrt(3)/gridsize/2/2
base_grid_ddx = int(max(max_xshi, abs(min_xshi)))+1
base_grid_ddy = int(max(max_yshi, abs(min_yshi)))+1
base_grid_ddz = int(max(max_zshi, abs(min_zshi)))+1
print("\n","######################","\n","base_grid_ddx is ",base_grid_ddx,"\n","#####################","\n")
base_grid = np.zeros([2*base_grid_ddz+2+1, 2*base_grid_ddy+1, 2*base_grid_ddx+1])
i = -base_grid_ddx
while i <= base_grid_ddx:
j = -base_grid_ddy
while j <= base_grid_ddy:
k = -base_grid_ddz
while k <= base_grid_ddz:
component_ijk = np.sum((abs(x + 2 * gridsize * i) <= gridsize) *
(abs(y + 2 * gridsize * j) <= gridsize) *
(abs(z + 2 * gridsize * k) <= gridsize))/boxsize
base_grid[0][j+base_grid_ddy][i+base_grid_ddx] = i
base_grid[1][j+base_grid_ddy][i+base_grid_ddx] = j
base_grid[k+base_grid_ddz+2][j+base_grid_ddy][i+base_grid_ddx] = component_ijk
#base_grid[i+base_grid_ddx][j+base_grid_ddy][k+base_grid_ddz] = component_ijk
k = k + 1
j = j +1
if i%10 == 1: print("{:.2f} % \r".format(100*abs(i+base_grid_ddx)/base_grid_ddx/2))
i = i +1
if verbose: print(base_grid)
if save:
save_route = directory
route_name = save_route+file_name
np.save(route_name,base_grid)
return len(dsort[0]), base_grid
|
bb19845492adfde70c85f3a8faf64784130ab7b9
| 3,648,362
|
def PyException_GetCause(space, w_exc):
"""Return the cause (another exception instance set by raise ... from ...)
associated with the exception as a new reference, as accessible from Python
through __cause__. If there is no cause associated, this returns
NULL."""
w_cause = space.getattr(w_exc, space.wrap('__cause__'))
if space.is_none(w_cause):
return None
return w_cause
|
dce5c1df12af7074ce25387e493ccac1aaac27ec
| 3,648,363
|
def row_interval(rows: int) -> Expression:
"""
Creates an interval of rows.
Example:
::
>>> tab.window(Over
>>> .partition_by(col('a'))
>>> .order_by(col('proctime'))
>>> .preceding(row_interval(4))
>>> .following(CURRENT_ROW)
>>> .alias('w'))
:param rows: the number of rows
"""
return _unary_op("rowInterval", rows)
|
eaa998eb3498eeed8034d43efb89eaa3cbaa5b2b
| 3,648,365
|
from typing import Any
def build_post_async_retry_failed_request(*, json: Any = None, content: Any = None, **kwargs: Any) -> HttpRequest:
"""Long running post request, service returns a 202 to the initial request, with an entity that
contains ProvisioningState=’Creating’. Poll the endpoint indicated in the Azure-AsyncOperation
header for operation status.
See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this request builder
into your code flow.
:keyword json: Pass in a JSON-serializable object (usually a dictionary). See the template in
our example to find the input shape. Product to put.
:paramtype json: any
:keyword content: Pass in binary content you want in the body of the request (typically bytes,
a byte iterator, or stream input). Product to put.
:paramtype content: any
:return: Returns an :class:`~azure.core.rest.HttpRequest` that you will pass to the client's
`send_request` method. See https://aka.ms/azsdk/python/protocol/quickstart for how to
incorporate this response into your code flow.
:rtype: ~azure.core.rest.HttpRequest
Example:
.. code-block:: python
# JSON input template you can fill out and use as your body input.
json = {
"id": "str", # Optional. Resource Id.
"location": "str", # Optional. Resource Location.
"name": "str", # Optional. Resource Name.
"properties": {
"provisioningState": "str", # Optional.
"provisioningStateValues": "str" # Optional. Possible values include: "Succeeded", "Failed", "canceled", "Accepted", "Creating", "Created", "Updating", "Updated", "Deleting", "Deleted", "OK".
},
"tags": {
"str": "str" # Optional. A set of tags. Dictionary of :code:`<string>`.
},
"type": "str" # Optional. Resource Type.
}
"""
content_type = kwargs.pop("content_type", None) # type: Optional[str]
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", "/lro/postasync/retry/failed")
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="POST", url=url, headers=header_parameters, json=json, content=content, **kwargs)
|
867cf51bec949367ce2722ca16d35462947623f3
| 3,648,366
|
def splitclass(classofdevice):
"""
Splits the given class of device to return a 3-item tuple with the
major service class, major device class and minor device class values.
These values indicate the device's major services and the type of the
device (e.g. mobile phone, laptop, etc.). If you google for
"assigned numbers bluetooth baseband" you might find some documents
that discuss how to extract this information from the class of device.
Example:
>>> splitclass(1057036)
(129, 1, 3)
>>>
"""
if not isinstance(classofdevice, int):
try:
classofdevice = int(classofdevice)
except (TypeError, ValueError):
raise TypeError("Given device class '%s' cannot be split" % \
str(classofdevice))
data = classofdevice >> 2 # skip over the 2 "format" bits
service = data >> 11
major = (data >> 6) & 0x1F
minor = data & 0x3F
return (service, major, minor)
|
37c19ab17293b4fd0c46cff24c30e349459f7bd0
| 3,648,367
|
def change_to_local_price(us_fee):
"""Get us dollar change price from redis and apply it on us_fee.
"""
dollar_change = RedisClient.get('dollar_change')
if not dollar_change:
raise ValueError(ERRORS['CHANGE_PRICE'])
Rial_fee = float(us_fee) * int(dollar_change)
return int(Rial_fee)
|
bdd89a461e84a6acb6f49f2fb0159a9fa7404b17
| 3,648,368
|
def get_positive(data_frame, column_name):
"""
    Query given data frame for non-negative values (zero included)
:param data_frame: Pandas data frame to query
:param column_name: column name to filter values by
:return: DataFrame view
"""
return data_frame.query(f'{column_name} >= 0')
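
# A minimal usage sketch with pandas:
# import pandas as pd
# df = pd.DataFrame({'delta': [-1, 0, 2]})
# get_positive(df, 'delta')  # keeps the rows where delta is 0 and 2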
|
2aec7f611a1b181132f55f2f3ca73bf5025f2474
| 3,648,369
|
def axes(*args, **kwargs):
"""
Add an axes to the figure.
The axes is added at position *rect* specified by:
- ``axes()`` by itself creates a default full ``subplot(111)`` window axis.
- ``axes(rect, axisbg='w')`` where *rect* = [left, bottom, width,
height] in normalized (0, 1) units. *axisbg* is the background
color for the axis, default white.
- ``axes(h)`` where *h* is an axes instance makes *h* the current
axis. An :class:`~matplotlib.axes.Axes` instance is returned.
======= ============== ==============================================
kwarg Accepts Description
======= ============== ==============================================
axisbg color the axes background color
frameon [True|False] display the frame?
sharex otherax current axes shares xaxis attribute
with otherax
sharey otherax current axes shares yaxis attribute
with otherax
polar [True|False] use a polar axes?
aspect [str | num] ['equal', 'auto'] or a number. If a number
the ratio of x-unit/y-unit in screen-space.
Also see
:meth:`~matplotlib.axes.Axes.set_aspect`.
======= ============== ==============================================
Examples:
* :file:`examples/pylab_examples/axes_demo.py` places custom axes.
* :file:`examples/pylab_examples/shared_axis_demo.py` uses
*sharex* and *sharey*.
"""
nargs = len(args)
    if nargs == 0:
return subplot(111, **kwargs)
if nargs > 1:
raise TypeError('Only one non keyword arg to axes allowed')
arg = args[0]
if isinstance(arg, Axes):
a = gcf().sca(arg)
else:
rect = arg
a = gcf().add_axes(rect, **kwargs)
return a
|
1277afb8b3a6513129632216d8c3a6c2b5718449
| 3,648,370
|
def scrape_options_into_new_groups(source_groups, assignments):
"""Puts options from the :py:class:`OptionParser` and
:py:class:`OptionGroup` objects in `source_groups` into the keys of
    `assignments` according to the values of `assignments`.
:type source_groups: list of :py:class:`OptionParser` and
:py:class:`OptionGroup` objects
:param source_groups: parsers/groups to scrape options from
:type assignments: dict with keys that are :py:class:`OptionParser` and
:py:class:`OptionGroup` objects and values that are
lists of strings
:param assignments: map empty parsers/groups to lists of destination names
that they should contain options for
"""
all_options = scrape_options_and_index_by_dest(*source_groups)
return populate_option_groups_with_options(assignments, all_options)
|
4524a975b604a146814c9c913d3727e0bd296368
| 3,648,371
|
def resnext56_32x2d_cifar10(classes=10, **kwargs):
"""
ResNeXt-56 (32x2d) model for CIFAR-10 from 'Aggregated Residual Transformations for Deep Neural Networks,'
http://arxiv.org/abs/1611.05431.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_resnext_cifar(classes=classes, blocks=56, cardinality=32, bottleneck_width=2,
model_name="resnext56_32x2d_cifar10", **kwargs)
|
110b9b4443d4761b7f89a3d01b28d2c4ec8eba00
| 3,648,372
|
def is_internet_file(url):
"""Return if url starts with http://, https://, or ftp://.
Args:
url (str): URL of the link
"""
return (
url.startswith("http://")
or url.startswith("https://")
or url.startswith("ftp://")
)
|
00f9d90d580da3fe8f6cbc3604be61153b17a154
| 3,648,374
|
from typing import Any, Tuple

from .core import Observable  # Observable lives in rx.core
from .observable.zip import zip_
def zip(*args: Observable[Any]) -> Observable[Tuple[Any, ...]]:
"""Merges the specified observable sequences into one observable
sequence by creating a :class:`tuple` whenever all of the
observable sequences have produced an element at a corresponding
index.
.. marble::
:alt: zip
--1--2---3-----4---|
-a----b----c-d------|
[ zip() ]
--1,a-2,b--3,c-4,d-|
Example:
>>> res = rx.zip(obs1, obs2)
Args:
args: Observable sources to zip.
Returns:
An observable sequence containing the result of combining
elements of the sources as a :class:`tuple`.
"""
return zip_(*args)
|
c33915df586bb2c337d7fee275d4df30364cd704
| 3,648,375
|
def GetFilter(image_ref, holder):
"""Get the filter of occurrences request for container analysis API."""
filters = [
# Display only packages
'kind = "PACKAGE_MANAGER"',
# Display only compute metadata
'has_prefix(resource_url,"https://www.googleapis.com/compute/")',
]
client = holder.client
resource_parser = holder.resources
if image_ref:
image_expander = image_utils.ImageExpander(client, resource_parser)
self_link, image = image_expander.ExpandImageFlag(
user_project=properties.VALUES.core.project.Get(),
image=image_ref.image,
image_project=image_ref.project,
return_image_resource=True
)
image_url = self_link+'/id/'+str(image.id)
filters.append('has_prefix(resource_url,"{}")'.format(image_url))
return ' AND '.join(filters)
|
276104cab3c9348151437548ecd69801f20e5363
| 3,648,376
|
import numpy as np
from PIL import Image
from keras.models import model_from_json

def predict_image_paths(image_paths, model_path, target_size=(128, 128)):
    """Use a trained classifier to predict the class probabilities of a list of images
    Returns the raw class-probability predictions for each image
:param image_paths: list of path(s) to the image(s)
:param model_path: path to the pre-trained model
:param target_size:
:type image_paths: list
:return:
:rtype: list
"""
desired_size = target_size[0]
if model_path in LOADED_MODELS:
loaded_model = LOADED_MODELS[model_path]
else:
        # json_path (the model architecture JSON) is assumed to be defined at module scope
        with open(json_path, 'r') as json_file:
loaded_model = model_from_json(json_file.read())
loaded_model.load_weights(model_path)
LOADED_MODELS[model_path] = loaded_model
img_list = []
for image_path in image_paths:
im = Image.open(image_path)
old_size = im.size
ratio = float(desired_size) / max(old_size)
new_size = tuple([int(x * ratio) for x in old_size])
im = im.resize(new_size, Image.ANTIALIAS)
new_im = Image.new("RGB", (desired_size, desired_size), color='White')
new_im.paste(im, ((desired_size - new_size[0]) // 2,
(desired_size - new_size[1]) // 2))
img_array = np.asarray(new_im)
img_array = img_array.astype('float32')
img_array = (img_array / 255)
img_list.append(img_array)
predictions = loaded_model.predict(np.array(img_list))
return predictions
|
8071a178751ce25edbf664f1a69d1dd43b3e6290
| 3,648,377
|
def in_bounding_box(point):
"""Determine whether a point is in our downtown bounding box"""
lng, lat = point
in_lng_bounds = DOWNTOWN_BOUNDING_BOX[0] <= lng <= DOWNTOWN_BOUNDING_BOX[2]
in_lat_bounds = DOWNTOWN_BOUNDING_BOX[1] <= lat <= DOWNTOWN_BOUNDING_BOX[3]
return in_lng_bounds and in_lat_bounds
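
# in_bounding_box assumes a module-level DOWNTOWN_BOUNDING_BOX laid out as
# (min_lng, min_lat, max_lng, max_lat); for example (hypothetical coordinates):
# DOWNTOWN_BOUNDING_BOX = (-122.42, 47.59, -122.32, 47.62)
# in_bounding_box((-122.35, 47.60))  # -> True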
|
c4756c10bc45b81850f0e998be7bf420e355aa4d
| 3,648,378
|
def __DataContainerERT_addFourPointData(self, *args, **kwargs):
"""Add a new data point to the end of the dataContainer.
Add a new 4 point measurement to the end of the dataContainer and increase
the data size by one. The index of the new data point is returned.
Parameters
----------
*args: [int]
        At least four index values for A, B, M and N.
    **kwargs: dict
Values for the actual data configuration.
Returns
-------
ret: int
Index of this new data point.
Examples
--------
>>> import pygimli as pg
>>> d = pg.DataContainerERT()
>>> d.setSensors(pg.utils.grange(0, 3, n=4))
>>> d.addFourPointData(0,1,2,3)
0
>>> d.addFourPointData([3,2,1,0], rhoa=1.0)
1
>>> print(d)
Data: Sensors: 4 data: 2
>>> print(d('rhoa'))
2 [0.0, 1.0]
"""
try:
if len(args) == 1:
idx = self.createFourPointData(self.size(),
args[0][0], args[0][1],
args[0][2], args[0][3])
else:
idx = self.createFourPointData(self.size(),
args[0], args[1],
args[2], args[3])
except:
print("args:", args)
critical("Can't interpret arguments:", *args)
for k, v in kwargs.items():
if not self.haveData(k):
self.add(k)
self.ref(k)[idx] = v
return idx
|
11d42774e3e422aaa9a8fe664e5e4641b51248d4
| 3,648,379
|
def refresh_remote_vpsa(session, rvpsa_id, return_type=None, **kwargs):
"""
Refreshes information about a remote VPSA - such as discovering new pools
and updating how much free space remote pools have.
:type session: zadarapy.session.Session
:param session: A valid zadarapy.session.Session object. Required.
:type rvpsa_id: str
:param rvpsa_id: The remote VPSA 'name' value as returned by
get_all_remote_vpsas. For example: 'rvpsa-00000001'. Required.
:type return_type: str
:param return_type: If this is set to the string 'json', this function
will return a JSON string. Otherwise, it will return a Python
dictionary. Optional (will return a Python dictionary by default).
:rtype: dict, str
:returns: A dictionary or JSON data set as a string depending on
return_type parameter.
"""
verify_remote_vpsa_id(rvpsa_id)
path = '/api/remote_vpsas/{0}/refresh.json'.format(rvpsa_id)
return session.post_api(path=path, return_type=return_type, **kwargs)
|
6acc4a049397862c72a21deb8e38f65af5c424a7
| 3,648,382
|
import numpy as np
def zeros(shape, int32=False):
"""Return a blob of all zeros of the given shape with the correct float or
int data type.
"""
return np.zeros(shape, dtype=np.int32 if int32 else np.float32)
|
68bb2960a3a364f01b8fcc39495e44562936c98f
| 3,648,383
|
def _connect():
"""Connect to a XMPP server and return the connection.
Returns
-------
xmpp.Client
A xmpp client authenticated to a XMPP server.
"""
jid = xmpp.protocol.JID(settings.XMPP_PRIVATE_ADMIN_JID)
client = xmpp.Client(server=jid.getDomain(), port=settings.XMPP_PRIVATE_SERVER_PORT)
client.connect()
client.auth(
user=jid.getNode(),
password=settings.XMPP_PRIVATE_SERVER_PASSWORD,
resource=jid.getResource(),
)
return client
|
1d407d80c22a371205c85e9223164cfa01063781
| 3,648,384
|
def index():
"""Return the main page."""
return send_from_directory("static", "index.html")
|
2dbbf0d103e78bcd503f8254aac7f8a1a45f9176
| 3,648,385
|
from typing import List
def get_groups(records_data: dict, default_group: str) -> List:
"""
Returns the specified groups in the
SQS Message
"""
groups = records_data["Groups"]
try:
if len(groups) > 0:
return groups
else:
return [default_group]
except IndexError as err:
raise err
|
29ffe05da86816750b59bab03041d8bf43ca8961
| 3,648,386
|
def build_stats(history, eval_output, time_callback):
"""Normalizes and returns dictionary of stats.
Args:
history: Results of the training step. Supports both categorical_accuracy
and sparse_categorical_accuracy.
eval_output: Output of the eval step. Assumes first value is eval_loss and
second value is accuracy_top_1.
time_callback: Time tracking callback likely used during keras.fit.
Returns:
Dictionary of normalized results.
"""
stats = {}
if eval_output:
stats['accuracy_top_1'] = eval_output[1].item()
stats['eval_loss'] = eval_output[0].item()
if history and history.history:
train_hist = history.history
# Gets final loss from training.
stats['loss'] = train_hist['loss'][-1].item()
# Gets top_1 training accuracy.
if 'categorical_accuracy' in train_hist:
stats[TRAIN_TOP_1] = train_hist['categorical_accuracy'][-1].item()
elif 'sparse_categorical_accuracy' in train_hist:
stats[TRAIN_TOP_1] = train_hist['sparse_categorical_accuracy'][-1].item()
if time_callback:
timestamp_log = time_callback.timestamp_log
stats['step_timestamp_log'] = timestamp_log
stats['train_finish_time'] = time_callback.train_finish_time
if len(timestamp_log) > 1:
stats['avg_exp_per_second'] = (
time_callback.batch_size * time_callback.log_steps *
(len(time_callback.timestamp_log)-1) /
(timestamp_log[-1].timestamp - timestamp_log[0].timestamp))
return stats
|
3419bcc0b2441fd2ea67ddec0b50574017b71a75
| 3,648,387
|
def truncate_single_leafs(nd):
"""
>>> truncate_single_leafs(node(name='a', subs=[node(name='a', subs=None, layer='a')], layer=None))
node(name='a', subs=None, layer='a')
"""
if nd.layer:
return nd
if nd.subs and len(nd.subs) == 1:
if nd.subs[0].layer:
return node(nd.name, None, nd.subs[0].layer)
nd2 = truncate_single_leafs(nd.subs[0])
return node(name=(nd.name, nd.subs[0].name),
subs=nd2.subs,
layer=nd2.layer,
)
return node(nd.name, [truncate_single_leafs(n) for n in nd.subs], None)
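
# The doctest above presupposes a node type with name/subs/layer fields; a
# namedtuple of that shape (an assumption, not part of the original record):
from collections import namedtuple
node = namedtuple('node', ['name', 'subs', 'layer'])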
|
46e837dbd84df3c2cad5c5597d56f3ba716146f8
| 3,648,388
|
def postprocess_output(output, example, postprocessor):
"""Applies postprocessing function on a translation output."""
# Send all parts to the postprocessing.
if postprocessor is None:
text = output.output[0]
score = None
align = None
else:
tgt_tokens = output.output
src_tokens = example.source_tokens
text = postprocessor.process_input(
src_tokens,
tgt_tokens,
metadata=example.metadata,
config=example.config,
options=example.options,
)
score = sum(output.score) if all(s is not None for s in output.score) else None
attention = output.attention
if attention and len(attention) == 1:
attention = attention[0]
align = (
align_tokens(src_tokens, tgt_tokens, attention) if attention else None
)
else:
align = None
result = {"text": text}
if score is not None:
result["score"] = score
if align is not None:
result["align"] = align
return result
|
57c30c1cf9178ae28ef97c8662ed2fe6559f5dd6
| 3,648,389
|
def get_aqua_timestamp(iyear,ichunk,branch_flag):
"""
    Outputs a timestamp string for model runs with a
    predefined year-month-day timestamp, split into
    5 x 73-day chunks for a given year.
"""
    # Chunk start days for the 5 x 73-day chunks; branch-run starts are shifted by 1 day.
    chunk_days = ['01-01', '03-15', '05-27', '08-08', '10-20']
    branch_days = ['01-02', '03-16', '05-28', '08-09', '10-21']
    day = chunk_days[ichunk] if branch_flag == 0 else branch_days[ichunk]
    timestamp = format(iyear, "04") + '-' + day + '-00000'
    return timestamp
|
7566da7f22ee31e7e17a86a908bb510c176d32ea
| 3,648,390
|
def aggregate_native(gradients, f, m=None, **kwargs):
""" Multi-Krum rule.
Args:
gradients Non-empty list of gradients to aggregate
f Number of Byzantine gradients to tolerate
m Optional number of averaged gradients for Multi-Krum
... Ignored keyword-arguments
Returns:
Aggregated gradient
"""
# Defaults
if m is None:
m = len(gradients) - f - 2
# Computation
return native.krum.aggregate(gradients, f, m)
|
6a0b6309c9296587f581d8d941896643e096a3d5
| 3,648,391
|
import types
def isNormalTmpVar(vName: types.VarNameT) -> bool:
"""Is it a normal tmp var"""
if NORMAL_TMPVAR_REGEX.fullmatch(vName):
return True
return False
|
4ca52c849f913d15ede4c3ed4d4888d68ca5cd8b
| 3,648,392
|
import time
def count_time(start):
"""
    :param start: start time, as returned by time.time()
    :return: elapsed time in seconds
"""
end = time.time()
return end-start
|
1945f6e6972b47d7bbdb6941ee7d80b8a6eedd9a
| 3,648,394
|
def split_by_state(xs, ys, states):
"""
    Splits the results of get_frame_per_second into a list of continuous line segments,
    divided by state. This makes it possible to plot multiple line segments with a
    different color for each segment.
"""
res = []
last_state = None
for x, y, s in zip(xs, ys, states):
if s != last_state:
res.append((s, [], []))
last_state = s
res[-1][1].append(x)
res[-1][2].append(y)
return res
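
# A minimal usage sketch matching the docstring's intent: one matplotlib line per
# state segment (the color mapping here is hypothetical).
# import matplotlib.pyplot as plt
# colors = {'run': 'green', 'pause': 'red'}
# for state, seg_x, seg_y in split_by_state(xs, ys, states):
#     plt.plot(seg_x, seg_y, color=colors.get(state, 'gray'))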
|
0a872617bd935f7c52ee0d10e759674969a19c4e
| 3,648,395
|
import numpy as np
import gappa as gp  # GAMERA's python bindings are published as 'gappa' (assumed here)
def final_spectrum(t, age, LT, B, EMAX, R, V, dens, dist, Tfir, Ufir, Tnir, Unir, binss, tmin, ebreak, alpha1, alpha2):
"""
GAMERA computation of the particle spectrum (for the extraction of the
photon sed at the end of the evolution of the PWN)
http://libgamera.github.io/GAMERA/docs/time_dependent_modeling.html
Returns
-------
sed : array-like
Array with the evolved particle spectrum (erg/cm**2/s vs TeV) at the
last step
tot : array-like
Array with the total photon spectrum (erg/cm**2/s vs TeV)
    ic : array-like
        Array with the inverse Compton contribution to the total
        photon spectrum (erg/cm**2/s vs TeV)
ic_cmb : array-like
Array with the cmb inverse compton contribution to the total
photon spectrum (erg/cm**2/s vs TeV)
ic_fir : array-like
Array with the fir inverse compton contribution to the total
photon spectrum (erg/cm**2/s vs TeV)
ic_nir : array-like
Array with the nir inverse compton contribution to the total
photon spectrum (erg/cm**2/s vs TeV)
ic_ssc : array-like
Array with the self-synchrotron compton contribution to the total
photon spectrum (erg/cm**2/s vs TeV)
    synch : array-like
Array with the synchrotron contribution to the total
photon spectrum (erg/cm**2/s vs TeV)
"""
fp = gp.Particles()
p_spectrum = broken_powerlaw(ebreak,alpha1,alpha2,EMAX, 500)
e = np.logspace(np.log10(gp.m_e),np.log10(3*np.max(EMAX)),100) #particle escape
t_m, e_m = np.meshgrid(t, e) #particle escape
fp.SetTimeAndEnergyDependentEscapeTime(t, e, t_esc(e_m, t_m, B, R)) #particle escape
fp.SetCustomInjectionSpectrum(p_spectrum)
fp.SetLuminosity(list(zip(t,LT)))
fp.SetBField(list(zip(t,B)))
fp.SetEmax(list(zip(t,EMAX)))
fp.SetRadius(list(zip(t,R)))
fp.SetExpansionVelocity(list(zip(t,V)))
fp.SetAmbientDensity(dens)
fp.AddThermalTargetPhotons(2.7,0.25*gp.eV_to_erg)
fp.AddThermalTargetPhotons(Tfir, Ufir)
fp.AddThermalTargetPhotons(Tnir, Unir)
fp.SetTmin(tmin)
erad = np.logspace(-21,4.,binss) * gp.TeV_to_erg # energies(in ergs) where radiation will be calculated
fr = gp.Radiation()
fr.SetDistance(dist)
fr.AddThermalTargetPhotons(2.7,0.25*gp.eV_to_erg)
fr.AddThermalTargetPhotons(Tfir, Ufir)
fr.AddThermalTargetPhotons(Tnir, Unir)
fr.SetAmbientDensity(dens)
fp.SetAge(age)
fp.CalculateElectronSpectrum(binss)
sed = np.array(fp.GetParticleSED())
sp = np.array(fp.GetParticleSpectrum())
fr.SetElectrons(sp[:])
fr.SetBField(fp.GetBField())
fr.AddSSCTargetPhotons(fp.GetRadius())
fr.CalculateDifferentialPhotonSpectrum(erad)
tot = np.array(fr.GetTotalSED())
ic = np.array(fr.GetICSED())
ic_cmb = np.array(fr.GetICSED(0))
ic_fir = np.array(fr.GetICSED(1))
ic_nir = np.array(fr.GetICSED(2))
ic_ssc = np.array(fr.GetICSED(3))
synch = np.array(fr.GetSynchrotronSED())
return sed, tot, ic, ic_cmb, ic_fir, ic_nir, ic_ssc, synch
|
00ce850d759739ffcb69a5af5c62325b39bd5446
| 3,648,396
|
def modal():
"""Contributions input controller for modal view.
request.vars.book_id: id of book, optional
request.vars.creator_id: id of creator, optional
if request.vars.book_id is provided, a contribution to a book is presumed.
if request.vars.creator_id is provided, a contribution to a creator is
presumed.
    if neither request.vars.book_id nor request.vars.creator_id is provided
a contribution to zco.mx is presumed.
    request.vars.book_id takes precedence over request.vars.creator_id.
"""
book = None
creator = None
if request.vars.book_id:
book = Book.from_id(request.vars.book_id)
creator = Creator.from_id(book.creator_id)
elif request.vars.creator_id:
creator = Creator.from_id(request.vars.creator_id)
if not creator:
            raise LookupError(
                'Creator not found, id %s' % request.vars.creator_id)
return dict(
book=book,
creator=creator,
)
|
13222d8e4d611fe0005f3df3db05f10c6c9fb057
| 3,648,397
|
def returns(data):
"""Returns for any number of days"""
try:
trading_days = len(data)
logger.info(
"Calculating Returns for {} trading days".format(trading_days))
df = pd.DataFrame()
df['daily_returns'] = data.pct_change(1)
mean_daily_returns = df['daily_returns'].mean()
returns_data = mean_daily_returns * trading_days
return returns_data * 100
except Exception as exception:
logger.error('Oops! An error Occurred ⚠️')
raise exception
|
c533433e23cb2f246cdb3f3d8f445afc9d0ea0bc
| 3,648,398
|
def do_rot13_on_input(input_string, ordered_radix=ordered_rot13_radix):
""" Perform a rot13 encryption on the provided message.
"""
encrypted_message = str()
for char in input_string:
# Two possibilities: in radix, or NOT in radix.
if char in ordered_radix:
# must find index of the char in the ordered_radix
char_index = ordered_radix.index(char)
mod_char_index = (char_index + 13) % len(ordered_radix)
mod_char = ordered_radix[mod_char_index]
encrypted_message += mod_char
else:
encrypted_message += char
return encrypted_message
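
# A minimal usage sketch, assuming the module-level default radix is the lowercase
# alphabet (the real ordered_rot13_radix may differ):
# ordered_rot13_radix = 'abcdefghijklmnopqrstuvwxyz'
# do_rot13_on_input('hello')  # -> 'uryyb'; characters outside the radix pass through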
|
ccf37364860a661498290245a408d7cc4edbf896
| 3,648,399
|
def pwr_y(x, a, b, e):
"""
Calculate the Power Law relation with a deviation term.
Parameters
----------
x : numeric
Input to Power Law relation.
a : numeric
Constant.
b : numeric
Exponent.
e : numeric
Deviation term.
Returns
-------
numeric
Output of Power Law relation.
Notes
-----
Power Law relation: :math:`y = a x^b + e`
"""
return a*x**b+e
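
# A minimal sketch of fitting y = a*x^b + e with SciPy (illustrative data, not
# from the original source):
# import numpy as np
# from scipy.optimize import curve_fit
# x = np.array([1., 2., 4., 8.])
# y = pwr_y(x, a=2.0, b=0.5, e=0.1)
# (a, b, e), _ = curve_fit(pwr_y, x, y, p0=(1., 1., 0.))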
|
e736d9bb2e4305ef0dc0a360143a611b805f7612
| 3,648,400
|
def file_update_projects(file_id):
""" Page that allows users to interact with a single TMC file """
this_file = TMCFile.query.filter_by(uid=file_id).first()
project_form = AssignProjectsToFile()
if project_form.validate_on_submit():
data = dict((key, request.form.getlist(key) if len(
request.form.getlist(key)) > 1 else request.form.getlist(key)[0])
for key in request.form.keys())
pid_list = []
for k in data:
if "project_" in k:
pid_list.append(int(k.replace("project_", "")))
# Make sure all selected projects are associated
for pid in pid_list:
project = Project.query.filter_by(uid=pid).first()
if project not in this_file.project_ids:
this_file.project_ids.append(project)
# Remove association with unchecked projects
        for project in list(this_file.project_ids):  # copy: removing while iterating skips items
if project.uid not in pid_list:
this_file.project_ids.remove(project)
db.session.commit()
flash("Updated project associations", "success")
return redirect(url_for("single_file_bp.single_file", file_id=file_id))
|
172d0caccb3e7ba39282cb6860fb80fca0a050bb
| 3,648,401
|
def find_optimal_cut(edge, edge1, left, right):
"""Computes the index corresponding to the optimal cut such that applying
the function compute_blocks() to the sub-blocks defined by the cut reduces
the cost function comparing to the case when the function compute_blocks() is
applied to the whole matrix. If cutting point can not be find, the algorithm returns
the result from the function compute_blocks().
Parameters
----------
edge : ndarray
sparsity pattern profile of the matrix
edge1 : ndarray
conjugated sparsity pattern profile of the matrix
left : int
size of the leftmost diagonal block
right : int
size of the rightmost diagonal block
    Returns
    -------
    tuple
        (blocks, sep, right_block, left_block) - the list of block sizes, the
        cut index, and the sizes of the blocks adjacent to the cut; if no cut
        can be evaluated, ([left, right], nan, 0, 0) is returned.
    """
unique_indices = np.arange(left, len(edge) - right + 1)
blocks = []
seps = []
sizes = []
metric = []
size = len(edge)
for j1, item1 in enumerate(unique_indices):
seps.append(item1)
item2 = size - item1
# print(item1, item2)
# print(item1)
edge_1 = edge[:item1]
edge_2 = (edge1 - np.arange(len(edge1)))[item2:] + np.arange(item1)
edge_3 = edge1[:item2]
edge_4 = (edge - np.arange(len(edge)))[item1:] + np.arange(item2)
block1 = compute_blocks(left, (edge1 - np.arange(len(edge)))[item2],
edge_1, edge_2)
block2 = compute_blocks(right, (edge - np.arange(len(edge1)))[item1],
edge_3, edge_4)
block = block1 + block2[::-1]
blocks.append(block)
metric.append(np.sum(np.array(block) ** 3))
sizes.append((block1[-1], block2[-1]))
if len(metric) == 0:
return [left, right], np.nan, 0, 0
else:
best = np.argmin(np.array(metric))
blocks = blocks[best]
blocks = [item for item in blocks if item != 0]
sep = seps[best]
right_block, left_block = sizes[best]
return blocks, sep, right_block, left_block
|
63120c904a71b6dc40d75df6db19a5bdb619f9e2
| 3,648,402
|
import networkx as nx
def seq_to_networkx(header, seq, constr=None):
"""Convert sequence tuples to networkx graphs."""
graph = nx.Graph()
graph.graph['id'] = header.split()[0]
graph.graph['header'] = header
for id, character in enumerate(seq):
graph.add_node(id, label=character, position=id)
if id > 0:
graph.add_edge(id - 1, id, label='-')
    assert len(graph) > 0, 'ERROR: generated empty graph. Perhaps wrong format?'
graph.graph['sequence'] = seq
if constr is not None:
graph.graph['constraint'] = constr
return graph
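
# A minimal usage sketch: a 4-node path graph labeled with the sequence characters.
# g = seq_to_networkx('seq1 demo header', 'ACGU')
# g.number_of_nodes(), g.number_of_edges()  # -> (4, 3)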
|
7c44b3aa0fb30637eda9bc7e960db1e3d65e7907
| 3,648,403
|
def add_vertex_edge_for_load_support(network, sup_dic, load_dic, bars_len, key_removed_dic):
"""
Post-Processing Function:
Adds vertices and edges in accordance with supports and loads
    returns the updated network
"""
if not key_removed_dic:
load_sup_dic=merge_two_dicts(sup_dic, load_dic)
else:
load_dic_2=load_dic.copy()
for key in key_removed_dic:
load_dic_2.pop(key)
load_dic_2=merge_two_dicts(load_dic_2, key_removed_dic[key])
load_sup_dic=merge_two_dicts(sup_dic, load_dic_2)
# define arbitrary r to be added to get leaf vertex coordinates
max_len=max(bars_len)
r=max_len/3.0
# make a polygon and polyline from outer vertices of network
points = network.to_points()
cycles = network_find_cycles(network)
mesh = Mesh.from_vertices_and_faces(points, cycles)
if 0 in mesh.face and len(mesh.face)>1:
mesh.delete_face(0)
if len(mesh.face)==1:
ver_lis=[key for key in mesh.vertices()]
else:
ver_lis=mesh.vertices_on_boundary(ordered=True)
ver_lis_plyln=ver_lis[:]
ver_lis_plyln.append(ver_lis[0])
pt_lis_plygn=[mesh.vertex_coordinates(key) for key in ver_lis]
pt_lis_plyln=[mesh.vertex_coordinates(key) for key in ver_lis_plyln]
plygn=Polygon(pt_lis_plygn)
plyln=Polyline(pt_lis_plyln)
# add leaf vertices
for key in load_sup_dic:
if load_sup_dic[key][0]!=0.0:
pt_1=add_vectors(network.node_coordinates(key), (+r, 0.0, 0.0))
plyln_bln=is_point_on_polyline(pt_1, plyln.points, tol=0.001)
plygn_bln=is_point_in_polygon_xy(pt_1, plygn.points)
if plyln_bln or plygn_bln:
pt_1=add_vectors(network.node_coordinates(key), (-r, 0.0, 0.0))
key_2=network.add_node(x=np.asscalar(pt_1[0]), y=pt_1[1], z=0.0)
network.add_edge(key, key_2)
if load_sup_dic[key][1]!=0.0:
pt_2=add_vectors(network.node_coordinates(key), (0.0,+r, 0.0))
plyln_bln=is_point_on_polyline(pt_2, plyln.points, tol=0.001)
plygn_bln=is_point_in_polygon_xy(pt_2, plygn.points)
if plyln_bln or plygn_bln:
pt_2=add_vectors(network.node_coordinates(key), (0.0,-r, 0.0))
key_2=network.add_node(x=pt_2[0], y=np.asscalar(pt_2[1]), z=0.0)
network.add_edge(key, key_2)
return network, plygn, plyln
|
ce52cfac5e3bb58b31cfc1b2e243c435c5926d0f
| 3,648,404
|
def mimicry(span):
"""Enrich the match."""
data = {'mimicry': span.lower_}
sexes = set()
for token in span:
if token.ent_type_ in {'female', 'male'}:
if token.lower_ in sexes:
return {}
sexes.add(token.lower_)
return data
|
724d09156e97961049cb29d9f3c1f02ab5af48b0
| 3,648,405
|
def LeftBinarySearch(nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: int
"""
low = 0
high = len(nums)
while low < high:
mid = (low + high) // 2
if nums[mid] < target:
low = mid + 1
else:
high = mid
assert low == high
if low == len(nums) or nums[low] != target:
return -1
return low
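
# A quick sanity check: this mirrors bisect.bisect_left plus a membership test.
assert LeftBinarySearch([1, 2, 2, 3], 2) == 1
assert LeftBinarySearch([1, 2, 2, 3], 4) == -1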
|
d08f72e1563ee91e9ca6c9cf95db4c794312aa59
| 3,648,406
|
async def security_rule_get(
hub, ctx, security_rule, security_group, resource_group, **kwargs
):
"""
.. versionadded:: 1.0.0
Get a security rule within a specified network security group.
    :param security_rule: The name of the security rule to query.
:param security_group: The network security group containing the
security rule.
:param resource_group: The resource group name assigned to the
network security group.
CLI Example:
.. code-block:: bash
azurerm.network.network_security_group.security_rule_get testrule1 testnsg testgroup
"""
netconn = await hub.exec.azurerm.utils.get_client(ctx, "network", **kwargs)
try:
secrule = netconn.security_rules.get(
network_security_group_name=security_group,
resource_group_name=resource_group,
security_rule_name=security_rule,
)
result = secrule.as_dict()
except CloudError as exc:
await hub.exec.azurerm.utils.log_cloud_error("network", str(exc), **kwargs)
result = {"error": str(exc)}
return result
|
34fb0cc8c2399f3749970b1061e2d5d209b11750
| 3,648,408
|
def create_centroid_pos(Direction, Spacing, Size, position):
    """
    :param Direction, Spacing, Size: from sitk raw.GetDirection(), GetSpacing(), GetSize()
    :param position: [24, 3] array of voxel-space landmark positions
    :return: list of dicts, one per landmark, with physical coordinates and a 'label' key
    """
direction = np.round(list(Direction))
direc0 = direction[0:7:3]
direc1 = direction[1:8:3]
direc2 = direction[2:9:3]
dim0char = Dic[(np.argwhere((np.abs(direc0)) == 1))[0][0]]
dim1char = Dic[(np.argwhere((np.abs(direc1)) == 1))[0][0]]
dim2char = Dic[(np.argwhere((np.abs(direc2)) == 1))[0][0]]
resolution = Spacing
w, h, c = Size[0], Size[1], Size[2]
jsonlist = []
for i in range(24):
dim0, dim1, dim2 = position[i:i + 1, 0], position[i:i + 1, 1], position[i:i + 1, 2]
if dim0 >= 0:
label = i + 1
if np.sum(direc0) == -1:
if dim0char == 'X':
Jsondim0 = dim0 * resolution[0]
else:
Jsondim0 = (w - dim0) * resolution[0]
else:
if dim0char == 'X':
Jsondim0 = (w - dim0) * resolution[0]
else:
Jsondim0 = dim0 * resolution[0]
if np.sum(direc1) == -1:
if dim1char == 'X':
Jsondim1 = dim1 * resolution[1]
else:
Jsondim1 = (h - dim1) * resolution[1]
else:
if dim1char == 'X':
Jsondim1 = (h - dim1) * resolution[1]
else:
Jsondim1 = dim1 * resolution[1]
if np.sum(direc2) == -1:
if dim2char == 'X':
Jsondim2 = dim2 * resolution[2]
else:
Jsondim2 = (c - dim2) * resolution[2]
else:
if dim2char == 'X':
Jsondim2 = (c - dim2) * resolution[2]
else:
Jsondim2 = dim2 * resolution[2]
jsonlist.append({dim0char: Jsondim0, dim1char: Jsondim1, dim2char: Jsondim2, 'label': label})
return jsonlist
|
67f252a237f294bdf738bf0b5e9a89aad51201d7
| 3,648,409
|
from sklearn.model_selection import GroupKFold
def group_split_data_cv(df, cv=5, split=0):
"""
Args:
cv: number of cv folds
split: index of the cv fold to return
Note that GroupKFold is not random
"""
splitter = GroupKFold(n_splits=cv)
split_generator = splitter.split(df, groups=df['arpnum'])
for k, (train_idx, test_idx) in enumerate(split_generator):
if k == split:
return df.iloc[train_idx], df.iloc[test_idx]
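
# A minimal usage sketch (toy frame; GroupKFold keeps each 'arpnum' group entirely
# on one side of the split):
# import pandas as pd
# df = pd.DataFrame({'x': range(10), 'arpnum': [1, 1, 2, 2, 3, 3, 4, 4, 5, 5]})
# train, test = group_split_data_cv(df, cv=5, split=0)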
|
4d2fb6c62bdd313aa9b16d52b637adbfd1adc654
| 3,648,410
|
def encode(valeur,base):
""" int*int -->String
hyp valeur >=0
hypothèse : base maxi = 16
"""
chaine=""
if valeur>255 or valeur<0 :
return ""
for n in range (1,9) :
calcul = valeur % base
if (calcul)>9:
if calcul==10:
bit='A'
if calcul==11:
bit='B'
if calcul==12:
bit='C'
if calcul==13:
bit='D'
if calcul==14:
bit='E'
if calcul==15:
bit='F'
else :
bit=calcul
chaine =str(bit)+chaine
valeur = valeur // base
n+=1
return (chaine)
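
# A couple of worked values for the fixed eight-digit output:
# encode(255, 16)  # -> '000000FF'
# encode(5, 2)     # -> '00000101'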
|
c5fe7d129ab19d1f77ac9d5160f5d714a796c0a0
| 3,648,411
|
def main(request):
"""
Main admin page.
Displayes a paginated list of files configured source directory (sorted by
most recently modified) to be previewed, published, or prepared for
preview/publish.
"""
# get sorted archive list for this user
try:
archives = request.user.archivist.sorted_archives()
except ObjectDoesNotExist:
# i.e. no user -> archivist association
if request.user.is_superuser:
archives = Archive.objects.all()
else:
archives = []
# get current tab if set in session; default to first tab
current_tab = request.session.get('active_admin_tab', 0)
# files for publication now loaded in jquery ui tab via ajax
# get the 10 most recent task results to display status
recent_tasks = TaskResult.objects.order_by('-created')[:10]
# absolute path to login, for use in javascript if timeout occurs
login_url = request.build_absolute_uri(settings.LOGIN_URL)
return render(request, 'fa_admin/index.html', {
'archives': archives,
'current_tab': current_tab,
'login_url': login_url,
'task_results': recent_tasks})
|
9f10a3546dbbd209b8d91e812c4190c3498b1c03
| 3,648,412
|
def parse_char(char, invert=False):
"""Return symbols depending on the binary input
Keyword arguments:
char -- binary integer streamed into the function
invert -- boolean to invert returned symbols
"""
    if not invert:
if char == 0:
return '.'
elif char == 1:
return '@'
if char == 0:
return '@'
elif char == 1:
return '.'
|
38c0d1c150a1c8e8f7d2f3d1bde08ec3e5ceb65b
| 3,648,413
|
def get_transformer_dim(transformer_name='affine'):
""" Returns the size of parametrization for a given transformer """
lookup = {'affine': 6,
'affinediffeo': 6,
'homografy': 9,
'CPAB': load_basis()['d'],
'TPS': 32
}
assert (transformer_name in lookup), 'Transformer not found, choose between: ' \
+ ', '.join([k for k in lookup.keys()])
return lookup[transformer_name]
|
8e61b2e135c2f5933955082b4d951ff2f88283b7
| 3,648,415
|
def ListVfses(client_urns):
"""Lists all known paths for a list of clients.
Args:
client_urns: A list of `ClientURN` instances.
Returns:
A list of `RDFURN` instances corresponding to VFS paths of given clients.
"""
vfs = set()
cur = set()
for client_urn in client_urns:
cur.update([
client_urn.Add("fs/os"),
client_urn.Add("fs/tsk"),
client_urn.Add("temp"),
client_urn.Add("registry"),
])
while cur:
nxt = []
for _, children in aff4.FACTORY.MultiListChildren(cur):
nxt.extend(children)
vfs.update(nxt)
cur = nxt
return vfs
|
20bf77875d099106e5190d02c0c62d38eb1a6590
| 3,648,416
|
def delete_product(productId):
"""Deletes product"""
response = product2.delete_product(productId)
return response
|
394848c8b9c8803140744b8a1a1eb6995cd04bf7
| 3,648,417
|
import numpy as np
def compute_face_normals(points, trilist):
"""
Compute per-face normals of the vertices given a list of
faces.
Parameters
----------
points : (N, 3) float32/float64 ndarray
The list of points to compute normals for.
trilist : (M, 3) int16/int32/int64 ndarray
The list of faces (triangle list).
Returns
-------
face_normal : (M, 3) float32/float64 ndarray
The normal per face.
"""
pt = points[trilist]
a, b, c = pt[:, 0], pt[:, 1], pt[:, 2]
norm = np.cross(b - a, c - a)
return _normalize(norm)
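
# The _normalize helper is defined elsewhere; a stand-in consistent with its use
# above (assumed implementation, row-wise unit vectors):
def _normalize(vectors):
    """Scale each row vector to unit length."""
    norms = np.linalg.norm(vectors, axis=-1, keepdims=True)
    norms[norms == 0.0] = 1.0  # keep zero-area face normals at zero
    return vectors / norms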
|
4bbe9f7311f6125fd73b028c984e09ee4f124791
| 3,648,418
|
def get_deletion_confirmation(poll):
"""Get the confirmation keyboard for poll deletion."""
delete_payload = f"{CallbackType.delete.value}:{poll.id}:0"
delete_all_payload = f"{CallbackType.delete_poll_with_messages.value}:{poll.id}:0"
locale = poll.user.locale
buttons = [
[
InlineKeyboardButton(
i18n.t("keyboard.permanently_delete", locale=locale),
callback_data=delete_payload,
)
],
[
InlineKeyboardButton(
i18n.t("keyboard.permanently_delete_with_messages", locale=locale),
callback_data=delete_all_payload,
)
],
[get_back_to_management_button(poll)],
]
return InlineKeyboardMarkup(buttons)
|
6d741aa13d3d5234c53115b8b74c353fdce9e87e
| 3,648,419
|
def ngram_tokenizer(lines, ngram_len=DEFAULT_NGRAM_LEN, template=False):
"""
Return an iterable of ngram Tokens of ngram length `ngram_len` computed from
the `lines` iterable of UNICODE strings. Treat the `lines` strings as
templated if `template` is True.
"""
if not lines:
return
ngrams = unigram_tokenizer(lines, template)
ngrams = tokens_ngram_processor(ngrams, ngram_len)
ngrams = ngram_to_token(ngrams)
return ngrams
|
fb7f079ddee8bac10b2ae9efd306a482042b8a0f
| 3,648,420
|
def list_datasets(service, project_id):
"""Lists BigQuery datasets.
Args:
service: BigQuery service object that is authenticated. Example: service = build('bigquery','v2', http=http)
project_id: string, Name of Google project
Returns:
List containing dataset names
"""
datasets = service.datasets()
response = datasets.list(projectId=project_id).execute()
dataset_list = []
for field in response['datasets']:
dataset_list.append(field['datasetReference']['datasetId'])
return dataset_list
|
2712e6a99427ce3b141e7948bba36e8e724f82bc
| 3,648,421
|
def tors(universe, seg, i):
"""Calculation of nucleic backbone dihedral angles.
The dihedral angles are alpha, beta, gamma, delta, epsilon, zeta, chi.
The dihedral is computed based on position of atoms for resid `i`.
Parameters
----------
universe : Universe
:class:`~MDAnalysis.core.universe.Universe` containing the trajectory
seg : str
segment id for base
i : int
resid of the first base
Returns
-------
[alpha, beta, gamma, delta, epsilon, zeta, chi] : list of floats
torsion angles in degrees
Notes
-----
If failure occurs be sure to check the segment identification.
.. versionadded:: 0.7.6
"""
a = universe.select_atoms(" atom {0!s} {1!s} O3\' ".format(seg, i - 1),
" atom {0!s} {1!s} P ".format(seg, i),
" atom {0!s} {1!s} O5\' ".format(seg, i),
" atom {0!s} {1!s} C5\' ".format(seg, i))
b = universe.select_atoms(" atom {0!s} {1!s} P ".format(seg, i),
" atom {0!s} {1!s} O5\' ".format(seg, i),
" atom {0!s} {1!s} C5\' ".format(seg, i),
" atom {0!s} {1!s} C4\' ".format(seg, i))
g = universe.select_atoms(" atom {0!s} {1!s} O5\' ".format(seg, i),
" atom {0!s} {1!s} C5\' ".format(seg, i),
" atom {0!s} {1!s} C4\' ".format(seg, i),
" atom {0!s} {1!s} C3\' ".format(seg, i))
d = universe.select_atoms(" atom {0!s} {1!s} C5\' ".format(seg, i),
" atom {0!s} {1!s} C4\' ".format(seg, i),
" atom {0!s} {1!s} C3\' ".format(seg, i),
" atom {0!s} {1!s} O3\' ".format(seg, i))
e = universe.select_atoms(" atom {0!s} {1!s} C4\' ".format(seg, i),
" atom {0!s} {1!s} C3\' ".format(seg, i),
" atom {0!s} {1!s} O3\' ".format(seg, i),
" atom {0!s} {1!s} P ".format(seg, i + 1))
z = universe.select_atoms(" atom {0!s} {1!s} C3\' ".format(seg, i),
" atom {0!s} {1!s} O3\' ".format(seg, i),
" atom {0!s} {1!s} P ".format(seg, i + 1),
" atom {0!s} {1!s} O5\' ".format(seg, i + 1))
c = universe.select_atoms(" atom {0!s} {1!s} O4\' ".format(seg, i),
" atom {0!s} {1!s} C1\' ".format(seg, i),
" atom {0!s} {1!s} N9 ".format(seg, i),
" atom {0!s} {1!s} C4 ".format(seg, i))
if len(c) < 4:
c = universe.select_atoms(" atom {0!s} {1!s} O4\' ".format(seg, i),
" atom {0!s} {1!s} C1\' ".format(seg, i),
" atom {0!s} {1!s} N1 ".format(seg, i),
" atom {0!s} {1!s} C2 ".format(seg, i))
alpha = a.dihedral.value() % 360
beta = b.dihedral.value() % 360
gamma = g.dihedral.value() % 360
delta = d.dihedral.value() % 360
epsilon = e.dihedral.value() % 360
zeta = z.dihedral.value() % 360
chi = c.dihedral.value() % 360
return [alpha, beta, gamma, delta, epsilon, zeta, chi]
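# A usage sketch (hypothetical topology/trajectory file names; the segment
# id and resid must exist in the loaded system):
#
#   import MDAnalysis as mda
#   u = mda.Universe("rna.psf", "rna.dcd")
#   alpha, beta, gamma, delta, epsilon, zeta, chi = tors(u, "RNAA", 5)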
|
1efcac83c7ec6689e33830daf011bead5199e5dd
| 3,648,422
|
import numpy as np
def get_metric(metric,midi_notes,Fe,nfft,nz=1e4,eps=10,**kwargs):
    """
    Return the optimal transport loss matrix from a list of midi notes (integer indexes).
    """
    nbnotes=len(midi_notes)
    res=np.zeros((nfft//2,nbnotes))
    f=np.fft.fftfreq(nfft,1.0/Fe)[:nfft//2]
f_note=[2.0**((n-60)*1./12)*440 for n in midi_notes]
for i in range(nbnotes):
        m=np.zeros((nfft//2,))
if metric=='square':
m=(f_note[i]-f)**2
elif metric=='psquare':
if midi_notes[i]==0:
m[:]=nz
else:
nmax=int(f.max()/f_note[i])
m[:]=np.inf
for j in range(1,nmax+1):
m=np.minimum(m,(j*f_note[i]-f)**2+j*eps)
res[:,i]=m
return res,f
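# Example (assumes numpy and the integer-division fixes above): build the
# loss matrix for two notes at an 8 kHz sample rate.
M, f = get_metric('square', midi_notes=[60, 64], Fe=8000, nfft=512)
print(M.shape)  # (256, 2): one column of squared frequency distances per note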
|
f21717f239431fac2e37e6b59abfdcb6b964aa0c
| 3,648,423
|
from mido import Message
def octave(track, note, dur):
    """Append an octave: two simultaneous notes ('blanches', half notes) lasting `dur` ticks."""
track.append(Message('note_on', note=note, velocity=100, time=0))
track.append(Message('note_on', note=note + 12, velocity=100, time=0))
track.append(Message('note_off', note=note, velocity=64, time=dur))
track.append(Message('note_off', note=note + 12, velocity=64, time=0))
return track
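# A usage sketch with mido (hypothetical output file name):
from mido import MidiFile, MidiTrack

mid = MidiFile()
track = MidiTrack()
mid.tracks.append(track)
octave(track, note=60, dur=480)  # C4 + C5 sounding together for one beat
mid.save('octave_demo.mid')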
|
c94391677849b1aef58df1a08ade0bae3fe691f5
| 3,648,424
|
def solveTrajectoryPickle(dir_path, file_name, only_plot=False, solver='original', **kwargs):
""" Rerun the trajectory solver on the given trajectory pickle file. """
    # Load the pickled trajectory
    traj_p = loadPickle(dir_path, file_name)
# Run the PyLIG trajectory solver
if solver == 'original':
# Given the max time offset from the pickle file and input, use the larger one of the two
max_toffset = traj_p.max_toffset
if "max_toffset" in kwargs:
if (kwargs["max_toffset"] is not None) and (traj_p.max_toffset is not None):
max_toffset = max(traj_p.max_toffset, kwargs["max_toffset"])
# Remove the max time offset from the list of keyword arguments
kwargs.pop("max_toffset", None)
# Preserve the trajectory ID
if hasattr(traj_p, "traj_id"):
traj_id = traj_p.traj_id
else:
traj_id = None
# Reinitialize the trajectory solver
meastype = 2
traj = Trajectory(traj_p.jdt_ref, output_dir=dir_path, max_toffset=max_toffset, \
meastype=meastype, traj_id=traj_id, **kwargs)
# Fill the observations
for obs in traj_p.observations:
traj.infillWithObs(obs, meastype=meastype)
elif solver == 'gural':
# Init the Gural solver
traj = GuralTrajectory(len(traj_p.observations), traj_p.jdt_ref, velmodel=3, \
max_toffset=traj_p.max_toffset, meastype=2, output_dir=dir_path, verbose=True)
# Fill the observations
for obs in traj_p.observations:
traj.infillTrajectory(obs.azim_data, obs.elev_data, obs.time_data, obs.lat, obs.lon, obs.ele)
    else:
        print('Unrecognized solver:', solver)
        return None
if only_plot:
# Set saving results
traj_p.save_results = True
# Override plotting options with given options
traj_p.plot_all_spatial_residuals = kwargs["plot_all_spatial_residuals"]
traj_p.plot_file_type = kwargs["plot_file_type"]
# Show the plots
traj_p.savePlots(dir_path, traj_p.file_name, show_plots=kwargs["show_plots"])
# Recompute the trajectory
else:
# Run the trajectory solver
traj = traj.run()
return traj
|
a5b5dca042906e86eb153c8889466bff983af243
| 3,648,425
|
import scipy.io as scio
def load_data(path):
    """
    Read raw EEG data from a .mat file.
    :param path: path to the .mat file
    :return: (X, labels), the EEG data array and the category labels
    """
    data=scio.loadmat(path)
    labels = data['categoryLabels'].transpose(1, 0)
    X = data['X_3D'].transpose(2, 1, 0)
    return X,labels
|
69d540529b93705b3fb3a34a607da469825185f5
| 3,648,426
|
def distance_along_glacier(nx, map_dx):
"""Calculates the distance along the glacier in km.
Parameters
----------
nx : int
number of grid points
    map_dx : int
        grid point spacing in meters
Returns
-------
ndarray
distance along the glacier in km.
"""
return np.linspace(0, nx, nx) * map_dx * 1e-3
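# Worked example: 200 grid points spaced 100 m apart span 0-20 km.
import numpy as np  # required by the function above

x = distance_along_glacier(200, 100)
print(x[0], x[-1])  # 0.0 20.0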
|
58acc7f48b0f901b1c3e800ea6e98046805f855a
| 3,648,427
|
def make_postdict_to_fetch_token(token_endpoint: str, grant_type: str,
code: str, client_id: str,
client_secret: str,
redirect_uri: str) -> dict:
"""POST dictionary is the API of the requests library"""
return {'url': token_endpoint,
'data': {
'grant_type': grant_type,
'code': code,
'client_id': client_id,
'client_secret': client_secret,
'redirect_uri': redirect_uri,
},
'headers': {
'Content-Type': 'application/x-www-form-urlencoded',
}}
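# A usage sketch of the authorization-code exchange. All values below are
# placeholders; the resulting dict maps directly onto requests.post():
post_kwargs = make_postdict_to_fetch_token(
    token_endpoint='https://auth.example.com/token',   # placeholder
    grant_type='authorization_code',
    code='received-auth-code',                         # placeholder
    client_id='my-client-id',                          # placeholder
    client_secret='my-client-secret',                  # placeholder
    redirect_uri='https://app.example.com/callback',   # placeholder
)
# response = requests.post(**post_kwargs)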
|
f366fc140c70d094ff99b28a369ac96b4c2a8b49
| 3,648,428
|
def _haxe_std_lib(ctx):
"""
_haxe_std_lib implementation.
Args:
ctx: Bazel context.
"""
toolchain = ctx.toolchains["@rules_haxe//:toolchain_type"]
build_source_file = ctx.actions.declare_file("StdBuild.hx")
toolchain.create_std_build(
ctx,
ctx.attr.target,
build_source_file,
)
hxml = create_hxml_map(ctx, toolchain, for_std_build = True)
hxml["classpaths"].append(build_source_file.dirname)
hxml["args"].append("--dce no")
# Handle the case where we're building in an external directory.
if hxml["external_dir"] != "":
ext_idx = build_source_file.path.find("external/")
hxml["external_dir"] = build_source_file.path[ext_idx:-11]
build_file = ctx.actions.declare_file("{}-std-build.hxml".format(ctx.attr.name))
create_build_hxml(ctx, toolchain, hxml, build_file, suffix = "-intermediate")
intermediate = ctx.actions.declare_directory(hxml["output_dir"])
# Do the compilation.
runfiles = [build_source_file] + find_direct_sources(ctx) + find_direct_resources(ctx)
toolchain.compile(
ctx,
hxml = build_file,
runfiles = runfiles,
out = intermediate,
)
# Post process the output file.
output = ctx.actions.declare_file(hxml["output_dir"].replace("-intermediate", ""))
output_file = ctx.actions.declare_file("{}/{}".format(ctx.attr.name, hxml["output_file"])) if "output_file" in hxml else None
if hxml["target"] == "java":
toolchain.create_final_jar(
ctx,
find_direct_sources(ctx),
intermediate,
output,
hxml["output_file"],
False,
output_file = output_file,
)
else:
inputs = [intermediate]
hxcpp_include_dir = None
if hxml["target"] == "cpp":
hxcpp_include_dir = ctx.actions.declare_directory("hxcpp_includes")
toolchain.copy_cpp_includes(ctx, hxcpp_include_dir)
inputs.append(hxcpp_include_dir)
cmd = "mkdir -p {} && cp -r {}/* {}".format(output.path, intermediate.path, output.path)
if hxcpp_include_dir != None:
cmd += " && cp -r {}/* {}/{}/include".format(hxcpp_include_dir.path, output.path, hxml["name"])
        ctx.actions.run_shell(
            # output_file may be None when hxml has no "output_file" entry;
            # a None in `outputs` would be rejected by Bazel.
            outputs = [output, output_file] if output_file != None else [output],
            inputs = inputs,
            command = cmd,
            use_default_shell_env = True,
        )
return calc_provider_response(ctx, toolchain, hxml, output, output_file = output_file, library_name = "StdBuild")
|
7a29757f7fa9fdcd1942b73221633a0eb7afc2f8
| 3,648,429
|
def spread_match_network(expr_df_in, node_names_in):
"""
Matches S (spreadsheet of gene expressions) and N (network)
The function returns expr_df_out which is formed by reshuffling columns of
expr_df_in. Also, node_names_out is formed by reshuffling node_names_in. The
intersection of node_names_out and column names of expr_df_out are placed at
the beginning of both lists.
Input:
expr_df_in: A pandas dataframe corresponding to gene expression
node_names_in: Name of the nodes in the network
Output:
expr_df_out: Reorganized dataframe of gene expressions
nodes_names_out: Reordered node names
nodes_genes_intersect: Sorted list of shared genes
"""
node_names_in_set = set(node_names_in)
gene_names_in_set = set(expr_df_in.columns.values)
nodes_genes_intersect = sorted(list(gene_names_in_set & node_names_in_set))
nodes_minus_genes = sorted(list(node_names_in_set - gene_names_in_set))
genes_minus_nodes = sorted(list(gene_names_in_set - node_names_in_set))
genes_names_out = nodes_genes_intersect + genes_minus_nodes
nodes_names_out = nodes_genes_intersect + nodes_minus_genes
expr_df_out = expr_df_in[genes_names_out]
    return expr_df_out, nodes_names_out, nodes_genes_intersect
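# A small worked example: genes 'b' and 'c' are shared with the network,
# so both orderings place them first.
import pandas as pd

df = pd.DataFrame([[1, 2, 3]], columns=['a', 'b', 'c'])
expr, nodes, shared = spread_match_network(df, ['b', 'c', 'd'])
print(list(expr.columns))  # ['b', 'c', 'a']
print(nodes)               # ['b', 'c', 'd']
print(shared)              # ['b', 'c']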
|
c0b78263a341d3b7682922eb9a948c21ab2e7e45
| 3,648,431
|
import io
def top_level(url, data):
"""Read top level names from compressed file."""
sb = io.BytesIO(data)
txt = None
with Archive(url, sb) as archive:
file = None
for name in archive.names:
if name.lower().endswith('top_level.txt'):
file = name
break
if file:
txt = archive.read(file).decode('utf-8')
sb.close()
return [name.replace('/', '.') for name in txt.splitlines()] if txt else []
|
0fe92b1d038248f5f759d19b1e27ad013b3592c2
| 3,648,432
|
import requests
def get_timeseries_data(request):
"""
AJAX Controller for getting time series data.
"""
return_obj = {}
# -------------------- #
# VERIFIES REQUEST #
# -------------------- #
if not (request.is_ajax() and request.method == "POST"):
return_obj["error"] = "Unable to establish a secure connection."
return JsonResponse(return_obj)
# -------------------------- #
# GETS DATA FROM REQUEST #
# -------------------------- #
layer_code = request.POST.get("layer_code")
site_code = request.POST.get("site_code")
variable_code = request.POST.get("var_code")
site_name = request.POST.get("site_name")
variable_name = request.POST.get("var_name")
# ------------------------- #
# GETS TIME SERIES DATA #
# ------------------------- #
network_id = layer_code.split(":")[0].split("-")[1]
database_id = ":".join(layer_code.split(":")[1:])
request_url = f"{hydroserver_url}/wof/{network_id}/{database_id}/values/"
params = {
"site_code": site_code,
"variable_code": variable_code
}
response = requests.get(request_url, params=params)
    waterml = etree.fromstring(response.content)
    ns = "{http://www.cuahsi.org/waterML/1.1/}"
    timeseries = waterml.find(f"{ns}timeSeries")
    variable = timeseries.find(f"{ns}variable")
    no_data_value = variable.find(f"{ns}noDataValue").text
    try:
        unit_name = variable.find(f"{ns}unit").find(f"{ns}unitAbbreviation").text
    except AttributeError:
        unit_name = None
    timeseries_data = [[
        x.get('dateTime'),
        x.text if x.text != no_data_value else None
    ] for x in timeseries.find(f"{ns}values").iter(f"{ns}value")]
# -------------------------- #
# RETURNS DATA TO CLIENT #
# -------------------------- #
return_obj["timeseries_data"] = timeseries_data
return_obj["no_data_value"] = no_data_value
return_obj["site_name"] = site_name
return_obj["variable_name"] = variable_name
return_obj["unit_name"] = unit_name
return_obj["variable_code"] = variable_code
return_obj["site_code"] = site_code
return_obj["layer_code"] = layer_code
return JsonResponse(return_obj)
|
d8bb691f99d4a993d2b8e7c7e52f079566f45a63
| 3,648,433
|
from typing import List
from typing import Tuple
import bisect
def line_col(lbreaks: List[int], pos: int) -> Tuple[int, int]:
"""
Returns the position within a text as (line, column)-tuple based
on a list of all line breaks, including -1 and EOF.
"""
if not lbreaks and pos >= 0:
return 0, pos
if pos < 0 or pos > lbreaks[-1]: # one character behind EOF is still an allowed position!
raise ValueError('Position %i outside text of length %s !' % (pos, lbreaks[-1]))
line = bisect.bisect_left(lbreaks, pos)
column = pos - lbreaks[line - 1]
return line, column
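# A worked example. One common way (an assumption, not shown in this
# snippet) to build `lbreaks` is: -1, the index of every '\n', then len(text):
text = "ab\ncd"
lbreaks = [-1] + [i for i, ch in enumerate(text) if ch == '\n'] + [len(text)]
print(line_col(lbreaks, 3))  # (2, 1): 'c' is on line 2, column 1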
|
6b99e3b19ed1a490e4a9cc284f99e875085f819a
| 3,648,434
|
import numpy as np
def sample_from_script(script_path, num_lines, chars_per_line):
"""Sample num_lines from a script.
Parameters
----------
script_path : str
Path to the script
num_lines : int
Number of lines to sample.
    chars_per_line : int
        Number of consecutive characters considered a line.
Returns
-------
lines : List
All the sampled lines.
"""
script = read_script(script_path)
script = split_n_lines(script, num_chars=chars_per_line)
# sample with replacement since some scripts are sparse.
lines = np.random.choice(script, num_lines, replace=True)
return lines
|
52e04582ec297ac512b2d2586524c7c4cb46b1d0
| 3,648,436
|
def is_valid_uuid(x):
"""Determine whether this is a valid hex-encoded uuid."""
    if not x or len(x) != 36:
        return False
    return parse_uuid(x) is not None
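# The parse_uuid helper is not shown here; a plausible stand-in (an
# assumption) built on the standard library:
import uuid

def parse_uuid(x):
    # Return a uuid.UUID for a valid hex-encoded uuid string, else None.
    try:
        return uuid.UUID(x)
    except (ValueError, AttributeError, TypeError):
        return None

print(is_valid_uuid('12345678-1234-5678-1234-567812345678'))  # True
print(is_valid_uuid('not-a-uuid'))                            # False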
|
707618844ddb4375c855e12ca2f75966a91d7c5b
| 3,648,437
|
def wait_for_needle_list(
loops: int,
needle_list: list[tuple[str, tuple[int, int, int, int]]],
sleep_range: tuple[int, int],
):
"""
Works like vision.wait_for_needle(), except multiple needles can be
searched for simultaneously.
Args:
loops: The number of tries to look for each needle in needle_list.
needle_list: A list of filepaths to the needles to look for. Each
item in the list is a 2-tuple containing:
- The filepath to the needle.
- The region in which to search for that needle.
        sleep_range: A 2-tuple containing the minimum and maximum number
            of milliseconds to wait after each loop.
Returns:
If a needle in needle_list is found, returns a 2-tuple containing
the ltwh dimensions of the needle and the index of the needle in
needle_list (This is so the function knows which needle was found).
Returns false if no needles in needle_list could be found.
"""
    for _ in range(loops):
        for index, item in enumerate(needle_list):
            needle, region = item
            needle_found = Vision(
                region=region, needle=needle, loop_num=1
            ).wait_for_needle(get_tuple=True)
            # wait_for_needle(get_tuple=True) returns the needle's ltwh
            # tuple when found, so test truthiness rather than `is True`.
            if needle_found:
                return needle_found, index
        misc.sleep_rand(sleep_range[0], sleep_range[1])
    return False
|
4f09801f54d2f29aea18eb868c7ef44ab0532627
| 3,648,438
|
import random
def get_word():
"""Returns random word."""
words = ['Charlie', 'Woodstock', 'Snoopy', 'Lucy', 'Linus',
'Schroeder', 'Patty', 'Sally', 'Marcie']
return random.choice(words).upper()
|
c4437edc3a1e91cd90c342eda40cfd779364d9c1
| 3,648,439
|
from datetime import datetime
def parsed_json_to_dict(parsed):
"""
Convert parsed dict into dict with python built-in type
param:
parsed parsed dict by json decoder
"""
new_bangumi = {}
new_bangumi['name'] = parsed['name']
new_bangumi['start_date'] = datetime.strptime(
parsed['start_date'], '%Y-%m-%d').date()
if 'translation_team' in parsed:
new_bangumi['translation_team'] = parsed['translation_team']
else:
new_bangumi['translation_team'] = []
if 'total_ep' in parsed:
new_bangumi['total_ep'] = int(parsed['total_ep'])
else:
new_bangumi['total_ep'] = 99
if 'dled_ep' in parsed:
new_bangumi['dled_ep'] = int(parsed['dled_ep'])
else:
new_bangumi['dled_ep'] = 0
if 'keyword' in parsed:
new_bangumi['keyword'] = parsed['keyword']
else:
new_bangumi['keyword'] = new_bangumi['name']
new_bangumi['folder'] = parsed['folder'] if 'folder' in parsed and parsed[
'folder'] is not '' else new_bangumi['name']
new_bangumi['offset'] = int(parsed['offset']) if 'offset' in parsed else 0
return new_bangumi
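# Worked example: optional keys fall back to their documented defaults.
sample = {'name': 'Example Show', 'start_date': '2024-01-05'}
info = parsed_json_to_dict(sample)
print(info['total_ep'], info['dled_ep'])  # 99 0
print(info['keyword'], info['folder'])    # Example Show Example Show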
|
e3bb8306e19a16c9e82d5f6e96c9b4a3707c0446
| 3,648,441
|
import pandas
import matplotlib.pyplot
import medinetparsepy.get_min_max_dates
def plot_shift_type_by_frequency(tidy_schedule: pandas.DataFrame) -> tuple:
"""
Plots a bar graph of shift type frequencies.
:param tidy_schedule: A pandas data frame containing a schedule,
as loaded by load_tidy_schedule().
:type tidy_schedule: pandas.DataFrame
:return: A tuple with a figure and an axis containing a matplotlib bar
graph.
:rtype: tuple
"""
return_data = (
tidy_schedule
.groupby('shift_type')
.agg({'shift_type': 'count'})
.query('shift_type > 0')
.rename_axis(None)
.sort_values(by='shift_type', ascending=False)
)
dates = medinetparsepy.get_min_max_dates.get_min_max_dates(tidy_schedule)
fig, ax = matplotlib.pyplot.subplots()
ax.bar(return_data.index, return_data['shift_type'])
ax.set_xlabel('Shift Type')
ax.set_ylabel('Frequency')
ax.set_title(f'Shift Type by Frequency\nBetween {dates[0]} and {dates[1]}')
return (fig, ax)
|
81fb649cd8439932bbbbf27d9690c5ab9f96e410
| 3,648,443
|
from PIL import Image
import numpy as np
def load_image(path, size=None):
"""
Load the image from the given file-path and resize it to the given size if not None.
Eg: size = (width, height)
"""
img = Image.open(path)
    if size:
        img = img.resize(size=size, resample=Image.LANCZOS)
img = np.array(img)
# Scale image-pixels so they fall between 0.0 and 1.0
# img = img / 255.0
# Convert 2-dim gray-scale array to 3-dim RGB array.
    if len(img.shape) == 2:
        img = np.repeat(img[:, :, np.newaxis], 3, axis=2)
    return img
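# A usage sketch (hypothetical image path):
#
#   img = load_image('photo.jpg', size=(224, 224))
#   img.shape  # -> (224, 224, 3) for both RGB and gray-scale inputs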
|
e770ea3447ce8a7d236c4712859707b8e3cd8248
| 3,648,444
|
import secrets
import ipaddress
def call_wifi(label):
"""Wifi connect function
Parameters
----------
label : str
Output label
Returns
-------
    bool
        True if the connection and ping succeeded, False otherwise.
    """
    try:
        # Set up wifi and connect
        print(wifi.radio.connect(secrets['ssid'], secrets['password']))
        print('ip', wifi.radio.ipv4_address)
        show_text("ip: {}".format(wifi.radio.ipv4_address), label)
        ipv4 = ipaddress.ip_address('8.8.8.8')
        ping_result = wifi.radio.ping(ipv4)
        print('ping', ping_result)
        show_text("ping: {}".format(ping_result), label)
        return True
    except Exception:
        return False
|
a1514ff756b5217b8f79b4f9af882a234b1ad17d
| 3,648,445
|
import numpy as np
def load_normalized_face_landmarks():
    """
    Loads the normalized locations of each of the 68 face landmarks.
    :return: (68, 2) float32 ndarray of (x, y) landmark coordinates
    """
normalized_face_landmarks = np.float32([
(0.0792396913815, 0.339223741112), (0.0829219487236, 0.456955367943),
(0.0967927109165, 0.575648016728), (0.122141515615, 0.691921601066),
(0.168687863544, 0.800341263616), (0.239789390707, 0.895732504778),
(0.325662452515, 0.977068762493), (0.422318282013, 1.04329000149),
(0.531777802068, 1.06080371126), (0.641296298053, 1.03981924107),
(0.738105872266, 0.972268833998), (0.824444363295, 0.889624082279),
(0.894792677532, 0.792494155836), (0.939395486253, 0.681546643421),
(0.96111933829, 0.562238253072), (0.970579841181, 0.441758925744),
(0.971193274221, 0.322118743967), (0.163846223133, 0.249151738053),
(0.21780354657, 0.204255863861), (0.291299351124, 0.192367318323),
(0.367460241458, 0.203582210627), (0.4392945113, 0.233135599851),
(0.586445962425, 0.228141644834), (0.660152671635, 0.195923841854),
(0.737466449096, 0.182360984545), (0.813236546239, 0.192828009114),
(0.8707571886, 0.235293377042), (0.51534533827, 0.31863546193),
(0.516221448289, 0.396200446263), (0.517118861835, 0.473797687758),
(0.51816430343, 0.553157797772), (0.433701156035, 0.604054457668),
(0.475501237769, 0.62076344024), (0.520712933176, 0.634268222208),
(0.565874114041, 0.618796581487), (0.607054002672, 0.60157671656),
(0.252418718401, 0.331052263829), (0.298663015648, 0.302646354002),
(0.355749724218, 0.303020650651), (0.403718978315, 0.33867711083),
(0.352507175597, 0.349987615384), (0.296791759886, 0.350478978225),
(0.631326076346, 0.334136672344), (0.679073381078, 0.29645404267),
(0.73597236153, 0.294721285802), (0.782865376271, 0.321305281656),
(0.740312274764, 0.341849376713), (0.68499850091, 0.343734332172),
(0.353167761422, 0.746189164237), (0.414587777921, 0.719053835073),
(0.477677654595, 0.706835892494), (0.522732900812, 0.717092275768),
(0.569832064287, 0.705414478982), (0.635195811927, 0.71565572516),
(0.69951672331, 0.739419187253), (0.639447159575, 0.805236879972),
(0.576410514055, 0.835436670169), (0.525398405766, 0.841706377792),
(0.47641545769, 0.837505914975), (0.41379548902, 0.810045601727),
(0.380084785646, 0.749979603086), (0.477955996282, 0.74513234612),
(0.523389793327, 0.748924302636), (0.571057789237, 0.74332894691),
(0.672409137852, 0.744177032192), (0.572539621444, 0.776609286626),
(0.5240106503, 0.783370783245), (0.477561227414, 0.778476346951)])
return normalized_face_landmarks
|
2dbd191371345c4382efa3573b54e281607da37c
| 3,648,446
|
from datetime import datetime
from shutil import copyfile
def backup_file(file):
"""Create timestamp'd backup of a file
Args:
file (str): filepath
Returns:
backupfile(str)
"""
current_time = datetime.now()
time_stamp = current_time.strftime("%b-%d-%y-%H.%M.%S")
    backupfile = file + '.bkp_' + time_stamp
    copyfile(file, backupfile)
    return backupfile
|
1c1b33028aab01b4e41ed3ef944202ecc53415df
| 3,648,448
|
def svn_client_mergeinfo_log_eligible(*args):
"""
svn_client_mergeinfo_log_eligible(char path_or_url, svn_opt_revision_t peg_revision,
char merge_source_path_or_url, svn_opt_revision_t src_peg_revision,
svn_log_entry_receiver_t receiver,
svn_boolean_t discover_changed_paths,
apr_array_header_t revprops, svn_client_ctx_t ctx,
apr_pool_t pool) -> svn_error_t
"""
return _client.svn_client_mergeinfo_log_eligible(*args)
|
9f372556d56e0fdc88afc5b3fd35218fb46f3768
| 3,648,449
|
def share_nodes_sockets():
"""
Create a shared node layout where the simulation and analysis ranks share
compute nodes. Furthermore, they share sockets of the node.
"""
    shared_sockets = SummitNode()
    # cpu indices 0-20 map to the first socket and 21-41 to the second, so
    # each socket holds 10 simulation cores and 10 pdf_calc cores.
    for i in range(10):
        shared_sockets.cpu[i] = "simulation:{}".format(i)
        shared_sockets.cpu[21+i] = "simulation:{}".format(10+i)
    for i in range(10):
        shared_sockets.cpu[10+i] = "pdf_calc:{}".format(i)
        shared_sockets.cpu[21+10+i] = "pdf_calc:{}".format(10+i)
return [shared_sockets]
|
d34bfb1b97e4e3b06dee54a89c084dd404c3c6ca
| 3,648,450
|
def _rle_decode(data):
"""
Decodes run-length-encoded `data`.
"""
if not data:
return data
new = b''
last = b''
for cur in data:
if last == b'\0':
new += last * cur
last = b''
else:
new += last
last = bytes([cur])
return new + last
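# Worked example: a NUL byte followed by a count byte N expands to N zero
# bytes; all other bytes pass through unchanged.
print(_rle_decode(b'a\x00\x03b'))  # b'a\x00\x00\x00b'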
|
8463ff6a20b3a39df7b67013d47fe81ed6d53477
| 3,648,452
|
import numpy as np
from scipy import ndimage, optimize
import nutcracker.utils.rotate
def find_shift_between_two_models(model_1,model_2,shift_range=5,number_of_evaluations=10,rotation_angles=[0.,0.,0.],
cropping_model=0,initial_guess=[0.,0.,0.], method='brute_force',full_output=False):
"""
Find the correct shift alignment in 3D by using a different optimization algorithms to minimise the distance between the two models.
Args:
        :model_1(float ndarray): 3d ndarray of the fixed object
        :model_2(float ndarray): 3d ndarray of the rotatable model
Kwargs:
:shift_range(float): absolute value of the range in which the brute should be applied
:number_of_evaluations(int): number of grid points on which the brute force optimises
:rotation_angles(list): set of euler angles for rotating model_2 before applying the shift
        :method(str): the optimisation method used to minimise the difference, default = brute_force, other option fmin_l_bfgs_b
:full_output(bool): returns full output as a dictionary, default = False
"""
def shifting(x,model_1,model_2):
x0, x1, x2 = x
#model_2 = nutcracker.utils.rotate.rotation_based_on_euler_angles(model_2, rotation_angles)
model_2 = ndimage.interpolation.shift(model_2, shift=(x0, x1, x2), order=0, mode='wrap')
#model_2 = np.roll(np.roll(np.roll(model_2,int(x0),axis=0), int(x1), axis=1), int(x2), axis=2)
return np.sum(np.abs(model_1 - model_2) ** 2)
model_2 = nutcracker.utils.rotate.rotation_based_on_euler_angles(model_2, rotation_angles)
# cropping the model
    if cropping_model:
        model_1 = model_1[cropping_model//2:-cropping_model//2,cropping_model//2:-cropping_model//2,cropping_model//2:-cropping_model//2]
        model_2 = model_2[cropping_model//2:-cropping_model//2,cropping_model//2:-cropping_model//2,cropping_model//2:-cropping_model//2]
args = (model_1, model_2)
if method == 'brute_force':
# set parameters
r = slice(-float(shift_range),float(shift_range),2.*shift_range/number_of_evaluations)
ranges = [r,r,r]
        # shift retrieval brute force; the result tuple is indexed directly
        # below (wrapping it in np.array fails for inhomogeneous tuples)
        shift = optimize.brute(shifting, ranges=ranges, args=args, full_output=True, finish=optimize.fmin_bfgs)
    elif method == 'fmin_l_bfgs_b':
        #parameter for fmin_l_bfgs_b
        x0 = np.array(initial_guess)
        # fmin_l_bfgs_b optimisation
        shift = optimize.fmin_l_bfgs_b(shifting, x0, args=args, approx_grad=True)
    else:
        raise ValueError('unknown optimisation method: {}'.format(method))
shift_values = shift[0]
if full_output:
if method == 'brute_force':
out = {'shift_values':shift[0],
'shift_fvalues':shift[1],
'shift_grid':shift[2],
'shift_jout':shift[3]}
elif method == 'fmin_l_bfgs_b':
out = {'shift_values':shift[0],
'shift_fvalues':shift[1]}
return out
else:
return shift_values
|
39dea881a5a00174b178d22910b5cee6d7ce48cd
| 3,648,453
|
from typing import Optional
import requests
def get_url(
url: str,
stream: bool = False,
session: Optional[requests.Session] = None
) -> requests.Response:
"""Call requests.get() on a url and return the requests.Response."""
if not session:
session = retry_session()
resp = session.get(url, stream=stream)
resp.raise_for_status()
return resp
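# The retry_session helper is not shown in this snippet; a plausible
# stand-in (an assumption, not the original implementation) that retries
# transient server errors with exponential backoff:
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

def retry_session(retries: int = 3) -> requests.Session:
    # Mount an adapter that retries on common transient status codes.
    session = requests.Session()
    retry = Retry(total=retries, backoff_factor=0.5,
                  status_forcelist=(500, 502, 503, 504))
    adapter = HTTPAdapter(max_retries=retry)
    session.mount('http://', adapter)
    session.mount('https://', adapter)
    return session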
|
c056446cbb1966f79b472de2f140b9962246fd75
| 3,648,454
|
from typing import Optional
def uploadFromPath(localFilePath: str,
resource,
bucketName: str,
fileID: str,
headerArgs: Optional[dict] = None,
partSize: int = 50 << 20):
"""
Uploads a file to s3, using multipart uploading if applicable
:param str localFilePath: Path of the file to upload to s3
:param S3.Resource resource: boto3 resource
:param str bucketName: name of the bucket to upload to
:param str fileID: the name of the file to upload to
:param dict headerArgs: http headers to use when uploading - generally used for encryption purposes
:param int partSize: max size of each part in the multipart upload, in bytes
:return: version of the newly uploaded file
"""
if headerArgs is None:
headerArgs = {}
client = resource.meta.client
file_size, file_time = fileSizeAndTime(localFilePath)
version = uploadFile(localFilePath, resource, bucketName, fileID, headerArgs, partSize)
info = client.head_object(Bucket=bucketName, Key=compat_bytes(fileID), VersionId=version, **headerArgs)
size = info.get('ContentLength')
assert size == file_size
# Make reasonably sure that the file wasn't touched during the upload
assert fileSizeAndTime(localFilePath) == (file_size, file_time)
return version
|
ee8ca7e177ab8538fd668a42111f86503b57edc1
| 3,648,455
|
def scale_log2lin(value):
"""
    Scale value from logarithmic (decibel) scale to linear: 10**(value/10)
Parameters
----------
value : float or array-like
Value or array to be scaled
Returns
-------
float or array-like
Scaled value
"""
return 10**(value/10)
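# Worked examples: adding 10 on the log (dB) scale multiplies the linear
# value by 10.
print(scale_log2lin(0))   # 1.0
print(scale_log2lin(30))  # 1000.0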
|
04f15a8b5a86a6e94dd6a0f657d7311d38da5dc0
| 3,648,456
|
from typing import Union, Optional, List
import numpy as np
import torch
from torch import nn
def train(
train_length:Union[int, TrainLength], model:nn.Module, dls:DataLoaders, loss_func:LossFunction,
opt:torch.optim.Optimizer, sched=None, metric:Optional[Metric]=None,
device=None, clip_grad:ClipGradOptions=None, callbacks:List[TrainingCallback]=None
) -> TrainingStats:
"""
Train `model` with the data given by `dls.train` to minimize `loss_func`.
Args
train_length: if it's an int, number of training epochs; if it's a TrainLength's subclass instance, training
won't stop until `train_length.must_stop(...)`, which is called at the end of each epoch, returns `True`.
model: module to train.
dls: dataloaders that iterates over the training and validation data. If you don't want to evaluate `model`
using a validation set, `dls.valid` can be `None`.
loss_func: loss function to minimize. We assume that this loss function applies reduction over the batch, i.e.,
it only returns one value.
opt: Pytorch optimizer
sched: scheduler with a method `step` that will be executed once per step.
metric: function that receives a model, a DataLoader `dl` and a `metric_fn` function, computes the metric
`metric_fn` for every batch of `dl` and returns the average.
device: device, in Pytorch format, where the model and data should be placed to train and calculate metrics.
clip_grad: if not None, the gradients of `clip_grad` are clipped to be at most `clip_grad.max_norm` right
before each optimizer step.
callbacks: list of callbacks that must be called every time an event (end of step, end of epoch, ...) occurs.
Returns: statistics of the training run, like a history of the losses/metrics by epoch
"""
if isinstance(train_length, int):
train_length = TrainLengthNEpochs(train_length)
assert dls.train is not None
if device is None: device = get_best_available_device()
if callbacks is None: callbacks = []
n_steps = 0
n_epochs_completed = 0
train_loss_history = []
train_metric_history = []
valid_metric_history = []
    while True:
model.train()
train_losses_epoch = None
n_examples_epoch = 0
for x, y, *extra_xs in dls.train:
x, y = x.to(device), y.to(device)
opt.zero_grad()
preds = model(x, *extra_xs)
loss = loss_func(preds, y)
loss.backward()
if clip_grad is not None:
torch.nn.utils.clip_grad_norm_(clip_grad.params, clip_grad.max_norm)
opt.step()
n_steps += 1
if sched is not None: sched.step()
with torch.no_grad():
actual_bs = x.shape[0]
n_examples_epoch += actual_bs
detached_loss = loss.detach()[None] * actual_bs
train_losses_epoch = (
detached_loss if train_losses_epoch is None else torch.cat((train_losses_epoch, detached_loss))
)
for cb in callbacks:
cb.on_step_end(loss, model, opt)
model.eval()
train_metric, valid_metric, metric_name = None, None, ''
if metric is not None:
metric_name = metric.name
train_metric = metric(model, dls.train, device=device)
train_metric_history.append(train_metric)
if dls.valid is not None:
valid_metric = metric(model, dls.valid, device=device)
valid_metric_history.append(valid_metric)
avg_train_loss = ((train_losses_epoch.sum()) / n_examples_epoch).item()
train_loss_history.append(avg_train_loss)
n_epochs_completed += 1
epoch_stats = EpochTrainingStats(avg_train_loss, train_metric, valid_metric, n_epochs_completed, metric_name)
for cb in callbacks:
cb.on_epoch_end(epoch_stats, model, opt)
if train_length.must_stop(epoch_stats):
break
return TrainingStats(
np.array(train_loss_history),
np.array(train_metric_history),
np.array(valid_metric_history),
n_epochs_completed,
n_steps,
)
|
ad6e4796df66a38df2140060a2150f77b8d7c525
| 3,648,457
|
def _error_to_level(error):
"""Convert a boolean error field to 'Error' or 'Info' """
if error:
return 'Error'
else:
return 'Info'
|
b43e029a4bb14b10de4056758acecebc85546a95
| 3,648,458
|