content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def html_it():
    """Run coverage and make an HTML report for everything.

    Measures coverage while importing the ``here`` module, then writes an
    HTML report into ``../html_other``.  Returns nothing; the report files
    on disk are the only output.
    """
    import coverage
    cov = coverage.coverage()
    cov.start()
    # Importing executes the measured module; the "pragma: nested" markers
    # flag these lines for the (nested) coverage measurement itself.
    import here # pragma: nested
    cov.stop() # pragma: nested
    cov.html_report(directory="../html_other")
def mkdirs(path, raise_path_exits=False):
    """Create a directory (including missing parents) at *path*.

    Parameters
    ----------
    path : str
        Directory path to create.
    raise_path_exits : bool, default False
        If True, raise ``ValueError`` when *path* already exists instead of
        silently returning it.  (Parameter name kept, typo and all, for
        backward compatibility with existing callers.)

    Returns
    -------
    str
        The *path* argument, unchanged.

    Raises
    ------
    ValueError
        If *path* already exists and ``raise_path_exits`` is True.
    """
    if not os.path.exists(path):
        os.makedirs(path)
    else:
        if raise_path_exits:
            # Fixed typo in the original message ("has exitsted").
            raise ValueError('Path %s already exists.' % path)
    return path
def is_triplet(tiles):
    """Return True when *tiles* is exactly three tiles that are all equal."""
    if len(tiles) != 3:
        return False
    return are_all_equal(tiles)
def cli(ctx, comment, metadata=""):
    """Add a canned comment

    :param ctx: CLI context exposing the ``gi`` API client
    :param comment: text of the canned comment to add
    :param metadata: optional metadata string attached to the comment

    Output:

        A dictionary containing the canned comment description
    """
    return ctx.gi.cannedcomments.add_comment(comment, metadata=metadata)
def get_latest_slot_for_resources(latest, task, schedule_set):
    """
    Finds the latest opportunity that a task may be executed

    :param latest: type int
        A maximum bound on the latest point where a task may be executed
    :param task: type DAGSubTask
        The task to obtain the latest starting slot for
    :param schedule_set: type list
        List of occupied time slots of the resources used for the task
    :return: type int
        The latest slot where task may begin
    """
    # Obtain set of occupied slots across resources
    occupied_slots = set()
    for rs in schedule_set:
        occupied_slots |= set(rs)
    # Filter ones that are earlier than latest opportunity
    occupied_slots = list(filter(lambda s: s <= latest, list(sorted(occupied_slots))))
    # Settle for latest if nothing else found
    if not occupied_slots or occupied_slots[-1] < latest:
        return latest
    else:
        # Walk contiguous occupied ranges from latest to earliest, looking
        # for the first gap wide enough to hold the task duration (task.c).
        occupied_ranges = list(reversed(list(to_ranges(occupied_slots))))
        for (s1, e1), (s2, e2) in zip(occupied_ranges, occupied_ranges[1:]):
            if s1 - e2 >= task.c:
                # Gap between the range ending at e2 and the one starting
                # at s1 fits the task; start right after the earlier range.
                return e2 + 1
        # No gap found: start the task so it finishes just before the
        # earliest occupied range begins.
        return occupied_ranges[-1][0] - ceil(task.c)
def _get_text(url: dict):
    """
    Get the text from a message url

    Args:
        url: mapping containing a ``messageUrl`` entry; any query string is
            stripped before the request is issued.  (Annotation corrected
            from ``str`` — the body indexes it like a dict.)

    Returns:
        response: Request response
    """
    # Drop everything after '?' so only the base message URL is fetched.
    response = requests.get(url["messageUrl"].split("?")[0])
    return response
def test_mat_discover():
    """Perform a simple run of mat_discover to ensure it runs without errors.

    This does not involve checking to verify that the output is correct, and
    additionally it uses a `dummy_run` and `dummy` such that MDS is used on a small
    dataset rather than UMAP on a large dataset (for faster runtimes).
    """
    # Smoke test: fit, predict, and cross-validate on the dummy dataset.
    disc = Discover(dummy_run=True)
    train_df, val_df = disc.data(elasticity, fname="train.csv", dummy=True)
    disc.fit(train_df)
    # NOTE(review): `score` is never inspected; the call is kept only to
    # exercise the predict path.
    score = disc.predict(val_df, umap_random_state=42)
    # Combined train+val frame for group cross-validation.
    cat_df = pd.concat((train_df, val_df), axis=0)
    disc.group_cross_val(cat_df, umap_random_state=42)
    print("scaled test error = ", disc.scaled_error)
    disc.plot()
    # disc.save() #doesn't work with pytest for some reason (pickle: object not the same)
    # disc.load()
def animate_a_b(
    symbols=["AAPL", "GOOG", "MSFT", "BRK-A", "AMZN",
             "FB", "XOM", "JNJ", "JPM", "WFC"],
    start_date="2006-12-01", end_date="2016-12-01",
    period=252 * 2):
    """Animate rolling alpha/beta (CAPM-style) fits of *symbols* vs. SPY.

    For each animation frame a window of *period* rows of daily returns is
    taken, ``return[symbol] = alpha + beta * return["SPY"]`` is fitted per
    symbol, and the (alpha, beta) points are plotted.  The animation is
    saved to ``ab.gif`` via imagemagick.

    :param symbols: tickers to fetch; the first entry is skipped in the
        per-symbol loop (SPY is used as the market proxy).
    :param start_date: start of the price history (ISO date string).
    :param end_date: end of the price history (ISO date string).
    :param period: window length in trading-day rows.
    """
    # --- Preprocess (data loading via the Udacity helper module) ---
    dates = pd.date_range(start_date, end_date)  # date range as index
    stock_data = udacity.get_data(symbols, dates)  # get data for each symbol
    # Fill missing values
    udacity.fill_missing_values(stock_data)
    # Daily returns
    daily_returns = udacity.compute_daily_returns(stock_data)

    # --- Make animation ---
    interval = 10  # frame stride in rows
    # BUG FIX: FuncAnimation needs an integer frame count; the original used
    # true division, which yields a float and fails on modern matplotlib.
    frames = (len(stock_data) - period) // interval
    markers = ["o", "^", "s"]

    def animate_polyfit(nframe):
        """Draw one frame: alpha/beta scatter for the nframe-th window."""
        plt.clf()
        daily_returns_p = daily_returns[
            nframe * interval: nframe * interval + period]
        corr = daily_returns_p.corr(method="pearson")
        xmin, xmax = -0.003, 0.003
        ymin, ymax = 0.0, 2.0
        plt.plot([0, 0], [ymin, ymax], '--', color='black')
        plt.plot([xmin, xmax], [1, 1], '--', color='black')
        for n, symbol in enumerate(symbols[1:]):
            beta, alpha = np.polyfit(daily_returns_p["SPY"],
                                     daily_returns_p[symbol], 1)
            # BUG FIX: DataFrame.ix was removed from pandas; .iloc performs
            # the same positional lookup into the correlation matrix.
            plt.plot(alpha, beta, markers[n % len(markers)], alpha=0.7,
                     label=symbol, color=cm.jet(n * 1. / len(symbols)),
                     ms=np.absolute(corr.iloc[0, n + 1]) * 25)
        plt.xlim([xmin, xmax])
        plt.ylim([ymin, ymax])
        plt.xlabel("Alpha")
        plt.ylabel("Beta")
        plt.text(xmax, ymax, str(daily_returns_p.index[-1]),
                 ha="right", va="bottom")
        plt.legend(loc="upper left")

    fig = plt.figure(figsize=(8, 8))
    anim = ani.FuncAnimation(fig, animate_polyfit, frames=frames)
    anim.save("ab.gif", writer="imagemagick", fps=18)
def divideFacet(aFacet):
    """Will always return four facets, given one, rectangle or triangle.

    A facet is a list of vertices (lat, lon) where the last vertex repeats
    the first, plus a trailing tag: a 5-element facet is a triangle whose
    tag is an orientation string ("u" = up, "d" = down); a 6-element facet
    is a polar rectangle whose tag is a boolean (True = north pole).

    NOTE(review): if the orientation tag is neither "u" nor "d" (triangle
    case) and the facet length is neither 5 nor 6, the newFacet* names are
    never bound and the final return raises NameError — confirm inputs are
    always well-formed.
    """
    # Important: For all facets, first vertex built is always the most south-then-west, going counter-clockwise thereafter.
    if len(aFacet) == 5:
        # This is a triangle facet.
        orient = aFacet[4] # get the string expressing this triangle's orientation
        # Cases, each needing subdivision:
        #  ______       ___      ___
        # |\    /|      \  /     /\       |   /      \   |    ^
        # | \  / |       \/     /  \      |  /        \  |    N
        # |__\/__|       /\    /____\     | /          \ |
        #                \/               |/            \|
        # up  up     down  up   down    down  -- orientations, as "u" or "d" in code below.
        # Find the geodetic bisectors of the three sides, store in sequence using edges defined
        # by aFacet vertex indices: [0]&[1] , [1]&[2] , [2]&[3]
        newVerts = []
        for i in range(3):
            # Edges that run along a parallel or a meridian bisect trivially;
            # otherwise find where the great circle crosses the mid-latitude.
            if aFacet[i][0] == aFacet[i+1][0] or aFacet[i][1] == aFacet[i+1][1]:
                newVerts.append(GetMidpoint(aFacet[i], aFacet[i+1]))
            else:
                newLat = (aFacet[i][0] + aFacet[i+1][0]) / 2
                newLon1, newLon2 = findCrossedMeridiansByLatitude(aFacet[i], aFacet[i + 1], newLat)
                newLon = lonCheck(newLon1, newLon2, aFacet[i][1], aFacet[i+1][1])
                newVert = (newLat, newLon)
                newVerts.append(newVert)
        if orient == "u":
            # In the case of up facets, there will be one "top" facet
            # and 3 "bottom" facets after subdivision; we build them in the sequence inside the triangles:
            #
            #        2
            #        /\        Outside the triangle, a number is the index of the vertex in aFacet,
            #       / 1\       and a number with an asterisk is the index of the vertex in newVerts.
            #   2* /____\ 1*
            #     /\ 0  /\
            #    /2 \  /3 \
            #   /____\/____\
            # 0or3   0*     1
            newFacet0 = [newVerts[0], newVerts[1], newVerts[2], newVerts[0], "d"]
            newFacet1 = [newVerts[2], newVerts[1], aFacet[2], newVerts[2], "u"]
            newFacet2 = [aFacet[0], newVerts[0], newVerts[2], aFacet[0], "u"]
            newFacet3 = [newVerts[0], aFacet[1], newVerts[1], newVerts[0], "u"]
        if orient == "d":
            # In the case of down facets, there will be three "top" facets
            # and 1 "bottom" facet after subdivision; we build them in the sequence inside the triangles:
            #
            #   2_____1*_____1
            #    \ 2  /\ 3  /
            #     \  / 0\  /   Outside the triangle, a number is the index of the vertex in aFacet,
            #      \/____\/    and a number with an asterisk is the index of the vertex in newVerts.
            #    2* \ 1  /0*
            #        \  /
            #         \/
            #        0or3
            newFacet0 = [newVerts[2], newVerts[0], newVerts[1], newVerts[2], "u"]
            newFacet1 = [aFacet[0], newVerts[0], newVerts[2], aFacet[0], "d"]
            newFacet2 = [newVerts[2], newVerts[1], aFacet[2], newVerts[2], "d"]
            newFacet3 = [newVerts[0], aFacet[1], newVerts[1], newVerts[0], "d"]
    if len(aFacet) == 6:
        # This is a rectangle facet.
        northBoolean = aFacet[5] # true for north, false for south
        if northBoolean:
            # North pole rectangular facet.
            # Build new facets in the sequence inside the polygons:
            # 3..........2  <-- North Pole
            # |          |
            # |    1     |   Outside the polys, a number is the index of the vertex in aFacet,
            # |          |   and a number with an asterisk is the index of the vertex in newVerts.
            # |          |
            # 2*|--------|1*        /\
            # |\        /| on globe /__\
            # | \  0   / | -------> /\ /\
            # |  \    /  |         /__\/__\
            # | 2 \  / 3 |
            # 0or4''''''''''1
            #      0*
            newVerts = []
            for i in range(4):
                if i != 2:
                    # NOTE(review): comment vs. code mismatch in the original
                    # ("iter == 1" vs the i != 2 test) — the skipped edge is
                    # the one crossing the pole; confirm index is intended.
                    # on iter == 1 we're going across the north pole - don't need this midpoint.
                    if aFacet[i][0] == aFacet[i+1][0] or aFacet[i][1] == aFacet[i+1][1]:
                        newVerts.append(GetMidpoint(aFacet[i], aFacet[i+1]))
                    else:
                        newLat = (aFacet[i][0] + aFacet[i+1][0])/2
                        newLon1, newLon2 = findCrossedMeridiansByLatitude(aFacet[i], aFacet[i + 1], newLat)
                        newLon = lonCheck(newLon1, newLon2, aFacet[i][1], aFacet[i+1][1])
                        newVert = (newLat, newLon)
                        newVerts.append(newVert)
            newFacet0 = [newVerts[0], newVerts[1], newVerts[2], newVerts[0], "d"] # triangle
            newFacet1 = [newVerts[2], newVerts[1], aFacet[2], aFacet[3], newVerts[2], True] # rectangle
            newFacet2 = [aFacet[0], newVerts[0], newVerts[2], aFacet[0], "u"] # triangle
            newFacet3 = [newVerts[0], aFacet[1], newVerts[1], newVerts[0], "u"] # triangle
        else:
            # South pole rectangular facet
            #      1*
            # 3..........2
            # | 2  /\ 3  |   Outside the polys, a number is the index of the vertex in aFacet,
            # |   /  \   |   and a number with an asterisk is the index of the vertex in newVerts.
            # |  / 0  \  |
            # | /      \ |           ________
            # 2*|--------|0*         \  /\  /
            # |          | on globe   \/__\/
            # |    1     | ------->    \  /
            # |          |              \/
            # |          |
            # 0or4'''''''''1  <-- South Pole
            newVerts = []
            for i in range(4):
                if i != 0:
                    # NOTE(review): comment vs. code mismatch in the original
                    # ("iter == 3" vs the i != 0 test) — confirm index.
                    # on iter == 3 we're going across the south pole - don't need this midpoint
                    if aFacet[i][0] == aFacet[i+1][0] or aFacet[i][1] == aFacet[i+1][1]:
                        newVerts.append(GetMidpoint(aFacet[i], aFacet[i+1]))
                    else:
                        newLat = (aFacet[i][0] + aFacet[i+1][0])/2
                        newLon1, newLon2 = findCrossedMeridiansByLatitude(aFacet[i], aFacet[i + 1], newLat)
                        newLon = lonCheck(newLon1, newLon2, aFacet[i][1], aFacet[i+1][1])
                        newVert = newLat, newLon
                        newVerts.append(newVert)
            newFacet0 = [newVerts[2], newVerts[0], newVerts[1], newVerts[2], "u"] # triangle
            newFacet1 = [aFacet[0], aFacet[1], newVerts[0], newVerts[2], aFacet[0], False] # rectangle
            newFacet2 = [newVerts[2], newVerts[1], aFacet[3], newVerts[2], "d"] # triangle
            newFacet3 = [newVerts[1], newVerts[0], aFacet[2], newVerts[1], "d"] # triangle
    # In all cases, return the four facets made in a list
    return [newFacet0, newFacet1, newFacet2, newFacet3]
def compute_g(n):
    """g_k from DLMF 5.11.3/5.11.5"""
    coeffs = compute_a(2*n)
    root_two = mp.sqrt(2)
    # g_k = sqrt(2) * (1/2)_k * a_{2k}
    return [root_two*mp.rf(0.5, k)*coeffs[2*k] for k in range(n)]
def initialize_hs(IMAG_counter):
    """Initialize the HiSeq and return the handle.

    Prompts the user, homes the stages, selects the inlet, sets laser power,
    and puts the LEDs in their startup state.  All hardware work is skipped
    when earlier configuration errors were recorded in ``n_errors``.

    :param IMAG_counter: number of imaging steps; when > 0 the lasers are
        verified to have actually switched on.
    :return: the module-global HiSeq handle ``hs``.
    """
    global n_errors

    experiment = config['experiment']
    method = experiment['method']
    method = config[method]

    # BUG FIX: compare integers with ==, not the identity operator `is` —
    # `x is 0` relies on CPython small-int caching and raises SyntaxWarning
    # on modern interpreters.
    if n_errors == 0:
        if not userYN('Initialize HiSeq'):
            sys.exit()
        hs.initializeCams(logger)
        x_homed = hs.initializeInstruments()
        if not x_homed:
            error('HiSeq:: X-Stage did not home correctly')
        # HiSeq Settings
        inlet_ports = int(method.get('inlet ports', fallback = 2))
        hs.move_inlet(inlet_ports) # Move to 2 or 8 port inlet
        # Set laser power
        for color in hs.lasers.keys():
            laser_power = int(method.get(color+' laser power', fallback = 10))
            hs.lasers[color].set_power(laser_power)
            if IMAG_counter > 0:
                if not hs.lasers[color].on:
                    error('HiSeq:: Lasers did not turn on.')
        hs.f.LED('A', 'off')
        hs.f.LED('B', 'off')
        LED('all', 'startup')
        hs.move_stage_out()
    return hs
def print_toc() -> int:
    """
    Entry point for `libro print-toc`

    The meat of this function is broken out into the generate_toc.py module
    for readability and maintainability.

    :return: 0 on success, otherwise the exit code of the raised SE error.
    """
    parser = argparse.ArgumentParser(description="Build a table of contents for an SE source directory and print to stdout.")
    parser.add_argument("-i", "--in-place", action="store_true", help="overwrite the existing toc.xhtml file instead of printing to stdout")
    parser.add_argument("directories", metavar="DIRECTORY", nargs="+", help="a Standard Ebooks source directory")
    args = parser.parse_args()

    if not args.in_place and len(args.directories) > 1:
        se.print_error("Multiple directories are only allowed with the [bash]--in-place[/] option.")
        return se.InvalidArgumentsException.code

    for directory in args.directories:
        try:
            se_epub = SeEpub(directory)
        except se.SeException as ex:
            se.print_error(ex)
            return ex.code

        # BUG FIX: compute toc_path unconditionally before the try block.
        # Previously it was only assigned on the --in-place branch, so the
        # FileNotFoundError handler below raised NameError when generate_toc
        # failed in print mode.
        toc_path = se_epub.path / "src/epub/toc.xhtml"
        try:
            if args.in_place:
                # Opened "r+" so the file must already exist; the pointer is
                # at 0, so write() replaces and truncate() trims any excess.
                with open(toc_path, "r+", encoding="utf-8") as file:
                    file.write(se_epub.generate_toc())
                    file.truncate()
            else:
                print(se_epub.generate_toc())
        except se.SeException as ex:
            se.print_error(ex)
            return ex.code
        except FileNotFoundError:
            se.print_error(f"Couldn’t open file: [path][link=file://{toc_path}]{toc_path}[/][/].")
            return se.InvalidSeEbookException.code

    return 0
def new_game():
    """Starts new game."""
    global paddle1_pos, paddle2_pos, paddle1_vel, paddle2_vel # these are numbers
    global score1, score2 # these are ints
    # Both paddles share the same vertical extent, centred on the field.
    top = HEIGHT/2 - PAD_HEIGHT/2
    bottom = PAD_HEIGHT/2 + HEIGHT/2
    # Corner order: bottom-left, bottom-right, top-right, top-left.
    paddle1_pos = [[0, top],
                   [PAD_WIDTH, top],
                   [PAD_WIDTH, bottom],
                   [0, bottom]]
    paddle2_pos = [[WIDTH - PAD_WIDTH, top],
                   [WIDTH, top],
                   [WIDTH, bottom],
                   [WIDTH - PAD_WIDTH, bottom]]
    paddle1_vel = 0
    paddle2_vel = 0
    spawn_ball()
def get_colden(theta_xy, theta_xz, theta_yz, n_sample_factor=1.0,
               directory=None, file_name='save.npy', quick=False,
               gridrate=0.5, shift=[0, 0, 0], draw=False, save=False, verbose=False):
    """
    Rotate gas into arbitrary direction

    Monte-Carlo samples points in a 1000^3 box, rotates them by the three
    given angles, shifts them, and accumulates per-cell occupancy fractions
    into ``base_grid``.  Returns ``(len(dsort[0]), base_grid)``.

    NOTE(review): `quick` is accepted but never used.  `draw` only calls
    ``plt.show()`` without plotting anything first — confirm intent.
    NOTE(review): ``size=boxsize*n_sample_factor`` is a float whenever
    n_sample_factor is not an int; ``np.random.randint`` rejects float
    sizes — confirm callers pass integer-valued factors.
    """
    # Choose a sample count appropriate to the grid resolution.
    if gridrate < 2**(-7):
        boxsize=10**2
    elif gridrate < 2**(-6):
        boxsize=3*10**2
    else:
        boxsize = 10**4
    # Uniform random sample of the [0, 1000)^3 box.
    x = np.random.randint(1000, size=boxsize*n_sample_factor)
    y = np.random.randint(1000, size=boxsize*n_sample_factor)
    z = np.random.randint(1000, size=boxsize*n_sample_factor)
    gridsize = 1000 * gridrate #### notice that gridsize is a half of box's side length
    # Centre the sample on the origin, rotate about each axis, then shift.
    x, y, z = x - 500, y - 500, z - 500
    x, y = rotation(x, y, theta_xy)
    x, z = rotation(x, z, theta_xz)
    y, z = rotation(y, z, theta_yz)
    x, y, z = x + shift[0], y + shift[1], z + shift[2]
    # Points whose (x, y) projection lies inside the central cell.
    dsort = np.where((np.sqrt(np.square(x) + np.square(y)) < gridsize * np.sqrt(2))
                     & (abs(x) <= gridsize) & (abs(y) <= gridsize))
    if draw:
        plt.show()
    else:
        pass
    # Extents of the rotated cloud, in units of a full cell (2*gridsize).
    z_sort = np.where( abs(z) <= gridsize )[0]
    X_zsorted = x[z_sort]
    Y_zsorted = y[z_sort]
    min_xshift = min(X_zsorted)/2/gridsize
    max_xshift = max(X_zsorted)/2/gridsize
    min_yshift = min(Y_zsorted)/2/gridsize
    max_yshift = max(Y_zsorted)/2/gridsize
    # Worst-case half-extent of a rotated 1000-box (diagonal), per axis.
    min_xshi, min_yshi, min_zshi = -1000*np.sqrt(3)/gridsize/2/2,-1000*np.sqrt(3)/gridsize/2/2,-1000*np.sqrt(3)/gridsize/2/2
    max_xshi, max_yshi, max_zshi = 1000*np.sqrt(3)/gridsize/2/2, 1000*np.sqrt(3)/gridsize/2/2, 1000*np.sqrt(3)/gridsize/2/2
    base_grid_ddx = int(max(max_xshi, abs(min_xshi)))+1
    base_grid_ddy = int(max(max_yshi, abs(min_yshi)))+1
    base_grid_ddz = int(max(max_zshi, abs(min_zshi)))+1
    print("\n","######################","\n","base_grid_ddx is ",base_grid_ddx,"\n","#####################","\n")
    # Layers 0 and 1 of base_grid store the (i, j) cell indexes; layers
    # 2.. hold the occupancy fraction per z-slab (see assignments below).
    base_grid = np.zeros([2*base_grid_ddz+2+1, 2*base_grid_ddy+1, 2*base_grid_ddx+1])
    i = -base_grid_ddx
    while i <= base_grid_ddx:
        j = -base_grid_ddy
        while j <= base_grid_ddy:
            k = -base_grid_ddz
            while k <= base_grid_ddz:
                # Fraction of all samples falling inside cell (i, j, k).
                component_ijk = np.sum((abs(x + 2 * gridsize * i) <= gridsize) *
                                       (abs(y + 2 * gridsize * j) <= gridsize) *
                                       (abs(z + 2 * gridsize * k) <= gridsize))/boxsize
                base_grid[0][j+base_grid_ddy][i+base_grid_ddx] = i
                base_grid[1][j+base_grid_ddy][i+base_grid_ddx] = j
                base_grid[k+base_grid_ddz+2][j+base_grid_ddy][i+base_grid_ddx] = component_ijk
                #base_grid[i+base_grid_ddx][j+base_grid_ddy][k+base_grid_ddz] = component_ijk
                k = k + 1
            j = j +1
        # Coarse progress indicator, printed every tenth column.
        if i%10 == 1: print("{:.2f} % \r".format(100*abs(i+base_grid_ddx)/base_grid_ddx/2))
        i = i +1
    if verbose: print(base_grid)
    if save:
        save_route = directory
        route_name = save_route+file_name
        np.save(route_name,base_grid)
    return len(dsort[0]), base_grid
def main(file, size):
    """Program that solve N-puzzel game"""
    # Guard: at least one input source must be supplied.
    if not file and not size:
        raise SystemExit("Need puzzel size or puzzel file")
    if file:
        board_size, board = parser(file)
        game = Game(board_size, board)
    else:
        game = Game(size)
    a_star(game)
def PyException_GetCause(space, w_exc):
    """Fetch the ``__cause__`` of the wrapped exception *w_exc*.

    Mirrors CPython's ``PyException_GetCause``: returns the cause object
    (the exception set via ``raise ... from ...``) as accessible from Python
    through ``__cause__``, or ``None`` (NULL) when no cause is associated.
    """
    cause = space.getattr(w_exc, space.wrap('__cause__'))
    return None if space.is_none(cause) else cause
def _get_style_data(stylesheet_file_path=None):
    """Read the global stylesheet file and provide the style data as a str.

    Args:
        stylesheet_file_path (str) : The path to the global stylesheet.
    Returns:
        str : The style data read from the stylesheet file
    """
    global __style_data
    # Fall back to the QSS_STYLESHEET environment variable when no explicit
    # path is supplied; an empty value counts as "not set".
    if not stylesheet_file_path:
        stylesheet_file_path = os.getenv("QSS_STYLESHEET", None)
        if stylesheet_file_path == "":
            stylesheet_file_path = None
    # Cached data wins over any path resolved above.
    # NOTE(review): a later call with a *different* path still returns the
    # first file's data — confirm this module-level caching is intended.
    if __style_data:
        return __style_data
    __style_data = None
    load_default = True
    if stylesheet_file_path is not None:
        try:
            with open(stylesheet_file_path, 'r') as stylesheet_file:
                LOG.info(
                    "Opening style file '{0}'...".format(stylesheet_file_path))
                __style_data = stylesheet_file.read()
                load_default = False
        except Exception as ex:
            # Reading the requested stylesheet failed; fall through to the
            # bundled default below.
            __style_data = None
            LOG.error(
                "Error reading the stylesheet file '{0}'. Exception: {1}".format(
                    stylesheet_file_path,
                    str(ex)))
    if load_default:
        try:
            with open(GLOBAL_STYLESHEET, 'r') as default_stylesheet:
                LOG.info("Opening the default stylesheet '{0}'...".format(
                    GLOBAL_STYLESHEET))
                __style_data = default_stylesheet.read()
        except Exception as ex:
            # Even the default failed: return None to the caller.
            __style_data = None
            LOG.exception("Cannot find the default stylesheet file '{0}'.".format(GLOBAL_STYLESHEET))
    return __style_data
def row_interval(rows: int) -> Expression:
    """Wrap *rows* as a row-count interval expression.

    Used for row-based windowing, e.g.::

        >>> tab.window(Over
        >>>            .partition_by(col('a'))
        >>>            .order_by(col('proctime'))
        >>>            .preceding(row_interval(4))
        >>>            .following(CURRENT_ROW)
        >>>            .alias('w'))

    :param rows: the number of rows
    """
    interval = _unary_op("rowInterval", rows)
    return interval
def build_post_async_retry_failed_request(*, json: Any = None, content: Any = None, **kwargs: Any) -> HttpRequest:
    """Long running post request, service returns a 202 to the initial request, with an entity that
    contains ProvisioningState=’Creating’. Poll the endpoint indicated in the Azure-AsyncOperation
    header for operation status.

    See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this request builder
    into your code flow.

    :keyword json: Pass in a JSON-serializable object (usually a dictionary). See the template in
     our example to find the input shape. Product to put.
    :paramtype json: any
    :keyword content: Pass in binary content you want in the body of the request (typically bytes,
     a byte iterator, or stream input). Product to put.
    :paramtype content: any
    :return: Returns an :class:`~azure.core.rest.HttpRequest` that you will pass to the client's
     `send_request` method. See https://aka.ms/azsdk/python/protocol/quickstart for how to
     incorporate this response into your code flow.
    :rtype: ~azure.core.rest.HttpRequest

    Example:
        .. code-block:: python

            # JSON input template you can fill out and use as your body input.
            json = {
                "id": "str",  # Optional. Resource Id.
                "location": "str",  # Optional. Resource Location.
                "name": "str",  # Optional. Resource Name.
                "properties": {
                    "provisioningState": "str",  # Optional.
                    "provisioningStateValues": "str"  # Optional. Possible values include: "Succeeded", "Failed", "canceled", "Accepted", "Creating", "Created", "Updating", "Updated", "Deleting", "Deleted", "OK".
                },
                "tags": {
                    "str": "str"  # Optional. A set of tags. Dictionary of :code:`<string>`.
                },
                "type": "str"  # Optional. Resource Type.
            }
    """
    content_type = kwargs.pop("content_type", None)  # type: Optional[str]
    accept = "application/json"

    # Construct URL (overridable for test servers via "template_url").
    url = kwargs.pop("template_url", "/lro/postasync/retry/failed")

    # Construct headers; caller-supplied headers are taken as the base.
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if content_type is not None:
        header_parameters["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
    header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")

    # Remaining kwargs flow straight through to the HttpRequest constructor.
    return HttpRequest(method="POST", url=url, headers=header_parameters, json=json, content=content, **kwargs)
def check_thresholds_memory_usage(X, X2, y2, sample_weight2, n_trees: int = 10, n_thresholds: int = 10, depth: int = 3):
    """
    Check if tree really only gets the memory view (reference for object inside memory)
    And that tree does not copy all thresholds array

    Builds *n_trees* trees sharing one thresholds array and prints memory
    usage before and after, plus the construction time.  Diagnostic only:
    nothing is asserted and nothing is returned.
    """
    X = GeneticTree._check_X(GeneticTree(), X, True)
    thresholds = prepare_thresholds_array(n_thresholds, X)
    # Baseline measurement after the shared thresholds array exists.
    memory = memory_used()
    print(f"Memory after creating thresholds array {memory:0.02f}.")
    trees = []
    builder: FullTreeBuilder = FullTreeBuilder()
    start = time.time()
    for i in range(n_trees):
        # Every tree receives the same thresholds object; if trees copied it,
        # the memory delta printed below would scale with n_trees.
        tree: Tree = Tree(3, X2, y2, sample_weight2, thresholds, np.random.randint(10**8))
        tree.resize_by_initial_depth(depth)
        builder.build(tree, 3)
        trees.append(tree)
    end = time.time()
    memory_all = memory_used()
    # Touch a random tree so the list cannot be optimized away.
    print(trees[np.random.randint(0, len(trees))].n_features)
    print(f"All memory {memory_all:0.02f}.")
    print(f"Memory used for trees {memory_all - memory:0.02f}.")
    print(f"Creation time {end - start}.")
def splitclass(classofdevice):
    """
    Split a Bluetooth class-of-device value into its three components.

    Returns a 3-tuple ``(major service class, major device class, minor
    device class)``.  These values indicate the device's major services and
    the type of the device (e.g. mobile phone, laptop, etc.); the field
    layout is described in the Bluetooth "assigned numbers" baseband
    documents.

    Example:
        >>> splitclass(1057036)
        (129, 1, 3)
        >>>
    """
    if not isinstance(classofdevice, int):
        try:
            classofdevice = int(classofdevice)
        except (TypeError, ValueError):
            raise TypeError("Given device class '%s' cannot be split" % \
                                str(classofdevice))

    # The low 2 bits are the "format" field; the remaining bits split into
    # 6-bit minor, 5-bit major, and 11-bit service fields.
    bits = classofdevice >> 2
    return (bits >> 11, (bits >> 6) & 0x1F, bits & 0x3F)
def change_to_local_price(us_fee):
    """Convert *us_fee* (US dollars) to Rial using the cached exchange rate.

    Reads ``dollar_change`` from redis; raises ``ValueError`` with the
    configured error message when no rate is cached.
    """
    rate = RedisClient.get('dollar_change')
    if not rate:
        raise ValueError(ERRORS['CHANGE_PRICE'])
    # Truncate to an integer Rial amount, as the original API did.
    return int(float(us_fee) * int(rate))
def get_positive(data_frame, column_name):
    """
    Filter *data_frame* to the rows whose *column_name* value is
    non-negative (zero included).

    :param data_frame: Pandas data frame to query
    :param column_name: column name to filter values by
    :return: DataFrame view
    """
    expression = f'{column_name} >= 0'
    return data_frame.query(expression)
def axes(*args, **kwargs):
    """
    Add an axes to the figure.

    The axes is added at position *rect* specified by:

    - ``axes()`` by itself creates a default full ``subplot(111)`` window axis.
    - ``axes(rect, axisbg='w')`` where *rect* = [left, bottom, width,
      height] in normalized (0, 1) units. *axisbg* is the background
      color for the axis, default white.
    - ``axes(h)`` where *h* is an axes instance makes *h* the current
      axis. An :class:`~matplotlib.axes.Axes` instance is returned.

    =======   ==============   ==============================================
    kwarg     Accepts          Description
    =======   ==============   ==============================================
    axisbg    color            the axes background color
    frameon   [True|False]     display the frame?
    sharex    otherax          current axes shares xaxis attribute
                               with otherax
    sharey    otherax          current axes shares yaxis attribute
                               with otherax
    polar     [True|False]     use a polar axes?
    aspect    [str | num]      ['equal', 'auto'] or a number. If a number
                               the ratio of x-unit/y-unit in screen-space.
                               Also see
                               :meth:`~matplotlib.axes.Axes.set_aspect`.
    =======   ==============   ==============================================

    Examples:

    * :file:`examples/pylab_examples/axes_demo.py` places custom axes.
    * :file:`examples/pylab_examples/shared_axis_demo.py` uses
      *sharex* and *sharey*.
    """
    nargs = len(args)
    # No positional argument: create a default full-window subplot.
    if len(args) == 0:
        return subplot(111, **kwargs)
    if nargs > 1:
        raise TypeError('Only one non keyword arg to axes allowed')
    arg = args[0]

    # An existing Axes instance just becomes the current axes; otherwise the
    # argument is treated as a rect and a new axes is added to the figure.
    if isinstance(arg, Axes):
        a = gcf().sca(arg)
    else:
        rect = arg
        a = gcf().add_axes(rect, **kwargs)
    return a
def scrape_options_into_new_groups(source_groups, assignments):
    """Copy options from *source_groups* into the parsers/groups that key
    *assignments*, according to the destination names each one maps to.

    :type source_groups: list of :py:class:`OptionParser` and
                         :py:class:`OptionGroup` objects
    :param source_groups: parsers/groups to scrape options from
    :type assignments: dict with keys that are :py:class:`OptionParser` and
                       :py:class:`OptionGroup` objects and values that are
                       lists of strings
    :param assignments: map empty parsers/groups to lists of destination names
                        that they should contain options for
    """
    options_by_dest = scrape_options_and_index_by_dest(*source_groups)
    return populate_option_groups_with_options(assignments, options_by_dest)
def write_dau_pack16(fid, kind, data):
    """Write a dau_pack16 tag to a fif file.

    Parameters
    ----------
    fid : file-like
        Destination fif file opened for writing.
    kind : int
        FIFF tag kind identifier.
    data : array-like
        Values to write; cast to big-endian int16 and transposed first.
    """
    data_size = 2  # bytes per element (int16)
    data = np.array(data, dtype='>i2').T
    _write(fid, data, kind, data_size, FIFF.FIFFT_DAU_PACK16, '>i2')
def resnext56_32x2d_cifar10(classes=10, **kwargs):
    """
    ResNeXt-56 (32x2d) model for CIFAR-10 from 'Aggregated Residual Transformations for Deep Neural Networks,'
    http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    # Delegate to the generic CIFAR ResNeXt factory with this variant's
    # fixed depth / cardinality / bottleneck-width configuration.
    return get_resnext_cifar(classes=classes, blocks=56, cardinality=32, bottleneck_width=2,
                             model_name="resnext56_32x2d_cifar10", **kwargs)
def _get_server_argparser():
"""
Create a :class:`argparse.ArgumentParser` with standard configuration
options that cli subcommands which communicate with a server require, e.g.,
hostname and credential information.
:return: the argparser
:rtype: argparse.ArgumentParser
"""
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument("host", metavar="HOST_NAME",
help="hostname where the management service resides")
parser.add_argument("-u", "--user", metavar="USERNAME",
default=None, required=False,
help="user registered at the management service")
parser.add_argument("-p", "--password", metavar="PASSWORD",
default=None, required=False,
help="password for the management service user")
parser.add_argument("-t", "--port", metavar="PORT",
required=False, default=8443,
help="port where the management service resides")
parser.add_argument("-e", "--truststore", metavar="TRUSTSTORE_FILE",
default=False, required=False,
help="""name of file containing one or more CA pems
to use in validating the management server""")
return parser | 5,342,727 |
def is_internet_file(url):
    """Return if url starts with http://, https://, or ftp://.

    Args:
        url (str): URL of the link

    Returns:
        bool: True when the link points at a remote (internet) resource.
    """
    # str.startswith accepts a tuple of prefixes — one call instead of the
    # original chained boolean checks.
    return url.startswith(("http://", "https://", "ftp://"))
def zip(*args: Observable[Any]) -> Observable[Tuple[Any, ...]]:
    """Merges the specified observable sequences into one observable
    sequence by creating a :class:`tuple` whenever all of the
    observable sequences have produced an element at a corresponding
    index.

    .. marble::
        :alt: zip

        --1--2---3-----4---|
        -a----b----c-d------|
        [      zip()       ]
        --1,a-2,b--3,c-4,d-|

    Example:
        >>> res = rx.zip(obs1, obs2)

    Args:
        args: Observable sources to zip.

    Returns:
        An observable sequence containing the result of combining
        elements of the sources as a :class:`tuple`.
    """
    # NOTE: deliberately shadows the builtin `zip` within this module's
    # public API; the function-level import defers loading the
    # implementation module until first use.
    from .observable.zip import zip_

    return zip_(*args)
def GetFilter(image_ref, holder):
    """Build the occurrences-request filter for the container analysis API.

    Always restricts results to package-manager occurrences on Compute
    resources; when *image_ref* is provided, additionally restricts to the
    fully expanded image URL (including its resolved numeric id).
    """
    client = holder.client
    resource_parser = holder.resources
    # Display only packages, and only compute metadata.
    filters = [
        'kind = "PACKAGE_MANAGER"',
        'has_prefix(resource_url,"https://www.googleapis.com/compute/")',
    ]
    if image_ref:
        image_expander = image_utils.ImageExpander(client, resource_parser)
        self_link, image = image_expander.ExpandImageFlag(
            user_project=properties.VALUES.core.project.Get(),
            image=image_ref.image,
            image_project=image_ref.project,
            return_image_resource=True
        )
        image_url = self_link+'/id/'+str(image.id)
        filters.append('has_prefix(resource_url,"{}")'.format(image_url))
    return ' AND '.join(filters)
def predict_image_paths(image_paths, model_path, target_size=(128, 128)):
    """Use a trained classifier to predict the class probabilities of a list of images

    Returns most likely class and its probability

    :param image_paths: list of path(s) to the image(s)
    :param model_path: path to the pre-trained model
    :param target_size: square size images are letterboxed to before predict
    :type image_paths: list
    :return: per-image prediction array from the model
    :rtype: list
    """
    desired_size = target_size[0]
    # Model cache keyed by weights path, so repeated calls skip reloading.
    if model_path in LOADED_MODELS:
        loaded_model = LOADED_MODELS[model_path]
    else:
        # NOTE(review): `json_path` is not defined in this function or its
        # parameters — a cache miss raises NameError. Presumably it should
        # be derived from model_path; confirm and fix upstream.
        with open(json_path, 'r') as json_file:
            loaded_model = model_from_json(json_file.read())
        loaded_model.load_weights(model_path)
        LOADED_MODELS[model_path] = loaded_model
    img_list = []
    for image_path in image_paths:
        im = Image.open(image_path)
        old_size = im.size
        # Scale so the longest side matches desired_size, preserving aspect.
        ratio = float(desired_size) / max(old_size)
        new_size = tuple([int(x * ratio) for x in old_size])
        im = im.resize(new_size, Image.ANTIALIAS)
        # Letterbox onto a white square canvas, centred.
        new_im = Image.new("RGB", (desired_size, desired_size), color='White')
        new_im.paste(im, ((desired_size - new_size[0]) // 2,
                          (desired_size - new_size[1]) // 2))
        # Normalize pixel values to [0, 1] floats.
        img_array = np.asarray(new_im)
        img_array = img_array.astype('float32')
        img_array = (img_array / 255)
        img_list.append(img_array)
    predictions = loaded_model.predict(np.array(img_list))
    return predictions
def in_bounding_box(point):
    """Determine whether a (lng, lat) point is in our downtown bounding box."""
    lng, lat = point
    box = DOWNTOWN_BOUNDING_BOX
    # Box layout: [min_lng, min_lat, max_lng, max_lat].
    return (box[0] <= lng <= box[2]) and (box[1] <= lat <= box[3])
def __DataContainerERT_addFourPointData(self, *args, **kwargs):
    """Add a new data point to the end of the dataContainer.

    Add a new 4 point measurement to the end of the dataContainer and increase
    the data size by one. The index of the new data point is returned.

    Parameters
    ----------
    *args: [int]
        At least four index values for A, B, M and N — either as four
        separate arguments or as a single 4-element sequence.
    **kwargs: dict
        Values for the actual data configuration.

    Returns
    -------
    ret: int
        Index of this new data point.

    Examples
    --------
    >>> import pygimli as pg
    >>> d = pg.DataContainerERT()
    >>> d.setSensors(pg.utils.grange(0, 3, n=4))
    >>> d.addFourPointData(0,1,2,3)
    0
    >>> d.addFourPointData([3,2,1,0], rhoa=1.0)
    1
    >>> print(d)
    Data: Sensors: 4 data: 2
    >>> print(d('rhoa'))
    2 [0.0, 1.0]
    """
    try:
        # One positional argument: treat it as a sequence of the four
        # electrode indices; otherwise expect four separate arguments.
        if len(args) == 1:
            idx = self.createFourPointData(self.size(),
                                           args[0][0], args[0][1],
                                           args[0][2], args[0][3])
        else:
            idx = self.createFourPointData(self.size(),
                                           args[0], args[1],
                                           args[2], args[3])
    except:
        # NOTE(review): bare except — and when it triggers, `idx` stays
        # unbound, so the loop below raises NameError. If `critical` does
        # not abort execution, this path needs an explicit return/raise.
        print("args:", args)
        critical("Can't interpret arguments:", *args)
    # Attach any extra per-datum values, creating the data field on demand.
    for k, v in kwargs.items():
        if not self.haveData(k):
            self.add(k)
        self.ref(k)[idx] = v
    return idx
def spin_up(work_func, cfgs, max_workers = 8, log=None, single_thread=False, pass_n=True):
    """
    Run a threadable function (typically a subprocess) in parallel.

    Parameters
    ----------
    work_func : callable
        This does the work. It gets called with one or two arguments. The first
        argument in always a config item from the cfgs list; the second is the integer
        enumeration (if `pass_n` is True).
    cfgs : iterable
        An iterator of config items to pass to the workers.  Items with a
        truthy ``skip`` attribute are not submitted.
    max_workers : int
        Maximum number of worker threads.
    log : logging.logger, default None
        If not None, log to this logger.
    single_thread : bool, default False
        If True, the work_func is not multithreaded, just run in sequence. Useful for debugging.
    pass_n : bool, default True
        Should the enumerator be passed to the worker function?
    """
    # Debugging escape hatch: run everything sequentially in this thread.
    if single_thread:
        return _spin_up_single_thread(work_func, cfgs, log, pass_n)
    if log is not None: log('=== pines.multirunner.spin_up begins ===')
    with cf.ThreadPoolExecutor(max_workers=max_workers) as executor:
        exec_futures = {}
        for n,cfg in enumerate(cfgs):
            if log is not None: log(f' = ThreadPoolExecutor {n} =')
            # A cfg may opt out with a truthy `skip` attribute.
            try:
                skip = cfg.skip
            except AttributeError:
                skip = False
            if not skip:
                if pass_n:
                    fut = executor.submit(work_func, cfg, n)
                else:
                    fut = executor.submit(work_func, cfg)
                exec_futures[fut] = n
        # Drain results as workers finish; exceptions are logged with a full
        # traceback but do not stop the remaining workers.
        for future in cf.as_completed(exec_futures):
            n_future = exec_futures[future]
            try:
                # NOTE(review): `data` is never used — results are discarded.
                data = future.result()
            except Exception as exc:
                if log is not None:
                    log(f'=== Thread {n_future} generated an exception ===')
                    y = ("".join(traceback.format_exception(type(exc), exc, exc.__traceback__)))
                    log(y)
    if log is not None: log('=== pines.multirunner.spin_up complete ===')
def list_run_directories(solid_run_dir):
    """Return list of matching run directories

    Given the name of a SOLiD run directory, find all the 'matching'
    run directories based on the instrument name and date stamp.

    For example, 'solid0127_20120123_FRAG_BC' and
    'solid0127_20120123_FRAG_BC_2' would form a matching set, as would
    'solid0127_20120123_PE_BC' etc.

    For "nonstandard" names (e.g. 'solid0127_20120123_PE_BC_COPY', if
    no matches are found then just the input is returned.

    Returns a list of matching directories which includes the input.
    """
    # Break up the input
    base_dir = os.path.dirname(os.path.abspath(solid_run_dir))
    run_name = os.path.basename(solid_run_dir.rstrip(os.sep))
    # Get the run info from the name
    try:
        base_run_info = SolidRunInfo(run_name)
    except Exception:
        # Wrong format for name
        logging.error("'%s' not a valid SOLiD run directory name" % solid_run_dir)
        return []
    # List all directories in the base dir and look for matches
    dirs = []
    for f in os.listdir(base_dir):
        if os.path.isdir(os.path.join(base_dir,f)):
            try:
                # Check if instrument name and datestamp match
                run_info = SolidRunInfo(f)
                if run_info.instrument != base_run_info.instrument or \
                   run_info.datestamp != base_run_info.datestamp:
                    # Not a match
                    continue
            except Exception:
                # Wrong format for name, not a match
                continue
            # Check for run definition file
            if not os.path.exists(os.path.join(base_dir,f,f+'_run_definition.txt')):
                continue
            # Must be a match, add to the list
            dirs.append(os.path.join(base_dir,f))
    # Check that the original run is also included
    # NOTE(review): this *replaces* the whole list with just the input when
    # the input itself didn't match (e.g. missing run definition file),
    # discarding any other matches found above.  If the intent was to ensure
    # the input is merely included, this should be dirs.append(...) — confirm.
    if os.path.abspath(solid_run_dir) not in dirs:
        dirs = [solid_run_dir]
    # Sort and return directories
    dirs.sort()
    return dirs
def dummy_register():
    """Dummy register.

    Pytest-style generator fixture: builds two namespace directories
    ('kaggle' and 'mlds') populated with prepared dummy datasets, writes a
    namespace config TOML that also points a third namespace at a
    non-existing path, and yields a DataDirRegister for it.  The temporary
    directory (and everything prepared in it) is removed when the generator
    is finalized.
    """
    with tempfile.TemporaryDirectory() as tmp_path:
        tmp_path = pathlib.Path(tmp_path)
        # Prepare the datasets
        # Namespace 0
        Ds0(data_dir=tmp_path / 'kaggle').download_and_prepare()
        Ds1(data_dir=tmp_path / 'kaggle').download_and_prepare()
        # Namespace 1
        Ds0(data_dir=tmp_path / 'mlds').download_and_prepare()
        # Namespace 2: (non-existing)
        content = textwrap.dedent(
            f"""
        [Namespaces]
        kaggle='{os.fspath(tmp_path / 'kaggle')}'
        mlds='{os.fspath(tmp_path / 'mlds')}'
        other='/tmp/path/to/non-existing-path'
        """
        )
        dummy_path = tmp_path / 'dummy-community-datasets.toml'
        dummy_path.write_text(content)
        # Yield (not return) so the TemporaryDirectory stays alive while
        # the caller uses the register.
        yield register_path.DataDirRegister(path=dummy_path)
def refresh_remote_vpsa(session, rvpsa_id, return_type=None, **kwargs):
    """
    Ask the API to refresh a remote VPSA's state (e.g. discover new pools
    and update remote pool free-space figures).

    :type session: zadarapy.session.Session
    :param session: A valid zadarapy.session.Session object.  Required.

    :type rvpsa_id: str
    :param rvpsa_id: The remote VPSA 'name' value as returned by
        get_all_remote_vpsas, e.g. 'rvpsa-00000001'.  Required.

    :type return_type: str
    :param return_type: Pass the string 'json' to get a JSON string back;
        any other value yields a Python dictionary (the default).

    :rtype: dict, str
    :returns: A dictionary or JSON string, depending on return_type.
    """
    # Validate the identifier before issuing the request.
    verify_remote_vpsa_id(rvpsa_id)

    endpoint = '/api/remote_vpsas/{0}/refresh.json'.format(rvpsa_id)
    return session.post_api(path=endpoint, return_type=return_type, **kwargs)
def zeros(shape, int32=False):
    """Create an all-zero blob of the given shape.

    The dtype is np.int32 when *int32* is true, otherwise np.float32.
    """
    dtype = np.int32 if int32 else np.float32
    return np.zeros(shape, dtype=dtype)
def _connect():
    """Connect to a XMPP server and return the connection.

    Reads the admin JID, server port and password from Django-style
    ``settings`` and returns an authenticated client.

    Returns
    -------
    xmpp.Client
        A xmpp client authenticated to a XMPP server.
    """
    # The JID supplies domain (server), node (username) and resource.
    jid = xmpp.protocol.JID(settings.XMPP_PRIVATE_ADMIN_JID)
    client = xmpp.Client(server=jid.getDomain(), port=settings.XMPP_PRIVATE_SERVER_PORT)
    # NOTE(review): connect()/auth() return falsy values on failure rather
    # than raising; failures are not checked here — confirm callers cope.
    client.connect()
    client.auth(
        user=jid.getNode(),
        password=settings.XMPP_PRIVATE_SERVER_PASSWORD,
        resource=jid.getResource(),
    )
    return client
def index():
    """Return the main page.

    Serves the static single-page-app entry point (static/index.html).
    """
    return send_from_directory("static", "index.html")
def get_groups(records_data: dict, default_group: str) -> List:
    """
    Return the groups listed in the SQS message, or the default group.

    Args:
        records_data: Parsed SQS message body; must contain a "Groups" key.
        default_group: Fallback group used when "Groups" is empty.

    Returns:
        The non-empty "Groups" list from the message, otherwise a
        single-element list containing default_group.

    Raises:
        KeyError: If records_data has no "Groups" key (same as before).
    """
    # The original wrapped this in `try/except IndexError`, but no indexing
    # happens here, so the handler was dead code and has been removed; the
    # explicit len(...) > 0 test is replaced by truthiness.
    groups = records_data["Groups"]
    return groups if groups else [default_group]
def sanitized_log(func: Callable[..., None], msg: AnyStr, *args, **kwargs) -> None:
    """
    Call the logging function *func* with *msg*, after passing every bytes
    positional argument through sanitize(); other arguments are untouched.
    """
    cleaned = tuple(
        sanitize(arg) if isinstance(arg, bytes) else arg
        for arg in args
    )
    func(msg, *cleaned, **kwargs)
def build_stats(history, eval_output, time_callback):
    """Normalizes and returns dictionary of stats.

    Args:
      history: Results of the training step. Supports both categorical_accuracy
        and sparse_categorical_accuracy.
      eval_output: Output of the eval step. Assumes first value is eval_loss and
        second value is accuracy_top_1.
      time_callback: Time tracking callback likely used during keras.fit.

    Returns:
      Dictionary of normalized results.
    """
    stats = {}
    if eval_output:
        # .item() converts numpy scalars to native Python numbers so the
        # resulting dict is JSON-serializable.
        stats['accuracy_top_1'] = eval_output[1].item()
        stats['eval_loss'] = eval_output[0].item()
    if history and history.history:
        train_hist = history.history
        # Gets final loss from training.
        stats['loss'] = train_hist['loss'][-1].item()
        # Gets top_1 training accuracy.
        if 'categorical_accuracy' in train_hist:
            stats[TRAIN_TOP_1] = train_hist['categorical_accuracy'][-1].item()
        elif 'sparse_categorical_accuracy' in train_hist:
            stats[TRAIN_TOP_1] = train_hist['sparse_categorical_accuracy'][-1].item()
    if time_callback:
        timestamp_log = time_callback.timestamp_log
        stats['step_timestamp_log'] = timestamp_log
        stats['train_finish_time'] = time_callback.train_finish_time
        if len(timestamp_log) > 1:
            # Average examples/second over the logged window:
            # examples = batch_size * log_steps * number-of-intervals.
            stats['avg_exp_per_second'] = (
                time_callback.batch_size * time_callback.log_steps *
                (len(time_callback.timestamp_log)-1) /
                (timestamp_log[-1].timestamp - timestamp_log[0].timestamp))
    return stats
def test_tags_limited_to_user(authenticated_user: User, api_client: APIClient):
    """Test that tags returned are for the authenticated user"""
    # Tag owned by a *different* user must not appear in the response.
    user2 = create_user(email="other@testing.com", password="testpass")
    Tag.objects.create(user=user2, name="Fruity")
    # Tag owned by the authenticated user should be the only one returned.
    tag = Tag.objects.create(user=authenticated_user, name="Comfort Food")
    res = api_client.get(TAGS_URLS)
    assert res.status_code == status.HTTP_200_OK
    assert len(res.data) == 1
    assert res.data[0]["name"] == tag.name
def truncate_single_leafs(nd):
    """Collapse chains of single-child nodes into their leaf.

    A node with exactly one child that is a leaf (has a layer) is replaced
    by a leaf carrying the parent's name; a node with one non-leaf child is
    merged with it, the merged node's name becoming a (parent, child) tuple.

    >>> truncate_single_leafs(node(name='a', subs=[node(name='a', subs=None, layer='a')], layer=None))
    node(name='a', subs=None, layer='a')
    """
    # Leaves (nodes with a layer) are returned unchanged.
    if nd.layer:
        return nd
    if nd.subs and len(nd.subs) == 1:
        # Single leaf child: lift its layer onto this node's name.
        if nd.subs[0].layer:
            return node(nd.name, None, nd.subs[0].layer)
        # Single internal child: recurse first, then merge the two names.
        nd2 = truncate_single_leafs(nd.subs[0])
        return node(name=(nd.name, nd.subs[0].name),
                    subs=nd2.subs,
                    layer=nd2.layer,
                    )
    # Multiple children: recurse into each independently.
    return node(nd.name, [truncate_single_leafs(n) for n in nd.subs], None)
def postprocess_output(output, example, postprocessor):
    """Applies postprocessing function on a translation output.

    Returns a dict with a "text" key and, when available, "score" (sum of
    hypothesis scores) and "align" (token alignment from attention).  When
    *postprocessor* is None, the raw first hypothesis is returned with no
    score or alignment.
    """
    # Send all parts to the postprocessing.
    if postprocessor is None:
        text = output.output[0]
        score = None
        align = None
    else:
        tgt_tokens = output.output
        src_tokens = example.source_tokens
        text = postprocessor.process_input(
            src_tokens,
            tgt_tokens,
            metadata=example.metadata,
            config=example.config,
            options=example.options,
        )
        # Only report a score if every hypothesis has one.
        score = sum(output.score) if all(s is not None for s in output.score) else None
        attention = output.attention
        # Single-hypothesis case: unwrap the attention matrix.
        if attention and len(attention) == 1:
            attention = attention[0]
            align = (
                align_tokens(src_tokens, tgt_tokens, attention) if attention else None
            )
        else:
            # No (or multi-hypothesis) attention: alignment unsupported.
            align = None
    result = {"text": text}
    if score is not None:
        result["score"] = score
    if align is not None:
        result["align"] = align
    return result
def get_aqua_timestamp(iyear,ichunk,branch_flag):
    """
    Return a 'YYYY-MM-DD-00000' timestamp for one of the five 73-day
    chunks a model year is split into.

    Parameters
    ----------
    iyear : int
        Model year (zero-padded to four digits in the output).
    ichunk : int
        Chunk index, 0-4.
    branch_flag : int
        0 for a regular run; any other value marks a branch run, whose
        chunk start days are shifted forward by one day.

    Returns
    -------
    str
        Timestamp such as '0005-03-15-00000'.

    Raises
    ------
    IndexError
        If ichunk is outside 0-4 (the original raised UnboundLocalError).
    """
    # (month, day) of each chunk start; replaces the two duplicated
    # if/elif ladders of the original (the unused numpy import is dropped).
    chunk_starts = [(1, 1), (3, 15), (5, 27), (8, 8), (10, 20)]
    month, day = chunk_starts[ichunk]
    if branch_flag != 0:
        # Branch runs start one day later; no chunk start falls on a month
        # boundary, so a plain day increment is safe.
        day += 1
    return '{0:04d}-{1:02d}-{2:02d}-00000'.format(iyear, month, day)
def aggregate_native(gradients, f, m=None, **kwargs):
    """ Multi-Krum rule.

    Args:
      gradients Non-empty list of gradients to aggregate
      f         Number of Byzantine gradients to tolerate
      m         Optional number of averaged gradients for Multi-Krum
      ...       Ignored keyword-arguments
    Returns:
      Aggregated gradient
    """
    # Defaults
    # Standard Multi-Krum default: average all but the f Byzantine plus
    # two safety margins -> n - f - 2 selected gradients.
    if m is None:
        m = len(gradients) - f - 2
    # Computation
    # Delegates to the native (compiled) Krum implementation.
    return native.krum.aggregate(gradients, f, m)
def isNormalTmpVar(vName: types.VarNameT) -> bool:
    """Return True iff *vName* fully matches the normal-tmp-var pattern."""
    # bool(...) on the match object replaces the verbose
    # if-match/return True/return False ladder of the original.
    return bool(NORMAL_TMPVAR_REGEX.fullmatch(vName))
def dump_obj(obj, path):
    """Pickle *obj* under *path*, named by the hex of its id().

    The file is chmod'ed to owner read/write only.  Returns the file name
    (not the full path).  Note: *path* is concatenated as-is, so it should
    end with a separator.
    """
    name = hex(id(obj))
    target = path + name
    with open(target, 'wb') as handle:
        os.chmod(target, stat.S_IWUSR | stat.S_IRUSR)
        pickle.dump(obj, handle)
    return name
def count_time(start):
    """
    :param start: epoch timestamp (seconds) marking the start point
    :return: the elapsed time in seconds since *start*
    """
    from time import time
    return time() - start
def split_by_state(xs, ys, states):
    """
    Split parallel (x, y, state) streams into contiguous segments, one per
    run of equal states, so each segment can be plotted in its own color.

    Returns a list of (state, x_list, y_list) tuples.
    """
    segments = []
    previous_state = None
    for x, y, state in zip(xs, ys, states):
        if state != previous_state:
            # Start a fresh segment whenever the state changes.
            segments.append((state, [], []))
            previous_state = state
        _, seg_xs, seg_ys = segments[-1]
        seg_xs.append(x)
        seg_ys.append(y)
    return segments
def _free_x(N: int, Y: int, queen_y2x: List[int]):
"""Getting free cells in the Yth board row
>>> [x for x in _free_x(1, 0,[-1])]
[0]
>>> [x for x in _free_x(2, 1,[ 0,-1])]
[]
>>> [x for x in _free_x(2, 0,[-1,-1])]
[0, 1]
>>> [x for x in _free_x(3, 0,[ -1 ,-1, -1])]
[0, 1, 2]
>>> [x for x in _free_x(3, 1,[0,-1,-1])]
[2]
>>> [x for x in _free_x(3, 2,[0,2,-1])]
[]
>>> [x for x in _free_x(4, 0,[ -1 ,-1, -1, -1])]
[0, 1, 2, 3]
>>> [x for x in _free_x(4, 1,[0, -1, -1, -1])]
[2, 3]
>>> [x for x in _free_x(4, 1,[1, -1, -1, -1])]
[3]
>>> [x for x in _free_x(4, 1,[2, -1, -1, -1])]
[0]
>>> [x for x in _free_x(4, 1,[3, -1, -1, -1])]
[0, 1]
>>> [x for x in _free_x(4, 2,[0, 2, -1, -1])]
[]
>>> [x for x in _free_x(4, 2,[0, 3, -1, -1])]
[1]
>>> [x for x in _free_x(4, 3,[0, 3, 2, -1])]
[]
"""
x_occupie_count = [0]*N
for y, x in enumerate(queen_y2x[:Y]):
# vertical
x_occupie_count[x] += 1
# diagonal
dx = Y-y
if x-dx >= 0:
x_occupie_count[x-dx] += 1
if x+dx < N:
x_occupie_count[x+dx] += 1
for x, count in enumerate(x_occupie_count):
if count == 0:
yield x | 5,342,753 |
def sgd_update(trainables, learning_rate=1e-2):
    """
    Apply one vanilla SGD step in place: value -= learning_rate * gradient.

    Each trainable must expose .value and a .gradients mapping keyed by
    the trainable itself.
    """
    for param in trainables:
        step = learning_rate * param.gradients[param]
        param.value = param.value - step
def final_spectrum(t, age, LT, B, EMAX, R, V, dens, dist, Tfir, Ufir, Tnir, Unir, binss, tmin, ebreak, alpha1, alpha2):
    """
    GAMERA computation of the particle spectrum (for the extraction of the
    photon sed at the end of the evolution of the PWN)
    http://libgamera.github.io/GAMERA/docs/time_dependent_modeling.html

    Returns
    -------
    sed : array-like
        Array with the evolved particle spectrum (erg/cm**2/s vs TeV) at the
        last step
    tot : array-like
        Array with the total photon spectrum (erg/cm**2/s vs TeV)
    ic : array-like
        Array with the inverse compton photon spectrum (erg/cm**2/s vs TeV)
    ic : array-like
        Array with the inverse compton contribution to the total
        photon spectrum (erg/cm**2/s vs TeV)
    ic_cmb : array-like
        Array with the cmb inverse compton contribution to the total
        photon spectrum (erg/cm**2/s vs TeV)
    ic_fir : array-like
        Array with the fir inverse compton contribution to the total
        photon spectrum (erg/cm**2/s vs TeV)
    ic_nir : array-like
        Array with the nir inverse compton contribution to the total
        photon spectrum (erg/cm**2/s vs TeV)
    ic_ssc : array-like
        Array with the self-synchrotron compton contribution to the total
        photon spectrum (erg/cm**2/s vs TeV)
    ic_synch : array-like
        Array with the synchrotron contribution to the total
        photon spectrum (erg/cm**2/s vs TeV)
    """
    # Particle evolution: broken power-law injection with time-dependent
    # luminosity, B-field, maximum energy, radius and expansion velocity.
    fp = gp.Particles()
    p_spectrum = broken_powerlaw(ebreak,alpha1,alpha2,EMAX, 500)
    e = np.logspace(np.log10(gp.m_e),np.log10(3*np.max(EMAX)),100) #particle escape
    t_m, e_m = np.meshgrid(t, e) #particle escape
    fp.SetTimeAndEnergyDependentEscapeTime(t, e, t_esc(e_m, t_m, B, R)) #particle escape
    fp.SetCustomInjectionSpectrum(p_spectrum)
    fp.SetLuminosity(list(zip(t,LT)))
    fp.SetBField(list(zip(t,B)))
    fp.SetEmax(list(zip(t,EMAX)))
    fp.SetRadius(list(zip(t,R)))
    fp.SetExpansionVelocity(list(zip(t,V)))
    fp.SetAmbientDensity(dens)
    # Target photon fields: CMB (2.7 K), FIR and NIR thermal components.
    fp.AddThermalTargetPhotons(2.7,0.25*gp.eV_to_erg)
    fp.AddThermalTargetPhotons(Tfir, Ufir)
    fp.AddThermalTargetPhotons(Tnir, Unir)
    fp.SetTmin(tmin)
    erad = np.logspace(-21,4.,binss) * gp.TeV_to_erg # energies(in ergs) where radiation will be calculated
    # Radiation object mirrors the same target photon fields and density.
    fr = gp.Radiation()
    fr.SetDistance(dist)
    fr.AddThermalTargetPhotons(2.7,0.25*gp.eV_to_erg)
    fr.AddThermalTargetPhotons(Tfir, Ufir)
    fr.AddThermalTargetPhotons(Tnir, Unir)
    fr.SetAmbientDensity(dens)
    # Evolve the electron population up to `age`, then hand the resulting
    # spectrum to the radiation calculation (including SSC photons).
    fp.SetAge(age)
    fp.CalculateElectronSpectrum(binss)
    sed = np.array(fp.GetParticleSED())
    sp = np.array(fp.GetParticleSpectrum())
    fr.SetElectrons(sp[:])
    fr.SetBField(fp.GetBField())
    fr.AddSSCTargetPhotons(fp.GetRadius())
    fr.CalculateDifferentialPhotonSpectrum(erad)
    # IC components are indexed in the order the target fields were added:
    # 0=CMB, 1=FIR, 2=NIR, 3=SSC.
    tot = np.array(fr.GetTotalSED())
    ic = np.array(fr.GetICSED())
    ic_cmb = np.array(fr.GetICSED(0))
    ic_fir = np.array(fr.GetICSED(1))
    ic_nir = np.array(fr.GetICSED(2))
    ic_ssc = np.array(fr.GetICSED(3))
    synch = np.array(fr.GetSynchrotronSED())
    return sed, tot, ic, ic_cmb, ic_fir, ic_nir, ic_ssc, synch
def exportWorkflowTool(context):
    """Export workflow tool and contained workflow definitions as XML files.

    GenericSetup export step: looks the workflow tool up in the site's
    component registry and does nothing (logging a debug message) when no
    tool is registered.
    """
    sm = getSiteManager(context.getSite())
    tool = sm.queryUtility(IWorkflowTool)
    if tool is None:
        logger = context.getLogger('workflow')
        logger.debug('Nothing to export.')
        return
    # Empty parent path: export the tool (and its workflows) at the root.
    exportObjects(tool, '', context)
def stop_workers(ctx):
    """Stop the workers

    Invoke-style task: refuses to run in the local environment (workers
    there must be killed by hand) and otherwise runs the environment's
    stop script on every configured host.
    """
    if settings['env'] == 'local':
        raise Exit(
            'In the local environment use kill to quit the workers '
        )
    else:
        for conn in settings['hosts']:
            conn.run(f'/etc/cron.scripts/h51_stop_workers {settings["env"]}')
def modal():
    """Contributions input controller for modal view.

    request.vars.book_id: id of book, optional
    request.vars.creator_id: id of creator, optional

    if request.vars.book_id is provided, a contribution to a book is presumed.
    if request.vars.creator_id is provided, a contribution to a creator is
    presumed.
    if neither request.vars.book_id nor request.vars.creator_id are provided
    a contribution to zco.mx is presumed.

    request.vars.book_id takes precendence over request.vars.creator_id.

    Returns a dict (template context) with 'book' and 'creator' keys,
    either of which may be None.

    Raises:
        LookupError: if creator_id is given but no such creator exists.
    """
    book = None
    creator = None
    if request.vars.book_id:
        # Book given: derive the creator from the book.
        book = Book.from_id(request.vars.book_id)
        creator = Creator.from_id(book.creator_id)
    elif request.vars.creator_id:
        creator = Creator.from_id(request.vars.creator_id)
        if not creator:
            raise LookupError(
                'Creator not found, id %s', request.vars.creator_id)
    return dict(
        book=book,
        creator=creator,
    )
def test_launch_with_none_or_empty_lti_message_type():
    """
    Does the launch request work with an empty or None lti_message_type value?

    Expects the validator to reject both cases with an HTTPError.
    """
    oauth_consumer_key = 'my_consumer_key'
    oauth_consumer_secret = 'my_shared_secret'
    launch_url = 'http://jupyterhub/hub/lti/launch'
    headers = {'Content-Type': 'application/x-www-form-urlencoded'}
    # Factory builds a complete, validly-signed set of LTI 1.1 launch args.
    args = factory_lti11_basic_launch_args(oauth_consumer_key, oauth_consumer_secret,)
    validator = LTI11LaunchValidator({oauth_consumer_key: oauth_consumer_secret})
    with pytest.raises(HTTPError):
        args['lti_message_type'] = None
        validator.validate_launch_request(launch_url, headers, args)
    with pytest.raises(HTTPError):
        args['lti_message_type'] = ''
        validator.validate_launch_request(launch_url, headers, args)
def returns(data):
    """Returns for any number of days

    :param data: pandas Series of prices (one row per trading day).
    :return: mean daily percentage change * number of trading days * 100.
    """
    trading_days = len(data)
    logger.info(
        "Calculating Returns for {} trading days".format(trading_days))
    # The original wrapped everything in `except Exception as exception:
    # raise exception`, which only re-raised the caught exception unchanged
    # (a no-op handler), and built a throwaway DataFrame just to take the
    # mean of one column.  Both are removed; behaviour is identical.
    mean_daily_returns = data.pct_change(1).mean()
    return mean_daily_returns * trading_days * 100
def initialize_gear(context):
    """
    Used to initialize the gear context 'gear_dict' dictionary with objects that
    are used by all gears in the HCP-Suite.
        Environment Variables
        Manifest
        Logging
        dry-run
    """
    # This gear will use a "gear_dict" dictionary as a custom-user field
    # on the gear context.

    # grab environment for gear
    # (written to /tmp/gear_environ.json by the gear's entrypoint)
    with open('/tmp/gear_environ.json', 'r') as f:
        context.gear_dict['environ'] = json.load(f)

    # grab the manifest for use later
    # errors='ignore' skips undecodable bytes in the manifest file
    with open('/flywheel/v0/manifest.json','r',errors='ignore') as f:
        context.gear_dict['manifest_json'] = json.load(f)

    #get_Custom_Logger is defined in utils.py
    context.log = get_custom_logger(context)

    # Set dry-run parameter
    context.gear_dict['dry-run'] = context.config['dry-run']
def test_reraise_indirect():
    """
    Doctest: reraise() called from inside an except block must re-raise
    the in-flight ValueError unchanged.

    >>> test_reraise_indirect()
    Traceback (most recent call last):
    ValueError: TEST INDIRECT
    """
    try:
        raise ValueError("TEST INDIRECT")
    except ValueError:
        # reraise() (defined elsewhere) re-raises the current exception.
        reraise()
def do_rot13_on_input(input_string, ordered_radix=ordered_rot13_radix):
    """ Perform a rot13 encryption on the provided message.

    Characters present in *ordered_radix* are shifted 13 places (mod the
    radix length); all other characters pass through unchanged.
    """
    radix_size = len(ordered_radix)
    pieces = []
    for symbol in input_string:
        if symbol in ordered_radix:
            # Shift within the radix, wrapping around its length.
            shifted = (ordered_radix.index(symbol) + 13) % radix_size
            pieces.append(ordered_radix[shifted])
        else:
            pieces.append(symbol)
    return ''.join(pieces)
def pwr_y(x, a, b, e):
    """
    Evaluate a power law with an additive deviation term.

    Parameters
    ----------
    x : numeric
        Input to Power Law relation.
    a : numeric
        Constant.
    b : numeric
        Exponent.
    e : numeric
        Deviation term.

    Returns
    -------
    numeric
        Output of Power Law relation.

    Notes
    -----
    Power Law relation: :math:`y = a x^b + e`
    """
    power_term = a * x ** b
    return power_term + e
def test_get_serializer_class():
    """
    Test the serializer class used by the view.

    EmailVerificationView must hand out EmailVerificationSerializer.
    """
    view = views.EmailVerificationView()
    expected = serializers.EmailVerificationSerializer
    assert view.get_serializer_class() == expected
def file_update_projects(file_id):
    """ Page that allows users to interact with a single TMC file

    Synchronizes the file's project associations with the checkboxes
    submitted in the form (fields named "project_<uid>"), then redirects
    back to the single-file page.
    """
    this_file = TMCFile.query.filter_by(uid=file_id).first()
    project_form = AssignProjectsToFile()
    if project_form.validate_on_submit():
        # Collapse the posted form into {key: value-or-list-of-values}.
        data = dict((key, request.form.getlist(key) if len(
            request.form.getlist(key)) > 1 else request.form.getlist(key)[0])
                    for key in request.form.keys())
        # Checked projects arrive as keys named "project_<uid>".
        pid_list = []
        for k in data:
            if "project_" in k:
                pid_list.append(int(k.replace("project_", "")))
        # Make sure all selected projects are associated
        for pid in pid_list:
            project = Project.query.filter_by(uid=pid).first()
            if project not in this_file.project_ids:
                this_file.project_ids.append(project)
        # Remove association with unchecked projects.
        # BUGFIX: iterate over a snapshot -- the original removed items from
        # this_file.project_ids while iterating it, which silently skips the
        # element following each removal.
        for project in list(this_file.project_ids):
            if project.uid not in pid_list:
                this_file.project_ids.remove(project)
        db.session.commit()
        flash("Updated project associations", "success")
    return redirect(url_for("single_file_bp.single_file", file_id=file_id))
def find_optimal_cut(edge, edge1, left, right):
    """Computes the index corresponding to the optimal cut such that applying
    the function compute_blocks() to the sub-blocks defined by the cut reduces
    the cost function comparing to the case when the function compute_blocks() is
    applied to the whole matrix. If cutting point can not be find, the algorithm returns
    the result from the function compute_blocks().

    Parameters
    ----------
    edge : ndarray
        sparsity pattern profile of the matrix
    edge1 : ndarray
        conjugated sparsity pattern profile of the matrix
    left : int
        size of the leftmost diagonal block
    right : int
        size of the rightmost diagonal block

    Returns
    -------
    tuple
        (blocks, sep, right_block, left_block): the list of block sizes for
        the best cut, the separation index, and the sizes of the blocks
        adjacent to the cut.  When no candidate cut exists, returns
        ([left, right], nan, 0, 0).
    """
    # Candidate separation points between the fixed left and right blocks.
    unique_indices = np.arange(left, len(edge) - right + 1)

    blocks = []
    seps = []
    sizes = []
    metric = []

    size = len(edge)

    for j1, item1 in enumerate(unique_indices):

        seps.append(item1)
        item2 = size - item1
        # print(item1, item2)

        # print(item1)
        # Split both profiles at the candidate cut; the arithmetic rebases
        # the second halves so each side can be treated independently.
        edge_1 = edge[:item1]
        edge_2 = (edge1 - np.arange(len(edge1)))[item2:] + np.arange(item1)

        edge_3 = edge1[:item2]
        edge_4 = (edge - np.arange(len(edge)))[item1:] + np.arange(item2)

        block1 = compute_blocks(left, (edge1 - np.arange(len(edge)))[item2],
                                edge_1, edge_2)
        block2 = compute_blocks(right, (edge - np.arange(len(edge1)))[item1],
                                edge_3, edge_4)

        block = block1 + block2[::-1]
        blocks.append(block)
        # Cost model: sum of cubed block sizes (dense-inversion cost proxy).
        metric.append(np.sum(np.array(block) ** 3))
        sizes.append((block1[-1], block2[-1]))

    if len(metric) == 0:
        # No feasible cut: fall back to the trivial two-block partition.
        return [left, right], np.nan, 0, 0
    else:
        best = np.argmin(np.array(metric))
        blocks = blocks[best]
        blocks = [item for item in blocks if item != 0]
        sep = seps[best]
        right_block, left_block = sizes[best]

        return blocks, sep, right_block, left_block
def test_get_wikidata_csl_item_author_ordering():
    """
    Test extraction of author ordering from https://www.wikidata.org/wiki/Q50051684.
    Wikidata uses a "series ordinal" qualifier that must be considered or else author
    ordering may be wrong.

    Author ordering was previously not properly set by the Wikidata translator
    https://github.com/zotero/translators/issues/1790
    """
    # NOTE: performs a live Wikidata lookup; requires network access.
    wikidata_id = "Q50051684"
    csl_item = get_wikidata_csl_item(wikidata_id)
    family_names = [author["family"] for author in csl_item["author"]]
    print(family_names)
    assert family_names == [
        "Himmelstein",
        "Romero",
        "Levernier",
        "Munro",
        "McLaughlin",
        "Greshake",  # actually should be Greshake Tzovaras
        "Greene",
    ]
def seq_to_networkx(header, seq, constr=None):
    """Convert sequence tuples to networkx graphs.

    Builds a path graph with one node per sequence character (labelled with
    the character and its position) and '-' labelled edges between adjacent
    positions.  The header's first whitespace-separated token becomes the
    graph id; an optional constraint string is stored on the graph.
    """
    graph = nx.Graph()
    graph.graph['id'] = header.split()[0]
    graph.graph['header'] = header
    for id, character in enumerate(seq):
        graph.add_node(id, label=character, position=id)
        # Chain consecutive positions into a path.
        if id > 0:
            graph.add_edge(id - 1, id, label='-')
    assert(len(graph) > 0), 'ERROR: generated empty graph.\
    Perhaps wrong format?'
    graph.graph['sequence'] = seq
    if constr is not None:
        graph.graph['constraint'] = constr
    return graph
def add_vertex_edge_for_load_support(network, sup_dic, load_dic, bars_len, key_removed_dic):
    """
    Post-Processing Function:
    Adds vertices and edges in accordance with supports and loads
    returns the cured network

    Returns the (network, polygon, polyline) triple, where the polygon and
    polyline trace the outer boundary of the network's mesh.
    """
    # Merge support and load dictionaries; removed keys (if any) substitute
    # their own replacement loads.
    if not key_removed_dic:
        load_sup_dic=merge_two_dicts(sup_dic, load_dic)
    else:
        load_dic_2=load_dic.copy()
        for key in key_removed_dic:
            load_dic_2.pop(key)
            load_dic_2=merge_two_dicts(load_dic_2, key_removed_dic[key])
        load_sup_dic=merge_two_dicts(sup_dic, load_dic_2)

    # define arbitrary r to be added to get leaf vertex coordinates
    max_len=max(bars_len)
    r=max_len/3.0

    # make a polygon and polyline from outer vertices of network
    points = network.to_points()
    cycles = network_find_cycles(network)
    mesh = Mesh.from_vertices_and_faces(points, cycles)
    # Face 0 is the unbounded outer face; drop it when other faces exist.
    if 0 in mesh.face and len(mesh.face)>1:
        mesh.delete_face(0)
    if len(mesh.face)==1:
        ver_lis=[key for key in mesh.vertices()]
    else:
        ver_lis=mesh.vertices_on_boundary(ordered=True)

    # Close the polyline by repeating the first vertex.
    ver_lis_plyln=ver_lis[:]
    ver_lis_plyln.append(ver_lis[0])
    pt_lis_plygn=[mesh.vertex_coordinates(key) for key in ver_lis]
    pt_lis_plyln=[mesh.vertex_coordinates(key) for key in ver_lis_plyln]
    plygn=Polygon(pt_lis_plygn)
    plyln=Polyline(pt_lis_plyln)

    # add leaf vertices
    # For every loaded/supported node, extend a leaf edge of length r along
    # the axis of the load component; flip direction when the first guess
    # lands on or inside the boundary.
    for key in load_sup_dic:
        if load_sup_dic[key][0]!=0.0:
            pt_1=add_vectors(network.node_coordinates(key), (+r, 0.0, 0.0))
            plyln_bln=is_point_on_polyline(pt_1, plyln.points, tol=0.001)
            plygn_bln=is_point_in_polygon_xy(pt_1, plygn.points)
            if plyln_bln or plygn_bln:
                pt_1=add_vectors(network.node_coordinates(key), (-r, 0.0, 0.0))
            key_2=network.add_node(x=np.asscalar(pt_1[0]), y=pt_1[1], z=0.0)
            network.add_edge(key, key_2)
        if load_sup_dic[key][1]!=0.0:
            pt_2=add_vectors(network.node_coordinates(key), (0.0,+r, 0.0))
            plyln_bln=is_point_on_polyline(pt_2, plyln.points, tol=0.001)
            plygn_bln=is_point_in_polygon_xy(pt_2, plygn.points)
            if plyln_bln or plygn_bln:
                pt_2=add_vectors(network.node_coordinates(key), (0.0,-r, 0.0))
            key_2=network.add_node(x=pt_2[0], y=np.asscalar(pt_2[1]), z=0.0)
            network.add_edge(key, key_2)
    return network, plygn, plyln
def mimicry(span):
    """Enrich the match.

    Returns {'mimicry': <span text>} unless the span mentions the same
    sex word twice, in which case an empty dict signals no enrichment.
    """
    sexes_seen = set()
    for token in span:
        if token.ent_type_ not in {'female', 'male'}:
            continue
        if token.lower_ in sexes_seen:
            return {}
        sexes_seen.add(token.lower_)
    return {'mimicry': span.lower_}
def send_sms_code(mobile, sms_num, expires, temp_id):
    """
    Send an SMS verification code via the CCP gateway.

    :param mobile: phone number
    :param sms_num: verification code
    :param expires: validity period (passed to the SMS template)
    :param temp_id: SMS template id
    :return: None

    Failures are logged, never raised to the caller.
    """
    try:
        result = CCP().send_Template_sms(mobile, [sms_num, expires], temp_id)
    except Exception as e:
        # Gateway raised: log the exception and swallow it.
        logger.error("SMS verification code send [exception][ mobile: %s, message: %s ]" % (mobile, e))
    else:
        # CCP convention: result == 0 means the message was accepted.
        if result == 0:
            logger.info("SMS verification code send [ok][ mobile: %s sms_code: %s]" % (mobile, sms_num))
        else:
            logger.warning("SMS verification code send [failed][ mobile: %s ]" % mobile)
def LeftBinarySearch(nums, target):
    """
    Return the index of the first occurrence of target in the sorted list
    nums, or -1 if target is not present.

    :type nums: List[int]
    :type target: int
    :rtype: int
    """
    # The hand-rolled loop duplicated the standard library:
    # bisect_left already computes the leftmost insertion point in O(log n).
    from bisect import bisect_left
    low = bisect_left(nums, target)
    if low == len(nums) or nums[low] != target:
        return -1
    return low
def backup_file_content(jwd, filepath, content):
    """backs up a string in the .jak folder.

    Derives the backup location from *jwd* and *filepath* and writes
    *content* there, overwriting any previous backup.  Returns whatever
    create_or_overwrite_file returns.

    TODO Needs test
    """
    backup_filepath = create_backup_filepath(jwd=jwd, filepath=filepath)
    return create_or_overwrite_file(filepath=backup_filepath, content=content)
async def security_rule_get(
    hub, ctx, security_rule, security_group, resource_group, **kwargs
):
    """
    .. versionadded:: 1.0.0

    Get a security rule within a specified network security group.

    :param security_rule: The name of the security rule to query.

    :param security_group: The network security group containing the
        security rule.

    :param resource_group: The resource group name assigned to the
        network security group.

    Returns the rule as a dict, or ``{"error": ...}`` on a cloud error.

    CLI Example:

    .. code-block:: bash

        azurerm.network.network_security_group.security_rule_get testrule1 testnsg testgroup

    """
    netconn = await hub.exec.azurerm.utils.get_client(ctx, "network", **kwargs)
    try:
        secrule = netconn.security_rules.get(
            network_security_group_name=security_group,
            resource_group_name=resource_group,
            security_rule_name=security_rule,
        )
        result = secrule.as_dict()
    except CloudError as exc:
        # Errors are logged and reported in-band rather than raised.
        await hub.exec.azurerm.utils.log_cloud_error("network", str(exc), **kwargs)
        result = {"error": str(exc)}

    return result
def create_centroid_pos(Direction, Spacing, Size, position):
    # dim0, dim1,dim2, label):
    """
    Convert voxel-index centroids to physical coordinates, honouring the
    image orientation.

    :param Direction,Spacing, Size: from sitk raw.GetDirection(),GetSpacing(),GetSize()
    :param position:[24,3] voxel-index centroid per label; a negative dim0
        marks a missing label (skipped).
    :return: list of dicts {axis_char: coordinate, ..., 'label': i+1}, one
        per present label.  Axis characters come from the module-level Dic
        mapping -- presumably anatomical axis names; confirm against Dic.
    """
    # Each column of the 3x3 direction matrix identifies one physical axis.
    direction = np.round(list(Direction))
    direc0 = direction[0:7:3]
    direc1 = direction[1:8:3]
    direc2 = direction[2:9:3]
    dim0char = Dic[(np.argwhere((np.abs(direc0)) == 1))[0][0]]
    dim1char = Dic[(np.argwhere((np.abs(direc1)) == 1))[0][0]]
    dim2char = Dic[(np.argwhere((np.abs(direc2)) == 1))[0][0]]
    resolution = Spacing
    w, h, c = Size[0], Size[1], Size[2]
    jsonlist = []
    for i in range(24):
        # NOTE: 1-element slices keep numpy array semantics (shape (1,)).
        dim0, dim1, dim2 = position[i:i + 1, 0], position[i:i + 1, 1], position[i:i + 1, 2]
        if dim0 >= 0:
            label = i + 1
            # For each axis: use the index directly or flip it against the
            # extent, depending on the axis sign and whether it maps to 'X'.
            # NOTE(review): the three near-identical ladders below differ
            # only in (extent, resolution index); candidates for a helper.
            if np.sum(direc0) == -1:
                if dim0char == 'X':
                    Jsondim0 = dim0 * resolution[0]
                else:
                    Jsondim0 = (w - dim0) * resolution[0]
            else:
                if dim0char == 'X':
                    Jsondim0 = (w - dim0) * resolution[0]
                else:
                    Jsondim0 = dim0 * resolution[0]
            if np.sum(direc1) == -1:
                if dim1char == 'X':
                    Jsondim1 = dim1 * resolution[1]
                else:
                    Jsondim1 = (h - dim1) * resolution[1]
            else:
                if dim1char == 'X':
                    Jsondim1 = (h - dim1) * resolution[1]
                else:
                    Jsondim1 = dim1 * resolution[1]
            if np.sum(direc2) == -1:
                if dim2char == 'X':
                    Jsondim2 = dim2 * resolution[2]
                else:
                    Jsondim2 = (c - dim2) * resolution[2]
            else:
                if dim2char == 'X':
                    Jsondim2 = (c - dim2) * resolution[2]
                else:
                    Jsondim2 = dim2 * resolution[2]
            jsonlist.append({dim0char: Jsondim0, dim1char: Jsondim1, dim2char: Jsondim2, 'label': label})
    return jsonlist
def group_split_data_cv(df, cv=5, split=0):
    """
    Group-aware train/test split: fold `split` of a `cv`-fold GroupKFold
    grouped by df['arpnum'].

    Args:
        df: DataFrame with an 'arpnum' column identifying the groups.
        cv: number of cv folds
        split: index of the cv fold to return
    Returns:
        (train_df, test_df) for the requested fold.
    Raises:
        ValueError: if split is not in [0, cv).  The original silently
            returned None in that case, which crashed callers later.
    Note that GroupKFold is not random
    """
    if not 0 <= split < cv:
        raise ValueError("split must be in [0, {}), got {}".format(cv, split))
    from sklearn.model_selection import GroupKFold
    splitter = GroupKFold(n_splits=cv)
    split_generator = splitter.split(df, groups=df['arpnum'])
    for k, (train_idx, test_idx) in enumerate(split_generator):
        if k == split:
            return df.iloc[train_idx], df.iloc[test_idx]
def encode(valeur,base):
    """ int*int -->String

    Encode *valeur* as a fixed-width 8-digit string in the given base
    (base at most 16; digits above 9 use the letters 'A'-'F').

    Returns the empty string when valeur is outside [0, 255].
    """
    # BUGFIX: the original tested `calcul==10` .. `calcul==15` with
    # independent `if` statements whose final `else` belonged only to the
    # `==15` test, so digits 10-14 were overwritten with their decimal
    # value ('10'..'14') instead of 'A'..'E'.  A digit lookup string fixes
    # this and removes the ladder entirely.
    digits = "0123456789ABCDEF"
    if valeur > 255 or valeur < 0:
        return ""
    chaine = ""
    # Eight fixed digit positions, least significant first (prepended).
    for _ in range(8):
        chaine = digits[valeur % base] + chaine
        valeur = valeur // base
    return chaine
def main(request):
    """
    Main admin page.

    Displayes a paginated list of files configured source directory (sorted by
    most recently modified) to be previewed, published, or prepared for
    preview/publish.
    """
    # get sorted archive list for this user
    try:
        archives = request.user.archivist.sorted_archives()
    except ObjectDoesNotExist:
        # i.e. no user -> archivist association
        # Superusers see everything; anyone else sees nothing.
        if request.user.is_superuser:
            archives = Archive.objects.all()
        else:
            archives = []
    # get current tab if set in session; default to first tab
    current_tab = request.session.get('active_admin_tab', 0)
    # files for publication now loaded in jquery ui tab via ajax
    # get the 10 most recent task results to display status
    recent_tasks = TaskResult.objects.order_by('-created')[:10]
    # absolute path to login, for use in javascript if timeout occurs
    login_url = request.build_absolute_uri(settings.LOGIN_URL)
    return render(request, 'fa_admin/index.html', {
        'archives': archives,
        'current_tab': current_tab,
        'login_url': login_url,
        'task_results': recent_tasks})
def parse_char(char, invert=False):
    """Return symbols depending on the binary input

    Keyword arguments:
    char -- binary integer streamed into the function (0 or 1)
    invert -- boolean to invert returned symbols

    Returns '.' for 0 and '@' for 1 (swapped when invert is true) and
    None for any other char value.
    """
    # The original compared `invert == False` (an anti-pattern) and used
    # two separate if-ladders that the non-inverted path could fall
    # through; a single ladder with conditional expressions is equivalent.
    if char == 0:
        return '@' if invert else '.'
    if char == 1:
        return '.' if invert else '@'
    return None
def test_entities(
    reference_data: np.ndarray,
    upper_bound: np.ndarray,
    lower_bound: np.ndarray,
    ishan: Entity,
) -> None:
    """Test that the n_entities works for SEPTs"""
    # A single-entity tensor must report exactly one entity.
    tensor = SEPT(
        child=reference_data, max_vals=upper_bound, min_vals=lower_bound, entity=ishan
    )
    assert isinstance(tensor, SEPT)
    assert tensor.n_entities == 1
def remote_connect(rname, rhost):
    """供master调用的接口:进行远程的rpc连接

    Interface for the master to call: establish a remote RPC connection.
    Delegates to the process-wide GlobalObject singleton.
    """
    GlobalObject().remote_connect(rname, rhost)
def run_blast(database, program, filestore, file_uuid, sequence, options):
    """
    Run a BLAST search of the given query against the given database.

    Args:
        database: The database to search (full path).
        program: The program to use (e.g. BLASTN, TBLASTN, BLASTX).
        filestore: The directory in which the XML output is stored.
        file_uuid: A unique identifier used to name the output file.
        sequence: The sequence to BLAST (FASTA text, fed on stdin).
        options: Mapping of extra options to pass to the BLAST executable.

    Returns:
        A tuple (stdout, stderr) produced by the BLAST process.
    """
    # Options callers may not override: they are either fixed below or
    # unsupported in this context.
    blocked_options = [
        '-db',
        '-query',
        '-out',
        '-subject',
        '-html',
        '-gilist',
        '-negative_gilist',
        '-entrez_query',
        '-remote',
        '-outfmt',
        '-num_threads',
        '-import_search_strategy',
        '-export_search_strategy',
        '-window_masker_db',
        '-index_name',
        '-use_index',
    ]

    # Base command: XML output (outfmt 5), query on stdin, capped hit count.
    command = [
        program, '-db', database, '-outfmt', '5', '-query', '-',
        '-out', "{0}{1}.xml".format(filestore, file_uuid),
        '-max_target_seqs', '50',
    ]
    command.extend(parse_extra_options(options, blocked_options))

    # Stream the query via stdin and capture both output channels.
    process = subprocess.Popen(
        command,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        bufsize=-1,
    )
    stdout, stderr = process.communicate(sequence)
    return (stdout, stderr)
def get_transformer_dim(transformer_name='affine'):
    """Return the size of the parametrization for a given transformer.

    Parameters
    ----------
    transformer_name : str
        One of 'affine', 'affinediffeo', 'homografy', 'CPAB' or 'TPS'.

    Returns
    -------
    int
        Number of parameters of the chosen transformer.

    Raises
    ------
    AssertionError
        If ``transformer_name`` is not a known transformer.
    """
    # Static parametrization sizes. CPAB is handled separately below so the
    # (potentially expensive) load_basis() call only happens when CPAB is
    # actually requested -- the original built it eagerly for every call.
    lookup = {'affine': 6,
              'affinediffeo': 6,
              'homografy': 9,
              'TPS': 32,
              }
    valid_names = list(lookup.keys()) + ['CPAB']
    assert (transformer_name in valid_names), 'Transformer not found, choose between: ' \
        + ', '.join(valid_names)
    if transformer_name == 'CPAB':
        return load_basis()['d']
    return lookup[transformer_name]
def ListVfses(client_urns):
    """Lists all known paths for a list of clients.

    Args:
      client_urns: A list of `ClientURN` instances.

    Returns:
      A list of `RDFURN` instances corresponding to VFS paths of given clients.
    """
    # Seed the frontier with the standard VFS roots of every client.
    roots = ["fs/os", "fs/tsk", "temp", "registry"]
    frontier = set()
    for client_urn in client_urns:
        frontier.update(client_urn.Add(root) for root in roots)

    vfs = set()
    # Breadth-first expansion: list children of the current frontier until
    # no new paths are discovered.
    while frontier:
        discovered = []
        for _, children in aff4.FACTORY.MultiListChildren(frontier):
            discovered.extend(children)
        vfs.update(discovered)
        frontier = discovered
    return vfs
def test_create_one_dimensional():
    """Tessellate a (finite) line."""
    tessellation = pqca.tessellation.one_dimensional(10, 2)
    expected = "Tessellation(10 qubits as 5 cells, first cell: [0, 1])"
    assert str(tessellation) == expected
def delete_product(productId):
    """Delete the product identified by ``productId`` and return the result."""
    return product2.delete_product(productId)
def compute_face_normals(points, trilist):
    """
    Compute per-face normals of the vertices given a list of faces.

    Parameters
    ----------
    points : (N, 3) float32/float64 ndarray
        The list of points to compute normals for.
    trilist : (M, 3) int16/int32/int64 ndarray
        The list of faces (triangle list).

    Returns
    -------
    face_normal : (M, 3) float32/float64 ndarray
        The normal per face.
    """
    # Gather the three vertices of every triangle: shape (M, 3, 3).
    tri_pts = points[trilist]
    # Two edge vectors per triangle; their cross product is the face normal.
    edge1 = tri_pts[:, 1] - tri_pts[:, 0]
    edge2 = tri_pts[:, 2] - tri_pts[:, 0]
    return _normalize(np.cross(edge1, edge2))
def get_deletion_confirmation(poll):
    """Get the confirmation keyboard for poll deletion."""
    locale = poll.user.locale

    # Callback payloads encoding the action, the poll id, and a dummy index.
    delete_payload = f"{CallbackType.delete.value}:{poll.id}:0"
    delete_all_payload = f"{CallbackType.delete_poll_with_messages.value}:{poll.id}:0"

    delete_button = InlineKeyboardButton(
        i18n.t("keyboard.permanently_delete", locale=locale),
        callback_data=delete_payload,
    )
    delete_all_button = InlineKeyboardButton(
        i18n.t("keyboard.permanently_delete_with_messages", locale=locale),
        callback_data=delete_all_payload,
    )

    # One button per row, with a back button at the bottom.
    rows = [
        [delete_button],
        [delete_all_button],
        [get_back_to_management_button(poll)],
    ]
    return InlineKeyboardMarkup(rows)
def ngram_tokenizer(lines, ngram_len=DEFAULT_NGRAM_LEN, template=False):
    """
    Return an iterable of ngram Tokens of ngram length `ngram_len` computed from
    the `lines` iterable of UNICODE strings. Treat the `lines` strings as
    templated if `template` is True.
    """
    if not lines:
        # Honor the documented contract by returning an (empty) iterable
        # rather than None, so callers can always iterate the result.
        return iter(())
    # Pipeline: unigrams -> ngram grouping -> ngram Tokens.
    ngrams = unigram_tokenizer(lines, template)
    ngrams = tokens_ngram_processor(ngrams, ngram_len)
    return ngram_to_token(ngrams)
def list_datasets(service, project_id):
    """Lists BigQuery datasets.

    Args:
        service: BigQuery service object that is authenticated.
            Example: service = build('bigquery','v2', http=http)
        project_id: string, Name of Google project

    Returns:
        List containing dataset names
    """
    response = service.datasets().list(projectId=project_id).execute()
    # Each entry carries its name under datasetReference.datasetId.
    return [entry['datasetReference']['datasetId']
            for entry in response['datasets']]
def tors(universe, seg, i):
    """Calculation of nucleic backbone dihedral angles.

    The dihedral angles are alpha, beta, gamma, delta, epsilon, zeta, chi.
    The dihedral is computed based on position of atoms for resid `i`.

    Parameters
    ----------
    universe : Universe
        :class:`~MDAnalysis.core.universe.Universe` containing the trajectory
    seg : str
        segment id for base
    i : int
        resid of the first base

    Returns
    -------
    [alpha, beta, gamma, delta, epsilon, zeta, chi] : list of floats
        torsion angles in degrees

    Notes
    -----
    If failure occurs be sure to check the segment identification.

    .. versionadded:: 0.7.6
    """
    # alpha: O3'(i-1) -- P(i) -- O5'(i) -- C5'(i)
    a = universe.select_atoms(" atom {0!s} {1!s} O3\' ".format(seg, i - 1),
                              " atom {0!s} {1!s} P  ".format(seg, i),
                              " atom {0!s} {1!s} O5\' ".format(seg, i),
                              " atom {0!s} {1!s} C5\' ".format(seg, i))
    # beta: P -- O5' -- C5' -- C4' (all within resid i)
    b = universe.select_atoms(" atom {0!s} {1!s} P    ".format(seg, i),
                              " atom {0!s} {1!s} O5\' ".format(seg, i),
                              " atom {0!s} {1!s} C5\' ".format(seg, i),
                              " atom {0!s} {1!s} C4\' ".format(seg, i))
    # gamma: O5' -- C5' -- C4' -- C3'
    g = universe.select_atoms(" atom {0!s} {1!s} O5\' ".format(seg, i),
                              " atom {0!s} {1!s} C5\' ".format(seg, i),
                              " atom {0!s} {1!s} C4\' ".format(seg, i),
                              " atom {0!s} {1!s} C3\' ".format(seg, i))
    # delta: C5' -- C4' -- C3' -- O3'
    d = universe.select_atoms(" atom {0!s} {1!s} C5\' ".format(seg, i),
                              " atom {0!s} {1!s} C4\' ".format(seg, i),
                              " atom {0!s} {1!s} C3\' ".format(seg, i),
                              " atom {0!s} {1!s} O3\' ".format(seg, i))
    # epsilon: C4'(i) -- C3'(i) -- O3'(i) -- P(i+1)
    e = universe.select_atoms(" atom {0!s} {1!s} C4\' ".format(seg, i),
                              " atom {0!s} {1!s} C3\' ".format(seg, i),
                              " atom {0!s} {1!s} O3\' ".format(seg, i),
                              " atom {0!s} {1!s} P    ".format(seg, i + 1))
    # zeta: C3'(i) -- O3'(i) -- P(i+1) -- O5'(i+1)
    z = universe.select_atoms(" atom {0!s} {1!s} C3\' ".format(seg, i),
                              " atom {0!s} {1!s} O3\' ".format(seg, i),
                              " atom {0!s} {1!s} P    ".format(seg, i + 1),
                              " atom {0!s} {1!s} O5\' ".format(seg, i + 1))
    # chi (glycosidic): O4' -- C1' -- N9 -- C4
    c = universe.select_atoms(" atom {0!s} {1!s} O4\' ".format(seg, i),
                              " atom {0!s} {1!s} C1\' ".format(seg, i),
                              " atom {0!s} {1!s} N9 ".format(seg, i),
                              " atom {0!s} {1!s} C4 ".format(seg, i))
    # Fewer than 4 atoms means the N9/C4 selection failed; fall back to
    # N1/C2 -- presumably for bases lacking an N9 atom (pyrimidines);
    # verify against the topology in use.
    if len(c) < 4:
        c = universe.select_atoms(" atom {0!s} {1!s} O4\' ".format(seg, i),
                                  " atom {0!s} {1!s} C1\' ".format(seg, i),
                                  " atom {0!s} {1!s} N1 ".format(seg, i),
                                  " atom {0!s} {1!s} C2 ".format(seg, i))

    # Wrap every dihedral into the [0, 360) degree range.
    alpha = a.dihedral.value() % 360
    beta = b.dihedral.value() % 360
    gamma = g.dihedral.value() % 360
    delta = d.dihedral.value() % 360
    epsilon = e.dihedral.value() % 360
    zeta = z.dihedral.value() % 360
    chi = c.dihedral.value() % 360
    return [alpha, beta, gamma, delta, epsilon, zeta, chi]
def get_metric(metric, midi_notes, Fe, nfft, nz=1e4, eps=10, **kwargs):
    """
    Return the optimal transport loss matrix from a list of midi notes.

    Parameters
    ----------
    metric : str
        'square' for squared distance to the note frequency, or 'psquare'
        for squared distance to the nearest harmonic with a per-harmonic
        penalty ``eps``.
    midi_notes : list of int
        MIDI note numbers (integer indexes); 0 is treated as silence for
        the 'psquare' metric.
    Fe : float
        Sampling frequency.
    nfft : int
        FFT size; only the nfft // 2 positive-frequency bins are used.
    nz : float
        Cost assigned to the silence column ('psquare' only).
    eps : float
        Additional per-harmonic penalty ('psquare' only).

    Returns
    -------
    res : ndarray of shape (nfft // 2, len(midi_notes))
        Loss matrix (one column per note).
    f : ndarray
        Frequencies of the positive FFT bins.
    """
    nbnotes = len(midi_notes)
    # Use floor division: the original `nfft/2` produced a float on
    # Python 3 and made np.zeros/slicing fail.
    half = nfft // 2
    res = np.zeros((half, nbnotes))
    f = np.fft.fftfreq(nfft, 1.0 / Fe)[:half]
    # Frequency of each midi note. NOTE(review): the reference used here is
    # 440 Hz at midi note 60, as in the original code -- confirm intended.
    f_note = [2.0 ** ((n - 60) * 1. / 12) * 440 for n in midi_notes]
    for i in range(nbnotes):
        m = np.zeros((half,))
        if metric == 'square':
            m = (f_note[i] - f) ** 2
        elif metric == 'psquare':
            if midi_notes[i] == 0:
                # Silence column: constant cost nz everywhere.
                m[:] = nz
            else:
                # Cost is the squared distance to the nearest harmonic
                # j * f_note, penalized by j * eps for higher harmonics.
                nmax = int(f.max() / f_note[i])
                m[:] = np.inf
                for j in range(1, nmax + 1):
                    m = np.minimum(m, (j * f_note[i] - f) ** 2 + j * eps)
        res[:, i] = m
    return res, f
def octave(track, note, dur):
    """Append a two-note octave (root plus the note 12 semitones above)
    to the track and return it."""
    upper = note + 12
    track.append(Message('note_on', note=note, velocity=100, time=0))
    track.append(Message('note_on', note=upper, velocity=100, time=0))
    # The note-off of the root carries the duration; the upper note ends
    # at the same instant (time=0 after the root's note-off).
    track.append(Message('note_off', note=note, velocity=64, time=dur))
    track.append(Message('note_off', note=upper, velocity=64, time=0))
    return track
def test_hist_2d_against_matlab():
    """
    Testing for 2d sample_vec as that's the one only needed in kde2d.
    Reference solution: kde2d.m function binned_sample=hist_2d(sample_vec,M)
    """
    xs = [0.11, 0.21, 0.31, 0.31, 0.21]
    ys = [0.61, 0.31, 0.91, 0.91, 0.31]
    sample = np.column_stack((xs, ys))
    num_bins = 5
    expected = np.array(
        [
            [0.0, 0.0, 0.0, 0.2, 0.0],
            [0.0, 0.4, 0.0, 0.0, 0.4],
            [0.0, 0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0],
        ]
    )
    np.testing.assert_allclose(hist_2d(sample, num_bins), expected)
def solveTrajectoryPickle(dir_path, file_name, only_plot=False, solver='original', **kwargs):
    """ Rerun the trajectory solver on the given trajectory pickle file.

    Arguments:
        dir_path: [str] Path to the directory with the pickle file.
        file_name: [str] Name of the trajectory pickle file.

    Keyword arguments:
        only_plot: [bool] If True, only regenerate the plots instead of
            recomputing the trajectory.
        solver: [str] Trajectory solver to use: 'original' or 'gural'.
        kwargs: Additional keyword arguments passed to the solver.

    Return:
        traj: The reinitialized (and, unless only_plot is set, solved)
            trajectory object.

    Raises:
        ValueError: If `solver` is not a supported solver name.
    """

    # Load the pickled trajectory
    traj_p = loadPickle(dir_path, file_name)

    # Run the PyLIG trajectory solver
    if solver == 'original':

        # Given the max time offset from the pickle file and input, use the larger one of the two
        max_toffset = traj_p.max_toffset
        if "max_toffset" in kwargs:

            if (kwargs["max_toffset"] is not None) and (traj_p.max_toffset is not None):
                max_toffset = max(traj_p.max_toffset, kwargs["max_toffset"])

            # Remove the max time offset from the list of keyword arguments
            kwargs.pop("max_toffset", None)

        # Preserve the trajectory ID
        if hasattr(traj_p, "traj_id"):
            traj_id = traj_p.traj_id
        else:
            traj_id = None

        # Reinitialize the trajectory solver
        meastype = 2
        traj = Trajectory(traj_p.jdt_ref, output_dir=dir_path, max_toffset=max_toffset, \
            meastype=meastype, traj_id=traj_id, **kwargs)

        # Fill the observations
        for obs in traj_p.observations:
            traj.infillWithObs(obs, meastype=meastype)

    elif solver == 'gural':

        # Init the Gural solver
        traj = GuralTrajectory(len(traj_p.observations), traj_p.jdt_ref, velmodel=3, \
            max_toffset=traj_p.max_toffset, meastype=2, output_dir=dir_path, verbose=True)

        # Fill the observations
        for obs in traj_p.observations:
            traj.infillTrajectory(obs.azim_data, obs.elev_data, obs.time_data, obs.lat, obs.lon, obs.ele)

    else:
        # The original code only printed a warning here and then crashed
        # later with an UnboundLocalError on `traj`; fail fast instead.
        raise ValueError("Unrecognized solver: {:s}".format(str(solver)))

    if only_plot:

        # Set saving results
        traj_p.save_results = True

        # Override plotting options with given options
        traj_p.plot_all_spatial_residuals = kwargs["plot_all_spatial_residuals"]
        traj_p.plot_file_type = kwargs["plot_file_type"]

        # Show the plots
        traj_p.savePlots(dir_path, traj_p.file_name, show_plots=kwargs["show_plots"])

    # Recompute the trajectory
    else:

        # Run the trajectory solver
        traj = traj.run()

    return traj
def check_arguments(funcdef, args, kw):
    """Check if some arguments are missing"""
    # Positional arguments are not supported here.
    assert len(args) == 0
    for argument in funcdef.arguments:
        is_missing = argument.mandatory and argument.name not in kw
        if is_missing:
            raise MissingArgument(argument.name)
def test_date(date_string):
"""Test date string
:param str date_string: Date string
"""
try:
datetime.strptime(date_string, '%Y-%m-%d')
except ValueError:
raise ValueError("Incorrect date format, should be YYYY-MM-DD")
exit(1) | 5,342,798 |
def load_data(path):
    """Load raw EEG data from a .mat file.

    :param path: Path to the .mat file.
    :return: Tuple (X, labels) where X is the 'X_3D' array with its axes
        reversed via transpose(2, 1, 0) and labels is 'categoryLabels'
        transposed to column form.
    """
    mat = scio.loadmat(path)
    eeg = mat['X_3D'].transpose(2, 1, 0)
    labels = mat['categoryLabels'].transpose(1, 0)
    return eeg, labels
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.