content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
from Interpreter import Interpreter, InfiniteLoopException
if __name__ == '__main__':
with open("test2.txt") as file:
lines = file.read().splitlines()
part2(lines)
| [
6738,
4225,
3866,
353,
1330,
4225,
3866,
353,
11,
22380,
39516,
16922,
628,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
351,
1280,
7203,
9288,
17,
13,
14116,
4943,
355,
2393,
25,
198,
220,
... | 2.661972 | 71 |
from os import path
from shutil import rmtree
from tempfile import mkdtemp
from unittest import TestCase, main as unittest_main
from trax import layers as tl
from trax.optimizers import adafactor
from ml_params_trax.example_model import get_model
from ml_params_trax.ml_params_impl import TraxTrainer
if __name__ == '__main__':
unittest_main()
| [
6738,
28686,
1330,
3108,
198,
6738,
4423,
346,
1330,
374,
16762,
631,
198,
6738,
20218,
7753,
1330,
33480,
67,
29510,
198,
6738,
555,
715,
395,
1330,
6208,
20448,
11,
1388,
355,
555,
715,
395,
62,
12417,
198,
198,
6738,
1291,
87,
1330... | 3.017094 | 117 |
import pytest
from uranium.packages.versions import Versions
from packaging.specifiers import SpecifierSet
@pytest.fixture
def test_versions_coerce_lowercase(versions):
"""
coerce package names to lowercase
"""
versions["SQLAlchemy"] = "==1.0.11"
assert versions["sqlalchemy"] == "==1.0.11"
def test_versions_coerce_lowercase_update(versions):
"""
coerce package names to lowercase for update.
"""
versions.update({"SQLAlchemy": "==1.0.11"})
assert versions["sqlalchemy"] == "==1.0.11"
def test_versions_coerce_lowercase_get(versions):
"""
getting a package name should also be lowercase.
"""
versions.update({"SQLAlchemy": "==1.0.11"})
assert versions["SQLAlchemy"] == "==1.0.11"
def test_delete_coerce_lowercase(versions):
"""
getting a package name should also be lowercase.
"""
versions.update({"SQLAlchemy": "==1.0.11"})
del versions["SQLAlchemy"]
def test_and_operator(versions):
"""
the and operator should work for dictionaries
"""
versions["foo"] = ">1.0"
versions &= {"foo": "<1.1"}
assert SpecifierSet(versions["foo"]) == SpecifierSet(">1.0,<1.1")
| [
11748,
12972,
9288,
198,
6738,
22010,
13,
43789,
13,
47178,
1330,
18535,
507,
198,
6738,
16846,
13,
16684,
13350,
1330,
18291,
7483,
7248,
628,
198,
31,
9078,
9288,
13,
69,
9602,
628,
198,
4299,
1332,
62,
47178,
62,
1073,
263,
344,
62... | 2.664399 | 441 |
from typing import Optional
import numpy as np
from gbart.modified_bartpy.model import Model
from gbart.modified_bartpy.mutation import TreeMutation
from gbart.modified_bartpy.samplers.treemutation.proposer import TreeMutationProposer
from gbart.modified_bartpy.samplers.treemutation.likihoodratio import TreeMutationLikihoodRatio
from gbart.modified_bartpy.tree import Tree, mutate
from gbart.modified_bartpy.samplers.sampler import Sampler
class TreeMutationSampler(Sampler):
"""
A sampler for tree mutation space.
Responsible for producing samples of ways to mutate a tree within a model
Works by combining a proposer and likihood evaluator into:
- propose a mutation
- assess likihood
- accept if likihood higher than a uniform(0, 1) draw
Parameters
----------
proposer: TreeMutationProposer
likihood_ratio: TreeMutationLikihoodRatio
"""
| [
6738,
19720,
1330,
32233,
198,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
308,
16575,
13,
41771,
62,
16575,
9078,
13,
19849,
1330,
9104,
198,
6738,
308,
16575,
13,
41771,
62,
16575,
9078,
13,
76,
7094,
1330,
12200,
44,
7094,
... | 3.1 | 290 |
a1,b1,c1=input().split()
a2,b2,c2=input().split()
a1=int(a1)
a2=int(a2)
b1=int(b1)
b2=int(b2)
c1=int(c1)
c2=int(c2)
if a1==0 and a2==0:
a1==1
a2==1
if b1==0 and b2==0:
b1=1
b2=1
if a1==0 and b2==0:
a1=1
b2=1
if a2==0 and b1==0:
a2=1
b1=1
if c1==0 and c2==0:
c1=1
c2=1
if ((a1%a2==0)or(a2%a1==0))and((b1%b2==0)or(b2%b1==0))and((c1%c2==0)or(c2%c1==0)):
print("many solutions")
elif (((-1*a1)/b1)==((-1*a2)/b2))and c1==c2:
print("many solutions")
elif ((-1*a1)/b1)==((-1*a2)/b2)and c1!=c2:
print("no solution")
else:
print("one solution")
| [
64,
16,
11,
65,
16,
11,
66,
16,
28,
15414,
22446,
35312,
3419,
198,
64,
17,
11,
65,
17,
11,
66,
17,
28,
15414,
22446,
35312,
3419,
198,
64,
16,
28,
600,
7,
64,
16,
8,
198,
64,
17,
28,
600,
7,
64,
17,
8,
198,
65,
16,
28,
... | 1.47032 | 438 |
#! /usr/env/python
"""Component that models 2D diffusion using an explicit finite-volume method.
Created July 2013 GT Last updated March 2016 DEJH with LL v1.0 component
style
"""
import numpy as np
from landlab import Component, FieldError, LinkStatus, NodeStatus, RasterModelGrid
_ALPHA = 0.15 # time-step stability factor
# ^0.25 not restrictive enough at meter scales w S~1 (possible cases)
class LinearDiffuser(Component):
"""This component implements linear diffusion of a Landlab field.
Component assumes grid does not deform. If the boundary conditions on the
grid change after component instantiation, be sure to also call
:func:`updated_boundary_conditions` to ensure these are reflected in the
component (especially if fixed_links are present).
The method keyword allows control of the way the solver works. Options
other than 'simple' will make the component run slower, but give second
order solutions that incorporate information from more distal nodes (i.e.,
the diagonals). Note that the option 'resolve_on_patches' can result in
somewhat counterintuitive behaviour - this option tells the component to
treat the diffusivity as a field **with directionality to it** (i.e., that
the diffusivites are defined on links). Thus if all links have the same
diffusivity value, with this flag active "effective" diffusivities
at the nodes will be *higher* than this value (by a factor of root 2) as
the diffusivity at each patch will be the mean vector sum of that at the
bounding links.
The primary method of this class is :func:`run_one_step`.
Examples
--------
>>> from landlab import RasterModelGrid
>>> import numpy as np
>>> mg = RasterModelGrid((9, 9))
>>> z = mg.add_zeros("topographic__elevation", at="node")
>>> z.reshape((9, 9))[4, 4] = 1.
>>> mg.set_closed_boundaries_at_grid_edges(True, True, True, True)
>>> ld = LinearDiffuser(mg, linear_diffusivity=1.)
>>> for i in range(1):
... ld.run_one_step(1.)
>>> np.isclose(z[mg.core_nodes].sum(), 1.)
True
>>> mg2 = RasterModelGrid((5, 30))
>>> z2 = mg2.add_zeros("topographic__elevation", at="node")
>>> z2.reshape((5, 30))[2, 8] = 1.
>>> z2.reshape((5, 30))[2, 22] = 1.
>>> mg2.set_closed_boundaries_at_grid_edges(True, True, True, True)
>>> kd = mg2.node_x/mg2.node_x.mean()
>>> ld2 = LinearDiffuser(mg2, linear_diffusivity=kd)
>>> for i in range(10):
... ld2.run_one_step(0.1)
>>> z2[mg2.core_nodes].sum() == 2.
True
>>> z2.reshape((5, 30))[2, 8] > z2.reshape((5, 30))[2, 22]
True
An example using links:
>>> mg1 = RasterModelGrid((10, 10), xy_spacing=100.)
>>> mg2 = RasterModelGrid((10, 10), xy_spacing=100.)
>>> z1 = mg1.add_zeros("topographic__elevation", at="node")
>>> z2 = mg2.add_zeros("topographic__elevation", at="node")
>>> dt = 1.
>>> nt = 10
>>> kappa_links = mg2.add_ones("surface_water__discharge", at="link")
>>> kappa_links *= 10000.
>>> dfn1 = LinearDiffuser(mg1, linear_diffusivity=10000.)
>>> dfn2 = LinearDiffuser(mg2, linear_diffusivity='surface_water__discharge')
>>> for i in range(nt):
... z1[mg1.core_nodes] += 1.
... z2[mg2.core_nodes] += 1.
... dfn1.run_one_step(dt)
... dfn2.run_one_step(dt)
>>> np.allclose(z1, z2)
True
>>> z2.fill(0.)
>>> dfn2 = LinearDiffuser(mg2, linear_diffusivity='surface_water__discharge',
... method='resolve_on_patches')
>>> for i in range(nt):
... z2[mg2.core_nodes] += 1.
... dfn2.run_one_step(dt)
>>> np.all(z2[mg2.core_nodes] < z1[mg2.core_nodes])
True
References
----------
**Required Software Citation(s) Specific to this Component**
None Listed
**Additional References**
Culling, W. (1963). Soil Creep and the Development of Hillside Slopes.
The Journal of Geology 71(2), 127-161. https://dx.doi.org/10.1086/626891
"""
_name = "LinearDiffuser"
_unit_agnostic = True
_info = {
"hillslope_sediment__unit_volume_flux": {
"dtype": float,
"intent": "out",
"optional": False,
"units": "m**2/s",
"mapping": "link",
"doc": "Volume flux per unit width along links",
},
"topographic__elevation": {
"dtype": float,
"intent": "inout",
"optional": False,
"units": "m",
"mapping": "node",
"doc": "Land surface topographic elevation",
},
"topographic__gradient": {
"dtype": float,
"intent": "out",
"optional": False,
"units": "-",
"mapping": "link",
"doc": "Gradient of the ground surface",
},
}
def __init__(self, grid, linear_diffusivity=0.01, method="simple", deposit=True):
"""
Parameters
----------
grid : ModelGrid
A grid.
linear_diffusivity : float, array, or field name (m**2/time)
The diffusivity. If an array or field name, these must be the
diffusivities on either nodes or links - the component will
distinguish which based on array length. Values on nodes will be
mapped to links using an upwind scheme in the simple case.
method : {'simple', 'resolve_on_patches', 'on_diagonals'}
The method used to represent the fluxes. 'simple' solves a finite
difference method with a simple staggered grid scheme onto the links.
'resolve_on_patches' solves the scheme by mapping both slopes and
diffusivities onto the patches and solving there before resolving
values back to the nodes (and at the moment requires a raster grid).
Note that this scheme enforces directionality in the diffusion field;
it's no longer just a scalar field. Thus diffusivities must be defined
*on links* when this option is chosen.
'on_diagonals' permits Raster diagonals to carry the diffusional
fluxes. These latter techniques are more computationally expensive,
but can suppress cardinal direction artifacts if diffusion is
performed on a raster. 'on_diagonals' pretends that the "faces" of a
cell with 8 links are represented by a stretched regular octagon set
within the true cell.
deposit : {True, False}
Whether diffusive material can be deposited. True means that diffusive
material will be deposited if the divergence of sediment flux is
negative. False means that even when the divergence of sediment flux is
negative, no material is deposited. (No deposition ever.) The False
case is a bit of a band-aid to account for cases when fluvial incision
likely removes any material that would be deposited. If one couples
fluvial detachment-limited incision with linear diffusion, the channels
will not reach the predicted analytical solution unless deposit is set
to False.
"""
super().__init__(grid)
self._bc_set_code = self._grid.bc_set_code
assert method in ("simple", "resolve_on_patches", "on_diagonals")
if method == "resolve_on_patches":
assert isinstance(self._grid, RasterModelGrid)
self._use_patches = True
else:
self._use_patches = False
if method == "on_diagonals" and isinstance(self._grid, RasterModelGrid):
self._use_diags = True
else:
self._use_diags = False
self._current_time = 0.0
self._run_before = False
self._kd_on_links = False
if isinstance(linear_diffusivity, str):
try:
self._kd = self._grid.at_link[linear_diffusivity]
self._kd_on_links = True
except KeyError:
self._kd = self._grid.at_node[linear_diffusivity]
else:
self._kd = linear_diffusivity
if isinstance(self._kd, (float, int)):
self._kd = float(self._kd)
else:
if self._kd.size == self._grid.number_of_links:
self._kd_on_links = True
else:
assert self._kd.size == self._grid.number_of_nodes
if self._kd_on_links is True:
assert isinstance(self._grid, RasterModelGrid)
# if we're using patches, it is VITAL that diffusivity is defined on
# links. The whole point of this functionality is that we honour
# *directionality* in the diffusivities.
if self._use_patches:
assert self._kd_on_links
# set _deposit flag to tell code whether or not diffusion can deposit.
self._deposit = deposit
self._values_to_diffuse = "topographic__elevation"
# Set internal time step
# ..todo:
# implement mechanism to compute time-steps dynamically if grid is
# adaptive/changing
# as of modern componentization (Spring '16), this can take arrays
# and irregular grids
# CFL condition precalc:
CFL_prefactor = (
_ALPHA * self._grid.length_of_link[: self._grid.number_of_links] ** 2.0
)
# ^ link_length can include diags, if not careful...
self._CFL_actives_prefactor = CFL_prefactor[self._grid.active_links]
# ^note we can do this as topology shouldn't be changing
# Get a list of interior cells
self._interior_cells = self._grid.node_at_core_cell
self._z = self._grid.at_node[self._values_to_diffuse]
self._dqsds = self._grid.zeros("node", dtype=float)
if not self._use_diags:
g = self._grid.zeros(at="link")
qs = self._grid.zeros(at="link")
try:
self._g = self._grid.add_field(
"topographic__gradient", g, at="link", clobber=False
)
# ^note this will object if this exists already
except FieldError: # keep a ref
self._g = self._grid.at_link["topographic__gradient"]
try:
self._qs = self._grid.add_field(
"hillslope_sediment__unit_volume_flux", qs, at="link", clobber=False
)
except FieldError:
self._qs = self._grid.at_link["hillslope_sediment__unit_volume_flux"]
# note all these terms are deliberately loose, as we won't always
# be dealing with topo
else:
g = np.zeros(self._grid.number_of_d8, dtype=float)
qs = np.zeros(self._grid.number_of_d8, dtype=float)
self._g = g
self._qs = qs
# now we have to choose what the face width of a diagonal is...
# Adopt a regular octagon config if it's a square raster, and
# stretch this geometry as needed.
# Conceptually, this means we're passing mass across diamond-
# shaped holes centered at the corners.
# Note that this WON'T affect the inferred cell size - that's
# still derived from the rectangle.
self._d8width_face_at_link = np.empty(self._grid.number_of_d8)
# note there will be null entries here
# by our defs, every active link must have a face.
# calc the length of a diag "face":
rt2 = np.sqrt(2.0)
horizontal_face = self._grid.dx / (1.0 + rt2)
vertical_face = self._grid.dy / (1.0 + rt2)
diag_face = np.sqrt(0.5 * (horizontal_face ** 2 + vertical_face ** 2))
# NOTE: Do these need to be flattened?
# self._hoz = self.grid.horizontal_links.flatten()
# self._vert = self.grid.vertical_links.flatten()
self._hoz = self.grid.horizontal_links
self._vert = self.grid.vertical_links
self._d8width_face_at_link[self._hoz] = vertical_face
self._d8width_face_at_link[self._vert] = horizontal_face
# ^ this operation pastes in faces where there are none, but
# we'll never use them
self._d8width_face_at_link[self._grid.number_of_links :] = diag_face
self._vertlinkcomp = np.sin(self._grid.angle_of_link)
self._hozlinkcomp = np.cos(self._grid.angle_of_link)
if self._use_patches or self._kd_on_links:
mg = self._grid
try:
self._hoz = self.grid.horizontal_links
self._vert = self.grid.vertical_links
except AttributeError:
pass
self._x_link_patches = mg.patches_at_link[self._hoz]
x_link_patch_pres = mg.patches_present_at_link[self._hoz]
self._x_link_patch_mask = np.logical_not(x_link_patch_pres)
self._y_link_patches = mg.patches_at_link[self._vert]
y_link_patch_pres = mg.patches_present_at_link[self._vert]
self._y_link_patch_mask = np.logical_not(y_link_patch_pres)
self._hoz_link_neighbors = np.empty((self._hoz.size, 4), dtype=int)
self._vert_link_neighbors = np.empty((self._vert.size, 4), dtype=int)
# do some pre-work to make fixed grad BC updating faster in the loop:
self.updated_boundary_conditions()
@property
def fixed_grad_nodes(self):
"""Fixed gradient nodes."""
return self._fixed_grad_nodes
@property
def fixed_grad_anchors(self):
"""Fixed gradient anchors."""
return self._fixed_grad_anchors
@property
def fixed_grad_offsets(self):
"""Fixed gradient offsets."""
return self._fixed_grad_offsets
def updated_boundary_conditions(self):
"""Call if grid BCs are updated after component instantiation.
Sets `fixed_grad_nodes`, `fixed_grad_anchors`, & `fixed_grad_offsets`,
such that::
value[fixed_grad_nodes] = value[fixed_grad_anchors] + offset
Examples
--------
>>> from landlab import RasterModelGrid
>>> import numpy as np
>>> mg = RasterModelGrid((4, 5))
>>> z = mg.add_zeros("topographic__elevation", at="node")
>>> z[mg.core_nodes] = 1.
>>> ld = LinearDiffuser(mg, linear_diffusivity=1.)
>>> ld.fixed_grad_nodes.size == 0
True
>>> ld.fixed_grad_anchors.size == 0
True
>>> ld.fixed_grad_offsets.size == 0
True
>>> mg.at_link['topographic__slope'] = mg.calc_grad_at_link(
... 'topographic__elevation')
>>> mg.status_at_node[mg.perimeter_nodes] = mg.BC_NODE_IS_FIXED_GRADIENT
>>> ld.updated_boundary_conditions()
>>> ld.fixed_grad_nodes
array([ 1, 2, 3, 5, 9, 10, 14, 16, 17, 18])
>>> ld.fixed_grad_anchors
array([ 6, 7, 8, 6, 8, 11, 13, 11, 12, 13])
>>> ld.fixed_grad_offsets
array([-1., -1., -1., -1., -1., -1., -1., -1., -1., -1.])
>>> np.allclose(z[ld.fixed_grad_nodes],
... z[ld.fixed_grad_anchors] + ld.fixed_grad_offsets)
True
"""
fixed_grad_nodes = np.where(
self._grid.status_at_node == NodeStatus.FIXED_GRADIENT
)[0]
heads = self._grid.node_at_link_head[self._grid.fixed_links]
tails = self._grid.node_at_link_tail[self._grid.fixed_links]
head_is_fixed = np.in1d(heads, fixed_grad_nodes)
self._fixed_grad_nodes = np.where(head_is_fixed, heads, tails)
self._fixed_grad_anchors = np.where(head_is_fixed, tails, heads)
vals = self._grid.at_node[self._values_to_diffuse]
self._fixed_grad_offsets = (
vals[self._fixed_grad_nodes] - vals[self._fixed_grad_anchors]
)
if self._use_diags:
self._g.fill(0.0)
if self._kd_on_links or self._use_patches:
mg = self._grid
x_link_patch_pres = mg.patches_present_at_link[self._hoz]
self._x_link_patch_mask = np.logical_not(x_link_patch_pres)
y_link_patch_pres = mg.patches_present_at_link[self._vert]
self._y_link_patch_mask = np.logical_not(y_link_patch_pres)
self._hoz_link_neighbors[:, :2] = mg.links_at_node[
mg.node_at_link_head[self._hoz], 1:4:2
]
self._hoz_link_neighbors[:, 2:] = mg.links_at_node[
mg.node_at_link_tail[self._hoz], 1:4:2
]
self._vert_link_neighbors[:, :2] = mg.links_at_node[
mg.node_at_link_head[self._vert], 0:3:2
]
self._vert_link_neighbors[:, 2:] = mg.links_at_node[
mg.node_at_link_tail[self._vert], 0:3:2
]
self._vert_link_badlinks = np.logical_or(
mg.status_at_link[self._vert_link_neighbors] == LinkStatus.INACTIVE,
self._vert_link_neighbors == -1,
)
self._hoz_link_badlinks = np.logical_or(
mg.status_at_link[self._hoz_link_neighbors] == LinkStatus.INACTIVE,
self._hoz_link_neighbors == -1,
)
def run_one_step(self, dt):
"""Run the diffuser for one timestep, dt.
If the imposed timestep dt is longer than the Courant-Friedrichs-Lewy
condition for the diffusion, this timestep will be internally divided
as the component runs, as needed.
Parameters
----------
dt : float (time)
The imposed timestep.
"""
mg = self._grid
z = self._grid.at_node[self._values_to_diffuse]
if not self._run_before:
self.updated_boundary_conditions() # just in case
self._run_before = True
if self._bc_set_code != self._grid.bc_set_code:
self.updated_boundary_conditions()
self._bc_set_code = self._grid.bc_set_code
core_nodes = self._grid.node_at_core_cell
# do mapping of array kd here, in case it points at an updating
# field:
if isinstance(self._kd, np.ndarray):
if not self._kd_on_links:
kd_links = self._grid.map_max_of_link_nodes_to_link(self._kd)
kd_activelinks = kd_links[self._grid.active_links]
# re-derive CFL condition, as could change dynamically:
dt_links = self._CFL_actives_prefactor / kd_activelinks
self._dt = np.nanmin(dt_links)
else:
kd_links = self._kd
kd_activelinks = self._kd[self._grid.active_links]
dt_links = self._CFL_actives_prefactor / kd_activelinks
self._dt_links = dt_links
self._dt = np.nanmin(np.fabs(dt_links))
else:
kd_activelinks = self._kd
# re-derive CFL condition, as could change dynamically:
dt_links = self._CFL_actives_prefactor / kd_activelinks
self._dt = np.nanmin(dt_links)
if self._use_patches:
# need this else diffusivities on inactive links deform off-angle
# calculations
kd_links = kd_links.copy()
kd_links[self._grid.status_at_link == LinkStatus.INACTIVE] = 0.0
# Take the smaller of delt or built-in time-step size self._dt
self._tstep_ratio = dt / self._dt
repeats = int(self._tstep_ratio // 1.0)
extra_time = self._tstep_ratio - repeats
# Can really get into trouble if no diffusivity happens but we run...
if self._dt < np.inf:
loops = repeats + 1
else:
loops = 0
for i in range(loops):
if not self._use_diags:
grads = mg.calc_grad_at_link(z)
self._g[mg.active_links] = grads[mg.active_links]
if not self._use_patches: # currently forbidden
# if diffusivity is an array, self._kd is already
# active_links-long
self._qs[mg.active_links] = (
-kd_activelinks * self._g[mg.active_links]
)
# Calculate the net deposition/erosion rate at each node
mg.calc_flux_div_at_node(self._qs, out=self._dqsds)
else: # project onto patches
slx = mg.zeros("link")
sly = mg.zeros("link")
slx[self._hoz] = self._g[self._hoz]
sly[self._vert] = self._g[self._vert]
patch_dx, patch_dy = mg.calc_grad_at_patch(z)
xvecs_vert = np.ma.array(
patch_dx[self._y_link_patches], mask=self._y_link_patch_mask
)
slx[self._vert] = xvecs_vert.mean()
yvecs_hoz = np.ma.array(
patch_dy[self._x_link_patches], mask=self._x_link_patch_mask
)
sly[self._hoz] = yvecs_hoz.mean()
# now map diffusivities (already on links, but we want
# more spatial averaging)
Kx = mg.zeros("link")
Ky = mg.zeros("link")
Kx[self._hoz] = kd_links[self._hoz]
Ky[self._vert] = kd_links[self._vert]
vert_link_crosslink_K = np.ma.array(
kd_links[self._vert_link_neighbors],
mask=self._vert_link_badlinks,
)
hoz_link_crosslink_K = np.ma.array(
kd_links[self._hoz_link_neighbors], mask=self._hoz_link_badlinks
)
Kx[self._vert] = vert_link_crosslink_K.mean(axis=1)
Ky[self._hoz] = hoz_link_crosslink_K.mean(axis=1)
Cslope = np.sqrt(slx ** 2 + sly ** 2)
v = np.sqrt(Kx ** 2 + Ky ** 2)
flux_links = v * Cslope
# NEW, to resolve issue with K being off angle to S:
# in fact, no. Doing this just makes this equivalent
# to the basic diffuser, but with a bunch more crap
# involved.
# flux_x = slx * Kx
# flux_y = sly * Ky
# flux_links = np.sqrt(flux_x*flux_x + flux_y*flux_y)
theta = np.arctan(np.fabs(sly) / (np.fabs(slx) + 1.0e-10))
flux_links[self._hoz] *= np.sign(slx[self._hoz]) * np.cos(
theta[self._hoz]
)
flux_links[self._vert] *= np.sign(sly[self._vert]) * np.sin(
theta[self._vert]
)
# zero out the inactive links
self._qs[mg.active_links] = -flux_links[mg.active_links]
self._grid.calc_flux_div_at_node(self._qs, out=self._dqsds)
else: # ..._use_diags
# NB: this is dirty code. It uses the obsolete diagonal data
# structures, and necessarily has to do a bunch of mapping
# on the fly.
# remap the kds onto the links, as necessary
if isinstance(self._kd, np.ndarray):
d8link_kd = np.empty(self._grid.number_of_d8, dtype=float)
d8link_kd[self._grid.active_links] = kd_activelinks
d8link_kd[self._grid.active_diagonals] = np.amax(
self._kd[
self._grid.nodes_at_diagonal[self._grid.active_diagonals]
],
axis=1,
).flatten()
else:
d8link_kd = self._kd
self._g[self._grid.active_links] = self._grid.calc_grad_at_link(z)[
self._grid.active_links
]
self._g[self._grid.active_diagonals] = (
z[self._grid._diag_activelink_tonode]
- z[self._grid._diag_activelink_fromnode]
) / self._grid.length_of_d8[self._grid.active_diagonals]
self._qs[:] = -d8link_kd * self._g
total_flux = self._qs * self._d8width_face_at_link # nlinks
totalflux_allnodes = (
total_flux[self._grid.links_at_node]
* self._grid.active_link_dirs_at_node
).sum(axis=1)
totalflux_allnodes += (
total_flux[self._grid.d8s_at_node[:, 4:]]
* self._grid.active_diagonal_dirs_at_node
).sum(axis=1)
self._dqsds[self._grid.node_at_cell] = (
-totalflux_allnodes[self._grid.node_at_cell]
/ self._grid.area_of_cell
)
# Calculate the total rate of elevation change
dzdt = -self._dqsds
if not self._deposit:
dzdt[np.where(dzdt > 0)] = 0.0
# Update the elevations
timestep = self._dt
if i == (repeats):
timestep *= extra_time
else:
pass
self._grid.at_node[self._values_to_diffuse][core_nodes] += (
dzdt[core_nodes] * timestep
)
# check the BCs, update if fixed gradient
vals = self._grid.at_node[self._values_to_diffuse]
vals[self._fixed_grad_nodes] = (
vals[self._fixed_grad_anchors] + self._fixed_grad_offsets
)
@property
def time_step(self):
"""Returns internal time-step size (as a property)."""
return self._dt
| [
2,
0,
1220,
14629,
14,
24330,
14,
29412,
198,
37811,
21950,
326,
4981,
362,
35,
44258,
1262,
281,
7952,
27454,
12,
29048,
2446,
13,
198,
198,
41972,
2901,
2211,
7963,
4586,
6153,
2805,
1584,
5550,
41,
39,
351,
27140,
410,
16,
13,
15... | 2.036772 | 12,727 |
# -*- coding: utf-8 -*-
import logging
import os
try:
logging.getLogger('allowedsites').addHandler(logging.NullHandler())
except AttributeError: # < Python 2.7
pass
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
}
}
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.sites',
'django.contrib.contenttypes',
'django.contrib.auth',
'django.contrib.messages',
)
SKIP_SOUTH_TESTS = True
SOUTH_TESTS_MIGRATE = False
ROOT_URLCONF = 'test_urls'
# Use a fast hasher to speed up tests.
PASSWORD_HASHERS = (
'django.contrib.auth.hashers.MD5PasswordHasher',
)
SITE_ID = 1
TEMPLATE_CONTEXT_PROCESSORS = ()
HERE_DIR = os.path.abspath(os.path.dirname(__file__))
TEMPLATE_DIRS = ()
SILENCED_SYSTEM_CHECKS = [
"1_7.W001",
]
USE_TZ = True
MIDDLEWARE = [
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
]
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'ws4redis.context_processors.default',
],
'loaders': (
'django.template.loaders.app_directories.Loader',
),
},
},
]
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
18931,
198,
11748,
28686,
198,
198,
28311,
25,
198,
220,
220,
220,
18931,
13,
1136,
11187,
1362,
10786,
12154,
5379,
2737,
27691,
2860,
25060,
7,
6404,
2667,
13,
... | 2.063275 | 806 |
# This file is present to facilitate semantic releases.
__version__ = "0.2.0" | [
2,
770,
2393,
318,
1944,
284,
15570,
37865,
10050,
13,
198,
834,
9641,
834,
796,
366,
15,
13,
17,
13,
15,
1
] | 3.5 | 22 |
import jwt
from datetime import datetime
import uuid
import json
import os
API_ID = os.getenv('AWS_APIGW_ID', 'unknown')
AWS_REGION = os.getenv('AWS_REGION', 'us-east-1')
SECRET = 'cvb973246g(*&^89gbn(^&$%VC*67i876bnf9nm(*)&^%B*)(bfmj99087wq6onIOIKG(*765'
TTL = 60
now = get_utc_timestamp()
payload = {
'iss': 'test-issuer',
'sub': '3f94876234f876hb8',
'exp': now + TTL,
'nbf': now,
'iat': now,
'jti': str(uuid.uuid1()),
}
print('Payload in JSON:\n---------------\n{}\n---------------\n'.format(json.dumps(payload, indent=' ')))
encoded_jwt = jwt.encode(payload, SECRET, algorithm='HS256').decode('utf-8')
print('encoded_jwt={}\n'.format(encoded_jwt))
print('curl test command:\n----------------------------------------\n\ncurl -vvv -X GET -H "Authorization: {}" https://{}.execute-api.{}.amazonaws.com/dev/echo-test\n\n----------------------------------------\n'.format(encoded_jwt, API_ID, AWS_REGION))
| [
11748,
474,
46569,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
11748,
334,
27112,
198,
11748,
33918,
198,
11748,
28686,
628,
198,
17614,
62,
2389,
796,
28686,
13,
1136,
24330,
10786,
12298,
50,
62,
2969,
3528,
54,
62,
2389,
3256,
705,... | 2.355 | 400 |
from chia.util.ints import uint32, uint64
# 1 Hydrangea coin = 1,000,000,000,000 = 1 trillion mojo.
_mojo_per_hydrangea = 1000000000000
_prefarm = 21000000
_blocks_per_year = 1681920 # 32 * 6 * 24 * 365
_block_reward = 125 # 125 Hydrangea are awarded per block
_farmer_fraction = (10 / _block_reward)
_pool_fraction = (90 / _block_reward)
_community_fraction = (20 / _block_reward)
_staking_fraction = (5 / _block_reward)
_timelord_fraction = ((1 / 1000) / _block_reward)
def _calculate_reward(fraction, height: uint32) -> uint64:
"""
Returns the block reward for a given reward fraction at a certain block height.
These halving events will not be hit at the exact times
(3 years, etc), due to fluctuations in difficulty. They will likely come early, if the network space and VDF
rates increase continuously.
"""
if height == 0:
return uint64(int(fraction * _prefarm * _mojo_per_hydrangea))
elif height < 3 * _blocks_per_year:
return uint64(int(fraction * _block_reward * _mojo_per_hydrangea))
elif height < 6 * _blocks_per_year:
return uint64(int(fraction * (_block_reward / 2) * _mojo_per_hydrangea))
elif height < 9 * _blocks_per_year:
return uint64(int(fraction * (_block_reward / 4) * _mojo_per_hydrangea))
elif height < 12 * _blocks_per_year:
return uint64(int(fraction * (_block_reward / 8) * _mojo_per_hydrangea))
else:
return uint64(int(fraction * (_block_reward / 16) * _mojo_per_hydrangea))
def calculate_pool_reward(height: uint32) -> uint64:
"""
Returns the pool reward at a certain block height. If the farmer
is solo farming, they act as the pool, and therefore earn the entire block reward.
"""
return _calculate_reward(_pool_fraction, height)
def calculate_community_reward(height: uint32) -> uint64:
"""
Returns the community reward at a certain block height.
"""
return _calculate_reward(_community_fraction, height)
def calculate_staking_reward(height: uint32) -> uint64:
"""
Returns the staking reward at a certain block height.
"""
return _calculate_reward(_staking_fraction, height)
def calculate_base_farmer_reward(height: uint32) -> uint64:
"""
Returns the base farmer reward at a certain block height.
Returns the coinbase reward at a certain block height.
"""
return _calculate_reward(_farmer_fraction, height)
def calculate_base_timelord_fee(height: uint32) -> uint64:
"""
Returns the base timelord reward at a certain block height.
"""
return _calculate_reward(_timelord_fraction, height) | [
6738,
442,
544,
13,
22602,
13,
29503,
1330,
20398,
2624,
11,
20398,
2414,
198,
198,
2,
352,
15084,
9521,
64,
10752,
796,
352,
11,
830,
11,
830,
11,
830,
11,
830,
796,
352,
12989,
6941,
7639,
13,
198,
62,
5908,
7639,
62,
525,
62,
... | 2.761905 | 945 |
# coding: utf-8
"""
Apteco API
An API to allow access to Apteco Marketing Suite resources # noqa: E501
The version of the OpenAPI document: v2
Contact: support@apteco.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import apteco_api
from apteco_api.api.shares_api import SharesApi # noqa: E501
from apteco_api.rest import ApiException
class TestSharesApi(unittest.TestCase):
"""SharesApi unit test stubs"""
def test_shares_create_share(self):
"""Test case for shares_create_share
Creates a new share from the given details, sharing from the logged in user. # noqa: E501
"""
pass
def test_shares_create_share_update(self):
"""Test case for shares_create_share_update
Creates a new share update from the given details, sharing from the logged in user. # noqa: E501
"""
pass
def test_shares_delete_share(self):
"""Test case for shares_delete_share
Deletes the specified share # noqa: E501
"""
pass
def test_shares_get_share(self):
"""Test case for shares_get_share
Returns the details of a particular share # noqa: E501
"""
pass
def test_shares_get_share_update(self):
"""Test case for shares_get_share_update
Returns a specific update that is associated with a particular share # noqa: E501
"""
pass
def test_shares_get_share_update_added_users(self):
"""Test case for shares_get_share_update_added_users
Returns the list of the users added to a share as part of a specific update # noqa: E501
"""
pass
def test_shares_get_share_update_removed_users(self):
"""Test case for shares_get_share_update_removed_users
Returns the list of the users removed from a share as part of a specific update # noqa: E501
"""
pass
def test_shares_get_share_updates(self):
"""Test case for shares_get_share_updates
Returns the updates that are associated with a particular share # noqa: E501
"""
pass
def test_shares_get_share_users(self):
"""Test case for shares_get_share_users
Returns the list of users that are associated with a particular share # noqa: E501
"""
pass
def test_shares_get_shares(self):
"""Test case for shares_get_shares
Requires OrbitAdmin: Gets summary information about each share in the DataView. # noqa: E501
"""
pass
if __name__ == '__main__':
unittest.main()
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
37811,
198,
220,
220,
220,
317,
457,
47704,
7824,
628,
220,
220,
220,
1052,
7824,
284,
1249,
1895,
284,
317,
457,
47704,
22137,
26264,
4133,
220,
1303,
645,
20402,
25,
412,
33548,
628,
220,
... | 2.582119 | 1,029 |
import unittest
import pytest
import json
from peewee import SqliteDatabase
from genre_api.models.genre import Genre
from genre_api.models.singer import Singer
from genre_api.models.song import Song
from genre_api.models.playlist import Playlist
from genre_api.models.song_to_playlist import SongToPlaylist
MODELS = [Genre, Singer, Song, Playlist, SongToPlaylist]
@pytest.mark.usefixtures('app_class')
| [
11748,
555,
715,
395,
198,
11748,
12972,
9288,
198,
11748,
33918,
198,
6738,
613,
413,
1453,
1330,
311,
13976,
578,
38105,
198,
6738,
12121,
62,
15042,
13,
27530,
13,
35850,
1330,
5215,
260,
198,
6738,
12121,
62,
15042,
13,
27530,
13,
... | 3.214286 | 126 |
from __future__ import print_function
import time
import random
start = time.time()
from collections import Counter, defaultdict
from biaffine import DeepBiaffineAttentionDecoder
import dynet as dy
import numpy as np
# format of files: each line is "word1/tag2 word2/tag2 ..."
train_file = "../data/parsing/graph/ptb_train.txt"
test_file = "../data/parsing//graph/ptb_dev.txt"
w2i = defaultdict(lambda: len(w2i))
t2i = defaultdict(lambda: len(t2i))
UNK = w2i["<unk>"]
train = list(read(train_file))
w2i = defaultdict(lambda: UNK, w2i)
dev = list(read(test_file))
nwords = len(w2i)
ntags = len(t2i)
# DyNet Starts
model = dy.Model()
trainer = dy.AdamTrainer(model)
# Lookup parameters for word embeddings
EMB_SIZE = 32
HID_SIZE = 64
W_emb = model.add_lookup_parameters((nwords, EMB_SIZE)) # Word embeddings
fwdLSTM = dy.SimpleRNNBuilder(1, EMB_SIZE, HID_SIZE, model) # Forward LSTM
bwdLSTM = dy.SimpleRNNBuilder(1, EMB_SIZE, HID_SIZE, model) # Backward LSTM
biaffineParser = DeepBiaffineAttentionDecoder(model, ntags, src_ctx_dim=HID_SIZE * 2,
n_arc_mlp_units=64, n_label_mlp_units=32)
for ITER in range(100):
# Perform training
random.shuffle(train)
train_loss = 0.0
start = time.time()
for words, labels, heads in train:
loss = calc_loss(words, labels, heads)
train_loss += loss.value()
loss.backward()
trainer.update()
print("iter %r: train loss/sent=%.4f, time=%.2fs" % (ITER, train_loss / len(train), time.time() - start))
correct_heads = 0.
correct_labels = 0.
total = 0.
for words, labels, heads in dev:
head_acc, label_acc = calc_acc(words, labels, heads)
correct_heads += head_acc * len(words)
correct_labels += label_acc * len(words)
total += len(words)
print("iter %r: test head_acc=%.4f, label_acc=%.4f" % (ITER, correct_heads * 100 / total,
correct_labels * 100 / total))
| [
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
11748,
640,
198,
11748,
4738,
198,
198,
9688,
796,
640,
13,
2435,
3419,
198,
198,
6738,
17268,
1330,
15034,
11,
4277,
11600,
198,
6738,
275,
544,
487,
500,
1330,
10766,
33,
544,
487,... | 2.283126 | 883 |
import angr
from __init__ import Plugin
from binary_dependency_graph.utils import *
from binary_dependency_graph.bdp_enum import *
from taint_analysis.utils import *
import itertools
INDEXING_OPS = ('add', 'sub')
CMP_FUNCTIONS = ('strcmp', 'strncmp', 'memcmp')
LIB_KEYWORD = 'lib' | [
11748,
281,
2164,
198,
6738,
11593,
15003,
834,
1330,
42636,
198,
6738,
13934,
62,
45841,
1387,
62,
34960,
13,
26791,
1330,
1635,
198,
6738,
13934,
62,
45841,
1387,
62,
34960,
13,
17457,
79,
62,
44709,
1330,
1635,
198,
6738,
256,
2913,
... | 2.896907 | 97 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import cv2
from ImageLibrary import imutils
from PIL import Image
from future.utils import iterkeys, itervalues
try:
from SlotBot import errors
except ImportError:
pass #for stand-alone usage
import numpy as np
from robot.api import logger as LOGGER
#use it when prepairing yaml file | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
198,
11748,
269,
85,
17,
198,
6738,
7412,
23377,
1330,
545,
26791,... | 3.071429 | 126 |
from kafka import KafkaConsumer, TopicPartition, conn
from json import loads
from sqlalchemy import create_engine
if __name__ == "__main__":
drop_tbl_transaction()
create_tbl_transaction()
c = XactionConsumer()
c.handleMessages()
| [
6738,
479,
1878,
4914,
1330,
46906,
49106,
11,
47373,
7841,
653,
11,
48260,
198,
6738,
33918,
1330,
15989,
198,
6738,
44161,
282,
26599,
1330,
2251,
62,
18392,
628,
628,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,... | 2.918605 | 86 |
from flask import Flask, render_template, jsonify
from flask_sqlalchemy import SQLAlchemy
from src.config import DevelopmentConfig
from flask_migrate import Migrate
import logging
from logging.handlers import RotatingFileHandler
import os
from dotenv import load_dotenv
basedir = os.path.abspath(os.path.dirname(__file__))
load_dotenv(os.path.join(basedir, '.env'))
db = SQLAlchemy()
| [
6738,
42903,
1330,
46947,
11,
8543,
62,
28243,
11,
33918,
1958,
198,
6738,
42903,
62,
25410,
282,
26599,
1330,
16363,
2348,
26599,
198,
6738,
12351,
13,
11250,
1330,
7712,
16934,
198,
6738,
42903,
62,
76,
42175,
1330,
337,
42175,
198,
1... | 3.288136 | 118 |
import re
import os
import pysam
import shutil
import pandas
from .SplitRunner import SplitRunner
import cgatcore.pipeline as P
import cgatcore.experiment as E
import cgatcore.iotools as IOTools
class run_split_vcf_by_chromosome(SplitRunner):
"""split a VCF by chromosome using tabix.
The file is assumed to be sorted and indexed.
"""
name = "vcf_by_chromosome"
output = "chromosome_{}.dir/result.vcf.gz"
| [
11748,
302,
198,
11748,
28686,
198,
11748,
279,
893,
321,
198,
11748,
4423,
346,
198,
11748,
19798,
292,
198,
198,
6738,
764,
41205,
49493,
1330,
27758,
49493,
198,
11748,
269,
41268,
7295,
13,
79,
541,
4470,
355,
350,
198,
11748,
269,
... | 2.847682 | 151 |
import sys, urllib.request
try:
rfc_number = int(sys.argv[1])
except (IndexError, ValueError):
print('Must supply an RFC number as first argument')
sys.exit(2)
template = 'http://www.ietf.org/rfc/rfc{}.txt'
url = template.format(rfc_number)
rfc_raw = urllib.request.urlopen(url).read()
rfc = rfc_raw.decode()
print(rfc) | [
11748,
25064,
11,
2956,
297,
571,
13,
25927,
198,
198,
28311,
25,
198,
220,
220,
220,
374,
16072,
62,
17618,
796,
220,
493,
7,
17597,
13,
853,
85,
58,
16,
12962,
198,
16341,
357,
15732,
12331,
11,
11052,
12331,
2599,
198,
220,
220,
... | 2.437956 | 137 |
import hlir
import expr
import utils
import numbering
import draw
MAX_ITERATIONS = 20
# collapse each interval into a single node
# based on "Interval Algorithm" from reverse compilation techniques
# check a condition: there must exist
# attempts to create a loop, though it may fail.
# an interval for a header node is the maximal subgraph for which it holds
# that every cycle goes through the interval header
##############################################################################
######################## conditional structuring #############################
##############################################################################
"""
Constraints:
- if the header is inside a loop, we may never leave that loop
- if we find a loop that is not the one we're initially inside, then that
loop's nodes (including its header) may not be reached
- we may never follow continues
- if the true-node or false-node terminates, we can choose that as the follow
- but we must still obey the constraints; e.g., the true-node can't break
out of the loop.
- we should never follow an edge to the cond header. (This should not happen,
but it still might if all loops were not detected.)
- we should never follow imprecise successors
- we currently use node.next_bb as the next false node. Is that accurate??
- the numbering computation should use the above rules, too.
We should use a special "successor" function that only returns valid edges.
Then we can use that to compute the reach.
We can also use that to check if we can make the true- or false-node a follow.
We should keep the cond header as a self-property so we don't have to pass it
around so much.
And we should remember to check that the constraints hold via assertions.
"""
| [
11748,
289,
75,
343,
198,
11748,
44052,
198,
11748,
3384,
4487,
198,
11748,
47622,
198,
11748,
3197,
198,
198,
22921,
62,
2043,
1137,
18421,
796,
1160,
628,
197,
198,
197,
2,
9807,
1123,
16654,
656,
257,
2060,
10139,
628,
197,
197,
62... | 4.155814 | 430 |
from datetime import datetime
from typing import Union
import config
import models
# Старый вариант премиума где дата окончания находится в таблице пользователей - удалить
# Новый вариант где дата окончания находится в таблице user_tarif или chat_tarif
# По окончанию этой даты надо сменить тариф на базовый
| [
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
19720,
1330,
4479,
198,
198,
11748,
4566,
198,
11748,
4981,
628,
198,
2,
12466,
94,
20375,
16142,
21169,
45035,
140,
117,
12466,
110,
16142,
21169,
18849,
16142,
22177,
20375,
12466,
123,
211... | 1.257028 | 249 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-12-02 19:35
from __future__ import unicode_literals
from django.db import migrations, models
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
37770,
352,
13,
940,
13,
18,
319,
1584,
12,
1065,
12,
2999,
678,
25,
2327,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
1... | 2.736842 | 57 |
import arrow
from test import get_user_session, cassette
| [
11748,
15452,
198,
198,
6738,
1332,
1330,
651,
62,
7220,
62,
29891,
11,
42812,
628,
628
] | 3.8125 | 16 |
# Есть список песен группы Depeche Mode со временем звучания с точностью до долей минут
# Точность указывается в функции round(a, b)
# где a, это число которое надо округлить, а b количество знаков после запятой
# более подробно про функцию round смотрите в документации https://docs.python.org/3/search.html?q=round
violator_songs_list = [
['World in My Eyes', 4.86],
['Sweetest Perfection', 4.43],
['Personal Jesus', 4.56],
['Halo', 4.9],
['Waiting for the Night', 6.07],
['Enjoy the Silence', 4.20],
['Policy of Truth', 4.76],
['Blue Dress', 4.29],
['Clean', 5.83],
]
# TODO Распечатайте общее время звучания трех песен: 'Halo', 'Enjoy the Silence' и 'Clean' в формате
# Три песни звучат ХХХ.XX минут
# TODO Распечатайте общее время звучания трех песен: 'Sweetest Perfection', 'Policy of Truth' и 'Blue Dress'
# А другие три песни звучат приблизительно ХХХ минут
| [
198,
2,
12466,
243,
21727,
20375,
45367,
220,
21727,
140,
123,
18849,
21727,
25443,
118,
12466,
123,
16843,
21727,
16843,
22177,
12466,
111,
21169,
35072,
140,
123,
140,
123,
45035,
1024,
431,
2395,
10363,
220,
21727,
15166,
12466,
110,
2... | 1.359165 | 671 |
# Uri Online Judge 1079
N = int(input())
for i in range(0,N):
Numbers = input()
num1 = float(Numbers.split()[0])
num2 = float(Numbers.split()[1])
num3 = float(Numbers.split()[2])
print(((2*num1+3*num2+5*num3)/10).__round__(1)) | [
2,
46688,
7467,
8974,
838,
3720,
198,
198,
45,
796,
493,
7,
15414,
28955,
628,
198,
1640,
1312,
287,
2837,
7,
15,
11,
45,
2599,
198,
220,
220,
220,
27797,
796,
5128,
3419,
628,
220,
220,
220,
997,
16,
796,
12178,
7,
49601,
13,
3... | 2.261261 | 111 |
import struct
| [
11748,
2878,
201,
198,
201,
198,
197
] | 2.571429 | 7 |
#!/usr/local/bin/python
print 2**3
print pow(2,3)
print abs(-10)
print round(1.536,2)
print 1/2
print 1.0//2.0
print 0xAF
print 010
import cmath
print cmath.sqrt(-1)
import math
print math.floor(32.8)
| [
2,
48443,
14629,
14,
12001,
14,
8800,
14,
29412,
198,
4798,
362,
1174,
18,
198,
4798,
7182,
7,
17,
11,
18,
8,
198,
4798,
2352,
32590,
940,
8,
198,
4798,
2835,
7,
16,
13,
44468,
11,
17,
8,
198,
4798,
352,
14,
17,
198,
4798,
352... | 2.258427 | 89 |
# Copyright 2019 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Copyright (c) 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
from torch.nn.modules.utils import _pair
from torch.nn.modules.utils import _triple
from torch.nn.parameter import Parameter
_BN_CLASS_MAP = {
1: nn.BatchNorm1d,
2: nn.BatchNorm2d,
3: nn.BatchNorm3d,
}
# Adopted from https://github.com/pytorch/pytorch/blob/master/torch/nn/intrinsic/qat/modules/conv_fused.py
class QuantizedConvBatchNorm2d(_ConvBnNd, nn.Conv2d):
"""A QuantizedConvBatchNorm2d module is a module fused from
Conv2d and BatchNorm2d attached with FakeQuantizer modules for weight and
batchnorm stuffs used in quantization aware training.
We combined the interface of :class:`torch.nn.Conv2d` and
:class:`torch.nn.BatchNorm2d`.
Implementation details: https://arxiv.org/pdf/1806.08342.pdf section 3.2.2
Similar to :class:`torch.nn.Conv2d`, with FakeQuantizer modules initialized
to default.
"""
class QuantizedConvBatchNorm3d(_ConvBnNd, nn.Conv3d):
"""A QuantizedConvBatchNorm3d module is a module fused from
Conv3d and BatchNorm3d attached with FakeQuantizer modules for weight and
batchnorm stuffs used in quantization aware training.
We combined the interface of :class:`torch.nn.Conv3d` and
:class:`torch.nn.BatchNorm3d`.
Similar to `QuantizedConvBatchNorm3d`.
"""
_FUSED_CLS = [
QuantizedConvBatchNorm2d, QuantizedConvBatchNorm3d,
QuantizedConvTransposeBatchNorm2d, QuantizedConvTransposeBatchNorm3d
]
# TODO(yuwang): Move to top api for user.
| [
2,
15069,
13130,
1395,
346,
28413,
3457,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2... | 3.016484 | 910 |
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 6 20:04:16 2018
@author: xingrongtech
"""
from sympy import Symbol
from quantities.quantity import Quantity
from . import ACategory, BCategory, measure
from .ins import Ins
from ..amath import sqrt
from ..num import Num
from ..numitem import NumItem
from ..lsym import LSym
from ..lsymitem import LSymItem
from ..latexoutput import LaTeX
from ..system.exceptions import expressionInvalidException
from ..system.unit_open import openUnit, closeUnit
from ..system.text_unicode import usub
from ..system.format_units import format_units_unicode, format_units_latex
class BaseMeasure(measure.Measure):
'''BaseMeasure为直接测量类,该类通过给出一组直接测量的数据以及测量该数据所使用的仪器或方法,从而计算单个直接测量的不确定度。'''
AMethod = 'auto' #静态属性,控制A类不确定度如何计算
__value = None
__staUnc = None
__q = 1
__data = None
__linearfit_data = None
<<<<<<< HEAD
useRelUnc = False #决定由str、latex生成的测量结果中,不确定度是否使用相对不确定度表示
=======
>>>>>>> fcd8aeb38c983d9242fa950ef4c982492c7b950e
def __init__(self, data, instrument=None, unit=None, sym=None, description=None):
'''初始化一个BaseMeasure直接测量
【参数说明】
1.data:测量数据,可以是单个测量值、一个样本或多个样本,是用于计算A类不确定度的依据。data可以是以下数据类型:
(1)单个测量值:
①Num:直接给出单个测量值的Num数值;
②str:通过给出数值的字符串表达式,得到对应的Num数值。
③LSym:若LaTeX符号中有数值(即创建LSym时,sNum为str或Num类型),则可以通过给出LSym,得到LSym中的Num数值。
(2)一个样本:
①NumItem:直接给出样本的NumItem数组;
②str:对于还没有转换成Num的数值,将数值以空格隔开,表示成字符串表达式,以此生成样本的NumItem数组;
③list<Num>:对于已经转换成Num的数值,将数值用list表示出,以此生成样本的NumItem数组;
④list<LSym>:若LaTeX符号中有数值(即创建LSym时,sNum为str或Num类型),则可以通过给出LSym的列表,得到每个LSym中的Num数值,以此生成样本的NumItem数组;
⑤LSymItem:通过给出LSymItem,得到LSymItem中对应的Num数值作为样本数据。
(3)多个样本:
①list<NumItem>:直接给出每个样本的NumItem数组,得到多个样本数组;
②list<str>:列表中的每个str生成一个样本数组,从而得到多个样本数组;
③list<LSymItem>:通过给出LSymItem,得到每个LSymItem中对应的Num数值作为每个样本的数据,从而得到多个样本数组。
2.instrument(可选,Instrument):测量仪器,是获得测量结果单位、用于计算B类不确定的依据。当测量仪器为None时,将不会计算B类不确定度。默认instrument=None。
3.unit(可选,str):测量的单位。给出unit时,会优先使用unit的单位;若没有给出unit,则当给定测量仪器时,使用测量仪器的单位;若未给出测量仪器的单位,则当data中有单位(比如Num、LSym、NumItem、LSymItem,或list<Num>、list<LSym>、list<NumItem>、list<LSymItem>中首个元素)则使用data中的单位,否则没有单位。默认unit=None。
4.sym(可选,str):符号。一般情况下不需要给出,当需要展示计算过程时,最好给出。当sym未给出时,若data以NumItem形式给出且包含sym,则使用data的sym,否则没有sym。默认sym=None。
5.description(可选,str):在展示测量(含测量值和不确定度)时,作为对测量的语言描述。默认description=None。'''
self.__instrument = instrument
if unit != None:
self.__q = Quantity(1., unit) if type(unit) == str else unit
if instrument != None:
instrument.q = self.__q
elif instrument != None and instrument.q != None:
self.__q = instrument.q
if type(data) == Num or type(data) == LSym:
self.__uA = None #单个测量值、空值没有A类不确定度
if type(data) == Num:
self.__value = data
if type(self.__q) != Quantity:
self.__q = data._Num__q
elif type(data) == LSym:
self.__value = data.num()
if type(self.__q) != Quantity:
self.__q = data.num()._Num__q
if sym == None:
sym = data.sym()
elif type(data) == LSymItem:
self.__uA = 'single'
self.__data = NumItem(data)
if type(self.__q) != Quantity:
self.__q = data._LSymItem__q
elif type(data) == str:
isItem = (data.find(' ') >= 0)
if isItem:
self.__uA = 'single'
self.__data = NumItem(data)
if type(self.__q) != int:
self.__data._NumItem__q = self.__q
else:
self.__uA = None
self.__value = Num(data)
if type(self.__q) != int:
self.__value._Num__q = self.__q
elif type(data) == NumItem:
self.__uA = 'single'
self.__data = data
if type(self.__q) != Quantity:
self.__q = data._NumItem__q
else:
data._NumItem__q = self.__q
data._NumItem__qUpdate()
elif type(data) == list:
self.__uA = 'comb'
if len(data) == 0:
raise expressionInvalidException('用于创建测量的参数无效')
elif type(data[0]) == str:
self.__data = [NumItem(di) for di in data]
if type(self.__q) != int:
for di in self.__data:
di._NumItem__q = self.__q
di._NumItem__qUpdate()
elif type(data[0]) == NumItem:
self.__data = data
if type(self.__q) != Quantity:
self.__q = self.__data[0]._NumItem__q
else:
if sym == None:
for di in self.__data:
di._NumItem__q = self.__q
di._NumItem__qUpdate()
else:
for di in self.__data:
di._NumItem__q = self.__q
di._NumItem__qUpdate()
di._NumItem__sym = sym
elif type(data[0]) == LSymItem:
self.__data = [NumItem(di) for di in data]
if type(self.__q) != Quantity:
self.__q = self.__data[0]._NumItem__q
else:
for di in self.__data:
di._NumItem__q = self.__q
di._NumItem__qUpdate()
elif type(data[0]) == Num or type(data[0]) == LSym:
self.__data = NumItem(data)
if type(self.__q) != Quantity:
self.__q = self.__data._NumItem__q
else:
self.__data._NumItem__q = self.__q
self.__data._NumItem__qUpdate()
else:
raise expressionInvalidException('用于创建直接测量的参数无效')
self.__value = NumItem([d.mean() for d in self.__data]).mean()
else:
raise expressionInvalidException('用于创建直接测量的参数无效')
if type(self.__data) == NumItem:
self.__value = self.__data.mean()
if sym == None:
self.__sym = self.__data._NumItem__sym
else:
self.__sym = '{%s}' % sym
self.__data._NumItem__sym = self.__sym
else:
if sym == None:
self.__sym = '{x_{%d}}' % id(self)
else:
self.__sym = '{%s}' % sym
if self.__uA == 'comb':
for i in range(len(self.__data)):
self.__data[i]._NumItem__sym = '{%s_{%d}}' % (self.__sym, i+1)
self._Measure__symbol = Symbol(self.__sym, real=True)
if unit != None:
self.__value._Num__q = self.__q
if instrument != None:
instrument.q = self.__q
if measure.Measure.process:
self._Measure__vl = LSym(self.__sym, self.__value)
else:
self._Measure__vl = self.__value
if description == None:
self.__description = self.__sym
else:
self.__description = description
self._Measure__baseMeasures = {}
self._Measure__measures = {}
self._Measure__lsyms = {}
self._Measure__consts = {}
self.__staUnc = self.__calStaUnc()
self.useRelUnc = False
def fromReport(report, unit=None, sym=None, distribution=1, description=None, **param):
'''从不确定度报告中获得BaseMeasure
【参数说明】
1.report(str):形如`114.818(3)`的不确定度报告。
2.distribution(可选,int):分布类型,从以下列表中取值。默认distribution=Ins.rectangle。
①Ins.norm:正态分布;
②Ins.rectangle:矩形分布;
③Ins.triangle:三角分布;
④Ins.arcsin:反正弦分布;
⑤Ins.twopoints:两点分布;
⑥Ins.trapezoid:梯形分布,此分布下需要通过附加参数beta给出β值。
3.unit(可选,str):测量的单位。默认unit=None。
4.sym(可选,str):符号。一般情况下不需要给出,当需要展示计算过程时,最好给出。默认sym=None。
5.description(可选,str):在展示测量(含测量值和不确定度)时,作为对测量的语言描述。默认description=None。'''
rlist = report[:-1].split('(')
v = rlist[0]
zero_behind = len(v.split('.')[1])-len(rlist[1]) #小数点后空零的数量
half_width = '0.' + zero_behind*'0' + rlist[1]
if len(param) == 0:
rIns = Ins(half_width, distribution, unit)
else:
rIns = Ins(half_width, distribution, unit, beta=param['beta'])
return BaseMeasure(v, rIns, sym=sym, description=description)
def value(self, process=False, needValue=False):
'''获得测量值
【参数说明】
1.process(可选,bool):是否展示计算过程。默认proces=False。
2.needValue(可选,bool):当获得计算过程时,是否返回计算结果。默认needValue=False。
【返回值】
①process为False时,返回值为Num类型的测量值。
②process为True且needValue为False时,返回值为LaTeX类型的计算过错。
③process为True且needValue为True时,返回值为Num类型的测量值及其LaTeX类型的计算过程组成的元组。
Num:测量值。'''
if process:
if self.__uA == 'single':
return self.__data.mean(True, needValue)
elif self.__uA == 'comb':
m = len(self.__data)
#根据所有样本的样本数量是否一致,判断使用哪个公式
nSame = True
n = len(self.__data[0])
for di in self.__data:
if len(di._NumItem__arr) != n:
nSame = False
break
sciDigit = max([di._NumItem__sciDigit() for di in self.__data], key=lambda digit: abs(digit))
large_brac = max([len([x for x in di._NumItem__arr if x < 0]) for di in self.__data]) > 0
sub_sumExpr = []
if sciDigit == 0:
for di in self.__data:
plusExpr = '+'.join([n.dlatex(2) for n in di._NumItem__arr])
if len([x for x in di._NumItem__arr if x < 0]) == 0:
sub_sumExpr.append(r'\left(%s\right)' % plusExpr)
else:
sub_sumExpr.append(r'\left[%s\right]' % plusExpr)
if large_brac:
sumExpr = r'\left \{ %s \right \}' % '+'.join(sub_sumExpr)
else:
sumExpr = r'\left[ %s \right]' % '+'.join(sub_sumExpr)
else:
for di in self.__data:
diArr = di * 10**(-sciDigit)
plusExpr = '+'.join([n.dlatex(2) for n in diArr._NumItem__arr])
if len([x for x in diArr._NumItem__arr if x < 0]) == 0:
sub_sumExpr.append(r'\left(%s\right)' % plusExpr)
else:
sub_sumExpr.append(r'\left[%s\right]' % plusExpr)
if large_brac:
sumExpr = r'\left \{ %s \right \}\times 10^{%d}' % ('+'.join(sub_sumExpr), sciDigit)
else:
sumExpr = r'\left[ %s \right]\times 10^{%d}' % ('+'.join(sub_sumExpr), sciDigit)
if nSame:
formula = r'\cfrac{1}{mn}\sum\limits_{i=1}^m\sum\limits_{j=1}^n %s_{ij}' % self.__sym
meanExpr = r'\cfrac{1}{%d \times %d}%s' % (m, n, sumExpr)
else:
formula = r'\cfrac{1}{\sum_{i=1}^m n_i}\sum\limits_{i=1}^{m}\sum\limits_{j=1}^{n_i} %s_{ij}' % self.__sym
n_sumExpr = '+'.join([str(len(di._NumItem__arr)) for di in self.__data])
meanExpr = r'\cfrac{1}{%s}%s' % (n_sumExpr, sumExpr)
latex = LaTeX(r'\overline{%s}=%s=%s=%s' % (self.__sym, formula, meanExpr, self.__value.latex()))
if needValue:
return self.__value, latex
else:
return latex
else:
return self.__value
def __calStaUnc(self):
'''计算标准不确定度'''
if self.__uA != None: #判断是否需要计算A类不确定度
if self.__uA == 'single':
if BaseMeasure.AMethod == 'auto':
if len(self.__data) > 9: #样本数量最大的组为9以上时,使用Bessel法
uA = ACategory.Bessel(self.__data)
else:
uA = ACategory.Range(self.__data)
elif BaseMeasure.AMethod == 'Bessel':
uA = ACategory.Bessel(self.__data)
elif BaseMeasure.AMethod == 'Range':
uA = ACategory.Range(self.__data)
elif BaseMeasure.AMethod == 'CollegePhysics':
uA = ACategory.CollegePhysics(self.__data)
elif self.__uA == 'comb':
uA = ACategory.CombSamples(self.__data)
if self.__instrument != None: #判断是否需要计算B类不确定度
uB = BCategory.b(self.__instrument)
if self.__uA != None and self.__instrument != None:
closeUnit()
u = sqrt(uA**2 + uB**2)
u._Num__q = self.__q
openUnit()
elif self.__uA != None:
u = uA
elif self.__instrument != None:
u = uB
return u
def unc(self, process=False, needValue=False, remainOneMoreDigit=False):
'''获得不确定度
【参数说明】
1.process(可选,bool):是否展示计算过程。默认proces=False。
2.needValue(可选,bool):当获得计算过程时,是否返回计算结果。默认needValue=False。
3.remainOneMoreDigit(可选,bool):结果是否多保留一位有效数字。默认remainOneMoreDigit=False。
【返回值】
Num:标准不确定度数值。'''
if process:
oneMoreDigit = remainOneMoreDigit or (self.__uA != None and self.__instrument != None)
if self.__uA != None: #判断是否需要计算A类不确定度
if self.__uA == 'single':
if BaseMeasure.AMethod == 'auto':
if len(self.__data) > 9: #样本数量最大的组为9以上时,使用Bessel法
uA = ACategory.Bessel(self.__data, True, True, oneMoreDigit)
else:
uA = ACategory.Range(self.__data, True, True, oneMoreDigit)
elif BaseMeasure.AMethod == 'Bessel':
uA = ACategory.Bessel(self.__data, True, True, oneMoreDigit)
elif BaseMeasure.AMethod == 'Range':
uA = ACategory.Range(self.__data, True, True, oneMoreDigit)
elif BaseMeasure.AMethod == 'CollegePhysics':
uA = ACategory.CollegePhysics(self.__data, True, True, oneMoreDigit)
elif self.__uA == 'comb':
uA = ACategory.CombSamples(self.__data, BaseMeasure.AMethod, True, True, self.__sym, oneMoreDigit)
if self.__instrument != None: #判断是否需要计算B类不确定度
uB = BCategory.b(self.__instrument, self.__sym, process, True, oneMoreDigit)
latex = LaTeX()
if self.__uA != None:
latex.add(uA[1])
uA = uA[0]
if self.__instrument != None:
latex.add(uB[1])
uB = uB[0]
if self.__uA != None and self.__instrument != None:
closeUnit()
u = sqrt(uA**2 + uB**2)
u._Num__q = self.__q
openUnit()
if not remainOneMoreDigit:
u.cutOneDigit()
latex.add(r'u_{%s}=\sqrt{{u_{%s A}}^{2}+{u_{%s B}}^{2}}=\sqrt{%s^{2}+%s^{2}}=%s' % (self.__sym, self.__sym, self.__sym, uA.dlatex(1), uB.dlatex(1), u.latex()))
elif self.__uA != None:
u = uA
if not remainOneMoreDigit:
u.cutOneDigit()
latex.add(r'u_{%s}=u_{%s A}=%s' % (self.__sym, self.__sym, u.latex()))
elif self.__instrument != None:
u = uB
if not remainOneMoreDigit:
u.cutOneDigit()
latex.add(r'u_{%s}=u_{%s B}=%s' % (self.__sym, self.__sym, u.latex()))
if self._Measure__K != None:
P = measure.KTable[self._Measure__K]
u = u * self._Measure__K
if not remainOneMoreDigit:
u.cutOneDigit()
latex.add(r'u_{%s,%s}=%g u_{%s}=%s' % (self.__sym, P[1], self._Measure__K, self.__sym, u.latex()))
if needValue:
return u, latex
else:
return latex
else:
uc = self.__staUnc
if self._Measure__K != None:
uc = uc * self._Measure__K
if remainOneMoreDigit:
uc.remainOneMoreDigit()
return uc
def uncLSym(self):
'''获得不确定度公式的LaTeX符号
【返回值】
LSym:不确定度公式的LaTeX符号。'''
assert measure.Measure.process, 'Measure.process为False时,无法获取LSym'
return LSym('u_{%s}' % self.__sym, self.unc())
def __str__(self):
'''获得测量值和不确定度的字符串形式
【返回值】
str:(测量值±不确定度),如已给出单位,会附加单位'''
unitExpr = format_units_unicode(self.__q)
val = self.value()
u = self.unc()
sciDigit = val._Num__sciDigit()
if self.useRelUnc:
ur = u / val
ur.setIsRelative(True)
expr = r'%s(1±%s)%s' % (val.strNoUnit(), ur, unitExpr)
else:
if sciDigit == 0:
u._Num__setDigit(val._Num__d_front, val._Num__d_behind, val._Num__d_valid)
while float(u.strNoUnit()) == 0:
u.remainOneMoreDigit()
expr = r'%s±%s' % (val.strNoUnit(), u.strNoUnit())
if unitExpr != '':
expr = '(%s)%s' % (expr, unitExpr)
else:
val *= 10**(-sciDigit)
u *= 10**(-sciDigit)
u._Num__setDigit(val._Num__d_front, val._Num__d_behind, val._Num__d_valid)
while float(u.strNoUnit()) == 0:
u.remainOneMoreDigit()
expr = r'(%s±%s)×10%s%s' % (val.strNoUnit(), u.strNoUnit(), usub(sciDigit), unitExpr)
return expr
def __repr__(self):
'''获得测量值和不确定度的字符串形式
【返回值】
str:(测量值±不确定度),如已给出单位,会附加单位'''
return self.__str__()
<<<<<<< HEAD
unitExpr = format_units_latex(self.__q)
=======
>>>>>>> fcd8aeb38c983d9242fa950ef4c982492c7b950e
val = self.value()
u = self.unc()
sciDigit = val._Num__sciDigit()
if self.useRelUnc:
ur = u / val
ur.setIsRelative(True)
expr = r'%s\left(1 \pm %s\right)%s' % (val.strNoUnit(), ur.dlatex(), unitExpr)
else:
<<<<<<< HEAD
if sciDigit == 0:
u._Num__setDigit(val._Num__d_front, val._Num__d_behind, val._Num__d_valid)
while float(u.strNoUnit()) == 0:
u.remainOneMoreDigit()
expr = r'\left(%s \pm %s\right)%s' % (val.strNoUnit(), u.strNoUnit(), unitExpr)
else:
val *= 10**(-sciDigit)
u *= 10**(-sciDigit)
u._Num__setDigit(val._Num__d_front, val._Num__d_behind, val._Num__d_valid)
while float(u.strNoUnit()) == 0:
u.remainOneMoreDigit()
expr = r'\left(%s \pm %s\right)\times 10^{%d}%s' % (val.strNoUnit(), u.strNoUnit(), sciDigit, unitExpr)
return expr
=======
val *= 10**(-sciDigit)
u *= 10**(-sciDigit)
u._Num__setDigit(val._Num__d_front, val._Num__d_behind, val._Num__d_valid)
while float(u.strNoUnit()) == 0:
u.remainOneMoreDigit()
expr = r'\left(%s \pm %s\right)\times 10^{%d}%s' % (val.strNoUnit(), u.strNoUnit(), sciDigit, unitExpr)
return expr
def _repr_latex_(self):
return r'$\begin{align}%s\end{align}$' % self.latex()
>>>>>>> fcd8aeb38c983d9242fa950ef4c982492c7b950e
| [
171,
119,
123,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
30030,
3158,
220,
718,
1160,
25,
3023,
25,
1433,
2864,
198,
198,
31,
9800,
25,
2124,
278,
81,
506,
13670,
198,
37811,
198,
198,
... | 1.438818 | 13,803 |
# Author: Fabio Rodrigues Pereira
# E-mail: fabior@uio.no
# Author: Per Morten Halvorsen
# E-mail: pmhalvor@uio.no
# Author: Eivind Grønlie Guren
# E-mail: eivindgg@ifi.uio.no
try:
from exam.utils.preprocessing import OurDataset, pad_b
from exam.utils.models import Transformer, TransformerMTL
except:
from utils.preprocessing import OurDataset, pad_b
from utils.models import Transformer, TransformerMTL
from torch.utils.data import DataLoader
import torch
NORBERT = "/cluster/shared/nlpl/data/vectors/latest/216"
# NORBERT = 'exam/saga/216'
train_file = '/cluster/projects/nn9851k/IN5550/fabior/train.conll'
dev_file = '/cluster/projects/nn9851k/IN5550/fabior/dev.conll'
test_file = '/cluster/projects/nn9851k/IN5550/fabior/test.conll'
# train_file = 'exam/data/train.conll'
# dev_file = 'exam/data/dev.conll'
# test_file = 'exam/data/test.conll'
train_dataset = OurDataset(
data_file=train_file,
specify_y='POLARITY',
NORBERT_path=NORBERT,
tokenizer=None
)
# x_ds, y_ds, att_ds = next(iter(train_dataset))
# sentence_tk_ds = train_dataset.tokenizer.convert_ids_to_tokens(x_ds)
# sentence_ds = train_dataset.tokenizer.decode(x_ds)
dev_dataset = OurDataset(
data_file=dev_file,
specify_y='POLARITY',
NORBERT_path=None,
tokenizer=train_dataset.tokenizer
)
test_dataset = OurDataset(
data_file=test_file,
specify_y='POLARITY',
NORBERT_path=None,
tokenizer=train_dataset.tokenizer
)
train_loader = DataLoader(
dataset=train_dataset,
batch_size=32,
shuffle=True,
collate_fn=lambda batch: pad_b(batch=batch,
IGNORE_ID=train_dataset.IGNORE_ID)
)
# x, y, att = next(iter(train_loader))
dev_loader = DataLoader(
dataset=dev_dataset,
batch_size=1,
shuffle=True,
collate_fn=lambda batch: pad_b(batch=batch,
IGNORE_ID=train_dataset.IGNORE_ID)
)
# x1, y1, att1 = next(iter(dev_loader))
test_loader = DataLoader(
dataset=test_dataset,
batch_size=1,
shuffle=True,
collate_fn=lambda batch: pad_b(batch=batch,
IGNORE_ID=train_dataset.IGNORE_ID)
)
# x2, y2, att2 = next(iter(test_loader))
model_polarity = Transformer(
NORBERT=NORBERT,
tokenizer=train_dataset.tokenizer,
num_labels=3,
IGNORE_ID=train_dataset.IGNORE_ID,
device="cuda" if torch.cuda.is_available() else "cpu",
epochs=10, # best is 6
lr_scheduler=False,
factor=0.1,
lrs_patience=2,
loss_funct='cross-entropy',
random_state=1,
verbose=True,
lr=0.00001,
momentum=0.9,
epoch_patience=1,
label_indexer=None,
optmizer='AdamW'
)
model_polarity.fit(
train_loader=train_loader,
verbose=True,
dev_loader=dev_loader,
need_pipeline=torch.load("/cluster/projects/nn9851k/IN5550/fabior/"
"transformer_bio.pt")
)
binary_f1, propor_f1 = model_polarity.evaluate(test_loader)
# torch.save(model_polarity, "exam/transformer_polarity.pt")
| [
2,
6434,
25,
14236,
952,
16114,
947,
17229,
8704,
198,
2,
412,
12,
4529,
25,
7843,
1504,
31,
84,
952,
13,
3919,
198,
198,
2,
6434,
25,
2448,
10788,
268,
11023,
85,
669,
268,
198,
2,
412,
12,
4529,
25,
9114,
14201,
20867,
31,
84,... | 2.137199 | 1,414 |
import sys
from greenery.lego import lego, parse
from greenery.fsm import FSM

# Gather patterns from the command line, or prompt for them interactively
# when none were supplied.  An empty interactive line ends input.
regexes = list(sys.argv[1:])
if not regexes:
    from ast import literal_eval
    while True:
        entry = input("regex|")
        if not entry:
            break
        # Accept quoted or raw-string style input, e.g. '"a.b"' or r"\d+".
        if entry[0] in '\'"' or entry[:2] in ('r"', "r'"):
            entry = literal_eval(entry)
        regexes.append(entry)

if len(regexes) < 2:
    print("Please supply several regexes to compute their intersection, union and concatenation.")
    print("E.g. \"19.*\" \"\\d{4}-\\d{2}-\\d{2}\"")
else:
    # Parse every pattern once, then derive the finite-state machines.
    parsed = [parse(pattern) for pattern in regexes]
    machines = [pattern.to_fsm() for pattern in parsed]
    print(f"Have Intersection: {not FSM.intersection(*machines).empty()}")
    print(f"Intersection: {lego.intersection(*parsed).reduce()!s}")
    print(f"Union: {lego.union(*parsed).reduce()!s}")
    print(f"Concatenation: {lego.concatenate(*parsed).reduce()!s}")
| [
11748,
25064,
198,
6738,
10536,
24156,
13,
1455,
78,
1330,
1232,
78,
11,
21136,
198,
6738,
10536,
24156,
13,
69,
5796,
1330,
376,
12310,
198,
198,
260,
25636,
274,
796,
1351,
7,
17597,
13,
853,
85,
58,
16,
25,
12962,
198,
198,
361,
... | 2.237288 | 413 |
# MMDetection-style config: Faster R-CNN (ResNet-50 + FPN, Caffe weights) with
# domain adaptation (type='gpa' alignment losses) from PIROPO (source) to
# MW-18Mar (target); single 'person' class.
# model
model = dict(
    type='TwoStageDetectorDA',  # use adaptive model
    # pretrained='open-mmlab://detectron2/resnet50_caffe', # only for backbone, overwritten by `load_from=...`
    backbone=dict(type='ResNet',
                  depth=50,
                  num_stages=4,
                  out_indices=(0, 1, 2, 3),
                  frozen_stages=1,
                  norm_cfg=dict(type='BN', requires_grad=False),
                  norm_eval=True,
                  style='caffe'),
    neck=dict(type='FPN', in_channels=[256, 512, 1024, 2048], out_channels=256, num_outs=5),
    rpn_head=dict(type='RPNHead',
                  in_channels=256,
                  feat_channels=256,
                  anchor_generator=dict(type='AnchorGenerator',
                                        scales=[8],
                                        ratios=[0.5, 1.0, 2.0],
                                        strides=[4, 8, 16, 32, 64]),
                  bbox_coder=dict(type='DeltaXYWHBBoxCoder',
                                  target_means=[0.0, 0.0, 0.0, 0.0],
                                  target_stds=[1.0, 1.0, 1.0, 1.0]),
                  loss_cls=dict(type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
                  loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
    roi_head=dict(
        type='StandardRoIHeadAdaptive',  # special head that returns more information
        bbox_roi_extractor=dict(type='SingleRoIExtractor',
                                roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
                                out_channels=256,
                                featmap_strides=[4, 8, 16, 32]),
        bbox_head=dict(
            type='Split2FCBBoxHeadAdaptive',
            in_channels=256,
            fc_out_channels=1024,
            roi_feat_size=7,
            num_classes=1,  # for pedestrian detection
            bbox_coder=dict(type='DeltaXYWHBBoxCoder',
                            target_means=[0.0, 0.0, 0.0, 0.0],
                            target_stds=[0.1, 0.1, 0.2, 0.2]),
            reg_class_agnostic=False,
            loss_cls=dict(type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
            loss_bbox=dict(type='L1Loss', loss_weight=1.0))),
    # Domain-adaptation losses: 'gpa' alignment applied to the RoI features
    # and to the R-CNN classifier features, with different intra/inter weights.
    train_cfg=dict(train_source=False,
                   da=[
                       dict(type='gpa',
                            feat='feat_roi',
                            loss_weights=dict(intra=10.0, inter=0.1),
                            mode='prediction',
                            lambd=1),
                       dict(type='gpa',
                            feat='feat_rcnn_cls',
                            loss_weights=dict(intra=100.0, inter=0.1),
                            mode='prediction',
                            lambd=1),
                   ],
                   rpn=dict(assigner=dict(type='MaxIoUAssigner',
                                          pos_iou_thr=0.7,
                                          neg_iou_thr=0.3,
                                          min_pos_iou=0.3,
                                          match_low_quality=True,
                                          ignore_iof_thr=-1),
                            sampler=dict(type='RandomSampler',
                                         num=256,
                                         pos_fraction=0.5,
                                         neg_pos_ub=-1,
                                         add_gt_as_proposals=False),
                            allowed_border=-1,
                            pos_weight=-1,
                            debug=False),
                   rpn_proposal=dict(nms_pre=2000,
                                     max_per_img=1000,
                                     nms=dict(type='nms', iou_threshold=0.7),
                                     min_bbox_size=0),
                   rcnn=dict(assigner=dict(type='MaxIoUAssigner',
                                           pos_iou_thr=0.5,
                                           neg_iou_thr=0.5,
                                           min_pos_iou=0.5,
                                           match_low_quality=False,
                                           ignore_iof_thr=-1),
                             sampler=dict(type='RandomSampler',
                                          num=512,
                                          pos_fraction=0.25,
                                          neg_pos_ub=-1,
                                          add_gt_as_proposals=False),
                             pos_weight=-1,
                             debug=False)),
    test_cfg=dict(rpn=dict(nms_pre=1000, max_per_img=1000, nms=dict(type='nms', iou_threshold=0.7), min_bbox_size=0),
                  rcnn=dict(score_thr=0.05, nms=dict(type='nms', iou_threshold=0.5), max_per_img=100)))
# data
dataset_type = 'CocoDataset'
data_root_src = 'data/PIROPO/'    # source domain
data_root_tgt = 'data/MW-18Mar/'  # target domain
classes = ('person', )
# Mean-only normalization (std=1.0) with to_rgb=False, matching the
# Caffe-pretrained backbone's expected input.
img_norm_cfg = dict(mean=[103.53, 116.28, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline_src = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', img_scale=(800, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
train_pipeline_tgt = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', img_scale=(800, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='MultiScaleFlipAug',
         img_scale=(1333, 800),
         flip=False,
         transforms=[
             dict(type='Resize', keep_ratio=True),
             dict(type='RandomFlip'),
             dict(type='Normalize', **img_norm_cfg),
             dict(type='Pad', size_divisor=32),
             dict(type='ImageToTensor', keys=['img']),
             dict(type='Collect', keys=['img'])
         ])
]
data = dict(
    samples_per_gpu=4,
    workers_per_gpu=2,
    train_src=dict(  # source domain training set
        type=dataset_type,
        ann_file=data_root_src + 'omni_training.json',
        img_prefix=data_root_src,
        pipeline=train_pipeline_src,
        classes=classes),
    train_tgt=dict(  # target domain training set
        type=dataset_type,
        ann_file=data_root_tgt + 'training.json',
        img_prefix=data_root_tgt,
        pipeline=train_pipeline_tgt,
        classes=classes),
    val=dict(  # validation set from target domain
        type=dataset_type,
        ann_file=data_root_tgt + 'test.json',
        img_prefix=data_root_tgt,
        pipeline=test_pipeline,
        classes=classes),
    test=dict(  # test set from target domain
        type=dataset_type,
        ann_file=data_root_tgt + 'test.json',
        img_prefix=data_root_tgt,
        pipeline=test_pipeline,
        classes=classes))
# training and optimizer
# fine-tuning: smaller lr, freeze FPN (neck), freeze RPN
optimizer = dict(
    type='SGD',
    lr=0.001,
    momentum=0.9,
    weight_decay=0.0001,
)
optimizer_config = dict(grad_clip=None)
lr_config = dict(policy='fixed')
# change to iteration based runner with 1776*x iterations for training on PIROPO
runner = dict(type='EpochBasedRunnerAdaptive', max_epochs=40)  # use adaptive runner that loads 2 datasets
checkpoint_config = dict(interval=200)  # i.e. no additional checkpoints
evaluation = dict(interval=1, save_best='bbox_mAP_50', metric='bbox')
log_config = dict(interval=1, hooks=[dict(type='TextLoggerHook')])
custom_hooks = [dict(type='NumClassCheckHook')]
dist_params = dict(backend='nccl')
log_level = 'INFO'
# load_from = 'mmdetection/checkpoints/faster_rcnn_r50_fpn_1x_coco-person_20201216_175929-d022e227.pth'
# presumably the "_split" checkpoint has its bbox head re-keyed to match
# Split2FCBBoxHeadAdaptive -- TODO confirm
load_from = 'mmdetection/checkpoints/faster_rcnn_r50_fpn_1x_coco-person_20201216_175929-d022e227_split.pth'
resume_from = None
workflow = [('train', 1)]
work_dir = 'work_dirs/da'
| [
2,
2746,
198,
19849,
796,
8633,
7,
198,
220,
220,
220,
2099,
11639,
7571,
29391,
11242,
9250,
5631,
3256,
220,
1303,
779,
29605,
2746,
198,
220,
220,
220,
1303,
2181,
13363,
11639,
9654,
12,
3020,
23912,
1378,
15255,
478,
1313,
17,
14... | 1.717172 | 4,851 |
import json
import os.path as osp
import numpy as np
from torch.utils.data import Dataset
| [
11748,
33918,
198,
11748,
28686,
13,
6978,
355,
267,
2777,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
28034,
13,
26791,
13,
7890,
1330,
16092,
292,
316,
628
] | 3.172414 | 29 |
from django.utils import timezone
from django import forms
from crispy_forms.helper import FormHelper
from crispy_forms.layout import *
from inventory.devices.models import Ipad, Adapter, Headphones, Case
class DeviceForm(forms.Form):
    '''Form for creating a new device.

    Collects the device type plus descriptive/identifying information;
    description, responsible party, purchase date and serial number are
    all optional.
    '''
    # (value, label) choices for the device_type field.
    DEVICE_TYPES = (
        ('ipad', 'iPad'),
        ('headphones', 'Headphones'),
        ('adapter', 'Power adapter'),
        ('case', 'iPad case'),
    )
    device_type = forms.ChoiceField(label='Device type: ', widget=forms.Select,
                                    choices=DEVICE_TYPES,
                                    required=True)
    description = forms.CharField(label='Description: ', widget=forms.Textarea,
                                  max_length=1000, help_text='Optional',
                                  required=False)
    responsible_party = forms.CharField(label='Responsible party: ',
                                        help_text='Optional',
                                        required=False)
    make = forms.CharField(label='Make: ')
    # BUG FIX: pass the callable `timezone.now`, not `timezone.now()`.
    # Calling it here would evaluate once at import time, freezing the
    # default date at server start; Django re-evaluates a callable
    # `initial` each time the form is rendered.
    purchased_at = forms.DateField(label="Purchased at: ",
                                   initial=timezone.now,
                                   required=False, help_text='Optional')
    serial_number = forms.CharField(label="Serial number: ",
                                    required=False, help_text='Optional')
class CheckinForm(forms.Form):
    '''Form for checking in a device.

    Captures the device's physical condition (required) and an optional
    free-text comment.
    '''
    # (value, label) choices for the condition field.
    CONDITIONS = (
        ('excellent', 'Excellent'),
        ('scratched', 'Scratched'),
        ('broken', 'Broken'),
        ('missing', 'Missing')
    )
    condition = forms.ChoiceField(label='Condition: ', widget=forms.Select,
                                  choices=CONDITIONS,
                                  required=True)
    comment = forms.CharField(label="Comment: ", widget=forms.Textarea,
                              max_length=500,
                              required=False)
class DeviceUpdateForm(forms.ModelForm):
'''Form for updating a device's attributes.
'''
| [
6738,
42625,
14208,
13,
26791,
1330,
640,
11340,
198,
6738,
42625,
14208,
1330,
5107,
198,
6738,
42807,
62,
23914,
13,
2978,
525,
1330,
5178,
47429,
198,
6738,
42807,
62,
23914,
13,
39786,
1330,
1635,
198,
6738,
13184,
13,
42034,
13,
27... | 1.910806 | 1,166 |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvidia.dali.backend_impl import *
import numpy as np
from numpy.testing import assert_array_equal, assert_allclose
#if 0 // TODO(spanev): figure out which return_value_policy to choose
#def test_tensorlist_getitem_slice():
# arr = np.random.rand(3, 5, 6)
# tensorlist = TensorListCPU(arr, "NHWC")
# two_first_tensors = tensorlist[0:2]
# assert(type(two_first_tensors) == tuple)
# assert(type(two_first_tensors[0]) == TensorCPU)
| [
2,
15069,
357,
66,
8,
13130,
11,
15127,
23929,
44680,
6234,
13,
1439,
2489,
10395,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
284... | 3.129794 | 339 |
# -*- coding: UTF-8 -*-
"""
Classes to manage the VolumeNumbers, PageNumbers, and PageRanges, as would be found in books and journals.
Contains classes:
VolumeNumber - Class to encapsulate journal and book volume numbers, as their own data type.
PageNumber - Class to encapsulate pagenumbers for journal and book as their own type, as they have special non-numeric properties!
PageRange - Class to represent a range of pages, e.g., 1-200.
This module adapted from a much older module used in PEPXML to compile PEP instances since 200x!
** Slowly being adopted to opas **
"""
#TODO: At some point these could be separated into their own modules.
#MODIFIED:
# 2009-07-17 - Major cleanup of docstrings and added doctests
# 2022-06-09 - Adapted and converted from Python 2.x for Opas, some issues in converting all
# the numbers in doctests, but
# these limitations should not affect usage here.
#
import sys, os.path
import logging
logger = logging.getLogger(__name__)
PROJECT_ROOT = os.path.abspath(os.path.join(
os.path.dirname(__file__),
os.pardir)
)
sys.path.append(PROJECT_ROOT)
# import six
import re
import opasGenSupportLib as opasgenlib
# import sciHLMath
# import sciSupport
LOCALID = "LocalID" # needed at module level because it is the default for an
                    # argument, and the local class definition can't be used there.
def my_cmp(a, b):
    """
    Python 3 replacement for the Python 2 builtin cmp.

    Returns -1/0/1 for orderable, non-None operands.  If either operand is
    None, returns True; if the comparison raises ValueError/TypeError, the
    error is printed and False is returned (preserving legacy behavior).
    """
    # Legacy quirk: a None operand short-circuits to True.
    if a is None or b is None:
        return True
    result = False
    try:
        result = (a > b) - (a < b)
    except ValueError as err:
        print(f"ValueError: {err}")
    except TypeError as err:
        print(f"TypeError: {err}")
    return result
def split_page_range(str_pgrg):
    """
    Split a page-range string into (start, end, full-range) string components.

    A bare page number yields the same value for start and end.  Returns
    None when str_pgrg contains no page number at all.

    >>> split_page_range("120-330")
    ('120', '330', '120-330')
    >>> split_page_range("10-33")
    ('10', '33', '10-33')
    >>> split_page_range("10")
    ('10', '10', '10')
    """
    # Raw string: the previous non-raw pattern used the invalid escape "\-",
    # which triggers a DeprecationWarning on modern Python.  The hyphen needs
    # no escaping outside a character class.
    patPgRg = r"(?P<pgrg>(?P<pgstart>[1-9][0-9]*)(-(?P<pgend>[1-9][0-9]*))?)"
    m = re.search(patPgRg, str_pgrg)
    if m is not None:
        bpgrg = m.group("pgrg")
        bpgstart = m.group("pgstart")
        bpgend = m.group("pgend")
        if not bpgend:  # open-ended range: treat as a single page
            bpgend = bpgstart
        return (bpgstart, bpgend, bpgrg)
#----------------------------------------------------------------------------------------
# CLASS DEF: Vol
#----------------------------------------------------------------------------------------
class VolumeNumber:
    """
    Class to encapsulate journal and book volume numbers, as their own data type.
    A volume number can now be 1 to 4 digits! It can have a letter suffix (repeated), and a special "S" suffix (supplement)
    @TODO: That's little too loose in the suffix area; should be tightened.
    >>> VolumeNumber("IVS")
    4S
    >>> PageNumber("IVS")
    IVS
    >>> VolumeNumber("022A")
    22A
    >>> VolumeNumber("5,7")
    5
    >>> VolumeNumber("IV")
    4
    >>> VolumeNumber("IV (Suppl)")
    4
    >>> VolumeNumber("4S")
    4S
    >>> VolumeNumber("4AS")
    4AS
    >>> VolumeNumber("4ABCS")
    4ABCS
    >>> VolumeNumber("4SABCS")
    4SABCS
    >>> VolumeNumber(22)
    22
    >>> VolumeNumber('22S')
    22S
    >>> VolumeNumber("22S").volID(noLeadingZeros=True)
    '22S'
    >>> VolumeNumber("22Suppl")
    22S
    >>> VolumeNumber("022S").volID(noSuffix=True)
    '022'
    >>> VolumeNumber("022S").volID(noSuffix=True, noLeadingZeros=True)
    '22'
    >>> a = VolumeNumber("IV")
    >>> VolumeNumber(a, volSuffix="G")
    4G
    >>> c = VolumeNumber("10", volSuffix="S")
    >>> "Val %s" % c # when forced to string, it includes the suffix
    'Val 10S'
    >>> "Val %d" % c # note when forced to int, it drops the volSuffix!
    'Val 10'
    >>> str(VolumeNumber(c)) # this should show the "S" suffix
    '10S'
    >>> c.volID()
    '010S'
    """
    # Matches the various spellings of "supplement" (sup., suppl, supplement...).
    # Raw string avoids the invalid "\." escape warning of the previous literal.
    rgxSuppl = re.compile(r"sup(p|pl|ple|plemen(t?))\.?", re.IGNORECASE)
    #--------------------------------------------------------------------------------
    def __init__(self, volNum=0, volSuffix=None):
        """
        Initialize the volume-number object.

        If volNum is already a VolumeNumber it is copied; a volSuffix
        argument (when not None) overrides the copied suffix, so passing
        volSuffix="" removes it.  Otherwise accepts ints, arabic strings
        ("22"), roman strings ("IV"), and strings with letter/supplement
        suffixes ("22S", "4A"); for list-like strings ("5,7", "5-7") only
        the first number is kept.
        """
        if isVolumeNumber(volNum):
            # if argument is already a volume number instance.
            self.volPrefix = volNum.volPrefix
            self.volNumber = volNum.volNumber
            if volSuffix is None:
                self.volSuffix = volNum.volSuffix
            else:
                self.volSuffix = volSuffix
            self.volRomanLower = volNum.volRomanLower
            self.volOriginalType = volNum.volOriginalType
        else:
            self.volNumber = ""
            self.volPrefix = ""
            self.volRomanLower = False # lowercase roman numerals
            self.volOriginalType = "A"
            if volSuffix is None:
                self.volSuffix = volSuffix = ""
            else:
                self.volSuffix = volSuffix
            if not opasgenlib.is_empty(volNum):
                # strip a trailing supplement "S" before numeric parsing
                try:
                    if isinstance(volNum, str) and volNum[-1].upper() == "S":
                        volSuffix = "S"
                        volNum = volNum[:-1]
                except Exception as e:
                    logger.error(f"Can't remove vol suffix {volNum} {e}")
            # NOTE(review): the stripped "S" above is stored only in the local
            # volSuffix and is never written back to self.volSuffix on this
            # path, yet the 'VolumeNumber("IVS") -> 4S' doctest expects it to
            # survive -- confirm whether __repr__/volID (defined elsewhere)
            # compensate, or whether an assignment was lost.
            if volNum is None or volNum == "":
                logging.warning("Bad volNum. Set to 0")
                volNum = 0
            if isinstance(volNum, str): # supports string and unicode Was if type(volNum) == type(""):
                # multi-volume strings like "5,7" or "5-7": keep the first number
                volNum = re.split(r"[,/\-\s&]", volNum)
                volNum = volNum[0]
                volNum = opasgenlib.trimPunctAndSpaces(str(volNum), punct_set=[',', '.', ' ', ':', ';', '(', '\t', '"', "'", "-"])
            if opasgenlib.isRoman(volNum):
                self.volOriginalType = "R"
                self.volNumber = opasgenlib.convRomanToArabic(volNum)
                if volNum.islower():
                    self.volRomanLower = True
            else:
                prefix, self.volStr, self.volSuffixEmbedded = opasgenlib.removeLetterPrefixAndSuffix(volNum)
                # if a suffix was supplied, use that as precedent. Embedded suffix is stored anyway
                if self.volSuffix == "" and self.volSuffixEmbedded is not None:
                    self.volSuffix = self.volSuffixEmbedded
                self.volNumber = opasgenlib.convertStringToArabic(self.volStr)
                if self.volSuffix in ["-", "&"]:
                    self.volSuffix = ""
                if self.volSuffix is None:
                    self.volSuffix = ""
                # standardize the "Roman" prefix
                if prefix.lower() == "r":
                    self.volOriginalType = prefix
                    # BUG FIX: the f-prefix was missing, so the literal text
                    # "{prefix}" appeared in the error message.
                    raise ValueError(f"Volume Number Prefix Found: {prefix}")
    #--------------------------------------------------------------------------------
    # NOTE(review): the doctests above reference volID(), __repr__/__str__,
    # __int__ and __cmp__, none of which are defined in this section -- confirm
    # they are provided elsewhere before using this class stand-alone.
    #--------------------------------------------------------------------------------
    def __sub__(self, val2):
        """
        Volume numbers cannot be subtracted; always raises ValueError.
        """
        raise ValueError("Error: You cannot subtract two volume numbers!")
    #--------------------------------------------------------------------------------
    def __add__(self, val2):
        """
        Volume numbers cannot be added; always raises ValueError.
        """
        raise ValueError("Error: You cannot add two volume numbers!")
    #--------------------------------------------------------------------------------
    def __eq__(self, other):
        """
        Assess equality of two objects (delegates to __cmp__).
        """
        retVal = (0 == self.__cmp__(other))
        return retVal
    #--------------------------------------------------------------------------------
#----------------------------------------------------------------------------------------
# CLASS DEF: PageNumber
#----------------------------------------------------------------------------------------
class PageNumber:
    """
    Class to encapsulate pagenumbers for journal and book as their own type, as they have special non-numeric properties!
    A volume number can now be 1 to 4 digits! It can have a letter suffix (repeated), and a special "S" suffix (supplement)
    @TODO: That's little too loose in the suffix area; should be tightened.
    >>> PageNumber("iv").isRoman()
    True
    >>> PageNumber("022A")
    22A
    >>> PageNumber("IV")
    IV
    >>> PageNumber("IVS")
    0
    >>> PageNumber("4S")
    4S
    >>> PageNumber("4AS")
    4AS
    >>> PageNumber("4ABCS")
    4ABCS
    >>> PageNumber("4SABCS")
    4SABCS
    >>> PageNumber("4").isRoman()
    False
    >>> PageNumber("4a").isRoman()
    False
    >>> PageNumber("iva").isRoman()
    False
    >>> PageNumber(3) > PageNumber(6)
    False
    >>> PageNumber(6) > PageNumber(3)
    True
    >>> PageNumber("3") > PageNumber(6)
    False
    >>> PageNumber("6") > PageNumber(3)
    True
    >>> PageNumber("R7") > PageNumber("R2")
    True
    >>> PageNumber("R2") > PageNumber("R7")
    False
    >>> PageNumber("R1") > PageNumber("R2")
    False
    >>> PageNumber("R7") > PageNumber("2")
    False
    >>> PageNumber("R7") < PageNumber("2")
    True
    >>> PageNumber("R13") < PageNumber("7")
    True
    >>> PageNumber("R3") < PageNumber("R1")
    False
    >>> PageNumber("R3a") < PageNumber("R1a")
    False
    >>> PageNumber("R13a") < PageNumber("7a")
    True
    >>> PageNumber("3") > PageNumber("R7")
    True
    >>> PageNumber("3a") > PageNumber("6b")
    False
    >>> PageNumber("3a") > PageNumber("6a")
    False
    >>> PageNumber("6a") > PageNumber("3a")
    True
    >>> PageNumber("6a") > PageNumber("3b")
    True
    >>> PageNumber("3b") > PageNumber("3a")
    True
    >>> PageNumber("3b") < PageNumber("3a")
    False
    >>> PageNumber("iii") > PageNumber("xxii")
    False
    >>> PageNumber("v") > PageNumber("ii")
    True
    >>> PageNumber("3") == PageNumber("3")
    True
    >>> PageNumber("3") == PageNumber("3f")
    False
    """
    # NOTE(review): the doctests use <, >, == and __repr__, but the comparison
    # dunders and __repr__ are not defined in this section (only separator
    # comments remain) -- confirm they are provided elsewhere.
    LOCALID = LOCALID     # format keyword option (defined as a module constant because
                          # it's used as the default for an argument).
    FORCEINT = "ForceInt" # Use this to force the page number to an integer, EVEN if it's
                          # roman. Roman page numbers will be changed to negatives.
                          # This allows the number to be stored in a database where the field
                          # is integer.
    #--------------------------------------------------------------------------------
    def __init__(self, pgNum=0, forceRoman=None):
        """
        Initialize the pagenumber object, optionally setting the value to pgNum.
        If forceRoman = true, the int pgNum is flagged to be interpreted as roman (i.e.,
        isRoman will return true)
        """
        # Parsed components: "R" prefix marks roman numerals; pgSuffix holds a
        # trailing letter suffix; pgNumber is always the arabic value.
        self.pgSuffix = ""
        self.pgNumberText = repr(pgNum)
        self.pgPrefix = ""
        self.pgRomanLower = False  # lowercase roman numerals
        self.internalNotation = False
        self.pgNumber = 0
        self.pgNumberRoman = False
        pgSuffix = ""
        if isinstance(pgNum, PageNumber):
            # copy-construct from another PageNumber via its integer form
            pgNum = pgNum.forceInt()
        if not opasgenlib.is_empty(pgNum):
            # strip a trailing supplement "S" before testing for roman numerals
            try:
                if isinstance(pgNum, str) and pgNum[-1].upper() == "S":
                    pgSuffix = "S"
                    pgNum = pgNum[:-1]
            except Exception as e:
                logger.error(f"Can't remove vol suffix {pgNum} {e}")
            try:
                if opasgenlib.isRoman(pgNum):
                    self.pgNumberRoman = True
                    self.pgPrefix = "R"
                    self.pgNumber = opasgenlib.convRomanToArabic(pgNum)
                    self.pgSuffix = pgSuffix
                    if str(pgNum).islower():
                        self.pgRomanLower = True
                else:
                    # put any removed pgSuffix back, it wasn't roman
                    if pgSuffix != "":
                        pgNum += pgSuffix
                    self.pgPrefix, self.pgStr, self.pgSuffix = opasgenlib.removeLetterPrefixAndSuffix(pgNum)
                    self.pgNumber = opasgenlib.convertStringToArabic(self.pgStr)
                    #try:
                        #self.pgNumber = int(self.pgStr)
                    #except:
                        #logger.error(f"Argument value '{self.pgStr}' does not represent a known number")
                        #self.pgNumber = 0
                    # standardize the "Roman" prefix
                    if self.pgPrefix=="r" or self.pgPrefix=="-":
                        if self.pgNumber!=0:
                            self.pgPrefix = "R"
                        if self.pgNumber<0:
                            print("Negative!")
                            self.pgNumber = -self.pgNumber
                    if self.pgPrefix == "P" or self.pgPrefix == "p":
                        self.pgPrefix = ""
                    if self.pgPrefix == "PR":
                        self.pgPrefix = "R"
            except ValueError as e:
                print (f"ValueError: {e}")
        if forceRoman:
            self.pgPrefix = "R"
    #--------------------------------------------------------------------------------
    def isRoman(self):
        """
        Return true if the current value is Roman
        >>> pgNum = PageNumber("iv")
        >>> pgNum.isRoman()
        True
        >>> pgNum = PageNumber("3")
        >>> pgNum.isRoman()
        False
        """
        retVal = (True == (self.pgPrefix == "R"))
        return retVal
    #--------------------------------------------------------------------------------
    def forceInt(self):
        """
        Return an integer page number; if roman, return it as negative
        >>> pgNum = PageNumber("iv")
        >>> pgNum.forceInt()
        -4
        >>> pgNum = PageNumber("2")
        >>> pgNum.forceInt()
        2
        """
        pgNumber = self.pgNumber
        if pgNumber == None:
            retVal = 0
        else:
            if self.pgPrefix == "R":
                # roman pages map to negative ints so they sort before arabic
                retVal = -pgNumber
            else:
                retVal = pgNumber
        return retVal
    #--------------------------------------------------------------------------------
    def format(self, keyword=LOCALID, formatStr=None):
        """
        Format the page number as a localID (default) per the formatStr argument.
        Args:
          keyword = Only working value is LOCALID
          formatStr = Regular python format string, can be used to change significant digits.
        >>> pgNum = PageNumber("iv")
        >>> print (pgNum.format(keyword=pgNum.LOCALID))
        PR0004
        >>> print (pgNum.format())
        PR0004
        >>> pgNum = PageNumber("2")
        >>> print (pgNum.format(formatStr="%03d"))
        P002
        """
        if opasgenlib.is_empty(formatStr):
            formatStr="%04d"  # default: zero-padded to 4 digits
        if keyword == LOCALID:
            # localID form carries a leading "P" (plus "R" for roman pages)
            retVal = "P" + self.pgPrefix + (formatStr % int(self.pgNumber))
        else:
            retVal = self.pgPrefix + (formatStr % int(self.pgNumber))
        return retVal
    #--------------------------------------------------------------------------------
    def __add__(self, val2):
        """
        Add an integer offset to this page number in place and return self.
        """
        self.pgNumber = self.pgNumber + int(val2)
        return self
    # #--------------------------------------------------------------------------------
    # def __eq__(self, other):
    #     """
    #     Assess equality of two objects.
    #     """
    #     retVal = (0 == self.__cmp__(other))
    #     return retVal
    #--------------------------------------------------------------------------------
    def __sub__(self, val2):
        """
        Subtract an integer offset from this page number in place and return self.
        """
        self.pgNumber = self.pgNumber - int(val2)
        return self
    #--------------------------------------------------------------------------------
#----------------------------------------------------------------------------------------
# CLASS DEF: PageRange
#
# A range of pagenumbers
#----------------------------------------------------------------------------------------
class PageRange:
    """
    Class to represent a range of pages, e.g., 1-200.
    The start of the range is given by pgRg.pgStart, and the end by pgRg.pgEnd.
    These two page numbers are instances of the PageNumber object.
    A volume number can now be 1 to 4 digits! It can have a letter suffix (repeated), and a special "S" suffix (supplement)
    >>> (PageRange("5") > PageRange("12-14"))
    False
    >>> (PageRange("11") > PageRange("12-14"))
    False
    >>> (PageRange("15") > PageRange("12-14"))
    True
    >>> (PageRange(15) > PageRange("12-14"))
    True
    >>> (PageRange("14") > PageRange("12-14"))
    False
    >>> (PageRange("15") < PageRange("12-14"))
    False
    >>> (PageRange("11") < PageRange("12-14"))
    True
    >>> (PageRange("R14") < PageRange("12-14"))
    True
    >>> (PageRange("12") < PageRange("12-14"))
    False
    >>> (PageRange("14") < PageRange("12-14"))
    False
    >>> (PageRange("14") == PageRange("12-14"))
    False
    >>> (PageRange("13") == PageRange("12-14"))
    False
    >>> (PageRange("12") == PageRange("12-14"))
    False
    >>> PageRange("12-14").exactlyEqual(PageRange("12-14"))
    True
    >>> PageRange("12-13").exactlyEqual(PageRange("12-14"))
    False
    >>> PageRange("11-14").exactlyEqual(PageRange("12-14"))
    False
    >>> PageRange("11-12").exactlyEqual(PageRange("12-14"))
    False
    >>> PageRange("10-11").exactlyEqual(PageRange("12-14"))
    False
    >>> PageRange("12-14").exactlyEqual(PageRange("12-14"))
    True
    >>> PageRange("12-14").exactlyEqual(PageRange(10))
    False
    >>> PageRange("12-14").exactlyEqual(PageRange("10-11"))
    False
    >>> PageRange("12-14").contains(PageRange("13a"))
    True
    >>> PageRange("12-14").contains(PageRange("13"))
    True
    >>> PageRange("12-14").contains(PageRange("12"))
    True
    >>> PageRange("12-14").contains(PageRange("13"))
    True
    >>> PageRange("12-14").contains(PageRange("14"))
    True
    >>> PageRange("12-14").contains(PageRange("13c"))
    True
    >>> PageRange("12-14").contains(PageRange("11"))
    False
    >>> PageRange("12-14").contains(12)
    True
    >>> PageRange("12-14").contains("12")
    True
    >>> PageRange("12-14").contains(PageNumber("12"))
    True
    >>> PageRange("10-20").validate()
    True
    >>> PageRange("10a-20").validate()
    True
    >>> PageRange("10-20b").validate()
    True
    >>> PageRange("iii-iv").validate()
    True
    >>> PageRange("iii-20").validate()
    True
    >>> PageRange("5-2").validate()
    False
    >>> PageRange("5-iii").validate()
    False
    >>> PageRange("iii-20").validate(isRomanStart=False)
    False
    >>> PageRange("iii-20").validate(isRomanStart=True)
    True
    >>> PageRange("iii-20").validate()
    True
    >>> PageRange("iii").validate()
    True
    """
    # NOTE(review): the < / > doctests require ordering dunders on PageRange or
    # PageNumber that are not visible in this section -- confirm they exist
    # elsewhere.  Also, __eq__ is defined without __hash__, which makes
    # instances unhashable -- verify that is intended.
    #--------------------------------------------------------------------------------
    def __init__(self, pgRg="", sourceLabel=""):
        """
        Initialize the PageRange object, optionally setting the value to the supplied parameter pgrg.
        """
        # Accept an int, a PageNumber, or a "start-end" string; a single page
        # yields pgStart == pgEnd.
        if type(pgRg) == type(0):
            pgRgWork = str(pgRg)
        elif isinstance(pgRg, PageNumber):
            pgRgWork = repr(pgRg)
        else:
            pgRgWork = pgRg
        if pgRgWork != None:
            pgrgList = pgRgWork.split("-")
            pgStart = pgrgList[0]
            if len(pgrgList) > 1:
                pgEnd = pgrgList[1]
            else:
                pgEnd = pgrgList[0]
        # NOTE(review): if pgRgWork is None, pgStart/pgEnd are unbound here and
        # the lines below raise NameError -- confirm None is never passed.
        self.pgStart = PageNumber(pgStart)
        if pgEnd =="" or pgEnd==None:
            self.pgEnd = PageNumber(pgStart)
        else:
            self.pgEnd = PageNumber(pgEnd)
    #--------------------------------------------------------------------------------
    def __repr__(self):
        """
        Return the page range as a displayable string.
        """
        return self.__str__()
    #--------------------------------------------------------------------------------
    def __add__(self, val2):
        """
        Adding page ranges is not supported; always raises NotImplementedError.
        """
        raise NotImplementedError("Add is not supported for PageRanges.")
    #--------------------------------------------------------------------------------
    def __eq__(self, other):
        """
        Assess equality of two objects (delegates to __cmp__).
        """
        retVal = (0 == self.__cmp__(other))
        return retVal
    #--------------------------------------------------------------------------------
    def __str__(self):
        """
        Return the page range as a displayable string ("start-end", or just
        "start" for a single page).
        """
        if self.pgStart != self.pgEnd:
            start = self.pgStart
            # don't let a range start with 0
            if start==0:
                retVal = "1-%s" % (self.pgEnd)
            else:
                retVal = "%s-%s" % (start, self.pgEnd)
        else:
            retVal = "%s" % (self.pgStart)
        return retVal
    #--------------------------------------------------------------------------------
    def validate(self, isRomanStart=None, isRomanEnd=None):
        """
        Validate the page range. Return True if valid.

        The range is valid when pgStart <= pgEnd; pass isRomanStart=False /
        isRomanEnd=False to additionally reject roman start/end pages.
        """
        retVal = False
        if (self.pgStart <= self.pgEnd):
            retVal = True
        if isRomanStart==False and self.pgStart.isRoman():
            retVal = False
        if isRomanEnd==False and self.pgEnd.isRoman():
            retVal = False
        return retVal
    #--------------------------------------------------------------------------------
    def exactlyEqual(self, other):
        """
        Return True only when both endpoints match other's endpoints exactly.
        """
        retVal = False
        if (self.pgStart == other.pgStart and self.pgEnd == other.pgEnd):
            retVal = True
        return retVal
    #--------------------------------------------------------------------------------
    def eitherContains(self, other):
        """
        Compare two page ranges. If either is contained within the other, returns true
        """
        retVal = False
        if (self.pgStart >= other.pgStart and self.pgEnd <= other.pgEnd) \
           or (other.pgStart >= self.pgStart and other.pgEnd <= self.pgEnd):
            retVal = True
        return retVal
    #--------------------------------------------------------------------------------
    def contains(self, other):
        """
        Compare two page ranges. If other is contained in self, returns true

        Accepts a PageRange, a PageNumber, or anything a PageNumber can be
        built from (int, str); raises Exception if conversion fails.
        """
        retVal = False
        if isinstance(other, PageNumber):
            if (self.pgStart <= other.pgNumber and self.pgEnd >= other.pgNumber):
                retVal = True
        elif not isinstance(other, PageRange):
            # coerce scalar arguments (int/str) to a PageNumber first
            try:
                otherNum = PageNumber(other)
            except:
                raise Exception("Can't compare to {}".format(type(other)))
            if (self.pgStart <= otherNum and self.pgEnd >= otherNum):
                retVal = True
        else:
            if (self.pgStart <= other.pgStart and self.pgEnd >= other.pgEnd):
                retVal = True
        return retVal
    #--------------------------------------------------------------------------------
    def __cmp__(self, other=None):
        """
        Compare two page ranges.
        IMPORTANT: if one of the ranges contains the other, the return is equal (0)
        """
        retVal = 0
        if other==None or not isinstance(other, PageRange):
            retVal = 1
        else:
            try:
                if self.eitherContains(other):
                    # overlapping/nested ranges compare as equal
                    pass
                else:
                    try:
                        # NOTE(review): relies on PageNumber.__cmp__, which is
                        # not visible in this section -- confirm it exists.
                        retVal = self.pgStart.__cmp__(other.pgStart)
                    except Exception as e:
                        # other may be different type ot none
                        raise ValueError("Error: Wrong Type(s) %s - %s (%s)" % (other, type(other), e))
            except Exception as e:
                print("PgRg Exception %s" % e)
        return retVal
class PubYear:
"""
Class to encapsulate year for reference processing. Helps to clean up any irregularities.
A year must be 4 digits! If you have 2, it will assume the defaultCentury and add it.
Any characters following the year are stripped out.
@TODO: Perhaps detect range and store the second part of the range.
>>> print (PubYear("1922"))
1922
>>> print (PubYear("1922-25"))
1922
>>> print (PubYear("22-25"))
1922
>>> print (PubYear("25"))
1925
>>> print (PubYear("2014") + 1)
2015
"""
rgxSuppl = re.compile("sup(p|pl|ple|plemen(t?))\.?", re.IGNORECASE)
rgxYear = re.compile("((?P<baseYear>[1-9][0-9]([0-9][0-9])?)(?P<suffix>.*))", re.IGNORECASE)
#--------------------------------------------------------------------------------
def __init__(self, pubYear=0, defaultCentury="19"):
"""
Initialize year object
"""
self.yearValue = ""
self.yearSuffix = ""
if isinstance(pubYear, PubYear): # use callers object
self = pubYear
elif isinstance(pubYear, str): # supports string and unicode Was if type(volNum) == type(""):
m = self.rgxYear.match(pubYear)
if m != None:
self.yearValue = m.group("baseYear")
self.yearSuffix = m.group("suffix")
if len(self.yearValue) == 2:
# they left off the century
self.yearValue = "%s%s" % (defaultCentury, self.yearValue)
#--------------------------------------------------------------------------------
#--------------------------------------------------------------------------------
#--------------------------------------------------------------------------------
#--------------------------------------------------------------------------------
#--------------------------------------------------------------------------------
def __sub__(self, val2):
"""
Subtract from year
"""
val1 = self.yearValue
try:
retVal = int(val1) - int(val2)
except Exception as e:
print("Error: ", e)
return retVal
#--------------------------------------------------------------------------------
def __add__(self, val2):
"""
Add to year
"""
val1 = self.yearValue
try:
retVal = int(val1) + int(val2)
except Exception as e:
print("Error: ", e)
return retVal
#--------------------------------------------------------------------------------
def __eq__(self, other):
"""
Assess equality of two objects.
"""
retVal = (0 == self.__cmp__(other))
return retVal
#--------------------------------------------------------------------------------
#--------------------------------------------------------------------------------
#--------------------------------------------------------------------------------
#--------------------------------------------------------------------------------
#--------------------------------------------------------------------------------
def isVolumeNumber(val):
"""
Return true if this is a volume number instance
"""
if isinstance(val, VolumeNumber):
retVal = True
else:
retVal = False
return retVal
#--------------------------------------------------------------------------------
def unforceInt(pgNumber):
"""
If integer page number is negative, return a roman
"""
if pgNumber < 0:
pgNumber = -pgNumber
retVal = PageNumber(pgNumber, forceRoman=True)
else:
retVal = PageNumber(pgNumber)
return retVal
#==================================================================================================
# Main Routines
#==================================================================================================
if __name__ == "__main__":
import sys
print ("Running in Python %s" % sys.version_info[0])
if 1:
import doctest
doctest.testmod()
print ("Done with tests.")
sys.exit(0)
else:
print (PageNumber("v") < PageNumber("iii"))
print (PageNumber("v") < 3)
print (PageNumber("iii") < PageNumber("v"))
print (PageNumber("v") < PageNumber("2"))
print (PageNumber("v") < PageNumber(2))
print (PageNumber("v") > PageNumber(2))
print (PageNumber(2) > PageNumber("v"))
| [
2,
532,
9,
12,
19617,
25,
41002,
12,
23,
532,
9,
12,
198,
198,
37811,
198,
9487,
274,
284,
6687,
262,
14701,
49601,
11,
7873,
49601,
11,
290,
7873,
49,
6231,
11,
355,
561,
307,
1043,
287,
3835,
290,
22790,
13,
198,
198,
4264,
12... | 2.514783 | 12,514 |
from quicksort import quicksort
def test_quicksort():
"""Test quicksort algorithm with odd-numbered list."""
list = [13, 28, 5]
assert quicksort(list, 0, len(list) - 1) == [5, 13, 28]
def test_quicksort_even():
"""Test quicksort algorithm with even-numbered list."""
list = [13, 28, 5, 40]
assert quicksort(list, 0, len(list) - 1) == [5, 13, 28, 40]
def test_quicksort_small():
"""Test quicksort with two-element list."""
list = [3, 2]
assert quicksort(list, 0, len(list) - 1) == [2, 3]
| [
6738,
627,
3378,
419,
1330,
627,
3378,
419,
628,
198,
4299,
1332,
62,
421,
3378,
419,
33529,
198,
220,
220,
220,
37227,
14402,
627,
3378,
419,
11862,
351,
5629,
12,
35565,
1351,
526,
15931,
198,
220,
220,
220,
1351,
796,
685,
1485,
... | 2.483568 | 213 |
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.2.1, generator: {generator})
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Dict, List, Optional
import msrest.serialization
class AccessAssignment(msrest.serialization.Model):
"""AccessAssignment.
:param action_ids:
:type action_ids: list[str]
:param attribute_conditions:
:type attribute_conditions: list[~oscp.controlplane.models.AttributeCondition]
"""
_attribute_map = {
'action_ids': {'key': 'actionIds', 'type': '[str]'},
'attribute_conditions': {'key': 'attributeConditions', 'type': '[AttributeCondition]'},
}
class AttributeCondition(msrest.serialization.Model):
"""AttributeCondition.
:param attribute_name:
:type attribute_name: str
:param values:
:type values: list[str]
:param attribute_matching_scheme:
:type attribute_matching_scheme: str
"""
_attribute_map = {
'attribute_name': {'key': 'attributeName', 'type': 'str'},
'values': {'key': 'values', 'type': '[str]'},
'attribute_matching_scheme': {'key': 'attributeMatchingScheme', 'type': 'str'},
}
class ResourceOfShare(msrest.serialization.Model):
"""ResourceOfShare.
:param id:
:type id: str
:param name:
:type name: str
:param type:
:type type: str
:param location:
:type location: str
:param e_tag:
:type e_tag: str
:param tags: A set of tags. The insensitive version of dictionary.
:type tags: dict[str, str]
:param properties:
:type properties: ~oscp.controlplane.models.Share
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'e_tag': {'key': 'eTag', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'properties': {'key': 'properties', 'type': 'Share'},
}
class ResourceOfWorkspace(msrest.serialization.Model):
"""ResourceOfWorkspace.
:param id:
:type id: str
:param name:
:type name: str
:param type:
:type type: str
:param location:
:type location: str
:param e_tag:
:type e_tag: str
:param tags: A set of tags. The insensitive version of dictionary.
:type tags: dict[str, str]
:param properties:
:type properties: ~oscp.controlplane.models.Workspace
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'e_tag': {'key': 'eTag', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'properties': {'key': 'properties', 'type': 'Workspace'},
}
class Share(msrest.serialization.Model):
"""Share.
All required parameters must be populated in order to send to Azure.
:param share_id: Gets or sets shareId.
:type share_id: str
:param share_name: Gets or sets the share Name.
:type share_name: str
:param description: Gets or sets the share description.
:type description: str
:param source_tenant_id:
:type source_tenant_id: str
:param target_tenant_id: Required. Gets or sets the Target TenantId.
:type target_tenant_id: str
:param share_state: Gets or sets the state.
:type share_state: str
:param access_assignments: Gets or sets the accessAssignments.
:type access_assignments: list[~oscp.controlplane.models.AccessAssignment]
"""
_validation = {
'share_name': {'max_length': 512, 'min_length': 0},
'description': {'max_length': 512, 'min_length': 0},
'target_tenant_id': {'required': True, 'min_length': 1},
}
_attribute_map = {
'share_id': {'key': 'shareId', 'type': 'str'},
'share_name': {'key': 'shareName', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'source_tenant_id': {'key': 'sourceTenantId', 'type': 'str'},
'target_tenant_id': {'key': 'targetTenantId', 'type': 'str'},
'share_state': {'key': 'shareState', 'type': 'str'},
'access_assignments': {'key': 'accessAssignments', 'type': '[AccessAssignment]'},
}
class Workspace(msrest.serialization.Model):
"""Workspace.
:param workspace_id: The workspace Id.
:type workspace_id: str
:param provisioning_state: The provisioning state.
:type provisioning_state: str
:param share_id: Gets or sets the Share Id for projected Resource.
:type share_id: str
:param workspace_url: The Workspace Url.
:type workspace_url: str
"""
_validation = {
'workspace_id': {'max_length': 128, 'min_length': 0},
}
_attribute_map = {
'workspace_id': {'key': 'workspaceId', 'type': 'str'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'share_id': {'key': 'shareId', 'type': 'str'},
'workspace_url': {'key': 'workspaceUrl', 'type': 'str'},
}
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
16529,
35937,
198,
2,
6127,
7560,
416,
5413,
357,
49,
8,
11160,
19452,
6127,
35986,
357,
2306,
26522,
25,
513,
13,
17,
13,
16,
11,
17301,
25,
1391,
8612,
1352,
30072,
198,
2,
19179,
743,
2728,
... | 2.549713 | 2,092 |
# -*- coding: utf-8 -*-
import re
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
302,
628,
198
] | 1.947368 | 19 |
import numpy as np
# local modules
from ray import Ray
from raytrace import raytrace
import utils
RGB_CHANNELS = 3
MAX_COLOR = 255
| [
11748,
299,
32152,
355,
45941,
198,
2,
1957,
13103,
198,
6738,
26842,
1330,
7760,
198,
6738,
26842,
40546,
1330,
26842,
40546,
198,
11748,
3384,
4487,
628,
198,
36982,
62,
3398,
22846,
37142,
796,
513,
198,
22921,
62,
46786,
796,
14280,
... | 3.268293 | 41 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-short-docstring-punctuation
"""## Communicating Between Processes with MPI
TensorFlow natively provides inter-device communication through send and
receive ops and inter-node communication through Distributed TensorFlow, based
on the same send and receive abstractions. On HPC clusters where Infiniband or
other high-speed node interconnects are available, these can end up being
insufficient for synchronous data-parallel training (without asynchronous
gradient descent). This module implements a variety of MPI ops which can take
advantage of hardware-specific MPI libraries for efficient communication.
In order to use this module, TensorFlow must be built with an MPI library,
which can be provided to the `./configure` script at build time. As a user of
TensorFlow, you will need to build TensorFlow yourself to select the MPI
library to use; to do so, follow the [instructions for building TensorFlow from
source](https://www.tensorflow.org/get_started/os_setup#installing_from_sources).
### Utility Ops
In addition to reductions and gathers, this module provides utility operations
for detecting the running MPI configuration.
Example:
```python
from tensorflow.contrib import mpi
# Use `mpi.Session` instead of `tf.Session`
with mpi.Session() as session:
rank = session.run(mpi.rank())
print("My MPI Rank:", rank)
if rank == 0:
print("MPI Size:", session.run(mpi.size()))
```
@@rank
@@size
### Ring Allreduce and Allgather
When summing or averaging tensors across many processes, communication can
easily become a bottleneck. A naive implementation will send all the tensor
values to the same process, perform the reduction, and then broadcast the
values back to all other processes, effectively creating a synchronous
parameter server in one process. However, the process responsible for
performing the reduction will have to receive and send a massive amount of data
which scales with the number of processes *and* the number of parameters in the
model.
Instead of centralizing the reduction and having one primary reducer, we can
implement a distributed allreduce or allgather. A bandwidth-optimal allreduce
will end up sending 2(N - 1) values for every value in the input tensor,
and can be implemented with a ring allreduce [1]. (Intuitively, a linear reduce
requires at least (N - 1) sends between the different nodes, and a broadcast of
the result also requires (N - 1) sends, for a total of 2 (N - 1); these two
steps cannot be combined in a clever way to reduce the number of required
sends.) This module implements bandwidth-optimal ring allreduce and ring
allgather operations using MPI; by choosing a hardware-appropriate MPI
implementation (such as OpenMPI with CUDA-IPC support), you can train large
models with synchronous gradient descent with minimal communication overhead.
In addition to the `allreduce` and `allgather` functions, a convenience
`DistributedOptimizer` wrapper is provided to simplify using these functions
for reducing model gradients.
Example:
```python
import tensorflow as tf
from tensorflow.contrib import mpi_collectives as mpi
# Construct a simple linear regression model to optimize
W = tf.get_variable("W", shape=[20, 1], dtype=tf.float32)
B = tf.get_variable("B", shape=[1, 1], dtype=tf.float32)
inputs = tf.placeholder("Inputs", shape=[None, 20])
outputs = tf.placeholder("Outputs", shape=[None, 1])
loss = tf.nn.l2_loss(tf.matmul(inputs, W) + B - outputs)
# Training using MPI allreduce with DistributedOptimizer
optimizer = mpi.DistributedOptimizer(tf.train.AdamOptimizer())
train = optimizer.minimize(loss)
# Average loss over all ranks, for printing.
# Do not pass this to an optimizer!
avg_loss = mpi.allreduce(loss)
# On different ranks, feed different input data.
with mpi.Session() as session:
rank = session.run(mpi.rank())
batch_inputs, batch_outputs = construct_batch_for_rank(rank)
feed_dict = {inputs: batch_inputs, outputs: batch_outputs}
_, l = session.run([train, avg_loss], feed_dict=feed_dict)
print("Average Loss:", l)
```
[1] Patarasuk, Pitch and Yuan, Xin. "Bandwidth Optimal All-reduce Algorithms
for Clusters of Workstations".
@@Session
@@DistributedOptimizer
@@allreduce
@@allgather
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib.mpi_collectives.mpi_ops import size
from tensorflow.contrib.mpi_collectives.mpi_ops import rank
from tensorflow.contrib.mpi_collectives.mpi_ops import local_rank
from tensorflow.contrib.mpi_collectives.mpi_ops import allgather
from tensorflow.contrib.mpi_collectives.mpi_ops import _allreduce
from tensorflow.contrib.mpi_collectives.mpi_ops import init
def allreduce(tensor, average=True):
"""Perform an MPI allreduce on a tf.Tensor or tf.IndexedSlices.
Arguments:
tensor: tf.Tensor, tf.Variable, or tf.IndexedSlices to reduce.
The shape of the input must be identical across all ranks.
average: If True, computes the average over all ranks.
Otherwise, computes the sum over all ranks.
This function performs a bandwidth-optimal ring allreduce on the input
tensor. If the input is an tf.IndexedSlices, the function instead does an
allgather on the values and the indices, effectively doing an allreduce on
the represented tensor.
"""
if isinstance(tensor, tf.IndexedSlices):
# For IndexedSlices, do two allgathers intead of an allreduce.
mpi_size = tf.cast(size(), tensor.values.dtype)
values = allgather(tensor.values)
indices = allgather(tensor.indices)
# To make this operation into an average, divide all gathered values by
# the MPI size.
new_values = tf.div(values, mpi_size) if average else values
return tf.IndexedSlices(new_values, indices,
dense_shape=tensor.dense_shape)
else:
mpi_size = tf.cast(size(), tensor.dtype)
summed_tensor = _allreduce(tensor)
new_tensor = (tf.div(summed_tensor, mpi_size)
if average else summed_tensor)
return new_tensor
class DistributedOptimizer(tf.train.Optimizer):
"""An optimizer that wraps another tf.Optimizer, using an MPI allreduce to
average gradient values before applying gradients to model weights."""
def __init__(self, optimizer, name=None, use_locking=False):
"""Construct a new DistributedOptimizer, which uses another optimizer
under the hood for computing single-process gradient values and
applying gradient updates after the gradient values have been averaged
across all the MPI ranks.
Args:
optimizer: Optimizer to use for computing gradients and applying updates.
name: Optional name prefix for the operations created when applying
gradients. Defaults to "Distributed" followed by the provided
optimizer type.
use_locking: Whether to use locking when updating variables. See
Optimizer.__init__ for more info.
"""
if name is None:
name = "Distributed{}".format(type(optimizer).__name__)
self._optimizer = optimizer
super(DistributedOptimizer, self).__init__(
name=name, use_locking=use_locking)
def compute_gradients(self, *args, **kwargs):
"""Compute gradients of all trainable variables.
See Optimizer.compute_gradients() for more info.
In DistributedOptimizer, compute_gradients() is overriden to also
allreduce the gradients before returning them.
"""
gradients = (super(DistributedOptimizer, self)
.compute_gradients(*args, **kwargs))
return [(allreduce(gradient), var) for (gradient, var) in gradients]
def _apply_dense(self, *args, **kwargs):
"""Calls this same method on the underlying optimizer."""
return self._optimizer._apply_dense(*args, **kwargs)
def _apply_sparse(self, *args, **kwargs):
"""Calls this same method on the underlying optimizer."""
return self._optimizer._apply_sparse(*args, **kwargs)
def _apply_sparse_duplicate_indices(self, *args, **kwargs):
"""Calls this same method on the underlying optimizer."""
return self._optimizer._apply_sparse_duplicate_indices(*args,
**kwargs)
def _prepare(self, *args, **kwargs):
"""Calls this same method on the underlying optimizer."""
return self._optimizer._prepare(*args, **kwargs)
def _create_slots(self, *args, **kwargs):
"""Calls this same method on the underlying optimizer."""
return self._optimizer._create_slots(*args, **kwargs)
def _valid_dtypes(self, *args, **kwargs):
"""Calls this same method on the underlying optimizer."""
return self._optimizer._valid_dtypes(*args, **kwargs)
def _finish(self, *args, **kwargs):
"""Calls this same method on the underlying optimizer."""
return self._optimizer._finish(*args, **kwargs)
class Session(tf.Session):
"""A class for running TensorFlow operations, with copies of the same graph
running distributed across different MPI nodes.
The primary difference between `tf.Session` and
`tf.contrib.mpi_collectives.Session` is that the MPI `Session` ensures that
the `Session` options are correct for use with `tf.contrib.mpi`, and
initializes MPI immediately upon the start of the session.
"""
def __init__(self, target='', graph=None, config=None):
"""Creates a new TensorFlow MPI session.
Unlike a normal `tf.Session`, an MPI Session may only use a single GPU,
which must be specified in advance before the session is initialized.
In addition, it only uses a single graph evaluation thread, and
initializes MPI immediately upon starting.
If no `graph` argument is specified when constructing the session,
the default graph will be launched in the session. If you are
using more than one graph (created with `tf.Graph()` in the same
process, you will have to use different sessions for each graph,
but each graph can be used in multiple sessions. In this case, it
is often clearer to pass the graph to be launched explicitly to
the session constructor.
Args:
target: (Optional.) The execution engine to connect to.
graph: (Optional.) The `Graph` to be launched (described above).
config: (Optional.) A `ConfigProto` protocol buffer with configuration
options for the session.
"""
super(Session, self).__init__(target, graph, config=config)
# Initialize MPI on the relevant device.
# TODO: Move this to library load and eliminate mpi.Session()
if graph is None:
graph = tf.get_default_graph()
with graph.as_default():
self.run(init())
| [
2,
15069,
2177,
383,
309,
22854,
37535,
46665,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
... | 3.234775 | 3,514 |
#!/usr/bin/env python3
import matplotlib.pyplot as plt
from typing import List, Tuple, Set
if __name__ == '__main__':
print(part_1())
print(part_2())
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
6738,
19720,
1330,
7343,
11,
309,
29291,
11,
5345,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
... | 2.515625 | 64 |
# $Filename$
# $Authors$
# Last Changed: $Date$ $Committer$ $Revision-Id$
#
# Copyright (c) 2003-2011, German Aerospace Center (DLR)
# All rights reserved.
#
#
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are
#met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the German Aerospace Center nor the names of
# its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
#LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
#A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
#OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
#SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
#LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
#DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
#THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
#OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Implements the principal search WebDAV-specific
"""
import os
from webdav.Condition import ContainsTerm
from webdav.Connection import WebdavError
from webdav.Constants import NS_DAV, PROP_DISPLAY_NAME
from datafinder.persistence.error import PersistenceError
from datafinder.persistence.principal_search import constants, principal
from datafinder.persistence.principal_search.principalsearcher import NullPrincipalSearcher
from datafinder.persistence.adapters.webdav_ import util
__version__ = "$Revision-Id:$"
class PrincipalSearchWebdavAdapter(NullPrincipalSearcher):
""" Implements the search for principals WebDAV-specific. """
def __init__(self, userCollectionUrl, groupCollectionUrl, connectionPool, connectionHelper=util):
"""
Constructor.
@param userCollectionUrl: URL pointing to the user collection.
@type userCollectionUrl: C{unicode}
@param groupCollectionUrl: URL pointing to the group collection.
@type groupCollectionUrl: C{unicode}
@param connectionPool: Connection pool.
@type connectionPool: L{Connection<datafinder.persistence.webdav_.connection_pool.WebdavConnectionPool>}
@param connectionHelper: Utility object/module creating WebDAV library storer instances.
@type connectionHelper: L{ItemIdentifierMapper<datafinder.persistence.adapters.webdav_.util}
"""
NullPrincipalSearcher.__init__(self)
self.__connectionPool = connectionPool
self.__userCollectionUrl = userCollectionUrl
self.__groupCollectionUrl = groupCollectionUrl
self.__connectionHelper = connectionHelper
def searchPrincipal(self, pattern, searchMode):
""" @see: L{NullPrincipalSearcher<datafinder.persistence.principal_search.principalsearcher.NullPrincipalSearcher>} """
connection = self.__connectionPool.acquire()
try:
userCollectionStorer = self.__connectionHelper.createCollectionStorer(self.__userCollectionUrl, connection)
groupCollectionStorer = self.__connectionHelper.createCollectionStorer(self.__groupCollectionUrl, connection)
return self._searchPrincipal(pattern, searchMode, userCollectionStorer, groupCollectionStorer)
finally:
self.__connectionPool.release(connection)
def _searchPrincipal(self, pattern, searchMode, userCollectionStorer, groupCollectionStorer):
""" Performs principal search on the WebDAV server. """
mappedResult = list()
userRawResult = dict()
groupRawResult = dict()
if searchMode == constants.SEARCH_MODE_USER_AND_GROUP:
groupRawResult = self._performSearch(pattern, groupCollectionStorer)
userRawResult = self._performSearch(pattern, userCollectionStorer)
elif searchMode == constants.SEARCH_MODE_GROUP_ONLY:
groupRawResult = self._performSearch(pattern, groupCollectionStorer)
elif searchMode == constants.SEARCH_MODE_USER_ONLY:
userRawResult = self._performSearch(pattern, userCollectionStorer)
else:
raise PersistenceError("The specified search mode is not supported.")
self._mapRawResult(userRawResult, mappedResult, True)
self._mapRawResult(groupRawResult, mappedResult, False)
return mappedResult
@staticmethod
def _performSearch(name, collectionStorer):
""" Performs the principal search on the given WebDAV principal collection. """
condition = ContainsTerm(PROP_DISPLAY_NAME, name, False)
try:
searchResult = collectionStorer.search(condition, [(NS_DAV, PROP_DISPLAY_NAME)])
except WebdavError, error:
errorMessage = "Cannot perform user/group query. Reason: %s" % error.reason
raise PersistenceError(errorMessage)
return searchResult
@staticmethod
def _mapRawResult(rawResult, mappedResult, isUser):
""" Maps the WebDAV search result to the required format. """
for key, value in rawResult.iteritems():
uniqueName = os.path.basename(key)
displayName = ""
if (NS_DAV, PROP_DISPLAY_NAME) in value:
displayName = unicode(value[(NS_DAV, PROP_DISPLAY_NAME)].textof())
if isUser:
principalType = constants.USER_PRINCIPAL_TYPE
else:
principalType = constants.GROUP_PRINCIPAL_TYPE
principal_ = principal.Principal(uniqueName, type=principalType, displayName=displayName)
mappedResult.append(principal_)
| [
2,
720,
35063,
3,
220,
201,
198,
2,
720,
30515,
669,
3,
201,
198,
2,
4586,
32068,
25,
720,
10430,
3,
720,
6935,
1967,
3,
720,
18009,
1166,
12,
7390,
3,
201,
198,
2,
201,
198,
2,
15069,
357,
66,
8,
5816,
12,
9804,
11,
2679,
4... | 2.714527 | 2,368 |
from ds1054z import DS1054Z
scope = DS1054Z('USB0::6833::1230::DS1ZA182511136::0::INSTR')
print("Connected to: ", scope.idn) | [
6738,
288,
82,
940,
4051,
89,
1330,
17400,
940,
4051,
57,
198,
198,
29982,
796,
17400,
940,
4051,
57,
10786,
27155,
15,
3712,
3104,
2091,
3712,
1065,
1270,
3712,
5258,
16,
34892,
1507,
1495,
1157,
20809,
3712,
15,
3712,
1268,
18601,
1... | 2.272727 | 55 |
from math import tanh
from pysqlite2 import dbapi2 as sqlite
| [
6738,
10688,
1330,
25706,
71,
198,
6738,
279,
893,
13976,
578,
17,
1330,
20613,
15042,
17,
355,
44161,
578,
198
] | 3.05 | 20 |
import pioneer.common.constants as Constants
from pioneer.common.logging_manager import LoggingManager
from pioneer.das.api.sources.filesource import FileSource, try_all_patterns
from pioneer.das.api.loaders import pickle_loader
from ruamel.std import zipfile
import multiprocessing
import numpy as np
import pandas as pd
import os
import re
import sys
import threading
import time
import traceback
import yaml
class ZipFileSource(FileSource):
"""Loads a list of files from a zip archive."""
| [
11748,
29570,
13,
11321,
13,
9979,
1187,
355,
4757,
1187,
198,
6738,
29570,
13,
11321,
13,
6404,
2667,
62,
37153,
1330,
5972,
2667,
13511,
198,
6738,
29570,
13,
67,
292,
13,
15042,
13,
82,
2203,
13,
16624,
1668,
1330,
9220,
7416,
11,
... | 3.424658 | 146 |
import discord
from discord.ext import commands
from bot.lib.game import Game, Pieces
| [
11748,
36446,
198,
6738,
36446,
13,
2302,
1330,
9729,
198,
198,
6738,
10214,
13,
8019,
13,
6057,
1330,
3776,
11,
44676,
628
] | 4 | 22 |
# -*- coding: utf-8 -*-
"""Top-level package for PyGitDB."""
__author__ = """Jorge Arevalo"""
__email__ = 'jorge.arevalo@reclamador.es'
__version__ = '0.1.0'
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
9126,
12,
5715,
5301,
329,
9485,
38,
270,
11012,
526,
15931,
198,
198,
834,
9800,
834,
796,
37227,
41,
3643,
4231,
2100,
78,
37811,
198,
834,
12888,
834,
7... | 2.191781 | 73 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from distutils.core import setup
from distutils.extension import Extension
from Cython.Build import cythonize
c_src_files = [
"tracker/src/tracker.cc",
"tracker/src/tracker_helper.cc",
"tracker/src/math_helper.cc"
]
cython_src_files = [
"tracker/wrap/tracker_wrap.pyx"
]
cython_module = Extension(
"tracker.tracker_wrap",
c_src_files + cython_src_files,
include_dirs =["tracker/src", "third_party/eigen3"],
language = "c++"
)
setup(
name = "tracker",
version = "0.0.1",
description = "A library to track partition function of an RBM",
long_description = (
"An implementation of the paper"
"G. Desjardins et al.(2011) On Tracking The Partition Function"),
license = "MIT License",
packages = ["tracker"],
ext_modules = cythonize(cython_module),
install_requires = [
"cython==0.27.2",
"numpy==1.14.0"
]
) | [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
6738,
11593,
37443,
834,
1330,
7297,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
198,
6738,
1233,
26791,
13,
7295,
1330,
9058,
198,
6738,
1233,
26791,
13,
2302,
3004,
133... | 2.692737 | 358 |
"""Library for synthesizing Entropic Reactive Control Improvisers for
stochastic games."""
# flake8: noqa
from improvisers.game_graph import *
from improvisers.implicit import *
from improvisers.explicit import *
from improvisers.tabular import *
from improvisers.policy import *
| [
37811,
23377,
329,
24983,
2890,
7232,
1773,
291,
797,
5275,
6779,
12205,
4703,
364,
329,
198,
301,
5374,
3477,
1830,
526,
15931,
198,
198,
2,
781,
539,
23,
25,
645,
20402,
198,
6738,
47833,
364,
13,
6057,
62,
34960,
1330,
1635,
198,
... | 3.649351 | 77 |
'''
Easy Control Beta
Speed Default 5
w
a s d
e : speed up 0.1
c : speed down 0.2
x : speed default 5
'''
import rospy
rospy.init_node('robot_easygo', anonymous=False)
import math
from geometry_msgs.msg import Twist
from sensor_msgs.msg import Imu
from std_msgs.msg import Float32
from tf.transformations import euler_from_quaternion
import time
import keyCap
import easyGo
import keyCap
main()
| [
7061,
6,
201,
198,
28406,
6779,
17993,
201,
198,
22785,
15161,
642,
201,
198,
201,
198,
220,
220,
220,
266,
201,
198,
64,
220,
220,
264,
220,
220,
288,
201,
198,
201,
198,
68,
1058,
2866,
510,
657,
13,
16,
201,
198,
66,
1058,
28... | 2.541176 | 170 |
'''
Dr. Dragonbot by MadScotty
BEGIN LICENSE
By existing on the same mortal coil as the author of this software you hereby
allow the author, henceforth known as Dr. Awesomeweiner, to sleep on your couch,
watch your television, and use your microwave. By reading this license you agree
that Lord Satan isn't as bad as everyone says #fakenews
END LICENSE
'''
import random
import json
import discord
# Go ahead and put the json files into memory for faster lookup
spellbook = json.load(open("spells.json", encoding = "utf-8"))
conditions_full_list = json.load(open("conditions.json", encoding = "utf-8"))
gamedate = -1
# helpbox() returns a list of commands and what they do as a string, using Discord's code block formatting
# spell_lookup(spell_name) returns a discord embed containing details of spell_name, which is pulled from spells.json
# condition_lookup returns info about a specified condition as a string, or if none is specified, it returns a list of conditions
# It needs the command_prefix to cater a helpful reply (how to get info on a specific condition)
# I CAST MAGIC MISSLE
# PM user the current changelog
# Let's roll some dice! Takes a string in the format XdY | [
7061,
6,
198,
6187,
13,
2851,
13645,
416,
4627,
37559,
774,
198,
198,
33,
43312,
38559,
24290,
198,
198,
3886,
4683,
319,
262,
976,
22122,
25661,
355,
262,
1772,
286,
428,
3788,
345,
29376,
198,
12154,
262,
1772,
11,
12891,
25718,
190... | 3.665644 | 326 |
from collections import defaultdict
from datetime import datetime
from types import GeneratorType
from typing import (
AbstractSet,
Any,
Callable,
Dict,
List,
Mapping,
Tuple,
Union,
)
from uuid import UUID
from bson import ObjectId, DBRef
from pydantic import BaseModel
from .bson import ENCODERS_BY_TYPE
from ..fields import LinkTypes
bson_encoder = Encoder()
| [
6738,
17268,
1330,
4277,
11600,
201,
198,
6738,
4818,
8079,
1330,
4818,
8079,
201,
198,
6738,
3858,
1330,
35986,
6030,
201,
198,
6738,
19720,
1330,
357,
201,
198,
220,
220,
220,
27741,
7248,
11,
201,
198,
220,
220,
220,
4377,
11,
201,... | 2.573171 | 164 |
# -*- coding: utf-8 -*-
from py_lets_be_quickly_rational import constants
if __name__ == "__main__":
try:
raise BelowIntrinsicException
except VolatilityValueException as e:
if not isinstance(e, BelowIntrinsicException):
raise Exception("Should be BelowIntrinsicException")
try:
raise AboveMaximumException
except VolatilityValueException as e:
if not isinstance(e, AboveMaximumException):
raise Exception("Should be AboveMaximumException")
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
12972,
62,
5289,
62,
1350,
62,
24209,
306,
62,
20310,
1330,
38491,
628,
628,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
22... | 2.708333 | 192 |
print Solution().convertToTitle(701) | [
198,
4798,
28186,
22446,
1102,
1851,
2514,
19160,
7,
41583,
8
] | 3.363636 | 11 |
#!/usr/bin/env python3
"""jc - JSON CLI output utility
JC cli module
"""
import sys
import os
import shlex
import importlib
import textwrap
import signal
import json
import jc.utils
__version__ = info.version
parsers = [
'arp',
'blkid',
'crontab',
'crontab-u',
'csv',
'df',
'dig',
'du',
'env',
'free',
'fstab',
'group',
'gshadow',
'history',
'hosts',
'id',
'ifconfig',
'ini',
'iptables',
'jobs',
'last',
'ls',
'lsblk',
'lsmod',
'lsof',
'mount',
'netstat',
'passwd',
'pip-list',
'pip-show',
'ps',
'route',
'shadow',
'ss',
'stat',
'systemctl',
'systemctl-lj',
'systemctl-ls',
'systemctl-luf',
'uname',
'uptime',
'w',
'who',
'xml',
'yaml'
]
def ctrlc(signum, frame):
"""exit with error on SIGINT"""
sys.exit(1)
def parser_shortname(parser_argument):
"""short name of the parser with dashes and no -- prefix"""
return parser_argument[2:]
def parser_argument(parser):
"""short name of the parser with dashes and with -- prefix"""
return f'--{parser}'
def parser_mod_shortname(parser):
"""short name of the parser's module name (no -- prefix and dashes converted to underscores)"""
return parser.replace('--', '').replace('-', '_')
def parser_module(parser):
"""import the module just in time and return the module object"""
importlib.import_module('jc.parsers.' + parser_mod_shortname(parser))
return getattr(jc.parsers, parser_mod_shortname(parser))
def parsers_text(indent=0, pad=0):
"""return the argument and description information from each parser"""
ptext = ''
for parser in parsers:
parser_arg = parser_argument(parser)
parser_mod = parser_module(parser)
if hasattr(parser_mod, 'info'):
parser_desc = parser_mod.info.description
padding = pad - len(parser_arg)
padding_char = ' '
indent_text = padding_char * indent
padding_text = padding_char * padding
ptext += indent_text + parser_arg + padding_text + parser_desc + '\n'
return ptext
def about_jc():
"""return jc info and the contents of each parser.info as a dictionary"""
parser_list = []
for parser in parsers:
parser_mod = parser_module(parser)
if hasattr(parser_mod, 'info'):
info_dict = {}
info_dict['name'] = parser_mod.__name__.split('.')[-1]
info_dict['argument'] = parser_argument(parser)
parser_entry = vars(parser_mod.info)
for k, v in parser_entry.items():
if not k.startswith('__'):
info_dict[k] = v
parser_list.append(info_dict)
return {
'name': 'jc',
'version': info.version,
'description': info.description,
'author': info.author,
'author_email': info.author_email,
'parser_count': len(parser_list),
'parsers': parser_list
}
def helptext(message):
"""return the help text with the list of parsers"""
parsers_string = parsers_text(indent=12, pad=17)
helptext_string = f'''
jc: {message}
Usage: COMMAND | jc PARSER [OPTIONS]
or magic syntax:
jc [OPTIONS] COMMAND
Parsers:
{parsers_string}
Options:
-a about jc
-d debug - show trace messages
-p pretty print output
-q quiet - suppress warnings
-r raw JSON output
Example:
ls -al | jc --ls -p
or using the magic syntax:
jc -p ls -al
'''
print(textwrap.dedent(helptext_string), file=sys.stderr)
def magic():
"""Parse with magic syntax: jc -p ls -al"""
if len(sys.argv) > 1 and not sys.argv[1].startswith('--'):
parser_info = about_jc()['parsers']
# correctly parse escape characters and spaces with shlex
args_given = " ".join(map(shlex.quote, sys.argv[1:])).split()
options = []
found_parser = None
# find the options
if args_given[0].startswith('-'):
p = 0
for i, arg in list(enumerate(args_given)):
# parser found - use standard syntax
if arg.startswith('--'):
return
# option found - populate option list
elif arg.startswith('-'):
options.append(args_given.pop(i - p)[1:])
p = p + 1
# command found if iterator didn't already stop - stop iterating
else:
break
# find the command and parser
for parser in parser_info:
if 'magic_commands' in parser:
# first pass for two word commands: e.g. 'pip list'
for magic_command in parser['magic_commands']:
try:
if ' '.join(args_given[0:2]) == magic_command:
found_parser = parser['argument']
break
# No command found - go to next loop (for cases like 'jc -a')
except Exception:
break
# second pass for one word commands: e.g. 'ls'
if not found_parser:
for magic_command in parser['magic_commands']:
try:
if args_given[0] == magic_command:
found_parser = parser['argument']
break
# No command found - use standard syntax (for cases like 'jc -a')
except Exception:
return
# construct a new command line using the standard syntax: COMMAND | jc --PARSER -OPTIONS
run_command = ' '.join(args_given)
if found_parser:
if options:
cmd_options = '-' + ''.join(options)
else:
cmd_options = ''
whole_command = ' '.join([run_command, '|', 'jc', found_parser, cmd_options])
os.system(whole_command)
exit()
else:
helptext(f'parser not found for "{run_command}"')
sys.exit(1)
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
37811,
48055,
532,
19449,
43749,
5072,
10361,
198,
34382,
537,
72,
8265,
198,
37811,
198,
11748,
25064,
198,
11748,
28686,
198,
11748,
427,
2588,
198,
11748,
1330,
8019,
198,
11748,
... | 2.042075 | 3,161 |
from pyauth.system import System
if __name__ == '__main__':
main()
| [
6738,
279,
3972,
1071,
13,
10057,
1330,
4482,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1388,
3419,
198
] | 2.642857 | 28 |
#!/usr/bin/env python
######################################################
# Copyright (c) 2020 Maker Portal LLC
# Author: Joshua Hrisko
######################################################
#
# This code reads data from the MPU9250/MPU9265 board
# (MPU6050 - accel/gyro, AK8963 - mag) to verify its
# correct wiring to a Raspberry Pi and the functionality
# of the MPU9250_i2c.py library
#
# Code modified to publish IMU data to ROS
######################################################
#
import time
import rospy
from sensor_msgs.msg import Imu
import numpy
t0 = time.time()
start_bool = False # boolean for connection
while (time.time()-t0)<5: # wait for 5-sec to connect to IMU
try:
from mpu9250_i2c import *
start_bool = True # True for forthcoming loop
break
except:
continue
#
#############################
# Strings for Units/Labs
#############################
#
imu_devs = ["ACCELEROMETER","GYROSCOPE","MAGNETOMETER"]
imu_labels = ["x-dir","y-dir","z-dir"]
imu_units = ["m/s^2","m/s^2","m/s^2","rad/s","rad/s","rad/s","uT","uT","uT"]
#Create Node
try:
talker()
except rospy.ROSInterruptException:
pass
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
29113,
14468,
4242,
2235,
198,
2,
15069,
357,
66,
8,
12131,
21521,
25663,
11419,
198,
2,
6434,
25,
20700,
367,
19121,
78,
198,
29113,
14468,
4242,
2235,
198,
2,
198,
2,
770,
2438,
97... | 2.809069 | 419 |
import mock
from nose.tools import * # noqa
import pytest
import unittest
from tests.base import get_default_metaschema
from osf_tests.factories import ProjectFactory, DraftRegistrationFactory
from framework.auth import Auth
from addons.base.tests.models import (
OAuthAddonNodeSettingsTestSuiteMixin,
OAuthAddonUserSettingTestSuiteMixin
)
from addons.weko.models import NodeSettings
from addons.weko.tests.factories import (
WEKOUserSettingsFactory,
WEKONodeSettingsFactory,
WEKOAccountFactory
)
from addons.weko import client
pytestmark = pytest.mark.django_db
| [
11748,
15290,
198,
6738,
9686,
13,
31391,
1330,
1635,
220,
1303,
645,
20402,
198,
11748,
12972,
9288,
198,
11748,
555,
715,
395,
198,
198,
6738,
5254,
13,
8692,
1330,
651,
62,
12286,
62,
4164,
292,
2395,
2611,
198,
6738,
267,
28202,
6... | 3.10582 | 189 |
from __future__ import annotations
from datetime import datetime
from typing import List, Optional, Type, TypeVar, Dict, Any
from uuid import UUID
import orjson
from pydantic import BaseModel
from vektonn.utils import camel_case_pydantic_alias_generator, orjson_dumps
TVektonnModel = TypeVar('TVektonnModel', bound='VektonnBaseModel')
| [
6738,
11593,
37443,
834,
1330,
37647,
198,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
19720,
1330,
7343,
11,
32233,
11,
5994,
11,
5994,
19852,
11,
360,
713,
11,
4377,
198,
6738,
334,
27112,
1330,
471,
27586,
198,
198,
11748,
... | 3.153153 | 111 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 CESNET.
#
# CESNET OA Publication Repository is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
#
"""Publications API cli commands."""
import json
import os
import subprocess
import tempfile
import traceback
from os.path import basename
import click
import tqdm
from flask import current_app
from flask.cli import with_appcontext
from invenio_app.factory import create_api
from invenio_db import db
from invenio_files_rest.models import ObjectVersion, FileInstance, Bucket
from invenio_indexer.api import RecordIndexer
from invenio_jsonschemas import current_jsonschemas
from invenio_pidstore.models import PersistentIdentifier, PIDStatus
from invenio_records.api import _records_state
from invenio_records.models import RecordMetadata
from invenio_records_files.models import RecordsBuckets
from invenio_search import current_search_client
from invenio_search.utils import build_alias_name
from json_schema_for_humans.generate import generate_from_file_object
from jsonref import JsonRef
from sqlalchemy_continuum import version_class, versioning_manager
from publications.articles.constants import ARTICLE_PID_TYPE, ARTICLE_DRAFT_PID_TYPE
from publications.articles.record import ArticleRecord, ArticleDraftRecord
from publications.datasets.constants import DATASET_PID_TYPE, DATASET_DRAFT_PID_TYPE
from publications.datasets.record import DatasetRecord, DatasetDraftRecord
@click.group()
def publications():
"""Commands for publications repository."""
pass
@publications.command('clear')
@with_appcontext
@click.pass_context
def clear(ctx, raise_on_error=True, only=None):
"""Clear all record data in publications repository."""
RecordsBuckets.query.delete()
RecordMetadata.query.delete()
PersistentIdentifier.query.delete()
ObjectVersion.query.delete()
FileInstance.query.delete()
Bucket.query.delete()
version_cls = version_class(RecordMetadata)
version_cls.query.delete()
versioning_manager.transaction_cls.query.delete()
# RecordReference.query.delete()
# ReferencingRecord.query.delete()
# ClassName.query.delete()
subprocess.call([
'oarepo',
'index',
'destroy',
'--yes-i-know',
'--force'
])
subprocess.call([
'oarepo',
'index',
'init',
'--force'
])
db.session.commit()
@publications.command('schema-docs')
@click.argument('schemas', nargs=-1)
@with_appcontext
def schema_docs(schemas):
"""Generates jsonschema docs for data models."""
for schema_path in schemas:
click.secho(f'Generating docs for schema {schema_path}')
schema = current_jsonschemas.get_schema(schema_path, with_refs=False, resolved=False)
schema = JsonRef.replace_refs(
schema,
jsonschema=True,
base_uri=current_app.config.get('JSONSCHEMAS_HOST'),
loader=_records_state.loader_cls(),
)
# TODO: this is necessary to resolve JSONRefs in allOf
schema = json.loads(json.dumps(schema, default=lambda x: x.__subject__))
# Generate and save html docs for the schema
with tempfile.NamedTemporaryFile(mode="w+") as schema_source:
schema_source.write(json.dumps(schema))
schema_source.flush()
with open(f'docs/schemas/{basename(schema_path.rstrip(".json"))}.html', mode='w+') as result_file:
click.secho(f'Writing schema docs to {result_file.name}', color='green')
generate_from_file_object(
schema_file=schema_source,
result_file=result_file,
minify=True,
expand_buttons=True
)
# Generate and save schema index page
index_md = r"""---
layout: default
---
# Data Models Schema Docs
"""
for f in os.listdir('docs/schemas/'):
if f.endswith('.html'):
index_md += f'- [{f.rstrip(".html")}](./{f})\n'
with open(f'docs/schemas/index.md', mode='w+') as index_file:
index_file.write(index_md)
@publications.group('datasets')
def datasets():
"""Commands for dataset collection management."""
@publications.group('articles')
def articles():
"""Commands for article collection management."""
@datasets.command('reindex')
@click.option(
'--raise-on-error/--skip-errors', default=True,
help='Controls if Elasticsearch bulk indexing errors raise an exception.')
@click.option(
'--only',
help='Index only this item')
@with_appcontext
@click.pass_context
@articles.command('reindex')
@click.option(
'--raise-on-error/--skip-errors', default=True,
help='Controls if Elasticsearch bulk indexing errors raise an exception.')
@click.option(
'--only',
help='Index only this item')
@with_appcontext
@click.pass_context
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
2,
15069,
357,
34,
8,
12131,
42700,
12884,
13,
198,
2,
198,
2,
42700,
12884,
440,
32,
45065,
1432,
13264,
318,
1479,
3788,
26,
345,
460,
17678,
4163,
340,
29... | 2.623273 | 1,882 |
"""Non-boto3 related utilities"""
| [
37811,
15419,
12,
65,
2069,
18,
3519,
20081,
37811,
198
] | 3.4 | 10 |
import argparse
import spacy
import re
import countryinfo
from concurrent.futures import ProcessPoolExecutor
nlp = spacy.load('en_core_web_sm')
QUOTE_RE = re.compile(r'".+?"')
OTHER_QUOTE_RE = re.compile(r"'.+?'")
demonym_map = {}
with open('demonyms.txt') as f:
for line in f:
country, d1, d2 = line.strip().split('\t')
d1 = d1.split(', ')
d2 = d2.split(', ')
val = d1 + d2 + [country]
demonym_map[country] = val
for x in d1 + d2:
demonym_map[x] = val
if __name__ == '__main__':
main()
| [
11748,
1822,
29572,
198,
11748,
599,
1590,
198,
11748,
302,
198,
11748,
1499,
10951,
198,
6738,
24580,
13,
69,
315,
942,
1330,
10854,
27201,
23002,
38409,
198,
198,
21283,
79,
796,
599,
1590,
13,
2220,
10786,
268,
62,
7295,
62,
12384,
... | 2.11194 | 268 |
from flask import Flask, render_template, redirect, url_for, request, flash,json,jsonify
from flask_wtf import Form
from wtforms import StringField, BooleanField
from wtforms.validators import DataRequired
from wtforms import BooleanField, TextField, PasswordField, validators
from wtforms.validators import Required
from forms import SignupFormdemo
import MySQLdb
from MySQLdb import escape_string as thwart
import gc
import random, itertools
WTF_CSRF_ENABLED = False
import hashlib
app = Flask(__name__)
app.secret_key = 'adbmssecretkey'
@app.route("/")
@app.route("/logout")
@app.route("/success",methods=['GET','POST'])
@app.route("/checkpnr",methods=['GET','POST'])
@app.route("/error")
@app.route("/btnhome",methods=['GET','POST'])
@app.route("/pnrstatus",methods=['GET','POST'])
@app.route("/booking",methods=['GET','POST'])
@app.route("/check",methods=['GET','POST'])
@app.route('/signUpUser',methods=['GET','POST'])
@app.route("/signup/",methods=['GET','POST'])
@app.route("/homepage/",methods=['GET','POST'])
@app.route("/loginpage/",methods=['GET'])
@app.route("/login/",methods=['GET','POST'])
if __name__ == "__main__":
app.run() | [
6738,
42903,
1330,
46947,
11,
8543,
62,
28243,
11,
220,
18941,
11,
19016,
62,
1640,
11,
2581,
11,
7644,
11,
17752,
11,
17752,
1958,
201,
198,
6738,
42903,
62,
86,
27110,
1330,
5178,
201,
198,
6738,
266,
83,
23914,
1330,
10903,
15878,
... | 2.408046 | 522 |
import tempfile
TEMP_DIR = tempfile.TemporaryDirectory().name
INSTALLED_APPS = (
'model_field_meta',
'tests',
)
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': TEMP_DIR,
}
}
SECRET_KEY = 'THIS IS A SECRET KEY'
| [
11748,
20218,
7753,
198,
198,
51,
39494,
62,
34720,
796,
20218,
7753,
13,
12966,
5551,
43055,
22446,
3672,
198,
198,
38604,
7036,
1961,
62,
2969,
3705,
796,
357,
198,
220,
220,
220,
705,
19849,
62,
3245,
62,
28961,
3256,
198,
220,
220... | 2.116279 | 129 |
n = int(input())
s = 2*n -1
s2 = 3
for i in range(n): # 공백 (i)
print(' '*i + '*'*s, end='')
s -= 2
print()
for i in range(n-2, -1, -1):
print(' '*i + '*'*s2, end='')
s2 += 2
print()
## 다른 풀이
n = int(input())
for i in range(n):
print(" " * i + "*" * ((n - i) * 2 - 1))
for i in range(n - 2, -1, -1):
print(" " * i + "*" * ((n - i) * 2 - 1)) | [
77,
796,
493,
7,
15414,
28955,
198,
82,
796,
362,
9,
77,
532,
16,
198,
82,
17,
796,
513,
198,
1640,
1312,
287,
2837,
7,
77,
2599,
1303,
220,
166,
111,
113,
167,
108,
109,
357,
72,
8,
198,
220,
220,
220,
3601,
10786,
705,
9,
... | 1.718894 | 217 |
#!/usr/bin/env python
"""
Convert the output of Noise Cancelling Repeat Finder to a summary, a
tab-delimited table with one line of stats per alignment.
"""
from sys import argv,stdin,stdout,stderr,exit
from os import path as os_path
from ncrf_parse import alignments,parse_noise_rate
if __name__ == "__main__": main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
37811,
198,
3103,
1851,
262,
5072,
286,
30964,
43780,
2680,
30021,
42500,
284,
257,
10638,
11,
257,
198,
8658,
12,
12381,
320,
863,
3084,
351,
530,
1627,
286,
9756,
583,
19114,
13,
198... | 2.848739 | 119 |
import os
import glob
import matplotlib.pyplot as matplot
import japanize_matplotlib
from reportlab.pdfgen import canvas
from reportlab.lib.pagesizes import A4, portrait
file_count = 0
total_count = 0
comment_count = 0
use_files = []
png_name = "circle.png"
pdf_name = "circle.pdf"
#下の階層用
for files in glob.glob("*/**/*"):
name, extension = os.path.splitext(files)
if extension == ".py":
use_files.append(name + extension)
file_count += 1
pyfile = open(files)
lines = pyfile.readlines()
pyfile.close()
replace_line = [linedata.replace(' ','') for linedata in lines]
replace_and_strip_line = [replacedata.replace('\n','') for replacedata in replace_line]
#コメント判定
for pyline in replace_and_strip_line:
total_count += len(pyline)
if pyline.startswith("#", 0):
comment_count += len(pyline)
#同じ階層用
for files in glob.glob("*"):
name, extension = os.path.splitext(files)
if extension == ".py":
use_files.append(name + extension)
file_count += 1
pyfile = open(files)
lines = pyfile.readlines()
pyfile.close()
replace_line = [linedata.replace(' ','') for linedata in lines]
replace_and_strip_line = [replacedata.replace('\n','') for replacedata in replace_line]
for pyline in replace_and_strip_line:
total_count += len(pyline)
if pyline.startswith("#", 0):
comment_count += len(pyline)
#円グラフ描画
not_comment_count = total_count - comment_count
circledata = [not_comment_count, comment_count]
labels = ["プログラム文", "コメント"]
a, ax = matplot.subplots()
ax.pie(circledata, labels = labels, startangle = 90, autopct="%1.1f%%")
matplot.title("コメント比率", fontsize = 24)
matplot.savefig(png_name)
#PDF作成
pdf = canvas.Canvas(pdf_name, pagesize=portrait(A4))
pdf.drawImage(png_name, 0, 500, 450, 300)
pdf.drawCentredString(107, 500, "Total: " + "{:10}".format(str(total_count)))
pdf.drawCentredString(110, 480, "Comment: " + "{:10}".format(str(comment_count)))
pdf.drawCentredString(113, 460, "Files: " + "{:10}".format(str(file_count)))
pdf.drawCentredString(107, 400, "FileName_and_Path")
x, y = 215, 375
for file_name in use_files:
pdf.drawCentredString(x, y, file_name)
y -= 15
pdf.save()
#画像ファイル削除(circle.png)
os.remove("circle.png") | [
11748,
28686,
201,
198,
11748,
15095,
201,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
2603,
29487,
201,
198,
11748,
474,
2674,
1096,
62,
6759,
29487,
8019,
201,
198,
6738,
989,
23912,
13,
12315,
5235,
1330,
21978,
201,
198,
67... | 2.148992 | 1,141 |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import json
import datetime
import unittest
from airflow.exceptions import AirflowException
from airflow.contrib.hooks.gcp_transfer_hook import GCPTransferServiceHook
from airflow.contrib.hooks.gcp_transfer_hook import TIME_TO_SLEEP_IN_SECONDS
try:
from unittest import mock
except ImportError:
try:
import mock
except ImportError:
mock = None
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
2,
49962,
284,
262,
24843,
10442,
5693,
357,
1921,
37,
8,
739,
530,
198,
2,
393,
517,
18920,
5964,
11704,
13,
220,
4091,
262,
28536,
2393,
198,
2,
9387,
351,... | 3.494118 | 340 |
# AP Ruymgaart
# Finite difference 2D wave equation
import copy, sys, numpy as np, matplotlib.pyplot as plt
from draw_boundaries import *
from tensorFiles import *
from pde_findiff_functions import *
fft2, fftshift, ifft2 = np.fft.fft2, np.fft.fftshift, np.fft.ifft2
if __name__ == '__main__':
IC, source, name = 'none', 'none', 'none'
simsel, nrSteps, c, dt, W, mod = 1, 1000, 343.0, 0.000001, 0.2, 100
plot, bSpectral = False, False
cmds = sys.argv[1:len(sys.argv)]
if len(cmds) > 0 :
stype = cmds[0].split(':')
simsel = 2
if stype[0] == 'SQUARE':
n = int(stype[1])
B = squareSim(n, stype[2]) # Boundary matrix
name = 'square_%s' % (stype[2])
elif stype[0] == 'FILE':
f = open(stype[1], 'r')
lines = f.readlines()
f.close()
B = processBcIni(lines)
name = stype[1].replace('.ini','').replace('BC.','')
if len(cmds) > 1 : nrSteps = int(cmds[1])
if len(cmds) > 2 : dt = float(cmds[2])
if len(cmds) > 3 : plot = cmds[3].lower()[0] == 't'
if len(cmds) > 4 : mod = int(cmds[4])
if len(cmds) > 5 : IC = cmds[5].split(':')
if len(cmds) > 6 : source = cmds[6].split(':')
if len(cmds) > 7 :
if cmds[7] == 'SPECTRAL' :
bSpectral = True
name += '_SPECTRAL'
if len(cmds) > 8 : W = float(cmds[8])
ds = W/B.shape[1] # delta_space (x and y)
H = ds*B.shape[0]
CFL =(c*dt)/ds
a1, a2 = CFL**2, CFL**2
stable = abs(CFL) < 1/np.sqrt(2)
bcFact = (CFL - 1)/(CFL + 1)
print('Time step', dt, 'width=', W,'height=', H, 'nr steps', nrSteps, 'Sim=', simsel, 'IC', IC, 'Source', source)
print('CFL=', CFL, 'a1', a1, 'STABLE', stable, 'Space ds=', ds)
if not stable: exit()
if bSpectral:
if not stype[0] == 'SQUARE':
print('not supported')
exit()
vU = np.zeros( (nrSteps, B.shape[0], B.shape[1]) )
if bSpectral: vU = vU.astype(complex)
x2 = np.linspace(0, W, B.shape[1]+1)
y2 = np.linspace(0, H, B.shape[0]+1)
x, y = x2[0:B.shape[1]], y2[0:B.shape[0]]
[X,Y] = np.meshgrid(x,y)
n = B.shape[0]
k = (2*np.pi/(H)) * np.append(np.arange(0,n/2),np.arange(-n/2,0))
[Kx,Ky] = np.meshgrid(k,k)
Kx2, Ky2 = np.multiply(Kx,Kx), np.multiply(Ky,Ky)
Lft = -1.0*(Kx2 + Ky2) #-- Spectral Laplacian --
if IC[0] == 'GAUSS':
G = guassian(40000, X, Y, W*float(IC[1]), H*float(IC[2]))
if bSpectral:
vU[0,:,:], vU[1,:,:] = fft2(G), fft2(G)
else:
vU[0,:,:], vU[1,:,:] = G, G
elif IC[0] == 'RECT':
ix1, ix2, iy1, iy2 = int(IC[1]), int(IC[2]), int(IC[3]), int(IC[4])
R = np.zeros(B.shape)
R[iy1:iy2,ix1:ix2] = float(IC[5])
R[B != 0] = 0.0
vU[0,:,:], vU[1,:,:] = R, R
if source[0] == 'GAUSS':
b = float(source[3])*np.pi
GS = guassian(40000, X, Y, W*float(source[1]), H*float(source[2]))
for k in range(2,nrSteps):
if source[0] == 'GAUSS':
S = GS*np.cos(k*b) # gaussianSource(k*b, 40000, X, Y, W*float(source[1]), H*float(source[2]))
elif simsel == 7:
b = 0.09*np.pi # 0.0009 seems to show resonance at dt=0.0006
S = source(k*b)
else:
S = np.zeros(B.shape)
Ulast = vU[k-2] # time i-1
U = vU[k-1] # time i
if bSpectral:
Un = waveEquationSpectralUNext(U, Ulast, dt, c, Lft)
else:
Un = waveEquationUnext(U, Ulast, S, B, a1, a2) # this is time i+1
# special case absorbing boundary available at edges only
# dealt with here rather than in stencil
jEnd = B.shape[0] - 1
iEnd = B.shape[1] - 1
for j in range(U.shape[0]):
if B[j,0] == ABSORBING:
Un[j,0] = U[j,1] + bcFact*(Un[j,1] - U[j,0] )
if B[j,iEnd] == ABSORBING:
Un[j,iEnd] = U[j,iEnd-1] + bcFact*(Un[j,iEnd-1] - U[j,iEnd] )
for i in range(U.shape[1]):
if B[0,i] == ABSORBING:
Un[0,i] = U[1,i] + bcFact*(Un[1,i] - U[0,i])
if B[jEnd,i] == ABSORBING:
Un[jEnd,i] = U[jEnd-1,i] + bcFact*(Un[jEnd-1,i] - U[jEnd,i])
vU[k] = Un
if k % mod == 0.0:
print("step", k)
if plot:
if bSpectral:
img = np.real(ifft2(vU[k]))
img = np.average(img) - img # ?
else:
img = vU[k]
im = grid2rgb(img)
im[B != 0] = 1
fig, axes = plt.subplots(figsize=(8, 8))
plt.imshow(im)
if False:
plt.show()
else:
pltname = 'IO/sim_%s_%d_%s_%s.png' % (name, k, cmds[5].replace(':','-').replace('0.',''),
cmds[6].replace(':','-').replace('0.','') )
plt.savefig(pltname, dpi=300, bbox_inches='tight')
plt.clf()
if bSpectral:
realU = np.zeros( (nrSteps, B.shape[0], B.shape[1]) )
for k in range(nrSteps):
img = np.real(ifft2(vU[k]))
realU[k] = np.average(img) - img
vU = realU
for k in range(2,nrSteps): vU[k][B != 0] = 9999.99
numpy2tnsrFile(vU, 'sim_%s_%d_%s_%s.npz' % (name, nrSteps, cmds[5].replace(':','-').replace('0.',''),
cmds[6].replace(':','-').replace('0.','')) )
| [
2,
3486,
371,
4669,
76,
4908,
433,
198,
2,
4463,
578,
3580,
362,
35,
6769,
16022,
220,
198,
11748,
4866,
11,
25064,
11,
299,
32152,
355,
45941,
11,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
6738,
3197,
62,
7784,
3166,
... | 1.798817 | 2,873 |
# 10 Processing Images Multiprocessing
import time
import concurrent.futures
from PIL import Image, ImageFilter
img_names = [
'photo-1516117172878-fd2c41f4a759.jpg',
'photo-1532009324734-20a7a5813719.jpg',
'photo-1524429656589-6633a470097c.jpg',
'photo-1530224264768-7ff8c1789d79.jpg',
'photo-1564135624576-c5c88640f235.jpg',
'photo-1541698444083-023c97d3f4b6.jpg',
'photo-1522364723953-452d3431c267.jpg',
'photo-1513938709626-033611b8cc03.jpg',
'photo-1507143550189-fed454f93097.jpg',
'photo-1493976040374-85c8e12f0c0e.jpg',
'photo-1504198453319-5ce911bafcde.jpg',
'photo-1530122037265-a5f1f91d3b99.jpg',
'photo-1516972810927-80185027ca84.jpg',
'photo-1550439062-609e1531270e.jpg',
'photo-1549692520-acc6669e2f0c.jpg'
]
size = (1200, 1200)
if __name__ == '__main__':
start = time.perf_counter()
with concurrent.futures.ProcessPoolExecutor() as executor:
executor.map(process_image, img_names)
finish = time.perf_counter()
print(f'finished in {finish - start} seconds') | [
2,
838,
28403,
5382,
7854,
541,
305,
919,
278,
198,
198,
11748,
640,
198,
11748,
24580,
13,
69,
315,
942,
198,
6738,
350,
4146,
1330,
7412,
11,
7412,
22417,
198,
198,
9600,
62,
14933,
796,
685,
198,
220,
220,
220,
705,
23074,
12,
... | 2.113546 | 502 |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))
# -- Project information -----------------------------------------------------
project = 'SoundPy'
copyright = '2020, Aislyn Rose'
author = 'Aislyn Rose'
# The full version, including alpha/beta/rc tags
release = '0.1.0a2'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.mathjax',
'sphinx.ext.coverage',
'sphinx.ext.imgmath',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
'sphinx.ext.napoleon',
'sphinx_gallery.gen_gallery',
'numpydoc',
'sphinx.ext.autosummary']
sphinx_gallery_conf = {
'examples_dirs': 'examples/',
'gallery_dirs': 'auto_examples',
'filename_pattern' : '\\/plot',
'ignore_pattern' : '__init__\\.py',
'reference_url': {
'sphinx_gallery': None,
'numpy': 'http://docs.scipy.org/doc/numpy/',
'np': 'http://docs.scipy.org/doc/numpy/',
'scipy': 'http://docs.scipy.org/doc/scipy/reference',
'matplotlib': 'https://matplotlib.org/',
'sklearn': 'https://scikit-learn.org/stable',
'soundfile': 'https://pysoundfile.readthedocs.io/en/latest/',
'sf': 'https://pysoundfile.readthedocs.io/en/latest/',
'librosa' : 'https://librosa.org/librosa/',
'tensorflow' : 'https://www.tensorflow.org/api_docs/python/tf',
'tf' : 'https://www.tensorflow.org/api_docs/python/tf'
}
}
autosummary_generate = False
intersphinx_mapping = {
"librosa":("https://librosa.org/doc/latest/index.html", None),
"numpy": ("https://numpy.org/doc/stable/", None),
"np": ("https://numpy.org/doc/stable/", None),
"scipy": ("https://docs.scipy.org/doc/scipy/reference/", None),
"matplotlib": ("https://matplotlib.org/", None),
"sklearn": ("https://scikit-learn.org/stable/", None),
"soundfile": ("https://pysoundfile.readthedocs.io/en/latest", None),
"sf": ("https://pysoundfile.readthedocs.io/en/latest", None),
'tensorflow' : ('https://www.tensorflow.org/api_docs/python/tf', None),
'tf' : ('https://www.tensorflow.org/api_docs/python/tf', None),
}
# Generate plots for example sections
numpydoc_use_plots = True
# The reST default role (used for this markup: `text`) to use for all documents.
default_role = 'autolink'
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# Napoleon settings: docstrings follow the NumPy convention.
napoleon_numpy_docstring = True
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = ['.rst', '.md']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output -------------------------------------------------
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# If false, no module index is generated.
html_domain_indices = True
# If false, no index is generated.
html_use_index = True
# NOTE(review): html_use_modindex looks like a legacy alias of
# html_domain_indices in modern Sphinx -- confirm it is still honoured.
html_use_modindex = True
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
#html_theme = 'alabaster'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
    '**': [
        'relations.html', # needs 'show_related': True theme option to display
        'searchbox.html',
    ]
}
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'soundpydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    'preamble': '',
    # LaTeX figure (float) alignment.
    'figure_align': 'htbp',
    }
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
# NOTE: the u'' prefixes are Python 2 leftovers; harmless under Python 3.
master_doc = 'index'
latex_documents = [
    (master_doc, 'soundpy.tex', u'SoundPy Docs',
    u'Aislyn Rose', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'SoundPy', u'Documentation',
    [author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'SoundPy', u'Documentation',
    author, 'SoundPy',
    'A framework for exploring and experimenting with acoustics and deep learning.',
    'Miscellaneous'),
]
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
# `project`, `author` and `copyright` are defined earlier in this conf.py.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# Example configuration for intersphinx: add the Python standard library.
# NOTE: re-assigning ``intersphinx_mapping`` here (as this file previously
# did) would silently discard the numpy/scipy/librosa/... entries configured
# near the top of this file, and the URL-keyed form is the deprecated
# pre-1.0 intersphinx format.  Extend the existing dict with a named key
# instead, so every mapping survives.
intersphinx_mapping['python'] = ('https://docs.python.org/', None)
# Establishes order that modules are listed; default alphabetical.
autodoc_member_order = 'bysource'
| [
2,
28373,
2393,
329,
262,
45368,
28413,
10314,
27098,
13,
198,
2,
198,
2,
770,
2393,
691,
4909,
257,
6356,
286,
262,
749,
2219,
3689,
13,
1114,
257,
1336,
198,
2,
1351,
766,
262,
10314,
25,
198,
2,
3740,
1378,
2503,
13,
82,
746,
... | 2.890379 | 2,609 |
"""Simple code to tile images.
Licensed under the 3-clause BSD License:
Copyright (c) 2011-2014, Neeraj Kumar (neerajkumar.org)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the author nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL NEERAJ KUMAR BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import sys, os, time
from math import sqrt, ceil
from PIL import Image
from itertools import *
from utils import *
RESIZE_ALL = (64, 64)  # default (width, height) each tile is resized to
def tile(fnames, resize=RESIZE_ALL, textonly=0, rows=None, cols=None):
    """Tiles images and returns a tiled image.

    NOTE: this module is Python 2 code (``print >>`` statements); it will
    not parse under Python 3.

    :param fnames: list of image filenames; unreadable files are dropped.
    :param resize: (w, h) each tile is resized to, or falsy to use the
        largest source image size as the cell size.
    :param textonly: if truthy, skip compositing and return the filenames
        grouped into rows (a list of lists).
    :param rows: number of grid rows; derived from len(fnames) if None.
    :param cols: number of grid columns; derived from len(fnames) if None.
    :returns: a PIL Image with the inputs pasted in row-major order, or
        the grouped filename rows when ``textonly`` is truthy.
    """
    maxsize = [0, 0]
    assert fnames
    todel = set()
    # First pass: probe every file, tracking the largest dimensions seen and
    # remembering unreadable files so they can be excluded below.
    for fname in fnames:
        try:
            im = Image.open(fname)
            maxsize = [max(m, s) for m, s in zip(maxsize, im.size)]
        except Exception:
            todel.add(fname)
            continue
    fnames = [os.path.realpath(f) for f in fnames if f not in todel] # convert symlinks to real paths
    # NOTE(review): if *every* input was unreadable, `fnames` is now empty, the
    # grid math below divides by zero, and `im` is unbound at Image.new() --
    # confirm callers always pass at least one readable image.
    print >>sys.stderr, "There were %d images (removed %d bad) with maxsize %d x %d" % (len(fnames), len(todel), maxsize[0], maxsize[1])
    # now figure out the right size of the output image
    if not cols and not rows: # if neither dim is given, use the sqrt
        cols = int(sqrt(len(fnames)))
        rows = len(fnames)//cols + (0 if len(fnames)%cols == 0 else 1)
    elif cols and not rows: # only cols is given
        rows = len(fnames)//cols + (0 if len(fnames)%cols == 0 else 1)
    elif not cols and rows: # only rows is given
        cols = len(fnames)//rows + (0 if len(fnames)%rows == 0 else 1)
    else: # both are given
        pass
    if textonly:
        cur = 0
        rows = list(nkgrouper(cols, fnames))
        return rows
    if resize:
        boxsize = resize
    else:
        boxsize = maxsize
    outsize = tuple([s*n for s, n in zip(boxsize, [cols, rows])])
    print >>sys.stderr, "Output will be tiling %d x %d images, with image size %d x %d" % (cols, rows, outsize[0], outsize[1])
    # `im` is the last image successfully opened above; its mode (e.g. 'RGB')
    # is reused for the output canvas.
    out = Image.new(im.mode, outsize)
    cur = 0
    start = time.time()
    # Second pass: paste each image in row-major order, stopping once every
    # input has been placed (the last row may be partially filled).
    for r in range(rows):
        for c in range(cols):
            print >>sys.stderr, ' At col %d, row %d, cur %d, %0.2f secs elapsed...\r ' % (c, r, cur, time.time()-start),
            im = Image.open(fnames[cur]).resize(boxsize, Image.ANTIALIAS)
            box = (c*boxsize[0], r*boxsize[1])
            out.paste(im, box)
            cur += 1
            if cur >= len(fnames): break
    print >>sys.stderr
    return out
if __name__ == "__main__":
    # CLI entry point: python tile.py <outname> <image1> [<image2> ...]
    if len(sys.argv) < 3:
        print "Usage: python %s <outname> <image1> [<image2> ...]" % sys.argv[0]
        sys.exit()
    # NOTE(review): this `maxsize` appears unused -- tile() computes its own.
    maxsize = [0, 0]
    outname = sys.argv[1]
    fnames = sys.argv[2:]
    if not fnames: sys.exit()
    outim = tile(fnames)
    outim.save(outname)
| [
37811,
26437,
2438,
284,
17763,
4263,
13,
198,
198,
26656,
15385,
739,
262,
513,
12,
565,
682,
347,
10305,
13789,
25,
198,
198,
15269,
357,
66,
8,
2813,
12,
4967,
11,
3169,
263,
1228,
26105,
357,
710,
263,
1228,
74,
44844,
13,
2398,... | 2.546482 | 1,592 |
# Generated by Django 3.0.7 on 2020-06-23 17:02
from django.db import migrations, models
import django.db.models.deletion
| [
2,
2980,
515,
416,
37770,
513,
13,
15,
13,
22,
319,
12131,
12,
3312,
12,
1954,
1596,
25,
2999,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
14208,
13,
9945,
13,
27530,
13,
2934,
1616,
295,
... | 2.818182 | 44 |
import re
from cave import manipulate
| [
11748,
302,
198,
6738,
11527,
1330,
18510,
628
] | 4.875 | 8 |
import socket
import time
import struct
from control_algorithm.adaptive_tau import ControlAlgAdaptiveTauClient, ControlAlgAdaptiveTauServer
from data_reader.data_reader import get_data, get_data_train_samples
from models.get_model import get_model
from util.sampling import MinibatchSampling
from util.utils import send_msg, recv_msg
# Configurations are in a separate config.py file
from config import SERVER_ADDR, SERVER_PORT, dataset_file_path
# Federated-learning client: connects to the coordinating server, then for
# each session performs rounds of local SGD between global aggregations.
# The message protocol (tags and payload layout) must mirror the server side.
sock = socket.socket()
sock.connect((SERVER_ADDR, SERVER_PORT))
print('---------------------------------------------------------------------------')
# Caches so the (possibly large) training set is only re-read when the
# server changes the batch/total-data/simulation configuration.
batch_size_prev = None
total_data_prev = None
sim_prev = None
try:
    while True:  # one iteration per training session initiated by the server
        msg = recv_msg(sock, 'MSG_INIT_SERVER_TO_CLIENT')
        # ['MSG_INIT_SERVER_TO_CLIENT', model_name, dataset, num_iterations_with_same_minibatch_for_tau_equals_one, step_size, batch_size,
        #  total_data, use_control_alg, indices_this_node, read_all_data_for_stochastic, use_min_loss, sim]
        model_name = msg[1]
        dataset = msg[2]
        num_iterations_with_same_minibatch_for_tau_equals_one = msg[3]
        step_size = msg[4]
        batch_size = msg[5]
        total_data = msg[6]
        control_alg_server_instance = msg[7]
        indices_this_node = msg[8]
        read_all_data_for_stochastic = msg[9]
        use_min_loss = msg[10]
        sim = msg[11]
        # model = get_model(model_name)
        # model2 = get_model(model_name)  # Used for computing loss_w_prev_min_loss for stochastic gradient descent,
        #                                 # so that the state of model can be still used by control algorithm later.
        # if hasattr(model, 'create_graph'):
        #     model.create_graph(learning_rate=step_size)
        # if hasattr(model2, 'create_graph'):
        #     model2.create_graph(learning_rate=step_size)
        # TODO
        # `model2` is a separate instance so loss evaluation on the previous
        # best weights cannot disturb `model`'s internal state.
        model = get_model(model_name)
        model2 = get_model(model_name)
        # Assume the dataset does not change
        if read_all_data_for_stochastic or batch_size >= total_data:
            # Only (re)read when the configuration actually changed.
            if batch_size_prev != batch_size or total_data_prev != total_data or (batch_size >= total_data and sim_prev != sim):
                print('Reading all data samples used in training...')
                train_image, train_label, _, _, _ = get_data(dataset, total_data, dataset_file_path, sim_round=sim)
                batch_size_prev = batch_size
                total_data_prev = total_data
                sim_prev = sim
        # Full-batch (deterministic) vs. minibatch (stochastic) setup.
        if batch_size >= total_data:
            sampler = None
            train_indices = indices_this_node
        else:
            sampler = MinibatchSampling(indices_this_node, batch_size, sim)
            train_indices = None  # To be defined later
        last_batch_read_count = None
        data_size_local = len(indices_this_node)
        # The server sends its control-algorithm instance only as a marker of
        # whether the adaptive-tau scheme is enabled.
        if isinstance(control_alg_server_instance, ControlAlgAdaptiveTauServer):
            control_alg = ControlAlgAdaptiveTauClient()
        else:
            control_alg = None
        w_prev_min_loss = None
        w_last_global = None
        total_iterations = 0
        msg = ['MSG_DATA_PREP_FINISHED_CLIENT_TO_SERVER']
        send_msg(sock, msg)
        while True:  # one iteration per global aggregation round
            print('---------------------------------------------------------------------------')
            msg = recv_msg(sock, 'MSG_WEIGHT_TAU_SERVER_TO_CLIENT')
            # ['MSG_WEIGHT_TAU_SERVER_TO_CLIENT', w_global, tau, is_last_round, prev_loss_is_min]
            w = msg[1]
            tau_config = msg[2]
            is_last_round = msg[3]
            prev_loss_is_min = msg[4]
            # Track the global weights that achieved minimum loss so far.
            if prev_loss_is_min or ((w_prev_min_loss is None) and (w_last_global is not None)):
                w_prev_min_loss = w_last_global
            if control_alg is not None:
                control_alg.init_new_round(w)
            time_local_start = time.time()  # Only count this part as time for local iteration because the remaining part does not increase with tau
            # Perform local iteration
            grad = None
            loss_last_global = None  # Only the loss at starting time is from global model parameter
            loss_w_prev_min_loss = None
            tau_actual = 0
            for i in range(0, tau_config):  # up to tau local SGD steps
                # When batch size is smaller than total data, read the data here; else read data during client init above
                if batch_size < total_data:
                    # When using the control algorithm, we want to make sure that the batch in the last local iteration
                    # in the previous round and the first iteration in the current round is the same,
                    # because the local and global parameters are used to
                    # estimate parameters used for the adaptive tau control algorithm.
                    # Therefore, we only change the data in minibatch when (i != 0) or (sample_indices is None).
                    # The last condition with tau <= 1 is to make sure that the batch will change when tau = 1,
                    # this may add noise in the parameter estimation for the control algorithm,
                    # and the amount of noise would be related to NUM_ITERATIONS_WITH_SAME_MINIBATCH.
                    if (not isinstance(control_alg, ControlAlgAdaptiveTauClient)) or (i != 0) or (train_indices is None) \
                            or (tau_config <= 1 and
                                (last_batch_read_count is None or
                                 last_batch_read_count >= num_iterations_with_same_minibatch_for_tau_equals_one)):
                        sample_indices = sampler.get_next_batch()
                        if read_all_data_for_stochastic:
                            train_indices = sample_indices
                        else:
                            train_image, train_label = get_data_train_samples(dataset, sample_indices, dataset_file_path)
                            train_indices = range(0, min(batch_size, len(train_label)))
                        last_batch_read_count = 0
                    last_batch_read_count += 1
                grad = model.gradient(train_image, train_label, w, train_indices)
                if i == 0:
                    try:
                        # Note: This has to follow the gradient computation line above
                        loss_last_global = model.loss_from_prev_gradient_computation()
                        print('*** Loss computed from previous gradient computation')
                    # NOTE(review): bare except deliberately catches models
                    # without loss_from_prev_gradient_computation() support.
                    except:
                        # Will get an exception if the model does not support computing loss
                        # from previous gradient computation
                        loss_last_global = model.loss(train_image, train_label, w, train_indices)
                        print('*** Loss computed from data')
                    w_last_global = w
                    if use_min_loss:
                        if (batch_size < total_data) and (w_prev_min_loss is not None):
                            # Compute loss on w_prev_min_loss so that the batch remains the same
                            loss_w_prev_min_loss = model2.loss(train_image, train_label, w_prev_min_loss, train_indices)
                # w = w - step_size * grad
                # update w
                w = model.update_w(train_image, train_label, w, step_size, train_indices)
                tau_actual += 1
                total_iterations += 1
                # The control algorithm may cut the round short of tau_config.
                if control_alg is not None:
                    is_last_local = control_alg.update_after_each_local(i, w, grad, total_iterations)
                    if is_last_local:
                        break
            # Local operation finished, global aggregation starts
            time_local_end = time.time()
            time_all_local = time_local_end - time_local_start
            print('time_all_local =', time_all_local)
            if control_alg is not None:
                control_alg.update_after_all_local(model, train_image, train_label, train_indices,
                                                   w, w_last_global, loss_last_global)
            # Report locally-updated weights plus timing/size stats for aggregation.
            msg = ['MSG_WEIGHT_TIME_SIZE_CLIENT_TO_SERVER', w, time_all_local, tau_actual, data_size_local,
                   loss_last_global, loss_w_prev_min_loss]
            send_msg(sock, msg)
            if control_alg is not None:
                control_alg.send_to_server(sock)
            if is_last_round:
                break
# Server closing the connection mid-message surfaces as one of these errors.
except (struct.error, socket.error):
    print('Server has stopped')
    pass
| [
11748,
17802,
198,
11748,
640,
198,
11748,
2878,
198,
198,
6738,
1630,
62,
282,
42289,
13,
42552,
425,
62,
83,
559,
1330,
6779,
2348,
70,
48003,
425,
51,
559,
11792,
11,
6779,
2348,
70,
48003,
425,
51,
559,
10697,
198,
6738,
1366,
6... | 2.113463 | 4,063 |
import numpy as np
import cv2
import logging
import os
from os.path import exists
import imageio
import torch
import matplotlib.pyplot as plt
if __name__ == '__main__':
    # Resize per-class attention maps (saved under feat_1/ as <index>_<label>.png)
    # back to each source image's resolution and save them under orig_1/ keyed
    # by image name instead of index.
    # NOTE(review): `load_dataset` is presumably defined earlier in this file
    # (not visible here) -- verify it returns (image_names, per_image_labels).
    train_lst = 'data/VOCdevkit/VOC2012/ImageSets/Segmentation/train_cls.txt'
    root_folder = 'data/VOCdevkit/VOC2012'
    im_lst, label_lst = load_dataset(train_lst)
    atten_path = 'feat_1'   # input attention maps, named by sample index
    save_path = 'orig_1'    # output maps, named by image id
    if not exists(save_path):
        os.mkdir(save_path)
    for i in range(len(im_lst)):
        im_name = '{}/JPEGImages/{}.jpg'.format(root_folder, im_lst[i])
        im_labels = label_lst[i]
        # NOTE(review): cv2.imread returns None for a missing/corrupt file,
        # which would make img.shape raise -- confirm all listed images exist.
        img = cv2.imread(im_name)
        height, width = img.shape[:2]
        for label in im_labels:
            att_name = '{}/{}_{}.png'.format(atten_path, i, label)
            if not exists(att_name):
                continue
            # Load grayscale, scale up to the original image resolution.
            att = cv2.imread(att_name, 0)
            att = cv2.resize(att, (width, height))
            att = np.array(att, dtype = np.float32)
            save_name = '{}/{}_{}.png'.format(save_path, im_lst[i], label)
            cv2.imwrite(save_name,att)
| [
11748,
299,
32152,
355,
45941,
198,
11748,
269,
85,
17,
198,
11748,
18931,
198,
11748,
28686,
220,
198,
6738,
28686,
13,
6978,
1330,
7160,
220,
198,
11748,
2939,
952,
198,
11748,
28034,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
35... | 1.939024 | 574 |
import sys
import math
from collections import defaultdict, deque

sys.setrecursionlimit(10 ** 6)

stdin = sys.stdin

INF = float('inf')
ni = lambda: int(ns())
na = lambda: list(map(int, stdin.readline().split()))
ns = lambda: stdin.readline().strip()


def count_no_seven(n):
    """Count integers i in [1, n] whose decimal AND octal representations
    both avoid the digit 7.

    The original code called an undefined helper ``base_10_to_n(i, 8)``
    (NameError at runtime); the built-in ``oct()`` gives the octal digits
    directly (its '0o' prefix can never contain '7').

    :param n: inclusive upper bound (n >= 0; returns 0 for n < 1).
    :returns: the number of qualifying integers.
    """
    return sum(
        1 for i in range(1, n + 1)
        if '7' not in str(i) and '7' not in oct(i)
    )


if __name__ == '__main__':
    # Guarding the I/O lets this module be imported without reading stdin;
    # running it as a script behaves exactly as before: read N, print count.
    N = ni()
    print(count_no_seven(N))
11748,
25064,
198,
11748,
10688,
198,
6738,
17268,
1330,
4277,
11600,
11,
390,
4188,
198,
198,
17597,
13,
2617,
8344,
24197,
32374,
7,
940,
12429,
718,
8,
198,
19282,
259,
796,
25064,
13,
19282,
259,
198,
198,
1268,
37,
796,
12178,
10... | 2.364706 | 170 |
#!/usr/bin/env python
# coding=utf-8
'''
@描述:
@版本: V1_0
@作者: LiWanglin
@创建时间: Do not edit
@最后编辑人: LiWanglin
@最后编辑时间: Do not Edit
'''
from PyQt5.QtWidgets import QMainWindow, QMessageBox
from PyQt5.QtCore import QTimer, pyqtSlot, QEvent
from PyQt5.QtGui import QPixmap, QImage, QPainter
from PyQt5.QtChart import QChartView, QChart, QLineSeries, QValueAxis
from ui.gui_main_interface import Ui_main_interface
import cv2
from get_data.get_data import GetFacialData
import numpy as np
from result_process import result_process
import re
class MainInterface(QMainWindow):
'''实现 GUI 以及其连接整个程序的功能
@属性说明:
@方法说明:
@注意:
'''
    def _connect_slot(self):
        '''Initialise the signal/slot connections.

        Args:
            None
        Returns:
            None
        Note:
            ``self.ui`` and ``self.time`` must already exist before this
            is called (presumably created in ``__init__`` -- not visible
            here, verify).
        '''
        # Start/stop button toggles detection via _display (checkable button).
        self.ui.btn_start.clicked.connect(self._display)
        # Periodic timer tick drives one detection step; `self.test` is
        # defined elsewhere in this class -- TODO confirm.
        self.time.timeout.connect(self.test)
    def paintEvent(self, evevt):
        """Paint the window background image.

        Qt's default repaint handler; triggered whenever the widget needs
        redrawing.  This override only draws the application's background
        picture, then defers to the base class.

        Args:
            evevt: the Qt paint event.
        Returns:
            None
        """
        temp_painter = QPainter(self)
        # Load the background picture ("groud.jpg" spelling is the actual
        # asset name on disk -- presumably intentional, verify).
        soft_background_image = QPixmap("resources/groud.jpg")
        # Stretch the background over the whole window.
        temp_painter.drawPixmap(0, 0, self.width(), self.height(), soft_background_image)
        # Call the parent paintEvent() so its built-in painting still runs.
        super().paintEvent(evevt)
    def eye_data_chart_init(self):
        '''Initialise the left/right eye closure-degree line chart.

        Builds a QChart with two QLineSeries (left eye, right eye) over a
        0-60 s x-axis and a 0-0.5 value y-axis, and adds the chart view to
        the window layout.

        Args:
            None
        Returns:
            None
        '''
        # Create the chart and its view widget.
        self._chart = QChart()
        self._chart.setTitle("眼睛闭合程度值") # chart title: "eye closure degree"
        self._chartView = QChartView(self)
        self._chartView.setChart(self._chart) # attach chart to the view
        # Place the view in the window layout.
        self.ui.horizontalLayout_2.addWidget(self._chartView)
        # Create the two curve series (left / right eye).
        self._series0 = QLineSeries()
        self._series1 = QLineSeries()
        self._series0.setName("左眼曲线") # legend label: "left eye curve"
        self._series1.setName("右眼曲线") # legend label: "right eye curve"
        self._chart.addSeries(self._series0) # register series with the chart
        self._chart.addSeries(self._series1)
        # Create the axes.
        self._axis_x = QValueAxis() # x axis
        self._axis_x.setRange(0, 60) # x range: a rolling 60-second window
        self._axis_x.setTitleText("time(secs)") # x axis title
        self._axis_y = QValueAxis() # y axis
        self._axis_y.setRange(0, 0.5) # y range for the closure metric
        self._axis_y.setTitleText("value") # y axis title
        # Bind both series to the shared axes.
        self._chart.setAxisX(self._axis_x, self._series0)
        self._chart.setAxisY(self._axis_y, self._series0)
        self._chart.setAxisX(self._axis_x, self._series1)
        self._chart.setAxisY(self._axis_y, self._series1)
    @pyqtSlot(bool)
    def _display(self, checked):
        '''Start or stop fatigue detection (slot for the toggle button).

        Args:
            checked: the button's checked state -- True starts detection,
                False stops it.
        Returns:
            None
        Note:
            The eye-closure threshold is read from the line edit and must
            match the x.xx pattern (one digit, dot, two digits).
        '''
        if(checked==False): # button unchecked: stop the detection loop
            self.ui.le_eye_threshold.setEnabled(True)
            self.ui.btn_start.setText("启动")
            self.ui.lb_fatigue_detection.setText("检测停止")
            self.time.stop()
        else:
            threshold_str = self.ui.le_eye_threshold.text() # raw threshold text
            threshold_str = re.match(r"\d\.\d\d$", threshold_str) # constrain to x.xx format
            if(threshold_str==None):
                message_title = "阈值格式错误"
                message_text = "请输入正确的阈值格式,格式为 x.xx (x 为数字)"
                QMessageBox.critical(self, message_title, message_text)
            else:
                self.ui.btn_start.setText("停止")
                self.ui.le_eye_threshold.setEnabled(False)
                model_path = r"E:\make_data\facial" # facial landmark model path
                opencv_facial_path = r"E:\Fatigue_Detection\model_data\facial_model\haarcascade_frontalface_default.xml" # face detector model path
                # SECURITY(review): eval() on user-editable combobox text --
                # int() would be safer; verify the combobox only holds digits.
                cap_index = eval(self.ui.cmb_cap_index.currentText()) # camera index from combobox
                # cap_file = r"C:\Users\LWL\Pictures\Camera Roll\WIN_20200503_07_51_19_Pro.mp4"
                self.facial_data = GetFacialData(tf_model=model_path, facial_model_file=opencv_facial_path, cap_index=cap_index) # create the data source
                self.time.start(1000) # detection interval: 1000 ms per tick
                self.ui.lb_fatigue_detection.setText("检测中")
                self._series0.clear()
                self._series1.clear()
                self._t=0 # restart the chart time counter
                # NOTE(review): eval() on regex-validated text; float() would
                # express the intent (the pattern guarantees a float literal).
                self._perclos_threshold = eval(threshold_str.group())
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
19617,
28,
40477,
12,
23,
198,
7061,
6,
198,
31,
162,
237,
237,
32573,
108,
25,
220,
198,
31,
48304,
17312,
105,
25,
569,
16,
62,
15,
198,
31,
43291,
38519,
25,
7455,
54,
648,... | 1.438283 | 3,192 |
#! /usr/env/python
"""
Python implementation of VoronoiDelaunayGrid, a class used to create and manage
unstructured, irregular grids for 2D numerical models.
Getting Information about a Grid
--------------------------------
The following attributes, properties, and methods provide data about the grid,
its geometry, and the connectivity among the various elements. Each grid
element has an ID number, which is also its position in an array that
contains information about that type of element. For example, the *x*
coordinate of node 5 would be found at `grid.node_x[5]`.
The naming of grid-element arrays is *attribute*`_at_`*element*, where
*attribute* is the name of the data in question, and *element* is the element
to which the attribute applies. For example, the property `node_at_cell`
contains the ID of the node associated with each cell. For example,
`node_at_cell[3]` contains the *node ID* of the node associated with cell 3.
The *attribute* is singular if there is only one value per element; for
example, there is only one node associated with each cell. It is plural when
there are multiple values per element; for example, the `faces_at_cell` array
contains multiple faces for each cell. Exceptions to these general rules are
functions that return indices of a subset of all elements of a particular type.
For example, you can obtain an array with IDs of only the core nodes using
`core_nodes`, while `active_links` provides an array of IDs of active links
(only). Finally, attributes that represent a measurement of something, such as
the length of a link or the surface area of a cell, are described using `_of_`,
as in the example `area_of_cell`.
Information about the grid as a whole
+++++++++++++++++++++++++++++++++++++
.. autosummary::
:toctree: generated/
~landlab.grid.voronoi.VoronoiDelaunayGrid.axis_name
~landlab.grid.voronoi.VoronoiDelaunayGrid.axis_units
~landlab.grid.voronoi.VoronoiDelaunayGrid.move_origin
~landlab.grid.voronoi.VoronoiDelaunayGrid.ndim
~landlab.grid.voronoi.VoronoiDelaunayGrid.node_axis_coordinates
~landlab.grid.voronoi.VoronoiDelaunayGrid.number_of_elements
~landlab.grid.voronoi.VoronoiDelaunayGrid.save
~landlab.grid.voronoi.VoronoiDelaunayGrid.size
Information about nodes
+++++++++++++++++++++++
.. autosummary::
:toctree: generated/
~landlab.grid.voronoi.VoronoiDelaunayGrid.active_link_dirs_at_node
~landlab.grid.voronoi.VoronoiDelaunayGrid.active_neighbors_at_node
~landlab.grid.voronoi.VoronoiDelaunayGrid.all_node_azimuths_map
~landlab.grid.voronoi.VoronoiDelaunayGrid.all_node_distances_map
~landlab.grid.voronoi.VoronoiDelaunayGrid.boundary_nodes
~landlab.grid.voronoi.VoronoiDelaunayGrid.calc_distances_of_nodes_to_point
~landlab.grid.voronoi.VoronoiDelaunayGrid.cell_area_at_node
~landlab.grid.voronoi.VoronoiDelaunayGrid.cell_at_node
~landlab.grid.voronoi.VoronoiDelaunayGrid.closed_boundary_nodes
~landlab.grid.voronoi.VoronoiDelaunayGrid.core_nodes
~landlab.grid.voronoi.VoronoiDelaunayGrid.downwind_links_at_node
~landlab.grid.voronoi.VoronoiDelaunayGrid.fixed_gradient_boundary_nodes
~landlab.grid.voronoi.VoronoiDelaunayGrid.fixed_value_boundary_nodes
~landlab.grid.voronoi.VoronoiDelaunayGrid.link_at_node_is_downwind
~landlab.grid.voronoi.VoronoiDelaunayGrid.link_at_node_is_upwind
~landlab.grid.voronoi.VoronoiDelaunayGrid.link_dirs_at_node
~landlab.grid.voronoi.VoronoiDelaunayGrid.links_at_node
~landlab.grid.voronoi.VoronoiDelaunayGrid.neighbors_at_node
~landlab.grid.voronoi.VoronoiDelaunayGrid.node_at_cell
~landlab.grid.voronoi.VoronoiDelaunayGrid.node_at_core_cell
~landlab.grid.voronoi.VoronoiDelaunayGrid.node_at_link_head
~landlab.grid.voronoi.VoronoiDelaunayGrid.node_at_link_tail
~landlab.grid.voronoi.VoronoiDelaunayGrid.node_axis_coordinates
~landlab.grid.voronoi.VoronoiDelaunayGrid.node_is_boundary
~landlab.grid.voronoi.VoronoiDelaunayGrid.node_x
~landlab.grid.voronoi.VoronoiDelaunayGrid.node_y
~landlab.grid.voronoi.VoronoiDelaunayGrid.nodes
~landlab.grid.voronoi.VoronoiDelaunayGrid.nodes_at_patch
~landlab.grid.voronoi.VoronoiDelaunayGrid.number_of_core_nodes
~landlab.grid.voronoi.VoronoiDelaunayGrid.number_of_links_at_node
~landlab.grid.voronoi.VoronoiDelaunayGrid.number_of_nodes
~landlab.grid.voronoi.VoronoiDelaunayGrid.number_of_patches_present_at_node
~landlab.grid.voronoi.VoronoiDelaunayGrid.open_boundary_nodes
~landlab.grid.voronoi.VoronoiDelaunayGrid.patches_at_node
~landlab.grid.voronoi.VoronoiDelaunayGrid.patches_present_at_node
~landlab.grid.voronoi.VoronoiDelaunayGrid.set_nodata_nodes_to_closed
~landlab.grid.voronoi.VoronoiDelaunayGrid.set_nodata_nodes_to_fixed_gradient
~landlab.grid.voronoi.VoronoiDelaunayGrid.status_at_node
~landlab.grid.voronoi.VoronoiDelaunayGrid.unit_vector_sum_xcomponent_at_node
~landlab.grid.voronoi.VoronoiDelaunayGrid.unit_vector_sum_ycomponent_at_node
~landlab.grid.voronoi.VoronoiDelaunayGrid.upwind_links_at_node
~landlab.grid.voronoi.VoronoiDelaunayGrid.x_of_node
~landlab.grid.voronoi.VoronoiDelaunayGrid.y_of_node
Information about links
+++++++++++++++++++++++
.. autosummary::
:toctree: generated/
~landlab.grid.voronoi.VoronoiDelaunayGrid.active_link_dirs_at_node
~landlab.grid.voronoi.VoronoiDelaunayGrid.active_links
~landlab.grid.voronoi.VoronoiDelaunayGrid.angle_of_link
~landlab.grid.voronoi.VoronoiDelaunayGrid.angle_of_link_about_head
~landlab.grid.voronoi.VoronoiDelaunayGrid.downwind_links_at_node
~landlab.grid.voronoi.VoronoiDelaunayGrid.face_at_link
~landlab.grid.voronoi.VoronoiDelaunayGrid.fixed_links
~landlab.grid.voronoi.VoronoiDelaunayGrid.length_of_link
~landlab.grid.voronoi.VoronoiDelaunayGrid.link_at_face
~landlab.grid.voronoi.VoronoiDelaunayGrid.link_at_node_is_downwind
~landlab.grid.voronoi.VoronoiDelaunayGrid.link_at_node_is_upwind
~landlab.grid.voronoi.VoronoiDelaunayGrid.link_dirs_at_node
~landlab.grid.voronoi.VoronoiDelaunayGrid.links_at_node
~landlab.grid.voronoi.VoronoiDelaunayGrid.links_at_patch
~landlab.grid.voronoi.VoronoiDelaunayGrid.node_at_link_head
~landlab.grid.voronoi.VoronoiDelaunayGrid.node_at_link_tail
~landlab.grid.voronoi.VoronoiDelaunayGrid.number_of_active_links
~landlab.grid.voronoi.VoronoiDelaunayGrid.number_of_fixed_links
~landlab.grid.voronoi.VoronoiDelaunayGrid.number_of_links
~landlab.grid.voronoi.VoronoiDelaunayGrid.number_of_links_at_node
~landlab.grid.voronoi.VoronoiDelaunayGrid.number_of_patches_present_at_link
~landlab.grid.voronoi.VoronoiDelaunayGrid.patches_at_link
~landlab.grid.voronoi.VoronoiDelaunayGrid.patches_present_at_link
~landlab.grid.voronoi.VoronoiDelaunayGrid.resolve_values_on_active_links
~landlab.grid.voronoi.VoronoiDelaunayGrid.resolve_values_on_links
~landlab.grid.voronoi.VoronoiDelaunayGrid.status_at_link
~landlab.grid.voronoi.VoronoiDelaunayGrid.unit_vector_xcomponent_at_link
~landlab.grid.voronoi.VoronoiDelaunayGrid.unit_vector_ycomponent_at_link
~landlab.grid.voronoi.VoronoiDelaunayGrid.upwind_links_at_node
~landlab.grid.voronoi.VoronoiDelaunayGrid.x_of_link
~landlab.grid.voronoi.VoronoiDelaunayGrid.y_of_link
Information about cells
+++++++++++++++++++++++
.. autosummary::
:toctree: generated/
~landlab.grid.voronoi.VoronoiDelaunayGrid.area_of_cell
~landlab.grid.voronoi.VoronoiDelaunayGrid.cell_area_at_node
~landlab.grid.voronoi.VoronoiDelaunayGrid.cell_at_node
~landlab.grid.voronoi.VoronoiDelaunayGrid.core_cells
~landlab.grid.voronoi.VoronoiDelaunayGrid.faces_at_cell
~landlab.grid.voronoi.VoronoiDelaunayGrid.node_at_cell
~landlab.grid.voronoi.VoronoiDelaunayGrid.node_at_core_cell
~landlab.grid.voronoi.VoronoiDelaunayGrid.number_of_cells
~landlab.grid.voronoi.VoronoiDelaunayGrid.number_of_core_cells
~landlab.grid.voronoi.VoronoiDelaunayGrid.number_of_faces_at_cell
~landlab.grid.voronoi.VoronoiDelaunayGrid.x_of_cell
~landlab.grid.voronoi.VoronoiDelaunayGrid.y_of_cell
Information about faces
+++++++++++++++++++++++
.. autosummary::
:toctree: generated/
~landlab.grid.voronoi.VoronoiDelaunayGrid.active_faces
~landlab.grid.voronoi.VoronoiDelaunayGrid.face_at_link
~landlab.grid.voronoi.VoronoiDelaunayGrid.faces_at_cell
~landlab.grid.voronoi.VoronoiDelaunayGrid.link_at_face
~landlab.grid.voronoi.VoronoiDelaunayGrid.number_of_active_faces
~landlab.grid.voronoi.VoronoiDelaunayGrid.number_of_faces
~landlab.grid.voronoi.VoronoiDelaunayGrid.number_of_faces_at_cell
~landlab.grid.voronoi.VoronoiDelaunayGrid.width_of_face
~landlab.grid.voronoi.VoronoiDelaunayGrid.x_of_face
~landlab.grid.voronoi.VoronoiDelaunayGrid.y_of_face
Information about patches
+++++++++++++++++++++++++
.. autosummary::
:toctree: generated/
~landlab.grid.voronoi.VoronoiDelaunayGrid.links_at_patch
~landlab.grid.voronoi.VoronoiDelaunayGrid.nodes_at_patch
~landlab.grid.voronoi.VoronoiDelaunayGrid.number_of_patches
~landlab.grid.voronoi.VoronoiDelaunayGrid.number_of_patches_present_at_link
~landlab.grid.voronoi.VoronoiDelaunayGrid.number_of_patches_present_at_node
~landlab.grid.voronoi.VoronoiDelaunayGrid.patches_at_link
~landlab.grid.voronoi.VoronoiDelaunayGrid.patches_at_node
~landlab.grid.voronoi.VoronoiDelaunayGrid.patches_present_at_link
~landlab.grid.voronoi.VoronoiDelaunayGrid.patches_present_at_node
Information about corners
+++++++++++++++++++++++++
.. autosummary::
:toctree: generated/
~landlab.grid.voronoi.VoronoiDelaunayGrid.number_of_corners
Data Fields in ModelGrid
------------------------
:class:`~.ModelGrid` inherits from the :class:`~.ModelDataFields` class. This
provides `~.ModelGrid`, and its subclasses, with the ability to, optionally,
store data values that are associated with the different types grid elements
(nodes, cells, etc.). In particular, as part of ``ModelGrid.__init__()``,
data field *groups* are added to the `ModelGrid` that provide containers to
put data fields into. There is one group for each of the eight grid elements
(node, cell, link, face, core_node, core_cell, active_link, and active_face).
To access these groups, use the same methods as accessing groups with
`~.ModelDataFields`. ``ModelGrid.__init__()`` adds the following attributes to
itself that provide access to the values groups:
.. autosummary::
:toctree: generated/
:nosignatures:
~landlab.grid.voronoi.VoronoiDelaunayGrid.at_node
~landlab.grid.voronoi.VoronoiDelaunayGrid.at_cell
~landlab.grid.voronoi.VoronoiDelaunayGrid.at_link
~landlab.grid.voronoi.VoronoiDelaunayGrid.at_face
~landlab.grid.voronoi.VoronoiDelaunayGrid.at_patch
~landlab.grid.voronoi.VoronoiDelaunayGrid.at_corner
Each of these attributes returns a ``dict``-like object whose keys are value
names as strings and values are numpy arrays that gives quantities at
grid elements.
Create Field Arrays
+++++++++++++++++++
:class:`~.ModelGrid` inherits several useful methods for creating new data
fields and adding new data fields to a ModelGrid instance. Methods to add or
create a new data array follow the ``numpy`` syntax for creating arrays. The
following methods create and, optionally, initialize new arrays. These arrays
are of the correct size but a new field will not be added to the field:
.. autosummary::
:toctree: generated/
:nosignatures:
~landlab.field.grouped.ModelDataFields.empty
~landlab.field.grouped.ModelDataFields.ones
~landlab.field.grouped.ModelDataFields.zeros
Add Fields to a ModelGrid
+++++++++++++++++++++++++
Unlike with the equivalent numpy functions, these do not take a size argument
as the size of the returned arrays is determined from the size of the
ModelGrid. However, the keyword arguments are the same as those of the numpy
equivalents.
The following methods will create a new array and add a reference to that
array to the ModelGrid:
.. autosummary::
:toctree: generated/
:nosignatures:
~landlab.grid.voronoi.VoronoiDelaunayGrid.add_empty
~landlab.grid.voronoi.VoronoiDelaunayGrid.add_field
~landlab.grid.voronoi.VoronoiDelaunayGrid.add_ones
~landlab.grid.voronoi.VoronoiDelaunayGrid.add_zeros
~landlab.grid.voronoi.VoronoiDelaunayGrid.delete_field
~landlab.grid.voronoi.VoronoiDelaunayGrid.set_units
These methods operate in the same way as the previous set except that, in
addition to creating a new array, the newly-created array is added to the
ModelGrid. The calling signature is the same but with the addition of an
argument that gives the name of the new field as a string. The additional
method, :meth:`~.ModelDataFields.add_field`, adds a previously allocated
array to the ModelGrid. If the array is of the incorrect size it will raise
``ValueError``.
Query Fields
++++++++++++
Use the following methods/attributes get information about the stored data
fields:
.. autosummary::
:toctree: generated/
:nosignatures:
~landlab.field.grouped.ModelDataFields.size
~landlab.field.grouped.ModelDataFields.keys
~landlab.field.grouped.ModelDataFields.has_group
~landlab.field.grouped.ModelDataFields.has_field
~landlab.grid.voronoi.VoronoiDelaunayGrid.field_units
~landlab.grid.voronoi.VoronoiDelaunayGrid.field_values
~landlab.field.grouped.ModelDataFields.groups
i.e., call, e.g. mg.has_field('node', 'my_field_name')
# START HERE check that all functions listed below are included above,
# ignore ones that start with underscores(_)
Gradients, fluxes, and divergences on the grid
----------------------------------------------
Landlab is designed to easily calculate gradients in quantities across the
grid, and to construct fluxes and flux divergences from them. Because these
calculations tend to be a little more involved than property lookups, the
methods tend to start with `calc_`.
.. autosummary::
:toctree: generated/
~landlab.grid.voronoi.VoronoiDelaunayGrid.calc_diff_at_link
~landlab.grid.voronoi.VoronoiDelaunayGrid.calc_flux_div_at_node
~landlab.grid.voronoi.VoronoiDelaunayGrid.calc_grad_at_link
~landlab.grid.voronoi.VoronoiDelaunayGrid.calc_grad_at_patch
~landlab.grid.voronoi.VoronoiDelaunayGrid.calc_net_flux_at_node
~landlab.grid.voronoi.VoronoiDelaunayGrid.calc_slope_at_node
~landlab.grid.voronoi.VoronoiDelaunayGrid.calc_slope_at_patch
~landlab.grid.voronoi.VoronoiDelaunayGrid.calc_unit_normal_at_patch
Mappers
-------
These methods allow mapping of values defined on one grid element type onto a
second, e.g., mapping upwind node values onto links, or mean link values onto
nodes.
.. autosummary::
:toctree: generated/
~landlab.grid.voronoi.VoronoiDelaunayGrid.map_downwind_node_link_max_to_node
~landlab.grid.voronoi.VoronoiDelaunayGrid.map_downwind_node_link_mean_to_node
~landlab.grid.voronoi.VoronoiDelaunayGrid.map_link_head_node_to_link
~landlab.grid.voronoi.VoronoiDelaunayGrid.map_link_tail_node_to_link
~landlab.grid.voronoi.VoronoiDelaunayGrid.map_link_vector_sum_to_patch
~landlab.grid.voronoi.VoronoiDelaunayGrid.map_link_vector_to_nodes
~landlab.grid.voronoi.VoronoiDelaunayGrid.map_max_of_link_nodes_to_link
~landlab.grid.voronoi.VoronoiDelaunayGrid.map_max_of_node_links_to_node
~landlab.grid.voronoi.VoronoiDelaunayGrid.map_max_of_patch_nodes_to_patch
~landlab.grid.voronoi.VoronoiDelaunayGrid.map_mean_of_link_nodes_to_link
~landlab.grid.voronoi.VoronoiDelaunayGrid.map_mean_of_patch_nodes_to_patch
~landlab.grid.voronoi.VoronoiDelaunayGrid.map_min_of_link_nodes_to_link
~landlab.grid.voronoi.VoronoiDelaunayGrid.map_min_of_node_links_to_node
~landlab.grid.voronoi.VoronoiDelaunayGrid.map_min_of_patch_nodes_to_patch
~landlab.grid.voronoi.VoronoiDelaunayGrid.map_node_to_cell
~landlab.grid.voronoi.VoronoiDelaunayGrid.map_upwind_node_link_max_to_node
~landlab.grid.voronoi.VoronoiDelaunayGrid.map_upwind_node_link_mean_to_node
~landlab.grid.voronoi.VoronoiDelaunayGrid.map_value_at_downwind_node_link_max_to_node
~landlab.grid.voronoi.VoronoiDelaunayGrid.map_value_at_max_node_to_link
~landlab.grid.voronoi.VoronoiDelaunayGrid.map_value_at_min_node_to_link
~landlab.grid.voronoi.VoronoiDelaunayGrid.map_value_at_upwind_node_link_max_to_node
Boundary condition control
--------------------------
These are the primary properties for getting and setting the grid boundary
conditions. Changes made to :meth:`~.ModelGrid.status_at_node` and
:meth:`~.ModelGrid.status_at_node` will automatically update the conditions
defined at other grid elements automatically.
.. autosummary::
:toctree: generated/
~landlab.grid.voronoi.VoronoiDelaunayGrid.active_faces
~landlab.grid.voronoi.VoronoiDelaunayGrid.active_links
~landlab.grid.voronoi.VoronoiDelaunayGrid.active_neighbors_at_node
~landlab.grid.voronoi.VoronoiDelaunayGrid.boundary_nodes
~landlab.grid.voronoi.VoronoiDelaunayGrid.closed_boundary_nodes
~landlab.grid.voronoi.VoronoiDelaunayGrid.core_cells
~landlab.grid.voronoi.VoronoiDelaunayGrid.core_nodes
~landlab.grid.voronoi.VoronoiDelaunayGrid.fixed_gradient_boundary_nodes
~landlab.grid.voronoi.VoronoiDelaunayGrid.fixed_links
~landlab.grid.voronoi.VoronoiDelaunayGrid.fixed_value_boundary_nodes
~landlab.grid.voronoi.VoronoiDelaunayGrid.node_at_core_cell
~landlab.grid.voronoi.VoronoiDelaunayGrid.node_is_boundary
~landlab.grid.voronoi.VoronoiDelaunayGrid.number_of_active_faces
~landlab.grid.voronoi.VoronoiDelaunayGrid.number_of_active_links
~landlab.grid.voronoi.VoronoiDelaunayGrid.number_of_core_cells
~landlab.grid.voronoi.VoronoiDelaunayGrid.number_of_core_nodes
~landlab.grid.voronoi.VoronoiDelaunayGrid.number_of_fixed_links
~landlab.grid.voronoi.VoronoiDelaunayGrid.number_of_patches_present_at_link
~landlab.grid.voronoi.VoronoiDelaunayGrid.number_of_patches_present_at_node
~landlab.grid.voronoi.VoronoiDelaunayGrid.open_boundary_nodes
~landlab.grid.voronoi.VoronoiDelaunayGrid.set_nodata_nodes_to_closed
~landlab.grid.voronoi.VoronoiDelaunayGrid.set_nodata_nodes_to_fixed_gradient
~landlab.grid.voronoi.VoronoiDelaunayGrid.status_at_link
~landlab.grid.voronoi.VoronoiDelaunayGrid.status_at_node
Identifying node subsets
------------------------
These methods are useful in identifying subsets of nodes, e.g., closest node
to a point; nodes at edges.
(None are available for this grid type)
Surface analysis
----------------
These methods permit the kinds of surface analysis that you might expect to
find in GIS software.
.. autosummary::
:toctree: generated/
~landlab.grid.voronoi.VoronoiDelaunayGrid.calc_aspect_at_node
~landlab.grid.voronoi.VoronoiDelaunayGrid.calc_hillshade_at_node
~landlab.grid.voronoi.VoronoiDelaunayGrid.calc_slope_at_node
Notes
-----
It is important that when creating a new grid class that inherits from
``ModelGrid``, to call ``ModelGrid.__init__()`` in the new grid's
``__init__()``. For example, the new class's __init__ should contain the
following code,
.. code-block:: python
class NewGrid(ModelGrid):
def __init__(self, *args, **kwds):
ModelGrid.__init__(self, **kwds)
# Code that initializes the NewGrid
Without this, the new grid class will not have the ``at_*`` attributes.
"""
import numpy as np
from six.moves import range
from landlab.grid.base import (ModelGrid, CORE_NODE, BAD_INDEX_VALUE,
INACTIVE_LINK)
from landlab.core.utils import (as_id_array, sort_points_by_x_then_y,
argsort_points_by_x_then_y,
anticlockwise_argsort_points)
from .decorators import return_readonly_id_array
from scipy.spatial import Voronoi
def simple_poly_area(x, y):
    """Calculate and return the area of a 2-D simple polygon.

    Implements the "shoelace" formula. Input vertices must be in sequence
    (clockwise or counterclockwise). *x* and *y* are arrays that give the
    x- and y-axis coordinates of the polygon's vertices.

    Parameters
    ----------
    x : ndarray
        x-coordinates of of polygon vertices.
    y : ndarray
        y-coordinates of of polygon vertices.

    Returns
    -------
    out : float
        Area of the polygon

    Examples
    --------
    >>> import numpy as np
    >>> from landlab.grid.voronoi import simple_poly_area
    >>> x = np.array([3., 1., 1., 3.])
    >>> y = np.array([1.5, 1.5, 0.5, 0.5])
    >>> simple_poly_area(x, y)
    2.0

    If the input coordinate arrays are 2D, calculate the area of each polygon.
    Note that when used in this mode, all polygons must have the same
    number of vertices, and polygon vertices are listed column-by-column.

    >>> x = np.array([[ 3.,  1.,  1.,  3.],
    ...               [-2., -2., -1., -1.]]).T
    >>> y = np.array([[1.5, 1.5, 0.5, 0.5],
    ...               [ 0.,  1.,  2.,  0.]]).T
    >>> simple_poly_area(x, y)
    array([ 2. ,  1.5])
    """
    # Shoelace terms for consecutive vertex pairs, plus the closing
    # (last-to-first) pair.  The built-in sum is used deliberately: for
    # short arrays (under ~100 elements) it beats the numpy sum, and it
    # reduces along the first axis so the 2D (column-per-polygon) case
    # works unchanged.
    cross_terms = x[:-1] * y[1:] - x[1:] * y[:-1]
    closing_term = x[-1] * y[0] - x[0] * y[-1]
    return 0.5 * abs(sum(cross_terms) + closing_term)
def calculate_link_lengths(pts, link_from, link_to):
    """Calculate and return the length of every link between nodes.

    Parameters
    ----------
    pts : Nx2 numpy array containing (x,y) values
    link_from : 1D numpy array containing index numbers of nodes at starting
        point ("from") of links
    link_to : 1D numpy array containing index numbers of nodes at ending point
        ("to") of links

    Returns
    -------
    out : ndarray
        1D numpy array containing horizontal length of each link

    Examples
    --------
    >>> import numpy as np
    >>> from landlab.grid.voronoi import calculate_link_lengths
    >>> pts = np.array([[0.,0.],[3.,0.],[3.,4.]]) # 3:4:5 triangle
    >>> lfrom = np.array([0,1,2])
    >>> lto = np.array([1,2,0])
    >>> calculate_link_lengths(pts, lfrom, lto)
    array([ 3.,  4.,  5.])
    """
    # Component-wise offsets between each link's endpoints; rows are links,
    # columns are (dx, dy).
    offsets = pts[link_to] - pts[link_from]
    dx = offsets[:, 0]
    dy = offsets[:, 1]
    # Euclidean length of each link.
    return np.sqrt(dx * dx + dy * dy)
class VoronoiDelaunayGrid(ModelGrid):
"""
This inherited class implements an unstructured grid in which cells are
Voronoi polygons and nodes are connected by a Delaunay triangulation. Uses
scipy.spatial module to build the triangulation.
Create an unstructured grid from points whose coordinates are given
by the arrays *x*, *y*.
Parameters
----------
x : array_like
x-coordinate of points
y : array_like
y-coordinate of points
reorient_links (optional) : bool
whether to point all links to the upper-right quadrant
Returns
-------
VoronoiDelaunayGrid
A newly-created grid.
Examples
--------
>>> from numpy.random import rand
>>> from landlab.grid import VoronoiDelaunayGrid
>>> x, y = rand(25), rand(25)
>>> vmg = VoronoiDelaunayGrid(x, y) # node_x_coords, node_y_coords
>>> vmg.number_of_nodes
25
>>> import numpy as np
>>> x = [0, 0.1, 0.2, 0.3,
... 1, 1.1, 1.2, 1.3,
... 2, 2.1, 2.2, 2.3,]
>>> y = [0, 1, 2, 3,
... 0, 1, 2, 3,
... 0, 1, 2, 3]
>>> vmg = VoronoiDelaunayGrid(x, y)
>>> vmg.node_x # doctest: +NORMALIZE_WHITESPACE
array([ 0. , 1. , 2. ,
0.1, 1.1, 2.1,
0.2, 1.2, 2.2,
0.3, 1.3, 2.3])
>>> vmg.node_y # doctest: +NORMALIZE_WHITESPACE
array([ 0., 0., 0.,
1., 1., 1.,
2., 2., 2.,
3., 3., 3.])
"""
def __init__(self, x=None, y=None, reorient_links=True, **kwds):
"""
Create a Voronoi Delaunay grid from a set of points.
Create an unstructured grid from points whose coordinates are given
by the arrays *x*, *y*.
Parameters
----------
x : array_like
x-coordinate of points
y : array_like
y-coordinate of points
reorient_links (optional) : bool
whether to point all links to the upper-right quadrant
Returns
-------
VoronoiDelaunayGrid
A newly-created grid.
Examples
--------
>>> from numpy.random import rand
>>> from landlab.grid import VoronoiDelaunayGrid
>>> x, y = rand(25), rand(25)
>>> vmg = VoronoiDelaunayGrid(x, y) # node_x_coords, node_y_coords
>>> vmg.number_of_nodes
25
"""
if (x is not None) and (y is not None):
self._initialize(x, y, reorient_links)
super(VoronoiDelaunayGrid, self).__init__(**kwds)
def _initialize(self, x, y, reorient_links=True):
"""
Creates an unstructured grid around the given (x,y) points.
"""
x = np.asarray(x, dtype=float).reshape((-1, ))
y = np.asarray(y, dtype=float).reshape((-1, ))
if x.size != y.size:
raise ValueError('x and y arrays must have the same size')
# Make a copy of the points in a 2D array (useful for calls to geometry
# routines, but takes extra memory space).
pts = np.zeros((len(x), 2))
pts[:, 0] = x
pts[:, 1] = y
self.pts = sort_points_by_x_then_y(pts)
x = self.pts[:, 0]
y = self.pts[:, 1]
# NODES AND CELLS: Set up information pertaining to nodes and cells:
# - number of nodes
# - node x, y coordinates
# - default boundary status
# - interior and boundary nodes
# - nodes associated with each cell and active cell
# - cells and active cells associated with each node
# (or BAD_VALUE_INDEX if none)
#
# Assumptions we make here:
# - all interior (non-perimeter) nodes have cells (this should be
# guaranteed in a Delaunay triangulation, but there may be
# special cases)
# - all cells are active (later we'll build a mechanism for the user
# specify a subset of cells as active)
#
self._node_x = x
self._node_y = y
[self._node_status, self._core_nodes, self._boundary_nodes] = \
self._find_perimeter_nodes_and_BC_set(pts)
[self._cell_at_node, self._node_at_cell] = \
self._node_to_cell_connectivity(self._node_status,
self.number_of_cells)
active_cell_at_node = self.cell_at_node[self.core_nodes]
# ACTIVE CELLS: Construct Voronoi diagram and calculate surface area of
# each active cell.
vor = Voronoi(self.pts)
self.vor = vor
self._area_of_cell = np.zeros(self.number_of_cells)
for node in self._node_at_cell:
xv = vor.vertices[vor.regions[vor.point_region[node]], 0]
yv = vor.vertices[vor.regions[vor.point_region[node]], 1]
self._area_of_cell[self.cell_at_node[node]] = (
simple_poly_area(xv, yv))
# LINKS: Construct Delaunay triangulation and construct lists of link
# "from" and "to" nodes.
(self._node_at_link_tail,
self._node_at_link_head,
_,
self._face_width) = \
self._create_links_and_faces_from_voronoi_diagram(vor)
self._status_at_link = np.full(len(self._node_at_link_tail),
INACTIVE_LINK, dtype=int)
# Sort them by midpoint coordinates
self._sort_links_by_midpoint()
# Optionally re-orient links so that they all point within upper-right
# semicircle
if reorient_links:
self._reorient_links_upper_right()
# LINKS: Calculate link lengths
self._link_length = calculate_link_lengths(self.pts,
self.node_at_link_tail,
self.node_at_link_head)
# LINKS: inlink and outlink matrices
# SOON TO BE DEPRECATED
self._setup_inlink_and_outlink_matrices()
# ACTIVE LINKS: Create list of active links, as well as "from" and "to"
# nodes of active links.
self._reset_link_status_list()
# NODES & LINKS: IDs and directions of links at each node
self._create_links_and_link_dirs_at_node()
# LINKS: set up link unit vectors and node unit-vector sums
self._create_link_unit_vectors()
# create link x, y:
self._create_link_face_coords()
self._create_neighbors()
@property
def number_of_patches(self):
"""Number of patches.
Returns the number of patches over the grid.
LLCATS: PINF
"""
try:
return self._number_of_patches
except AttributeError:
self._create_patches_from_delaunay_diagram(self.pts, self.vor)
return self._number_of_patches
@property
def nodes_at_patch(self):
"""Get the four nodes at the corners of each patch in a regular grid.
LLCATS: PINF NINF CONN
"""
try:
return self._nodes_at_patch
except AttributeError:
self._create_patches_from_delaunay_diagram(self.pts, self.vor)
return self._nodes_at_patch
@property
@return_readonly_id_array
def patches_at_node(self):
"""
Return a (nnodes, max_voronoi_polygon_sides) array of patches at nodes.
The patches are returned in LL standard order (ccw from E), with any
nonexistent patches recorded after the ids of existing faces.
Nonexistent patches are ID'ed as -1.
Examples
--------
>>> from landlab import HexModelGrid
>>> mg = HexModelGrid(3, 3)
>>> mg.patches_at_node # doctest: +SKIP
array([[ 0, 2, -1, -1, -1, -1],
[ 1, 3, 0, -1, -1, -1],
[ 4, 1, -1, -1, -1, -1],
[ 5, 2, -1, -1, -1, -1],
[ 6, 8, 5, 2, 0, 3],
[ 7, 9, 6, 3, 1, 4],
[ 7, 4, -1, -1, -1, -1],
[ 5, 8, -1, -1, -1, -1],
[ 8, 6, 9, -1, -1, -1],
[ 9, 7, -1, -1, -1, -1]])
LLCATS: NINF PINF CONN
"""
try:
return self._patches_at_node
except AttributeError:
self._create_patches_from_delaunay_diagram(self.pts, self.vor)
return self._patches_at_node
@property
@return_readonly_id_array
def links_at_patch(self):
"""Returns the links forming each patch.
Examples
--------
>>> from landlab import HexModelGrid
>>> mg = HexModelGrid(3, 2)
>>> mg.links_at_patch
array([[ 3, 2, 0],
[ 5, 1, 2],
[ 6, 3, 4],
[ 8, 7, 5],
[10, 9, 6],
[11, 8, 9]])
LLCATS: LINF PINF CONN
"""
try:
return self._links_at_patch
except AttributeError:
self._create_patches_from_delaunay_diagram(self.pts, self.vor)
return self._links_at_patch
@property
@return_readonly_id_array
def patches_at_link(self):
"""Returns the patches adjoined to each link.
Examples
--------
>>> from landlab import HexModelGrid
>>> mg = HexModelGrid(3, 2)
>>> mg.patches_at_link
array([[ 0, -1],
[ 1, -1],
[ 0, 1],
[ 0, 2],
[ 2, -1],
[ 1, 3],
[ 2, 4],
[ 3, -1],
[ 3, 5],
[ 4, 5],
[ 4, -1],
[ 5, -1]])
LLCATS: PINF LINF CONN
"""
try:
return self._patches_at_link
except AttributeError:
self._create_patches_from_delaunay_diagram(self.pts, self.vor)
return self._patches_at_link
def _find_perimeter_nodes_and_BC_set(self, pts):
"""
Uses a convex hull to locate the perimeter nodes of the Voronoi grid,
then sets them as fixed value boundary nodes.
It then sets/updates the various relevant node lists held by the grid,
and returns *node_status*, *core_nodes*, *boundary_nodes*.
"""
# Calculate the convex hull for the set of points
from scipy.spatial import ConvexHull
hull = ConvexHull(pts, qhull_options='Qc') # see below why we use 'Qt'
# The ConvexHull object lists the edges that form the hull. We need to
# get from this list of edges the unique set of nodes. To do this, we
# first flatten the list of vertices that make up all the hull edges
# ("simplices"), so it becomes a 1D array. With that, we can use the
# set() function to turn the array into a set, which removes duplicate
# vertices. Then we turn it back into an array, which now contains the
# set of IDs for the nodes that make up the convex hull.
# The next thing to worry about is the fact that the mesh perimeter
# might contain nodes that are co-planar (that is, co-linear in our 2D
# world). For example, if you make a set of staggered points for a
# hexagonal lattice using make_hex_points(), there will be some
# co-linear points along the perimeter. The ones of these that don't
# form convex corners won't be included in convex_hull_nodes, but they
# are nonetheless part of the perimeter and need to be included in
# the list of boundary_nodes. To deal with this, we pass the 'Qt'
# option to ConvexHull, which makes it generate a list of coplanar
# points. We include these in our set of boundary nodes.
convex_hull_nodes = np.array(list(set(hull.simplices.flatten())))
coplanar_nodes = hull.coplanar[:, 0]
boundary_nodes = as_id_array(np.concatenate(
(convex_hull_nodes, coplanar_nodes)))
# Now we'll create the "node_status" array, which contains the code
# indicating whether the node is interior and active (=0) or a
# boundary (=1). This means that all perimeter (convex hull) nodes are
# initially flagged as boundary code 1. An application might wish to
# change this so that, for example, some boundaries are inactive.
node_status = np.zeros(len(pts[:, 0]), dtype=np.int8)
node_status[boundary_nodes] = 1
# It's also useful to have a list of interior nodes
core_nodes = as_id_array(np.where(node_status == 0)[0])
# save the arrays and update the properties
self._node_status = node_status
self._core_cells = np.arange(len(core_nodes), dtype=np.int)
self._node_at_cell = core_nodes
self._boundary_nodes = boundary_nodes
# Return the results
return node_status, core_nodes, boundary_nodes
def _create_cell_areas_array(self):
"""Set up an array of cell areas."""
self._cell_areas = self.active_cell_areas
return self._cell_areas
@staticmethod
def _node_to_cell_connectivity(node_status, ncells):
"""Set up node connectivity.
Creates and returns the following arrays:
* For each node, the ID of the corresponding cell, or
BAD_INDEX_VALUE if the node has no cell.
* For each cell, the ID of the corresponding node.
Parameters
----------
node_status : ndarray of ints
1D array containing the boundary status code for each node.
ncells : ndarray of ints
Number of cells (must equal the number of occurrences of CORE_NODE
in node_status).
Examples
--------
>>> from landlab import VoronoiDelaunayGrid as vdg
>>> import numpy as np
>>> from landlab.grid import BAD_INDEX_VALUE
>>> ns = np.array([1, 0, 0, 1, 0]) # 3 interior, 2 boundary nodes
>>> [node_cell, cell_node] = vdg._node_to_cell_connectivity(ns, 3)
>>> node_cell[1:3]
array([0, 1])
>>> node_cell[0] == BAD_INDEX_VALUE
True
>>> cell_node
array([1, 2, 4])
"""
assert ncells == np.count_nonzero(node_status == CORE_NODE), \
'ncells must equal number of CORE_NODE values in node_status'
cell = 0
node_cell = np.ones(len(node_status), dtype=int) * BAD_INDEX_VALUE
cell_node = np.zeros(ncells, dtype=int)
for node in range(len(node_cell)):
if node_status[node] == CORE_NODE:
node_cell[node] = cell
cell_node[cell] = node
cell += 1
return node_cell, cell_node
    @staticmethod
    def _create_links_from_triangulation(tri):
        """Create links from a Delaunay triangulation.

        From a Delaunay Triangulation of a set of points, contained in a
        scipy.spatial.Delaunay object "tri", creates and returns:

        * a numpy array containing the ID of the "from" node for each link
        * a numpy array containing the ID of the "to" node for each link
        * the number of links in the triangulation

        Examples
        --------
        >>> from scipy.spatial import Delaunay
        >>> import numpy as np
        >>> from landlab.grid import VoronoiDelaunayGrid as vdg
        >>> pts = np.array([[ 0., 0.], [ 1., 0.], [ 1., 0.87],
        ...                 [-0.5, 0.87], [ 0.5, 0.87], [ 0., 1.73],
        ...                 [ 1., 1.73]])
        >>> dt = Delaunay(pts)
        >>> [myfrom,myto,nl] = vdg._create_links_from_triangulation(dt)
        >>> print(myfrom, myto, nl) # doctest: +SKIP
        [5 3 4 6 4 3 0 4 1 1 2 6] [3 4 5 5 6 0 4 1 0 2 4 2] 12
        """
        # Calculate how many links there will be and create the arrays.
        #
        # The number of links equals 3 times the number of triangles minus
        # half the number of shared links.  Finding out the number of shared
        # links is easy: for every shared link, there is an entry in the
        # tri.neighbors array that is > -1 (indicating that the triangle has a
        # neighbor opposite a given vertex; in other words, two triangles are
        # sharing an edge).
        num_shared_links = np.count_nonzero(tri.neighbors > -1)
        # Each shared edge appears in two triangles, hence the floor-halving.
        num_links = 3 * tri.nsimplex - num_shared_links // 2
        link_fromnode = np.zeros(num_links, dtype=int)
        link_tonode = np.zeros(num_links, dtype=int)

        # Sweep through the list of triangles, assigning "from" and "to" nodes
        # to the list of links.
        #
        # The basic algorithm works as follows. For each triangle, we will add
        # its 3 edges as links. However, we have to make sure that each shared
        # edge is added only once. To do this, we keep track of whether or not
        # each triangle has been processed yet using a boolean array called
        # "tridone". When we look at a given triangle, we check each vertex in
        # turn. If there is no neighboring triangle opposite that vertex, then
        # we need to add the corresponding edge. If there is a neighboring
        # triangle but we haven't processed it yet, we also need to add the
        # edge. If neither condition is true, then this edge has already been
        # added, so we skip it.
        link_id = 0
        tridone = np.zeros(tri.nsimplex, dtype=bool)
        for t in range(tri.nsimplex):  # loop over triangles
            for i in range(0, 3):  # loop over vertices & neighbors
                # Add the edge opposite vertex i unless the neighboring
                # triangle across that edge has already contributed it.
                if tri.neighbors[t, i] == -1 or not tridone[
                        tri.neighbors[t, i]]:
                    # The edge opposite vertex i connects the other two
                    # vertices of the triangle (i+1 and i+2, mod 3).
                    link_fromnode[link_id] = tri.simplices[
                        t, np.mod(i + 1, 3)]
                    link_tonode[link_id] = tri.simplices[
                        t, np.mod(i + 2, 3)]
                    link_id += 1
            tridone[t] = True

        # save the results
        # self.node_at_link_tail = link_fromnode
        # self.node_at_link_head = link_tonode

        # Return the results
        return link_fromnode, link_tonode, num_links
@staticmethod
@staticmethod
def _create_links_and_faces_from_voronoi_diagram(vor):
"""
From a Voronoi diagram object created by scipy.spatial.Voronoi(),
builds and returns:
1. Arrays of link tail and head nodes
2. Array of link IDs for each active link
3. Array containing with of each face
Parameters
----------
vor : scipy.spatial.Voronoi
Voronoi object initialized with the grid nodes.
Returns
-------
out : tuple of ndarrays
- link_fromnode = "from" node for each link (len=num_links)
- link_tonode = "to" node for each link (len=num_links)
- active_links = link ID for each active link
(len=num_active_links)
- face_width = width of each face (len=num_active_links
Examples
--------
>>> import numpy as np
>>> from landlab.grid import VoronoiDelaunayGrid as vdg
>>> pts = np.array([[0., 0.], [1., 0.], [-0.5, 0.87], [0.5, 0.87],
... [1.5, 0.87], [0., 1.73], [1., 1.73]])
>>> from scipy.spatial import Voronoi
>>> vor = Voronoi(pts)
>>> [tn,hn,al,fw] = vdg._create_links_and_faces_from_voronoi_diagram(
... vor)
>>> tn
array([0, 0, 0, 1, 1, 2, 3, 2, 3, 6, 6, 6])
>>> hn
array([1, 2, 3, 3, 4, 3, 4, 5, 5, 3, 4, 5])
>>> al
array([2, 3, 5, 6, 8, 9])
>>> fw
array([ 0.57669199, 0.57669199, 0.575973 , 0.575973 , 0.57836419,
0.57836419])
"""
# Each Voronoi "ridge" corresponds to a link. The Voronoi object has an
# attribute ridge_points that contains the IDs of the nodes on either
# side (including ridges that have one of their endpoints undefined).
# So, we set the number of links equal to the number of ridges.
num_links = len(vor.ridge_points)
# Create the arrays for link from and to nodes
link_fromnode = -np.ones(num_links, dtype=int)
link_tonode = -np.ones(num_links, dtype=int)
# Ridges along the perimeter of the grid will have one of their
# endpoints undefined. The endpoints of each ridge are contained in
# vor.ridge_vertices, and an undefined vertex is flagged with -1.
# Ridges with both vertices defined correspond to faces and active
# links, while ridges with an undefined vertex correspond to inactive
# links. So, to find the number of active links, we subtract from the
# total number of links the number of occurrences of an undefined
# vertex.
num_active_links = num_links \
- np.count_nonzero(np.array(vor.ridge_vertices) == -1)
# Create arrays for active links and width of faces (which are Voronoi
# ridges).
active_links = -np.ones(num_active_links, dtype=int)
face_width = -np.ones(num_active_links)
# Find the order to sort by link midpoints
link_midpoints = np.zeros((num_links, 2))
for i in range(num_links):
link_midpoints[i][:] = (vor.points[vor.ridge_points[i, 0]] +
vor.points[vor.ridge_points[i, 1]])/2.
ind = argsort_points_by_x_then_y(link_midpoints)
# Loop through the list of ridges. For each ridge, there is a link, and
# its "from" and "to" nodes are the associated "points". In addition,
# if the ridge endpoints are defined, we have a face and an active
# link, so we add them to our arrays as well.
j = 0
for i in range(num_links):
link_fromnode[i] = vor.ridge_points[ind[i], 0]
link_tonode[i] = vor.ridge_points[ind[i], 1]
face_corner1 = vor.ridge_vertices[ind[i]][0]
face_corner2 = vor.ridge_vertices[ind[i]][1]
# means it's a valid face
if VoronoiDelaunayGrid._is_valid_voronoi_ridge(vor, ind[i]):
dx = vor.vertices[face_corner2, 0] - \
vor.vertices[face_corner1, 0]
dy = vor.vertices[face_corner2, 1] - \
vor.vertices[face_corner1, 1]
face_width[j] = np.sqrt(dx * dx + dy * dy)
active_links[j] = i
j += 1
return link_fromnode, link_tonode, active_links, face_width
    def _reorient_links_upper_right(self):
        r"""Reorient links to all point within the upper-right semi-circle.

        Notes
        -----
        "Upper right semi-circle" means that the angle of the link with respect
        to the vertical (measured clockwise) falls between -45 and +135. More
        precisely, if :math:`\theta` is the angle,
        :math:`-45 \le \theta < 135`.

        For example, the link could point up and left as much as -45, but not
        -46. It could point down and right as much as 134.9999, but not 135. It
        will never point down and left, or up-but-mostly-left, or
        right-but-mostly-down.

        Examples
        --------
        >>> from landlab.grid import HexModelGrid
        >>> hg = HexModelGrid(3, 2, 1., reorient_links=True)
        >>> hg.node_at_link_tail
        array([0, 0, 0, 1, 1, 2, 3, 2, 3, 3, 4, 5])
        >>> hg.node_at_link_head
        array([1, 2, 3, 3, 4, 3, 4, 5, 5, 6, 6, 6])
        """
        # Calculate the horizontal (dx) and vertical (dy) link offsets
        link_dx = self.node_x[self.node_at_link_head] - \
            self.node_x[self.node_at_link_tail]
        link_dy = self.node_y[self.node_at_link_head] - \
            self.node_y[self.node_at_link_tail]

        # Calculate the angle, clockwise, with respect to vertical, then rotate
        # by 45 degrees counter-clockwise (by adding pi/4)
        link_angle = np.arctan2(link_dx, link_dy) + np.pi / 4

        # The range of values should be -180 to +180 degrees (but in radians).
        # It won't be after the above operation, because angles that were
        # > 135 degrees will now have values > 180. To correct this, we
        # subtract 360 (i.e., 2 pi radians) from those that are > 180 (i.e.,
        # > pi radians).
        link_angle -= 2 * np.pi * (link_angle >= np.pi)

        # Find locations where the angle is negative; these are the ones we
        # want to flip
        (flip_locs, ) = np.where(link_angle < 0.)

        # If there are any flip locations, proceed to switch their fromnodes
        # and tonodes; otherwise, we're done
        if len(flip_locs) > 0:
            # Temporarily store the fromnode for these
            fromnode_temp = self.node_at_link_tail[flip_locs]

            # The fromnodes now become the tonodes, and vice versa
            self._node_at_link_tail[
                flip_locs] = self.node_at_link_head[flip_locs]
            self._node_at_link_head[flip_locs] = fromnode_temp
def _create_patches_from_delaunay_diagram(self, pts, vor):
"""
Uses a delaunay diagram drawn from the provided points to
generate an array of patches and patch-node-link connectivity.
Returns ...
DEJH, 10/3/14, modified May 16.
"""
from scipy.spatial import Delaunay
from landlab.core.utils import anticlockwise_argsort_points_multiline
from .cfuncs import find_rows_containing_ID, \
create_patches_at_element, create_links_at_patch
tri = Delaunay(pts)
assert np.array_equal(tri.points, vor.points)
nodata = -1
self._nodes_at_patch = as_id_array(tri.simplices)
# self._nodes_at_patch = np.empty_like(_nodes_at_patch)
self._number_of_patches = tri.simplices.shape[0]
# get the patches in order:
patches_xy = np.empty((self._number_of_patches, 2), dtype=float)
patches_xy[:, 0] = np.mean(self.node_x[self._nodes_at_patch],
axis=1)
patches_xy[:, 1] = np.mean(self.node_y[self._nodes_at_patch],
axis=1)
orderforsort = argsort_points_by_x_then_y(patches_xy)
self._nodes_at_patch = self._nodes_at_patch[orderforsort, :]
patches_xy = patches_xy[orderforsort, :]
# get the nodes around the patch in order:
nodes_xy = np.empty((3, 2), dtype=float)
# perform a CCW sort without a line-by-line loop:
patch_nodes_x = self.node_x[self._nodes_at_patch]
patch_nodes_y = self.node_y[self._nodes_at_patch]
anticlockwise_argsort_points_multiline(patch_nodes_x, patch_nodes_y,
out=self._nodes_at_patch)
# need to build a squared off, masked array of the patches_at_node
# the max number of patches for a node in the grid is the max sides of
# the side-iest voronoi region.
max_dimension = len(max(vor.regions, key=len))
self._patches_at_node = np.full(
(self.number_of_nodes, max_dimension), nodata, dtype=int)
self._nodes_at_patch = as_id_array(self._nodes_at_patch)
self._patches_at_node = as_id_array(self._patches_at_node)
create_patches_at_element(self._nodes_at_patch,
self.number_of_nodes,
self._patches_at_node)
# build the patch-link connectivity:
self._links_at_patch = np.empty((self._number_of_patches, 3),
dtype=int)
create_links_at_patch(self._nodes_at_patch, self._links_at_node,
self._number_of_patches, self._links_at_patch)
patch_links_x = self.x_of_link[self._links_at_patch]
patch_links_y = self.y_of_link[self._links_at_patch]
anticlockwise_argsort_points_multiline(patch_links_x, patch_links_y,
out=self._links_at_patch)
self._patches_at_link = np.empty((self.number_of_links, 2),
dtype=int)
self._patches_at_link.fill(-1)
create_patches_at_element(self._links_at_patch, self.number_of_links,
self._patches_at_link)
# a sort of the links will be performed here once we have corners
self._patches_created = True
def _create_neighbors(self):
"""Create the _neighbors_at_node property.
"""
self._neighbors_at_node = self.links_at_node.copy()
nodes_at_link = np.empty((self.number_of_links, 2))
nodes_at_link[:, 0] = self.node_at_link_tail
nodes_at_link[:, 1] = self.node_at_link_head
both_nodes = nodes_at_link[self.links_at_node]
nodes = np.arange(self.number_of_nodes, dtype=int)
# ^we have to do this, as for a hex it's possible that mg.nodes is
# returned not just in ID order.
for i in range(both_nodes.shape[1]):
centernottail = np.not_equal(both_nodes[:, i, 0], nodes)
centernothead = np.not_equal(both_nodes[:, i, 1], nodes)
self._neighbors_at_node[centernottail, i] = both_nodes[
centernottail, i, 0]
self._neighbors_at_node[centernothead, i] = both_nodes[
centernothead, i, 1]
# restamp the missing links:
self._neighbors_at_node[
self.links_at_node == BAD_INDEX_VALUE] = BAD_INDEX_VALUE
def save(self, path, clobber=False):
"""Save a grid and fields.
This method uses cPickle to save a Voronoi grid as a cPickle file.
At the time of coding, this is the only convenient output format
for Voronoi grids, but support for netCDF is likely coming.
All fields will be saved, along with the grid.
The recommended suffix for the save file is '.grid'. This will
be added to your save if you don't include it.
This method is equivalent to
:py:func:`~landlab.io.native_landlab.save_grid`, and
:py:func:`~landlab.io.native_landlab.load_grid` can be used to
load these files.
Caution: Pickling can be slow, and can produce very large files.
Caution 2: Future updates to Landlab could potentially render old
saves unloadable.
Parameters
----------
path : str
Path to output file.
clobber : bool (defaults to false)
Set to true to allow overwriting
Examples
--------
>>> from landlab import VoronoiDelaunayGrid
>>> import numpy as np
>>> import os
>>> x = np.random.rand(20)
>>> y = np.random.rand(20)
>>> vmg = VoronoiDelaunayGrid(x,y)
>>> vmg.save('./mytestsave.grid')
>>> os.remove('mytestsave.grid') #to remove traces of this test
LLCATS: GINF
"""
import os
from six.moves import cPickle
if os.path.exists(path) and not clobber:
raise ValueError('file exists')
(base, ext) = os.path.splitext(path)
if ext != '.grid':
ext = ext + '.grid'
path = base + ext
with open(path, 'wb') as fp:
cPickle.dump(self, fp)
if __name__ == '__main__':
import doctest
doctest.testmod()
| [
2,
0,
1220,
14629,
14,
24330,
14,
29412,
198,
37811,
198,
37906,
7822,
286,
44143,
261,
23013,
13856,
1942,
323,
41339,
11,
257,
1398,
973,
284,
2251,
290,
6687,
198,
403,
7249,
1522,
11,
21388,
50000,
329,
362,
35,
29052,
4981,
13,
... | 2.284326 | 23,568 |
#!/usr/bin/env python3
#
# Copyright 2019 Peifeng Yu <peifeng@umich.edu>
#
# This file is part of Salus
# (see https://github.com/SymbioticLab/Salus).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 2 07:38:29 2018
@author: peifeng
"""
from __future__ import print_function, absolute_import, division
import re
from datetime import datetime
from collections import defaultdict
import multiprocessing as mp
from pathlib import Path
import pandas as pd
#import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
import plotutils as pu
import compmem as cm
# 2018-09-01 06:22:21.180029: Step 341, loss=3.60 (33.8 examples/sec; 0.740 sec/batch)
ptn_iter = re.compile(r"""(?P<timestamp>.+): \s [sS]tep \s (?P<Step>\d+),\s
(loss|perplexity) .* \(
(?P<Speed>[\d.]+) \s examples/sec; \s
(?P<Duration>[\d.]+) \s sec/batch\)?""", re.VERBOSE)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
198,
2,
15069,
13130,
2631,
361,
1516,
10605,
1279,
431,
361,
1516,
31,
388,
488,
13,
15532,
29,
198,
2,
220,
198,
2,
770,
2393,
318,
636,
286,
4849,
385,
198,
2,
357,
382... | 2.665474 | 559 |
# _*_ coding:UTF-8 _*_
# b64 decode and encode
import base64
| [
2,
4808,
9,
62,
19617,
25,
48504,
12,
23,
4808,
9,
62,
198,
2,
275,
2414,
36899,
290,
37773,
198,
198,
11748,
2779,
2414,
628,
198
] | 2.461538 | 26 |
default_app_config = "govuk_template.apps.GovukTemplateConfig"
| [
12286,
62,
1324,
62,
11250,
796,
366,
9567,
2724,
62,
28243,
13,
18211,
13,
23774,
2724,
30800,
16934,
1,
198
] | 3.15 | 20 |
import os
import unittest
import shutil
import jingerly
| [
11748,
28686,
198,
11748,
555,
715,
395,
198,
11748,
4423,
346,
198,
198,
11748,
474,
3889,
306,
628
] | 3.222222 | 18 |
"""Python library for Simple Service Discovery Protocol (SSDP)."""
import ssdp.asyncio as asyncio
import ssdp.entity as entity
import ssdp.network as network
from ssdp.asyncio import *
from ssdp.entity import *
__all__ = entity.__all__ + asyncio.__all__
| [
37811,
37906,
5888,
329,
17427,
4809,
23455,
20497,
357,
5432,
6322,
21387,
15931,
198,
198,
11748,
264,
21282,
79,
13,
292,
13361,
952,
355,
30351,
952,
198,
11748,
264,
21282,
79,
13,
26858,
355,
9312,
198,
11748,
264,
21282,
79,
13,
... | 3.084337 | 83 |
"""
author: Dominik Stec,
index: s12623,
email: s12623@pja.edu.pl
source link:
https://www.codingame.com/ide/puzzle/defibrillators
"""
import sys
import math
# Auto-generated code below aims at helping you parse
# the standard input according to the problem statement.
lon = input()
lat = input()
n = int(input())
defi_list = []
for i in range(n):
defib = input()
defi_item = defib.rsplit(';')
defi_list.append(defi_item)
lon = lon.replace(',', '.')
lat = lat.replace(',', '.')
lon = float(lon)
lat = float(lat)
lon_r = (lon*2*math.pi) / 360
lat_r = (lat*2*math.pi) / 360
distance_list = []
for data in defi_list:
lon_b = data[-2]
lat_b = data[-1]
lon_b = lon_b.replace(',', '.')
lat_b = lat_b.replace(',', '.')
lon_b = float(lon_b)
lat_b = float(lat_b)
lon_b_r = (lon_b*2*math.pi) / 360
lat_b_r = (lat_b*2*math.pi) / 360
x = (lon_b_r - lon_r) * math.cos((lat + lat_b_r) / 2)
y = lat_b_r - lat_r
distance = math.sqrt(math.pow(x,2) + math.pow(y,2)) * 6371
distance_list.append(distance)
min = min(distance_list)
idx = distance_list.index(min)
desc = defi_list[idx][1]
# Write an answer using print
# To debug: print("Debug messages...", file=sys.stderr, flush=True)
print(desc)
| [
37811,
201,
198,
9800,
25,
11817,
1134,
520,
721,
11,
201,
198,
9630,
25,
220,
264,
19420,
1954,
11,
201,
198,
12888,
25,
220,
264,
19420,
1954,
31,
79,
6592,
13,
15532,
13,
489,
201,
198,
201,
198,
10459,
2792,
25,
201,
198,
5450... | 2.080442 | 634 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'D:\pyqt_file\untitled_2.ui'
#
# Created: Sat Apr 15 09:51:16 2017
# by: PyQt5 UI code generator 5.4
#
# WARNING! All changes made in this file will be lost!
import sys
import serial
import threading
import binascii
from PyQt5 import QtCore, QtGui, QtWidgets
import serial.tools.list_ports
if __name__ == '__main__':
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_()) | [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
5178,
7822,
7560,
422,
3555,
334,
72,
2393,
705,
35,
7479,
9078,
39568,
62,
7753,
59,
2797,
7803,
62,
17,
13,
9019,
6,
198,
2,
198,
2,
15622,
25,
7031,
275... | 2.549784 | 231 |
# -*- coding: utf-8 -*-
# Stdlib imports
import logging
import logging.config
import os
# Third-party imports
import yaml
LOG_CONF_FILENAME_DEFAULT = ""
LOG_CONF_FILENAME_ENV = os.environ.get(
"LAUNCHPAD_LOG", LOG_CONF_FILENAME_DEFAULT
)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
520,
67,
8019,
17944,
198,
11748,
18931,
198,
11748,
18931,
13,
11250,
198,
11748,
28686,
198,
198,
2,
10467,
12,
10608,
17944,
198,
11748,
331,
43695,
628,
198,... | 2.398058 | 103 |
import pytest
from ehub.users.models import User
from ehub.users.tests.factories import UserFactory
@pytest.fixture(autouse=True)
@pytest.fixture
| [
11748,
12972,
9288,
198,
198,
6738,
32622,
549,
13,
18417,
13,
27530,
1330,
11787,
198,
6738,
32622,
549,
13,
18417,
13,
41989,
13,
22584,
1749,
1330,
11787,
22810,
628,
198,
31,
9078,
9288,
13,
69,
9602,
7,
2306,
1076,
28,
17821,
8,
... | 2.960784 | 51 |
import pytest
import torch
from torch.utils.data import DataLoader
from lhotse.cut import CutSet
from lhotse.dataset.speech_recognition import K2SpeechRecognitionIterableDataset, concat_cuts
from lhotse.testing.dummies import DummyManifest, dummy_cut
@pytest.fixture
@pytest.fixture
@pytest.mark.parametrize('num_workers', [0, 1])
@pytest.mark.parametrize('num_workers', [2, 3, 4])
| [
11748,
12972,
9288,
198,
11748,
28034,
198,
6738,
28034,
13,
26791,
13,
7890,
1330,
6060,
17401,
198,
198,
6738,
300,
8940,
325,
13,
8968,
1330,
9712,
7248,
198,
6738,
300,
8940,
325,
13,
19608,
292,
316,
13,
45862,
62,
26243,
653,
13... | 2.774648 | 142 |
"""
A setuptools based setup module.
"""
from setuptools import setup, find_packages
import daemon
setup(
name='daemon',
version=daemon.__version__,
description="Daemon class to transform any python script into a daemon",
long_description=open('README.md').read(),
license='MIT',
author='Sylvain Carlioz',
author_email='sylvain.carlioz@gmail.com',
keywords='daemon',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Developers',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.5',
],
packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
#install_requires=[],
)
| [
37811,
198,
32,
900,
37623,
10141,
1912,
9058,
8265,
13,
198,
37811,
198,
198,
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
198,
11748,
33386,
198,
198,
40406,
7,
198,
220,
220,
220,
1438,
11639,
6814,
7966,
3256,
198,
22... | 2.634271 | 391 |
# coding: utf-8
"""
Paasta API
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from paasta_tools.paastaapi.configuration import Configuration
class MarathonDashboardItem(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'instance': 'str',
'service': 'str',
'shard_url': 'str'
}
attribute_map = {
'instance': 'instance',
'service': 'service',
'shard_url': 'shard_url'
}
def __init__(self, instance=None, service=None, shard_url=None, local_vars_configuration=None): # noqa: E501
"""MarathonDashboardItem - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._instance = None
self._service = None
self._shard_url = None
self.discriminator = None
if instance is not None:
self.instance = instance
if service is not None:
self.service = service
if shard_url is not None:
self.shard_url = shard_url
@property
def instance(self):
"""Gets the instance of this MarathonDashboardItem. # noqa: E501
Instance name # noqa: E501
:return: The instance of this MarathonDashboardItem. # noqa: E501
:rtype: str
"""
return self._instance
@instance.setter
def instance(self, instance):
"""Sets the instance of this MarathonDashboardItem.
Instance name # noqa: E501
:param instance: The instance of this MarathonDashboardItem. # noqa: E501
:type instance: str
"""
self._instance = instance
@property
def service(self):
"""Gets the service of this MarathonDashboardItem. # noqa: E501
Service name # noqa: E501
:return: The service of this MarathonDashboardItem. # noqa: E501
:rtype: str
"""
return self._service
@service.setter
def service(self, service):
"""Sets the service of this MarathonDashboardItem.
Service name # noqa: E501
:param service: The service of this MarathonDashboardItem. # noqa: E501
:type service: str
"""
self._service = service
@property
def shard_url(self):
"""Gets the shard_url of this MarathonDashboardItem. # noqa: E501
Marathon Shard URL # noqa: E501
:return: The shard_url of this MarathonDashboardItem. # noqa: E501
:rtype: str
"""
return self._shard_url
@shard_url.setter
def shard_url(self, shard_url):
"""Sets the shard_url of this MarathonDashboardItem.
Marathon Shard URL # noqa: E501
:param shard_url: The shard_url of this MarathonDashboardItem. # noqa: E501
:type shard_url: str
"""
self._shard_url = shard_url
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, MarathonDashboardItem):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, MarathonDashboardItem):
return True
return self.to_dict() != other.to_dict()
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
37811,
198,
220,
220,
220,
11243,
40197,
7824,
628,
220,
220,
220,
1400,
6764,
2810,
357,
27568,
416,
4946,
15042,
35986,
3740,
1378,
12567,
13,
785,
14,
9654,
499,
270,
10141,
14,
9654,
150... | 2.26987 | 2,227 |
"""
Purpose:
Simulate 2-bit Predictor and measure it's hit rate.
"""
from enum import Enum
from random import randint
from typing import List
if __name__ == "__main__":
predictor_with_branch_result = TwoBitPredictorByBranchResult()
predictor_with_prediction_result = TwoBitPredictorByPredictionResult()
branch_history = get_branch_history(history_size=5)
# NOTE: Change here to show/not show state transition
# True: show
# False: not show
SHOW_STATE_TRANSITION = True
print('Branch History')
print(branch_history)
print('Predictor with Branch Result')
hit_rate_1 = simulate(branch_history, predictor_with_branch_result, verbose=SHOW_STATE_TRANSITION)
print(f'Hit Rate: {hit_rate_1}')
print('Predictor with Prediction Result')
hit_rate_2 = simulate(branch_history, predictor_with_prediction_result, verbose=SHOW_STATE_TRANSITION)
print(f'Hit Rate: {hit_rate_2}')
| [
37811,
198,
30026,
3455,
25,
198,
8890,
5039,
362,
12,
2545,
49461,
273,
290,
3953,
340,
338,
2277,
2494,
13,
198,
37811,
198,
198,
6738,
33829,
1330,
2039,
388,
198,
6738,
4738,
1330,
43720,
600,
198,
6738,
19720,
1330,
7343,
628,
19... | 2.847095 | 327 |
import numpy as np
import tensorflow as tf
import sys
import time
import random
import os
sys.path.append("..")
from lm_model import lm_model
random.seed(time.time())
# Evaluation on development set
# Infer the sampling probability based on language model
# Create language model
# Pre-train the forward/backward language model
# Output the probability based on the language model
| [
11748,
299,
32152,
355,
45941,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
11748,
25064,
198,
11748,
640,
198,
11748,
4738,
198,
11748,
28686,
198,
198,
17597,
13,
6978,
13,
33295,
7203,
492,
4943,
198,
198,
6738,
300,
76,
62,
19849... | 3.688679 | 106 |
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from swagger_server.models.base_model_ import Model
from swagger_server import util
class SettleBill(Model):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, bill_index: int=None, _date: date=None): # noqa: E501
"""SettleBill - a model defined in Swagger
:param bill_index: The bill_index of this SettleBill. # noqa: E501
:type bill_index: int
:param _date: The _date of this SettleBill. # noqa: E501
:type _date: date
"""
self.swagger_types = {
'bill_index': int,
'_date': date
}
self.attribute_map = {
'bill_index': 'billIndex',
'_date': 'date'
}
self._bill_index = bill_index
self.__date = _date
@classmethod
def from_dict(cls, dikt) -> 'SettleBill':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The SettleBill of this SettleBill. # noqa: E501
:rtype: SettleBill
"""
return util.deserialize_model(dikt, cls)
@property
def bill_index(self) -> int:
"""Gets the bill_index of this SettleBill.
:return: The bill_index of this SettleBill.
:rtype: int
"""
return self._bill_index
@bill_index.setter
def bill_index(self, bill_index: int):
"""Sets the bill_index of this SettleBill.
:param bill_index: The bill_index of this SettleBill.
:type bill_index: int
"""
self._bill_index = bill_index
@property
def _date(self) -> date:
"""Gets the _date of this SettleBill.
:return: The _date of this SettleBill.
:rtype: date
"""
return self.__date
@_date.setter
def _date(self, _date: date):
"""Sets the _date of this SettleBill.
:param _date: The _date of this SettleBill.
:type _date: date
"""
self.__date = _date
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
6738,
4818,
8079,
1330,
3128,
11,
4818,
8079,
220,
1303,
645,
20402,
25,
376,
21844,
198,
198,
6738,
19720,
1330,
7343,
11,
360,
713,
220... | 2.232323 | 990 |
# Register your models here.
from django.contrib import admin
from .models import Books
admin.site.register(Books) | [
198,
2,
17296,
534,
4981,
994,
13,
198,
198,
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
198,
6738,
764,
27530,
1330,
13661,
198,
28482,
13,
15654,
13,
30238,
7,
30650,
8
] | 3.545455 | 33 |
from .exceptions.exceptions import AuthFailed
from .services.user_service import UserService
from .services.target_service import (TargetService, TargetsTreeBuilder)
from .services.scanner_service import ScannerService
from .clients.hiabclient import HiabClient
assert AuthFailed
assert ScannerService
assert UserService
assert TargetService
assert TargetsTreeBuilder
assert HiabClient | [
6738,
764,
1069,
11755,
13,
1069,
11755,
1330,
26828,
37,
6255,
198,
6738,
764,
30416,
13,
7220,
62,
15271,
1330,
11787,
16177,
198,
6738,
764,
30416,
13,
16793,
62,
15271,
1330,
357,
21745,
16177,
11,
31089,
1039,
27660,
32875,
8,
198,... | 3.989691 | 97 |
#!/usr/bin/env python3
'''
This service listens for new events submitted to Sealog and performs additional
actions depending on the recieved event.
This service listens for 'Off deck' and 'On deck' milestones and enables/
disables the ASNAP functionality and if a lowering is currently active it will
set the start/stop time to the time of the event.
This service listens for 'On bottom' and 'Off bottom' milestones and if a
lowering is currently active it will set the lowering_on/off_bottom milestone
time to the time of the event.
'''
import asyncio
import json
import logging
import time
import requests
import websockets
from python_sealog.custom_vars import getCustomVarUIDByName, setCustomVar
from python_sealog.lowerings import getLoweringByEvent
from python_sealog.settings import apiServerURL, headers, loweringsAPIPath, \
wsServerURL
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__file__)
ASNAP_STATUS_VAR_NAME = 'asnapStatus'
ASNAP_STATUS_VAR_ID = None
CLIENT_WSID = 'autoActions'
HELLO = {
'type': 'hello',
'id': CLIENT_WSID,
'auth': {'headers': headers},
'version': '2',
'subs': ['/ws/status/newEvents', '/ws/status/updateEvents'],
}
PING = {
'type': 'ping',
'id': CLIENT_WSID,
}
AUX_DATA_TEMPLATE = {
'event_id': None,
'data_source': None,
'data_array': [],
}
if __name__ == '__main__':
try:
init_asnap_status_var_id()
except:
logger.exception('Could not resolve asnapStatus variable ID')
quit()
asyncio.run(event_listener())
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
7061,
6,
198,
1212,
2139,
35019,
329,
649,
2995,
8948,
284,
1001,
11794,
290,
17706,
3224,
220,
198,
4658,
6906,
319,
262,
664,
39591,
1785,
13,
198,
198,
1212,
2139,
35019,
329,
... | 2.722317 | 587 |
import copy
import json
import synapse.exc as s_exc
import synapse.lib.stormlib.stix as s_stix
import synapse.tests.utils as s_test
# flake8: noqa: E501
from pprint import pprint
| [
11748,
4866,
198,
11748,
33918,
198,
198,
11748,
6171,
7512,
13,
41194,
355,
264,
62,
41194,
198,
198,
11748,
6171,
7512,
13,
8019,
13,
12135,
8019,
13,
301,
844,
355,
264,
62,
301,
844,
198,
198,
11748,
6171,
7512,
13,
41989,
13,
2... | 2.705882 | 68 |
# flake8: noqa
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
| [
2,
781,
539,
23,
25,
645,
20402,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
4818,
8079,
198,
6738,
5366,
13,
9945,
1330,
20613,
198,
6738,
5366,
13,
85,
17,
1330,
10011,
2611,
44,
4254,
198,
6738,
... | 2.807692 | 52 |