hexsha stringlengths 40 40 | size int64 4 996k | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 996k | avg_line_length float64 1.33 58.2k | max_line_length int64 2 323k | alphanum_fraction float64 0 0.97 | content_no_comment stringlengths 0 946k | is_comment_constant_removed bool 2
classes | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f7254caf0aa3637ad03dd57110d9475938728d0b | 301 | py | Python | compiler_gym/util/flags/output_dir.py | mostafaelhoushi/CompilerGym | cf11c58333d263b3ebc5ece2110a429e9af499c1 | [
"MIT"
] | 562 | 2020-12-21T14:10:20.000Z | 2022-03-31T21:23:55.000Z | compiler_gym/util/flags/output_dir.py | mostafaelhoushi/CompilerGym | cf11c58333d263b3ebc5ece2110a429e9af499c1 | [
"MIT"
] | 433 | 2020-12-22T03:40:41.000Z | 2022-03-31T18:16:17.000Z | compiler_gym/util/flags/output_dir.py | mostafaelhoushi/CompilerGym | cf11c58333d263b3ebc5ece2110a429e9af499c1 | [
"MIT"
] | 88 | 2020-12-22T08:22:00.000Z | 2022-03-20T19:00:40.000Z | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from absl import flags
flags.DEFINE_string(
"output_dir",
None,
"The directory to read and write files to.",
)
| 25.083333 | 65 | 0.724252 |
from absl import flags
flags.DEFINE_string(
"output_dir",
None,
"The directory to read and write files to.",
)
| true | true |
f7254ce72ce8155285a2f4a9a1febb88f4b64006 | 3,161 | py | Python | leasing/models/basis_of_rent.py | tuomas777/mvj | e9a12e42c399b9fb77fd8fad85fc8f0f6d4ce405 | [
"MIT"
] | null | null | null | leasing/models/basis_of_rent.py | tuomas777/mvj | e9a12e42c399b9fb77fd8fad85fc8f0f6d4ce405 | [
"MIT"
] | null | null | null | leasing/models/basis_of_rent.py | tuomas777/mvj | e9a12e42c399b9fb77fd8fad85fc8f0f6d4ce405 | [
"MIT"
] | null | null | null | from auditlog.registry import auditlog
from django.db import models
from django.utils.translation import ugettext_lazy as _
from enumfields import EnumField
from leasing.enums import PeriodType
from .mixins import NameModel, TimeStampedSafeDeleteModel
class BasisOfRentPlotType(NameModel):
"""
In Finnish: Tonttityyppi
"""
class BasisOfRent(TimeStampedSafeDeleteModel):
"""
In Finnish: Vuokrausperuste
"""
# In Finnish: Tonttityyppi
plot_type = models.ForeignKey(BasisOfRentPlotType, verbose_name=_("Plot type"), on_delete=models.PROTECT)
# In Finnish: Alkupäivämäärä
start_date = models.DateField(verbose_name=_("Start date"), null=True, blank=True)
# In Finnish: Loppupäivämäärä
end_date = models.DateField(verbose_name=_("End date"), null=True, blank=True)
# In Finnish: Asemakaava
detailed_plan_identifier = models.CharField(verbose_name=_("Detailed plan identifier"), null=True, blank=True,
max_length=255)
# In Finnish: Hallintamuoto
management = models.ForeignKey('leasing.Management', verbose_name=_("Form of management"), null=True, blank=True,
on_delete=models.PROTECT)
# In Finnish: Rahoitusmuoto
financing = models.ForeignKey('leasing.Financing', verbose_name=_("Form of financing"), null=True, blank=True,
on_delete=models.PROTECT)
# In Finnish: Vuokraoikeus päättyy
lease_rights_end_date = models.DateField(verbose_name=_("Lease rights end date"), null=True, blank=True)
# In Finnish: Indeksi
index = models.PositiveIntegerField(verbose_name=_("Index"))
# In Finnish: Kommentti
note = models.TextField(verbose_name=_("Note"), null=True, blank=True)
class BasisOfRentRate(TimeStampedSafeDeleteModel):
"""
In Finnish: Hinta
"""
basis_of_rent = models.ForeignKey(BasisOfRent, verbose_name=_("Basis of rent"), related_name='rent_rates',
on_delete=models.CASCADE)
# In Finnish: Pääkäyttötarkoitus
intended_use = models.ForeignKey('leasing.RentIntendedUse', verbose_name=_("Intended use"), null=True, blank=True,
on_delete=models.PROTECT)
# In Finnish: Euroa
amount = models.DecimalField(verbose_name=_("Amount"), decimal_places=2, max_digits=12)
# In Finnish: Yksikkö
period = EnumField(PeriodType, verbose_name=_("Period"), max_length=20)
class BasisOfRentPropertyIdentifier(models.Model):
"""
In Finnish: Kiinteistötunnus
"""
basis_of_rent = models.ForeignKey(BasisOfRent, verbose_name=_("Basis of rent"), related_name='property_identifiers',
on_delete=models.CASCADE)
identifier = models.CharField(verbose_name=_("Identifier"), max_length=255)
class BasisOfRentDecision(models.Model):
"""
In Finnish: Päätös
"""
basis_of_rent = models.ForeignKey(BasisOfRent, related_name='decisions', on_delete=models.CASCADE)
identifier = models.CharField(verbose_name=_("Identifier"), max_length=255)
auditlog.register(BasisOfRent)
| 35.920455 | 120 | 0.691237 | from auditlog.registry import auditlog
from django.db import models
from django.utils.translation import ugettext_lazy as _
from enumfields import EnumField
from leasing.enums import PeriodType
from .mixins import NameModel, TimeStampedSafeDeleteModel
class BasisOfRentPlotType(NameModel):
class BasisOfRent(TimeStampedSafeDeleteModel):
plot_type = models.ForeignKey(BasisOfRentPlotType, verbose_name=_("Plot type"), on_delete=models.PROTECT)
start_date = models.DateField(verbose_name=_("Start date"), null=True, blank=True)
end_date = models.DateField(verbose_name=_("End date"), null=True, blank=True)
detailed_plan_identifier = models.CharField(verbose_name=_("Detailed plan identifier"), null=True, blank=True,
max_length=255)
management = models.ForeignKey('leasing.Management', verbose_name=_("Form of management"), null=True, blank=True,
on_delete=models.PROTECT)
financing = models.ForeignKey('leasing.Financing', verbose_name=_("Form of financing"), null=True, blank=True,
on_delete=models.PROTECT)
lease_rights_end_date = models.DateField(verbose_name=_("Lease rights end date"), null=True, blank=True)
index = models.PositiveIntegerField(verbose_name=_("Index"))
note = models.TextField(verbose_name=_("Note"), null=True, blank=True)
class BasisOfRentRate(TimeStampedSafeDeleteModel):
basis_of_rent = models.ForeignKey(BasisOfRent, verbose_name=_("Basis of rent"), related_name='rent_rates',
on_delete=models.CASCADE)
intended_use = models.ForeignKey('leasing.RentIntendedUse', verbose_name=_("Intended use"), null=True, blank=True,
on_delete=models.PROTECT)
amount = models.DecimalField(verbose_name=_("Amount"), decimal_places=2, max_digits=12)
period = EnumField(PeriodType, verbose_name=_("Period"), max_length=20)
class BasisOfRentPropertyIdentifier(models.Model):
basis_of_rent = models.ForeignKey(BasisOfRent, verbose_name=_("Basis of rent"), related_name='property_identifiers',
on_delete=models.CASCADE)
identifier = models.CharField(verbose_name=_("Identifier"), max_length=255)
class BasisOfRentDecision(models.Model):
basis_of_rent = models.ForeignKey(BasisOfRent, related_name='decisions', on_delete=models.CASCADE)
identifier = models.CharField(verbose_name=_("Identifier"), max_length=255)
auditlog.register(BasisOfRent)
| true | true |
f7254d485119f2dd92ad0be5fa608833d0405c1d | 38 | py | Python | irclogs/__init__.py | dokipen/trac-irclogs-plugin | 811aa16fdaf7f6de9bfa6200073f5b33da09fc1a | [
"BSD-3-Clause"
] | null | null | null | irclogs/__init__.py | dokipen/trac-irclogs-plugin | 811aa16fdaf7f6de9bfa6200073f5b33da09fc1a | [
"BSD-3-Clause"
] | 1 | 2015-02-26T23:17:12.000Z | 2015-03-02T15:03:45.000Z | irclogs/__init__.py | dokipen/trac-irclogs-plugin | 811aa16fdaf7f6de9bfa6200073f5b33da09fc1a | [
"BSD-3-Clause"
] | null | null | null | from console import update_irc_search
| 19 | 37 | 0.894737 | from console import update_irc_search
| true | true |
f7254e36b7c014cfeea985736099019756c9cb78 | 1,026 | py | Python | program/experiments/localization/calibrate.py | JankaSvK/thesis | c440ab8242b058f580fdf9d5a1d00708a1696561 | [
"MIT"
] | 1 | 2018-11-29T14:13:47.000Z | 2018-11-29T14:13:47.000Z | program/experiments/localization/calibrate.py | JankaSvK/thesis | c440ab8242b058f580fdf9d5a1d00708a1696561 | [
"MIT"
] | 3 | 2018-04-24T18:30:00.000Z | 2018-05-11T23:25:07.000Z | program/experiments/localization/calibrate.py | JankaSvK/thesis | c440ab8242b058f580fdf9d5a1d00708a1696561 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import os
from sys import argv
if os.name == 'nt':
python = "python"
else:
python = "python3"
try:
experiment_id = int(argv[1])
except Exception:
experiment_id = 63
def command(video1, video2, chessboard):
return "{} ../../Main.py --video1={} --video2={} --chessboard={}".format(python, video1, video2, chessboard)
calib_videos = "../videos/calibration/"
if experiment_id == 38:
chessboard = "7,8,22"
video1 = calib_videos + "38/1.avi"
video2 = calib_videos + "38/2.avi"
elif experiment_id == 16:
chessboard = "7,8,22"
video1 = calib_videos + "16/1.avi"
video2 = calib_videos + "16/2.avi"
elif experiment_id == 43:
chessboard = "6,9,26"
video1 = calib_videos + "43/1.avi"
video2 = calib_videos + "43/2.avi"
elif experiment_id == 63:
chessboard = "7,8,22"
video1 = calib_videos + "63/1.avi"
video2 = calib_videos + "63/2.avi"
else:
print("Not recognized set of videos")
exit(0)
os.system(command(video1, video2, chessboard))
| 24.428571 | 112 | 0.640351 |
import os
from sys import argv
if os.name == 'nt':
python = "python"
else:
python = "python3"
try:
experiment_id = int(argv[1])
except Exception:
experiment_id = 63
def command(video1, video2, chessboard):
return "{} ../../Main.py --video1={} --video2={} --chessboard={}".format(python, video1, video2, chessboard)
calib_videos = "../videos/calibration/"
if experiment_id == 38:
chessboard = "7,8,22"
video1 = calib_videos + "38/1.avi"
video2 = calib_videos + "38/2.avi"
elif experiment_id == 16:
chessboard = "7,8,22"
video1 = calib_videos + "16/1.avi"
video2 = calib_videos + "16/2.avi"
elif experiment_id == 43:
chessboard = "6,9,26"
video1 = calib_videos + "43/1.avi"
video2 = calib_videos + "43/2.avi"
elif experiment_id == 63:
chessboard = "7,8,22"
video1 = calib_videos + "63/1.avi"
video2 = calib_videos + "63/2.avi"
else:
print("Not recognized set of videos")
exit(0)
os.system(command(video1, video2, chessboard))
| true | true |
f7254e6e57d13e131c3fc738bd9c4a2d139d00b0 | 5,481 | py | Python | src/polytopes/run_polychora_examples.py | mohi7solanki/pywonderland | 2b9d61a8414d4cfa92d34325e5e2b9b5d501abca | [
"MIT"
] | null | null | null | src/polytopes/run_polychora_examples.py | mohi7solanki/pywonderland | 2b9d61a8414d4cfa92d34325e5e2b9b5d501abca | [
"MIT"
] | null | null | null | src/polytopes/run_polychora_examples.py | mohi7solanki/pywonderland | 2b9d61a8414d4cfa92d34325e5e2b9b5d501abca | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Render curved 4d polychoron examples
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This script draws uniform polychoron whose vertices lie
on the unit sphere S^3 by using stereographic projection
to map them into 3d space.
:copyright (c) 2018 by Zhao Liang.
"""
import subprocess
from fractions import Fraction
import numpy as np
from models import Polychora
import helpers
POV_EXE = "povray" # POV-Ray exe binary
SCENE_FILE = "polychora_curved.pov" # the main scene file
IMAGE_SIZE = 600 # image size in pixels
IMAGE_QUALITY_LEVEL = 11 # between 0-11
SUPER_SAMPLING_LEVEL = 7 # between 1-9
ANTIALIASING_LEVEL = 0.001 # lower for better quality
POV_COMMAND = " cd povray && " + \
" {} +I{}".format(POV_EXE, SCENE_FILE) + \
" +W{} +H{}".format(IMAGE_SIZE, IMAGE_SIZE) + \
" +Q{}".format(IMAGE_QUALITY_LEVEL) + \
" +A{}".format(ANTIALIASING_LEVEL) + \
" +R{}".format(SUPER_SAMPLING_LEVEL) + \
" +O../{}"
POV_TEMPLATE = """
#declare vertex_size = {};
#declare edge_size = {};
#declare camera_location = {};
#declare object_rotation = {};
#declare extent = {};
#declare vertices = array[{}] {{{}}};
#declare size_func = {};
#declare face_max= {};
#declare face_min = {};
#declare face_index = {};
// this macro is used for adjusting the size of edges
// according to their positions in the space.
#macro get_size(q)
#local len = vlength(q);
#if (size_func = 0)
#local len = (1.0 + len * len) / 4;
#else #if (size_func = 1)
#local len = 2.0 * log(2.0 + len * len);
#else
#local len = 2.0 * log(1.13 + len * len);
#end
#end
len
#end
#macro choose_face(i, face_size)
#local chosen = false;
#for (ind, 0, dimension_size(face_index, 1) - 1)
#if (i = face_index[ind])
#if (face_size > face_min & face_size < face_max)
#local chosen = true;
#end
#end
#end
chosen
#end
{}
{}
{}
"""
VERT_MACRO = "Vert(vertices, {})"
EDGE_MACRO = "Edge(vertices, {}, {}, {})"
def write_to_pov(P,
camera=(0, 0, 180),
rotation=(0, 0, 0),
vertex_size=0.04,
edge_size=0.02,
size_func=0,
face_index=(0,),
face_max=3,
face_min=0.5):
"""Write the data of a polytope `P` to the include file.
:param camera: camera location.
:param rotation: rotation angles (in degree) of the polytope.
:param vertex_size: controls size of the vertices.
:param edge_size: controls size of the edges.
:param size_func: choose which way to adjust the size of the edges.
currently there are three choices, so it can only be 0-2.
:param face_index: controls which type of faces are rendered,
must be a list of integers.
:param face_max: faces larger than this value will not be rendered.
:param face_min: faces smaller than this value will not be rendered.
"""
with open("./povray/polychora-data.inc", "w") as f:
extent = max(np.linalg.norm(helpers.proj3d(v)) for v in P.vertex_coords)
vert_macros = "\n".join(VERT_MACRO.format(k) for k in range(P.num_vertices))
edge_macros = "\n".join(EDGE_MACRO.format(i, e[0], e[1])
for i, elist in enumerate(P.edge_indices)
for e in elist)
face_macros = "\n".join(helpers.export_face(i, face)
for i, flist in enumerate(P.face_coords)
for face in flist)
f.write(POV_TEMPLATE.format(
vertex_size,
edge_size,
helpers.pov_vector(camera),
helpers.pov_vector(rotation),
extent,
P.num_vertices,
helpers.pov_vector_list(P.vertex_coords),
size_func,
face_max,
face_min,
helpers.pov_array(face_index),
vert_macros,
edge_macros,
face_macros))
def draw(coxeter_diagram,
trunc_type,
description="polychora",
extra_relations=(),
**kwargs):
P = Polychora(coxeter_diagram, trunc_type, extra_relations)
P.build_geometry()
write_to_pov(P, **kwargs)
print("rendering {} with {} vertices, {} edges, {} faces".format(
description,
P.num_vertices,
P.num_edges,
P.num_faces))
process = subprocess.Popen(
POV_COMMAND.format(description),
shell=True,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
_, err = process.communicate()
if process.returncode:
print(type(err), err)
raise IOError("POVRay error: " + err.decode("ascii"))
def main():
"""
draw((3, 2, 2, 3, 2, 3), (1, 0, 0, 0), "5-cell", camera=(0, 0, 200),
vertex_size=0.08, edge_size=0.04, rotation=(-30, 60, 0), size_func=1)
draw((5, 2, 2, 3, 2, 3), (1, 0, 0, 1), "runcinated-120-cell", camera=(0, 0, 105),
vertex_size=0.028, edge_size=0.014, face_min=20)
"""
draw((3, 2, 2, 3, 2, 5), (1, 0, 0, 0), "600-cell", camera=(0, 0, 200),
vertex_size=0.12, edge_size=0.04, size_func=2, face_max=4.0, face_min=3.0)
if __name__ == "__main__":
main()
| 29.95082 | 85 | 0.561759 |
import subprocess
from fractions import Fraction
import numpy as np
from models import Polychora
import helpers
POV_EXE = "povray"
SCENE_FILE = "polychora_curved.pov"
IMAGE_SIZE = 600
IMAGE_QUALITY_LEVEL = 11
SUPER_SAMPLING_LEVEL = 7
ANTIALIASING_LEVEL = 0.001
POV_COMMAND = " cd povray && " + \
" {} +I{}".format(POV_EXE, SCENE_FILE) + \
" +W{} +H{}".format(IMAGE_SIZE, IMAGE_SIZE) + \
" +Q{}".format(IMAGE_QUALITY_LEVEL) + \
" +A{}".format(ANTIALIASING_LEVEL) + \
" +R{}".format(SUPER_SAMPLING_LEVEL) + \
" +O../{}"
POV_TEMPLATE = """
#declare vertex_size = {};
#declare edge_size = {};
#declare camera_location = {};
#declare object_rotation = {};
#declare extent = {};
#declare vertices = array[{}] {{{}}};
#declare size_func = {};
#declare face_max= {};
#declare face_min = {};
#declare face_index = {};
// this macro is used for adjusting the size of edges
// according to their positions in the space.
#macro get_size(q)
#local len = vlength(q);
#if (size_func = 0)
#local len = (1.0 + len * len) / 4;
#else #if (size_func = 1)
#local len = 2.0 * log(2.0 + len * len);
#else
#local len = 2.0 * log(1.13 + len * len);
#end
#end
len
#end
#macro choose_face(i, face_size)
#local chosen = false;
#for (ind, 0, dimension_size(face_index, 1) - 1)
#if (i = face_index[ind])
#if (face_size > face_min & face_size < face_max)
#local chosen = true;
#end
#end
#end
chosen
#end
{}
{}
{}
"""
VERT_MACRO = "Vert(vertices, {})"
EDGE_MACRO = "Edge(vertices, {}, {}, {})"
def write_to_pov(P,
camera=(0, 0, 180),
rotation=(0, 0, 0),
vertex_size=0.04,
edge_size=0.02,
size_func=0,
face_index=(0,),
face_max=3,
face_min=0.5):
with open("./povray/polychora-data.inc", "w") as f:
extent = max(np.linalg.norm(helpers.proj3d(v)) for v in P.vertex_coords)
vert_macros = "\n".join(VERT_MACRO.format(k) for k in range(P.num_vertices))
edge_macros = "\n".join(EDGE_MACRO.format(i, e[0], e[1])
for i, elist in enumerate(P.edge_indices)
for e in elist)
face_macros = "\n".join(helpers.export_face(i, face)
for i, flist in enumerate(P.face_coords)
for face in flist)
f.write(POV_TEMPLATE.format(
vertex_size,
edge_size,
helpers.pov_vector(camera),
helpers.pov_vector(rotation),
extent,
P.num_vertices,
helpers.pov_vector_list(P.vertex_coords),
size_func,
face_max,
face_min,
helpers.pov_array(face_index),
vert_macros,
edge_macros,
face_macros))
def draw(coxeter_diagram,
trunc_type,
description="polychora",
extra_relations=(),
**kwargs):
P = Polychora(coxeter_diagram, trunc_type, extra_relations)
P.build_geometry()
write_to_pov(P, **kwargs)
print("rendering {} with {} vertices, {} edges, {} faces".format(
description,
P.num_vertices,
P.num_edges,
P.num_faces))
process = subprocess.Popen(
POV_COMMAND.format(description),
shell=True,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
_, err = process.communicate()
if process.returncode:
print(type(err), err)
raise IOError("POVRay error: " + err.decode("ascii"))
def main():
draw((3, 2, 2, 3, 2, 5), (1, 0, 0, 0), "600-cell", camera=(0, 0, 200),
vertex_size=0.12, edge_size=0.04, size_func=2, face_max=4.0, face_min=3.0)
if __name__ == "__main__":
main()
| true | true |
f7254fe63f765868c8428af33d90ac77ae356bbd | 7,055 | py | Python | deltametrics/sample_data/sample_data.py | amoodie/DeltaMetrics | 9b823bea36851adcebc446c8941f8783325b1a4f | [
"MIT"
] | null | null | null | deltametrics/sample_data/sample_data.py | amoodie/DeltaMetrics | 9b823bea36851adcebc446c8941f8783325b1a4f | [
"MIT"
] | null | null | null | deltametrics/sample_data/sample_data.py | amoodie/DeltaMetrics | 9b823bea36851adcebc446c8941f8783325b1a4f | [
"MIT"
] | null | null | null | import sys
import os
import pkg_resources
import warnings
import numpy as np
import netCDF4
import pooch
from .. import cube
from .. import utils
# deltametrics version
__version__ = utils._get_version()
# enusre DeprecationWarning is shown
warnings.simplefilter("default")
# configure the data registry
REGISTRY = pooch.create(
path=pooch.os_cache("deltametrics"),
base_url='https://github.com/DeltaRCM/DeltaMetrics/raw/develop/deltametrics/sample_data/files/',
env="DELTAMETRICS_DATA_DIR",
)
with pkg_resources.resource_stream("deltametrics.sample_data", "registry.txt") as registry_file:
REGISTRY.load_registry(registry_file)
def _get_golf_path():
unpack = pooch.Unzip()
fnames = REGISTRY.fetch('golf.zip', processor=unpack)
nc_bool = [os.path.splitext(fname)[1] == '.nc' for fname in fnames]
nc_idx = [i for i, b in enumerate(nc_bool) if b]
golf_path = fnames[nc_idx[0]]
return golf_path
def golf():
"""Golf Delta dataset.
This is a synthetic delta dataset generated from the pyDeltaRCM numerical
model. This model run was created to generate sample data. Model was run
on 10/14/2021, at the University of Texas at Austin.
Run was computed with pyDeltaRCM v2.1.0. See log file for complete
information on system and model configuration.
Data available at Zenodo, https://doi.org/10.5281/zenodo.4456143.
Version history:
* v1.1: 10.5281/zenodo.5570962
* v1.0: 10.5281/zenodo.4456144
.. plot::
golf = dm.sample_data.golf()
nt = 5
ts = np.linspace(0, golf['eta'].shape[0]-1, num=nt, dtype=np.int)
fig, ax = plt.subplots(1, nt, figsize=(12, 2))
for i, t in enumerate(ts):
ax[i].imshow(golf['eta'][t, :, :], vmin=-2, vmax=0.5)
ax[i].set_title('t = ' + str(t))
ax[i].axes.get_xaxis().set_ticks([])
ax[i].axes.get_yaxis().set_ticks([])
ax[0].set_ylabel('dim1 direction')
ax[0].set_xlabel('dim2 direction')
plt.show()
"""
golf_path = _get_golf_path()
return cube.DataCube(golf_path, coordinates={'x': 'y', 'y': 'x'})
def tdb12():
raise NotImplementedError
def _get_aeolian_path():
aeolian_path = REGISTRY.fetch('swanson_aeolian_expt1.nc')
return aeolian_path
def aeolian():
"""An aeolian dune field dataset.
This is a synthetic delta dataset generated from the Swanson et al.,
2017 "A Surface Model for Aeolian Dune Topography" numerical model. The
data have been subsetted, only keeping the first 500 saved timesteps, and
formatted into a netCDF file.
Swanson, T., Mohrig, D., Kocurek, G. et al. A Surface Model for Aeolian
Dune Topography. Math Geosci 49, 635–655
(2017). https://doi.org/10.1007/s11004-016-9654-x
Dataset reference: https://doi.org/10.6084/m9.figshare.17118827.v1
Details:
* default simualtion parameters were used.
* only the first 500 timesteps of the simulation were recorded into
the netcdf file.
* the *ordering* for "easting" and "northing" coordinates in the
netCDF file is opposite from the paper---that is the source region
is along the second axis, i.e., ``dim1[source_regiom]==0``. The
display of this dataset is thus different from the original
paper, *but the data are the same*.
* simulation used the model code included as a supplement to the paper
found here:
https://static-content.springer.com/esm/art%3A10.1007%2Fs11004-016-9654-x/MediaObjects/11004_2016_9654_MOESM5_ESM.txt
* simulation was executed on 12/02/2021 with Matlab R2021a on Ubuntu
20.04.
.. plot::
aeolian = dm.sample_data.aeolian()
nt = 5
ts = np.linspace(0, aeolian['eta'].shape[0]-1, num=nt, dtype=np.int)
fig, ax = plt.subplots(1, nt, figsize=(8, 4))
for i, t in enumerate(ts):
ax[i].imshow(aeolian['eta'][t, :, :], vmin=-5, vmax=7)
ax[i].set_title('t = ' + str(t))
ax[i].axes.get_xaxis().set_ticks([])
ax[i].axes.get_yaxis().set_ticks([])
ax[0].set_ylabel('northing')
ax[0].set_xlabel('easting')
plt.show()
"""
aeolian_path = _get_aeolian_path()
return cube.DataCube(aeolian_path)
def _get_rcm8_path():
rcm8_path = REGISTRY.fetch('pyDeltaRCM_Output_8.nc')
return rcm8_path
def rcm8():
"""Rcm8 Delta dataset.
This is a synthetic delta dataset generated from the pyDeltaRCM numerical
model. Unfortunately, we do not know the specific version of pyDeltaRCM
the model run was executed with. Moreover, many new coupling features have
been added to pyDeltaRCM and DeltaMetrics since this run. As a result,
this dataset is slated to be deprecated at some point, in favor of the
:obj:`golf` dataset.
.. important::
If you are learning to use DeltaMetrics or developing new codes or
documentation, please use the :obj:`golf` delta dataset.
.. warning:: This cube may be removed in future releases.
.. plot::
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
rcm8 = dm.sample_data.rcm8()
nt = 5
ts = np.linspace(0, rcm8['eta'].shape[0]-1, num=nt, dtype=np.int)
fig, ax = plt.subplots(1, nt, figsize=(12, 2))
for i, t in enumerate(ts):
ax[i].imshow(rcm8['eta'][t, :, :], vmin=-2, vmax=0.5)
ax[i].set_title('t = ' + str(t))
ax[i].axes.get_xaxis().set_ticks([])
ax[i].axes.get_yaxis().set_ticks([])
ax[0].set_ylabel('y-direction')
ax[0].set_xlabel('x-direction')
plt.show()
"""
rcm8_path = _get_rcm8_path()
return cube.DataCube(rcm8_path)
def _get_landsat_path():
landsat_path = REGISTRY.fetch('LandsatEx.hdf5')
return landsat_path
def landsat():
"""Landsat image dataset.
This is a set of satellite images from the Landsat 5 satellite, collected
over the Krishna River delta, India. The dataset includes annual-composite
scenes from four different years (`[1995, 2000, 2005, 2010]`) and includes
data collected from four bands (`['Red', 'Green', 'Blue', 'NIR']`).
.. plot::
landsat = dm.sample_data.landsat()
nt = landsat.shape[0]
maxr = np.max(landsat['Red'][:])
maxg = np.max(landsat['Green'][:])
maxb = np.max(landsat['Blue'][:])
fig, ax = plt.subplots(1, nt, figsize=(12, 2))
for i in np.arange(nt):
_arr = np.dstack((landsat['Red'][i, :, :]/maxr,
landsat['Green'][i, :, :]/maxg,
landsat['Blue'][i, :, :]/maxb))
ax[i].imshow(_arr)
ax[i].set_title('year = ' + str(landsat.t[i]))
ax[i].axes.get_xaxis().set_ticks([])
ax[i].axes.get_yaxis().set_ticks([])
plt.show()
"""
landsat_path = _get_landsat_path()
return cube.DataCube(landsat_path)
| 32.813953 | 125 | 0.632034 | import sys
import os
import pkg_resources
import warnings
import numpy as np
import netCDF4
import pooch
from .. import cube
from .. import utils
__version__ = utils._get_version()
warnings.simplefilter("default")
REGISTRY = pooch.create(
path=pooch.os_cache("deltametrics"),
base_url='https://github.com/DeltaRCM/DeltaMetrics/raw/develop/deltametrics/sample_data/files/',
env="DELTAMETRICS_DATA_DIR",
)
with pkg_resources.resource_stream("deltametrics.sample_data", "registry.txt") as registry_file:
REGISTRY.load_registry(registry_file)
def _get_golf_path():
unpack = pooch.Unzip()
fnames = REGISTRY.fetch('golf.zip', processor=unpack)
nc_bool = [os.path.splitext(fname)[1] == '.nc' for fname in fnames]
nc_idx = [i for i, b in enumerate(nc_bool) if b]
golf_path = fnames[nc_idx[0]]
return golf_path
def golf():
golf_path = _get_golf_path()
return cube.DataCube(golf_path, coordinates={'x': 'y', 'y': 'x'})
def tdb12():
raise NotImplementedError
def _get_aeolian_path():
aeolian_path = REGISTRY.fetch('swanson_aeolian_expt1.nc')
return aeolian_path
def aeolian():
aeolian_path = _get_aeolian_path()
return cube.DataCube(aeolian_path)
def _get_rcm8_path():
rcm8_path = REGISTRY.fetch('pyDeltaRCM_Output_8.nc')
return rcm8_path
def rcm8():
rcm8_path = _get_rcm8_path()
return cube.DataCube(rcm8_path)
def _get_landsat_path():
landsat_path = REGISTRY.fetch('LandsatEx.hdf5')
return landsat_path
def landsat():
landsat_path = _get_landsat_path()
return cube.DataCube(landsat_path)
| true | true |
f72550174de22fb7301842e0d293f4de18995253 | 3,836 | py | Python | cirq-core/cirq/ops/two_qubit_diagonal_gate_test.py | LLcat1217/Cirq | b88069f7b01457e592ad69d6b413642ef11a56b8 | [
"Apache-2.0"
] | null | null | null | cirq-core/cirq/ops/two_qubit_diagonal_gate_test.py | LLcat1217/Cirq | b88069f7b01457e592ad69d6b413642ef11a56b8 | [
"Apache-2.0"
] | null | null | null | cirq-core/cirq/ops/two_qubit_diagonal_gate_test.py | LLcat1217/Cirq | b88069f7b01457e592ad69d6b413642ef11a56b8 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
import sympy
import cirq
@pytest.mark.parametrize(
'gate',
(
(
cirq.TwoQubitDiagonalGate([2, 3, 5, 7]),
cirq.TwoQubitDiagonalGate([0, 0, 0, 0]),
cirq.TwoQubitDiagonalGate([2, 3, 5, sympy.Symbol('a')]),
cirq.TwoQubitDiagonalGate([0.34, 0.12, 0, 0.96]),
)
),
)
def test_consistent_protocols(gate):
cirq.testing.assert_implements_consistent_protocols(gate)
def test_parameterized_decompose():
angles = sympy.symbols('x0, x1, x2, x3')
parameterized_op = cirq.TwoQubitDiagonalGate(angles).on(*cirq.LineQubit.range(2))
decomposed_circuit = cirq.Circuit(cirq.decompose(parameterized_op))
for resolver in (
cirq.Linspace('x0', -2, 2, 6)
* cirq.Linspace('x1', -2, 2, 6)
* cirq.Linspace('x2', -2, 2, 6)
* cirq.Linspace('x3', -2, 2, 6)
):
np.testing.assert_allclose(
cirq.unitary(cirq.resolve_parameters(parameterized_op, resolver)),
cirq.unitary(cirq.resolve_parameters(decomposed_circuit, resolver)),
)
def test_unitary():
diagonal_angles = [2, 3, 5, 7]
assert cirq.has_unitary(cirq.TwoQubitDiagonalGate(diagonal_angles))
np.testing.assert_allclose(
cirq.unitary(cirq.TwoQubitDiagonalGate(diagonal_angles)),
np.diag([np.exp(1j * angle) for angle in diagonal_angles]),
atol=1e-8,
)
def test_diagram():
a, b = cirq.LineQubit.range(2)
diagonal_circuit = cirq.Circuit(cirq.TwoQubitDiagonalGate([2, 3, 5, 7])(a, b))
cirq.testing.assert_has_diagram(
diagonal_circuit,
"""
0: ───diag(2, 3, 5, 7)───
│
1: ───#2─────────────────
""",
)
cirq.testing.assert_has_diagram(
diagonal_circuit,
"""
0: ---diag(2, 3, 5, 7)---
|
1: ---#2-----------------
""",
use_unicode_characters=False,
)
def test_diagonal_exponent():
diagonal_angles = [2, 3, 5, 7]
diagonal_gate = cirq.TwoQubitDiagonalGate(diagonal_angles)
sqrt_diagonal_gate = diagonal_gate**0.5
expected_angles = [prime / 2 for prime in diagonal_angles]
assert cirq.approx_eq(sqrt_diagonal_gate, cirq.TwoQubitDiagonalGate(expected_angles))
assert cirq.pow(cirq.TwoQubitDiagonalGate(diagonal_angles), "test", None) is None
def test_protocols_mul_not_implemented():
diagonal_angles = [2, 3, None, 7]
diagonal_gate = cirq.TwoQubitDiagonalGate(diagonal_angles)
with pytest.raises(TypeError):
cirq.protocols.pow(diagonal_gate, 3)
@pytest.mark.parametrize('resolve_fn', [cirq.resolve_parameters, cirq.resolve_parameters_once])
def test_resolve(resolve_fn):
diagonal_angles = [2, 3, 5, 7]
diagonal_gate = cirq.TwoQubitDiagonalGate(
diagonal_angles[:2] + [sympy.Symbol('a'), sympy.Symbol('b')]
)
assert cirq.is_parameterized(diagonal_gate)
diagonal_gate = resolve_fn(diagonal_gate, {'a': 5})
assert diagonal_gate == cirq.TwoQubitDiagonalGate(diagonal_angles[:3] + [sympy.Symbol('b')])
assert cirq.is_parameterized(diagonal_gate)
diagonal_gate = resolve_fn(diagonal_gate, {'b': 7})
assert diagonal_gate == cirq.TwoQubitDiagonalGate(diagonal_angles)
assert not cirq.is_parameterized(diagonal_gate)
| 31.966667 | 96 | 0.676747 |
import numpy as np
import pytest
import sympy
import cirq
@pytest.mark.parametrize(
'gate',
(
(
cirq.TwoQubitDiagonalGate([2, 3, 5, 7]),
cirq.TwoQubitDiagonalGate([0, 0, 0, 0]),
cirq.TwoQubitDiagonalGate([2, 3, 5, sympy.Symbol('a')]),
cirq.TwoQubitDiagonalGate([0.34, 0.12, 0, 0.96]),
)
),
)
def test_consistent_protocols(gate):
cirq.testing.assert_implements_consistent_protocols(gate)
def test_parameterized_decompose():
angles = sympy.symbols('x0, x1, x2, x3')
parameterized_op = cirq.TwoQubitDiagonalGate(angles).on(*cirq.LineQubit.range(2))
decomposed_circuit = cirq.Circuit(cirq.decompose(parameterized_op))
for resolver in (
cirq.Linspace('x0', -2, 2, 6)
* cirq.Linspace('x1', -2, 2, 6)
* cirq.Linspace('x2', -2, 2, 6)
* cirq.Linspace('x3', -2, 2, 6)
):
np.testing.assert_allclose(
cirq.unitary(cirq.resolve_parameters(parameterized_op, resolver)),
cirq.unitary(cirq.resolve_parameters(decomposed_circuit, resolver)),
)
def test_unitary():
    """The gate's unitary is diag(exp(i * angle)) over the supplied angles."""
    angles = [2, 3, 5, 7]
    gate = cirq.TwoQubitDiagonalGate(angles)
    assert cirq.has_unitary(gate)
    expected = np.diag(np.exp(1j * np.asarray(angles)))
    np.testing.assert_allclose(cirq.unitary(gate), expected, atol=1e-8)
def test_diagram():
    """The gate renders as diag(...) on qubit 0 with a '#2' continuation marker
    on qubit 1, in both unicode and ASCII diagram modes."""
    a, b = cirq.LineQubit.range(2)
    diagonal_circuit = cirq.Circuit(cirq.TwoQubitDiagonalGate([2, 3, 5, 7])(a, b))
    # Default (unicode) rendering.
    cirq.testing.assert_has_diagram(
        diagonal_circuit,
        """
0: ───diag(2, 3, 5, 7)───
      │
1: ───#2─────────────────
""",
    )
    # ASCII fallback rendering.
    cirq.testing.assert_has_diagram(
        diagonal_circuit,
        """
0: ---diag(2, 3, 5, 7)---
      |
1: ---#2-----------------
""",
        use_unicode_characters=False,
    )
def test_diagonal_exponent():
    """Raising to a power scales every diagonal angle; an uninterpretable
    exponent makes cirq.pow fall back to the supplied default."""
    angles = [2, 3, 5, 7]
    gate = cirq.TwoQubitDiagonalGate(angles)
    halved = gate**0.5
    assert cirq.approx_eq(halved, cirq.TwoQubitDiagonalGate([a / 2 for a in angles]))
    assert cirq.pow(cirq.TwoQubitDiagonalGate(angles), "test", None) is None
def test_protocols_mul_not_implemented():
    """Exponentiation raises TypeError when one angle is not a number."""
    bad_angles = [2, 3, None, 7]
    gate = cirq.TwoQubitDiagonalGate(bad_angles)
    with pytest.raises(TypeError):
        cirq.protocols.pow(gate, 3)
@pytest.mark.parametrize('resolve_fn', [cirq.resolve_parameters, cirq.resolve_parameters_once])
def test_resolve(resolve_fn):
    """Symbols can be resolved incrementally until the gate is numeric."""
    numeric = [2, 3, 5, 7]
    partial = cirq.TwoQubitDiagonalGate(
        numeric[:2] + [sympy.Symbol('a'), sympy.Symbol('b')]
    )
    assert cirq.is_parameterized(partial)
    partial = resolve_fn(partial, {'a': 5})
    assert partial == cirq.TwoQubitDiagonalGate(numeric[:3] + [sympy.Symbol('b')])
    assert cirq.is_parameterized(partial)
    partial = resolve_fn(partial, {'b': 7})
    assert partial == cirq.TwoQubitDiagonalGate(numeric)
    assert not cirq.is_parameterized(partial)
| true | true |
f725507deeef1871cc78aa29f14a4bf893392a8e | 3,010 | py | Python | mt4forexparser/UniLogger.py | Tim55667757/MT4ForexParser | 4aceab05f150cfccc0ad4622c612476e279b68f0 | [
"MIT"
] | 11 | 2020-07-23T22:34:07.000Z | 2022-03-03T04:42:22.000Z | mt4forexparser/UniLogger.py | Tim55667757/MT4ForexParser | 4aceab05f150cfccc0ad4622c612476e279b68f0 | [
"MIT"
] | null | null | null | mt4forexparser/UniLogger.py | Tim55667757/MT4ForexParser | 4aceab05f150cfccc0ad4622c612476e279b68f0 | [
"MIT"
] | 4 | 2021-03-02T08:04:15.000Z | 2021-08-24T13:46:35.000Z | # -*- coding: utf-8 -*-
#
# Author: Timur Gilmullin
# This module initialize standard python logging system.
import sys
import logging.handlers
# initialize Main Parent Logger:
# Main parent logger shared by the whole application:
UniLogger = logging.getLogger("UniLogger")
# Record layout: file name, line number, level, timestamp, then the message.
formatString = "%(filename)-20sL:%(lineno)-5d%(levelname)-8s[%(asctime)s] %(message)s"
formatter = logging.Formatter(formatString)
sys.stderr = sys.stdout  # merge stderr into stdout so tracebacks share one stream
def SetLevel(vLevel='ERROR'):
    """
    Set the UniLogger verbosity level.

    :param vLevel: a numeric string '1'-'5' or a (case-insensitive) level
        name: DEBUG, INFO, WARNING, ERROR, CRITICAL. Any other value —
        including non-strings — resets the logger to logging.NOTSET.
    """
    # Dispatch table replaces the original if/elif chain; the numeric alias
    # and the level name both map to the same logging constant. Note that
    # '5'.upper() == '5', so one .upper() lookup covers both spellings.
    levels = {
        '5': logging.CRITICAL, 'CRITICAL': logging.CRITICAL,
        '4': logging.ERROR, 'ERROR': logging.ERROR,
        '3': logging.WARNING, 'WARNING': logging.WARNING,
        '2': logging.INFO, 'INFO': logging.INFO,
        '1': logging.DEBUG, 'DEBUG': logging.DEBUG,
    }
    UniLogger.level = logging.NOTSET
    if isinstance(vLevel, str):
        UniLogger.level = levels.get(vLevel.upper(), logging.NOTSET)
class LevelFilter(logging.Filter):
    """Logging filter that admits only records at or above a given level."""

    def __init__(self, level):
        super().__init__()
        # Minimum levelno a record needs in order to pass the filter.
        self.level = level

    def filter(self, record):
        """Return True to keep the record, False to drop it."""
        return self.level <= record.levelno
def EnableLogger(logFile, parentHandler=UniLogger, useFormat=formatter):
    """
    Attach a rotating file handler (DEBUG level) to the given parent logger.

    :param logFile: path of the log file to write.
    :param parentHandler: logger the new handler is attached to.
    :param useFormat: formatter to apply; any falsy value falls back to the
        module-level default formatter.
    :return: the created RotatingFileHandler (pass it to DisableLogger later).
    """
    # Rotate after 50 MB, keeping 4 backup files. (The old comment wrongly
    # called this "5Mb"; 50 * 1024 * 1024 bytes is 50 MB.)
    maxSizeBytes = 50 * 1024 * 1024
    logHandler = logging.handlers.RotatingFileHandler(logFile, encoding="UTF-8", maxBytes=maxSizeBytes, backupCount=4)
    logHandler.level = logging.DEBUG  # file log captures everything from DEBUG up
    logHandler.addFilter(LevelFilter(logging.DEBUG))
    # Single expression replaces the redundant if/else pair.
    logHandler.setFormatter(useFormat if useFormat else formatter)
    parentHandler.addHandler(logHandler)
    return logHandler
def DisableLogger(handler, parentHandler=UniLogger):
    """
    Flush, close and detach a handler previously created by EnableLogger.
    A falsy handler is silently ignored.
    """
    if not handler:
        return
    handler.flush()
    handler.close()
    if handler in parentHandler.handlers:
        parentHandler.removeHandler(handler)
# --- Main init: configure the module-level logger once, at import time.
SetLevel('DEBUG')  # parent UniLogger accepts everything from DEBUG up
streamHandler = logging.StreamHandler()  # console (stdout) handler
streamHandler.setFormatter(formatter)  # reuse the module-level record layout
streamHandler.level = logging.INFO  # console shows INFO and above only
UniLogger.addHandler(streamHandler)  # adding STDOUT UniLogger handler to Parent UniLogger
# Example: uncomment to also log to a rotating file.
# fileLogHandler = EnableLogger(logFile='log.txt', parentHandler=UniLogger, useFormat=formatter)  # add logging to file
sepWide = '-' * 120  # long-long log separator
sepLong = '-' * 80  # long log separator
sepShort = '-' * 40  # short log separator
sepLine = '=--=' * 20  # log part separator
| 29.223301 | 119 | 0.68206 |
import sys
import logging.handlers
# Main parent logger shared by the whole application:
UniLogger = logging.getLogger("UniLogger")
# Record layout: file name, line number, level, timestamp, then the message.
formatString = "%(filename)-20sL:%(lineno)-5d%(levelname)-8s[%(asctime)s] %(message)s"
formatter = logging.Formatter(formatString)
sys.stderr = sys.stdout  # merge stderr into stdout so tracebacks share one stream
def SetLevel(vLevel='ERROR'):
    """
    Set the UniLogger verbosity level.

    :param vLevel: a numeric string '1'-'5' or a (case-insensitive) level
        name: DEBUG, INFO, WARNING, ERROR, CRITICAL. Any other value —
        including non-strings — resets the logger to logging.NOTSET.
    """
    # Dispatch table replaces the if/elif chain; '5'.upper() == '5', so a
    # single .upper() lookup covers both the numeric and named spellings.
    levels = {
        '5': logging.CRITICAL, 'CRITICAL': logging.CRITICAL,
        '4': logging.ERROR, 'ERROR': logging.ERROR,
        '3': logging.WARNING, 'WARNING': logging.WARNING,
        '2': logging.INFO, 'INFO': logging.INFO,
        '1': logging.DEBUG, 'DEBUG': logging.DEBUG,
    }
    UniLogger.level = logging.NOTSET
    if isinstance(vLevel, str):
        UniLogger.level = levels.get(vLevel.upper(), logging.NOTSET)
class LevelFilter(logging.Filter):
    """Logging filter that admits only records at or above ``level``."""
    def __init__(self, level):
        super().__init__()
        self.level = level  # minimum levelno a record needs to pass
    def filter(self, record):
        # True keeps the record; False drops it.
        return record.levelno >= self.level
def EnableLogger(logFile, parentHandler=UniLogger, useFormat=formatter):
    """Attach a rotating file handler (DEBUG level, 50 MB x 4 backups) to a
    logger and return it so it can later be passed to DisableLogger."""
    rotateAt = 50 * 1024 * 1024
    handler = logging.handlers.RotatingFileHandler(
        logFile, encoding="UTF-8", maxBytes=rotateAt, backupCount=4
    )
    handler.level = logging.DEBUG
    handler.addFilter(LevelFilter(logging.DEBUG))
    handler.setFormatter(useFormat if useFormat else formatter)
    parentHandler.addHandler(handler)
    return handler
def DisableLogger(handler, parentHandler=UniLogger):
    """Flush, close and detach a handler; falsy handlers are ignored."""
    if not handler:
        return
    handler.flush()
    handler.close()
    if handler in parentHandler.handlers:
        parentHandler.removeHandler(handler)
# Configure the module-level logger once, at import time:
SetLevel('DEBUG')  # parent logger accepts everything from DEBUG up
streamHandler = logging.StreamHandler()  # console (stdout) handler
streamHandler.setFormatter(formatter)  # reuse the module-level record layout
streamHandler.level = logging.INFO  # console shows INFO and above only
UniLogger.addHandler(streamHandler)
# Ready-made separator strings for visually structuring log output:
sepLong = '-' * 80
sepShort = '-' * 40
sepLine = '=--=' * 20
| true | true |
f72551baa697a8dbad37b7a7ff3d920bbdc9c06b | 271 | py | Python | Functional Programming/Sum_of_numbers_Recursion.py | youngtech515/PythonScripts | c890c84113ba4e05aea15d1347886dbfb52d3bf9 | [
"MIT"
] | null | null | null | Functional Programming/Sum_of_numbers_Recursion.py | youngtech515/PythonScripts | c890c84113ba4e05aea15d1347886dbfb52d3bf9 | [
"MIT"
] | null | null | null | Functional Programming/Sum_of_numbers_Recursion.py | youngtech515/PythonScripts | c890c84113ba4e05aea15d1347886dbfb52d3bf9 | [
"MIT"
] | null | null | null | print("To print the sum of numbers using recursion")
def calculatatesum(num):
if(num):
a=num+calculatatesum(num-1)
return a
else:
return 0
n=int(input("Enter the Number value:"))
print("The Sum of numbers is,",calculatatesum(n))
| 27.1 | 53 | 0.630996 | print("To print the sum of numbers using recursion")
def calculatatesum(num):
if(num):
a=num+calculatatesum(num-1)
return a
else:
return 0
n=int(input("Enter the Number value:"))
print("The Sum of numbers is,",calculatatesum(n))
| true | true |
f725525a43e73f257b95eb328de9c2b2b9780b5e | 494 | py | Python | optoanalysis/optoanalysis/__init__.py | markusrademacher/DataHandling | 240c7c8378541cc2624fec049a185646f3016233 | [
"MIT"
] | 2 | 2017-07-12T11:18:51.000Z | 2018-08-26T10:31:00.000Z | optoanalysis/optoanalysis/__init__.py | markusrademacher/DataHandling | 240c7c8378541cc2624fec049a185646f3016233 | [
"MIT"
] | 7 | 2017-04-24T18:42:23.000Z | 2017-06-20T13:00:09.000Z | optoanalysis/optoanalysis/__init__.py | AshleySetter/optoanalysis | 2b24a4176508d5e0e5e8644bb617a34f73b041f7 | [
"MIT"
] | 3 | 2017-04-09T19:15:06.000Z | 2017-04-28T09:31:32.000Z | """
optoanalysis
============
Package of functions for the Matter-Wave Interferometry
group for handling experimental data.
"""
# init file
import os

# Read the package version from the VERSION file shipped next to this module.
# Using a `with` block closes the handle promptly; the original left the file
# object open for the lifetime of the interpreter (ResourceWarning).
_mypackage_root_dir = os.path.dirname(__file__)
with open(os.path.join(_mypackage_root_dir, 'VERSION')) as _version_file:
    __version__ = _version_file.read().strip()
# the following line imports all the functions from optoanalysis.py
from .optoanalysis import *
import optoanalysis.thermo
import optoanalysis.LeCroy
import optoanalysis.Saleae
| 19.76 | 67 | 0.777328 |
import os

# Read the package version from the VERSION file shipped next to this module.
# Using a `with` block closes the handle promptly; the original left the file
# object open for the lifetime of the interpreter (ResourceWarning).
_mypackage_root_dir = os.path.dirname(__file__)
with open(os.path.join(_mypackage_root_dir, 'VERSION')) as _version_file:
    __version__ = _version_file.read().strip()
from .optoanalysis import *
import optoanalysis.thermo
import optoanalysis.LeCroy
import optoanalysis.Saleae
| true | true |
f72552862a7eea93f641832290eb243900232978 | 1,055 | py | Python | web_audio/helpers.py | SaxAlien/crap-code | bff99bc4501d1c7bc8c169c1b66a2d6bd7ad3494 | [
"Apache-2.0"
] | null | null | null | web_audio/helpers.py | SaxAlien/crap-code | bff99bc4501d1c7bc8c169c1b66a2d6bd7ad3494 | [
"Apache-2.0"
] | null | null | null | web_audio/helpers.py | SaxAlien/crap-code | bff99bc4501d1c7bc8c169c1b66a2d6bd7ad3494 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Crap class
but make code more compact. lmao
WARNING! WARNING!
HIGH CONCENTRATION OF SHIT!
and in future here will be adding more and more methods and classes
but i'm not shure
"""
import os
def success(message):
    """Wrap *message* in a dismissable Bootstrap "success" alert box."""
    close_button = '<button type="button" class="close" data-dismiss="alert">×</button>'
    return '<div class="alert alert-success alert-dismissable">{}{}</div>'.format(
        close_button, message
    )
def warning(message):
    """Wrap *message* in a dismissable Bootstrap "danger" alert box."""
    close_button = '<button type="button" class="close" data-dismiss="alert">×</button>'
    return '<div class="alert alert-danger alert-dismissable">{}{}</div>'.format(
        close_button, message
    )
def playlist(path):
    """Build ``<option>`` markup for every directory entry in *path*.

    :param path: directory whose entries become playlist options.
    :return: concatenated ``<option>name</option>`` fragments.

    The original Python-2-only ``unicode.encode(unicode(str(i), 'utf-8'),
    'utf8')`` round-trip was an identity transformation, so entries are now
    formatted directly; this also makes the function work under Python 3 and
    avoids quadratic ``+=`` string building.
    """
    return ''.join('<option>{}</option>'.format(name) for name in os.listdir(path))
| 25.731707 | 98 | 0.611374 |
import os
def success(message):
    """Return *message* rendered inside a dismissable Bootstrap success alert."""
    template = (
        '<div class="alert alert-success alert-dismissable">'
        '<button type="button" class="close" data-dismiss="alert">×</button>'
        '%s</div>'
    )
    return template % (message,)
def warning(message):
    """Return *message* rendered inside a dismissable Bootstrap danger alert."""
    template = (
        '<div class="alert alert-danger alert-dismissable">'
        '<button type="button" class="close" data-dismiss="alert">×</button>'
        '%s</div>'
    )
    return template % (message,)
def playlist(path):
    """Build ``<option>`` markup for every directory entry in *path*.

    The original Python-2-only ``unicode``/``encode`` round-trip was an
    identity transformation; formatting entries directly is equivalent,
    works under Python 3, and avoids quadratic ``+=`` string building.
    """
    return ''.join('<option>{}</option>'.format(name) for name in os.listdir(path))
| true | true |
f725541a544124a0f547895b485bcf1f5d21572b | 1,467 | py | Python | problems/pctsp/salesman/pctsp/application.py | AYaddaden/attention-learn-to-route | 74c2d6533d9e0faae80fa85f6bee2df2142708ba | [
"MIT"
] | 540 | 2019-02-07T13:52:30.000Z | 2022-03-31T12:51:46.000Z | problems/pctsp/salesman/pctsp/application.py | AYaddaden/attention-learn-to-route | 74c2d6533d9e0faae80fa85f6bee2df2142708ba | [
"MIT"
] | 40 | 2019-02-06T17:57:11.000Z | 2022-03-18T12:18:48.000Z | problems/pctsp/salesman/pctsp/application.py | AYaddaden/attention-learn-to-route | 74c2d6533d9e0faae80fa85f6bee2df2142708ba | [
"MIT"
] | 227 | 2019-02-15T09:25:02.000Z | 2022-03-27T10:42:21.000Z | # module application.py
#
# Copyright (c) 2015 Rafael Reis
#
"""
application module - Main module that solves the Prize Collecting Travelling Salesman Problem
"""
from pctsp.model.pctsp import *
from pctsp.model import solution
from pctsp.algo.genius import genius
from pctsp.algo import ilocal_search as ils
from pkg_resources import resource_filename
import random
INPUT_INSTANCE_FILE = resource_filename('pctsp', 'data/problem_20_100_100_1000.pctsp')
def solve_instance(filename, min_prize, runs=10, seed=1234):
    """Solve one PCTSP instance file.

    Seeds the RNG for reproducibility, builds a random initial solution over
    70% of the nodes, improves it with iterated local search, and returns
    ``(route_without_depot, quality)``.
    """
    random.seed(seed)
    problem = Pctsp()
    problem.load(filename, min_prize)
    initial = solution.random(problem, size=int(len(problem.prize) * 0.7))
    best = ils.ilocal_search(initial, n_runs=runs)
    return (best.route[1:], best.quality)
def main():
    """Demo entry point: solve the bundled PCTSP instance.

    Prints the random initial solution and then the solution improved by
    iterated local search.
    """
    problem = Pctsp()
    problem.load(INPUT_INSTANCE_FILE, 386)
    initial_size = int(len(problem.prize) * 0.7)
    sol = solution.random(problem, size=initial_size)
    print(sol.route)
    print(sol.size)
    print(sol.quality)
    print(sol.is_valid())
    print("\n")
    sol = ils.ilocal_search(sol)
    print(sol.route)
    print(sol.size)
    print(sol.quality)
    print(sol.is_valid())


if __name__ == '__main__':
    main()
| 24.04918 | 93 | 0.652352 |
from pctsp.model.pctsp import *
from pctsp.model import solution
from pctsp.algo.genius import genius
from pctsp.algo import ilocal_search as ils
from pkg_resources import resource_filename
import random
INPUT_INSTANCE_FILE = resource_filename('pctsp', 'data/problem_20_100_100_1000.pctsp')
def solve_instance(filename, min_prize, runs=10, seed=1234):
    """Load a PCTSP instance, run iterated local search from a random start,
    and return ``(route_without_depot, quality)``."""
    random.seed(seed)
    problem = Pctsp()
    problem.load(filename, min_prize)
    start = solution.random(problem, size=int(len(problem.prize) * 0.7))
    improved = ils.ilocal_search(start, n_runs=runs)
    return (improved.route[1:], improved.quality)
def main():
    """Demo entry point: print a random PCTSP solution, then the one
    improved by iterated local search."""
    problem = Pctsp()
    problem.load(INPUT_INSTANCE_FILE, 386)
    sol = solution.random(problem, size=int(len(problem.prize) * 0.7))
    print(sol.route)
    print(sol.size)
    print(sol.quality)
    print(sol.is_valid())
    print("\n")
    sol = ils.ilocal_search(sol)
    print(sol.route)
    print(sol.size)
    print(sol.quality)
    print(sol.is_valid())
if __name__ == '__main__':
    main()
| true | true |
f72555b7e4d80fd6f6a428c1e413cc4fa2ac3266 | 5,723 | py | Python | azure-batch/azure/batch/models/cloud_job_schedule.py | jmalobicky/azure-sdk-for-python | 61234a3d83f8fb481d1dd2386e54e888864878fd | [
"MIT"
] | 1 | 2018-07-23T08:59:24.000Z | 2018-07-23T08:59:24.000Z | azure-batch/azure/batch/models/cloud_job_schedule.py | jmalobicky/azure-sdk-for-python | 61234a3d83f8fb481d1dd2386e54e888864878fd | [
"MIT"
] | null | null | null | azure-batch/azure/batch/models/cloud_job_schedule.py | jmalobicky/azure-sdk-for-python | 61234a3d83f8fb481d1dd2386e54e888864878fd | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class CloudJobSchedule(Model):
    """A job schedule that allows recurring jobs by specifying when to run jobs
    and a specification used to create each job.

    :param id: A string that uniquely identifies the schedule within the
     account.
    :type id: str
    :param display_name: The display name for the schedule.
    :type display_name: str
    :param url: The URL of the job schedule.
    :type url: str
    :param e_tag: The ETag of the job schedule. This is an opaque string. You
     can use it to detect whether the job schedule has changed between
     requests. In particular, you can pass the ETag with an Update Job
     Schedule request to specify that your changes should take effect only if
     nobody else has modified the schedule in the meantime.
    :type e_tag: str
    :param last_modified: The last modified time of the job schedule. This is
     the last time at which the schedule level data, such as the job
     specification or recurrence information, changed. It does not factor in
     job-level changes such as new jobs being created or jobs changing state.
    :type last_modified: datetime
    :param creation_time: The creation time of the job schedule.
    :type creation_time: datetime
    :param state: The current state of the job schedule. Possible values
     include: 'active', 'completed', 'disabled', 'terminating', 'deleting'
    :type state: str or ~azure.batch.models.JobScheduleState
    :param state_transition_time: The time at which the job schedule entered
     the current state.
    :type state_transition_time: datetime
    :param previous_state: The previous state of the job schedule. This
     property is not present if the job schedule is in its initial active
     state. Possible values include: 'active', 'completed', 'disabled',
     'terminating', 'deleting'
    :type previous_state: str or ~azure.batch.models.JobScheduleState
    :param previous_state_transition_time: The time at which the job schedule
     entered its previous state. This property is not present if the job
     schedule is in its initial active state.
    :type previous_state_transition_time: datetime
    :param schedule: The schedule according to which jobs will be created.
    :type schedule: ~azure.batch.models.Schedule
    :param job_specification: The details of the jobs to be created on this
     schedule.
    :type job_specification: ~azure.batch.models.JobSpecification
    :param execution_info: Information about jobs that have been and will be
     run under this schedule.
    :type execution_info: ~azure.batch.models.JobScheduleExecutionInformation
    :param metadata: A list of name-value pairs associated with the schedule
     as metadata. The Batch service does not assign any meaning to metadata; it
     is solely for the use of user code.
    :type metadata: list[~azure.batch.models.MetadataItem]
    :param stats: The lifetime resource usage statistics for the job schedule.
     The statistics may not be immediately available. The Batch service
     performs periodic roll-up of statistics. The typical delay is about 30
     minutes.
    :type stats: ~azure.batch.models.JobScheduleStatistics
    """
    # Maps Python attribute names to wire-format keys and msrest
    # (de)serialization types. Autogenerated - do not edit by hand.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'display_name': {'key': 'displayName', 'type': 'str'},
        'url': {'key': 'url', 'type': 'str'},
        'e_tag': {'key': 'eTag', 'type': 'str'},
        'last_modified': {'key': 'lastModified', 'type': 'iso-8601'},
        'creation_time': {'key': 'creationTime', 'type': 'iso-8601'},
        'state': {'key': 'state', 'type': 'JobScheduleState'},
        'state_transition_time': {'key': 'stateTransitionTime', 'type': 'iso-8601'},
        'previous_state': {'key': 'previousState', 'type': 'JobScheduleState'},
        'previous_state_transition_time': {'key': 'previousStateTransitionTime', 'type': 'iso-8601'},
        'schedule': {'key': 'schedule', 'type': 'Schedule'},
        'job_specification': {'key': 'jobSpecification', 'type': 'JobSpecification'},
        'execution_info': {'key': 'executionInfo', 'type': 'JobScheduleExecutionInformation'},
        'metadata': {'key': 'metadata', 'type': '[MetadataItem]'},
        'stats': {'key': 'stats', 'type': 'JobScheduleStatistics'},
    }
    def __init__(self, id=None, display_name=None, url=None, e_tag=None, last_modified=None, creation_time=None, state=None, state_transition_time=None, previous_state=None, previous_state_transition_time=None, schedule=None, job_specification=None, execution_info=None, metadata=None, stats=None):
        # All parameters are optional and stored verbatim on the instance.
        super(CloudJobSchedule, self).__init__()
        self.id = id
        self.display_name = display_name
        self.url = url
        self.e_tag = e_tag
        self.last_modified = last_modified
        self.creation_time = creation_time
        self.state = state
        self.state_transition_time = state_transition_time
        self.previous_state = previous_state
        self.previous_state_transition_time = previous_state_transition_time
        self.schedule = schedule
        self.job_specification = job_specification
        self.execution_info = execution_info
        self.metadata = metadata
        self.stats = stats
| 52.990741 | 298 | 0.687227 |
from msrest.serialization import Model
class CloudJobSchedule(Model):
    """A job schedule that allows recurring jobs by specifying when to run
    jobs and a specification used to create each job (Azure Batch msrest
    model; all constructor parameters are optional)."""
    # Maps Python attribute names to wire-format keys and msrest
    # (de)serialization types.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'display_name': {'key': 'displayName', 'type': 'str'},
        'url': {'key': 'url', 'type': 'str'},
        'e_tag': {'key': 'eTag', 'type': 'str'},
        'last_modified': {'key': 'lastModified', 'type': 'iso-8601'},
        'creation_time': {'key': 'creationTime', 'type': 'iso-8601'},
        'state': {'key': 'state', 'type': 'JobScheduleState'},
        'state_transition_time': {'key': 'stateTransitionTime', 'type': 'iso-8601'},
        'previous_state': {'key': 'previousState', 'type': 'JobScheduleState'},
        'previous_state_transition_time': {'key': 'previousStateTransitionTime', 'type': 'iso-8601'},
        'schedule': {'key': 'schedule', 'type': 'Schedule'},
        'job_specification': {'key': 'jobSpecification', 'type': 'JobSpecification'},
        'execution_info': {'key': 'executionInfo', 'type': 'JobScheduleExecutionInformation'},
        'metadata': {'key': 'metadata', 'type': '[MetadataItem]'},
        'stats': {'key': 'stats', 'type': 'JobScheduleStatistics'},
    }
    def __init__(self, id=None, display_name=None, url=None, e_tag=None, last_modified=None, creation_time=None, state=None, state_transition_time=None, previous_state=None, previous_state_transition_time=None, schedule=None, job_specification=None, execution_info=None, metadata=None, stats=None):
        # Parameters are stored verbatim on the instance.
        super(CloudJobSchedule, self).__init__()
        self.id = id
        self.display_name = display_name
        self.url = url
        self.e_tag = e_tag
        self.last_modified = last_modified
        self.creation_time = creation_time
        self.state = state
        self.state_transition_time = state_transition_time
        self.previous_state = previous_state
        self.previous_state_transition_time = previous_state_transition_time
        self.schedule = schedule
        self.job_specification = job_specification
        self.execution_info = execution_info
        self.metadata = metadata
        self.stats = stats
| true | true |
f725565a4176de2b5beb230d5de6c67dc05f4158 | 4,574 | py | Python | troposphere/iotwireless.py | vasinov/troposphere | db117248dfb0fc500ae9d10db34c42608240bb8d | [
"BSD-2-Clause"
] | null | null | null | troposphere/iotwireless.py | vasinov/troposphere | db117248dfb0fc500ae9d10db34c42608240bb8d | [
"BSD-2-Clause"
] | null | null | null | troposphere/iotwireless.py | vasinov/troposphere | db117248dfb0fc500ae9d10db34c42608240bb8d | [
"BSD-2-Clause"
] | null | null | null | # Copyright (c) 2012-2021, Mark Peek <mark@peek.org>
# All rights reserved.
#
# See LICENSE file for full license.
#
# *** Do not modify - this file is autogenerated ***
# Resource specification version: 31.0.0
from troposphere import Tags
from . import AWSObject, AWSProperty
from .validators import boolean, integer
class Destination(AWSObject):
    """CloudFormation resource ``AWS::IoTWireless::Destination``."""
    resource_type = "AWS::IoTWireless::Destination"
    # props maps property name -> (expected type, required flag), per the
    # troposphere convention.
    props = {
        "Description": (str, False),
        "Expression": (str, True),
        "ExpressionType": (str, True),
        "Name": (str, True),
        "RoleArn": (str, True),
        "Tags": (Tags, False),
    }
class LoRaWANDeviceProfile(AWSProperty):
    """``LoRaWAN`` property block used by :class:`DeviceProfile`."""
    props = {
        "ClassBTimeout": (integer, False),
        "ClassCTimeout": (integer, False),
        "MacVersion": (str, False),
        "MaxDutyCycle": (integer, False),
        "MaxEirp": (integer, False),
        "PingSlotDr": (integer, False),
        "PingSlotFreq": (integer, False),
        "PingSlotPeriod": (integer, False),
        "RegParamsRevision": (str, False),
        "RfRegion": (str, False),
        "Supports32BitFCnt": (boolean, False),
        "SupportsClassB": (boolean, False),
        "SupportsClassC": (boolean, False),
        "SupportsJoin": (boolean, False),
    }
class DeviceProfile(AWSObject):
    """CloudFormation resource ``AWS::IoTWireless::DeviceProfile``."""
    resource_type = "AWS::IoTWireless::DeviceProfile"
    props = {
        "LoRaWAN": (LoRaWANDeviceProfile, False),
        "Name": (str, False),
        "Tags": (Tags, False),
    }
class LoRaWANServiceProfile(AWSProperty):
    """``LoRaWAN`` property block used by :class:`ServiceProfile`."""
    props = {
        "AddGwMetadata": (boolean, False),
        "ChannelMask": (str, False),
        "DevStatusReqFreq": (integer, False),
        "DlBucketSize": (integer, False),
        "DlRate": (integer, False),
        "DlRatePolicy": (str, False),
        "DrMax": (integer, False),
        "DrMin": (integer, False),
        "HrAllowed": (boolean, False),
        "MinGwDiversity": (integer, False),
        "NwkGeoLoc": (boolean, False),
        "PrAllowed": (boolean, False),
        "RaAllowed": (boolean, False),
        "ReportDevStatusBattery": (boolean, False),
        "ReportDevStatusMargin": (boolean, False),
        "TargetPer": (integer, False),
        "UlBucketSize": (integer, False),
        "UlRate": (integer, False),
        "UlRatePolicy": (str, False),
    }
class ServiceProfile(AWSObject):
    """CloudFormation resource ``AWS::IoTWireless::ServiceProfile``."""
    resource_type = "AWS::IoTWireless::ServiceProfile"
    props = {
        "LoRaWAN": (LoRaWANServiceProfile, False),
        "Name": (str, False),
        "Tags": (Tags, False),
    }
class SessionKeysAbpV10x(AWSProperty):
    """Session keys for LoRaWAN v1.0.x ABP activation (used by AbpV10x)."""
    props = {
        "AppSKey": (str, True),
        "NwkSKey": (str, True),
    }
class AbpV10x(AWSProperty):
    """``AbpV10x`` activation block used by :class:`LoRaWANDevice`."""
    props = {
        "DevAddr": (str, True),
        "SessionKeys": (SessionKeysAbpV10x, True),
    }
class SessionKeysAbpV11(AWSProperty):
    """Session keys for LoRaWAN v1.1 ABP activation (used by AbpV11)."""
    props = {
        "AppSKey": (str, True),
        "FNwkSIntKey": (str, True),
        "NwkSEncKey": (str, True),
        "SNwkSIntKey": (str, True),
    }
class AbpV11(AWSProperty):
    """``AbpV11`` activation block used by :class:`LoRaWANDevice`."""
    props = {
        "DevAddr": (str, True),
        "SessionKeys": (SessionKeysAbpV11, True),
    }
class OtaaV10x(AWSProperty):
    """``OtaaV10x`` (over-the-air activation, v1.0.x) block of LoRaWANDevice."""
    props = {
        "AppEui": (str, True),
        "AppKey": (str, True),
    }
class OtaaV11(AWSProperty):
    """``OtaaV11`` (over-the-air activation, v1.1) block of LoRaWANDevice."""
    props = {
        "AppKey": (str, True),
        "JoinEui": (str, True),
        "NwkKey": (str, True),
    }
class LoRaWANDevice(AWSProperty):
    """``LoRaWAN`` property block used by :class:`WirelessDevice`."""
    props = {
        "AbpV10x": (AbpV10x, False),
        "AbpV11": (AbpV11, False),
        "DevEui": (str, False),
        "DeviceProfileId": (str, False),
        "OtaaV10x": (OtaaV10x, False),
        "OtaaV11": (OtaaV11, False),
        "ServiceProfileId": (str, False),
    }
class WirelessDevice(AWSObject):
    """CloudFormation resource ``AWS::IoTWireless::WirelessDevice``."""
    resource_type = "AWS::IoTWireless::WirelessDevice"
    props = {
        "Description": (str, False),
        "DestinationName": (str, True),
        "LastUplinkReceivedAt": (str, False),
        "LoRaWAN": (LoRaWANDevice, False),
        "Name": (str, False),
        "Tags": (Tags, False),
        "ThingArn": (str, False),
        "Type": (str, True),
    }
class LoRaWANGateway(AWSProperty):
    """``LoRaWAN`` property block used by :class:`WirelessGateway`."""
    props = {
        "GatewayEui": (str, True),
        "RfRegion": (str, True),
    }
class WirelessGateway(AWSObject):
    """CloudFormation resource ``AWS::IoTWireless::WirelessGateway``."""
    resource_type = "AWS::IoTWireless::WirelessGateway"
    props = {
        "Description": (str, False),
        "LastUplinkReceivedAt": (str, False),
        "LoRaWAN": (LoRaWANGateway, True),
        "Name": (str, False),
        "Tags": (Tags, False),
        "ThingArn": (str, False),
    }
| 25.131868 | 55 | 0.566463 |
from troposphere import Tags
from . import AWSObject, AWSProperty
from .validators import boolean, integer
class Destination(AWSObject):
    """CloudFormation resource ``AWS::IoTWireless::Destination``."""
    resource_type = "AWS::IoTWireless::Destination"
    # props maps property name -> (expected type, required flag), per the
    # troposphere convention.
    props = {
        "Description": (str, False),
        "Expression": (str, True),
        "ExpressionType": (str, True),
        "Name": (str, True),
        "RoleArn": (str, True),
        "Tags": (Tags, False),
    }
class LoRaWANDeviceProfile(AWSProperty):
    """``LoRaWAN`` property block used by :class:`DeviceProfile`."""
    props = {
        "ClassBTimeout": (integer, False),
        "ClassCTimeout": (integer, False),
        "MacVersion": (str, False),
        "MaxDutyCycle": (integer, False),
        "MaxEirp": (integer, False),
        "PingSlotDr": (integer, False),
        "PingSlotFreq": (integer, False),
        "PingSlotPeriod": (integer, False),
        "RegParamsRevision": (str, False),
        "RfRegion": (str, False),
        "Supports32BitFCnt": (boolean, False),
        "SupportsClassB": (boolean, False),
        "SupportsClassC": (boolean, False),
        "SupportsJoin": (boolean, False),
    }
class DeviceProfile(AWSObject):
    """CloudFormation resource ``AWS::IoTWireless::DeviceProfile``."""
    resource_type = "AWS::IoTWireless::DeviceProfile"
    props = {
        "LoRaWAN": (LoRaWANDeviceProfile, False),
        "Name": (str, False),
        "Tags": (Tags, False),
    }
class LoRaWANServiceProfile(AWSProperty):
    """``LoRaWAN`` property block used by :class:`ServiceProfile`."""
    props = {
        "AddGwMetadata": (boolean, False),
        "ChannelMask": (str, False),
        "DevStatusReqFreq": (integer, False),
        "DlBucketSize": (integer, False),
        "DlRate": (integer, False),
        "DlRatePolicy": (str, False),
        "DrMax": (integer, False),
        "DrMin": (integer, False),
        "HrAllowed": (boolean, False),
        "MinGwDiversity": (integer, False),
        "NwkGeoLoc": (boolean, False),
        "PrAllowed": (boolean, False),
        "RaAllowed": (boolean, False),
        "ReportDevStatusBattery": (boolean, False),
        "ReportDevStatusMargin": (boolean, False),
        "TargetPer": (integer, False),
        "UlBucketSize": (integer, False),
        "UlRate": (integer, False),
        "UlRatePolicy": (str, False),
    }
class ServiceProfile(AWSObject):
    """CloudFormation resource ``AWS::IoTWireless::ServiceProfile``."""
    resource_type = "AWS::IoTWireless::ServiceProfile"
    props = {
        "LoRaWAN": (LoRaWANServiceProfile, False),
        "Name": (str, False),
        "Tags": (Tags, False),
    }
class SessionKeysAbpV10x(AWSProperty):
    """Session keys for LoRaWAN v1.0.x ABP activation (used by AbpV10x)."""
    props = {
        "AppSKey": (str, True),
        "NwkSKey": (str, True),
    }
class AbpV10x(AWSProperty):
    """``AbpV10x`` activation block used by :class:`LoRaWANDevice`."""
    props = {
        "DevAddr": (str, True),
        "SessionKeys": (SessionKeysAbpV10x, True),
    }
class SessionKeysAbpV11(AWSProperty):
    """Session keys for LoRaWAN v1.1 ABP activation (used by AbpV11)."""
    props = {
        "AppSKey": (str, True),
        "FNwkSIntKey": (str, True),
        "NwkSEncKey": (str, True),
        "SNwkSIntKey": (str, True),
    }
class AbpV11(AWSProperty):
    """``AbpV11`` activation block used by :class:`LoRaWANDevice`."""
    props = {
        "DevAddr": (str, True),
        "SessionKeys": (SessionKeysAbpV11, True),
    }
class OtaaV10x(AWSProperty):
    """``OtaaV10x`` (over-the-air activation, v1.0.x) block of LoRaWANDevice."""
    props = {
        "AppEui": (str, True),
        "AppKey": (str, True),
    }
class OtaaV11(AWSProperty):
    """``OtaaV11`` (over-the-air activation, v1.1) block of LoRaWANDevice."""
    props = {
        "AppKey": (str, True),
        "JoinEui": (str, True),
        "NwkKey": (str, True),
    }
class LoRaWANDevice(AWSProperty):
    """``LoRaWAN`` property block used by :class:`WirelessDevice`."""
    props = {
        "AbpV10x": (AbpV10x, False),
        "AbpV11": (AbpV11, False),
        "DevEui": (str, False),
        "DeviceProfileId": (str, False),
        "OtaaV10x": (OtaaV10x, False),
        "OtaaV11": (OtaaV11, False),
        "ServiceProfileId": (str, False),
    }
class WirelessDevice(AWSObject):
    """CloudFormation resource ``AWS::IoTWireless::WirelessDevice``."""
    resource_type = "AWS::IoTWireless::WirelessDevice"
    props = {
        "Description": (str, False),
        "DestinationName": (str, True),
        "LastUplinkReceivedAt": (str, False),
        "LoRaWAN": (LoRaWANDevice, False),
        "Name": (str, False),
        "Tags": (Tags, False),
        "ThingArn": (str, False),
        "Type": (str, True),
    }
class LoRaWANGateway(AWSProperty):
    """``LoRaWAN`` property block used by :class:`WirelessGateway`."""
    props = {
        "GatewayEui": (str, True),
        "RfRegion": (str, True),
    }
class WirelessGateway(AWSObject):
    """CloudFormation resource ``AWS::IoTWireless::WirelessGateway``."""
    resource_type = "AWS::IoTWireless::WirelessGateway"
    props = {
        "Description": (str, False),
        "LastUplinkReceivedAt": (str, False),
        "LoRaWAN": (LoRaWANGateway, True),
        "Name": (str, False),
        "Tags": (Tags, False),
        "ThingArn": (str, False),
    }
| true | true |
f7255693150e9fa60c4098be227a094b573e4ddb | 2,130 | py | Python | tests/test_http.py | flome/uproot | eb2ae1ffe6fb2c2ce8cb7cbdc0919d5b51c0ff0f | [
"BSD-3-Clause"
] | null | null | null | tests/test_http.py | flome/uproot | eb2ae1ffe6fb2c2ce8cb7cbdc0919d5b51c0ff0f | [
"BSD-3-Clause"
] | null | null | null | tests/test_http.py | flome/uproot | eb2ae1ffe6fb2c2ce8cb7cbdc0919d5b51c0ff0f | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# BSD 3-Clause License; see https://github.com/scikit-hep/uproot/blob/master/LICENSE
import pytest
import mock
HTTPError = pytest.importorskip('requests.exceptions').HTTPError
import uproot
# Sample ROOT file available both locally and over HTTP from scikit-hep.org.
FILE = "foriter"
LOCAL = "tests/samples/{FILE}.root".format(FILE=FILE)
URL = "http://scikit-hep.org/uproot/examples/{FILE}.root".format(FILE=FILE)
# Same file behind HTTP basic authentication, plus the credentials for it.
URL_AUTH = "http://scikit-hep.org/uproot/authentication/{FILE}.root".format(FILE=FILE)
AUTH = ("scikit-hep", "uproot")
def mock_get_local_instead_of_http(url="", headers={}, auth=None, **kwargs):
    """Stand-in for requests.get that serves the local sample file.

    URL yields 200 unconditionally; URL_AUTH yields 200 only with the right
    credentials and 401 otherwise. Unknown URLs return None, as before.
    """

    class MockResponse:
        def __init__(self, status_code):
            self.status_code = status_code
            if status_code == 200:
                with open(LOCAL, "rb") as f:
                    self.content = f.read()
                self.headers = {"Content-Range": str(len(self.content))}

        def raise_for_status(self):
            # Only the authentication failure raises; 200 is a no-op.
            if self.status_code == 401:
                raise HTTPError

    if url == URL:
        return MockResponse(200)
    if url == URL_AUTH and auth == AUTH:
        return MockResponse(200)
    if url == URL_AUTH:
        # Covers both auth=None and wrong credentials.
        return MockResponse(401)
@mock.patch("requests.get", mock_get_local_instead_of_http)
class Test(object):
    """uproot.open over HTTP with and without basic-auth credentials.

    requests.get is patched, so no real network traffic occurs.
    """

    def test_no_auth_needed_no_auth(self):
        assert type(uproot.open(URL)) == uproot.rootio.ROOTDirectory

    def test_no_auth_needed_with_auth(self):
        assert type(uproot.open(URL, httpsource={"auth": AUTH})) == uproot.rootio.ROOTDirectory

    def test_auth_needed_no_auth(self):
        with pytest.raises(HTTPError):
            uproot.open(URL_AUTH)

    def test_auth_needed_correct_auth(self):
        assert type(uproot.open(URL_AUTH, httpsource={"auth": AUTH})) == uproot.rootio.ROOTDirectory

    def test_auth_needed_wrong_auth(self):
        with pytest.raises(HTTPError):
            uproot.open(URL_AUTH, httpsource={"auth": ("", "")})
| 34.354839 | 86 | 0.643662 |
import pytest
import mock
HTTPError = pytest.importorskip('requests.exceptions').HTTPError
import uproot
FILE = "foriter"
LOCAL = "tests/samples/{FILE}.root".format(FILE=FILE)
URL = "http://scikit-hep.org/uproot/examples/{FILE}.root".format(FILE=FILE)
URL_AUTH = "http://scikit-hep.org/uproot/authentication/{FILE}.root".format(FILE=FILE)
AUTH = ("scikit-hep", "uproot")
def mock_get_local_instead_of_http(url="", headers={}, auth=None, **kwargs):
class MockResponse:
def __init__(self, status_code):
self.status_code = status_code
if self.status_code == 200:
with open(LOCAL, "rb") as f:
self.content = f.read()
self.headers = {"Content-Range": str(len(self.content))}
def raise_for_status(self):
if self.status_code == 401:
raise HTTPError
elif self.status_code == 200:
pass
if url == URL:
return MockResponse(200)
elif url == URL_AUTH and auth == None:
return MockResponse(401)
elif url == URL_AUTH and auth == AUTH:
return MockResponse(200)
elif url == URL_AUTH:
return MockResponse(401)
@mock.patch("requests.get", mock_get_local_instead_of_http)
class Test(object):
def test_no_auth_needed_no_auth(self):
f = uproot.open(URL)
assert type(f) == uproot.rootio.ROOTDirectory
def test_no_auth_needed_with_auth(self):
f = uproot.open(URL, httpsource={"auth": AUTH})
assert type(f) == uproot.rootio.ROOTDirectory
def test_auth_needed_no_auth(self):
with pytest.raises(HTTPError):
f = uproot.open(URL_AUTH)
def test_auth_needed_correct_auth(self):
f = uproot.open(URL_AUTH, httpsource={"auth": AUTH})
assert type(f) == uproot.rootio.ROOTDirectory
def test_auth_needed_wrong_auth(self):
with pytest.raises(HTTPError):
f = uproot.open(URL_AUTH, httpsource={"auth": ("", "")})
| true | true |
f72557569101118345344ad6a9f06be139d1e4a8 | 424 | py | Python | core/migrations/0036_alter_event_submission_type.py | saggins/lynbrook-app-backend | d5bad6e0742853bb39c5a15d3b7332b7114b671d | [
"MIT"
] | null | null | null | core/migrations/0036_alter_event_submission_type.py | saggins/lynbrook-app-backend | d5bad6e0742853bb39c5a15d3b7332b7114b671d | [
"MIT"
] | 1 | 2022-02-17T07:01:51.000Z | 2022-02-17T07:01:51.000Z | core/migrations/0036_alter_event_submission_type.py | saggins/lynbrook-app-backend | d5bad6e0742853bb39c5a15d3b7332b7114b671d | [
"MIT"
] | 1 | 2022-02-17T05:16:58.000Z | 2022-02-17T05:16:58.000Z | # Generated by Django 3.2.5 on 2021-08-27 09:16
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: change ``Event.submission_type`` to an
    IntegerField with choices Code (1) / File (2), defaulting to Code."""
    dependencies = [
        ('core', '0035_alter_event_code'),
    ]
    operations = [
        migrations.AlterField(
            model_name='event',
            name='submission_type',
            field=models.IntegerField(choices=[(1, 'Code'), (2, 'File')], default=1),
        ),
    ]
| 22.315789 | 85 | 0.589623 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0035_alter_event_code'),
]
operations = [
migrations.AlterField(
model_name='event',
name='submission_type',
field=models.IntegerField(choices=[(1, 'Code'), (2, 'File')], default=1),
),
]
| true | true |
f7255769c302384481f7ccaa713331893763eddb | 2,983 | py | Python | core/brain/dialog.py | vsilent/smarty-bot | 963cba05433be14494ba339343c9903ccab3c37d | [
"MIT"
] | 1 | 2016-10-08T09:01:05.000Z | 2016-10-08T09:01:05.000Z | core/brain/dialog.py | vsilent/smarty-bot | 963cba05433be14494ba339343c9903ccab3c37d | [
"MIT"
] | 1 | 2019-09-24T09:56:52.000Z | 2019-09-24T09:56:52.000Z | core/brain/dialog.py | vsilent/smarty-bot | 963cba05433be14494ba339343c9903ccab3c37d | [
"MIT"
def start_dialog(text):
    """Run one confirm-and-answer voice interaction.

    If *text* is None the request is first captured with
    ``recognize_by_google()``; if nothing is recognized the dialog is
    aborted.  A request the bot has not confirmed before (Confirm state
    0/None) is read back to the user for confirmation and then answered
    by searching the internet; an already-known request is played from
    the local cache.

    (Fixed: the original definition line was corrupted by dataset
    metadata fused onto the ``def`` statement.)
    """
    if text is None:
        text = recognize_by_google()
        if text is None:
            return
    logging.debug("You said: " + text)
    c = Confirm(text)
    state = c.get_state(sentence=text)
    logging.debug(type(state))
    logging.debug(state)
    if (state == 0) or (state is None):
        # Unknown request: read it back and ask for confirmation.
        sentence = 'DID YOU SAY' + text.upper() + '?'
        say(sentence)
        logging.debug('start confirm')
        listen()
        # Keep the last utterance around for debugging.
        os.system('mv ' + settings.app_dirs['tmp_input_audio_dir'] + 'speech.flac' + ' ' + settings.app_dirs['tmp_input_audio_dir'] + 'last-speech.flac')
        confirmation = ask_julius()
        if (confirmation is not None) and ('yes' in confirmation.strip().lower()):
            s = 'You said %s' % confirmation
            _utils = Utils()
            path = _utils.get_full_path_to_module_by_request(text)
            # Seed the module directory with the default reaction files.
            if not os.path.isfile(path + '/reaction.py'):
                _utils.copy_default_reaction_files(path + '/')
            logging.debug(s)
            say('OKAY, NICE!')
            c.confirm(1)
            logging.debug('Searching for media in internet...')
            say('NOTHING FOUND. IM TRYING TO FIND INFORMATION IN THE INTERNET!')
            link_to_audio = search_www(text)
            logging.debug(text)
            downloaded = _utils.download_audio_resource(link_to_audio, text)
            if downloaded:
                play(text)
            else:
                say("SORRY !, COULD NOT FIND, MEDIA, FILE, AT WIKI WEBSITE")
                suggest_info(text)
        elif confirmation is not None and 'no' in confirmation.strip().lower():
            say('SORRY, PLEASE, COME CLOSER, AND, REPEAT YOUR QUESTION')
        else:
            say('PLEASE ASK AGAIN')
        # Clean up temporary recordings before the next round.
        os.system('rm -f ' + settings.app_dirs['tmp_input_audio_dir'] + '*.wav ')
    else:
        # Already confirmed previously: play the cached answer.
        play(text)
    finish_dialog()
def search_www(text_to_search):
    """Look up *text_to_search* online and return a wiki audio link.

    Runs a web search, extracts a wiki page link from the results, and
    records the page/audio metadata via Utils.  Returns the audio URL on
    success, otherwise False.
    """
    results = search(text_to_search)
    if not results:
        say('OOPS, COULD NOT CONNECT GOOGLE')
        return False
    wiki = Wiki()
    page_link = wiki.find_resourse_link(results)
    if not page_link:
        return False
    audio_link = wiki.find_audio_resourse(page_link)
    # Persist what we found so a later request can skip the search.
    info = {
        'audio_external': audio_link,
        'wiki_external': page_link,
        'audio_local': '',
    }
    Utils().save_file_json_info(text_to_search, info)
    return audio_link if audio_link else False
| 35.511905 | 153 | 0.584982 | def start_dialog(text):
if text is None:
text = recognize_by_google()
if text is None:
return
logging.debug( "You said: " + text )
c = Confirm(text)
state = c.get_state( sentence=text )
logging.debug(type(state))
logging.debug(state)
if(( state == 0) or (state is None)):
sentence = 'DID YOU SAY' + text.upper() + '?'
say(sentence)
logging.debug('start confirm')
listen()
os.system('mv ' + settings.app_dirs['tmp_input_audio_dir'] + 'speech.flac' + ' ' + settings.app_dirs['tmp_input_audio_dir'] + 'last-speech.flac')
confirmation = ask_julius()
if (confirmation is not None) and ('yes' in confirmation.strip().lower()):
s = 'You said %s' % confirmation
_utils = Utils()
path = _utils.get_full_path_to_module_by_request(text)
if not os.path.isfile( path + '/reaction.py' ):
_utils.copy_default_reaction_files( path + '/' )
logging.debug(s)
say('OKAY, NICE!')
c.confirm(1)
logging.debug('Searching for media in internet...')
say('NOTHING FOUND. IM TRYING TO FIND INFORMATION IN THE INTERNET!')
link_to_audio = search_www(text)
logging.debug(text)
downloaded = _utils.download_audio_resource(link_to_audio, text)
if downloaded:
play(text)
else:
say("SORRY !, COULD NOT FIND, MEDIA, FILE, AT WIKI WEBSITE")
suggest_info(text)
elif confirmation is not None and 'no' in confirmation.strip().lower():
say('SORRY, PLEASE, COME CLOSER, AND, REPEAT YOUR QUESTION')
else:
say('PLEASE ASK AGAIN')
os.system('rm -f ' + settings.app_dirs['tmp_input_audio_dir'] + '*.wav ')
else:
play(text)
finish_dialog()
def search_www( text_to_search ):
json_results = search( text_to_search)
if not json_results:
say('OOPS, COULD NOT CONNECT GOOGLE')
return False
_wiki = Wiki()
wiki_page_link = _wiki.find_resourse_link(json_results)
if wiki_page_link:
link_to_audio = _wiki.find_audio_resourse(wiki_page_link)
info = { 'audio_external': link_to_audio
,'wiki_external' : wiki_page_link
,'audio_local' : ''
}
_utils = Utils()
_utils.save_file_json_info(text_to_search, info)
if link_to_audio:
return link_to_audio
return False
| true | true |
f72558e0ae0df4436c838a4cc5685909f6d3e117 | 711 | py | Python | distribute_repo.py | ewhitesides/pulp_operations | b6a3541559e48c717926b245bbbf2dd87638e093 | [
"MIT"
] | null | null | null | distribute_repo.py | ewhitesides/pulp_operations | b6a3541559e48c717926b245bbbf2dd87638e093 | [
"MIT"
] | 1 | 2021-06-17T04:35:05.000Z | 2021-06-17T04:35:05.000Z | distribute_repo.py | ewhitesides/pulp_operations | b6a3541559e48c717926b245bbbf2dd87638e093 | [
"MIT"
] | null | null | null | """
script to distribute repos from repo_data.py
"""
import urllib3
import pulp_operations
from repo_data import repo_data
#disable ssl warnings for now
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
#release latest version of the repo to distribution 'latest'
for os in repo_data:
for repo in repo_data[os]:
repo_name = f"{os}-{repo}"
dist_name = f"{repo_name}-latest"
pulp_operations.release(repo_name, 0, dist_name)
#output distribution url info (optional)
for os in repo_data:
for repo in repo_data[os]:
repo_name = f"{os}-{repo}"
dist_name = f"{repo_name}-latest"
pulp_operations.distribution.get_distribution_url(dist_name)
| 28.44 | 68 | 0.729958 |
import urllib3
import pulp_operations
from repo_data import repo_data
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
for os in repo_data:
for repo in repo_data[os]:
repo_name = f"{os}-{repo}"
dist_name = f"{repo_name}-latest"
pulp_operations.release(repo_name, 0, dist_name)
for os in repo_data:
for repo in repo_data[os]:
repo_name = f"{os}-{repo}"
dist_name = f"{repo_name}-latest"
pulp_operations.distribution.get_distribution_url(dist_name)
| true | true |
f72558ec957eca8761389e372a0381b0b817ae58 | 21,798 | py | Python | Old/hoop_detection_angle.py | multirotorsociety/SAFMC-19-D2-Autonomous-Drone | fd9f0fae5d7cbf618b327224e06a7f459612b4ca | [
"MIT"
] | 6 | 2019-04-01T02:38:40.000Z | 2021-06-05T18:23:06.000Z | Old/hoop_detection_angle.py | multirotorsociety/SAFMC-19-D2-Autonomous-Drone | fd9f0fae5d7cbf618b327224e06a7f459612b4ca | [
"MIT"
] | null | null | null | Old/hoop_detection_angle.py | multirotorsociety/SAFMC-19-D2-Autonomous-Drone | fd9f0fae5d7cbf618b327224e06a7f459612b4ca | [
"MIT"
] | 1 | 2019-09-01T08:58:28.000Z | 2019-09-01T08:58:28.000Z | from __future__ import print_function
import time
import math
import thread
# Dk imports
from pymavlink import mavutil
from dronekit import connect, VehicleMode, LocationGlobal, LocationGlobalRelative
# Mux and TOF imports
import I2CMultiplexer
import VL53L1X
# CV imports
import cv2
import numpy as np
from picamera.array import PiRGBArray
from picamera import PiCamera
from fractions import Fraction
from PIL import Image
import random
from sympy import Point, Polygon, pi
#cap = cv2.VideoCapture(0)
# --- Camera setup: 426x240 @ 24 fps with fixed exposure/white balance ---
camera = PiCamera()
camera.resolution = (426, 240)
camera.framerate = 24
camera.exposure_mode = 'auto'
camera.exposure_compensation = -3
camera.drc_strength = 'off'
camera.still_stats = False
camera.awb_mode = 'off'
# Fixed white-balance gains (red, blue) so the red-hoop threshold stays stable.
camera.awb_gains = (Fraction(167, 103), Fraction(27,16))
rawCapture = PiRGBArray(camera, size=(426, 240))
# Record the annotated frames for post-flight review.
out = cv2.VideoWriter(str(time.time()) + ".avi",cv2.VideoWriter_fourcc('M','J','P','G'), 10, (426, 240))
# allow the camera to warmup
time.sleep(0.1)
# Connect to Vehicle
connection_string = '/dev/ttyUSB0'
sitl = None
# Start SITL (software-in-the-loop simulator) if no connection string specified
if not connection_string:
    import dronekit_sitl
    sitl = dronekit_sitl.start_default()
    connection_string = sitl.connection_string()
# Connect to the Vehicle
print('Connecting to vehicle on: %s' % connection_string)
vehicle = connect(connection_string, wait_ready=True, baud=57600)
# Latest time-of-flight range readings in mm, updated by get_I2C_readings():
distance_in_mm_N = 0 # North Sensor
distance_in_mm_S = 0 # South Sensor
distance_in_mm_E = 0 # East Sensor
distance_in_mm_W = 0 # West Sensor
distance_in_mm_45 = 0 # 45 degree south east sensor
# Pixel offset of the detected hoop centre from the frame centre,
# updated continuously by detect_circle() (None when no hoop is seen).
dX = 0
dY = 0
#Create an I2C Multiplexer object, the address of I2C Multiplexer is 0X70
I2CMulti = I2CMultiplexer.I2CMultiplexer(0x70)
# Init TOF obj
tof = VL53L1X.VL53L1X()
# Start the TOF sensors on their respective multiplexer ports
try:
    # for i in [0,2,4,6]:
    for i in [0,1,2,7,3]:
        I2CMulti.selectPort(i)
        tof = VL53L1X.VL53L1X(i2c_bus=1, i2c_address=0x29)
        tof.open() # Initialise the i2c bus and configure the sensor
        tof.start_ranging(3) # Start ranging, 1 = Short Range, 2 = Medium Range, 3 = Long Range
except:
    print("port init failed")
def detect_circle():
    """Continuously detect the red hoop in the camera stream.

    Thresholds a "redness" score of each frame, samples three detected
    pixels and takes the circumcentre of that triangle as the hoop
    centre.  Writes the pixel offset of the hoop centre from the frame
    centre into the globals dX/dY (dY positive = hoop above centre);
    both become None when no centre has ever been found.  Annotated
    frames are written to the module-level `out` video.  Never returns;
    intended to run on a background thread.
    """
    global dX
    global dY
    for img in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
        for i in range(5): # Clears the 5 frame buffer
            frame = img.array
        height, width = frame.shape[:2]
        centre = (int(width/2), int(height/2))
        b_channel = np.array(frame[:,:,0]).astype('float')
        g_channel = np.array(frame[:,:,1]).astype('float')
        r_channel = np.array(frame[:,:,2]).astype('float')
        bgr_channel = np.add((np.add(b_channel, g_channel)), r_channel)
        # "Redness" score: red minus the mean of blue and green, scaled to [0, 1].
        img_rec_red2 = np.subtract(r_channel,((b_channel + g_channel)/ 2))
        #img_rec_red2 = np.divide(r_channel, 255)
        img_rec_red2 = np.divide(img_rec_red2,255)
        #img_rec_red2 = np.square(img_rec_red2)
        img_rec_red2[img_rec_red2 < 0.3] = 0
        #dX, dY = 0,0
        trials = 1
        try:
            # Get the array of indices of detected pixels
            thresholded_array = np.argwhere(img_rec_red2 >= 0.3)
            thresholded_list = thresholded_array.tolist()
            #print(thresholded_list)
            if len(thresholded_list) > trials*3:
                # sets the number of trials before averaging to get the centre
                total_centres_X = 0
                total_centres_Y = 0
                hoop_centre = (0,0)
                arr_len_3rd = int(len(thresholded_list) / 3)
                for i in range(trials):
                    # Pick one pixel from each third of the list so the
                    # triangle spans the hoop rather than one edge.
                    r1 = random.randrange(0, int(arr_len_3rd/2))
                    #r2 = random.randrange(0, arr_len_3rd)
                    # rerolls if the same number was rolled
                    #while r2 == r1:
                    r2 = random.randrange(arr_len_3rd, 2*arr_len_3rd)
                    r3 = random.randrange(int(2.5*arr_len_3rd), len(thresholded_list))
                    #while r3 == r1 or r3 == r2:
                    #r3 = random.randrange(0, len(thresholded_list))
                    print(thresholded_list[r1],thresholded_list[r2],thresholded_list[r3])
                    current_centre = Polygon(thresholded_list[r1],thresholded_list[r2],thresholded_list[r3]).circumcenter
                    #print(current_centre)
                    total_centres_X += int(current_centre.y)
                    total_centres_Y += int(current_centre.x)
                    cv2.circle(frame, (thresholded_list[r1][1], thresholded_list[r1][0]), 5, (0, 0, 255), -1)
                    cv2.circle(frame, (thresholded_list[r2][1], thresholded_list[r2][0]), 5, (0, 0, 255), -1)
                    cv2.circle(frame, (thresholded_list[r3][1], thresholded_list[r3][0]), 5, (0, 0, 255), -1)
                cX = int(total_centres_X / trials)
                cY = int(total_centres_Y / trials)
                #print(cX,cY)
        except:
            print("no hoop detected")
        # put text and highlight the center
        try:
            # NOTE(review): cX/cY persist across iterations, so a failed
            # detection this frame silently reuses the previous centre.
            cv2.circle(frame, (cX, cY), 5, (255, 255, 255), -1)
            cv2.line(frame, centre, (cX, cY), (255,0,0), 2)
            #cv2.putText(frame, "centroid", (cX - 25, cY - 25),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
            dX = cX - centre[0]
            dY = centre[1] - cY
            cv2.putText(frame, ("(" + str(dX) + ", " + str(dY) + " )"), (centre[0] - 20, centre[1] - 20),
            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
            #print('Velocities: ' + str(dX) + "," + str(dY))
        except:
            #print("No centre detected")
            #dX = 0
            #dY = 0
            dX = None
            dY = None
        out.write(frame)
        k = cv2.waitKey(1)
        rawCapture.truncate(0)
# Arm and rakeoff to specific altitude
def arm_and_takeoff(aTargetAltitude):
    """
    Arms vehicle and fly to aTargetAltitude.

    aTargetAltitude is in metres.  Altitude progress is read from the
    rangefinder; the function blocks until 95% of the target is reached.
    """
    print("Basic pre-arm checks")
    #Don't try to arm until autopilot is ready
    # while not vehicle.is_armable:
    #     print(" Waiting for vehicle to initialise...")
    #     time.sleep(1)
    print("Arming motors")
    # Copter should arm in GUIDED mode
    vehicle.mode = VehicleMode("GUIDED")
    vehicle.armed = True
    # while not vehicle.armed == True:
    #     print("Not Armed")
    #     time.sleep(0.4)
    # while not vehicle.armed == True:
    #     vehicle.armed = True
    #     print("Not Armed 2")
    #     time.sleep(0.4)
    #Confirm vehicle armed before attempting to take off
    while not vehicle.armed:
        print(" Waiting for arming...")
        time.sleep(1)
    print("Taking off!")
    vehicle.simple_takeoff(aTargetAltitude) # Take off to target altitude
    # Wait until the vehicle reaches a safe height before processing the goto
    # (otherwise the command after Vehicle.simple_takeoff will execute
    # immediately).
    while True:
        print(" Altitude: ", vehicle.rangefinder.distance)
        current_alt = vehicle.rangefinder.distance
        if current_alt > 20:
            # presumably filters bogus rangefinder spikes (>20 m) -- confirm
            current_alt = 0
        print(" Arm state: ", vehicle.armed)
        # Break and return from function just below target altitude.
        if current_alt >= aTargetAltitude * 0.95:
            print("Reached target altitude")
            break
        time.sleep(1)
def goto_position_target_local_ned(north, east, down):
    """
    Send SET_POSITION_TARGET_LOCAL_NED command to request the vehicle fly to a specified
    location in the North, East, Down frame.

    north/east/down are offsets in metres; note "down" is positive
    towards the ground in the NED convention.
    """
    msg = vehicle.message_factory.set_position_target_local_ned_encode(
        0,       # time_boot_ms (not used)
        0, 0,    # target system, target component
        mavutil.mavlink.MAV_FRAME_LOCAL_NED, # frame
        0b0000111111111000, # type_mask (only positions enabled)
        north, east, down,
        0, 0, 0, # x, y, z velocity in m/s (not used)
        0, 0, 0, # x, y, z acceleration (not supported yet, ignored in GCS_Mavlink)
        0, 0)    # yaw, yaw_rate (not supported yet, ignored in GCS_Mavlink)
    # send command to vehicle
    vehicle.send_mavlink(msg)
def get_distance_metres(aLocation1, aLocation2):
    """Approximate ground distance in metres between two global locations.

    Uses the flat-earth approximation from the ArduPilot test code
    (1.113195e5 metres per degree), so it is only accurate over short
    distances and away from the poles.
    """
    delta_lat = aLocation2.lat - aLocation1.lat
    delta_lon = aLocation2.lon - aLocation1.lon
    degrees_apart = math.sqrt(delta_lat * delta_lat + delta_lon * delta_lon)
    return degrees_apart * 1.113195e5
def get_location_metres(original_location, dNorth, dEast):
    """Return a location offset dNorth/dEast metres from *original_location*.

    The result has the same concrete type and the same ``alt`` as the
    input (LocationGlobal or LocationGlobalRelative); any other type
    raises.  Accurate to roughly 10 m within 1 km, except near the
    poles (spherical-earth approximation).
    """
    earth_radius = 6378137.0  # radius of the "spherical" earth, metres
    # Offsets expressed in radians.
    lat_offset = dNorth / earth_radius
    lon_offset = dEast / (earth_radius * math.cos(math.pi * original_location.lat / 180))
    # New position in decimal degrees.
    new_lat = original_location.lat + (lat_offset * 180 / math.pi)
    new_lon = original_location.lon + (lon_offset * 180 / math.pi)
    if type(original_location) is LocationGlobal:
        return LocationGlobal(new_lat, new_lon, original_location.alt)
    if type(original_location) is LocationGlobalRelative:
        return LocationGlobalRelative(new_lat, new_lon, original_location.alt)
    raise Exception("Invalid Location object passed")
def goto(dNorth, dEast, gotoFunction=vehicle.simple_goto):
    """
    Moves the vehicle to a position dNorth metres North and dEast metres East of the current position.

    The method takes a function pointer argument with a single `dronekit.lib.LocationGlobal` parameter for
    the target position. This allows it to be called with different position-setting commands.
    By default it uses the standard method: dronekit.lib.Vehicle.simple_goto().

    The method reports the distance to target every two seconds and
    returns once within 0.11 m or when the mode leaves GUIDED.

    NOTE: the default gotoFunction binds vehicle.simple_goto when this
    ``def`` executes at import time, not per call.
    """
    currentLocation = vehicle.location.global_relative_frame
    targetLocation = get_location_metres(currentLocation, dNorth, dEast)
    targetDistance = get_distance_metres(currentLocation, targetLocation)
    gotoFunction(targetLocation)
    #print "DEBUG: targetLocation: %s" % targetLocation
    #print "DEBUG: targetLocation: %s" % targetDistance
    print("Initiating GOTO")
    while vehicle.mode.name=="GUIDED": #Stop action if we are no longer in guided mode.
        #print "DEBUG: mode: %s" % vehicle.mode.name
        remainingDistance=get_distance_metres(vehicle.location.global_relative_frame, targetLocation)
        print("Distance to target: " + str(remainingDistance))
        if remainingDistance < 0.11: #Just below target, in case of undershoot.
            print("Reached target")
            break;
        time.sleep(2)
# Sends a velocity to the drone at a rate of 2 Hx
def send_global_velocity(velocity_x, velocity_y, velocity_z, duration):
    """
    Move vehicle in direction based on specified velocity vectors.

    velocity_x/y/z are NED-frame velocities in m/s.  The message is
    re-sent ``duration`` times with a 0.5 s pause (~2 Hz), so the
    manoeuvre lasts roughly duration * 0.5 seconds.
    """
    msg = vehicle.message_factory.set_position_target_global_int_encode(
        0,       # time_boot_ms (not used)
        0, 0,    # target system, target component
        mavutil.mavlink.MAV_FRAME_GLOBAL_RELATIVE_ALT_INT, # frame
        0b0000111111000111, # type_mask (only speeds enabled)
        0, # lat_int - X Position in WGS84 frame in 1e7 * meters
        0, # lon_int - Y Position in WGS84 frame in 1e7 * meters
        0, # alt - Altitude in meters in AMSL altitude(not WGS84 if absolute or relative)
        # altitude above terrain if GLOBAL_TERRAIN_ALT_INT
        velocity_x, # X velocity in NED frame in m/s
        velocity_y, # Y velocity in NED frame in m/s
        velocity_z, # Z velocity in NED frame in m/s
        0, 0, 0, # afx, afy, afz acceleration (not supported yet, ignored in GCS_Mavlink)
        0, 0)    # yaw, yaw_rate (not supported yet, ignored in GCS_Mavlink)

    # Re-send on a 0.5 s cycle (~2 Hz; the original comment said 1 Hz).
    for x in range(0,duration):
        vehicle.send_mavlink(msg)
        time.sleep(0.5)
# Sets the Yaw - vehicle will yaw according to the yaw slew rate set in params
# give the vehicle more time (give a 0 velocity vector for x amount of seconds - enough for
# the drone to complete the yaw)
def condition_yaw(heading, relative=False):
    """
    Send MAV_CMD_CONDITION_YAW message to point vehicle at a specified heading (in degrees).

    This method sets an absolute heading by default, but you can set the `relative` parameter
    to `True` to set yaw relative to the current yaw heading.

    By default the yaw of the vehicle will follow the direction of travel. After setting
    the yaw using this function there is no way to return to the default yaw "follow direction
    of travel" behaviour (https://github.com/diydrones/ardupilot/issues/2427)

    For more information see:
    http://copter.ardupilot.com/wiki/common-mavlink-mission-command-messages-mav_cmd/#mav_cmd_condition_yaw

    NOTE: the command returns immediately; the vehicle yaws at its
    configured slew rate, so give it idle time (e.g. a zero-velocity
    vector) to finish the turn.
    """
    if relative:
        is_relative = 1 #yaw relative to direction of travel
    else:
        is_relative = 0 #yaw is an absolute angle
    # create the CONDITION_YAW command using command_long_encode()
    msg = vehicle.message_factory.command_long_encode(
        0, 0,    # target system, target component
        mavutil.mavlink.MAV_CMD_CONDITION_YAW, #command
        0, #confirmation
        heading,    # param 1, yaw in degrees
        0,          # param 2, yaw speed deg/s
        1,          # param 3, direction -1 ccw, 1 cw
        is_relative, # param 4, relative offset 1, absolute angle 0
        0, 0, 0)    # param 5 ~ 7 not used
    # send command to vehicle
    vehicle.send_mavlink(msg)
# The following 2 methods allow for the drone attitude to be directly controlled
# the movement is not OF corrected - avoid usage where possible
def set_attitude(roll_angle = 0.0, pitch_angle = 0.0, yaw_rate = 0.0, thrust = 0.5, duration = 0):
    """
    Directly command vehicle attitude; this movement is NOT optical-flow
    corrected, so avoid it where possible.

    Note that from AC3.3 the message should be re-sent every second (after about 3 seconds
    with no message the velocity will drop back to zero). In AC3.2.1 and earlier the specified
    velocity persists until it is canceled. The code below should work on either version
    (sending the message multiple times does not cause problems).

    The roll and pitch rate cannot be controlled with rate in radian in AC3.4.4 or earlier,
    so you must use quaternion to control the pitch and roll for those vehicles.

    Thrust > 0.5: ascend; Thrust == 0.5: hold altitude; Thrust < 0.5: descend.

    NOTE(review): to_quaternion() as written in this file computes but
    does not return the quaternion -- confirm the attitude field is
    actually populated.
    """
    msg = vehicle.message_factory.set_attitude_target_encode(
        0, # time_boot_ms
        1, # Target system
        1, # Target component
        0b00000000, # Type mask: bit 1 is LSB
        to_quaternion(roll_angle, pitch_angle), # Quaternion
        0, # Body roll rate in radian
        0, # Body pitch rate in radian
        math.radians(yaw_rate), # Body yaw rate in radian
        thrust  # Thrust
    )
    vehicle.send_mavlink(msg)
    # Keep re-sending for the requested duration.
    start = time.time()
    while time.time() - start < duration:
        vehicle.send_mavlink(msg)
        #time.sleep(0.1)
def to_quaternion(roll = 0.0, pitch = 0.0, yaw = 0.0):
    """
    Convert Euler angles in degrees to a quaternion [w, x, y, z].

    Fixes the original, which computed w/x/y/z but never returned them,
    so callers such as set_attitude() received None instead of a
    quaternion.
    """
    t0 = math.cos(math.radians(yaw * 0.5))
    t1 = math.sin(math.radians(yaw * 0.5))
    t2 = math.cos(math.radians(roll * 0.5))
    t3 = math.sin(math.radians(roll * 0.5))
    t4 = math.cos(math.radians(pitch * 0.5))
    t5 = math.sin(math.radians(pitch * 0.5))

    w = t0 * t2 * t4 + t1 * t3 * t5
    x = t0 * t3 * t4 - t1 * t2 * t5
    y = t0 * t2 * t5 + t1 * t3 * t4
    z = t1 * t2 * t4 - t0 * t3 * t5

    return [w, x, y, z]
# Gets the readings from the TOF sensors and updates the distance vars
def get_I2C_readings():
    """Continuously poll the five TOF sensors through the I2C multiplexer.

    Updates the module-level distance_in_mm_* globals every ~50 ms.
    Never returns; intended to run on its own thread.
    """
    global distance_in_mm_N
    global distance_in_mm_S
    global distance_in_mm_E
    global distance_in_mm_W
    global distance_in_mm_45
    while(True):
        I2CMulti.selectPort(0)
        distance_in_mm_N = tof.get_distance() # Grab the range in mm
        I2CMulti.selectPort(3)
        distance_in_mm_S = tof.get_distance() # Grab the range in mm
        I2CMulti.selectPort(7)
        distance_in_mm_E = tof.get_distance() # Grab the range in mm
        I2CMulti.selectPort(2)
        distance_in_mm_W = tof.get_distance() # Grab the range in mm
        I2CMulti.selectPort(1)
        distance_in_mm_45 = tof.get_distance() # Grab the range in mm
        #print("Sensor N distance: " + str(distance_in_mm_N) + " \nSensor S distance: " + str(distance_in_mm_S) + "\nSensor E distance: " + str(distance_in_mm_E) + "\nSensor W distance: " + str(distance_in_mm_W))
        time.sleep(0.05)
def calculate_velocity(ground_heading, angle):
    """Resolve a body-relative bearing into NED x/y velocity components.

    ground_heading is the heading (degrees) recorded at arming, taken as
    "forward"; angle is the desired offset from it in degrees.  Returns
    (vel_x, vel_y) at a fixed speed of 0.2 m/s.

    Fixes in this revision: ``math.radian`` -> ``math.radians`` (the
    original raised AttributeError) and the undefined name
    ``heading_rad`` -> ``rads``.
    """
    rads = math.radians(angle) + math.radians(ground_heading)
    # Keep the combined angle inside (-360, 360) degrees, as intended.
    if rads > math.radians(360):
        rads -= math.radians(360)
    elif rads < -math.radians(360):
        rads += math.radians(360)
    vel_x = (np.cos(rads) / 5)
    vel_y = (np.sin(rads) / 5)
    return vel_x, vel_y
# Start the TOF distance readings before takeoff (currently disabled).
#thread.start_new_thread(get_I2C_readings, ())
# Start the hoop-detection CV loop; it updates the globals dX/dY.
thread.start_new_thread(detect_circle, ())
# Gets vehicle heading on the ground (this is assumed to be the forward heading)
ground_heading = vehicle.heading
# Takeoff to 1.5m
arm_and_takeoff(1.5)
# Corridor Variables
INCREMENT_DISTANCE = 0.1
CORRIDOR_WIDTH_HALVED = 1300 # in mm
THRESHOLD_DISTANCE = 100
lower_bound = CORRIDOR_WIDTH_HALVED - THRESHOLD_DISTANCE
upper_bound = CORRIDOR_WIDTH_HALVED + THRESHOLD_DISTANCE
#print(str(right_X) + str(right_Y))
VEL_SCALE_Y = 0.005 # velocity scaling factor from openCV
VEL_SCALE_X = 0.001
px_threshold = 10 # sets the threshold (pixels) before any velocity is commanded
print(dX, dY)
# Hoop alignment code
x_aligned = False
y_aligned = False
### SINGLE AXIS ALIGNMENT CODE
# while True:
#     if dX < -px_threshold or dX > px_threshold:
#         # remember, negative means up
#         up_vel = -dX*VEL_SCALE
#         if up_vel > 0.05:
#             up_vel = 0.05
#         elif up_vel < 0.05:
#             up_vel = -0.05
#         send_global_velocity(0,0,(up_vel), 2)
#         send_global_velocity(0,0,0,1) # reset the global vels
#     else:
#         break
# print("x aligned")
# while True:
#     if dY < -px_threshold or dY > px_threshold:
#         right_vel_X = -right_X*dY*VEL_SCALE
#         right_vel_Y = -right_Y*dY*VEL_SCALE
#         if right_vel_X > 0.05:
#             right_vel_X = 0.05
#         elif right_vel_X < -0.05:
#             right_vel_X = -0.05
#         if right_vel_Y > 0.05:
#             right_vel_Y = 0.05
#         elif right_vel_Y < -0.05:
#             right_vel_Y = -0.05
#         send_global_velocity(right_vel_X,right_vel_Y,0,2)
#         send_global_velocity(0,0,0,1) # reset the global vels
#     else :
#         break
### DOUBLE AXIS ALIGNMENT
up_vel, right_vel_X, right_vel_Y = 0,0,0
forward_scale = 0.1
stab_seconds_X = 0
stab_seconds_Y = 0
stab_threshold = 1
while (not x_aligned) or (not y_aligned):
    if dX == None:
        # detect_circle() sets dX/dY to None when it has never found a centre.
        print("hoop not detected")
        break
    line_d = (dX**2 + dY**2)**0.5
    if line_d == 0:
        fwd_x, fwd_y = calculate_velocity(ground_heading, 0)
        # NOTE(review): fwd_X/fwd_Y are undefined (case typo for
        # fwd_x/fwd_y -> NameError), and without a `continue` here the
        # division below runs with line_d == 0 (ZeroDivisionError).
        send_global_velocity(fwd_X,fwd_Y,0,2)
        send_global_velocity(0,0,0,1)
    total_scale = forward_scale/line_d
    print(dX, dY)
    if dX < -px_threshold or dX > px_threshold:
        # Vertical (altitude) correction, clamped to +-0.1 m/s.
        x_aligned = False
        up_vel = round((-dX*VEL_SCALE_X), 3)
        if up_vel > 0.1:
            up_vel = 0.1
        elif up_vel < -0.1:
            up_vel = -0.1
        stab_seconds_X = 0
    else:
        # Require stab_threshold consecutive in-band readings before
        # declaring the axis aligned.
        if stab_seconds_X == stab_threshold:
            x_aligned = True
        else:
            x_aligned = False
            stab_seconds_X += 1
        up_vel = 0
    if dY < -px_threshold or dY > px_threshold:
        y_aligned = False
        # NOTE(review): np.arctan2 needs two arguments; this one-argument
        # call raises TypeError. Confirm the intended angle computation.
        angle = math.degrees(np.arctan2(total_scale / line_d))
        right_vel_X, right_vel_Y = calculate_velocity(ground_heading, angle)
        stab_seconds_Y = 0
    else:
        if stab_seconds_Y == stab_threshold:
            y_aligned = True
        else:
            y_aligned = False
            stab_seconds_Y += 1
        right_vel_X = 0
        right_vel_Y = 0
    print("alignment x: " + str(x_aligned))
    print("alignment y: " + str(y_aligned))
    print("velocity: " + str(right_vel_X) + " : " + str(right_vel_Y) + " : " + str(up_vel))
    send_global_velocity(right_vel_X,right_vel_Y,up_vel,2)
    send_global_velocity(0,0,0,1) # reset the global vels
print("Fully Aligned")
send_global_velocity(0,0,0,10) # reset the global vels
# condition_yaw(90, True)
# condition_yaw(-90, True)
print("Landing")
vehicle.mode = VehicleMode("LAND")
# Close vehicle object before exiting script
print("Close vehicle object")
vehicle.close()
# Shut down simulator if it was started.
if sitl:
    sitl.stop()
I2CMulti.i2c.write_byte(0x70,0) # release the I2C multiplexer (select no port)
tof.stop_ranging() # Stop ranging
out.release()
| 34.6 | 206 | 0.65006 | from __future__ import print_function
import time
import math
import thread
from pymavlink import mavutil
from dronekit import connect, VehicleMode, LocationGlobal, LocationGlobalRelative
import I2CMultiplexer
import VL53L1X
import cv2
import numpy as np
from picamera.array import PiRGBArray
from picamera import PiCamera
from fractions import Fraction
from PIL import Image
import random
from sympy import Point, Polygon, pi
camera = PiCamera()
camera.resolution = (426, 240)
camera.framerate = 24
camera.exposure_mode = 'auto'
camera.exposure_compensation = -3
camera.drc_strength = 'off'
camera.still_stats = False
camera.awb_mode = 'off'
camera.awb_gains = (Fraction(167, 103), Fraction(27,16))
rawCapture = PiRGBArray(camera, size=(426, 240))
out = cv2.VideoWriter(str(time.time()) + ".avi",cv2.VideoWriter_fourcc('M','J','P','G'), 10, (426, 240))
time.sleep(0.1)
connection_string = '/dev/ttyUSB0'
sitl = None
if not connection_string:
import dronekit_sitl
sitl = dronekit_sitl.start_default()
connection_string = sitl.connection_string()
print('Connecting to vehicle on: %s' % connection_string)
vehicle = connect(connection_string, wait_ready=True, baud=57600)
distance_in_mm_N = 0
distance_in_mm_S = 0
distance_in_mm_E = 0
distance_in_mm_W = 0
distance_in_mm_45 = 0
dX = 0
dY = 0
I2CMulti = I2CMultiplexer.I2CMultiplexer(0x70)
tof = VL53L1X.VL53L1X()
try:
for i in [0,1,2,7,3]:
I2CMulti.selectPort(i)
tof = VL53L1X.VL53L1X(i2c_bus=1, i2c_address=0x29)
tof.open()
tof.start_ranging(3)
except:
print("port init failed")
def detect_circle():
global dX
global dY
for img in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
for i in range(5):
frame = img.array
height, width = frame.shape[:2]
centre = (int(width/2), int(height/2))
b_channel = np.array(frame[:,:,0]).astype('float')
g_channel = np.array(frame[:,:,1]).astype('float')
r_channel = np.array(frame[:,:,2]).astype('float')
bgr_channel = np.add((np.add(b_channel, g_channel)), r_channel)
img_rec_red2 = np.subtract(r_channel,((b_channel + g_channel)/ 2))
img_rec_red2 = np.divide(img_rec_red2,255)
img_rec_red2[img_rec_red2 < 0.3] = 0
trials = 1
try:
thresholded_array = np.argwhere(img_rec_red2 >= 0.3)
thresholded_list = thresholded_array.tolist()
if len(thresholded_list) > trials*3:
total_centres_X = 0
total_centres_Y = 0
hoop_centre = (0,0)
arr_len_3rd = int(len(thresholded_list) / 3)
for i in range(trials):
r1 = random.randrange(0, int(arr_len_3rd/2))
r2 = random.randrange(arr_len_3rd, 2*arr_len_3rd)
r3 = random.randrange(int(2.5*arr_len_3rd), len(thresholded_list))
print(thresholded_list[r1],thresholded_list[r2],thresholded_list[r3])
current_centre = Polygon(thresholded_list[r1],thresholded_list[r2],thresholded_list[r3]).circumcenter
total_centres_X += int(current_centre.y)
total_centres_Y += int(current_centre.x)
cv2.circle(frame, (thresholded_list[r1][1], thresholded_list[r1][0]), 5, (0, 0, 255), -1)
cv2.circle(frame, (thresholded_list[r2][1], thresholded_list[r2][0]), 5, (0, 0, 255), -1)
cv2.circle(frame, (thresholded_list[r3][1], thresholded_list[r3][0]), 5, (0, 0, 255), -1)
cX = int(total_centres_X / trials)
cY = int(total_centres_Y / trials)
except:
print("no hoop detected")
try:
cv2.circle(frame, (cX, cY), 5, (255, 255, 255), -1)
cv2.line(frame, centre, (cX, cY), (255,0,0), 2)
dX = cX - centre[0]
dY = centre[1] - cY
cv2.putText(frame, ("(" + str(dX) + ", " + str(dY) + " )"), (centre[0] - 20, centre[1] - 20),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
except:
dX = None
dY = None
out.write(frame)
k = cv2.waitKey(1)
rawCapture.truncate(0)
def arm_and_takeoff(aTargetAltitude):
    """Arm the copter in GUIDED mode and climb to aTargetAltitude metres.

    Blocks until the rangefinder reports at least 95% of the target
    altitude. Rangefinder readings above 20 m are treated as glitches
    and reset to 0 so they cannot satisfy the exit condition.
    """
    print("Basic pre-arm checks")
    print("Arming motors")
    # Copter should arm in GUIDED mode.
    vehicle.mode = VehicleMode("GUIDED")
    vehicle.armed = True
    # Confirm the vehicle actually armed before attempting to take off.
    while not vehicle.armed:
        print(" Waiting for arming...")
        time.sleep(1)
    print("Taking off!")
    vehicle.simple_takeoff(aTargetAltitude)  # take off to target altitude
    # Poll until the vehicle is just below the target; the command issued
    # after simple_takeoff would otherwise execute immediately.
    while True:
        print(" Altitude: ", vehicle.rangefinder.distance)
        altitude = vehicle.rangefinder.distance
        if altitude > 20:  # discard implausible rangefinder spikes
            altitude = 0
        print(" Arm state: ", vehicle.armed)
        # Break and return just below the target altitude.
        if altitude >= aTargetAltitude * 0.95:
            print("Reached target altitude")
            break
        time.sleep(1)
def goto_position_target_local_ned(north, east, down):
    """Send a position setpoint in the local NED frame.

    Offsets are in metres; the frame is MAV_FRAME_LOCAL_NED, so ``down``
    is positive towards the ground (NED convention). Only the position
    fields are enabled via the type mask; velocity/acceleration/yaw
    fields are ignored by the autopilot.
    """
    msg = vehicle.message_factory.set_position_target_local_ned_encode(
        0, # time_boot_ms (not used)
        0, 0, # target system, target component
        mavutil.mavlink.MAV_FRAME_LOCAL_NED, # frame
        0b0000111111111000, # type_mask (only positions enabled)
        north, east, down,
        0, 0, 0, # x, y, z velocity in m/s (not used)
        0, 0, 0, # x, y, z acceleration (not supported yet, ignored in GCS_Mavlink)
        0, 0) # yaw, yaw_rate (not supported yet, ignored in GCS_Mavlink)
    # send command to vehicle
    vehicle.send_mavlink(msg)
def get_distance_metres(aLocation1, aLocation2):
    """Approximate ground distance in metres between two location objects.

    Treats the lat/lon deltas as a flat right triangle and converts
    degrees to metres with the 1.113195e5 factor, so accuracy degrades
    for widely separated points.
    """
    d_lat = aLocation2.lat - aLocation1.lat
    d_lon = aLocation2.lon - aLocation1.lon
    return math.sqrt(d_lat * d_lat + d_lon * d_lon) * 1.113195e5
def get_location_metres(original_location, dNorth, dEast):
    """Return a location offset dNorth/dEast metres from original_location.

    Uses a spherical-earth approximation, so it is only accurate for
    small offsets and away from the poles. The result has the same type
    (LocationGlobal or LocationGlobalRelative) and altitude as the input;
    any other type raises Exception.
    """
    earth_radius = 6378137.0  # radius of "spherical" earth, in metres
    # Coordinate offsets converted to radians of latitude/longitude.
    dLat = dNorth / earth_radius
    dLon = dEast / (earth_radius * math.cos(math.pi * original_location.lat / 180))
    # Back to decimal degrees.
    newlat = original_location.lat + (dLat * 180 / math.pi)
    newlon = original_location.lon + (dLon * 180 / math.pi)
    if type(original_location) is LocationGlobal:
        return LocationGlobal(newlat, newlon, original_location.alt)
    if type(original_location) is LocationGlobalRelative:
        return LocationGlobalRelative(newlat, newlon, original_location.alt)
    raise Exception("Invalid Location object passed")
def goto(dNorth, dEast, gotoFunction=vehicle.simple_goto):
    """Fly dNorth/dEast metres from the current position and block until close.

    Polls every 2 s while the vehicle remains in GUIDED mode and returns
    once the remaining distance drops below 0.11 m, or immediately if the
    mode changes (e.g. pilot takeover).
    """
    start = vehicle.location.global_relative_frame
    target = get_location_metres(start, dNorth, dEast)
    targetDistance = get_distance_metres(start, target)
    gotoFunction(target)
    print("Initiating GOTO")
    # Stop monitoring if we are no longer in guided mode.
    while vehicle.mode.name == "GUIDED":
        remaining = get_distance_metres(vehicle.location.global_relative_frame, target)
        print("Distance to target: " + str(remaining))
        if remaining < 0.11:  # just below target, in case of undershoot
            print("Reached target")
            break
        time.sleep(2)
# Sends a velocity to the drone at a rate of 2 Hx
def send_global_velocity(velocity_x, velocity_y, velocity_z, duration):
    """Stream a NED velocity setpoint to the vehicle.

    Velocities are in m/s in the NED frame; only the velocity fields are
    enabled via the type mask. The message is re-sent ``duration`` times
    at 0.5 s intervals because velocity targets must be refreshed.

    NOTE(review): with a 0.5 s sleep the loop runs at 2 Hz, so ``duration``
    counts messages, not seconds — the motion lasts duration/2 seconds.
    Confirm callers expect that.
    """
    msg = vehicle.message_factory.set_position_target_global_int_encode(
        0,  # time_boot_ms (not used)
        0, 0,  # target system, target component
        mavutil.mavlink.MAV_FRAME_GLOBAL_RELATIVE_ALT_INT,  # frame
        0b0000111111000111,  # type_mask (only speeds enabled)
        0,  # lat_int - X Position in WGS84 frame in 1e7 * meters
        0,  # lon_int - Y Position in WGS84 frame in 1e7 * meters
        0,  # alt - Altitude in meters in AMSL altitude(not WGS84 if absolute or relative)
        # altitude above terrain if GLOBAL_TERRAIN_ALT_INT
        velocity_x,  # X velocity in NED frame in m/s
        velocity_y,  # Y velocity in NED frame in m/s
        velocity_z,  # Z velocity in NED frame in m/s
        0, 0, 0,  # afx, afy, afz acceleration (not supported yet, ignored in GCS_Mavlink)
        0, 0)  # yaw, yaw_rate (not supported yet, ignored in GCS_Mavlink)
    # Re-send on a 2 Hz cycle (0.5 s sleep) so the target does not time out.
    for x in range(0,duration):
        vehicle.send_mavlink(msg)
        time.sleep(0.5)
# Sets the Yaw - vehicle will yaw according to the yaw slew rate set in params
# give the vehicle more time (give a 0 velocity vector for x amount of seconds - enough for
# the drone to complete the yaw)
def condition_yaw(heading, relative=False):
    """Send a CONDITION_YAW command to rotate the vehicle.

    ``heading`` is in degrees. When ``relative`` is True it is an offset
    from the current direction of travel, otherwise an absolute bearing.
    Rotation direction is fixed clockwise; the yaw slew rate comes from
    the autopilot parameters.
    """
    is_relative = 1 if relative else 0  # MAVLink encodes the flag as an int
    # Create the CONDITION_YAW command using command_long_encode().
    msg = vehicle.message_factory.command_long_encode(
        0, 0,                                  # target system, target component
        mavutil.mavlink.MAV_CMD_CONDITION_YAW, # command
        0,                                     # confirmation
        heading,      # param 1, yaw in degrees
        0,            # param 2, yaw speed deg/s
        1,            # param 3, direction -1 ccw, 1 cw
        is_relative,  # param 4, relative offset 1, absolute angle 0
        0, 0, 0)      # param 5 ~ 7 not used
    # Send command to vehicle.
    vehicle.send_mavlink(msg)
# The following 2 methods allow for the drone attitude to be directly controlled
# the movement is not OF corrected - avoid usage where possible
def set_attitude(roll_angle = 0.0, pitch_angle = 0.0, yaw_rate = 0.0, thrust = 0.5, duration = 0):
    """Directly command vehicle attitude via SET_ATTITUDE_TARGET.

    Angles are in degrees, yaw_rate in deg/s. Thrust semantics:
    > 0.5 ascend, == 0.5 hold altitude, < 0.5 descend. The message is
    re-sent for ``duration`` seconds because attitude targets time out.

    NOTE(review): the re-send loop's time.sleep is commented out, so it
    spins flat-out for ``duration`` seconds, flooding the link — confirm
    this is intentional.
    """
    # Thrust > 0.5: Ascend
    # Thrust == 0.5: Hold the altitude
    # Thrust < 0.5: Descend
    msg = vehicle.message_factory.set_attitude_target_encode(
        0, # time_boot_ms
        1, # Target system
        1, # Target component
        0b00000000, # Type mask: bit 1 is LSB
        to_quaternion(roll_angle, pitch_angle), # Quaternion
        0, # Body roll rate in radian
        0, # Body pitch rate in radian
        math.radians(yaw_rate), # Body yaw rate in radian
        thrust  # Thrust
    )
    vehicle.send_mavlink(msg)
    start = time.time()
    while time.time() - start < duration:
        vehicle.send_mavlink(msg)
        #time.sleep(0.1)
def to_quaternion(roll = 0.0, pitch = 0.0, yaw = 0.0):
    """Convert Euler angles (degrees) to a quaternion as [w, x, y, z].

    MAVLink's SET_ATTITUDE_TARGET expects the quaternion w-first, which
    is how set_attitude() consumes this result.
    """
    t0 = math.cos(math.radians(yaw * 0.5))
    t1 = math.sin(math.radians(yaw * 0.5))
    t2 = math.cos(math.radians(roll * 0.5))
    t3 = math.sin(math.radians(roll * 0.5))
    t4 = math.cos(math.radians(pitch * 0.5))
    t5 = math.sin(math.radians(pitch * 0.5))

    w = t0 * t2 * t4 + t1 * t3 * t5
    x = t0 * t3 * t4 - t1 * t2 * t5
    y = t0 * t2 * t5 + t1 * t3 * t4
    z = t1 * t2 * t4 - t0 * t3 * t5

    # Bugfix: the original computed the components but never returned them,
    # so set_attitude() encoded None as the attitude quaternion.
    return [w, x, y, z]
# Gets the readings from the TOF sensors and updates the distance vars
def get_I2C_readings():
    """Continuously poll the TOF sensors and update the distance globals.

    Runs forever (intended for a background thread); every 50 ms it
    selects each I2C multiplexer port in turn and stores that sensor's
    range (mm) into the corresponding module-level variable.
    Port-to-direction mapping (0=N, 3=S, 7=E, 2=W, 1=45deg) is taken from
    the wiring — TODO confirm against the hardware.
    """
    global distance_in_mm_N
    global distance_in_mm_S
    global distance_in_mm_E
    global distance_in_mm_W
    global distance_in_mm_45
    while(True):
        I2CMulti.selectPort(0)
        distance_in_mm_N = tof.get_distance()  # Grab the range in mm
        I2CMulti.selectPort(3)
        distance_in_mm_S = tof.get_distance()  # Grab the range in mm
        I2CMulti.selectPort(7)
        distance_in_mm_E = tof.get_distance()  # Grab the range in mm
        I2CMulti.selectPort(2)
        distance_in_mm_W = tof.get_distance()  # Grab the range in mm
        I2CMulti.selectPort(1)
        distance_in_mm_45 = tof.get_distance()  # Grab the range in mm
        #print("Sensor N distance: " + str(distance_in_mm_N) + " \nSensor S distance: " + str(distance_in_mm_S) + "\nSensor E distance: " + str(distance_in_mm_E) + "\nSensor W distance: " + str(distance_in_mm_W))
        time.sleep(0.05)
def calculate_velocity(ground_heading, angle):
    """Convert a heading-relative bearing into x/y velocity components.

    Parameters
    ----------
    ground_heading : float
        Heading recorded while on the ground, in degrees.
    angle : float
        Desired direction relative to that heading, in degrees.

    Returns
    -------
    (vel_x, vel_y) with a fixed 0.2 m/s magnitude split by cos/sin.
    """
    # Bugfix: was math.radian(angle), which raises AttributeError.
    rads = math.radians(angle)
    rads += math.radians(ground_heading)
    # Keep the combined angle within one full turn.
    if rads > math.radians(360):
        rads -= math.radians(360)
    elif rads < -math.radians(360):
        rads += math.radians(360)
    # Bugfix: the original read an undefined name `heading_rad` here.
    vel_x = np.cos(rads) / 5
    vel_y = np.sin(rads) / 5
    return vel_x, vel_y
# Start TOF readings before takeoff (disabled in this build).
#thread.start_new_thread(get_I2C_readings, ())
# Start the CV hoop-detection loop on a background thread; it updates the
# module-level dX/dY pixel offsets that the alignment loop below reads.
thread.start_new_thread(detect_circle, ())
# Heading measured while still on the ground; assumed to be "forward".
ground_heading = vehicle.heading
# Takeoff to 1.5m
arm_and_takeoff(1.5)
# Corridor Variables (TOF corridor following; not used by the loop below).
INCREMENT_DISTANCE = 0.1
CORRIDOR_WIDTH_HALVED = 1300  # in mm
THRESHOLD_DISTANCE = 100
lower_bound = CORRIDOR_WIDTH_HALVED - THRESHOLD_DISTANCE
upper_bound = CORRIDOR_WIDTH_HALVED + THRESHOLD_DISTANCE
VEL_SCALE_Y = 0.005  # velocity scaling factor from openCV pixel offsets
VEL_SCALE_X = 0.001
px_threshold = 10  # pixel deadband before any corrective velocity is sent
print(dX, dY)
# Hoop alignment: loop until the hoop centre sits inside the pixel deadband
# on both axes for stab_threshold consecutive passes.
x_aligned = False
y_aligned = False
up_vel, right_vel_X, right_vel_Y = 0, 0, 0
forward_scale = 0.1
stab_seconds_X = 0
stab_seconds_Y = 0
stab_threshold = 1
while (not x_aligned) or (not y_aligned):
    if dX is None:  # CV thread has not found (or has lost) the hoop
        print("hoop not detected")
        break
    line_d = (dX**2 + dY**2)**0.5
    if line_d == 0:
        # Perfectly centred already: nudge straight forward and re-check.
        fwd_x, fwd_y = calculate_velocity(ground_heading, 0)
        # Bugfix: originally sent fwd_X/fwd_Y, which are undefined names.
        send_global_velocity(fwd_x, fwd_y, 0, 2)
        send_global_velocity(0, 0, 0, 1)
        # Bugfix: originally fell through and divided by line_d == 0 below.
        continue
    total_scale = forward_scale / line_d
    print(dX, dY)
    if dX < -px_threshold or dX > px_threshold:
        x_aligned = False
        up_vel = round((-dX * VEL_SCALE_X), 3)
        # Clamp the vertical correction to +/-0.1 m/s.
        if up_vel > 0.1:
            up_vel = 0.1
        elif up_vel < -0.1:
            up_vel = -0.1
        stab_seconds_X = 0
    else:
        if stab_seconds_X == stab_threshold:
            x_aligned = True
        else:
            x_aligned = False
            stab_seconds_X += 1
        up_vel = 0
    if dY < -px_threshold or dY > px_threshold:
        y_aligned = False
        # Bugfix: np.arctan2 was called with a single argument (TypeError).
        # TODO(review): confirm the intended geometry — this takes the
        # arctangent of the ratio the original expression appeared to form.
        angle = math.degrees(np.arctan(total_scale / line_d))
        right_vel_X, right_vel_Y = calculate_velocity(ground_heading, angle)
        stab_seconds_Y = 0
    else:
        if stab_seconds_Y == stab_threshold:
            y_aligned = True
        else:
            y_aligned = False
            stab_seconds_Y += 1
        right_vel_X = 0
        right_vel_Y = 0
    print("alignment x: " + str(x_aligned))
    print("alignment y: " + str(y_aligned))
    print("velocity: " + str(right_vel_X) + " : " + str(right_vel_Y) + " : " + str(up_vel))
    send_global_velocity(right_vel_X, right_vel_Y, up_vel, 2)
    send_global_velocity(0, 0, 0, 1)  # reset the global vels
print("Fully Aligned")
send_global_velocity(0, 0, 0, 10)  # hold position (reset vels) briefly
# condition_yaw(90, True)
# condition_yaw(-90, True)
print("Landing")
vehicle.mode = VehicleMode("LAND")
# Close vehicle object before exiting script
print("Close vehicle object")
vehicle.close()
# Shut down simulator if it was started.
if sitl:
    sitl.stop()
I2CMulti.i2c.write_byte(0x70, 0)  # presumably disables the I2C mux — confirm
tof.stop_ranging()  # Stop ranging
out.release()
| true | true |
f7255a8df81f50ed5c1b02a6f21936c3d0283313 | 449 | py | Python | msg/urls_api.py | paul-wolf/django-stack | 7b45b3087659e15d936182b15ba6b07c14549584 | [
"MIT"
] | null | null | null | msg/urls_api.py | paul-wolf/django-stack | 7b45b3087659e15d936182b15ba6b07c14549584 | [
"MIT"
] | null | null | null | msg/urls_api.py | paul-wolf/django-stack | 7b45b3087659e15d936182b15ba6b07c14549584 | [
"MIT"
] | null | null | null | from django.conf.urls import url, include, patterns
from rest_framework import routers
from . import views
# Register the Foo viewset so DRF's DefaultRouter generates its REST routes
# (list/detail, plus the browsable API root).
router = routers.DefaultRouter()
router.register(r'foo', views.FooViewSet)
# NOTE(review): django.conf.urls.patterns() was deprecated in Django 1.8 and
# removed in 1.10; a plain list of url() entries works from 1.8 onward —
# confirm the project's Django version before migrating.
urlpatterns = patterns(
    '',
    url(r'^', include(router.urls)), # router-generated Foo REST urls
    url(r'^api-auth/', include('rest_framework.urls',
        namespace='rest_framework')), # browsable-API login/logout urls
)
| 24.944444 | 87 | 0.674833 | from django.conf.urls import url, include, patterns
from rest_framework import routers
from . import views
router = routers.DefaultRouter()
router.register(r'foo', views.FooViewSet)
urlpatterns = patterns(
'',
url(r'^', include(router.urls)),
url(r'^api-auth/', include('rest_framework.urls',
namespace='rest_framework')),
)
| true | true |
f7255b2ec1f0592cd0f12cd48061fd80b46db0c4 | 1,544 | py | Python | src/socialhand/forms.py | GonzaloAvilez/site | 10866c826fa0df89f7f25e7392fd0fc8e395f54d | [
"MIT"
] | null | null | null | src/socialhand/forms.py | GonzaloAvilez/site | 10866c826fa0df89f7f25e7392fd0fc8e395f54d | [
"MIT"
] | 12 | 2019-10-02T17:18:09.000Z | 2022-03-11T23:54:53.000Z | src/socialhand/forms.py | GonzaloAvilez/site | 10866c826fa0df89f7f25e7392fd0fc8e395f54d | [
"MIT"
] | null | null | null | from django import forms
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Div, Submit, HTML, Button, Row, Field, Fieldset
from crispy_forms.bootstrap import InlineField
class ContactForm(forms.Form):
    """Contact form rendered with crispy-forms.

    Collects an optional subject plus the sender's name, e-mail address
    and message, laid out as two side-by-side Bootstrap columns with a
    submit button.
    """

    subject = forms.CharField(required=False)
    contact_name = forms.CharField(required=True, label='Your Name')
    contact_email = forms.EmailField(required=True, label='Your Email')
    content = forms.CharField(required=True, widget=forms.Textarea,
                              label='Your Message')

    def __init__(self, *args, **kwargs):
        super(ContactForm, self).__init__(*args, **kwargs)
        self.helper = FormHelper()
        # Left column: subject / e-mail / name; right column: the message.
        self.helper.layout = Layout(
            Div(
                InlineField('subject', css_class='form-control input-contact input-lg'),
                InlineField('contact_email', css_class='form-control input-contact input-lg'),
                InlineField('contact_name', css_class='form-control input-contact input-lg'),
                css_class="col-lg-6 form-group lead",
            ),
            Div(
                InlineField('content', css_class='input-contact'),
                css_class="col-lg-6 form-group",
            ),
            Submit('submit', 'Send Message', css_class="btn btn-contact btn-default"),
        )
| 33.565217 | 90 | 0.654793 | from django import forms
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Div, Submit, HTML, Button, Row, Field, Fieldset
from crispy_forms.bootstrap import InlineField
class ContactForm(forms.Form):
subject = forms.CharField (required = False,)
contact_name = forms.CharField(required=True,
label='Your Name')
contact_email = forms.EmailField(required=True,
label='Your Email')
content = forms.CharField (required=True,
widget=forms.Textarea,
label='Your Message')
def __init__(self, *args,**kwargs):
super(ContactForm, self).__init__(*args, **kwargs)
self.helper = FormHelper ()
self.helper.layout=Layout(
Div(
InlineField('subject',css_class='form-control input-contact input-lg'),
InlineField('contact_email',css_class='form-control input-contact input-lg'),
InlineField('contact_name',css_class='form-control input-contact input-lg'),
css_class="col-lg-6 form-group lead",
),
Div(
InlineField('content', css_class='input-contact'),
css_class="col-lg-6 form-group",
),
Submit('submit', 'Send Message',css_class="btn btn-contact btn-default"),
)
| true | true |
f7255b8e53122ef5294b9fbb88a6f9fa7e69aa5f | 12,799 | py | Python | python_packages_static/flopy/mf6/modflow/mfgwtsrc.py | usgs/neversink_workflow | acd61435b8553e38d4a903c8cd7a3afc612446f9 | [
"CC0-1.0"
] | null | null | null | python_packages_static/flopy/mf6/modflow/mfgwtsrc.py | usgs/neversink_workflow | acd61435b8553e38d4a903c8cd7a3afc612446f9 | [
"CC0-1.0"
] | null | null | null | python_packages_static/flopy/mf6/modflow/mfgwtsrc.py | usgs/neversink_workflow | acd61435b8553e38d4a903c8cd7a3afc612446f9 | [
"CC0-1.0"
] | null | null | null | # DO NOT MODIFY THIS FILE DIRECTLY. THIS FILE MUST BE CREATED BY
# mf6/utils/createpackages.py
# FILE created on February 18, 2021 16:23:05 UTC
from .. import mfpackage
from ..data.mfdatautil import ListTemplateGenerator
class ModflowGwtsrc(mfpackage.MFPackage):
    """
    ModflowGwtsrc defines a src package within a gwt6 model.

    Parameters
    ----------
    model : MFModel
        Model that this package is a part of. Package is automatically
        added to model when it is initialized.
    loading_package : bool
        Do not set this parameter. It is intended for debugging and internal
        processing purposes only.
    auxiliary : [string]
        * auxiliary (string) defines an array of one or more auxiliary variable
          names. There is no limit on the number of auxiliary variables that
          can be provided on this line; however, lists of information provided
          in subsequent blocks must have a column of data for each auxiliary
          variable name defined here. The number of auxiliary variables
          detected on this line determines the value for naux. Comments cannot
          be provided anywhere on this line as they will be interpreted as
          auxiliary variable names. Auxiliary variables may not be used by the
          package, but they will be available for use by other parts of the
          program. The program will terminate with an error if auxiliary
          variables are specified on more than one line in the options block.
    auxmultname : string
        * auxmultname (string) name of auxiliary variable to be used as
          multiplier of mass loading rate.
    boundnames : boolean
        * boundnames (boolean) keyword to indicate that boundary names may be
          provided with the list of mass source cells.
    print_input : boolean
        * print_input (boolean) keyword to indicate that the list of mass
          source information will be written to the listing file immediately
          after it is read.
    print_flows : boolean
        * print_flows (boolean) keyword to indicate that the list of mass
          source flow rates will be printed to the listing file for every
          stress period time step in which "BUDGET PRINT" is specified in
          Output Control. If there is no Output Control option and
          "PRINT_FLOWS" is specified, then flow rates are printed for the last
          time step of each stress period.
    save_flows : boolean
        * save_flows (boolean) keyword to indicate that mass source flow terms
          will be written to the file specified with "BUDGET FILEOUT" in Output
          Control.
    timeseries : {varname:data} or timeseries data
        * Contains data for the ts package. Data can be stored in a dictionary
          containing data for the ts package with variable names as keys and
          package data as values. Data just for the timeseries variable is also
          acceptable. See ts package documentation for more information.
    observations : {varname:data} or continuous data
        * Contains data for the obs package. Data can be stored in a dictionary
          containing data for the obs package with variable names as keys and
          package data as values. Data just for the observations variable is
          also acceptable. See obs package documentation for more information.
    maxbound : integer
        * maxbound (integer) integer value specifying the maximum number of
          sources cells that will be specified for use during any stress
          period.
    stress_period_data : [cellid, smassrate, aux, boundname]
        * cellid ((integer, ...)) is the cell identifier, and depends on the
          type of grid that is used for the simulation. For a structured grid
          that uses the DIS input file, CELLID is the layer, row, and column.
          For a grid that uses the DISV input file, CELLID is the layer and
          CELL2D number. If the model uses the unstructured discretization
          (DISU) input file, CELLID is the node number for the cell. This
          argument is an index variable, which means that it should be treated
          as zero-based when working with FloPy and Python. Flopy will
          automatically subtract one when loading index variables and add one
          when writing index variables.
        * smassrate (double) is the mass source loading rate. A positive value
          indicates addition of solute mass and a negative value indicates
          removal of solute mass. If the Options block includes a
          TIMESERIESFILE entry (see the "Time-Variable Input" section), values
          can be obtained from a time series by entering the time-series name
          in place of a numeric value.
        * aux (double) represents the values of the auxiliary variables for
          each mass source. The values of auxiliary variables must be present
          for each mass source. The values must be specified in the order of
          the auxiliary variables specified in the OPTIONS block. If the
          package supports time series and the Options block includes a
          TIMESERIESFILE entry (see the "Time-Variable Input" section), values
          can be obtained from a time series by entering the time-series name
          in place of a numeric value.
        * boundname (string) name of the mass source cell. BOUNDNAME is an
          ASCII character variable that can contain as many as 40 characters.
          If BOUNDNAME contains spaces in it, then the entire name must be
          enclosed within single quotes.
    filename : String
        File name for this package.
    pname : String
        Package name for this package.
    parent_file : MFPackage
        Parent package file that references this package. Only needed for
        utility packages (mfutl*). For example, mfutllaktab package must have
        a mfgwflak package parent_file.

    """

    # Template generators map each dataset name to its location in the
    # gwt6/src definition tree; MFPackage uses them to build data storage.
    auxiliary = ListTemplateGenerator(("gwt6", "src", "options", "auxiliary"))
    ts_filerecord = ListTemplateGenerator(
        ("gwt6", "src", "options", "ts_filerecord")
    )
    obs_filerecord = ListTemplateGenerator(
        ("gwt6", "src", "options", "obs_filerecord")
    )
    stress_period_data = ListTemplateGenerator(
        ("gwt6", "src", "period", "stress_period_data")
    )
    # Package identification constants consumed by the MF6 I/O machinery.
    package_abbr = "gwtsrc"
    _package_type = "src"
    dfn_file_name = "gwt-src.dfn"

    # Input-variable definition metadata mirroring dfn/gwt-src.dfn. This
    # file is auto-generated (see the header comment); edit the dfn and
    # regenerate rather than patching these entries by hand.
    dfn = [
        [
            "block options",
            "name auxiliary",
            "type string",
            "shape (naux)",
            "reader urword",
            "optional true",
        ],
        [
            "block options",
            "name auxmultname",
            "type string",
            "shape",
            "reader urword",
            "optional true",
        ],
        [
            "block options",
            "name boundnames",
            "type keyword",
            "shape",
            "reader urword",
            "optional true",
        ],
        [
            "block options",
            "name print_input",
            "type keyword",
            "reader urword",
            "optional true",
        ],
        [
            "block options",
            "name print_flows",
            "type keyword",
            "reader urword",
            "optional true",
        ],
        [
            "block options",
            "name save_flows",
            "type keyword",
            "reader urword",
            "optional true",
        ],
        [
            "block options",
            "name ts_filerecord",
            "type record ts6 filein ts6_filename",
            "shape",
            "reader urword",
            "tagged true",
            "optional true",
            "construct_package ts",
            "construct_data timeseries",
            "parameter_name timeseries",
        ],
        [
            "block options",
            "name ts6",
            "type keyword",
            "shape",
            "in_record true",
            "reader urword",
            "tagged true",
            "optional false",
        ],
        [
            "block options",
            "name filein",
            "type keyword",
            "shape",
            "in_record true",
            "reader urword",
            "tagged true",
            "optional false",
        ],
        [
            "block options",
            "name ts6_filename",
            "type string",
            "preserve_case true",
            "in_record true",
            "reader urword",
            "optional false",
            "tagged false",
        ],
        [
            "block options",
            "name obs_filerecord",
            "type record obs6 filein obs6_filename",
            "shape",
            "reader urword",
            "tagged true",
            "optional true",
            "construct_package obs",
            "construct_data continuous",
            "parameter_name observations",
        ],
        [
            "block options",
            "name obs6",
            "type keyword",
            "shape",
            "in_record true",
            "reader urword",
            "tagged true",
            "optional false",
        ],
        [
            "block options",
            "name obs6_filename",
            "type string",
            "preserve_case true",
            "in_record true",
            "tagged false",
            "reader urword",
            "optional false",
        ],
        [
            "block dimensions",
            "name maxbound",
            "type integer",
            "reader urword",
            "optional false",
        ],
        [
            "block period",
            "name iper",
            "type integer",
            "block_variable True",
            "in_record true",
            "tagged false",
            "shape",
            "valid",
            "reader urword",
            "optional false",
        ],
        [
            "block period",
            "name stress_period_data",
            "type recarray cellid smassrate aux boundname",
            "shape (maxbound)",
            "reader urword",
        ],
        [
            "block period",
            "name cellid",
            "type integer",
            "shape (ncelldim)",
            "tagged false",
            "in_record true",
            "reader urword",
        ],
        [
            "block period",
            "name smassrate",
            "type double precision",
            "shape",
            "tagged false",
            "in_record true",
            "reader urword",
            "time_series true",
        ],
        [
            "block period",
            "name aux",
            "type double precision",
            "in_record true",
            "tagged false",
            "shape (naux)",
            "reader urword",
            "optional true",
            "time_series true",
        ],
        [
            "block period",
            "name boundname",
            "type string",
            "shape",
            "tagged false",
            "in_record true",
            "reader urword",
            "optional true",
        ],
    ]

    def __init__(
        self,
        model,
        loading_package=False,
        auxiliary=None,
        auxmultname=None,
        boundnames=None,
        print_input=None,
        print_flows=None,
        save_flows=None,
        timeseries=None,
        observations=None,
        maxbound=None,
        stress_period_data=None,
        filename=None,
        pname=None,
        parent_file=None,
    ):
        super(ModflowGwtsrc, self).__init__(
            model, "src", filename, pname, loading_package, parent_file
        )

        # set up variables (each build_mfdata call wires a dataset into the
        # package's managed-data structures; ts/obs become child packages)
        self.auxiliary = self.build_mfdata("auxiliary", auxiliary)
        self.auxmultname = self.build_mfdata("auxmultname", auxmultname)
        self.boundnames = self.build_mfdata("boundnames", boundnames)
        self.print_input = self.build_mfdata("print_input", print_input)
        self.print_flows = self.build_mfdata("print_flows", print_flows)
        self.save_flows = self.build_mfdata("save_flows", save_flows)
        self._ts_filerecord = self.build_mfdata("ts_filerecord", None)
        self._ts_package = self.build_child_package(
            "ts", timeseries, "timeseries", self._ts_filerecord
        )
        self._obs_filerecord = self.build_mfdata("obs_filerecord", None)
        self._obs_package = self.build_child_package(
            "obs", observations, "continuous", self._obs_filerecord
        )
        self.maxbound = self.build_mfdata("maxbound", maxbound)
        self.stress_period_data = self.build_mfdata(
            "stress_period_data", stress_period_data
        )
        self._init_complete = True
| 36.464387 | 79 | 0.57067 |
from .. import mfpackage
from ..data.mfdatautil import ListTemplateGenerator
class ModflowGwtsrc(mfpackage.MFPackage):
auxiliary = ListTemplateGenerator(("gwt6", "src", "options", "auxiliary"))
ts_filerecord = ListTemplateGenerator(
("gwt6", "src", "options", "ts_filerecord")
)
obs_filerecord = ListTemplateGenerator(
("gwt6", "src", "options", "obs_filerecord")
)
stress_period_data = ListTemplateGenerator(
("gwt6", "src", "period", "stress_period_data")
)
package_abbr = "gwtsrc"
_package_type = "src"
dfn_file_name = "gwt-src.dfn"
dfn = [
[
"block options",
"name auxiliary",
"type string",
"shape (naux)",
"reader urword",
"optional true",
],
[
"block options",
"name auxmultname",
"type string",
"shape",
"reader urword",
"optional true",
],
[
"block options",
"name boundnames",
"type keyword",
"shape",
"reader urword",
"optional true",
],
[
"block options",
"name print_input",
"type keyword",
"reader urword",
"optional true",
],
[
"block options",
"name print_flows",
"type keyword",
"reader urword",
"optional true",
],
[
"block options",
"name save_flows",
"type keyword",
"reader urword",
"optional true",
],
[
"block options",
"name ts_filerecord",
"type record ts6 filein ts6_filename",
"shape",
"reader urword",
"tagged true",
"optional true",
"construct_package ts",
"construct_data timeseries",
"parameter_name timeseries",
],
[
"block options",
"name ts6",
"type keyword",
"shape",
"in_record true",
"reader urword",
"tagged true",
"optional false",
],
[
"block options",
"name filein",
"type keyword",
"shape",
"in_record true",
"reader urword",
"tagged true",
"optional false",
],
[
"block options",
"name ts6_filename",
"type string",
"preserve_case true",
"in_record true",
"reader urword",
"optional false",
"tagged false",
],
[
"block options",
"name obs_filerecord",
"type record obs6 filein obs6_filename",
"shape",
"reader urword",
"tagged true",
"optional true",
"construct_package obs",
"construct_data continuous",
"parameter_name observations",
],
[
"block options",
"name obs6",
"type keyword",
"shape",
"in_record true",
"reader urword",
"tagged true",
"optional false",
],
[
"block options",
"name obs6_filename",
"type string",
"preserve_case true",
"in_record true",
"tagged false",
"reader urword",
"optional false",
],
[
"block dimensions",
"name maxbound",
"type integer",
"reader urword",
"optional false",
],
[
"block period",
"name iper",
"type integer",
"block_variable True",
"in_record true",
"tagged false",
"shape",
"valid",
"reader urword",
"optional false",
],
[
"block period",
"name stress_period_data",
"type recarray cellid smassrate aux boundname",
"shape (maxbound)",
"reader urword",
],
[
"block period",
"name cellid",
"type integer",
"shape (ncelldim)",
"tagged false",
"in_record true",
"reader urword",
],
[
"block period",
"name smassrate",
"type double precision",
"shape",
"tagged false",
"in_record true",
"reader urword",
"time_series true",
],
[
"block period",
"name aux",
"type double precision",
"in_record true",
"tagged false",
"shape (naux)",
"reader urword",
"optional true",
"time_series true",
],
[
"block period",
"name boundname",
"type string",
"shape",
"tagged false",
"in_record true",
"reader urword",
"optional true",
],
]
def __init__(
self,
model,
loading_package=False,
auxiliary=None,
auxmultname=None,
boundnames=None,
print_input=None,
print_flows=None,
save_flows=None,
timeseries=None,
observations=None,
maxbound=None,
stress_period_data=None,
filename=None,
pname=None,
parent_file=None,
):
super(ModflowGwtsrc, self).__init__(
model, "src", filename, pname, loading_package, parent_file
)
self.auxiliary = self.build_mfdata("auxiliary", auxiliary)
self.auxmultname = self.build_mfdata("auxmultname", auxmultname)
self.boundnames = self.build_mfdata("boundnames", boundnames)
self.print_input = self.build_mfdata("print_input", print_input)
self.print_flows = self.build_mfdata("print_flows", print_flows)
self.save_flows = self.build_mfdata("save_flows", save_flows)
self._ts_filerecord = self.build_mfdata("ts_filerecord", None)
self._ts_package = self.build_child_package(
"ts", timeseries, "timeseries", self._ts_filerecord
)
self._obs_filerecord = self.build_mfdata("obs_filerecord", None)
self._obs_package = self.build_child_package(
"obs", observations, "continuous", self._obs_filerecord
)
self.maxbound = self.build_mfdata("maxbound", maxbound)
self.stress_period_data = self.build_mfdata(
"stress_period_data", stress_period_data
)
self._init_complete = True
| true | true |
f7255db587da3c1bac8640b99f5f51ab4e48fd2b | 1,492 | py | Python | hmm/scripts/easy_casino_learn.py | ondrejba/hmm | 1e9fe47a6057d93e7c77614016a89d5d46959e97 | [
"MIT"
] | null | null | null | hmm/scripts/easy_casino_learn.py | ondrejba/hmm | 1e9fe47a6057d93e7c77614016a89d5d46959e97 | [
"MIT"
] | null | null | null | hmm/scripts/easy_casino_learn.py | ondrejba/hmm | 1e9fe47a6057d93e7c77614016a89d5d46959e97 | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
from ..easy_casino import Casino
from ..hmm_multinoulli import HMMMultinoulli
hmm = HMMMultinoulli(Casino.A, Casino.PX, Casino.INIT)
# generate sequence
seq_length = 300
batch_size = 500
xs_batch = []
zs_batch = []
for j in range(batch_size):
casino = Casino()
xs = [casino.observe()]
zs = [casino.z]
for i in range(seq_length - 1):
casino.transition()
xs.append(casino.observe())
zs.append(casino.z)
xs_batch.append(xs)
zs_batch.append(zs)
xs_batch = np.array(xs_batch)
zs_batch = np.array(zs_batch)
num_hidden_states = len(np.unique(zs_batch))
# learn
hmm.initialize_em(2, 6)
for i in range(200):
# learn
print("step", i)
print(hmm.A)
print(hmm.init)
print(hmm.PX)
print()
ll = hmm.learn_em(xs_batch)
print("log likelihood:", ll)
print()
# calculate probabilities
alphas, log_evidence, betas, gammas, etas = hmm.forward_backward(xs_batch[0])
# plot alphas and gammas
plot_zs = np.array(zs_batch[0])
plot_alphas = alphas[:, 1]
plot_gammas = gammas[:, 1]
plot_xs = np.linspace(1, len(plot_zs), num=len(plot_zs))
plt.figure(figsize=(12, 9))
plt.subplot(2, 1, 1)
plt.title("filtering")
plt.plot(plot_xs, plot_zs, label="z")
plt.plot(plot_xs, plot_alphas, label="P(z) = 1")
plt.legend()
plt.subplot(2, 1, 2)
plt.title("smoothing")
plt.plot(plot_xs, plot_zs, label="z")
plt.plot(plot_xs, plot_gammas, label="P(z) = 1")
plt.legend()
plt.show()
| 20.438356 | 77 | 0.678954 | import numpy as np
import matplotlib.pyplot as plt
from ..easy_casino import Casino
from ..hmm_multinoulli import HMMMultinoulli
hmm = HMMMultinoulli(Casino.A, Casino.PX, Casino.INIT)
seq_length = 300
batch_size = 500
xs_batch = []
zs_batch = []
for j in range(batch_size):
casino = Casino()
xs = [casino.observe()]
zs = [casino.z]
for i in range(seq_length - 1):
casino.transition()
xs.append(casino.observe())
zs.append(casino.z)
xs_batch.append(xs)
zs_batch.append(zs)
xs_batch = np.array(xs_batch)
zs_batch = np.array(zs_batch)
num_hidden_states = len(np.unique(zs_batch))
hmm.initialize_em(2, 6)
for i in range(200):
print("step", i)
print(hmm.A)
print(hmm.init)
print(hmm.PX)
print()
ll = hmm.learn_em(xs_batch)
print("log likelihood:", ll)
print()
alphas, log_evidence, betas, gammas, etas = hmm.forward_backward(xs_batch[0])
plot_zs = np.array(zs_batch[0])
plot_alphas = alphas[:, 1]
plot_gammas = gammas[:, 1]
plot_xs = np.linspace(1, len(plot_zs), num=len(plot_zs))
plt.figure(figsize=(12, 9))
plt.subplot(2, 1, 1)
plt.title("filtering")
plt.plot(plot_xs, plot_zs, label="z")
plt.plot(plot_xs, plot_alphas, label="P(z) = 1")
plt.legend()
plt.subplot(2, 1, 2)
plt.title("smoothing")
plt.plot(plot_xs, plot_zs, label="z")
plt.plot(plot_xs, plot_gammas, label="P(z) = 1")
plt.legend()
plt.show()
| true | true |
f7255dc9372185d8116fca049ef881d946cb5401 | 5,629 | py | Python | test/functional/mempool_persist.py | HunterCanimun/surgeofficial-surge-coin | 663dc25517e9045a65a9b1e0993bbaa06d564284 | [
"MIT"
] | null | null | null | test/functional/mempool_persist.py | HunterCanimun/surgeofficial-surge-coin | 663dc25517e9045a65a9b1e0993bbaa06d564284 | [
"MIT"
] | null | null | null | test/functional/mempool_persist.py | HunterCanimun/surgeofficial-surge-coin | 663dc25517e9045a65a9b1e0993bbaa06d564284 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or https://www.opensource.org/licenses/mit-license.php.
"""Test mempool persistence.
By default, bitcoind will dump mempool on shutdown and
then reload it on startup. This can be overridden with
the -persistmempool=false command line option.
Test is as follows:
- start node0, node1 and node2. node1 has -persistmempool=false
- create 5 transactions on node2 to its own address. Note that these
are not sent to node0 or node1 addresses because we don't want
them to be saved in the wallet.
- check that node0 and node1 have 5 transactions in their mempools
- shutdown all nodes.
- startup node0. Verify that it still has 5 transactions
in its mempool. Shutdown node0. This tests that by default the
mempool is persistent.
- startup node1. Verify that its mempool is empty. Shutdown node1.
This tests that with -persistmempool=false, the mempool is not
dumped to disk when the node is shut down.
- Restart node0 with -persistmempool=false. Verify that its mempool is
empty. Shutdown node0. This tests that with -persistmempool=false,
the mempool is not loaded from disk on start up.
- Restart node0 with -persistmempool=true. Verify that it has 5
transactions in its mempool. This tests that -persistmempool=false
does not overwrite a previously valid mempool stored on disk.
"""
from decimal import Decimal
import os
from test_framework.test_framework import SurgeTestFramework
from test_framework.util import (
assert_equal,
wait_until,
)
class MempoolPersistTest(SurgeTestFramework):
    """Exercise mempool persistence across restarts (see module docstring)."""
    def set_test_params(self):
        """Run three nodes; node1 disables mempool persistence."""
        self.num_nodes = 3
        self.extra_args = [[], ["-persistmempool=0"], []]
    def run_test(self):
        """Create transactions, restart nodes, and check mempool contents."""
        chain_height = self.nodes[0].getblockcount()
        assert_equal(chain_height, 200)
        self.log.debug("Mine a single block to get out of IBD")
        self.nodes[0].generate(1)
        self.sync_all()
        self.log.debug("Send 5 transactions from node2 (to its own address)")
        for i in range(5):
            self.nodes[2].sendtoaddress(self.nodes[2].getnewaddress(), Decimal("10"))
        node2_balance = self.nodes[2].getbalance()
        self.sync_all()
        self.log.debug("Verify that node0 and node1 have 5 transactions in their mempools")
        assert_equal(len(self.nodes[0].getrawmempool()), 5)
        assert_equal(len(self.nodes[1].getrawmempool()), 5)
        self.log.debug("Stop-start node0 and node1. Verify that node0 has the transactions in its mempool and node1 does not.")
        self.stop_nodes()
        # Give this node a head-start, so we can be "extra-sure" that it didn't load anything later
        # Also don't store the mempool, to keep the datadir clean
        self.start_node(1, extra_args=["-persistmempool=0"])
        self.start_node(0)
        self.start_node(2)
        assert self.nodes[0].getmempoolinfo()["loaded"]  # start_node is blocking on the mempool being loaded
        assert self.nodes[2].getmempoolinfo()["loaded"]
        assert_equal(len(self.nodes[0].getrawmempool()), 5)
        assert_equal(len(self.nodes[2].getrawmempool()), 5)
        # The others have loaded their mempool. If node_1 loaded anything, we'd probably notice by now:
        assert_equal(len(self.nodes[1].getrawmempool()), 0)
        # Verify accounting of mempool transactions after restart is correct
        self.nodes[2].syncwithvalidationinterfacequeue()  # Flush mempool to wallet
        assert_equal(node2_balance, self.nodes[2].getbalance())
        self.log.debug("Stop-start node0 with -persistmempool=0. Verify that it doesn't load its mempool.dat file.")
        self.stop_nodes()
        self.start_node(0, extra_args=["-persistmempool=0"])
        assert self.nodes[0].getmempoolinfo()["loaded"]
        assert_equal(len(self.nodes[0].getrawmempool()), 0)
        self.log.debug("Stop-start node0. Verify that it has the transactions in its mempool.")
        self.stop_nodes()
        self.start_node(0)
        assert self.nodes[0].getmempoolinfo()["loaded"]
        assert_equal(len(self.nodes[0].getrawmempool()), 5)
        # Following code is ahead of our current repository state. Future back port.
        '''
        mempooldat0 = os.path.join(self.nodes[0].datadir, 'regtest', 'mempool.dat')
        mempooldat1 = os.path.join(self.nodes[1].datadir, 'regtest', 'mempool.dat')
        self.log.debug("Remove the mempool.dat file. Verify that savemempool to disk via RPC re-creates it")
        os.remove(mempooldat0)
        self.nodes[0].savemempool()
        assert os.path.isfile(mempooldat0)
        self.log.debug("Stop nodes, make node1 use mempool.dat from node0. Verify it has 5 transactions")
        os.rename(mempooldat0, mempooldat1)
        self.stop_nodes()
        self.start_node(1, extra_args=[])
        assert self.nodes[0].getmempoolinfo()["loaded"]
        assert_equal(len(self.nodes[1].getrawmempool()), 5)
        self.log.debug("Prevent bitcoind from writing mempool.dat to disk. Verify that `savemempool` fails")
        # to test the exception we are creating a tmp folder called mempool.dat.new
        # which is an implementation detail that could change and break this test
        mempooldotnew1 = mempooldat1 + '.new'
        os.mkdir(mempooldotnew1)
        assert_raises_rpc_error(-1, "Unable to dump mempool to disk", self.nodes[1].savemempool)
        os.rmdir(mempooldotnew1)
        '''
# Entry point: run the functional test when executed directly.
if __name__ == '__main__':
    MempoolPersistTest().main()
| 45.764228 | 127 | 0.693907 |
from decimal import Decimal
import os
from test_framework.test_framework import SurgeTestFramework
from test_framework.util import (
assert_equal,
wait_until,
)
class MempoolPersistTest(SurgeTestFramework):
def set_test_params(self):
self.num_nodes = 3
self.extra_args = [[], ["-persistmempool=0"], []]
def run_test(self):
chain_height = self.nodes[0].getblockcount()
assert_equal(chain_height, 200)
self.log.debug("Mine a single block to get out of IBD")
self.nodes[0].generate(1)
self.sync_all()
self.log.debug("Send 5 transactions from node2 (to its own address)")
for i in range(5):
self.nodes[2].sendtoaddress(self.nodes[2].getnewaddress(), Decimal("10"))
node2_balance = self.nodes[2].getbalance()
self.sync_all()
self.log.debug("Verify that node0 and node1 have 5 transactions in their mempools")
assert_equal(len(self.nodes[0].getrawmempool()), 5)
assert_equal(len(self.nodes[1].getrawmempool()), 5)
self.log.debug("Stop-start node0 and node1. Verify that node0 has the transactions in its mempool and node1 does not.")
self.stop_nodes()
# Also don't store the mempool, to keep the datadir clean
self.start_node(1, extra_args=["-persistmempool=0"])
self.start_node(0)
self.start_node(2)
assert self.nodes[0].getmempoolinfo()["loaded"]
assert self.nodes[2].getmempoolinfo()["loaded"]
assert_equal(len(self.nodes[0].getrawmempool()), 5)
assert_equal(len(self.nodes[2].getrawmempool()), 5)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
# Verify accounting of mempool transactions after restart is correct
self.nodes[2].syncwithvalidationinterfacequeue() # Flush mempool to wallet
assert_equal(node2_balance, self.nodes[2].getbalance())
self.log.debug("Stop-start node0 with -persistmempool=0. Verify that it doesn't load its mempool.dat file.")
self.stop_nodes()
self.start_node(0, extra_args=["-persistmempool=0"])
assert self.nodes[0].getmempoolinfo()["loaded"]
assert_equal(len(self.nodes[0].getrawmempool()), 0)
self.log.debug("Stop-start node0. Verify that it has the transactions in its mempool.")
self.stop_nodes()
self.start_node(0)
assert self.nodes[0].getmempoolinfo()["loaded"]
assert_equal(len(self.nodes[0].getrawmempool()), 5)
if __name__ == '__main__':
MempoolPersistTest().main()
| true | true |
f7256002b00ab39290e07cbb4bf618fbad591a53 | 7,106 | py | Python | run_tests.py | jamespic/wsgi_benchmark | d5c02fb7530501a46c22765bf3da7c564a68872d | [
"MIT"
] | null | null | null | run_tests.py | jamespic/wsgi_benchmark | d5c02fb7530501a46c22765bf3da7c564a68872d | [
"MIT"
] | null | null | null | run_tests.py | jamespic/wsgi_benchmark | d5c02fb7530501a46c22765bf3da7c564a68872d | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import contextlib
import docker
import subprocess
import os.path
import sys
import time
import urllib2
# Server name -> command that launches it inside the 'wsgi_benchmark'
# Docker image (see server() below).
SERVERS = {
    'bjoern': ['python', '-m', 'wsgi_benchmark.bjoern_server'],
    'cheroot': ['python', '-m', 'wsgi_benchmark.cheroot_server'],
    'cheroot_high_concurrency': ['python', '-m', 'wsgi_benchmark.high_concurrency_cheroot_server'],
    'eventlet': ['python', '-m', 'wsgi_benchmark.eventlet_server'],
    'gunicorn': ['./gunicorn_server.sh'],
    'gunicorn_gevent': ['./gevent_gunicorn_server.sh'],
    'gunicorn_gthread': ['./gthread_gunicorn_server.sh'],
    'gunicorn_meinheld': ['./meinheld_gunicorn_server.sh'],
    'gunicorn_high_concurrency': ['./high_concurrency_gunicorn_server.sh'],
    'gevent': ['python', '-m', 'wsgi_benchmark.gevent_server'],
    'meinheld': ['python', '-m', 'wsgi_benchmark.meinheld_server'],
    'uwsgi': ['./uwsgi_server.sh'],
    'uwsgi_gevent': ['./gevent_uwsgi_server.sh'],
    'uwsgi_high_concurrency': ['./high_concurrency_uwsgi_server.sh'],
    'uwsgi_threaded': ['./threaded_uwsgi_server.sh'],
    'waitress': ['python', '-m', 'wsgi_benchmark.waitress_server'],
    'waitress_high_concurrency': ['python', '-m', 'wsgi_benchmark.high_concurrency_waitress_server'],
    'werkzeug': ['python', '-m', 'wsgi_benchmark.werkzeug_server'],
    'werkzeug_threading': ['python', '-m', 'wsgi_benchmark.threading_werkzeug_server'],
    'werkzeug_forking': ['python', '-m', 'wsgi_benchmark.forking_werkzeug_server'],
    'wsgiref': ['python', '-m', 'wsgi_benchmark.wsgiref_server'],
    'wsgiref_threading': ['python', '-m', 'wsgi_benchmark.threading_wsgiref_server'],
    'wsgiref_forking': ['python', '-m', 'wsgi_benchmark.forking_wsgiref_server']
}
# Benchmark scenario name -> Gatling simulation class run against each server.
GATLING_SCENARIOS = {
    'hello_world': 'HelloWorldSimulation',
    'numeric_nogil': 'NumericNoGilSimulation',
    'native_io': 'NativeIOSimulation',
    'socket_io': 'SocketIOSimulation',
    'sendfile': 'SendfileSimulation',
    'dynamic_file': 'DynamicFileSimulation',
    'sha512': 'SHA512Simulation',
    'forward_request': 'ForwardSimulation',
    'gzip': 'GzipSimulation',
    'numeric_gil': 'NumericGilSimulation'
}
def build_image():
    """Build the 'wsgi_benchmark' Docker image from the current directory."""
    docker.from_env().images.build(path='.', tag='wsgi_benchmark')
@contextlib.contextmanager
def server(command, server_name):
container = docker.from_env().containers.run(
'wsgi_benchmark', command,
name='wsgi_benchmark-{}'.format(server_name),
detach=True, ports={'8765/tcp': 8765})
print "{server_name} running as container {container.id}".format(**locals())
try:
for i in xrange(30):
try:
assert urllib2.urlopen('http://localhost:8765/hello_world').read() == 'Hello World'
except:
time.sleep(1)
else:
break
else:
raise RuntimeError("Could not start server process: {}" .format(server_name))
yield container
finally:
container.remove(force=True)
if __name__ == '__main__':
    build_image()
    # Smoke test: make sure every configured server starts and responds.
    for server_name, server_command in sorted(SERVERS.items()):
        print "Testing {server_name} starts".format(**locals())
        with server(server_command, server_name):
            print "Success"
    with open('results/misc_results.txt', 'w') as misc_results:
        for server_name, command in sorted(SERVERS.items()):
            # Chunked-upload test: stream 64 KiB of zeros to /sha512 and
            # compare against the expected digest.
            with server(command, server_name):
                try:
                    hash_result = subprocess.check_output(
                        'head -c 65536 /dev/zero | curl -T - -y 5 http://localhost:8765/sha512', shell=True)
                    # Expected value: SHA-512 of 65536 zero bytes.
                    success = hash_result == '73e4153936dab198397b74ee9efc26093dda721eaab2f8d92786891153b45b04265a161b169c988edb0db2c53124607b6eaaa816559c5ce54f3dbc9fa6a7a4b2'
                except:
                    # NOTE(review): bare except also swallows KeyboardInterrupt;
                    # consider narrowing to `except Exception`.
                    success = False
                misc_results.write(
                    '{server_name}-chunked: {success}\n'.format(**locals()))
            # A response the server aborts mid-transfer should make curl fail.
            with server(command, server_name):
                interrupted_result = subprocess.call(
                    ['curl', 'http://localhost:8765/interrupted'])
                success = interrupted_result != 0
                misc_results.write(
                    '{server_name}-interrupted: {success}\n'.format(**locals()))
            # slowhttptest -X: slow-read attack against a dynamic download.
            with server(command, server_name):
                subprocess.call(['slowhttptest', '-g', '-X',
                                 '-v', '1',
                                 '-i', '3',
                                 '-l', '30',
                                 '-c', '300',
                                 '-o', '{server_name}-slow_read'.format(**locals()),
                                 '-u', 'http://localhost:8765/dynamic_file'], cwd='results')
            # slowhttptest -H: slowloris-style slow request headers.
            with server(command, server_name):
                subprocess.call(['slowhttptest', '-g', '-H',
                                 '-v', '1',
                                 '-i', '3',
                                 '-l', '30',
                                 '-c', '300',
                                 '-o', '{server_name}-slow_headers'.format(**locals()),
                                 '-u', 'http://localhost:8765/hello_world'], cwd='results')
            # slowhttptest -B: slow request body upload.
            with server(command, server_name):
                subprocess.call(['slowhttptest', '-g', '-B',
                                 '-v', '1',
                                 '-i', '3',
                                 '-l', '30',
                                 '-c', '300',
                                 '-o', '{server_name}-slow_body'.format(**locals()),
                                 '-u', 'http://localhost:8765/sha512'], cwd='results')
            # Run each Gatling scenario in a linked container; results are
            # written to ./results on the host via the bind mount.
            for scenario_name, scenario_class in sorted(GATLING_SCENARIOS.items()):
                with server(command, server_name) as server_container:
                    container = docker.from_env().containers.run(
                        'wsgi_benchmark',
                        [
                            'mvn',
                            '-Dgatling.simulationClass=io.github.jamespic.wsgi_benchmark.%s' % scenario_class,
                            '-Dgatling.outputName=%s-%s' % (
                                scenario_name, server_name),
                            '-Dgatling.resultsFolder=/results',
                            'integration-test'
                        ],
                        name='wsgi_benchmark_gatling_{scenario_name}_{server_name}'.format(**locals()),
                        links={server_container.name: 'wsgi_benchmark_server'},
                        volumes={os.path.abspath('results'): {'bind': '/results', 'mode': 'rw'}},
                        environment={'TARGET_HOSTNAME': 'wsgi_benchmark_server'},
                        working_dir='/wsgi_benchmark/gatling',
                        detach=True
                    )
                    try:
                        # Stream the Gatling output as it runs.
                        for line in container.logs(stdout=True, stderr=True, stream=True):
                            sys.stdout.write(line)
                    finally:
                        container.remove(force=True)
| 46.444444 | 175 | 0.54067 |
import contextlib
import docker
import subprocess
import os.path
import sys
import time
import urllib2
SERVERS = {
'bjoern': ['python', '-m', 'wsgi_benchmark.bjoern_server'],
'cheroot': ['python', '-m', 'wsgi_benchmark.cheroot_server'],
'cheroot_high_concurrency': ['python', '-m', 'wsgi_benchmark.high_concurrency_cheroot_server'],
'eventlet': ['python', '-m', 'wsgi_benchmark.eventlet_server'],
'gunicorn': ['./gunicorn_server.sh'],
'gunicorn_gevent': ['./gevent_gunicorn_server.sh'],
'gunicorn_gthread': ['./gthread_gunicorn_server.sh'],
'gunicorn_meinheld': ['./meinheld_gunicorn_server.sh'],
'gunicorn_high_concurrency': ['./high_concurrency_gunicorn_server.sh'],
'gevent': ['python', '-m', 'wsgi_benchmark.gevent_server'],
'meinheld': ['python', '-m', 'wsgi_benchmark.meinheld_server'],
'uwsgi': ['./uwsgi_server.sh'],
'uwsgi_gevent': ['./gevent_uwsgi_server.sh'],
'uwsgi_high_concurrency': ['./high_concurrency_uwsgi_server.sh'],
'uwsgi_threaded': ['./threaded_uwsgi_server.sh'],
'waitress': ['python', '-m', 'wsgi_benchmark.waitress_server'],
'waitress_high_concurrency': ['python', '-m', 'wsgi_benchmark.high_concurrency_waitress_server'],
'werkzeug': ['python', '-m', 'wsgi_benchmark.werkzeug_server'],
'werkzeug_threading': ['python', '-m', 'wsgi_benchmark.threading_werkzeug_server'],
'werkzeug_forking': ['python', '-m', 'wsgi_benchmark.forking_werkzeug_server'],
'wsgiref': ['python', '-m', 'wsgi_benchmark.wsgiref_server'],
'wsgiref_threading': ['python', '-m', 'wsgi_benchmark.threading_wsgiref_server'],
'wsgiref_forking': ['python', '-m', 'wsgi_benchmark.forking_wsgiref_server']
}
GATLING_SCENARIOS = {
'hello_world': 'HelloWorldSimulation',
'numeric_nogil': 'NumericNoGilSimulation',
'native_io': 'NativeIOSimulation',
'socket_io': 'SocketIOSimulation',
'sendfile': 'SendfileSimulation',
'dynamic_file': 'DynamicFileSimulation',
'sha512': 'SHA512Simulation',
'forward_request': 'ForwardSimulation',
'gzip': 'GzipSimulation',
'numeric_gil': 'NumericGilSimulation'
}
def build_image():
docker.from_env().images.build(path='.', tag='wsgi_benchmark')
@contextlib.contextmanager
def server(command, server_name):
container = docker.from_env().containers.run(
'wsgi_benchmark', command,
name='wsgi_benchmark-{}'.format(server_name),
detach=True, ports={'8765/tcp': 8765})
print "{server_name} running as container {container.id}".format(**locals())
try:
for i in xrange(30):
try:
assert urllib2.urlopen('http://localhost:8765/hello_world').read() == 'Hello World'
except:
time.sleep(1)
else:
break
else:
raise RuntimeError("Could not start server process: {}" .format(server_name))
yield container
finally:
container.remove(force=True)
if __name__ == '__main__':
build_image()
for server_name, server_command in sorted(SERVERS.items()):
print "Testing {server_name} starts".format(**locals())
with server(server_command, server_name):
print "Success"
with open('results/misc_results.txt', 'w') as misc_results:
for server_name, command in sorted(SERVERS.items()):
with server(command, server_name):
try:
hash_result = subprocess.check_output(
'head -c 65536 /dev/zero | curl -T - -y 5 http://localhost:8765/sha512', shell=True)
success = hash_result == '73e4153936dab198397b74ee9efc26093dda721eaab2f8d92786891153b45b04265a161b169c988edb0db2c53124607b6eaaa816559c5ce54f3dbc9fa6a7a4b2'
except:
success = False
misc_results.write(
'{server_name}-chunked: {success}\n'.format(**locals()))
with server(command, server_name):
interrupted_result = subprocess.call(
['curl', 'http://localhost:8765/interrupted'])
success = interrupted_result != 0
misc_results.write(
'{server_name}-interrupted: {success}\n'.format(**locals()))
with server(command, server_name):
subprocess.call(['slowhttptest', '-g', '-X',
'-v', '1',
'-i', '3',
'-l', '30',
'-c', '300',
'-o', '{server_name}-slow_read'.format(**locals()),
'-u', 'http://localhost:8765/dynamic_file'], cwd='results')
with server(command, server_name):
subprocess.call(['slowhttptest', '-g', '-H',
'-v', '1',
'-i', '3',
'-l', '30',
'-c', '300',
'-o', '{server_name}-slow_headers'.format(**locals()),
'-u', 'http://localhost:8765/hello_world'], cwd='results')
with server(command, server_name):
subprocess.call(['slowhttptest', '-g', '-B',
'-v', '1',
'-i', '3',
'-l', '30',
'-c', '300',
'-o', '{server_name}-slow_body'.format(**locals()),
'-u', 'http://localhost:8765/sha512'], cwd='results')
for scenario_name, scenario_class in sorted(GATLING_SCENARIOS.items()):
with server(command, server_name) as server_container:
container = docker.from_env().containers.run(
'wsgi_benchmark',
[
'mvn',
'-Dgatling.simulationClass=io.github.jamespic.wsgi_benchmark.%s' % scenario_class,
'-Dgatling.outputName=%s-%s' % (
scenario_name, server_name),
'-Dgatling.resultsFolder=/results',
'integration-test'
],
name='wsgi_benchmark_gatling_{scenario_name}_{server_name}'.format(**locals()),
links={server_container.name: 'wsgi_benchmark_server'},
volumes={os.path.abspath('results'): {'bind': '/results', 'mode': 'rw'}},
environment={'TARGET_HOSTNAME': 'wsgi_benchmark_server'},
working_dir='/wsgi_benchmark/gatling',
detach=True
)
try:
for line in container.logs(stdout=True, stderr=True, stream=True):
sys.stdout.write(line)
finally:
container.remove(force=True)
| false | true |
f7256127fb8bcd69a2e2866490517a48fb8fa051 | 2,814 | py | Python | examples/00-basic-examples/Create_Netlist.py | sparfenyuk/PyAEDT | efe8d219be974fa8a164d84ca9bc5c0e1b32256c | [
"MIT"
] | null | null | null | examples/00-basic-examples/Create_Netlist.py | sparfenyuk/PyAEDT | efe8d219be974fa8a164d84ca9bc5c0e1b32256c | [
"MIT"
] | null | null | null | examples/00-basic-examples/Create_Netlist.py | sparfenyuk/PyAEDT | efe8d219be974fa8a164d84ca9bc5c0e1b32256c | [
"MIT"
] | null | null | null | """
Netlist Example Analysis
--------------------------------------------
# This Example shows how to Import Netlist in AEDT Nexxim
Netlists supported are HSPICE and, partially, Mentor
"""
import sys
import os
#########################################################
# Import Packages
# Download the example netlist and set up a scratch folder for the project.
from pyaedt import examples
netlist = examples.download_netlist()
from pyaedt import generate_unique_name
if os.name == "posix":
    tmpfold = os.environ["TMPDIR"]
else:
    tmpfold = os.environ["TEMP"]
temp_folder = os.path.join(tmpfold, generate_unique_name("Example"))
if not os.path.exists(temp_folder): os.makedirs(temp_folder)
myfile = os.path.join(netlist)
print(temp_folder)
#########################################################
# Import of Main Classes needed: Desktop and Circuit
from pyaedt import Circuit
from pyaedt import Desktop
###############################################################################
# Launch Desktop and Circuit
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# This example uses AEDT 2021.1 in graphical mode and SI units.
desktopVersion = "2021.1"
###############################################################################
# NonGraphical
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Set NonGraphical to True to run AEDT without opening its GUI.
NonGraphical = False
NewThread = True
###############################################################################
# Launch AEDT and Circuit Design
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Desktop starts AEDT with the given version and graphical mode; NewThread
# chooses between creating a new AEDT instance and attaching to a running one.
desktop = Desktop(desktopVersion, NonGraphical, NewThread)
aedtapp = Circuit()
#########################################################
# Save Project to temp folder. Can be changed.
# NOTE(review): the file name contains a typo ("my_firt_") — kept as-is
# because it is a runtime string and renaming would change behavior.
aedtapp.save_project(os.path.join(temp_folder, "my_firt_netlist.aedt"))
#########################################################
# Define a design variable (a "$" prefix would create a project variable).
aedtapp["Voltage"]="5"
#########################################################
# Create the schematic from the netlist. All components are parsed, but only
# specified categories are mapped: R, L, C, Q, U, J, V, I.
aedtapp.create_schematic_from_netlist(myfile)
#########################################################
# Close Project....or continue adding functionalities
if os.name != "posix":
    aedtapp.close_project()
    desktop.force_close_desktop()
#########################################################
| 25.125 | 134 | 0.542644 |
import sys
import os
| true | true |
f725615748650d8ad2045fa8fddd4a9cd2da6c74 | 2,799 | py | Python | torch/fx/experimental/unification/utils.py | Hacky-DH/pytorch | 80dc4be615854570aa39a7e36495897d8a040ecc | [
"Intel"
] | 60,067 | 2017-01-18T17:21:31.000Z | 2022-03-31T21:37:45.000Z | torch/fx/experimental/unification/utils.py | Hacky-DH/pytorch | 80dc4be615854570aa39a7e36495897d8a040ecc | [
"Intel"
] | 66,955 | 2017-01-18T17:21:38.000Z | 2022-03-31T23:56:11.000Z | torch/fx/experimental/unification/utils.py | Hacky-DH/pytorch | 80dc4be615854570aa39a7e36495897d8a040ecc | [
"Intel"
def hashable(x):
    """Return True if ``x`` can be hashed, False otherwise.

    >>> hashable(1)
    True
    >>> hashable([1])
    False
    """
    try:
        hash(x)
    except TypeError:
        return False
    return True
def transitive_get(key, d):
    """Transitively resolve ``key`` through dict ``d``.

    Follows ``key -> d[key] -> d[d[key]] -> ...`` until the current value
    is unhashable or absent from ``d``, then returns it.

    >>> d = {1: 2, 2: 3, 3: 4}
    >>> d.get(1)
    2
    >>> transitive_get(1, d)
    4
    """
    current = key
    while True:
        if not hashable(current) or current not in d:
            return current
        current = d[current]
def raises(err, lamda):
    """Return True if calling ``lamda`` raises ``err``, else False.

    Exceptions of other types are not caught and propagate to the caller.
    (The parameter is spelled ``lamda`` in the original interface.)
    """
    try:
        lamda()
    except err:
        return True
    return False
# Taken from theano/theano/gof/sched.py
# Avoids licensing issues because this was written by Matthew Rocklin
def _toposort(edges):
    """Topological sort via Kahn's algorithm [1] - O(nodes + vertices).

    inputs:
        edges - a dict of the form {a: {b, c}} where b and c depend on a
    outputs:
        L - an ordered list of nodes that satisfy the dependencies of edges

    >>> _toposort({1: (2, 3), 2: (3, )})
    [1, 2, 3]

    Closely follows the wikipedia page [2]

    [1] Kahn, Arthur B. (1962), "Topological sorting of large networks",
    Communications of the ACM
    [2] http://en.wikipedia.org/wiki/Toposort#Algorithms
    """
    # Incoming-edge sets: node -> set of nodes that must precede it.
    incoming = {node: set(parents)
                for node, parents in reverse_dict(edges).items()}
    # Nodes with no incoming edges may be emitted immediately.
    ready = {node for node in edges if node not in incoming}
    ordered = []
    while ready:
        node = ready.pop()
        ordered.append(node)
        for child in edges.get(node, ()):
            pending = incoming[child]
            assert node in pending
            pending.remove(node)
            if not pending:
                ready.add(child)
    # Any unresolved incoming edge means the graph contains a cycle.
    if any(incoming.get(node) for node in edges):
        raise ValueError("Input has cycles")
    return ordered
def reverse_dict(d):
    """Reverses direction of dependence dict

    >>> d = {'a': (1, 2), 'b': (2, 3), 'c':()}
    >>> reverse_dict(d)  # doctest: +SKIP
    {1: ('a',), 2: ('a', 'b'), 3: ('b',)}

    :note: dict order are not deterministic. As we iterate on the
        input dict, it make the output of this function depend on the
        dict order. So this function output order should be considered
        as undeterministic.
    """
    flipped = {}  # type: ignore[var-annotated]
    for source, targets in d.items():
        for target in targets:
            flipped[target] = flipped.get(target, ()) + (source,)
    return flipped
def xfail(func):
    """Assert that calling ``func`` raises an exception ("expected failure").

    Returns silently when ``func`` raises; raises if ``func`` completes
    without error.

    Bug fix: the original raised the "XFailed test passed" sentinel inside
    the same ``try`` block whose ``except Exception: pass`` immediately
    swallowed it, so an unexpectedly passing test was never reported.
    """
    try:
        func()
    except Exception:
        return  # the expected failure occurred
    raise Exception("XFailed test passed")
def freeze(d):
    """ Freeze container to hashable form

    >>> freeze(1)
    1
    >>> freeze([1, 2])
    (1, 2)
    >>> freeze({1: 2})  # doctest: +SKIP
    frozenset([(1, 2)])
    """
    if isinstance(d, dict):
        return frozenset(freeze(item) for item in d.items())
    if isinstance(d, set):
        return frozenset(freeze(item) for item in d)
    if isinstance(d, (tuple, list)):
        return tuple(freeze(item) for item in d)
    return d
| 26.913462 | 77 | 0.568775 | def hashable(x):
try:
hash(x)
return True
except TypeError:
return False
def transitive_get(key, d):
while hashable(key) and key in d:
key = d[key]
return key
def raises(err, lamda):
try:
lamda()
return False
except err:
return True
def _toposort(edges):
incoming_edges = reverse_dict(edges)
incoming_edges = dict((k, set(val)) for k, val in incoming_edges.items())
S = set((v for v in edges if v not in incoming_edges))
L = []
while S:
n = S.pop()
L.append(n)
for m in edges.get(n, ()):
assert n in incoming_edges[m]
incoming_edges[m].remove(n)
if not incoming_edges[m]:
S.add(m)
if any(incoming_edges.get(v, None) for v in edges):
raise ValueError("Input has cycles")
return L
def reverse_dict(d):
result = {}
for key in d:
for val in d[key]:
result[val] = result.get(val, tuple()) + (key, )
return result
def xfail(func):
try:
func()
raise Exception("XFailed test passed")
except Exception:
pass
def freeze(d):
if isinstance(d, dict):
return frozenset(map(freeze, d.items()))
if isinstance(d, set):
return frozenset(map(freeze, d))
if isinstance(d, (tuple, list)):
return tuple(map(freeze, d))
return d
| true | true |
f725615e2782bb4b02f5432ef2200adf2c96f1c1 | 1,971 | py | Python | dpaycli/instance.py | dpays/dpay-cli | dfa80898e1faea2cee92ebec6fe04873381bd40f | [
"MIT"
] | null | null | null | dpaycli/instance.py | dpays/dpay-cli | dfa80898e1faea2cee92ebec6fe04873381bd40f | [
"MIT"
] | null | null | null | dpaycli/instance.py | dpays/dpay-cli | dfa80898e1faea2cee92ebec6fe04873381bd40f | [
"MIT"
] | null | null | null | # This Python file uses the following encoding: utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import object
import dpaycli as stm
class SharedInstance(object):
    """Singleton holder for the shared DPay instance and its configuration."""
    instance = None  # lazily created by shared_dpay_instance()
    config = {}  # kwargs passed to stm.DPay() when the shared instance is built
def shared_dpay_instance():
    """Return the process-wide default DPay instance, creating it on demand.

    The purpose of this method is to offer a single default dpay instance
    that can be reused by multiple classes.

    .. code-block:: python

        from dpaycli.account import Account
        from dpaycli.instance import shared_dpay_instance
        account = Account("test")
        # is equivalent with
        account = Account("test", dpay_instance=shared_dpay_instance())
    """
    # Guard clause: reuse the existing instance when one is already set.
    if SharedInstance.instance:
        return SharedInstance.instance
    clear_cache()
    SharedInstance.instance = stm.DPay(**SharedInstance.config)
    return SharedInstance.instance
def set_shared_dpay_instance(dpay_instance):
    """ This method allows us to override default dpay instance for all users of
        ``SharedInstance.instance``.

        :param dpaycli.dpay.DPay dpay_instance: DPay instance
    """
    # Clear existing BlockchainObject caches before swapping in the new instance.
    clear_cache()
    SharedInstance.instance = dpay_instance
def clear_cache():
    """ Clear the BlockchainObject caches.
    """
    # Local import — presumably avoids a circular import at module load; confirm.
    from .blockchainobject import BlockchainObject
    BlockchainObject.clear_cache()
def set_shared_config(config):
    """ This allows to set a config that will be used when calling
        ``shared_dpay_instance`` and allows to define the configuration
        without requiring to actually create an instance.

        :param dict config: keyword arguments stored and later passed to
            ``stm.DPay`` by ``shared_dpay_instance``
        :raises AssertionError: if ``config`` is not a dict
    """
    if not isinstance(config, dict):
        # AssertionError type kept for backward compatibility; the message
        # was added because the original bare ``raise AssertionError()``
        # gave no hint about what went wrong.
        raise AssertionError(
            "config must be a dict, got %s" % type(config).__name__)
    SharedInstance.config.update(config)
    # If an instance is already set, delete it so the new config takes
    # effect on the next shared_dpay_instance() call.
    if SharedInstance.instance:
        clear_cache()
        SharedInstance.instance = None
| 29.863636 | 80 | 0.702689 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import object
import dpaycli as stm
class SharedInstance(object):
instance = None
config = {}
def shared_dpay_instance():
if not SharedInstance.instance:
clear_cache()
SharedInstance.instance = stm.DPay(**SharedInstance.config)
return SharedInstance.instance
def set_shared_dpay_instance(dpay_instance):
clear_cache()
SharedInstance.instance = dpay_instance
def clear_cache():
from .blockchainobject import BlockchainObject
BlockchainObject.clear_cache()
def set_shared_config(config):
if not isinstance(config, dict):
raise AssertionError()
SharedInstance.config.update(config)
if SharedInstance.instance:
clear_cache()
SharedInstance.instance = None
| true | true |
f725625a267ae68f9b9998f841a9d4c496b39647 | 2,394 | py | Python | examples/dfp/v201511/proposal_line_item_service/get_proposal_line_items_for_proposal.py | wbrp/googleads-python-lib | c0f8ce6c4acfe88ce8f913a4f0e0e92b548e1022 | [
"Apache-2.0"
] | 1 | 2020-05-23T11:32:32.000Z | 2020-05-23T11:32:32.000Z | examples/dfp/v201511/proposal_line_item_service/get_proposal_line_items_for_proposal.py | wbrp/googleads-python-lib | c0f8ce6c4acfe88ce8f913a4f0e0e92b548e1022 | [
"Apache-2.0"
] | null | null | null | examples/dfp/v201511/proposal_line_item_service/get_proposal_line_items_for_proposal.py | wbrp/googleads-python-lib | c0f8ce6c4acfe88ce8f913a4f0e0e92b548e1022 | [
"Apache-2.0"
] | 2 | 2018-04-20T02:16:33.000Z | 2020-11-12T20:58:54.000Z | #!/usr/bin/python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example gets all proposal line items that belong to a proposal.
To create proposal line items, run create_proposal_line_items.py."""
# Import appropriate modules from the client library.
from googleads import dfp
# Set the id of the proposal to get proposal line items from.
PROPOSAL_ID = 'INSERT_PROPOSAL_ID_HERE'
def main(client, proposal_id):
  """Print every proposal line item that belongs to the given proposal.

  Args:
    client: an initialized dfp.DfpClient instance.
    proposal_id: the id of the proposal whose line items are listed.
  """
  # Initialize appropriate service.
  proposal_line_item_service = client.GetService(
      'ProposalLineItemService',
      version='v201511')
  # Create statement object to only select proposal line items belonging to a
  # given proposal.
  values = [{
      'key': 'proposalId',
      'value': {
          'xsi_type': 'NumberValue',
          'value': proposal_id
      }
  }]
  query = 'WHERE proposalId = :proposalId ORDER BY id ASC'
  statement = dfp.FilterStatement(query, values)
  while True:
    # Get one page of proposal line items by statement.
    response = proposal_line_item_service.getProposalLineItemsByStatement(
        statement.ToStatement())
    if 'results' in response:
      # Display results.
      for idx, proposal_line_item in enumerate(response['results'],
                                               start=statement.offset):
        print(
            '%s) Proposal line item with id \'%s\', belonging to proposal id '
            '\'%s\', and named \'%s\' was found.' % (
                idx, proposal_line_item['id'], proposal_line_item['proposalId'],
                proposal_line_item['name']))
      statement.offset += dfp.SUGGESTED_PAGE_LIMIT
    else:
      break
  # BUG FIX: this line was a Python 2 `print` statement while the rest of the
  # function uses the print() function; the call form works on Python 2 and 3.
  print('\nNumber of results found: %s' % response['totalResultSetSize'])
if __name__ == '__main__':
  # Script entry point: initialize the client object from the stored
  # googleads.yaml configuration, then run the example.
  dfp_client = dfp.DfpClient.LoadFromStorage()
  main(dfp_client, PROPOSAL_ID)
| 33.71831 | 80 | 0.687552 |
"""This code example gets all proposal line items that belong to a proposal.
To create proposal line items, run create_proposal_line_items.py."""
from googleads import dfp
PROPOSAL_ID = 'INSERT_PROPOSAL_ID_HERE'
def main(client, proposal_id):
proposal_line_item_service = client.GetService(
'ProposalLineItemService',
version='v201511')
values = [{
'key': 'proposalId',
'value': {
'xsi_type': 'NumberValue',
'value': proposal_id
}
}]
query = 'WHERE proposalId = :proposalId ORDER BY id ASC'
statement = dfp.FilterStatement(query, values)
while True:
response = proposal_line_item_service.getProposalLineItemsByStatement(
statement.ToStatement())
if 'results' in response:
for idx, proposal_line_item in enumerate(response['results'],
start=statement.offset):
print(
'%s) Proposal line item with id \'%s\', belonging to proposal id '
'\'%s\', and named \'%s\' was found.' % (
idx, proposal_line_item['id'], proposal_line_item['proposalId'],
proposal_line_item['name']))
statement.offset += dfp.SUGGESTED_PAGE_LIMIT
else:
break
print '\nNumber of results found: %s' % response['totalResultSetSize']
if __name__ == '__main__':
dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client, PROPOSAL_ID)
| false | true |
f72562f7f2fd98b8968e23cb2d33be250c5fd8dd | 8,348 | py | Python | script/hassfest/requirements.py | basicpail/core | 5cc54618c5af3f75c08314bf2375cc7ac40d2b7e | [
"Apache-2.0"
] | 1 | 2020-10-01T13:36:50.000Z | 2020-10-01T13:36:50.000Z | script/hassfest/requirements.py | basicpail/core | 5cc54618c5af3f75c08314bf2375cc7ac40d2b7e | [
"Apache-2.0"
] | 69 | 2020-08-04T09:03:43.000Z | 2022-03-31T06:13:01.000Z | script/hassfest/requirements.py | basicpail/core | 5cc54618c5af3f75c08314bf2375cc7ac40d2b7e | [
"Apache-2.0"
] | 1 | 2020-10-26T10:44:32.000Z | 2020-10-26T10:44:32.000Z | """Validate requirements."""
from __future__ import annotations
from collections import deque
import json
import operator
import os
import re
import subprocess
import sys
from awesomeversion import AwesomeVersion, AwesomeVersionStrategy
from stdlib_list import stdlib_list
from tqdm import tqdm
from homeassistant.const import REQUIRED_PYTHON_VER
import homeassistant.util.package as pkg_util
from script.gen_requirements_all import COMMENT_REQUIREMENTS
from .model import Config, Integration
# Packages that gen_requirements_all deliberately comments out; normalized to
# the lowercase/dashed form pipdeptree reports.
IGNORE_PACKAGES = {
    commented.lower().replace("_", "-") for commented in COMMENT_REQUIREMENTS
}
# Matches a pinned requirement line and captures the bare package name.
PACKAGE_REGEX = re.compile(r"^(?:--.+\s)?([-_\.\w\d]+).*==.+$")
# Splits a requirement into optional pip flags (group 1) and the spec (group 2).
PIP_REGEX = re.compile(r"^(--.+\s)?([-_\.\w\d]+.*(?:==|>=|<=|~=|!=|<|>|===)?.*$)")
# The minimum supported Python version plus the next minor release.
SUPPORTED_PYTHON_TUPLES = [
    REQUIRED_PYTHON_VER[:2],
    tuple(map(operator.add, REQUIRED_PYTHON_VER, (0, 1, 0)))[:2],
]
SUPPORTED_PYTHON_VERSIONS = [
    ".".join(map(str, version_tuple)) for version_tuple in SUPPORTED_PYTHON_TUPLES
]
# Standard-library module names per supported Python version, for clash checks.
STD_LIBS = {version: set(stdlib_list(version)) for version in SUPPORTED_PYTHON_VERSIONS}
# Lazily populated by ensure_cache(); None means "not built yet".
PIPDEPTREE_CACHE = None
# Integrations exempt from the std-lib clash check.
IGNORE_VIOLATIONS = {
    # Still has standard library requirements.
    "acmeda",
    "blink",
    "ezviz",
    "hdmi_cec",
    "juicenet",
    "lupusec",
    "rainbird",
    "slide",
    "suez_water",
}
def normalize_package_name(requirement: str) -> str:
    """Extract the package name from a pinned requirement string.

    Returns the name normalized the way pipdeptree reports it
    (lowercase, dashes instead of underscores), or "" when the
    requirement does not match the expected ``pkg==version`` shape.
    """
    found = PACKAGE_REGEX.search(requirement)
    if found is None:
        return ""
    return found.group(1).lower().replace("_", "-")
def validate(integrations: dict[str, Integration], config: Config):
    """Handle requirements for integrations.

    Runs a cheap format-only pass unless full requirements validation was
    requested via ``config.requirements``.
    """
    # Check if we are doing format-only validation.
    if not config.requirements:
        for integration in integrations.values():
            validate_requirements_format(integration)
        return
    ensure_cache()
    # Check for incompatible requirements. The progress bar is suppressed in
    # CI and when only specific integrations are being validated.
    disable_tqdm = config.specific_integrations or os.environ.get("CI", False)
    for integration in tqdm(integrations.values(), disable=disable_tqdm):
        if not integration.manifest:
            continue
        validate_requirements(integration)
def validate_requirements_format(integration: Integration) -> bool:
    """Validate requirements format.

    Appends errors to ``integration.errors`` and returns True when this
    function added none (pre-existing errors are not counted).
    """
    start_errors = len(integration.errors)
    for req in integration.requirements:
        # A space usually indicates stray pip options or a malformed spec.
        if " " in req:
            integration.add_error(
                "requirements",
                f'Requirement "{req}" contains a space',
            )
            continue
        pkg, sep, version = req.partition("==")
        # Core integrations must pin an exact version.
        if not sep and integration.core:
            integration.add_error(
                "requirements",
                f'Requirement {req} need to be pinned "<pkg name>==<version>".',
            )
            continue
        # NOTE(review): for a non-core, unpinned requirement `version` is ""
        # here, so this reports an unparsable empty version — confirm intended.
        if AwesomeVersion(version).strategy == AwesomeVersionStrategy.UNKNOWN:
            integration.add_error(
                "requirements",
                f"Unable to parse package version ({version}) for {pkg}.",
            )
            continue
    return len(integration.errors) == start_errors
def validate_requirements(integration: Integration):
    """Validate an integration's requirements.

    Checks format, installs the requirements, resolves the transitive
    dependency tree, and flags packages that clash with the standard
    library of any supported Python version.
    """
    if not validate_requirements_format(integration):
        return
    # Some integrations have not been fixed yet so are allowed to have violations.
    if integration.domain in IGNORE_VIOLATIONS:
        return
    integration_requirements = set()
    integration_packages = set()
    for req in integration.requirements:
        package = normalize_package_name(req)
        if not package:
            integration.add_error(
                "requirements",
                f"Failed to normalize package name from requirement {req}",
            )
            return
        # Deliberately commented-out packages are skipped entirely.
        if package in IGNORE_PACKAGES:
            continue
        integration_requirements.add(req)
        integration_packages.add(package)
    if integration.disabled:
        return
    install_ok = install_requirements(integration, integration_requirements)
    if not install_ok:
        return
    all_integration_requirements = get_requirements(integration, integration_packages)
    if integration_requirements and not all_integration_requirements:
        integration.add_error(
            "requirements",
            f"Failed to resolve requirements {integration_requirements}",
        )
        return
    # Check for requirements incompatible with standard library.
    for version, std_libs in STD_LIBS.items():
        for req in all_integration_requirements:
            if req in std_libs:
                integration.add_error(
                    "requirements",
                    f"Package {req} is not compatible with Python {version} standard library",
                )
def ensure_cache():
    """Lazily build the module-level pipdeptree cache.

    The cache maps each installed package key to its pipdeptree record
    plus a ``dependencies`` set of direct dependency keys, e.g.::

        {
            "flake8-docstrings": {
                "key": "flake8-docstrings",
                "package_name": "flake8-docstrings",
                "installed_version": "1.5.0",
                "dependencies": {"flake8"},
            }
        }
    """
    global PIPDEPTREE_CACHE
    if PIPDEPTREE_CACHE is not None:
        return
    # "-w silence" suppresses warnings so stdout contains pure JSON.
    tree_json = subprocess.run(
        ["pipdeptree", "-w", "silence", "--json"],
        check=True,
        capture_output=True,
        text=True,
    ).stdout
    PIPDEPTREE_CACHE = {
        entry["package"]["key"]: {
            **entry["package"],
            "dependencies": {dep["key"] for dep in entry["dependencies"]},
        }
        for entry in json.loads(tree_json)
    }
def get_requirements(integration: Integration, packages: set[str]) -> set[str]:
    """Return the transitive closure of requirements for an integration.

    Walks the pipdeptree cache breadth-first starting from ``packages``.
    Unresolvable direct packages are reported as errors; unresolvable
    transitive dependencies are silently skipped.
    """
    ensure_cache()
    seen: set[str] = set()
    queue = deque(packages)
    while queue:
        name = queue.popleft()
        if name in seen:
            continue
        seen.add(name)
        entry = PIPDEPTREE_CACHE.get(name)
        if entry is not None:
            queue.extend(entry["dependencies"])
            continue
        # Only warn if direct dependencies could not be resolved.
        if name in packages:
            integration.add_error(
                "requirements", f"Failed to resolve requirements for {name}"
            )
    return seen
def install_requirements(integration: Integration, requirements: set[str]) -> bool:
    """Install integration requirements.

    Skips requirements that are already installed (checked first against
    the pipdeptree cache, then via pkg_util). Errors are recorded on the
    integration. Return True if successful.
    """
    global PIPDEPTREE_CACHE
    ensure_cache()
    for req in requirements:
        match = PIP_REGEX.search(req)
        if not match:
            integration.add_error(
                "requirements",
                f"Failed to parse requirement {req} before installation",
            )
            continue
        # Group 1: optional pip flags; group 2: the actual requirement spec.
        install_args = match.group(1)
        requirement_arg = match.group(2)
        is_installed = False
        normalized = normalize_package_name(requirement_arg)
        # Fast path: an exact version pin can be checked against the cache.
        if normalized and "==" in requirement_arg:
            ver = requirement_arg.split("==")[-1]
            item = PIPDEPTREE_CACHE.get(normalized)
            is_installed = item and item["installed_version"] == ver
        if not is_installed:
            try:
                is_installed = pkg_util.is_installed(req)
            except ValueError:
                is_installed = False
        if is_installed:
            continue
        args = [sys.executable, "-m", "pip", "install", "--quiet"]
        if install_args:
            args.append(install_args)
        args.append(requirement_arg)
        try:
            result = subprocess.run(args, check=True, capture_output=True, text=True)
        except subprocess.SubprocessError:
            integration.add_error(
                "requirements",
                f"Requirement {req} failed to install",
            )
        else:
            # Clear the pipdeptree cache if something got installed, so the
            # next ensure_cache() call rebuilds it with the new package.
            if "Successfully installed" in result.stdout:
                PIPDEPTREE_CACHE = None
    if integration.errors:
        return False
    return True
| 28.298305 | 94 | 0.622185 | from __future__ import annotations
from collections import deque
import json
import operator
import os
import re
import subprocess
import sys
from awesomeversion import AwesomeVersion, AwesomeVersionStrategy
from stdlib_list import stdlib_list
from tqdm import tqdm
from homeassistant.const import REQUIRED_PYTHON_VER
import homeassistant.util.package as pkg_util
from script.gen_requirements_all import COMMENT_REQUIREMENTS
from .model import Config, Integration
IGNORE_PACKAGES = {
commented.lower().replace("_", "-") for commented in COMMENT_REQUIREMENTS
}
PACKAGE_REGEX = re.compile(r"^(?:--.+\s)?([-_\.\w\d]+).*==.+$")
PIP_REGEX = re.compile(r"^(--.+\s)?([-_\.\w\d]+.*(?:==|>=|<=|~=|!=|<|>|===)?.*$)")
SUPPORTED_PYTHON_TUPLES = [
REQUIRED_PYTHON_VER[:2],
tuple(map(operator.add, REQUIRED_PYTHON_VER, (0, 1, 0)))[:2],
]
SUPPORTED_PYTHON_VERSIONS = [
".".join(map(str, version_tuple)) for version_tuple in SUPPORTED_PYTHON_TUPLES
]
STD_LIBS = {version: set(stdlib_list(version)) for version in SUPPORTED_PYTHON_VERSIONS}
PIPDEPTREE_CACHE = None
IGNORE_VIOLATIONS = {
"acmeda",
"blink",
"ezviz",
"hdmi_cec",
"juicenet",
"lupusec",
"rainbird",
"slide",
"suez_water",
}
def normalize_package_name(requirement: str) -> str:
match = PACKAGE_REGEX.search(requirement)
if not match:
return ""
package = match.group(1).lower().replace("_", "-")
return package
def validate(integrations: dict[str, Integration], config: Config):
if not config.requirements:
for integration in integrations.values():
validate_requirements_format(integration)
return
ensure_cache()
disable_tqdm = config.specific_integrations or os.environ.get("CI", False)
for integration in tqdm(integrations.values(), disable=disable_tqdm):
if not integration.manifest:
continue
validate_requirements(integration)
def validate_requirements_format(integration: Integration) -> bool:
start_errors = len(integration.errors)
for req in integration.requirements:
if " " in req:
integration.add_error(
"requirements",
f'Requirement "{req}" contains a space',
)
continue
pkg, sep, version = req.partition("==")
if not sep and integration.core:
integration.add_error(
"requirements",
f'Requirement {req} need to be pinned "<pkg name>==<version>".',
)
continue
if AwesomeVersion(version).strategy == AwesomeVersionStrategy.UNKNOWN:
integration.add_error(
"requirements",
f"Unable to parse package version ({version}) for {pkg}.",
)
continue
return len(integration.errors) == start_errors
def validate_requirements(integration: Integration):
if not validate_requirements_format(integration):
return
if integration.domain in IGNORE_VIOLATIONS:
return
integration_requirements = set()
integration_packages = set()
for req in integration.requirements:
package = normalize_package_name(req)
if not package:
integration.add_error(
"requirements",
f"Failed to normalize package name from requirement {req}",
)
return
if package in IGNORE_PACKAGES:
continue
integration_requirements.add(req)
integration_packages.add(package)
if integration.disabled:
return
install_ok = install_requirements(integration, integration_requirements)
if not install_ok:
return
all_integration_requirements = get_requirements(integration, integration_packages)
if integration_requirements and not all_integration_requirements:
integration.add_error(
"requirements",
f"Failed to resolve requirements {integration_requirements}",
)
return
for version, std_libs in STD_LIBS.items():
for req in all_integration_requirements:
if req in std_libs:
integration.add_error(
"requirements",
f"Package {req} is not compatible with Python {version} standard library",
)
def ensure_cache():
global PIPDEPTREE_CACHE
if PIPDEPTREE_CACHE is not None:
return
cache = {}
for item in json.loads(
subprocess.run(
["pipdeptree", "-w", "silence", "--json"],
check=True,
capture_output=True,
text=True,
).stdout
):
cache[item["package"]["key"]] = {
**item["package"],
"dependencies": {dep["key"] for dep in item["dependencies"]},
}
PIPDEPTREE_CACHE = cache
def get_requirements(integration: Integration, packages: set[str]) -> set[str]:
ensure_cache()
all_requirements = set()
to_check = deque(packages)
while to_check:
package = to_check.popleft()
if package in all_requirements:
continue
all_requirements.add(package)
item = PIPDEPTREE_CACHE.get(package)
if item is None:
if package in packages:
integration.add_error(
"requirements", f"Failed to resolve requirements for {package}"
)
continue
to_check.extend(item["dependencies"])
return all_requirements
def install_requirements(integration: Integration, requirements: set[str]) -> bool:
global PIPDEPTREE_CACHE
ensure_cache()
for req in requirements:
match = PIP_REGEX.search(req)
if not match:
integration.add_error(
"requirements",
f"Failed to parse requirement {req} before installation",
)
continue
install_args = match.group(1)
requirement_arg = match.group(2)
is_installed = False
normalized = normalize_package_name(requirement_arg)
if normalized and "==" in requirement_arg:
ver = requirement_arg.split("==")[-1]
item = PIPDEPTREE_CACHE.get(normalized)
is_installed = item and item["installed_version"] == ver
if not is_installed:
try:
is_installed = pkg_util.is_installed(req)
except ValueError:
is_installed = False
if is_installed:
continue
args = [sys.executable, "-m", "pip", "install", "--quiet"]
if install_args:
args.append(install_args)
args.append(requirement_arg)
try:
result = subprocess.run(args, check=True, capture_output=True, text=True)
except subprocess.SubprocessError:
integration.add_error(
"requirements",
f"Requirement {req} failed to install",
)
else:
if "Successfully installed" in result.stdout:
PIPDEPTREE_CACHE = None
if integration.errors:
return False
return True
| true | true |
f72563ebf144b5c4ed5d91972f5d809a8a4c52e3 | 2,010 | py | Python | a02_zapatamezaj.py | 2019-fall-csc-226/a02-loopy-turtles-loopy-languages-zapatamezaj-a02 | 0a390e93a2f32ce7dbaaf963e660e1f98fd5741c | [
"MIT"
] | null | null | null | a02_zapatamezaj.py | 2019-fall-csc-226/a02-loopy-turtles-loopy-languages-zapatamezaj-a02 | 0a390e93a2f32ce7dbaaf963e660e1f98fd5741c | [
"MIT"
] | null | null | null | a02_zapatamezaj.py | 2019-fall-csc-226/a02-loopy-turtles-loopy-languages-zapatamezaj-a02 | 0a390e93a2f32ce7dbaaf963e660e1f98fd5741c | [
"MIT"
] | null | null | null | ######################################################################
# Author: Jose Zapata Meza
# Username: zapatamezaj
# Assignment: A02: Loopy Turtle, Loopy Languages
# Purpose: Practice using the turtle library and loops
######################################################################
# Acknowledgements:
# licensed under a Creative Commons
# Attribution-Noncommercial-Share Alike 3.0 United States License.
######################################################################
# Imports the turtle files
import turtle
# Creates a window with a grey background
wn = turtle.Screen()
wn.bgcolor("grey")
# Creates a turtle that is named Jose
jose = turtle.Turtle()
jose.setheading(0)
# Jose's color is blue
jose.color('blue')
# The origin of Jose is moved
jose.penup()
jose.back(200)
jose.pendown()
# A turtle to make a road is created and placed to where the road should be
road = turtle.Turtle()
road.penup()
road.backward(340)
road.right(90)
road.forward(150)
road.left(90)
road.pendown()
road.forward(700)
# A turtle to draw a sun is created
sun = turtle.Turtle()
sun.penup()
sun.color('yellow')
sun.forward(250)
sun.left(90)
sun.forward(220)
sun.pendown()
# The turtle fills its shape to make the sun bright
sun.begin_fill()
sun.circle(50)
sun.end_fill()
sun.hideturtle()
# A loop is created to draw a rectangle
for i in range(2):
jose.forward(400)
jose.right(90)
jose.forward(100)
jose.right(90)
# Jose moves to draw the rear tire of the car
jose.forward(50)
jose.penup()
jose.right(90)
jose.forward(100)
jose.pendown()
jose.circle(50)
# Jose moves to draw the front tire of the car
jose.left(90)
jose.forward(200)
jose.right(90)
jose.circle(50)
# Jose moves to make the top part of the car
jose.left(90)
jose.forward(100)
jose.left(90)
jose.penup()
jose.forward(100)
jose.pendown()
jose.forward(100)
jose.left(90)
jose.forward(300)
jose.left(90)
jose.forward(100)
jose.hideturtle()
# To end the program, one clicks on the screen
wn.exitonclick()
| 21.157895 | 75 | 0.653234 | true | true | |
f72563f0c25147eb28eb5f134aa0a5390efffad7 | 11,449 | py | Python | client_code/Slider/__init__.py | hugetim/anvil-extras | ca83f6ada5149514c2affbe1ab081a4ca677c7e0 | [
"MIT"
] | null | null | null | client_code/Slider/__init__.py | hugetim/anvil-extras | ca83f6ada5149514c2affbe1ab081a4ca677c7e0 | [
"MIT"
] | null | null | null | client_code/Slider/__init__.py | hugetim/anvil-extras | ca83f6ada5149514c2affbe1ab081a4ca677c7e0 | [
"MIT"
] | null | null | null | # SPDX-License-Identifier: MIT
#
# Copyright (c) 2021 The Anvil Extras project team members listed at
# https://github.com/anvilistas/anvil-extras/graphs/contributors
#
# This software is published at https://github.com/anvilistas/anvil-extras
import anvil.js
from anvil import HtmlPanel as _HtmlPanel
from ..utils._component_helpers import _get_color, _html_injector, _spacing_property
from ._anvil_designer import SliderTemplate
__version__ = "1.7.1"
noui_version = "15.4.0"
_html_injector.cdn(
f"https://cdn.jsdelivr.net/npm/nouislider@{noui_version}/dist/nouislider.min.css"
)
_html_injector.css(
"""
.anvil-slider-container {
padding: 10px 0;
}
.anvil-slider-container.has-pips {
padding-bottom: 40px;
}
.anvil-container-overflow, .anvil-panel-col {
overflow: visible;
}
.noUi-connect {
background: var(--primary);
}
.noUi-horizontal .noUi-handle {
width: 34px;
height: 34px;
right: -17px;
top: -10px;
border-radius: 50%;
}
.noUi-handle::before, .noUi-handle::after {
content: none
}
"""
)
_Slider = anvil.js.import_from(
f"https://cdn.skypack.dev/nouislider@{noui_version}"
).default
import json
def _as_list(x):
return x if isinstance(x, list) else [x]
def _from_list(x):
return x[0] if isinstance(x, list) else x
def _parse(s, force_list=False):
if not isinstance(s, str):
return s
s = s.lower().strip()
if not s:
return None if not force_list else []
if ("," in s or force_list) and s[0] != "[":
s = "[" + s + "]"
try:
return json.loads(s)
except Exception:
return [] if force_list else s
try:
    # str.removeprefix/removesuffix were added in Python 3.9 and are not
    # currently available in Skulpt, so fall back to manual slicing there.
    _removeprefix = str.removeprefix
    _removesuffix = str.removesuffix
except AttributeError:
    def _removeprefix(s, prefix):
        # Strip ``prefix`` from the start of ``s`` if present.
        return s[len(prefix) :] if s.startswith(prefix) else s
    def _removesuffix(s, suffix):
        # Strip ``suffix`` from the end of ``s`` if present.
        return s[: len(s) - len(suffix)] if s.endswith(suffix) else s
def _wrap_formatter(formatter):
fto = formatter["to"]
ffrom = formatter["from"]
def wrap_to(f: float, *args) -> str:
s = fto(f)
if not isinstance(s, str):
raise TypeError(
f"Custom formatter returned {type(s).__name__} (expected str)"
)
return s
def wrap_from(s: str, *args) -> float:
#### This function is called from javascript so accept *args
if not isinstance(s, str):
raise TypeError(
f"got an unexpected value when trying to assign a value to the slider, (got {s})"
)
try:
return ffrom(s)
except Exception as e:
try:
# we may have just been give a number so do the obvious thing
res = float(s)
return int(res) if res.is_integer() else res
except Exception:
raise RuntimeError(f"your custom formatter raised an exception: {e!r}")
return {"to": wrap_to, "from": wrap_from, "format_spec": formatter}
def _get_formatter(formatspec: str) -> dict:
"""
Expecting a format spec e.g. '.2f'
Or a simple string '£{:.2f}'
"""
if isinstance(formatspec, dict):
return _wrap_formatter(formatspec)
if not isinstance(formatspec, str):
raise TypeError("expected property format to be of type str")
first = formatspec.find("{")
last = formatspec.find("}")
prefix = "" if first == -1 else formatspec[:first]
suffix = "" if last == -1 else formatspec[last + 1 :]
type = formatspec[len(formatspec) - 1] if last == -1 else formatspec[last - 1]
def to_format(f: float, *args) -> str:
# Used in javascript world so expects extra args
try:
return format(f, formatspec) if first == -1 else formatspec.format(f)
except Exception:
return f # better just to return what was passed to us
# this will raise an error if we have an invalid spec
format(1.1, formatspec) if first == -1 else formatspec.format(1.1)
def from_format(s: str, *args) -> float:
# Used in javascript world so expects extra args
if not isinstance(s, str):
raise TypeError(
f"got an unexpected value when trying to assign a value to the slider, (got {s})"
)
s = (
_removesuffix(_removeprefix(s, prefix), suffix)
.strip()
.replace(",", "")
.replace("_", "")
)
has_percent = type == "%" and s[len(s) - 1] == "%"
if has_percent:
s = s[: len(s) - 1]
try:
f = float(s)
except Exception:
return False
if has_percent:
f = f / 100
return int(f) if f.is_integer() else f
# noUiSlider requires a format like {from: (float) => str, to: (str) => float}
return {"from": from_format, "to": to_format, "format_spec": formatspec}
def _prop_getter(prop, fget=None):
return lambda self: self._props[prop] if fget is None else fget(self._props[prop])
def _slider_prop(prop, fset=None, fget=None):
    """Create a property mirroring ``prop`` into the noUiSlider options
    whenever it is assigned, with optional set/get transforms."""
    def setter(self, value):
        if fset is not None:
            value = fset(value)
        self._props[prop] = value
        options = {prop: value}
        if prop == "format":
            # A new format invalidates the rendered pips as well.
            options["pips"] = self._make_pips()
        self._slider.updateOptions(options)
    return property(_prop_getter(prop, fget), setter)
def _min_max_prop(prop):
def getter(self):
return self._props["range"][prop]
def setter(self, value):
r = self._props["range"]
r[prop] = value
self._slider.updateOptions({"range": r})
return property(getter, setter)
def _pips_prop(prop):
    """Property for a pips-related option; any assignment rebuilds the
    pips configuration and pushes it to the slider."""
    def setter(self, value):
        self._props[prop] = value
        rebuilt = self._make_pips()
        self._toggle_has_pips(rebuilt)
        self._slider.updateOptions({"pips": rebuilt})
    return property(_prop_getter(prop), setter)
# Default values for every designer property of the Slider component.
_defaults = {
    # noUiSlider behaviour options
    "animate": True,
    "start": 20,
    "step": None,
    "tooltips": False,
    "connect": False,
    "behaviour": "tap",
    "format": None,
    # pips (tick mark) options
    "pips": None,
    "pips_mode": None,
    "pips_values": [],
    "pips_density": -1,
    "pips_stepped": True,
    # handle movement constraints
    "margin": None,
    "padding": None,
    "limit": None,
    # range: either an explicit dict or derived from min/max
    "range": None,
    "min": 0,
    "max": 100,
    # standard anvil component properties
    "visible": True,
    "enabled": True,
    "spacing_above": "small",
    "spacing_below": "small",
    # value properties (applied after construction when not None)
    "value": None,
    "values": None,
    "formatted_value": None,
    "formatted_values": None,
}
class Slider(SliderTemplate):
    """Anvil component wrapping the noUiSlider javascript widget."""
    def __init__(self, **properties):
        # Any code you write here will run when the form opens.
        dom_node = self._dom_node = anvil.js.get_dom_node(self)
        dom_node.classList.add("anvil-slider-container")
        self._slider_node = dom_node.querySelector(".anvil-slider")
        # remove the script to stop them loading
        while dom_node.firstElementChild:
            dom_node.removeChild(dom_node.firstElementChild)
        dom_node.append(self._slider_node)
        # Merge designer properties over the defaults (dict | dict, 3.9+).
        props = self._props = _defaults | properties
        # These properties arrive as strings from the designer - parse them.
        for prop in (
            "start",
            "connect",
            "margin",
            "padding",
            "limit",
            "pips_values",
        ):
            props[prop] = _parse(props[prop], prop == "pips_values")
        props["range"] = props["range"] or {"min": props["min"], "max": props["max"]}
        props["format"] = _get_formatter(props["format"] or ".2f")
        pips = self._make_pips()
        self._toggle_has_pips(pips)
        try:
            self._slider = _Slider.create(self._slider_node, props | {"pips": pips})
        except Exception as e:
            # Re-raise with the library name swapped for this component's name.
            raise RuntimeError(repr(e).replace("noUiSlider", "Slider"))
        ###### EVENTS ######
        # Forward noUiSlider events to anvil events, passing the handle index.
        self._slider.on("slide", lambda a, h, *e: self.raise_event("slide", handle=h))
        self._slider.on("change", lambda a, h, *e: self.raise_event("change", handle=h))
        ###### PROPS TO INIT ######
        # Apply remaining properties through the normal property setters.
        always = {p: props[p] for p in ("color", "spacing_above", "spacing_below")}
        if_true = {
            p: props[p]
            for p in ("formatted_value", "formatted_values", "value", "values")
            if props[p] is not None
        }
        if_false = {p: props[p] for p in ("enabled", "visible") if not props[p]}
        self.init_components(**always, **if_false, **if_true)
    ###### VALUE PROPERTIES ######
    def _value_setter(self, val):
        # Setting accepts a single value or a list of values.
        self._slider.set(val)
    def _value(self):
        # get(True) returns the unformatted numeric value(s).
        return _from_list(self._slider.get(True))
    def _values(self):
        return _as_list(self._slider.get(True))
    def _formatted_value(self):
        # get() returns the value(s) as formatted strings.
        return _from_list(self._slider.get())
    def _formatted_values(self):
        return _as_list(self._slider.get())
    value = property(_value, _value_setter)
    values = property(_values, _value_setter)
    formatted_value = property(_formatted_value, _value_setter)
    formatted_values = property(_formatted_values, _value_setter)
    ###### noUiSlider PROPS ######
    connect = _slider_prop("connect")  # not dynamic
    behaviour = _slider_prop("behaviour")  # not dynamic
    margin = _slider_prop("margin")
    padding = _slider_prop("padding")
    limit = _slider_prop("limit")
    step = _slider_prop("step")
    start = _slider_prop("start")
    range = _slider_prop("range")
    min = _min_max_prop("min")
    max = _min_max_prop("max")
    tooltips = _slider_prop("tooltips")
    animate = _slider_prop("animate")
    # The format property stores a compiled formatter but reads back the
    # original spec string/dict.
    format = _slider_prop(
        "format", fset=lambda s: _get_formatter(s), fget=lambda d: d["format_spec"]
    )
    ###### PIPS PROPS ######
    pips = _pips_prop("pips")
    pips_mode = _pips_prop("pips_mode")
    pips_values = _pips_prop("pips_values")
    pips_density = _pips_prop("pips_density")
    pips_stepped = _pips_prop("pips_stepped")
    def _toggle_has_pips(self, pips):
        # The container needs extra bottom padding when pips are shown.
        self._dom_node.classList.toggle("has-pips", bool(pips))
    def _make_pips(self):
        """Build the noUiSlider pips option from the pips_* properties."""
        props = self._props
        pips = props["pips"]
        if not pips:
            return None
        elif pips is True:
            return {
                "format": props["format"],
                "mode": props["pips_mode"],
                "values": props["pips_values"],
                "density": props["pips_density"],
                "stepped": props["pips_stepped"],
            }
        elif isinstance(pips, dict):
            return pips
        else:
            raise TypeError(f"pips should be a bool or a dict, got {type(pips)}")
    ###### VISUAL PROPS ######
    @property
    def enabled(self):
        # noUiSlider uses a "disabled" attribute on the slider element.
        return not self._slider_node.getAttribute("disabled")
    @enabled.setter
    def enabled(self, value):
        if value:
            self._slider_node.removeAttribute("disabled")
        else:
            self._slider_node.setAttribute("disabled", True)
    @property
    def color(self):
        return self._color
    @color.setter
    def color(self, value):
        self._color = value
        # The injected CSS styles .noUi-connect with var(--primary).
        self._dom_node.style.setProperty("--primary", _get_color(value))
    spacing_above = _spacing_property("above")
    spacing_below = _spacing_property("below")
    visible = _HtmlPanel.visible
    ###### METHODS ######
    def reset(self):
        """Reset the slider to its start value(s) and write back."""
        self._slider.reset()
        self.raise_event("x-writeback")
| 29.507732 | 97 | 0.599354 |
import anvil.js
from anvil import HtmlPanel as _HtmlPanel
from ..utils._component_helpers import _get_color, _html_injector, _spacing_property
from ._anvil_designer import SliderTemplate
__version__ = "1.7.1"
noui_version = "15.4.0"
_html_injector.cdn(
f"https://cdn.jsdelivr.net/npm/nouislider@{noui_version}/dist/nouislider.min.css"
)
_html_injector.css(
"""
.anvil-slider-container {
padding: 10px 0;
}
.anvil-slider-container.has-pips {
padding-bottom: 40px;
}
.anvil-container-overflow, .anvil-panel-col {
overflow: visible;
}
.noUi-connect {
background: var(--primary);
}
.noUi-horizontal .noUi-handle {
width: 34px;
height: 34px;
right: -17px;
top: -10px;
border-radius: 50%;
}
.noUi-handle::before, .noUi-handle::after {
content: none
}
"""
)
_Slider = anvil.js.import_from(
f"https://cdn.skypack.dev/nouislider@{noui_version}"
).default
import json
def _as_list(x):
return x if isinstance(x, list) else [x]
def _from_list(x):
return x[0] if isinstance(x, list) else x
def _parse(s, force_list=False):
if not isinstance(s, str):
return s
s = s.lower().strip()
if not s:
return None if not force_list else []
if ("," in s or force_list) and s[0] != "[":
s = "[" + s + "]"
try:
return json.loads(s)
except Exception:
return [] if force_list else s
try:
_removeprefix = str.removeprefix
_removesuffix = str.removesuffix
except AttributeError:
def _removeprefix(s, prefix):
return s[len(prefix) :] if s.startswith(prefix) else s
def _removesuffix(s, suffix):
return s[: len(s) - len(suffix)] if s.endswith(suffix) else s
def _wrap_formatter(formatter):
fto = formatter["to"]
ffrom = formatter["from"]
def wrap_to(f: float, *args) -> str:
s = fto(f)
if not isinstance(s, str):
raise TypeError(
f"Custom formatter returned {type(s).__name__} (expected str)"
)
return s
def wrap_from(s: str, *args) -> float:
)
try:
return ffrom(s)
except Exception as e:
try:
res = float(s)
return int(res) if res.is_integer() else res
except Exception:
raise RuntimeError(f"your custom formatter raised an exception: {e!r}")
return {"to": wrap_to, "from": wrap_from, "format_spec": formatter}
def _get_formatter(formatspec: str) -> dict:
if isinstance(formatspec, dict):
return _wrap_formatter(formatspec)
if not isinstance(formatspec, str):
raise TypeError("expected property format to be of type str")
first = formatspec.find("{")
last = formatspec.find("}")
prefix = "" if first == -1 else formatspec[:first]
suffix = "" if last == -1 else formatspec[last + 1 :]
type = formatspec[len(formatspec) - 1] if last == -1 else formatspec[last - 1]
def to_format(f: float, *args) -> str:
try:
return format(f, formatspec) if first == -1 else formatspec.format(f)
except Exception:
return f
format(1.1, formatspec) if first == -1 else formatspec.format(1.1)
def from_format(s: str, *args) -> float:
if not isinstance(s, str):
raise TypeError(
f"got an unexpected value when trying to assign a value to the slider, (got {s})"
)
s = (
_removesuffix(_removeprefix(s, prefix), suffix)
.strip()
.replace(",", "")
.replace("_", "")
)
has_percent = type == "%" and s[len(s) - 1] == "%"
if has_percent:
s = s[: len(s) - 1]
try:
f = float(s)
except Exception:
return False
if has_percent:
f = f / 100
return int(f) if f.is_integer() else f
return {"from": from_format, "to": to_format, "format_spec": formatspec}
def _prop_getter(prop, fget=None):
return lambda self: self._props[prop] if fget is None else fget(self._props[prop])
def _slider_prop(prop, fset=None, fget=None):
def setter(self, value):
value = value if fset is None else fset(value)
self._props[prop] = value
if prop == "format":
pips = self._make_pips()
self._slider.updateOptions({prop: value, "pips": pips})
else:
self._slider.updateOptions({prop: value})
return property(_prop_getter(prop, fget), setter)
def _min_max_prop(prop):
def getter(self):
return self._props["range"][prop]
def setter(self, value):
r = self._props["range"]
r[prop] = value
self._slider.updateOptions({"range": r})
return property(getter, setter)
def _pips_prop(prop):
    """Property descriptor for the pips_* options; any write rebuilds the pips
    configuration and pushes it to the widget."""
    def _write(self, new_value):
        self._props[prop] = new_value
        rebuilt = self._make_pips()
        self._toggle_has_pips(rebuilt)
        self._slider.updateOptions({"pips": rebuilt})
    return property(_prop_getter(prop), _write)
# Default property values for Slider; Slider.__init__ overlays the kwargs it
# receives on top of this mapping (self._props = _defaults | properties).
_defaults = {
    "animate": True,
    "start": 20,
    "step": None,
    "tooltips": False,
    "connect": False,
    "behaviour": "tap",
    "format": None,
    # pips (tick mark) configuration
    "pips": None,
    "pips_mode": None,
    "pips_values": [],
    "pips_density": -1,
    "pips_stepped": True,
    # handle movement constraints
    "margin": None,
    "padding": None,
    "limit": None,
    # range falls back to {"min": ..., "max": ...} built from the min/max props
    "range": None,
    "min": 0,
    "max": 100,
    # standard Anvil component properties
    "visible": True,
    "enabled": True,
    "spacing_above": "small",
    "spacing_below": "small",
    # initial value(s); resolved during __init__
    "value": None,
    "values": None,
    "formatted_value": None,
    "formatted_values": None,
}
class Slider(SliderTemplate):
def __init__(self, **properties):
dom_node = self._dom_node = anvil.js.get_dom_node(self)
dom_node.classList.add("anvil-slider-container")
self._slider_node = dom_node.querySelector(".anvil-slider")
while dom_node.firstElementChild:
dom_node.removeChild(dom_node.firstElementChild)
dom_node.append(self._slider_node)
props = self._props = _defaults | properties
for prop in (
"start",
"connect",
"margin",
"padding",
"limit",
"pips_values",
):
props[prop] = _parse(props[prop], prop == "pips_values")
props["range"] = props["range"] or {"min": props["min"], "max": props["max"]}
props["format"] = _get_formatter(props["format"] or ".2f")
pips = self._make_pips()
self._toggle_has_pips(pips)
try:
self._slider = _Slider.create(self._slider_node, props | {"pips": pips})
except Exception as e:
raise RuntimeError(repr(e).replace("noUiSlider", "Slider"))
er.on("change", lambda a, h, *e: self.raise_event("change", handle=h))
for p in ("formatted_value", "formatted_values", "value", "values")
if props[p] is not None
}
if_false = {p: props[p] for p in ("enabled", "visible") if not props[p]}
self.init_components(**always, **if_false, **if_true)
elf):
return _as_list(self._slider.get(True))
def _formatted_value(self):
return _from_list(self._slider.get())
def _formatted_values(self):
return _as_list(self._slider.get())
value = property(_value, _value_setter)
values = property(_values, _value_setter)
formatted_value = property(_formatted_value, _value_setter)
formatted_values = property(_formatted_values, _value_setter)
")
limit = _slider_prop("limit")
step = _slider_prop("step")
start = _slider_prop("start")
range = _slider_prop("range")
min = _min_max_prop("min")
max = _min_max_prop("max")
tooltips = _slider_prop("tooltips")
animate = _slider_prop("animate")
format = _slider_prop(
"format", fset=lambda s: _get_formatter(s), fget=lambda d: d["format_spec"]
)
nsity = _pips_prop("pips_density")
pips_stepped = _pips_prop("pips_stepped")
    def _toggle_has_pips(self, pips):
        # Add/remove the "has-pips" css class so the container reserves
        # vertical space for the pips only when they are shown.
        self._dom_node.classList.toggle("has-pips", bool(pips))
def _make_pips(self):
props = self._props
pips = props["pips"]
if not pips:
return None
elif pips is True:
return {
"format": props["format"],
"mode": props["pips_mode"],
"values": props["pips_values"],
"density": props["pips_density"],
"stepped": props["pips_stepped"],
}
elif isinstance(pips, dict):
return pips
else:
raise TypeError(f"pips should be a bool or a dict, got {type(pips)}")
(self, value):
if value:
self._slider_node.removeAttribute("disabled")
else:
self._slider_node.setAttribute("disabled", True)
    @property
    def color(self):
        """Primary color of the slider (a CSS color or Anvil theme color name)."""
        return self._color
    @color.setter
    def color(self, value):
        # _get_color (defined elsewhere in the package) presumably resolves
        # theme color names to concrete CSS values -- confirm.
        self._color = value
        self._dom_node.style.setProperty("--primary", _get_color(value))
spacing_above = _spacing_property("above")
spacing_below = _spacing_property("below")
visible = _HtmlPanel.visible
| true | true |
f72564206e9156bd00056bf1ef33a09cb7439d94 | 338 | py | Python | server/twitter/migrations/0004_auto_20200822_2202.py | vanviethieuanh/twitter-clone | bac1be8118514ec8ce169eb0c0f4d5658ab2013a | [
"MIT"
] | 1 | 2021-12-29T00:49:51.000Z | 2021-12-29T00:49:51.000Z | server/twitter/migrations/0004_auto_20200822_2202.py | vanviethieuanh/twitter-clone | bac1be8118514ec8ce169eb0c0f4d5658ab2013a | [
"MIT"
] | null | null | null | server/twitter/migrations/0004_auto_20200822_2202.py | vanviethieuanh/twitter-clone | bac1be8118514ec8ce169eb0c0f4d5658ab2013a | [
"MIT"
] | null | null | null | # Generated by Django 3.0.3 on 2020-08-22 15:02
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('twitter', '0003_auto_20200822_2127'),
]
operations = [
migrations.AlterUniqueTogether(
name='follow',
unique_together=set(),
),
]
| 18.777778 | 47 | 0.60355 |
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated: drop the unique_together constraint on ``follow``."""
    dependencies = [
        ('twitter', '0003_auto_20200822_2127'),
    ]
    operations = [
        migrations.AlterUniqueTogether(
            name='follow',
            # an empty set removes all unique_together constraints from the model
            unique_together=set(),
        ),
    ]
| true | true |
f72564424a1cdf41e8a72d242dbdc9892b53b527 | 3,217 | py | Python | model/snl_block.py | ustbjdl1021/improved_snl_unet | 7f7bf092153e1a535337b80bd1b673eff3ddec52 | [
"MIT"
] | null | null | null | model/snl_block.py | ustbjdl1021/improved_snl_unet | 7f7bf092153e1a535337b80bd1b673eff3ddec52 | [
"MIT"
] | 2 | 2022-03-30T13:05:27.000Z | 2022-03-31T13:43:22.000Z | model/snl_block.py | ustbjdl1021/improved_snl_unet | 7f7bf092153e1a535337b80bd1b673eff3ddec52 | [
"MIT"
] | 1 | 2022-03-31T13:33:30.000Z | 2022-03-31T13:33:30.000Z | import torch
import torch.nn as nn
class ImprovedSNL(nn.Module):
    """Improved Spectral Non-Local block.

    Builds a symmetric, degree-normalized affinity matrix over spatial
    positions and uses it to aggregate row- and column-transformed features,
    repeating the refinement ``stage_num`` times with a residual connection.

    Args:
        in_channels: number of channels of the input feature map.
        transfer_channels: channels used for the embedded 1x1 transforms.
        stage_num: number of refinement stages applied in forward().
    """

    def __init__(self, in_channels, transfer_channels, stage_num=2):
        super(ImprovedSNL, self).__init__()
        self.in_channels = in_channels
        self.transfer_channels = transfer_channels
        self.stage_num = stage_num
        # 1x1 embeddings used to build the affinity matrix.
        self.transform_t = nn.Conv2d(in_channels, transfer_channels, kernel_size=1, stride=1, bias=False)
        self.transform_p = nn.Conv2d(in_channels, transfer_channels, kernel_size=1, stride=1, bias=False)
        # 1x1 embeddings for the two aggregation branches.
        self.row_transform = nn.Conv2d(in_channels, transfer_channels, kernel_size=1, stride=1, bias=False)
        self.column_transform = nn.Conv2d(in_channels, transfer_channels, kernel_size=1, stride=1, bias=False)
        # Project each branch back to in_channels before the residual sum.
        self.w1 = nn.Conv2d(transfer_channels, in_channels, kernel_size=1, stride=1, bias=False)
        self.w2 = nn.Conv2d(transfer_channels, in_channels, kernel_size=1, stride=1, bias=False)
        self.bn = nn.BatchNorm2d(in_channels)
        self._init_params()

    def _init_params(self):
        """Kaiming init for convs, unit-gamma/zero-beta BN, small-normal linears."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)

    def getAtt(self, x):
        """Return the (b, h*w, h*w) symmetric, degree-normalized affinity matrix."""
        t = self.transform_t(x)
        p = self.transform_p(x)
        b, c, h, w = t.size()
        t = t.view(b, c, -1).permute(0, 2, 1)
        p = p.view(b, c, -1)
        m = torch.bmm(torch.relu(t), torch.relu(p))
        # Bug fix: symmetrize out-of-place. The original `m += m.permute(0, 2, 1)`
        # wrote into storage it was simultaneously reading through a transposed
        # view, which is undefined behaviour for in-place ops in PyTorch.
        m = m + m.permute(0, 2, 1)
        m_hat = m / 2
        # D^{-1/2} normalization; positions with zero degree stay at 0.
        degree = torch.sum(m_hat, dim=2)
        degree[degree != 0] = torch.sqrt(1.0 / degree[degree != 0])
        affinity_matrix = m_hat * degree.unsqueeze(1)
        affinity_matrix *= degree.unsqueeze(2)
        return affinity_matrix

    def stage(self, x):
        """One refinement stage: aggregate row/column features through the
        affinity matrix, fuse, batch-normalize, and add the residual input."""
        affinity_matrix = self.getAtt(x)
        column_features = self.column_transform(x)
        b, c, h, w = column_features.size()
        column_features = column_features.view(b, c, -1)
        column_features = torch.bmm(column_features, affinity_matrix).contiguous().view(b,c,h,w)
        column_features = self.w1(column_features)
        row_features = self.row_transform(x)
        b, c, h, w = row_features.size()
        row_features = row_features.view(b, c, -1).permute(0, 2, 1)
        row_features = torch.bmm(affinity_matrix, row_features).permute(0, 2, 1).contiguous().view(b,c,h,w)
        row_features = self.w2(row_features)
        output = column_features + row_features
        output = self.bn(output)
        output = output + x  # residual connection
        return output

    def forward(self, x):
        """Apply ``stage_num`` refinement stages to x (shape is preserved)."""
        for stage in range(self.stage_num):
            x = self.stage(x)
        return x
| 36.556818 | 111 | 0.587193 | import torch
import torch.nn as nn
class ImprovedSNL(nn.Module):
    """Improved Spectral Non-Local block.

    Builds a symmetric, degree-normalized affinity matrix over spatial
    positions and uses it to aggregate row- and column-transformed features,
    repeating the refinement ``stage_num`` times with a residual connection.

    Args:
        in_channels: number of channels of the input feature map.
        transfer_channels: channels used for the embedded 1x1 transforms.
        stage_num: number of refinement stages applied in forward().
    """

    def __init__(self, in_channels, transfer_channels, stage_num=2):
        super(ImprovedSNL, self).__init__()
        self.in_channels = in_channels
        self.transfer_channels = transfer_channels
        self.stage_num = stage_num
        # 1x1 embeddings used to build the affinity matrix.
        self.transform_t = nn.Conv2d(in_channels, transfer_channels, kernel_size=1, stride=1, bias=False)
        self.transform_p = nn.Conv2d(in_channels, transfer_channels, kernel_size=1, stride=1, bias=False)
        # 1x1 embeddings for the two aggregation branches.
        self.row_transform = nn.Conv2d(in_channels, transfer_channels, kernel_size=1, stride=1, bias=False)
        self.column_transform = nn.Conv2d(in_channels, transfer_channels, kernel_size=1, stride=1, bias=False)
        # Project each branch back to in_channels before the residual sum.
        self.w1 = nn.Conv2d(transfer_channels, in_channels, kernel_size=1, stride=1, bias=False)
        self.w2 = nn.Conv2d(transfer_channels, in_channels, kernel_size=1, stride=1, bias=False)
        self.bn = nn.BatchNorm2d(in_channels)
        self._init_params()

    def _init_params(self):
        """Kaiming init for convs, unit-gamma/zero-beta BN, small-normal linears."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)

    def getAtt(self, x):
        """Return the (b, h*w, h*w) symmetric, degree-normalized affinity matrix."""
        t = self.transform_t(x)
        p = self.transform_p(x)
        b, c, h, w = t.size()
        t = t.view(b, c, -1).permute(0, 2, 1)
        p = p.view(b, c, -1)
        m = torch.bmm(torch.relu(t), torch.relu(p))
        # Bug fix: symmetrize out-of-place. The original `m += m.permute(0, 2, 1)`
        # wrote into storage it was simultaneously reading through a transposed
        # view, which is undefined behaviour for in-place ops in PyTorch.
        m = m + m.permute(0, 2, 1)
        m_hat = m / 2
        # D^{-1/2} normalization; positions with zero degree stay at 0.
        degree = torch.sum(m_hat, dim=2)
        degree[degree != 0] = torch.sqrt(1.0 / degree[degree != 0])
        affinity_matrix = m_hat * degree.unsqueeze(1)
        affinity_matrix *= degree.unsqueeze(2)
        return affinity_matrix

    def stage(self, x):
        """One refinement stage: aggregate row/column features through the
        affinity matrix, fuse, batch-normalize, and add the residual input."""
        affinity_matrix = self.getAtt(x)
        column_features = self.column_transform(x)
        b, c, h, w = column_features.size()
        column_features = column_features.view(b, c, -1)
        column_features = torch.bmm(column_features, affinity_matrix).contiguous().view(b,c,h,w)
        column_features = self.w1(column_features)
        row_features = self.row_transform(x)
        b, c, h, w = row_features.size()
        row_features = row_features.view(b, c, -1).permute(0, 2, 1)
        row_features = torch.bmm(affinity_matrix, row_features).permute(0, 2, 1).contiguous().view(b,c,h,w)
        row_features = self.w2(row_features)
        output = column_features + row_features
        output = self.bn(output)
        output = output + x  # residual connection
        return output

    def forward(self, x):
        """Apply ``stage_num`` refinement stages to x (shape is preserved)."""
        for stage in range(self.stage_num):
            x = self.stage(x)
        return x
f72565ec076f5d92978bba872dc9b48d63e0a69e | 10,084 | py | Python | regym/rl_algorithms/algorithms/PPO/rnd_loss.py | KnwSondess/Regym | 825c7dacf955a3e2f6c658c0ecb879a0ca036c1a | [
"MIT"
] | 2 | 2020-09-13T15:53:20.000Z | 2020-12-08T15:57:05.000Z | regym/rl_algorithms/algorithms/PPO/rnd_loss.py | KnwSondess/Regym | 825c7dacf955a3e2f6c658c0ecb879a0ca036c1a | [
"MIT"
] | null | null | null | regym/rl_algorithms/algorithms/PPO/rnd_loss.py | KnwSondess/Regym | 825c7dacf955a3e2f6c658c0ecb879a0ca036c1a | [
"MIT"
] | 1 | 2021-09-20T13:48:30.000Z | 2021-09-20T13:48:30.000Z | from typing import Dict, List
import torch
import torch.nn.functional as F
def compute_loss(states: torch.Tensor,
                 actions: torch.Tensor,
                 next_states: torch.Tensor,
                 log_probs_old: torch.Tensor,
                 ext_returns: torch.Tensor,
                 ext_advantages: torch.Tensor,
                 std_ext_advantages: torch.Tensor,
                 int_returns: torch.Tensor,
                 int_advantages: torch.Tensor,
                 std_int_advantages: torch.Tensor,
                 target_random_features: torch.Tensor,
                 states_mean: torch.Tensor,
                 states_std: torch.Tensor,
                 model: torch.nn.Module,
                 pred_intr_model: torch.nn.Module,
                 intrinsic_reward_ratio: float,
                 ratio_clip: float,
                 entropy_weight: float,
                 value_weight: float,
                 rnd_weight: float,
                 rnd_obs_clip: float,
                 summary_writer: object = None,
                 iteration_count: int = 0,
                 rnn_states: Dict[str, Dict[str, List[torch.Tensor]]] = None) -> torch.Tensor:
    '''
    Computes the loss of an actor critic model using the
    loss function from equation (9) in the paper:
    Proximal Policy Optimization Algorithms: https://arxiv.org/abs/1707.06347

    :param states: Dimension: batch_size x state_size: States visited by the agent.
    :param actions: Dimension: batch_size x action_size. Actions which the agent
                    took at every state in :param states: with the same index.
    :param log_probs_old: Dimension: batch_size x 1. Log probability of taking
                          the action with the same index in :param actions:.
                          Used to compute the policy probability ratio.
                          Refer to original paper equation (6)
    :param ext_returns: Dimension: batch_size x 1. Empirical returns obtained via
                        calculating the discounted return from the environment's rewards
    :param ext_advantages: Dimension: batch_size x 1. Estimated advantage function
                           for every state and action in :param states: and
                           :param actions: (respectively) with the same index.
    :param std_ext_advantages: Dimension: batch_size x 1. Estimated standardized advantage function
                               for every state and action in :param states: and
                               :param actions: (respectively) with the same index.
    :param int_returns: Dimension: batch_size x 1. Empirical intrinsic returns obtained via
                        calculating the discounted intrinsic return from the intrinsic rewards.
    :param int_advantages: Dimension: batch_size x 1. Estimated intrisinc advantage function
                           for every state and action in :param states: and
                           :param actions: (respectively) with the same index.
    :param std_int_advantages: Dimension: batch_size x 1. Estimated standardized intrinsic advantage function
                               for every state and action in :param states: and
                               :param actions: (respectively) with the same index.
    :param target_random_features: target random features used to compute the intrinsic rewards.
    :param states_mean: mean over the previous training step's states.
    :param states_std: standard deviation over the previous training step's states.
    :param model: torch.nn.Module used to compute the policy probability ratio
                  as specified in equation (6) of original paper.
    :param predict_intr_model: intrinsic reward prediction model.
    :param intrinsic_reward_ratio: ratio of intrinsic reward to extrinsic reward.
    :param ratio_clip: Epsilon value used to clip the policy ratio's value.
                       This parameter acts as the radius of the Trust Region.
                       Refer to original paper equation (7).
    :param entropy_weight: Coefficient to be used for the entropy bonus
                           for the loss function. Refer to original paper eq (9)
    :param value_weight: Coefficient to be used for the value loss
                         for the loss function. Refer to original paper eq (9)
    :param rnd_weight: Coefficient to be used for the rnd loss
                       for the loss function.
    :param rnn_states: The :param model: can be made up of different submodules.
                       Some of these submodules will feature an LSTM architecture.
                       This parameter is a dictionary which maps recurrent submodule names
                       to a dictionary which contains 2 lists of tensors, each list
                       corresponding to the 'hidden' and 'cell' states of
                       the LSTM submodules. These tensors are used by the
                       :param model: when calculating the policy probability ratio.
    '''
    # Combined advantage: extrinsic plus weighted intrinsic (RND) advantage.
    advantages = ext_advantages + intrinsic_reward_ratio*int_advantages
    std_advantages = std_ext_advantages + intrinsic_reward_ratio*std_int_advantages

    prediction = model(states, actions, rnn_states=rnn_states)

    # Policy probability ratio r_t(theta) (eq. 6) and clipped surrogate (eq. 7).
    ratio = torch.exp((prediction['log_pi_a'] - log_probs_old))
    obj = ratio * std_advantages
    obj_clipped = torch.clamp(ratio,
                              1.0 - ratio_clip,
                              1.0 + ratio_clip) * std_advantages
    policy_val = -torch.min(obj, obj_clipped).mean()
    entropy_val = prediction['ent'].mean()
    policy_loss = policy_val - entropy_weight * entropy_val # L^{clip} and L^{S} from original paper

    # Random Network Distillation loss: the predictor network is trained to
    # match the fixed random target network on normalized (and optionally
    # clipped) next-state observations.
    norm_next_states = (next_states-states_mean) / (states_std+1e-8)
    if rnd_obs_clip > 1e-1:
        norm_next_states = torch.clamp( norm_next_states, -rnd_obs_clip, rnd_obs_clip)
    pred_random_features = pred_intr_model(norm_next_states)
    int_reward_loss = torch.nn.functional.mse_loss( pred_random_features, target_random_features.detach())

    # Separate value heads for extrinsic ('v') and intrinsic ('int_v') returns.
    ext_v_loss = torch.nn.functional.mse_loss(input=prediction['v'], target=ext_returns)
    int_v_loss = torch.nn.functional.mse_loss(input=prediction['int_v'], target=int_returns)
    value_loss = (ext_v_loss + int_v_loss)
    rnd_loss = int_reward_loss

    total_loss = policy_loss + rnd_weight * rnd_loss + value_weight * value_loss

    if summary_writer is not None:
        summary_writer.add_scalar('Training/RatioMean', ratio.mean().cpu().item(), iteration_count)
        summary_writer.add_scalar('Training/ExtAdvantageMean', ext_advantages.mean().cpu().item(), iteration_count)
        summary_writer.add_scalar('Training/IntAdvantageMean', int_advantages.mean().cpu().item(), iteration_count)
        summary_writer.add_scalar('Training/AdvantageMean', advantages.mean().cpu().item(), iteration_count)
        summary_writer.add_scalar('Training/RNDLoss', int_reward_loss.cpu().item(), iteration_count)
        summary_writer.add_scalar('Training/ExtVLoss', ext_v_loss.cpu().item(), iteration_count)
        summary_writer.add_scalar('Training/IntVLoss', int_v_loss.cpu().item(), iteration_count)
        summary_writer.add_scalar('Training/MeanVValues', prediction['v'].cpu().mean().item(), iteration_count)
        summary_writer.add_scalar('Training/MeanReturns', ext_returns.cpu().mean().item(), iteration_count)
        summary_writer.add_scalar('Training/StdVValues', prediction['v'].cpu().std().item(), iteration_count)
        summary_writer.add_scalar('Training/StdReturns', ext_returns.cpu().std().item(), iteration_count)
        summary_writer.add_scalar('Training/MeanIntVValues', prediction['int_v'].cpu().mean().item(), iteration_count)
        summary_writer.add_scalar('Training/MeanIntReturns', int_returns.cpu().mean().item(), iteration_count)
        summary_writer.add_scalar('Training/StdIntVValues', prediction['int_v'].cpu().std().item(), iteration_count)
        summary_writer.add_scalar('Training/StdIntReturns', int_returns.cpu().std().item(), iteration_count)
        summary_writer.add_scalar('Training/ValueLoss', value_loss.cpu().item(), iteration_count)
        summary_writer.add_scalar('Training/PolicyVal', policy_val.cpu().item(), iteration_count)
        summary_writer.add_scalar('Training/EntropyVal', entropy_val.cpu().item(), iteration_count)
        summary_writer.add_scalar('Training/PolicyLoss', policy_loss.cpu().item(), iteration_count)
        summary_writer.add_scalar('Training/TotalLoss', total_loss.cpu().item(), iteration_count)

    return total_loss
| 61.865031 | 139 | 0.673344 | from typing import Dict, List
import torch
import torch.nn.functional as F
def compute_loss(states: torch.Tensor,
actions: torch.Tensor,
next_states: torch.Tensor,
log_probs_old: torch.Tensor,
ext_returns: torch.Tensor,
ext_advantages: torch.Tensor,
std_ext_advantages: torch.Tensor,
int_returns: torch.Tensor,
int_advantages: torch.Tensor,
std_int_advantages: torch.Tensor,
target_random_features: torch.Tensor,
states_mean: torch.Tensor,
states_std: torch.Tensor,
model: torch.nn.Module,
pred_intr_model: torch.nn.Module,
intrinsic_reward_ratio: float,
ratio_clip: float,
entropy_weight: float,
value_weight: float,
rnd_weight: float,
rnd_obs_clip: float,
summary_writer: object = None,
iteration_count: int = 0,
rnn_states: Dict[str, Dict[str, List[torch.Tensor]]] = None) -> torch.Tensor:
advantages = ext_advantages + intrinsic_reward_ratio*int_advantages
std_advantages = std_ext_advantages + intrinsic_reward_ratio*std_int_advantages
prediction = model(states, actions, rnn_states=rnn_states)
ratio = torch.exp((prediction['log_pi_a'] - log_probs_old))
obj = ratio * std_advantages
obj_clipped = torch.clamp(ratio,
1.0 - ratio_clip,
1.0 + ratio_clip) * std_advantages
policy_val = -torch.min(obj, obj_clipped).mean()
entropy_val = prediction['ent'].mean()
policy_loss = policy_val - entropy_weight * entropy_val
states-states_mean) / (states_std+1e-8)
if rnd_obs_clip > 1e-1:
norm_next_states = torch.clamp( norm_next_states, -rnd_obs_clip, rnd_obs_clip)
pred_random_features = pred_intr_model(norm_next_states)
int_reward_loss = torch.nn.functional.mse_loss( pred_random_features, target_random_features.detach())
ext_v_loss = torch.nn.functional.mse_loss(input=prediction['v'], target=ext_returns)
int_v_loss = torch.nn.functional.mse_loss(input=prediction['int_v'], target=int_returns)
value_loss = (ext_v_loss + int_v_loss)
rnd_loss = int_reward_loss
total_loss = policy_loss + rnd_weight * rnd_loss + value_weight * value_loss
if summary_writer is not None:
summary_writer.add_scalar('Training/RatioMean', ratio.mean().cpu().item(), iteration_count)
summary_writer.add_scalar('Training/ExtAdvantageMean', ext_advantages.mean().cpu().item(), iteration_count)
summary_writer.add_scalar('Training/IntAdvantageMean', int_advantages.mean().cpu().item(), iteration_count)
summary_writer.add_scalar('Training/AdvantageMean', advantages.mean().cpu().item(), iteration_count)
summary_writer.add_scalar('Training/RNDLoss', int_reward_loss.cpu().item(), iteration_count)
summary_writer.add_scalar('Training/ExtVLoss', ext_v_loss.cpu().item(), iteration_count)
summary_writer.add_scalar('Training/IntVLoss', int_v_loss.cpu().item(), iteration_count)
summary_writer.add_scalar('Training/MeanVValues', prediction['v'].cpu().mean().item(), iteration_count)
summary_writer.add_scalar('Training/MeanReturns', ext_returns.cpu().mean().item(), iteration_count)
summary_writer.add_scalar('Training/StdVValues', prediction['v'].cpu().std().item(), iteration_count)
summary_writer.add_scalar('Training/StdReturns', ext_returns.cpu().std().item(), iteration_count)
summary_writer.add_scalar('Training/MeanIntVValues', prediction['int_v'].cpu().mean().item(), iteration_count)
summary_writer.add_scalar('Training/MeanIntReturns', int_returns.cpu().mean().item(), iteration_count)
summary_writer.add_scalar('Training/StdIntVValues', prediction['int_v'].cpu().std().item(), iteration_count)
summary_writer.add_scalar('Training/StdIntReturns', int_returns.cpu().std().item(), iteration_count)
summary_writer.add_scalar('Training/ValueLoss', value_loss.cpu().item(), iteration_count)
summary_writer.add_scalar('Training/PolicyVal', policy_val.cpu().item(), iteration_count)
summary_writer.add_scalar('Training/EntropyVal', entropy_val.cpu().item(), iteration_count)
summary_writer.add_scalar('Training/PolicyLoss', policy_loss.cpu().item(), iteration_count)
summary_writer.add_scalar('Training/TotalLoss', total_loss.cpu().item(), iteration_count)
return total_loss
| true | true |
f725663a96725554f28f77d984f7989ab3fbe8af | 4,939 | py | Python | huaweicloud-sdk-cce/huaweicloudsdkcce/v3/model/cluster_cert.py | huaweicloud/huaweicloud-sdk-python-v3 | 7a6270390fcbf192b3882bf763e7016e6026ef78 | [
"Apache-2.0"
] | 64 | 2020-06-12T07:05:07.000Z | 2022-03-30T03:32:50.000Z | huaweicloud-sdk-cce/huaweicloudsdkcce/v3/model/cluster_cert.py | huaweicloud/huaweicloud-sdk-python-v3 | 7a6270390fcbf192b3882bf763e7016e6026ef78 | [
"Apache-2.0"
] | 11 | 2020-07-06T07:56:54.000Z | 2022-01-11T11:14:40.000Z | huaweicloud-sdk-cce/huaweicloudsdkcce/v3/model/cluster_cert.py | huaweicloud/huaweicloud-sdk-python-v3 | 7a6270390fcbf192b3882bf763e7016e6026ef78 | [
"Apache-2.0"
] | 24 | 2020-06-08T11:42:13.000Z | 2022-03-04T06:44:08.000Z | # coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ClusterCert:
    """Cluster certificate information for a CCE cluster kubeconfig entry.

    Attributes:
        openapi_types (dict): The key is attribute name
                            and the value is attribute type.
        attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    sensitive_list = []

    openapi_types = {
        'server': 'str',
        'certificate_authority_data': 'str',
        'insecure_skip_tls_verify': 'bool'
    }

    attribute_map = {
        'server': 'server',
        'certificate_authority_data': 'certificate-authority-data',
        'insecure_skip_tls_verify': 'insecure-skip-tls-verify'
    }

    def __init__(self, server=None, certificate_authority_data=None, insecure_skip_tls_verify=None):
        """ClusterCert - a model defined in huaweicloud sdk"""
        self._server = None
        self._certificate_authority_data = None
        self._insecure_skip_tls_verify = None
        self.discriminator = None
        # Route every supplied (non-None) argument through its property setter.
        for attr_name, supplied in (('server', server),
                                    ('certificate_authority_data', certificate_authority_data),
                                    ('insecure_skip_tls_verify', insecure_skip_tls_verify)):
            if supplied is not None:
                setattr(self, attr_name, supplied)

    @property
    def server(self):
        """Gets the server of this ClusterCert.

        Server address.

        :return: The server of this ClusterCert.
        :rtype: str
        """
        return self._server

    @server.setter
    def server(self, server):
        """Sets the server of this ClusterCert.

        Server address.

        :param server: The server of this ClusterCert.
        :type: str
        """
        self._server = server

    @property
    def certificate_authority_data(self):
        """Gets the certificate_authority_data of this ClusterCert.

        Certificate authority data.

        :return: The certificate_authority_data of this ClusterCert.
        :rtype: str
        """
        return self._certificate_authority_data

    @certificate_authority_data.setter
    def certificate_authority_data(self, certificate_authority_data):
        """Sets the certificate_authority_data of this ClusterCert.

        Certificate authority data.

        :param certificate_authority_data: The certificate_authority_data of this ClusterCert.
        :type: str
        """
        self._certificate_authority_data = certificate_authority_data

    @property
    def insecure_skip_tls_verify(self):
        """Gets the insecure_skip_tls_verify of this ClusterCert.

        Skip server certificate verification; true when the cluster type is externalCluster.

        :return: The insecure_skip_tls_verify of this ClusterCert.
        :rtype: bool
        """
        return self._insecure_skip_tls_verify

    @insecure_skip_tls_verify.setter
    def insecure_skip_tls_verify(self, insecure_skip_tls_verify):
        """Sets the insecure_skip_tls_verify of this ClusterCert.

        Skip server certificate verification; true when the cluster type is externalCluster.

        :param insecure_skip_tls_verify: The insecure_skip_tls_verify of this ClusterCert.
        :type: bool
        """
        self._insecure_skip_tls_verify = insecure_skip_tls_verify

    def to_dict(self):
        """Returns the model properties as a dict"""
        def convert(item):
            # Recurse into nested SDK models; pass plain values through.
            return item.to_dict() if hasattr(item, "to_dict") else item

        result = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [convert(elem) for elem in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {k: convert(v) for k, v in value.items()}
            else:
                # Mask attributes flagged as sensitive.
                result[attr] = "****" if attr in self.sensitive_list else value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)

    def __repr__(self):
        """For `print`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        return isinstance(other, ClusterCert) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 29.224852 | 100 | 0.606803 |
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ClusterCert:
    """Cluster certificate information for a CCE cluster kubeconfig entry."""
    sensitive_list = []

    openapi_types = {
        'server': 'str',
        'certificate_authority_data': 'str',
        'insecure_skip_tls_verify': 'bool'
    }

    attribute_map = {
        'server': 'server',
        'certificate_authority_data': 'certificate-authority-data',
        'insecure_skip_tls_verify': 'insecure-skip-tls-verify'
    }

    def __init__(self, server=None, certificate_authority_data=None, insecure_skip_tls_verify=None):
        """ClusterCert - a model defined in huaweicloud sdk"""
        self._server = None
        self._certificate_authority_data = None
        self._insecure_skip_tls_verify = None
        self.discriminator = None
        # Route every supplied (non-None) argument through its property setter.
        for attr_name, supplied in (('server', server),
                                    ('certificate_authority_data', certificate_authority_data),
                                    ('insecure_skip_tls_verify', insecure_skip_tls_verify)):
            if supplied is not None:
                setattr(self, attr_name, supplied)

    @property
    def server(self):
        """Server address."""
        return self._server

    @server.setter
    def server(self, server):
        """Set the server address."""
        self._server = server

    @property
    def certificate_authority_data(self):
        """Certificate authority data."""
        return self._certificate_authority_data

    @certificate_authority_data.setter
    def certificate_authority_data(self, certificate_authority_data):
        """Set the certificate authority data."""
        self._certificate_authority_data = certificate_authority_data

    @property
    def insecure_skip_tls_verify(self):
        """Whether to skip server certificate verification."""
        return self._insecure_skip_tls_verify

    @insecure_skip_tls_verify.setter
    def insecure_skip_tls_verify(self, insecure_skip_tls_verify):
        """Set whether to skip server certificate verification."""
        self._insecure_skip_tls_verify = insecure_skip_tls_verify

    def to_dict(self):
        """Return the model properties as a dict."""
        def convert(item):
            # Recurse into nested SDK models; pass plain values through.
            return item.to_dict() if hasattr(item, "to_dict") else item

        result = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [convert(elem) for elem in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {k: convert(v) for k, v in value.items()}
            else:
                # Mask attributes flagged as sensitive.
                result[attr] = "****" if attr in self.sensitive_list else value
        return result

    def to_str(self):
        """Return the string representation of the model."""
        import simplejson as json
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)

    def __repr__(self):
        """For `print`."""
        return self.to_str()

    def __eq__(self, other):
        """Return True if both objects are equal."""
        return isinstance(other, ClusterCert) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Return True if both objects are not equal."""
        return not self == other
| true | true |
f7256728eb65c78928992820c0d53c79800f694d | 483 | py | Python | app/api/migrations/0002_auto_20210201_1602.py | ingjavierpinilla/magentrack-test | 4b5ee34aafbe85c4f536ceafd5efdc9271a26008 | [
"MIT"
] | null | null | null | app/api/migrations/0002_auto_20210201_1602.py | ingjavierpinilla/magentrack-test | 4b5ee34aafbe85c4f536ceafd5efdc9271a26008 | [
"MIT"
] | null | null | null | app/api/migrations/0002_auto_20210201_1602.py | ingjavierpinilla/magentrack-test | 4b5ee34aafbe85c4f536ceafd5efdc9271a26008 | [
"MIT"
] | null | null | null | # Generated by Django 3.1.6 on 2021-02-01 16:02
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
    """Auto-generated: alter the default of ``dataset.date``."""
    dependencies = [
        ('api', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='dataset',
            name='date',
            # NOTE(review): the default is the wall-clock moment makemigrations
            # ran, frozen into the migration -- the model likely intended a
            # callable such as django.utils.timezone.now; confirm in models.py.
            field=models.DateTimeField(default=datetime.datetime(2021, 2, 1, 16, 2, 48, 685488, tzinfo=utc)),
        ),
    ]
| 23 | 109 | 0.625259 |
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
    """Auto-generated: alter the default of ``dataset.date``."""
    dependencies = [
        ('api', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='dataset',
            name='date',
            # NOTE(review): the default is the wall-clock moment makemigrations
            # ran, frozen into the migration -- the model likely intended a
            # callable such as django.utils.timezone.now; confirm in models.py.
            field=models.DateTimeField(default=datetime.datetime(2021, 2, 1, 16, 2, 48, 685488, tzinfo=utc)),
        ),
    ]
| true | true |
f72568c0fee6e2b462e73799b21aa117bda8f7a5 | 1,562 | py | Python | sdk/python/pulumi_azure_native/management/v20200501/__init__.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/management/v20200501/__init__.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/management/v20200501/__init__.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
# Export this package's modules as members:
from .get_entity import *
from .get_hierarchy_setting import *
from .get_management_group import *
from .get_management_group_subscription import *
from .hierarchy_setting import *
from .management_group import *
from .management_group_subscription import *
from ._inputs import *
from . import outputs
def _register_module():
    """Register this package's resource types with the Pulumi runtime."""
    import pulumi
    from ... import _utilities

    class Module(pulumi.runtime.ResourceModule):
        # Semver of the installed provider SDK, shared by all instances.
        _version = _utilities.get_semver_version()

        def version(self):
            return Module._version

        def construct(self, name: str, typ: str, urn: str) -> pulumi.Resource:
            # Dispatch table: fully qualified type token -> resource class.
            resource_classes = {
                "azure-native:management/v20200501:HierarchySetting": HierarchySetting,
                "azure-native:management/v20200501:ManagementGroup": ManagementGroup,
                "azure-native:management/v20200501:ManagementGroupSubscription": ManagementGroupSubscription,
            }
            resource_class = resource_classes.get(typ)
            if resource_class is None:
                raise Exception(f"unknown resource type {typ}")
            return resource_class(name, pulumi.ResourceOptions(urn=urn))

    _module_instance = Module()
    pulumi.runtime.register_resource_module("azure-native", "management/v20200501", _module_instance)


_register_module()
| 37.190476 | 101 | 0.707426 |
# Export this package's modules as members:
from .get_entity import *
from .get_hierarchy_setting import *
from .get_management_group import *
from .get_management_group_subscription import *
from .hierarchy_setting import *
from .management_group import *
from .management_group_subscription import *
from ._inputs import *
from . import outputs
def _register_module():
import pulumi
from ... import _utilities
class Module(pulumi.runtime.ResourceModule):
_version = _utilities.get_semver_version()
def version(self):
return Module._version
def construct(self, name: str, typ: str, urn: str) -> pulumi.Resource:
if typ == "azure-native:management/v20200501:HierarchySetting":
return HierarchySetting(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:management/v20200501:ManagementGroup":
return ManagementGroup(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:management/v20200501:ManagementGroupSubscription":
return ManagementGroupSubscription(name, pulumi.ResourceOptions(urn=urn))
else:
raise Exception(f"unknown resource type {typ}")
_module_instance = Module()
pulumi.runtime.register_resource_module("azure-native", "management/v20200501", _module_instance)
_register_module()
| true | true |
f72569012f6a31830be2670b47bccbad2cee3f8e | 4,367 | py | Python | vimms_gym/viewer_helper.py | glasgowcompbio/vimms-gym | 95cb6fa84ee6e3a64618b7a2a54c3835ad0d7867 | [
"MIT"
] | null | null | null | vimms_gym/viewer_helper.py | glasgowcompbio/vimms-gym | 95cb6fa84ee6e3a64618b7a2a54c3835ad0d7867 | [
"MIT"
] | null | null | null | vimms_gym/viewer_helper.py | glasgowcompbio/vimms-gym | 95cb6fa84ee6e3a64618b7a2a54c3835ad0d7867 | [
"MIT"
] | null | null | null | import os
import sys
import numpy as np
import streamlit as st
from stable_baselines3 import PPO
from vimms.ChemicalSamplers import UniformRTAndIntensitySampler, GaussianChromatogramSampler, \
UniformMZFormulaSampler
from vimms.Common import POSITIVE
from vimms_gym.common import METHOD_PPO, METHOD_TOPN
sys.path.append('..')
from vimms_gym.env import DDAEnv
from vimms_gym.evaluation import Episode, pick_action
@st.experimental_memo
def preset_1():
    """Build the default simulation parameter dict (cached by Streamlit).

    Returns a dict with three sections:
      'chemical_creator' -- ranges and samplers used to generate chemicals,
      'noise'            -- spike-noise configuration,
      'env'              -- DDA environment settings.
    """
    mz_range = (100, 600)
    rt_range = (200, 1000)
    intensity_range = (1E4, 1E10)

    chemical_creator = {
        'mz_range': mz_range,
        'rt_range': rt_range,
        'intensity_range': intensity_range,
        'n_chemicals': (2000, 5000),
        'mz_sampler': UniformMZFormulaSampler(min_mz=mz_range[0],
                                              max_mz=mz_range[1]),
        'ri_sampler': UniformRTAndIntensitySampler(
            min_rt=rt_range[0], max_rt=rt_range[1],
            min_log_intensity=np.log(intensity_range[0]),
            max_log_intensity=np.log(intensity_range[1])),
        'cr_sampler': GaussianChromatogramSampler(),
    }
    noise = {
        'enable_spike_noise': True,
        'noise_density': 0.1,
        'noise_max_val': 1E3,
        'mz_range': mz_range,
    }
    env = {
        'ionisation_mode': POSITIVE,
        'rt_range': rt_range,
        'isolation_window': 0.7,
        'mz_tol': 10,
        'rt_tol': 120,
    }
    return {
        'chemical_creator': chemical_creator,
        'noise': noise,
        'env': env,
    }
@st.experimental_memo
def preset_2():
    """Placeholder for a second parameter preset; not implemented yet."""
    return None
def load_model_and_params(method, params):
    """Return ``(N, min_ms1_intensity, model, params)`` configured for *method*.

    For METHOD_PPO a pretrained PPO model is loaded from disk; for METHOD_TOPN
    the baseline's tuned hyper-parameters are filled in instead. Entries not
    relevant to the chosen method are returned as None.

    The caller's *params* dict is never mutated.
    """
    params = dict(params)  # shallow copy of the top level
    model = None
    N = None
    min_ms1_intensity = None
    if method == METHOD_PPO:
        # TODO: should be uploaded, rather than hardcoded?
        in_dir = os.path.abspath(os.path.join('..', 'notebooks', 'simulated_chems', 'results'))
        env_name = 'DDAEnv'
        model_name = 'PPO'
        fname = os.path.join(in_dir, '%s_%s.zip' % (env_name, model_name))
        model = load_ppo(fname)
    elif method == METHOD_TOPN:
        min_ms1_intensity = 5000
        N = 20  # from optimise_baselines.ipynb
        rt_tol = 30  # from optimise_baselines.ipynb
        # BUG FIX: copy the nested 'env' dict before mutating it -- the
        # top-level dict() copy above is shallow, so writing through it
        # previously changed the caller's params['env'] as a side effect.
        params['env'] = dict(params['env'])
        params['env']['rt_tol'] = rt_tol
    return N, min_ms1_intensity, model, params
@st.experimental_singleton
def load_ppo(fname):
    """Load a pretrained PPO agent from *fname* (cached as a singleton)."""
    return PPO.load(fname)
def run_simulation(N, chems, max_peaks, method, min_ms1_intensity, model, params):
    """Run one full episode of the DDA simulation and return the Episode record.

    Args:
        N: Top-N parameter (used by the Top-N baseline controller).
        chems: chemicals to inject into the simulated environment.
        max_peaks: maximum number of peaks tracked by the environment.
        method: controller to use (e.g. METHOD_PPO or METHOD_TOPN).
        min_ms1_intensity: minimum MS1 intensity (Top-N baseline).
        model: trained RL model, or None for non-RL methods.
        params: environment parameter dict.

    Returns:
        Episode holding per-step actions, observations, rewards and info.
    """
    env = DDAEnv(max_peaks, params)
    obs = env.reset(chems=chems)
    done = False
    episode = Episode(obs)
    with st.spinner('Wait for it...'):
        while not done:  # repeat until episode is done
            # select an action depending on the observation and method
            action, action_probs = pick_action(
                method, obs, model, env.features, N, min_ms1_intensity)
            # make one step through the simulation
            obs, reward, done, info = env.step(action)
            # FIXME: seems to slow the simulation a lot!
            # image = env.render(mode='rgb_array')
            # store new episodic information
            if obs is not None:
                episode.add_step_data(action, action_probs, obs, reward, info)
            if episode.num_steps % 500 == 0:
                # periodic progress report in the Streamlit UI
                st.write('Step\t', episode.num_steps, '\tTotal reward\t',
                         episode.get_total_rewards())
            # if episode is finished, break
            if done:
                msg = f'Episode stored into session: {episode.num_steps} timesteps ' \
                      f'with total reward {episode.get_total_rewards()}'
                st.success(msg)
                break
    return episode
| 30.971631 | 95 | 0.615068 | import os
import sys
import numpy as np
import streamlit as st
from stable_baselines3 import PPO
from vimms.ChemicalSamplers import UniformRTAndIntensitySampler, GaussianChromatogramSampler, \
UniformMZFormulaSampler
from vimms.Common import POSITIVE
from vimms_gym.common import METHOD_PPO, METHOD_TOPN
sys.path.append('..')
from vimms_gym.env import DDAEnv
from vimms_gym.evaluation import Episode, pick_action
@st.experimental_memo
def preset_1():
n_chemicals = (2000, 5000)
mz_range = (100, 600)
rt_range = (200, 1000)
intensity_range = (1E4, 1E10)
min_mz = mz_range[0]
max_mz = mz_range[1]
min_rt = rt_range[0]
max_rt = rt_range[1]
min_log_intensity = np.log(intensity_range[0])
max_log_intensity = np.log(intensity_range[1])
isolation_window = 0.7
rt_tol = 120
mz_tol = 10
ionisation_mode = POSITIVE
enable_spike_noise = True
noise_density = 0.1
noise_max_val = 1E3
mz_sampler = UniformMZFormulaSampler(min_mz=min_mz, max_mz=max_mz)
ri_sampler = UniformRTAndIntensitySampler(min_rt=min_rt, max_rt=max_rt,
min_log_intensity=min_log_intensity,
max_log_intensity=max_log_intensity)
cr_sampler = GaussianChromatogramSampler()
params = {
'chemical_creator': {
'mz_range': mz_range,
'rt_range': rt_range,
'intensity_range': intensity_range,
'n_chemicals': n_chemicals,
'mz_sampler': mz_sampler,
'ri_sampler': ri_sampler,
'cr_sampler': cr_sampler,
},
'noise': {
'enable_spike_noise': enable_spike_noise,
'noise_density': noise_density,
'noise_max_val': noise_max_val,
'mz_range': mz_range
},
'env': {
'ionisation_mode': ionisation_mode,
'rt_range': rt_range,
'isolation_window': isolation_window,
'mz_tol': mz_tol,
'rt_tol': rt_tol,
}
}
return params
@st.experimental_memo
def preset_2():
return None
def load_model_and_params(method, params):
params = dict(params)
model = None
N = None
min_ms1_intensity = None
if method == METHOD_PPO:
in_dir = os.path.abspath(os.path.join('..', 'notebooks', 'simulated_chems', 'results'))
env_name = 'DDAEnv'
model_name = 'PPO'
fname = os.path.join(in_dir, '%s_%s.zip' % (env_name, model_name))
model = load_ppo(fname)
elif method == METHOD_TOPN:
min_ms1_intensity = 5000
N = 20
rt_tol = 30
params['env']['rt_tol'] = rt_tol
return N, min_ms1_intensity, model, params
@st.experimental_singleton
def load_ppo(fname):
model = PPO.load(fname)
return model
def run_simulation(N, chems, max_peaks, method, min_ms1_intensity, model, params):
env = DDAEnv(max_peaks, params)
obs = env.reset(chems=chems)
done = False
episode = Episode(obs)
with st.spinner('Wait for it...'):
while not done:
action, action_probs = pick_action(
method, obs, model, env.features, N, min_ms1_intensity)
obs, reward, done, info = env.step(action)
if obs is not None:
episode.add_step_data(action, action_probs, obs, reward, info)
if episode.num_steps % 500 == 0:
st.write('Step\t', episode.num_steps, '\tTotal reward\t',
episode.get_total_rewards())
if done:
msg = f'Episode stored into session: {episode.num_steps} timesteps ' \
f'with total reward {episode.get_total_rewards()}'
st.success(msg)
break
return episode
| true | true |
f72569bc15c637ff38eb1d46aa97e5e9e29a5669 | 1,368 | py | Python | alexa keyword crawler.py | SRMSE/keyword-crawler | fa97ed1c5c3252eb24dbaaa495fb9ad98e1d7c37 | [
"MIT"
] | 1 | 2017-08-23T23:47:32.000Z | 2017-08-23T23:47:32.000Z | alexa keyword crawler.py | SRMSE/keyword-crawler | fa97ed1c5c3252eb24dbaaa495fb9ad98e1d7c37 | [
"MIT"
] | null | null | null | alexa keyword crawler.py | SRMSE/keyword-crawler | fa97ed1c5c3252eb24dbaaa495fb9ad98e1d7c37 | [
"MIT"
] | null | null | null | import requests
from bs4 import BeautifulSoup as b
from pymongo import MongoClient
import time
from multiprocessing import Pool
url = "http://www.alexa.com/siteinfo/"
file = open("filtered-domains.txt",'r')
client = MongoClient(connect=False)
db = client.alexa
keyword = db.keyword
bcolors={
"HEADER" : '\033[95m',
"INFO" : '\033[94m',
"SUCCESS" : '\033[92m',
"WARNING" : '\033[93m',
"FAIL" : '\033[91m',
"ENDC" : '\033[0m',
"BOLD" : '\033[1m',
"UNDERLINE" : '\033[4m'
}
def put(msg,type):
print bcolors[type.upper()] + ""+"["+time.asctime( time.localtime(time.time()) )+"]\t["+type.strip().capitalize()+"]\t"+str(msg)+"" + bcolors["ENDC"]
def soup(domain,link):
	# Extract the "top keywords" column from an Alexa site-info page.
	# `domain` is the parsed BeautifulSoup document, `link` the domain
	# string (used only for logging).  Returns a list of utf-8 encoded
	# keyword strings, or None when the table is missing/unparseable.
	try:
		tags = []
		table = domain.find("table",{"id":"keywords_top_keywords_table"}).find("tbody").findAll("td")
		# keywords sit in every other cell; the second <span> holds the text
		for i in range(len(table)):
			if i%2 == 0:
				tags.append(table[i].findAll("span")[1].text.encode('utf-8'))
		put("found all tags of "+link,"INFO")
		return tags
	# BUG FIX: was `except Excption` (typo), which itself raised a
	# NameError whenever the scrape failed instead of logging a warning.
	except Exception as e:
		put(e,"WARNING")
def main(line):
	# Fetch the Alexa page for one domain, scrape its keywords and store
	# the result as {domain: [keywords]} in MongoDB.  Any failure is
	# logged via put() rather than propagated (worker processes).
	try:
		domain = line.strip()
		page = b(requests.get((url+line).strip()).content,"lxml")
		record = {}
		record[domain] = soup(page,line)
		put(record,"SUCCESS")
		keyword.insert(record, check_keys=False)
		put(domain+" added to MongoClient","ENDC")
	except Exception as e:
		put(e,"FAIL")
if __name__ == "__main__":
	# Crawl all domains concurrently with a pool of 50 workers.
	p = Pool(50)
	# BUG FIX: the result list from p.map() was previously passed back
	# into main() -- main(p.map(main, file)) -- which then tried to fetch
	# url + <list of None> and always hit the FAIL branch.
	p.map(main, file)
from bs4 import BeautifulSoup as b
from pymongo import MongoClient
import time
from multiprocessing import Pool
url = "http://www.alexa.com/siteinfo/"
file = open("filtered-domains.txt",'r')
client = MongoClient(connect=False)
db = client.alexa
keyword = db.keyword
bcolors={
"HEADER" : '\033[95m',
"INFO" : '\033[94m',
"SUCCESS" : '\033[92m',
"WARNING" : '\033[93m',
"FAIL" : '\033[91m',
"ENDC" : '\033[0m',
"BOLD" : '\033[1m',
"UNDERLINE" : '\033[4m'
}
def put(msg,type):
print bcolors[type.upper()] + ""+"["+time.asctime( time.localtime(time.time()) )+"]\t["+type.strip().capitalize()+"]\t"+str(msg)+"" + bcolors["ENDC"]
def soup(domain,link):
try:
tags = []
table = domain.find("table",{"id":"keywords_top_keywords_table"}).find("tbody").findAll("td")
for i in range(len(table)):
if i%2 == 0:
tags.append(table[i].findAll("span")[1].text.encode('utf-8'))
put("found all tags of "+link,"INFO")
return tags
except Excption as e:
put(e,"WARNING")
def main(line):
try:
tags = soup(b(requests.get((url+line).strip()).content,"lxml"),line)
dic ={}
dic[line.strip()] = tags
put(dic,"SUCCESS")
keyword.insert(dic, check_keys=False)
put(line.strip()+" added to MongoClient","ENDC")
except Exception as e:
put(e,"FAIL")
if __name__ == "__main__":
p = Pool(50)
main(p.map(main, file)) | false | true |
f7256a59d601a2c803274ba2986fc1dd01ff4e55 | 5,081 | py | Python | samples/add_nic_to_vm.py | jm66/pyvmomi-community-samples | 5ca4a50b767500e07b9bce9fba70240bfa963a4e | [
"Apache-2.0"
] | 4 | 2019-05-27T23:36:34.000Z | 2020-11-12T17:08:04.000Z | samples/add_nic_to_vm.py | zhangjiahaol/pyvmomi-community-samples | 905ec34edfbd151531832e98b6a0748fa6ff5e0e | [
"Apache-2.0"
] | 12 | 2019-04-17T02:47:25.000Z | 2021-04-02T09:15:37.000Z | samples/add_nic_to_vm.py | zhangjiahaol/pyvmomi-community-samples | 905ec34edfbd151531832e98b6a0748fa6ff5e0e | [
"Apache-2.0"
] | 15 | 2018-04-26T05:18:12.000Z | 2021-11-06T04:44:58.000Z | #!/usr/bin/env python
"""
Written by nickcooper-zhangtonghao
Github: https://github.com/nickcooper-zhangtonghao
Email: nickcooper-zhangtonghao@opencloud.tech
Note: Example code For testing purposes only
This code has been released under the terms of the Apache-2.0 license
http://opensource.org/licenses/Apache-2.0
"""
from pyVmomi import vim
from pyVmomi import vmodl
from tools import tasks
from pyVim.connect import SmartConnect, SmartConnectNoSSL, Disconnect
import atexit
import argparse
import getpass
def get_args():
    """Parse command line arguments; prompt interactively for a missing password."""
    parser = argparse.ArgumentParser(
        description='Arguments for talking to vCenter')
    parser.add_argument('-s', '--host', required=True, action='store',
                        help='vSpehre service to connect to')
    parser.add_argument('-o', '--port', type=int, default=443, action='store',
                        help='Port to connect on')
    parser.add_argument('-u', '--user', required=True, action='store',
                        help='User name to use')
    parser.add_argument('-p', '--password', required=False, action='store',
                        help='Password to use')
    parser.add_argument('--no-ssl', action='store_true',
                        help='Skip client SSL verification')
    parser.add_argument('-v', '--vm-name', required=False, action='store',
                        help='name of the vm')
    parser.add_argument('--uuid', required=False, action='store',
                        help='vmuuid of vm')
    parser.add_argument('--port-group', required=True, action='store',
                        help='port group to connect on')
    args = parser.parse_args()
    # never echo the password on the command line if it was omitted
    if not args.password:
        args.password = getpass.getpass(prompt='Enter password')
    return args
def get_obj(content, vimtype, name):
    """Return the first managed object of *vimtype* whose name equals *name*.

    Searches recursively from the root folder; returns None when no match.
    """
    container = content.viewManager.CreateContainerView(
        content.rootFolder, vimtype, True)
    return next((item for item in container.view if item.name == name), None)
def add_nic(si, vm, network_name):
    """Add an e1000 NIC connected to *network_name* to *vm*.

    :param si: ServiceInstance connection
    :param vm: vim.VirtualMachine object to reconfigure
    :param network_name: Name of the virtual network (opaque or standard)
    """
    spec = vim.vm.ConfigSpec()
    nic_changes = []

    nic_spec = vim.vm.device.VirtualDeviceSpec()
    nic_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add

    nic_spec.device = vim.vm.device.VirtualE1000()
    nic_spec.device.deviceInfo = vim.Description()
    nic_spec.device.deviceInfo.summary = 'vCenter API test'

    content = si.RetrieveContent()
    network = get_obj(content, [vim.Network], network_name)
    if isinstance(network, vim.OpaqueNetwork):
        # Opaque networks (e.g. NSX) are referenced by type + id.
        nic_spec.device.backing = \
            vim.vm.device.VirtualEthernetCard.OpaqueNetworkBackingInfo()
        nic_spec.device.backing.opaqueNetworkType = \
            network.summary.opaqueNetworkType
        nic_spec.device.backing.opaqueNetworkId = \
            network.summary.opaqueNetworkId
    else:
        nic_spec.device.backing = \
            vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()
        nic_spec.device.backing.useAutoDetect = False
        # BUG FIX: deviceName must be the network *name* string; the
        # original assigned the vim.Network object itself, which the
        # vSphere API rejects.  Also set the network reference explicitly.
        nic_spec.device.backing.network = network
        nic_spec.device.backing.deviceName = network_name

    nic_spec.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
    nic_spec.device.connectable.startConnected = True
    nic_spec.device.connectable.allowGuestControl = True
    nic_spec.device.connectable.connected = False
    nic_spec.device.connectable.status = 'untried'
    nic_spec.device.wakeOnLanEnabled = True
    nic_spec.device.addressType = 'assigned'

    nic_changes.append(nic_spec)
    spec.deviceChange = nic_changes
    # Fire-and-forget reconfigure task (result was never used before either).
    vm.ReconfigVM_Task(spec=spec)
    print("NIC CARD ADDED")
def main():
    """Connect to vCenter, locate the target VM and attach a new NIC to it."""
    args = get_args()
    # Establish the vCenter session, optionally skipping SSL verification.
    serviceInstance = None
    if args.no_ssl:
        serviceInstance = SmartConnectNoSSL(
            host=args.host,
            user=args.user,
            pwd=args.password,
            port=args.port)
    else:
        serviceInstance = SmartConnect(
            host=args.host,
            user=args.user,
            pwd=args.password,
            port=args.port)
    # Ensure the session is closed when the interpreter exits.
    atexit.register(Disconnect, serviceInstance)
    # Locate the VM either by instance UUID or by name.
    vm = None
    if args.uuid:
        search_index = serviceInstance.content.searchIndex
        vm = search_index.FindByUuid(None, args.uuid, True)
    elif args.vm_name:
        content = serviceInstance.RetrieveContent()
        vm = get_obj(content, [vim.VirtualMachine], args.vm_name)
    if vm:
        add_nic(serviceInstance, vm, args.port_group)
    else:
        print("VM not found")


# script entry point
if __name__ == "__main__":
    main()
| 30.244048 | 75 | 0.605196 |
from pyVmomi import vim
from pyVmomi import vmodl
from tools import tasks
from pyVim.connect import SmartConnect, SmartConnectNoSSL, Disconnect
import atexit
import argparse
import getpass
def get_args():
parser = argparse.ArgumentParser(
description='Arguments for talking to vCenter')
parser.add_argument('-s', '--host',
required=True,
action='store',
help='vSpehre service to connect to')
parser.add_argument('-o', '--port',
type=int,
default=443,
action='store',
help='Port to connect on')
parser.add_argument('-u', '--user',
required=True,
action='store',
help='User name to use')
parser.add_argument('-p', '--password',
required=False,
action='store',
help='Password to use')
parser.add_argument('--no-ssl',
action='store_true',
help='Skip client SSL verification')
parser.add_argument('-v', '--vm-name',
required=False,
action='store',
help='name of the vm')
parser.add_argument('--uuid',
required=False,
action='store',
help='vmuuid of vm')
parser.add_argument('--port-group',
required=True,
action='store',
help='port group to connect on')
args = parser.parse_args()
if not args.password:
args.password = getpass.getpass(
prompt='Enter password')
return args
def get_obj(content, vimtype, name):
obj = None
container = content.viewManager.CreateContainerView(
content.rootFolder, vimtype, True)
for c in container.view:
if c.name == name:
obj = c
break
return obj
def add_nic(si, vm, network_name):
spec = vim.vm.ConfigSpec()
nic_changes = []
nic_spec = vim.vm.device.VirtualDeviceSpec()
nic_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
nic_spec.device = vim.vm.device.VirtualE1000()
nic_spec.device.deviceInfo = vim.Description()
nic_spec.device.deviceInfo.summary = 'vCenter API test'
content = si.RetrieveContent()
network = get_obj(content, [vim.Network], network_name)
if isinstance(network, vim.OpaqueNetwork):
nic_spec.device.backing = \
vim.vm.device.VirtualEthernetCard.OpaqueNetworkBackingInfo()
nic_spec.device.backing.opaqueNetworkType = \
network.summary.opaqueNetworkType
nic_spec.device.backing.opaqueNetworkId = \
network.summary.opaqueNetworkId
else:
nic_spec.device.backing = \
vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()
nic_spec.device.backing.useAutoDetect = False
nic_spec.device.backing.deviceName = network
nic_spec.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
nic_spec.device.connectable.startConnected = True
nic_spec.device.connectable.allowGuestControl = True
nic_spec.device.connectable.connected = False
nic_spec.device.connectable.status = 'untried'
nic_spec.device.wakeOnLanEnabled = True
nic_spec.device.addressType = 'assigned'
nic_changes.append(nic_spec)
spec.deviceChange = nic_changes
e = vm.ReconfigVM_Task(spec=spec)
print("NIC CARD ADDED")
def main():
args = get_args()
serviceInstance = None
if args.no_ssl:
serviceInstance = SmartConnectNoSSL(
host=args.host,
user=args.user,
pwd=args.password,
port=args.port)
else:
serviceInstance = SmartConnect(
host=args.host,
user=args.user,
pwd=args.password,
port=args.port)
atexit.register(Disconnect, serviceInstance)
vm = None
if args.uuid:
search_index = serviceInstance.content.searchIndex
vm = search_index.FindByUuid(None, args.uuid, True)
elif args.vm_name:
content = serviceInstance.RetrieveContent()
vm = get_obj(content, [vim.VirtualMachine], args.vm_name)
if vm:
add_nic(serviceInstance, vm, args.port_group)
else:
print("VM not found")
if __name__ == "__main__":
main()
| true | true |
f7256ae570b6c12768a5d1a994314b4d86c179d6 | 894 | py | Python | modules/signatures/windows/antiav_srp.py | Yuanmessi/Bold-Falcon | 00fcaba0b3d9c462b9d20ecb256ff85db5d119e2 | [
"BSD-3-Clause"
] | 24 | 2021-06-21T07:35:37.000Z | 2022-03-22T03:33:59.000Z | modules/signatures/windows/antiav_srp.py | Yuanmessi/Bold-Falcon | 00fcaba0b3d9c462b9d20ecb256ff85db5d119e2 | [
"BSD-3-Clause"
] | 3 | 2021-07-01T08:09:05.000Z | 2022-01-28T03:38:36.000Z | modules/signatures/windows/antiav_srp.py | Yuanmessi/Bold-Falcon | 00fcaba0b3d9c462b9d20ecb256ff85db5d119e2 | [
"BSD-3-Clause"
] | 6 | 2021-06-22T05:32:57.000Z | 2022-02-11T02:05:45.000Z | # Copyright (C) 2014 Optiv, Inc. (brad.spengler@optiv.com), Updated 2016 for cuckoo 2.0
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
from lib.cuckoo.common.abstracts import Signature
class AntiAVSRP(Signature):
    """Flags writes to Software Restriction Policy path rules (often used to block AV)."""

    name = "antiav_srp"
    description = "Modifies Software Restriction Policies likely to cripple AV"
    severity = 3
    categories = ["anti-av"]
    authors = ["Optiv"]
    minimum = "2.0"
    ttp = ["T1089"]

    # Registry patterns for SRP "disallowed path" entries.
    regkeys_re = [
        ".*\\\\Policies\\\\Microsoft\\\\Windows\\\\Safer\\\\\CodeIdentifiers\\\\0\\\\Paths\\\\.*",
    ]

    def on_complete(self):
        # Mark every written key matching an SRP pattern as an IOC, then
        # report whether anything matched.
        for pattern in self.regkeys_re:
            hits = self.check_key(pattern=pattern, regex=True,
                                  actions=["regkey_written"], all=True)
            for hit in hits:
                self.mark_ioc("registry", hit)
        return self.has_marks()
| 34.384615 | 110 | 0.651007 |
from lib.cuckoo.common.abstracts import Signature
class AntiAVSRP(Signature):
name = "antiav_srp"
description = "Modifies Software Restriction Policies likely to cripple AV"
severity = 3
categories = ["anti-av"]
authors = ["Optiv"]
minimum = "2.0"
ttp = ["T1089"]
regkeys_re = [
".*\\\\Policies\\\\Microsoft\\\\Windows\\\\Safer\\\\\CodeIdentifiers\\\\0\\\\Paths\\\\.*",
]
def on_complete(self):
for indicator in self.regkeys_re:
for regkey in self.check_key(pattern=indicator, regex=True, actions=["regkey_written"], all=True):
self.mark_ioc("registry", regkey)
return self.has_marks()
| true | true |
f7256b2ee02db620d84d3addaae0bc4e05297053 | 1,900 | py | Python | 06_Transformacoes_do_Conjunto_de_Dados/6.6_Projecao_Aleatoria/6.6.1._O_Lema_de_Johnson-Lindenstrauss.py | BrunoBertti/Scikit_Learning | 4b9e10ff7909f3728ac1e8bba19f5fd779340bc4 | [
"MIT"
] | null | null | null | 06_Transformacoes_do_Conjunto_de_Dados/6.6_Projecao_Aleatoria/6.6.1._O_Lema_de_Johnson-Lindenstrauss.py | BrunoBertti/Scikit_Learning | 4b9e10ff7909f3728ac1e8bba19f5fd779340bc4 | [
"MIT"
] | null | null | null | 06_Transformacoes_do_Conjunto_de_Dados/6.6_Projecao_Aleatoria/6.6.1._O_Lema_de_Johnson-Lindenstrauss.py | BrunoBertti/Scikit_Learning | 4b9e10ff7909f3728ac1e8bba19f5fd779340bc4 | [
"MIT"
] | null | null | null | ########## 6.6.1. O lema de Johnson-Lindenstrauss ##########
# O principal resultado teórico por trás da eficiência da projeção aleatória é o lema de Johnson-Lindenstrauss (citando a Wikipedia):
# Em matemática, o lema de Johnson-Lindenstrauss é um resultado sobre embeddings de baixa distorção de pontos de alta dimensão em espaço euclidiano de baixa dimensão. O lema afirma que um pequeno conjunto de pontos em um espaço de alta dimensão pode ser incorporado em um espaço de dimensão muito menor de tal forma que as distâncias entre os pontos sejam praticamente preservadas. O mapa usado para a incorporação é pelo menos Lipschitz, e pode até ser considerado uma projeção ortogonal.
# Conhecendo apenas o número de amostras, o johnson_lindenstrauss_min_dim estima conservadoramente o tamanho mínimo do subespaço aleatório para garantir uma distorção limitada introduzida pela projeção aleatória:
from sklearn.random_projection import johnson_lindenstrauss_min_dim
# Interactive doc examples; return values are intentionally discarded here.
# Per the scikit-learn docs: 663
johnson_lindenstrauss_min_dim(n_samples=1e6, eps=0.5)
# Per the scikit-learn docs: array([    663,   11841, 1112658])
johnson_lindenstrauss_min_dim(n_samples=1e6, eps=[0.5, 0.1, 0.01])
# Per the scikit-learn docs: array([ 7894,  9868, 11841])
johnson_lindenstrauss_min_dim(n_samples=[1e4, 1e5, 1e6], eps=0.1)
# https://scikit-learn.org/stable/auto_examples/miscellaneous/plot_johnson_lindenstrauss_bound.html
## Exemplos:
## See The Johnson-Lindenstrauss bound for embedding with random projections for a theoretical explication on the Johnson-Lindenstrauss lemma and an empirical validation using sparse random matrices. (https://scikit-learn.org/stable/auto_examples/miscellaneous/plot_johnson_lindenstrauss_bound.html#sphx-glr-auto-examples-miscellaneous-plot-johnson-lindenstrauss-bound-py)
## Referências:
## Sanjoy Dasgupta and Anupam Gupta, 1999. An elementary proof of the Johnson-Lindenstrauss Lemma. ( http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.39.3334&rep=rep1&type=pdf) | 52.777778 | 493 | 0.792105 | true | true | |
f7256c0b18316e9401a8678074fb2dce8d2668b5 | 2,659 | py | Python | ddsp/training/preprocessing.py | jesseengel/ddsp | de195af0a21fba52e6b88c23886c244d8607e49c | [
"Apache-2.0"
] | 7 | 2020-01-18T13:12:52.000Z | 2021-06-24T20:32:19.000Z | ddsp/training/preprocessing.py | jesseengel/ddsp | de195af0a21fba52e6b88c23886c244d8607e49c | [
"Apache-2.0"
] | null | null | null | ddsp/training/preprocessing.py | jesseengel/ddsp | de195af0a21fba52e6b88c23886c244d8607e49c | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 The DDSP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Library of preprocess functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import ddsp
import gin
import tensorflow.compat.v1 as tf
hz_to_midi = ddsp.core.hz_to_midi
F0_RANGE = ddsp.spectral_ops.F0_RANGE
LD_RANGE = ddsp.spectral_ops.LD_RANGE
# ---------------------- Preprocess Helpers ------------------------------------
def at_least_3d(x):
  """Append a trailing channel axis when `x` is rank 2; otherwise pass through."""
  if len(x.shape) == 2:
    return x[:, :, tf.newaxis]
  return x
# ---------------------- Preprocess objects ------------------------------------
class Preprocessor(object):
  """Base class for chaining a series of preprocessing functions."""

  def __init__(self):
    pass

  def __call__(self, features, training=True):
    """Return a shallow copy of the input features.

    Args:
      features: dict of feature key and tensors.
      training: boolean for controlling training-specific preprocessing
        behavior (unused by the base class).

    Returns:
      A shallow copy of `features`; subclasses transform it further.
    """
    features_copy = copy.copy(features)
    return features_copy
@gin.register
class DefaultPreprocessor(Preprocessor):
  """Resamples 'loudness_db'/'f0_hz' and adds scaled 'f0_scaled'/'ld_scaled' keys."""

  def __init__(self, time_steps=1000):
    super(DefaultPreprocessor, self).__init__()
    # Number of timesteps every time-varying feature is resampled to.
    self.time_steps = time_steps

  def __call__(self, features, training=True):
    # NOTE(review): the base-class copy returned by super().__call__ is
    # discarded, so `features` is mutated in place below -- confirm callers
    # do not rely on their input dict staying untouched.
    super(DefaultPreprocessor, self).__call__(features, training)
    return self._default_processing(features)

  def _default_processing(self, features):
    """Always resample to `time_steps` and scale 'loudness_db' and 'f0_hz'."""
    for k in ['loudness_db', 'f0_hz']:
      # Resample to a fixed length, then ensure a trailing channel axis.
      features[k] = ddsp.core.resample(features[k], n_timesteps=self.time_steps)
      features[k] = at_least_3d(features[k])
    # For NN training, scale frequency and loudness to the range [0, 1].
    # Log-scale f0 features. Loudness from [-1, 0] to [1, 0].
    features['f0_scaled'] = hz_to_midi(features['f0_hz']) / F0_RANGE
    features['ld_scaled'] = (features['loudness_db'] / LD_RANGE) + 1.0
    return features
| 32.426829 | 80 | 0.699511 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import ddsp
import gin
import tensorflow.compat.v1 as tf
hz_to_midi = ddsp.core.hz_to_midi
F0_RANGE = ddsp.spectral_ops.F0_RANGE
LD_RANGE = ddsp.spectral_ops.LD_RANGE
def at_least_3d(x):
return x[:, :, tf.newaxis] if len(x.shape) == 2 else x
class Preprocessor(object):
def __init__(self):
pass
def __call__(self, features, training=True):
return copy.copy(features)
@gin.register
class DefaultPreprocessor(Preprocessor):
def __init__(self, time_steps=1000):
super(DefaultPreprocessor, self).__init__()
self.time_steps = time_steps
def __call__(self, features, training=True):
super(DefaultPreprocessor, self).__call__(features, training)
return self._default_processing(features)
def _default_processing(self, features):
for k in ['loudness_db', 'f0_hz']:
features[k] = ddsp.core.resample(features[k], n_timesteps=self.time_steps)
features[k] = at_least_3d(features[k])
features['f0_scaled'] = hz_to_midi(features['f0_hz']) / F0_RANGE
features['ld_scaled'] = (features['loudness_db'] / LD_RANGE) + 1.0
return features
| true | true |
f7256c23b14e3bf3204687769f2e778d96ed7ed4 | 2,818 | py | Python | MA cross.py | 0xTDF/Quant-Trading-Strategy-Backtesting-Framework | d77089bab3513013d456819e9790e67e44adec8e | [
"MIT"
] | 1 | 2022-03-25T07:50:15.000Z | 2022-03-25T07:50:15.000Z | MA cross.py | Elisik/Quant-Trading-Strategy-Backtesting-Framework | d77089bab3513013d456819e9790e67e44adec8e | [
"MIT"
] | null | null | null | MA cross.py | Elisik/Quant-Trading-Strategy-Backtesting-Framework | d77089bab3513013d456819e9790e67e44adec8e | [
"MIT"
] | null | null | null | import backtrader as bt
import backtrader.analyzers as bta
from datetime import datetime
import matplotlib.pyplot as plt
import yfinance
class MaCrossStrategy(bt.Strategy):
    """10/20 SMA crossover: go long on a golden cross, flatten on a death cross."""

    def __init__(self):
        # signal generator: +1 when the fast SMA crosses above the slow one,
        # -1 when it crosses below
        fast_sma = bt.ind.SMA(period=10)
        slow_sma = bt.ind.SMA(period=20)
        self.crossover = bt.ind.CrossOver(fast_sma, slow_sma)

    def next(self):
        # executes orders from the signals
        if not self.position:
            if self.crossover > 0:
                self.buy()
        elif self.crossover < 0:
            self.close()
cerebro = bt.Cerebro()
# load price data from a local Yahoo-Finance-format CSV
data = bt.feeds.YahooFinanceCSVData(dataname='BTC-USD.csv')
# plot the price axis on a log scale
data.plotinfo.plotlog = True
# add data feed to the engine
cerebro.adddata(data)
# add the SMA-crossover strategy to the engine
cerebro.addstrategy(MaCrossStrategy)
# starting capital
cerebro.broker.setcash(1000.0)
# position size: 10% of portfolio per trade
cerebro.addsizer(bt.sizers.PercentSizer, percents = 10)
# attach analyzers so results can be queried after the run
cerebro.addanalyzer(bta.SharpeRatio, _name = "sharpe")
cerebro.addanalyzer(bta.Transactions, _name = "trans")
cerebro.addanalyzer(bta.TradeAnalyzer, _name = "trades")
# run the back test
back = cerebro.run()
print(cerebro.broker.getvalue())
# extract analyzer results from the first (only) strategy instance
sharpeRatio = back[0].analyzers.sharpe.get_analysis()
print(sharpeRatio)
transactions = back[0].analyzers.trans.get_analysis()
#print(transactions)
tradeAnalyzer = back[0].analyzers.trades.get_analysis()
#print(tradeAnalyzer)
# ---- plot styling (dark theme) ----
plt.style.use('fivethirtyeight')
plt.rcParams["figure.figsize"] = (10, 6)
plt.rcParams['lines.linewidth'] = 1
SIZE = 7
plt.rcParams['axes.labelsize'] = SIZE
plt.rcParams['ytick.labelsize'] = SIZE
plt.rcParams['xtick.labelsize'] = SIZE
plt.rcParams["font.size"] = SIZE
COLOR = '1'
plt.rcParams['text.color'] = COLOR
plt.rcParams['axes.labelcolor'] = COLOR
plt.rcParams['xtick.color'] = COLOR
plt.rcParams['ytick.color'] = COLOR
plt.rcParams['grid.linewidth']=0.1
plt.rcParams['grid.color']="#101622"
plt.rcParams['lines.color']="0.5"
plt.rcParams['axes.edgecolor']="0.2"
plt.rcParams['axes.linewidth']=0.5
plt.rcParams['figure.facecolor']="#101622"
plt.rcParams['axes.facecolor']="#101622"
plt.rcParams["savefig.dpi"]=120
dpi = plt.rcParams["savefig.dpi"]
width = 1080
height = 1920
# NOTE: this overrides the (10, 6) figsize set above with a pixel-derived size
plt.rcParams['figure.figsize'] = height/dpi, width/dpi
plt.rcParams["savefig.facecolor"] ="#101622"
plt.rcParams["savefig.edgecolor"]="#101622"
plt.rcParams['legend.fontsize'] = SIZE
plt.rcParams['legend.title_fontsize'] = SIZE + 1
plt.rcParams['legend.labelspacing'] =0.25
plt.rcParams['image.cmap']='tab10'
# render the candlestick chart of the back test
cerebro.plot(style = 'candle',barup='white', bardown='#1973c2',volume = False)
plt.show()
| 26.584906 | 79 | 0.694109 | import backtrader as bt
import backtrader.analyzers as bta
from datetime import datetime
import matplotlib.pyplot as plt
import yfinance
class MaCrossStrategy(bt.Strategy):
    """Moving-average crossover strategy.

    Enters long when the fast SMA crosses above the slow SMA and closes
    the position when the fast SMA crosses back below it.
    """
    def __init__(self):
        ma_fast = bt.ind.SMA(period = 10)
        ma_slow = bt.ind.SMA(period = 20)
        # +1 on an upward cross of fast over slow, -1 on a downward cross.
        self.crossover = bt.ind.CrossOver(ma_fast, ma_slow)
    def next(self):
        if not self.position:
            # Flat: enter long on an upward cross.
            if self.crossover > 0:
                self.buy()
        elif self.crossover < 0:
            # In a position: exit on a downward cross.  The original code
            # called close() inside the `not self.position` branch, so open
            # positions were never exited.
            self.close()
cerebro = bt.Cerebro()
data = bt.feeds.YahooFinanceCSVData(dataname='BTC-USD.csv')
data.plotinfo.plotlog = True
cerebro.adddata(data)
cerebro.addstrategy(MaCrossStrategy)
cerebro.broker.setcash(1000.0)
cerebro.addsizer(bt.sizers.PercentSizer, percents = 10)
cerebro.addanalyzer(bta.SharpeRatio, _name = "sharpe")
cerebro.addanalyzer(bta.Transactions, _name = "trans")
cerebro.addanalyzer(bta.TradeAnalyzer, _name = "trades")
back = cerebro.run()
print(cerebro.broker.getvalue())
sharpeRatio = back[0].analyzers.sharpe.get_analysis()
print(sharpeRatio)
transactions = back[0].analyzers.trans.get_analysis()
tradeAnalyzer = back[0].analyzers.trades.get_analysis()
plt.style.use('fivethirtyeight')
plt.rcParams["figure.figsize"] = (10, 6)
plt.rcParams['lines.linewidth'] = 1
SIZE = 7
plt.rcParams['axes.labelsize'] = SIZE
plt.rcParams['ytick.labelsize'] = SIZE
plt.rcParams['xtick.labelsize'] = SIZE
plt.rcParams["font.size"] = SIZE
COLOR = '1'
plt.rcParams['text.color'] = COLOR
plt.rcParams['axes.labelcolor'] = COLOR
plt.rcParams['xtick.color'] = COLOR
plt.rcParams['ytick.color'] = COLOR
plt.rcParams['grid.linewidth']=0.1
plt.rcParams['grid.color']="#101622"
plt.rcParams['lines.color']="0.5"
plt.rcParams['axes.edgecolor']="0.2"
plt.rcParams['axes.linewidth']=0.5
plt.rcParams['figure.facecolor']="#101622"
plt.rcParams['axes.facecolor']="#101622"
plt.rcParams["savefig.dpi"]=120
dpi = plt.rcParams["savefig.dpi"]
width = 1080
height = 1920
plt.rcParams['figure.figsize'] = height/dpi, width/dpi
plt.rcParams["savefig.facecolor"] ="#101622"
plt.rcParams["savefig.edgecolor"]="#101622"
plt.rcParams['legend.fontsize'] = SIZE
plt.rcParams['legend.title_fontsize'] = SIZE + 1
plt.rcParams['legend.labelspacing'] =0.25
plt.rcParams['image.cmap']='tab10'
cerebro.plot(style = 'candle',barup='white', bardown='#1973c2',volume = False)
plt.show()
| true | true |
f7256d283ba52ce9290a4bd6ce811edcc7a1208a | 8,082 | py | Python | juriscraper/opinions/united_states_backscrapers/federal_appellate/ca5.py | drewsilcock/juriscraper | 706a05f739e10f22b81b9bb16767415d810e49d1 | [
"BSD-2-Clause"
] | null | null | null | juriscraper/opinions/united_states_backscrapers/federal_appellate/ca5.py | drewsilcock/juriscraper | 706a05f739e10f22b81b9bb16767415d810e49d1 | [
"BSD-2-Clause"
] | null | null | null | juriscraper/opinions/united_states_backscrapers/federal_appellate/ca5.py | drewsilcock/juriscraper | 706a05f739e10f22b81b9bb16767415d810e49d1 | [
"BSD-2-Clause"
] | null | null | null | from lxml import html
from datetime import datetime, timedelta, date
from dateutil.rrule import DAILY, rrule
from selenium.common.exceptions import NoSuchElementException
from juriscraper.AbstractSite import logger
from juriscraper.OpinionSiteWebDriven import OpinionSiteWebDriven
class Site(OpinionSiteWebDriven):
    """Selenium-driven backscraper for Fifth Circuit (CA5) opinions.

    Queries the court's opinion search in rolling 5-day windows, paginating
    through the result grid and extracting case metadata via XPath.
    """
    def __init__(self, *args, **kwargs):
        super(Site, self).__init__(*args, **kwargs)
        self.url = "http://www.ca5.uscourts.gov/electronic-case-filing/case-information/current-opinions"
        self.court_id = self.__module__
        # Width, in days, of each date-range search window.
        self.interval = 5
        self.case_date = datetime.today()
        # One backscrape target date every `interval` days over the range.
        self.back_scrape_iterable = [
            i.date()
            for i in rrule(
                DAILY,
                interval=self.interval,  # Every interval days
                dtstart=date(1992, 5, 14),
                until=date(2015, 1, 1),
            )
        ]
        self.uses_selenium = True
    def _download(self, request_dict={}):
        # NOTE(review): mutable default argument `request_dict={}` — shared
        # across calls; harmless here only because it is never mutated.
        # Returns a list of parsed lxml trees, one per result page.
        if self.test_mode_enabled():
            # Test mode: parse the fixture via the parent class, no browser.
            html_tree_list = [
                super(Site, self)._download(request_dict=request_dict)
            ]
            self.records_nr = len(
                html_tree_list[0].xpath(
                    "//tr[contains(concat('', @id, ''), 'ctl00_Body_C010_ctl00_ctl00_radGridOpinions_ctl00')]"
                )
            )
            return html_tree_list
        else:
            logger.info("Running Selenium browser...")
            self.initiate_webdriven_session()
            self.wait_for_id("ctl00_Body_C010_ctl00_ctl00_endDate_dateInput")
            # Fill the date-range form: [case_date - interval, case_date].
            start_date = self.webdriver.find_element_by_id(
                "ctl00_Body_C010_ctl00_ctl00_startDate_dateInput"
            )
            start_date.send_keys(
                (self.case_date - timedelta(days=self.interval)).strftime(
                    "%m/%d/%Y"
                )
            )
            end_date = self.webdriver.find_element_by_id(
                "ctl00_Body_C010_ctl00_ctl00_endDate_dateInput"
            )
            end_date.send_keys(self.case_date.strftime("%m/%d/%Y"))
            submit = self.webdriver.find_element_by_id(
                "Body_C010_ctl00_ctl00_btnSearch"
            )
            submit.click()
            self.wait_for_id(
                "ctl00_Body_C010_ctl00_ctl00_radGridOpinions_ctl00"
            )
            self.status = 200
            try:
                # The grid footer exposes total records and page count.
                nr_of_pages = self.webdriver.find_element_by_xpath(
                    '//div[contains(concat(" ", @class, " "), "rgInfoPart")]/strong[2]'
                )
                records_nr = self.webdriver.find_element_by_xpath(
                    '//div[contains(concat(" ", @class, " "), "rgInfoPart")]/strong[1]'
                )
                self.records_nr = int(records_nr.text)
                nr_of_pages = int(nr_of_pages.text)
            except NoSuchElementException:
                # No footer: either a single unpaginated page or no results.
                try:
                    self.records_nr = len(
                        self.webdriver.find_elements_by_xpath(
                            "//tr[contains(concat('', @id, ''), 'ctl00_Body_C010_ctl00_ctl00_radGridOpinions_ctl00')]"
                        )
                    )
                    nr_of_pages = 1
                except NoSuchElementException:
                    self.webdriver.quit()
                    return []
            html_pages = []
            logger.info(
                "records: {}, pages: {}".format(self.records_nr, nr_of_pages)
            )
            if nr_of_pages == 1:
                text = self.webdriver.page_source
                self.webdriver.quit()
                html_tree = html.fromstring(text)
                html_tree.make_links_absolute(self.url)
                remove_anchors = lambda url: url.split("#")[0]
                html_tree.rewrite_links(remove_anchors)
                html_pages.append(html_tree)
            else:
                # Capture page 1, then click "next" for each remaining page.
                logger.info(
                    "Paginating through %s pages of results." % nr_of_pages
                )
                logger.info("  Getting page 1")
                text = self.webdriver.page_source
                html_tree = html.fromstring(text)
                html_tree.make_links_absolute(self.url)
                remove_anchors = lambda url: url.split("#")[0]
                html_tree.rewrite_links(remove_anchors)
                html_pages.append(html_tree)
                for i in range(nr_of_pages - 1):
                    logger.info("  Getting page %s" % (i + 2))
                    next_page = self.webdriver.find_element_by_class_name(
                        "rgPageNext"
                    )
                    next_page.click()
                    self.webdriver.implicitly_wait(5)
                    text = self.webdriver.page_source
                    html_tree = html.fromstring(text)
                    html_tree.make_links_absolute(self.url)
                    remove_anchors = lambda url: url.split("#")[0]
                    html_tree.rewrite_links(remove_anchors)
                    html_pages.append(html_tree)
                self.webdriver.quit()
            return html_pages
    def _get_case_names(self):
        # td[4] of each grid row holds the case name.
        case_names = []
        for html_tree in self.html:
            page_records_count = self._get_opinion_count(html_tree)
            for record in range(page_records_count):
                path = "id('ctl00_Body_C010_ctl00_ctl00_radGridOpinions_ctl00__{n}')/td[4]/text()".format(
                    n=record
                )
                case_names.append(html_tree.xpath(path)[0])
        return case_names
    def _get_download_urls(self):
        # td[2]'s anchor href is the opinion PDF link.
        for html_tree in self.html:
            page_records_count = self._get_opinion_count(html_tree)
            for record in range(page_records_count):
                path = "id('ctl00_Body_C010_ctl00_ctl00_radGridOpinions_ctl00__{n}')/td[2]/a/@href".format(
                    n=record
                )
                yield html_tree.xpath(path)[0]
    def _get_case_dates(self):
        # td[3] holds the filing date, formatted MM/DD/YYYY.
        for html_tree in self.html:
            page_records_count = self._get_opinion_count(html_tree)
            for record in range(page_records_count):
                path = "id('ctl00_Body_C010_ctl00_ctl00_radGridOpinions_ctl00__{n}')/td[3]/text()".format(
                    n=record
                )
                yield datetime.strptime(html_tree.xpath(path)[0], "%m/%d/%Y")
    def _get_docket_numbers(self):
        # The anchor text in td[2] is the docket number.
        for html_tree in self.html:
            page_records_count = self._get_opinion_count(html_tree)
            for record in range(page_records_count):
                path = "id('ctl00_Body_C010_ctl00_ctl00_radGridOpinions_ctl00__{n}')/td[2]/a/text()".format(
                    n=record
                )
                yield html_tree.xpath(path)[0]
    def _get_precedential_statuses(self):
        # td[5] contains a publication flag; "unpub" marks unpublished opinions.
        for html_tree in self.html:
            page_records_count = self._get_opinion_count(html_tree)
            for record in range(page_records_count):
                path = "id('ctl00_Body_C010_ctl00_ctl00_radGridOpinions_ctl00__{n}')/td[5]/text()".format(
                    n=record
                )
                yield "Unpublished" if "unpub" in html_tree.xpath(path)[
                    0
                ] else "Published"
    @staticmethod
    def _get_opinion_count(html_tree):
        # Count the grid rows on one result page.
        return int(
            html_tree.xpath(
                "count(//tr[contains(concat('', @id, ''), 'ctl00_Body_C010_ctl00_ctl00_radGridOpinions_ctl00')])"
            )
        )
    def _download_backwards(self, d):
        # Entry point for the backscraper: scrape the window ending at date d.
        self.case_date = d
        logger.info(
            "Running backscraper with date range: %s to %s"
            % (
                self.case_date - timedelta(days=self.interval),
                self.case_date,
            )
        )
        self.html = self._download()
        if self.html is not None:
            # Setting status is important because it prevents the download
            # function from being run a second time by the parse method.
            self.status = 200
| 39.23301 | 118 | 0.548627 | from lxml import html
from datetime import datetime, timedelta, date
from dateutil.rrule import DAILY, rrule
from selenium.common.exceptions import NoSuchElementException
from juriscraper.AbstractSite import logger
from juriscraper.OpinionSiteWebDriven import OpinionSiteWebDriven
class Site(OpinionSiteWebDriven):
def __init__(self, *args, **kwargs):
super(Site, self).__init__(*args, **kwargs)
self.url = "http://www.ca5.uscourts.gov/electronic-case-filing/case-information/current-opinions"
self.court_id = self.__module__
self.interval = 5
self.case_date = datetime.today()
self.back_scrape_iterable = [
i.date()
for i in rrule(
DAILY,
interval=self.interval,
dtstart=date(1992, 5, 14),
until=date(2015, 1, 1),
)
]
self.uses_selenium = True
def _download(self, request_dict={}):
if self.test_mode_enabled():
html_tree_list = [
super(Site, self)._download(request_dict=request_dict)
]
self.records_nr = len(
html_tree_list[0].xpath(
"//tr[contains(concat('', @id, ''), 'ctl00_Body_C010_ctl00_ctl00_radGridOpinions_ctl00')]"
)
)
return html_tree_list
else:
logger.info("Running Selenium browser...")
self.initiate_webdriven_session()
self.wait_for_id("ctl00_Body_C010_ctl00_ctl00_endDate_dateInput")
start_date = self.webdriver.find_element_by_id(
"ctl00_Body_C010_ctl00_ctl00_startDate_dateInput"
)
start_date.send_keys(
(self.case_date - timedelta(days=self.interval)).strftime(
"%m/%d/%Y"
)
)
end_date = self.webdriver.find_element_by_id(
"ctl00_Body_C010_ctl00_ctl00_endDate_dateInput"
)
end_date.send_keys(self.case_date.strftime("%m/%d/%Y"))
submit = self.webdriver.find_element_by_id(
"Body_C010_ctl00_ctl00_btnSearch"
)
submit.click()
self.wait_for_id(
"ctl00_Body_C010_ctl00_ctl00_radGridOpinions_ctl00"
)
self.status = 200
try:
nr_of_pages = self.webdriver.find_element_by_xpath(
'//div[contains(concat(" ", @class, " "), "rgInfoPart")]/strong[2]'
)
records_nr = self.webdriver.find_element_by_xpath(
'//div[contains(concat(" ", @class, " "), "rgInfoPart")]/strong[1]'
)
self.records_nr = int(records_nr.text)
nr_of_pages = int(nr_of_pages.text)
except NoSuchElementException:
try:
self.records_nr = len(
self.webdriver.find_elements_by_xpath(
"//tr[contains(concat('', @id, ''), 'ctl00_Body_C010_ctl00_ctl00_radGridOpinions_ctl00')]"
)
)
nr_of_pages = 1
except NoSuchElementException:
self.webdriver.quit()
return []
html_pages = []
logger.info(
"records: {}, pages: {}".format(self.records_nr, nr_of_pages)
)
if nr_of_pages == 1:
text = self.webdriver.page_source
self.webdriver.quit()
html_tree = html.fromstring(text)
html_tree.make_links_absolute(self.url)
remove_anchors = lambda url: url.split("#")[0]
html_tree.rewrite_links(remove_anchors)
html_pages.append(html_tree)
else:
logger.info(
"Paginating through %s pages of results." % nr_of_pages
)
logger.info(" Getting page 1")
text = self.webdriver.page_source
html_tree = html.fromstring(text)
html_tree.make_links_absolute(self.url)
remove_anchors = lambda url: url.split("#")[0]
html_tree.rewrite_links(remove_anchors)
html_pages.append(html_tree)
for i in range(nr_of_pages - 1):
logger.info(" Getting page %s" % (i + 2))
next_page = self.webdriver.find_element_by_class_name(
"rgPageNext"
)
next_page.click()
self.webdriver.implicitly_wait(5)
text = self.webdriver.page_source
html_tree = html.fromstring(text)
html_tree.make_links_absolute(self.url)
remove_anchors = lambda url: url.split("#")[0]
html_tree.rewrite_links(remove_anchors)
html_pages.append(html_tree)
self.webdriver.quit()
return html_pages
def _get_case_names(self):
case_names = []
for html_tree in self.html:
page_records_count = self._get_opinion_count(html_tree)
for record in range(page_records_count):
path = "id('ctl00_Body_C010_ctl00_ctl00_radGridOpinions_ctl00__{n}')/td[4]/text()".format(
n=record
)
case_names.append(html_tree.xpath(path)[0])
return case_names
def _get_download_urls(self):
for html_tree in self.html:
page_records_count = self._get_opinion_count(html_tree)
for record in range(page_records_count):
path = "id('ctl00_Body_C010_ctl00_ctl00_radGridOpinions_ctl00__{n}')/td[2]/a/@href".format(
n=record
)
yield html_tree.xpath(path)[0]
def _get_case_dates(self):
for html_tree in self.html:
page_records_count = self._get_opinion_count(html_tree)
for record in range(page_records_count):
path = "id('ctl00_Body_C010_ctl00_ctl00_radGridOpinions_ctl00__{n}')/td[3]/text()".format(
n=record
)
yield datetime.strptime(html_tree.xpath(path)[0], "%m/%d/%Y")
def _get_docket_numbers(self):
for html_tree in self.html:
page_records_count = self._get_opinion_count(html_tree)
for record in range(page_records_count):
path = "id('ctl00_Body_C010_ctl00_ctl00_radGridOpinions_ctl00__{n}')/td[2]/a/text()".format(
n=record
)
yield html_tree.xpath(path)[0]
def _get_precedential_statuses(self):
for html_tree in self.html:
page_records_count = self._get_opinion_count(html_tree)
for record in range(page_records_count):
path = "id('ctl00_Body_C010_ctl00_ctl00_radGridOpinions_ctl00__{n}')/td[5]/text()".format(
n=record
)
yield "Unpublished" if "unpub" in html_tree.xpath(path)[
0
] else "Published"
@staticmethod
def _get_opinion_count(html_tree):
return int(
html_tree.xpath(
"count(//tr[contains(concat('', @id, ''), 'ctl00_Body_C010_ctl00_ctl00_radGridOpinions_ctl00')])"
)
)
def _download_backwards(self, d):
self.case_date = d
logger.info(
"Running backscraper with date range: %s to %s"
% (
self.case_date - timedelta(days=self.interval),
self.case_date,
)
)
self.html = self._download()
if self.html is not None:
self.status = 200
| true | true |
f7256de9d9a438b0f395aac6da42babe3f3800f4 | 11,937 | py | Python | tests/integration/test_polymorphic_parts/test.py | monadbobo/ClickHouse | 73b0f8db8c327a1d63cc7ebcc56087a3f9866dae | [
"Apache-2.0"
] | 3 | 2021-09-14T08:36:18.000Z | 2022-02-24T02:55:38.000Z | tests/integration/test_polymorphic_parts/test.py | monadbobo/ClickHouse | 73b0f8db8c327a1d63cc7ebcc56087a3f9866dae | [
"Apache-2.0"
] | 1 | 2020-04-04T04:25:47.000Z | 2020-04-04T04:25:47.000Z | tests/integration/test_polymorphic_parts/test.py | monadbobo/ClickHouse | 73b0f8db8c327a1d63cc7ebcc56087a3f9866dae | [
"Apache-2.0"
] | 1 | 2020-05-18T11:31:48.000Z | 2020-05-18T11:31:48.000Z | import time
import pytest
import random
import string
from helpers.test_tools import TSV
from helpers.test_tools import assert_eq_with_retry
from helpers.cluster import ClickHouseCluster
cluster = ClickHouseCluster(__file__)
def get_random_array():
    """Return a random-length list (0-1000 elements) of ints in [0, 1000)."""
    # randint(0, 999) yields the target range directly; the original
    # randint(0, 1000) % 1000 mapped both 0 and 1000 to 0 (biased).
    return [random.randint(0, 999) for _ in range(random.randint(0, 1000))]
def get_random_string():
    """Return a random uppercase-alphanumeric string of length 0-1000."""
    alphabet = string.ascii_uppercase + string.digits
    size = random.randint(0, 1000)
    return ''.join(random.choice(alphabet) for _ in range(size))
def insert_random_data(table, node, size):
    """Insert ``size`` rows of random data into ``table`` on ``node``."""
    rows = []
    for row_id in range(size):
        # (date, id, random string, random int array) per table schema.
        fields = (
            "'2019-10-11'",
            str(row_id),
            "'" + get_random_string() + "'",
            str(get_random_array()),
        )
        rows.append('(' + ','.join(fields) + ')')
    node.query("INSERT INTO {} VALUES {}".format(table, ','.join(rows)))
def create_tables(name, nodes, node_settings, shard):
    """Create replicated table ``name`` on each node with paired settings.

    The replica name is the node's position in ``nodes``.
    """
    for replica, (node, settings) in enumerate(zip(nodes, node_settings)):
        query_template = '''
        CREATE TABLE {name}(date Date, id UInt32, s String, arr Array(Int32))
        ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/{shard}/{name}', '{repl}')
        PARTITION BY toYYYYMM(date)
        ORDER BY id
        SETTINGS index_granularity = {index_granularity}, index_granularity_bytes = {index_granularity_bytes}, 
            min_rows_for_wide_part = {min_rows_for_wide_part}, min_bytes_for_wide_part = {min_bytes_for_wide_part}
        '''
        node.query(query_template.format(name=name, shard=shard, repl=replica, **settings))
def create_tables_old_format(name, nodes, shard):
    """Create table ``name`` using the legacy (pre-adaptive) MergeTree syntax."""
    replica = 0
    for node in nodes:
        node.query(
            '''
        CREATE TABLE {name}(date Date, id UInt32, s String, arr Array(Int32))
        ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/{shard}/{name}', '{repl}', date, id, 64)
        '''.format(name=name, shard=shard, repl=replica))
        replica += 1
# shard1: default vs non-adaptive granularity settings.
node1 = cluster.add_instance('node1', config_dir="configs", with_zookeeper=True)
node2 = cluster.add_instance('node2', config_dir="configs", with_zookeeper=True)
settings_default = {'index_granularity' : 64, 'index_granularity_bytes' : 10485760, 'min_rows_for_wide_part' : 512, 'min_bytes_for_wide_part' : 0}
settings_not_adaptive = {'index_granularity' : 64, 'index_granularity_bytes' : 0, 'min_rows_for_wide_part' : 512, 'min_bytes_for_wide_part' : 0}
# shard2: node3 leads merges; node4 is configured to never become leader.
node3 = cluster.add_instance('node3', config_dir="configs", with_zookeeper=True)
node4 = cluster.add_instance('node4', config_dir="configs", main_configs=['configs/no_leader.xml'], with_zookeeper=True)
settings_compact = {'index_granularity' : 64, 'index_granularity_bytes' : 10485760, 'min_rows_for_wide_part' : 512, 'min_bytes_for_wide_part' : 0}
settings_wide = {'index_granularity' : 64, 'index_granularity_bytes' : 10485760, 'min_rows_for_wide_part' : 0, 'min_bytes_for_wide_part' : 0}
# shard3: old-format table definitions with compact-parts config.
node5 = cluster.add_instance('node5', config_dir='configs', main_configs=['configs/compact_parts.xml'], with_zookeeper=True)
node6 = cluster.add_instance('node6', config_dir='configs', main_configs=['configs/compact_parts.xml'], with_zookeeper=True)
@pytest.fixture(scope="module")
def start_cluster():
    # Module-scoped fixture: start the cluster and create every test table
    # once; shut the cluster down when the module's tests finish.
    try:
        cluster.start()
        create_tables('polymorphic_table', [node1, node2], [settings_default, settings_default], "shard1")
        create_tables('non_adaptive_table', [node1, node2], [settings_not_adaptive, settings_default], "shard1")
        create_tables('polymorphic_table_compact', [node3, node4], [settings_compact, settings_wide], "shard2")
        create_tables('polymorphic_table_wide', [node3, node4], [settings_wide, settings_compact], "shard2")
        create_tables_old_format('polymorphic_table', [node5, node6], "shard3")
        yield cluster
    finally:
        cluster.shutdown()
@pytest.mark.parametrize(
    ('first_node', 'second_node'),
    [
        (node1, node2),
        (node5, node6)
    ]
)
def test_polymorphic_parts_basics(start_cluster, first_node, second_node):
    """Small inserts create Compact parts, large ones Wide; merges converge to Wide."""
    first_node.query("SYSTEM STOP MERGES")
    second_node.query("SYSTEM STOP MERGES")
    for size in [300, 300, 600]:
        insert_random_data('polymorphic_table', first_node, size)
    second_node.query("SYSTEM SYNC REPLICA polymorphic_table", timeout=20)
    assert first_node.query("SELECT count() FROM polymorphic_table") == "1200\n"
    assert second_node.query("SELECT count() FROM polymorphic_table") == "1200\n"
    # Two 300-row parts stay Compact (< min_rows_for_wide_part = 512);
    # the 600-row part is written Wide.
    expected = "Compact\t2\nWide\t1\n"
    assert TSV(first_node.query("SELECT part_type, count() FROM system.parts " \
        "WHERE table = 'polymorphic_table' AND active GROUP BY part_type ORDER BY part_type")) == TSV(expected)
    assert TSV(second_node.query("SELECT part_type, count() FROM system.parts " \
        "WHERE table = 'polymorphic_table' AND active GROUP BY part_type ORDER BY part_type")) == TSV(expected)
    first_node.query("SYSTEM START MERGES")
    second_node.query("SYSTEM START MERGES")
    for _ in range(40):
        insert_random_data('polymorphic_table', first_node, 10)
        insert_random_data('polymorphic_table', second_node, 10)
    first_node.query("SYSTEM SYNC REPLICA polymorphic_table", timeout=20)
    second_node.query("SYSTEM SYNC REPLICA polymorphic_table", timeout=20)
    assert first_node.query("SELECT count() FROM polymorphic_table") == "2000\n"
    assert second_node.query("SELECT count() FROM polymorphic_table") == "2000\n"
    first_node.query("OPTIMIZE TABLE polymorphic_table FINAL")
    second_node.query("SYSTEM SYNC REPLICA polymorphic_table", timeout=20)
    assert first_node.query("SELECT count() FROM polymorphic_table") == "2000\n"
    assert second_node.query("SELECT count() FROM polymorphic_table") == "2000\n"
    assert first_node.query("SELECT DISTINCT part_type FROM system.parts WHERE table = 'polymorphic_table' AND active") == "Wide\n"
    assert second_node.query("SELECT DISTINCT part_type FROM system.parts WHERE table = 'polymorphic_table' AND active") == "Wide\n"
    # Check alters and mutations also work
    first_node.query("ALTER TABLE polymorphic_table ADD COLUMN ss String")
    first_node.query("ALTER TABLE polymorphic_table UPDATE ss = toString(id) WHERE 1")
    second_node.query("SYSTEM SYNC REPLICA polymorphic_table", timeout=20)
    # Bug fix: these four comparisons were bare expressions (no `assert`),
    # so their results were silently discarded and verified nothing.
    assert first_node.query("SELECT count(ss) FROM polymorphic_table") == "2000\n"
    assert first_node.query("SELECT uniqExact(ss) FROM polymorphic_table") == "600\n"
    assert second_node.query("SELECT count(ss) FROM polymorphic_table") == "2000\n"
    assert second_node.query("SELECT uniqExact(ss) FROM polymorphic_table") == "600\n"
# Check that follower replicas create parts of the same type, which leader has chosen at merge.
@pytest.mark.parametrize(
    ('table', 'part_type'),
    [
        ('polymorphic_table_compact', 'Compact'),
        ('polymorphic_table_wide', 'Wide')
    ]
)
def test_different_part_types_on_replicas(start_cluster, table, part_type):
    """After a leader merge, the follower fetches a part of the leader's type."""
    leader = node3
    follower = node4
    assert leader.query("SELECT is_leader FROM system.replicas WHERE table = '{}'".format(table)) == "1\n"
    # Consistency fix: use the `follower` alias here (the original referenced
    # node4 directly despite defining the alias above).
    assert follower.query("SELECT is_leader FROM system.replicas WHERE table = '{}'".format(table)) == "0\n"
    for _ in range(3):
        insert_random_data(table, leader, 100)
    leader.query("OPTIMIZE TABLE {} FINAL".format(table))
    follower.query("SYSTEM SYNC REPLICA {}".format(table), timeout=20)
    expected = "{}\t1\n".format(part_type)
    assert TSV(leader.query("SELECT part_type, count() FROM system.parts " \
        "WHERE table = '{}' AND active GROUP BY part_type ORDER BY part_type".format(table))) == TSV(expected)
    assert TSV(follower.query("SELECT part_type, count() FROM system.parts " \
        "WHERE table = '{}' AND active GROUP BY part_type ORDER BY part_type".format(table))) == TSV(expected)
# node7 runs an old release (19.17.8.54, pre-polymorphic-parts) for
# cross-version replication tests; node8 runs the current build.
node7 = cluster.add_instance('node7', config_dir="configs", with_zookeeper=True, image='yandex/clickhouse-server:19.17.8.54', stay_alive=True, with_installed_binary=True)
node8 = cluster.add_instance('node8', config_dir="configs", with_zookeeper=True)
# The old server has no min_rows/bytes_for_wide_part settings.
settings7 = {'index_granularity' : 64, 'index_granularity_bytes' : 10485760}
settings8 = {'index_granularity' : 64, 'index_granularity_bytes' : 10485760, 'min_rows_for_wide_part' : 512, 'min_bytes_for_wide_part' : 0}
@pytest.fixture(scope="module")
def start_cluster_diff_versions():
    """Module-scoped cluster with an old-version node7 and a current node8."""
    try:
        # Bug fix: cluster.start() was inside the table-name loop, so it was
        # invoked once per table; start the cluster exactly once.
        cluster.start()
        for name in ['polymorphic_table', 'polymorphic_table_2']:
            node7.query(
            '''
            CREATE TABLE {name}(date Date, id UInt32, s String, arr Array(Int32))
            ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/shard5/{name}', '1')
            PARTITION BY toYYYYMM(date)
            ORDER BY id
            SETTINGS index_granularity = {index_granularity}, index_granularity_bytes = {index_granularity_bytes}
            '''.format(name=name, **settings7)
            )
            node8.query(
            '''
            CREATE TABLE {name}(date Date, id UInt32, s String, arr Array(Int32))
            ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/shard5/{name}', '2')
            PARTITION BY toYYYYMM(date)
            ORDER BY id
            SETTINGS index_granularity = {index_granularity}, index_granularity_bytes = {index_granularity_bytes},
                min_rows_for_wide_part = {min_rows_for_wide_part}, min_bytes_for_wide_part = {min_bytes_for_wide_part}
            '''.format(name=name, **settings8)
            )
        yield cluster
    finally:
        cluster.shutdown()
@pytest.mark.skip(reason="compatability is temporary broken")
def test_polymorphic_parts_diff_versions(start_cluster_diff_versions):
    # Check that replication with Wide parts works between different versions.
    # Consistency fix: the original assigned node_old/node_new and then used
    # node7/node8 directly, leaving the aliases unused.
    node_old = node7
    node_new = node8
    insert_random_data('polymorphic_table', node_old, 100)
    node_new.query("SYSTEM SYNC REPLICA polymorphic_table", timeout=20)
    assert node_new.query("SELECT count() FROM polymorphic_table") == "100\n"
    assert node_new.query("SELECT DISTINCT part_type FROM system.parts WHERE table = 'polymorphic_table' and active") == "Wide\n"
@pytest.mark.skip(reason="compatability is temporary broken")
def test_polymorphic_parts_diff_versions_2(start_cluster_diff_versions):
    # Replication doesn't work on old version if part is created in compact format, because
    # this version doesn't know anything about it. It's considered to be ok.
    node_old = node7
    node_new = node8
    insert_random_data('polymorphic_table_2', node_new, 100)
    assert node_new.query("SELECT count() FROM polymorphic_table_2") == "100\n"
    assert node_old.query("SELECT count() FROM polymorphic_table_2") == "0\n"
    # The old server cannot fetch the Compact part, so the sync must time out.
    with pytest.raises(Exception):
        node_old.query("SYSTEM SYNC REPLICA polymorphic_table_2", timeout=3)
    node_old.restart_with_latest_version()
    node_old.query("SYSTEM SYNC REPLICA polymorphic_table_2", timeout=20)
    # Works after update
    assert node_old.query("SELECT count() FROM polymorphic_table_2") == "100\n"
    assert node_old.query("SELECT DISTINCT part_type FROM system.parts WHERE table = 'polymorphic_table_2' and active") == "Compact\n"
def test_polymorphic_parts_non_adaptive(start_cluster):
    """Non-adaptive granularity forces Wide parts and logs a warning."""
    for node in (node1, node2):
        node.query("SYSTEM STOP MERGES")
    insert_random_data('non_adaptive_table', node1, 100)
    node2.query("SYSTEM SYNC REPLICA non_adaptive_table", timeout=20)
    insert_random_data('non_adaptive_table', node2, 100)
    node1.query("SYSTEM SYNC REPLICA non_adaptive_table", timeout=20)
    parts_query = ("SELECT part_type, count() FROM system.parts "
                   "WHERE table = 'non_adaptive_table' AND active GROUP BY part_type ORDER BY part_type")
    expected = TSV("Wide\t2\n")
    assert TSV(node1.query(parts_query)) == expected
    assert TSV(node2.query(parts_query)) == expected
    assert node1.contains_in_log("<Warning> default.non_adaptive_table: Table can't create parts with adaptive granularity")
| 45.387833 | 170 | 0.70981 | import time
import pytest
import random
import string
from helpers.test_tools import TSV
from helpers.test_tools import assert_eq_with_retry
from helpers.cluster import ClickHouseCluster
cluster = ClickHouseCluster(__file__)
def get_random_array():
return [random.randint(0, 1000) % 1000 for _ in range(random.randint(0, 1000))]
def get_random_string():
length = random.randint(0, 1000)
return ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(length))
def insert_random_data(table, node, size):
data = [
'(' + ','.join((
"'2019-10-11'",
str(i),
"'" + get_random_string() + "'",
str(get_random_array()))) +
')' for i in range(size)
]
node.query("INSERT INTO {} VALUES {}".format(table, ','.join(data)))
def create_tables(name, nodes, node_settings, shard):
for i, (node, settings) in enumerate(zip(nodes, node_settings)):
node.query(
'''
CREATE TABLE {name}(date Date, id UInt32, s String, arr Array(Int32))
ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/{shard}/{name}', '{repl}')
PARTITION BY toYYYYMM(date)
ORDER BY id
SETTINGS index_granularity = {index_granularity}, index_granularity_bytes = {index_granularity_bytes},
min_rows_for_wide_part = {min_rows_for_wide_part}, min_bytes_for_wide_part = {min_bytes_for_wide_part}
'''.format(name=name, shard=shard, repl=i, **settings))
def create_tables_old_format(name, nodes, shard):
for i, node in enumerate(nodes):
node.query(
'''
CREATE TABLE {name}(date Date, id UInt32, s String, arr Array(Int32))
ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/{shard}/{name}', '{repl}', date, id, 64)
'''.format(name=name, shard=shard, repl=i))
node1 = cluster.add_instance('node1', config_dir="configs", with_zookeeper=True)
node2 = cluster.add_instance('node2', config_dir="configs", with_zookeeper=True)
settings_default = {'index_granularity' : 64, 'index_granularity_bytes' : 10485760, 'min_rows_for_wide_part' : 512, 'min_bytes_for_wide_part' : 0}
settings_not_adaptive = {'index_granularity' : 64, 'index_granularity_bytes' : 0, 'min_rows_for_wide_part' : 512, 'min_bytes_for_wide_part' : 0}
node3 = cluster.add_instance('node3', config_dir="configs", with_zookeeper=True)
node4 = cluster.add_instance('node4', config_dir="configs", main_configs=['configs/no_leader.xml'], with_zookeeper=True)
settings_compact = {'index_granularity' : 64, 'index_granularity_bytes' : 10485760, 'min_rows_for_wide_part' : 512, 'min_bytes_for_wide_part' : 0}
settings_wide = {'index_granularity' : 64, 'index_granularity_bytes' : 10485760, 'min_rows_for_wide_part' : 0, 'min_bytes_for_wide_part' : 0}
node5 = cluster.add_instance('node5', config_dir='configs', main_configs=['configs/compact_parts.xml'], with_zookeeper=True)
node6 = cluster.add_instance('node6', config_dir='configs', main_configs=['configs/compact_parts.xml'], with_zookeeper=True)
@pytest.fixture(scope="module")
def start_cluster():
try:
cluster.start()
create_tables('polymorphic_table', [node1, node2], [settings_default, settings_default], "shard1")
create_tables('non_adaptive_table', [node1, node2], [settings_not_adaptive, settings_default], "shard1")
create_tables('polymorphic_table_compact', [node3, node4], [settings_compact, settings_wide], "shard2")
create_tables('polymorphic_table_wide', [node3, node4], [settings_wide, settings_compact], "shard2")
create_tables_old_format('polymorphic_table', [node5, node6], "shard3")
yield cluster
finally:
cluster.shutdown()
@pytest.mark.parametrize(
('first_node', 'second_node'),
[
(node1, node2),
(node5, node6)
]
)
def test_polymorphic_parts_basics(start_cluster, first_node, second_node):
first_node.query("SYSTEM STOP MERGES")
second_node.query("SYSTEM STOP MERGES")
for size in [300, 300, 600]:
insert_random_data('polymorphic_table', first_node, size)
second_node.query("SYSTEM SYNC REPLICA polymorphic_table", timeout=20)
assert first_node.query("SELECT count() FROM polymorphic_table") == "1200\n"
assert second_node.query("SELECT count() FROM polymorphic_table") == "1200\n"
expected = "Compact\t2\nWide\t1\n"
assert TSV(first_node.query("SELECT part_type, count() FROM system.parts " \
"WHERE table = 'polymorphic_table' AND active GROUP BY part_type ORDER BY part_type")) == TSV(expected)
assert TSV(second_node.query("SELECT part_type, count() FROM system.parts " \
"WHERE table = 'polymorphic_table' AND active GROUP BY part_type ORDER BY part_type")) == TSV(expected)
first_node.query("SYSTEM START MERGES")
second_node.query("SYSTEM START MERGES")
for _ in range(40):
insert_random_data('polymorphic_table', first_node, 10)
insert_random_data('polymorphic_table', second_node, 10)
first_node.query("SYSTEM SYNC REPLICA polymorphic_table", timeout=20)
second_node.query("SYSTEM SYNC REPLICA polymorphic_table", timeout=20)
assert first_node.query("SELECT count() FROM polymorphic_table") == "2000\n"
assert second_node.query("SELECT count() FROM polymorphic_table") == "2000\n"
first_node.query("OPTIMIZE TABLE polymorphic_table FINAL")
second_node.query("SYSTEM SYNC REPLICA polymorphic_table", timeout=20)
assert first_node.query("SELECT count() FROM polymorphic_table") == "2000\n"
assert second_node.query("SELECT count() FROM polymorphic_table") == "2000\n"
assert first_node.query("SELECT DISTINCT part_type FROM system.parts WHERE table = 'polymorphic_table' AND active") == "Wide\n"
assert second_node.query("SELECT DISTINCT part_type FROM system.parts WHERE table = 'polymorphic_table' AND active") == "Wide\n"
first_node.query("ALTER TABLE polymorphic_table ADD COLUMN ss String")
first_node.query("ALTER TABLE polymorphic_table UPDATE ss = toString(id) WHERE 1")
second_node.query("SYSTEM SYNC REPLICA polymorphic_table", timeout=20)
first_node.query("SELECT count(ss) FROM polymorphic_table") == "2000\n"
first_node.query("SELECT uniqExact(ss) FROM polymorphic_table") == "600\n"
second_node.query("SELECT count(ss) FROM polymorphic_table") == "2000\n"
second_node.query("SELECT uniqExact(ss) FROM polymorphic_table") == "600\n"
@pytest.mark.parametrize(
    ('table', 'part_type'),
    [
        ('polymorphic_table_compact', 'Compact'),
        ('polymorphic_table_wide', 'Wide')
    ]
)
def test_different_part_types_on_replicas(start_cluster, table, part_type):
    """After inserts and a final merge, both replicas must hold exactly one
    active part of the expected type."""
    primary, replica = node3, node4

    # node3 is expected to hold leadership for these tables.
    assert primary.query("SELECT is_leader FROM system.replicas WHERE table = '{}'".format(table)) == "1\n"
    assert node4.query("SELECT is_leader FROM system.replicas WHERE table = '{}'".format(table)) == "0\n"

    for _ in range(3):
        insert_random_data(table, primary, 100)

    primary.query("OPTIMIZE TABLE {} FINAL".format(table))
    replica.query("SYSTEM SYNC REPLICA {}".format(table), timeout=20)

    expected = "{}\t1\n".format(part_type)
    parts_query = ("SELECT part_type, count() FROM system.parts "
                   "WHERE table = '{}' AND active GROUP BY part_type ORDER BY part_type".format(table))
    for node in (primary, replica):
        assert TSV(node.query(parts_query)) == TSV(expected)
# node7 runs a fixed older release (19.17.8.54 image, stay_alive so it can be
# restarted with the latest binary later); node8 runs the version under test.
node7 = cluster.add_instance('node7', config_dir="configs", with_zookeeper=True, image='yandex/clickhouse-server:19.17.8.54', stay_alive=True, with_installed_binary=True)
node8 = cluster.add_instance('node8', config_dir="configs", with_zookeeper=True)
# MergeTree settings per node; node8 additionally sets min_rows/min_bytes
# thresholds, which control when Wide (vs Compact) parts are written.
settings7 = {'index_granularity' : 64, 'index_granularity_bytes' : 10485760}
settings8 = {'index_granularity' : 64, 'index_granularity_bytes' : 10485760, 'min_rows_for_wide_part' : 512, 'min_bytes_for_wide_part' : 0}
@pytest.fixture(scope="module")
def start_cluster_diff_versions():
    """Module-scoped cluster with an old-version node (node7) and a current
    node (node8); creates both replicated test tables on each node.

    Fix: ``cluster.start()`` was previously called inside the per-table loop,
    i.e. once for every table name. Starting the cluster is loop-invariant,
    so it is hoisted out and runs exactly once.
    """
    try:
        cluster.start()
        for name in ['polymorphic_table', 'polymorphic_table_2']:
            node7.query(
            '''
            CREATE TABLE {name}(date Date, id UInt32, s String, arr Array(Int32))
            ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/shard5/{name}', '1')
            PARTITION BY toYYYYMM(date)
            ORDER BY id
            SETTINGS index_granularity = {index_granularity}, index_granularity_bytes = {index_granularity_bytes}
            '''.format(name=name, **settings7)
            )
            node8.query(
            '''
            CREATE TABLE {name}(date Date, id UInt32, s String, arr Array(Int32))
            ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/shard5/{name}', '2')
            PARTITION BY toYYYYMM(date)
            ORDER BY id
            SETTINGS index_granularity = {index_granularity}, index_granularity_bytes = {index_granularity_bytes},
            min_rows_for_wide_part = {min_rows_for_wide_part}, min_bytes_for_wide_part = {min_bytes_for_wide_part}
            '''.format(name=name, **settings8)
            )
        yield cluster
    finally:
        cluster.shutdown()
@pytest.mark.skip(reason="compatability is temporary broken")
def test_polymorphic_parts_diff_versions(start_cluster_diff_versions):
    """Data inserted on the old-version node must replicate to the new node
    as Wide parts.

    Fix: the ``node_old``/``node_new`` aliases were assigned but never used
    (the body referenced ``node7``/``node8`` directly); use the aliases for
    consistency with ``test_polymorphic_parts_diff_versions_2``.
    """
    node_old = node7
    node_new = node8
    insert_random_data('polymorphic_table', node_old, 100)
    node_new.query("SYSTEM SYNC REPLICA polymorphic_table", timeout=20)
    assert node_new.query("SELECT count() FROM polymorphic_table") == "100\n"
    assert node_new.query("SELECT DISTINCT part_type FROM system.parts WHERE table = 'polymorphic_table' and active") == "Wide\n"
@pytest.mark.skip(reason="compatability is temporary broken")
def test_polymorphic_parts_diff_versions_2(start_cluster_diff_versions):
    """Parts written by the new server replicate to the old one only after
    the old server is upgraded."""
    # this version doesn't know anything about it. It's considered to be ok.
    old_server = node7
    new_server = node8

    insert_random_data('polymorphic_table_2', new_server, 100)
    assert new_server.query("SELECT count() FROM polymorphic_table_2") == "100\n"
    assert old_server.query("SELECT count() FROM polymorphic_table_2") == "0\n"

    # The old server must fail to sync the replica before the upgrade.
    with pytest.raises(Exception):
        old_server.query("SYSTEM SYNC REPLICA polymorphic_table_2", timeout=3)

    old_server.restart_with_latest_version()
    old_server.query("SYSTEM SYNC REPLICA polymorphic_table_2", timeout=20)

    # Works after update
    assert old_server.query("SELECT count() FROM polymorphic_table_2") == "100\n"
    assert old_server.query("SELECT DISTINCT part_type FROM system.parts WHERE table = 'polymorphic_table_2' and active") == "Compact\n"
def test_polymorphic_parts_non_adaptive(start_cluster):
    """A table with non-adaptive granularity must create only Wide parts and
    log a warning about it."""
    for node in (node1, node2):
        node.query("SYSTEM STOP MERGES")

    insert_random_data('non_adaptive_table', node1, 100)
    node2.query("SYSTEM SYNC REPLICA non_adaptive_table", timeout=20)
    insert_random_data('non_adaptive_table', node2, 100)
    node1.query("SYSTEM SYNC REPLICA non_adaptive_table", timeout=20)

    parts_query = ("SELECT part_type, count() FROM system.parts "
                   "WHERE table = 'non_adaptive_table' AND active GROUP BY part_type ORDER BY part_type")
    # One Wide part from each node's insert, on both replicas.
    for node in (node1, node2):
        assert TSV(node.query(parts_query)) == TSV("Wide\t2\n")

    assert node1.contains_in_log("<Warning> default.non_adaptive_table: Table can't create parts with adaptive granularity")
| true | true |
f7256dea781fbaf1c92c2fd539b3e3cfbad4cd6e | 214 | py | Python | Lectures/9 - Matlib/test.py | JensRL/PPaNM | a28d9826d24c821cbc35a2e5fb5c478118f1e693 | [
"MIT"
] | null | null | null | Lectures/9 - Matlib/test.py | JensRL/PPaNM | a28d9826d24c821cbc35a2e5fb5c478118f1e693 | [
"MIT"
] | null | null | null | Lectures/9 - Matlib/test.py | JensRL/PPaNM | a28d9826d24c821cbc35a2e5fb5c478118f1e693 | [
"MIT"
] | null | null | null | import math
import math
import scipy.integrate as integrate

# Number of times quad evaluates the integrand.
ncalls = 0

def f(x):
    """Integrand log(x)/sqrt(x); the exact integral over (0, 1] is -4."""
    global ncalls
    ncalls += 1
    return math.log(x) / math.sqrt(x)

# quad returns (value, estimated_absolute_error).
result = integrate.quad(f, 0, 1)
print("result=", result, "ncalls =", ncalls)
import scipy.integrate as integrate
ncalls = 0
def f(x):
global ncalls
ncalls +=1
return math.log(x)/math.sqrt(x)
result = integrate.quad(f,0,1)
print("result=", result, "ncalls =",ncalls) | true | true |
f7256e40446cfaf2d265b8165ba99a61224d4a30 | 1,336 | py | Python | apache2/htdocs/syntax/string4.py | tigerish009/mampstack-8.0.0-0 | d4d0550e0d29d850ebd9a2b70c3f16641de0e1bf | [
"Apache-2.0"
] | null | null | null | apache2/htdocs/syntax/string4.py | tigerish009/mampstack-8.0.0-0 | d4d0550e0d29d850ebd9a2b70c3f16641de0e1bf | [
"Apache-2.0"
] | null | null | null | apache2/htdocs/syntax/string4.py | tigerish009/mampstack-8.0.0-0 | d4d0550e0d29d850ebd9a2b70c3f16641de0e1bf | [
"Apache-2.0"
] | null | null | null | #positional formatting
# Positional formatting: each '{}' is filled from the format() arguments in order.
print('to {}.Lorem Ipsum is simply dummy text of the printing and typesetting industry. Lorem Ipsum has been the industry sstandard dummy text ever since the 1500s, when an unknown printer took a galley of type and scrambled it to make a type specimen book. It has survived not only five centuries, but also the leap into electronic typesetting, remaining essentially unchanged. {} It was popularised {} in the 1960s with the release of Letraset sheets containing Lorem Ipsum passages, and more recently with desktop publishing software like Aldus PageMaker including versions of {} Lorem Ipsum.'.format('egoing', 12, 'egoing', 'egoing'))
# Named placeholders: '{name}' can be reused several times; '{age:d}' formats the value as a decimal integer.
print('to {name}.Lorem Ipsum is simply dummy text of the printing and typesetting industry. Lorem Ipsum has been the industry sstandard dummy text ever since the 1500s, when an unknown printer took a galley of type and scrambled it to make a type specimen book. It has survived not only five centuries, but also the leap into electronic typesetting, remaining essentially unchanged. {age:d} It was popularised {name} in the 1960s with the release of Letraset sheets containing Lorem Ipsum passages, and more recently with desktop publishing software like Aldus PageMaker including versions of {name} Lorem Ipsum.'.format(name='apple!!!!!!!', age=11111))
| 222.666667 | 653 | 0.797156 |
# Positional '{}' placeholders, filled from format() arguments in order.
print('to {}.Lorem Ipsum is simply dummy text of the printing and typesetting industry. Lorem Ipsum has been the industry sstandard dummy text ever since the 1500s, when an unknown printer took a galley of type and scrambled it to make a type specimen book. It has survived not only five centuries, but also the leap into electronic typesetting, remaining essentially unchanged. {} It was popularised {} in the 1960s with the release of Letraset sheets containing Lorem Ipsum passages, and more recently with desktop publishing software like Aldus PageMaker including versions of {} Lorem Ipsum.'.format('egoing', 12, 'egoing', 'egoing'))
# Named '{name}'/'{age:d}' placeholders, filled from keyword arguments.
print('to {name}.Lorem Ipsum is simply dummy text of the printing and typesetting industry. Lorem Ipsum has been the industry sstandard dummy text ever since the 1500s, when an unknown printer took a galley of type and scrambled it to make a type specimen book. It has survived not only five centuries, but also the leap into electronic typesetting, remaining essentially unchanged. {age:d} It was popularised {name} in the 1960s with the release of Letraset sheets containing Lorem Ipsum passages, and more recently with desktop publishing software like Aldus PageMaker including versions of {name} Lorem Ipsum.'.format(name='apple!!!!!!!', age=11111))
| true | true |
f7256ea03f1ba9c7d22b3e985d8dc7af2edfcdc4 | 2,525 | py | Python | bentoml/adapters/dataframe_output.py | d3m0n-r00t/BentoML | e5c53b821369f5391de9ab3a20ecad5db9e77202 | [
"Apache-2.0"
] | null | null | null | bentoml/adapters/dataframe_output.py | d3m0n-r00t/BentoML | e5c53b821369f5391de9ab3a20ecad5db9e77202 | [
"Apache-2.0"
] | null | null | null | bentoml/adapters/dataframe_output.py | d3m0n-r00t/BentoML | e5c53b821369f5391de9ab3a20ecad5db9e77202 | [
"Apache-2.0"
] | null | null | null | import json
from typing import Sequence
from bentoml.adapters.json_output import JsonOutput
from bentoml.types import InferenceError, InferenceResult, InferenceTask
from bentoml.utils.dataframe_util import PANDAS_DATAFRAME_TO_JSON_ORIENT_OPTIONS
def df_to_json(result, pandas_dataframe_orient="records"):
import pandas as pd
assert (
pandas_dataframe_orient in PANDAS_DATAFRAME_TO_JSON_ORIENT_OPTIONS
), f"unknown pandas dataframe orient '{pandas_dataframe_orient}'"
if isinstance(result, pd.DataFrame):
return result.to_json(orient=pandas_dataframe_orient)
if isinstance(result, pd.Series):
return pd.DataFrame(result).to_json(orient=pandas_dataframe_orient)
return json.dumps(result)
class DataframeOutput(JsonOutput):
"""
Converts result of user defined API function into specific output.
Args:
cors (str): The value of the Access-Control-Allow-Origin header set in the
AWS Lambda response object. Default is "*". If set to None,
the header will not be set.
"""
BATCH_MODE_SUPPORTED = True
def __init__(self, output_orient='records', **kwargs):
super().__init__(**kwargs)
self.output_orient = output_orient
assert self.output_orient in PANDAS_DATAFRAME_TO_JSON_ORIENT_OPTIONS, (
f"Invalid 'output_orient'='{self.orient}', valid options are "
f"{PANDAS_DATAFRAME_TO_JSON_ORIENT_OPTIONS}"
)
@property
def config(self):
base_config = super(DataframeOutput, self).config
return dict(base_config, output_orient=self.output_orient)
@property
def pip_dependencies(self):
"""
:return: List of PyPI package names required by this OutputAdapter
"""
return ['pandas']
def pack_user_func_return_value(
self, return_result, tasks: Sequence[InferenceTask]
) -> Sequence[InferenceResult[str]]:
rv = []
i = 0
for task in tasks:
if task.batch is None:
result = return_result[i : i + 1]
i += 1
else:
result = return_result[i : i + task.batch]
i += task.batch
try:
result = df_to_json(result, self.output_orient)
rv.append(InferenceResult(http_status=200, data=result))
except Exception as e: # pylint: disable=broad-except
rv.append(InferenceError(err_msg=str(e), http_status=500))
return rv
| 33.666667 | 82 | 0.65901 | import json
from typing import Sequence
from bentoml.adapters.json_output import JsonOutput
from bentoml.types import InferenceError, InferenceResult, InferenceTask
from bentoml.utils.dataframe_util import PANDAS_DATAFRAME_TO_JSON_ORIENT_OPTIONS
def df_to_json(result, pandas_dataframe_orient="records"):
    """Serialize ``result`` to JSON: DataFrame/Series via pandas ``to_json``
    with the given orient, anything else via ``json.dumps``.

    Raises ValueError for an unsupported orient; the original used
    ``assert``, which is stripped under ``python -O``.
    """
    import pandas as pd

    if pandas_dataframe_orient not in PANDAS_DATAFRAME_TO_JSON_ORIENT_OPTIONS:
        raise ValueError(
            f"unknown pandas dataframe orient '{pandas_dataframe_orient}'")

    if isinstance(result, pd.DataFrame):
        return result.to_json(orient=pandas_dataframe_orient)
    if isinstance(result, pd.Series):
        return pd.DataFrame(result).to_json(orient=pandas_dataframe_orient)
    return json.dumps(result)
class DataframeOutput(JsonOutput):
    """Output adapter that serializes DataFrame-like results to JSON using a
    configurable pandas orient (default "records")."""
    BATCH_MODE_SUPPORTED = True

    def __init__(self, output_orient='records', **kwargs):
        super().__init__(**kwargs)
        self.output_orient = output_orient
        # Bug fix: the failure message referenced the non-existent
        # ``self.orient`` (AttributeError); use ``self.output_orient`` and an
        # explicit ValueError instead of ``assert`` (stripped under -O).
        if self.output_orient not in PANDAS_DATAFRAME_TO_JSON_ORIENT_OPTIONS:
            raise ValueError(
                f"Invalid 'output_orient'='{self.output_orient}', valid "
                f"options are {PANDAS_DATAFRAME_TO_JSON_ORIENT_OPTIONS}"
            )

    @property
    def config(self):
        # Base adapter config plus this adapter's orient.
        base_config = super(DataframeOutput, self).config
        return dict(base_config, output_orient=self.output_orient)

    @property
    def pip_dependencies(self):
        # PyPI packages required by this OutputAdapter.
        return ['pandas']

    def pack_user_func_return_value(
        self, return_result, tasks: Sequence[InferenceTask]
    ) -> Sequence[InferenceResult[str]]:
        # Slice the batched return value per task; report serialization
        # failures per task as HTTP 500 instead of failing the whole batch.
        rv = []
        i = 0
        for task in tasks:
            if task.batch is None:
                result = return_result[i: i + 1]
                i += 1
            else:
                result = return_result[i: i + task.batch]
                i += task.batch
            try:
                result = df_to_json(result, self.output_orient)
                rv.append(InferenceResult(http_status=200, data=result))
            except Exception as e:  # pylint: disable=broad-except
                rv.append(InferenceError(err_msg=str(e), http_status=500))
        return rv
| true | true |
f7256eb9c87e025bed52453bc1d07f3c08e79dcc | 1,581 | py | Python | samples/generated_samples/vmmigration_v1_generated_vm_migration_create_source_sync.py | renovate-bot/python-vmmigration | 80a2cf46a21f516899da818a7aec0f2a67222047 | [
"Apache-2.0"
] | null | null | null | samples/generated_samples/vmmigration_v1_generated_vm_migration_create_source_sync.py | renovate-bot/python-vmmigration | 80a2cf46a21f516899da818a7aec0f2a67222047 | [
"Apache-2.0"
] | 10 | 2021-11-18T10:47:48.000Z | 2022-03-07T15:48:54.000Z | samples/generated_samples/vmmigration_v1_generated_vm_migration_create_source_sync.py | renovate-bot/python-vmmigration | 80a2cf46a21f516899da818a7aec0f2a67222047 | [
"Apache-2.0"
] | 1 | 2022-01-29T08:15:02.000Z | 2022-01-29T08:15:02.000Z | # -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for CreateSource
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-vm-migration
# [START vmmigration_v1_generated_VmMigration_CreateSource_sync]
from google.cloud import vmmigration_v1
def sample_create_source():
# Create a client
client = vmmigration_v1.VmMigrationClient()
# Initialize request argument(s)
request = vmmigration_v1.CreateSourceRequest(
parent="parent_value",
source_id="source_id_value",
)
# Make the request
operation = client.create_source(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
# [END vmmigration_v1_generated_VmMigration_CreateSource_sync]
| 31 | 85 | 0.752056 |
from google.cloud import vmmigration_v1
def sample_create_source():
client = vmmigration_v1.VmMigrationClient()
request = vmmigration_v1.CreateSourceRequest(
parent="parent_value",
source_id="source_id_value",
)
operation = client.create_source(request=request)
print("Waiting for operation to complete...")
response = operation.result()
print(response)
| true | true |
f7256ed5b108211f2458fd64f6a79da2e72b170a | 62,996 | py | Python | site-packages/sklearn/linear_model/_stochastic_gradient.py | linusg/Pyto | eab3c3e093a8cace53d5b9425d1af2f535d456ee | [
"MIT"
] | 2 | 2020-08-25T13:55:00.000Z | 2020-08-25T16:36:03.000Z | site-packages/sklearn/linear_model/_stochastic_gradient.py | linusg/Pyto | eab3c3e093a8cace53d5b9425d1af2f535d456ee | [
"MIT"
] | 1 | 2020-04-25T20:36:07.000Z | 2020-04-25T20:36:07.000Z | site-packages/sklearn/linear_model/_stochastic_gradient.py | Wristlebane/Pyto | 901ac307b68486d8289105c159ca702318bea5b0 | [
"MIT"
] | null | null | null | # Authors: Peter Prettenhofer <peter.prettenhofer@gmail.com> (main author)
# Mathieu Blondel (partial_fit support)
#
# License: BSD 3 clause
"""Classification and regression using Stochastic Gradient Descent (SGD)."""
import numpy as np
import warnings
from abc import ABCMeta, abstractmethod
from joblib import Parallel, delayed
from ..base import clone, is_classifier
from ._base import LinearClassifierMixin, SparseCoefMixin
from ._base import make_dataset
from ..base import BaseEstimator, RegressorMixin
from ..utils import check_array, check_random_state, check_X_y
from ..utils.extmath import safe_sparse_dot
from ..utils.multiclass import _check_partial_fit_first_call
from ..utils.validation import check_is_fitted, _check_sample_weight
from ..exceptions import ConvergenceWarning
from ..model_selection import StratifiedShuffleSplit, ShuffleSplit
from ._sgd_fast import plain_sgd, average_sgd
from ..utils import compute_class_weight
from ._sgd_fast import Hinge
from ._sgd_fast import SquaredHinge
from ._sgd_fast import Log
from ._sgd_fast import ModifiedHuber
from ._sgd_fast import SquaredLoss
from ._sgd_fast import Huber
from ._sgd_fast import EpsilonInsensitive
from ._sgd_fast import SquaredEpsilonInsensitive
from ..utils.fixes import _joblib_parallel_args
# Learning-rate schedule name -> integer code passed to the Cython SGD
# routines (``plain_sgd`` / ``average_sgd``).
LEARNING_RATE_TYPES = {"constant": 1, "optimal": 2, "invscaling": 3,
                       "adaptive": 4, "pa1": 5, "pa2": 6}
# Penalty name -> integer code passed to the Cython SGD routines.
PENALTY_TYPES = {"none": 0, "l2": 2, "l1": 1, "elasticnet": 3}
DEFAULT_EPSILON = 0.1
# Default value of ``epsilon`` parameter.
# Seeds drawn for the Cython routines must fit a signed 32-bit C long
# (numpy mtrand limitation; see ``fit_binary``).
MAX_INT = np.iinfo(np.int32).max
class _ValidationScoreCallback:
    """Score a cloned estimator on a held-out validation set.

    Instances are called during SGD training with the current ``coef`` /
    ``intercept`` and return the validation score used for early stopping.
    """
    def __init__(self, estimator, X_val, y_val, sample_weight_val,
                 classes=None):
        cloned = clone(estimator)
        cloned.t_ = 1  # dummy iteration counter, so check_is_fitted passes
        if classes is not None:
            cloned.classes_ = classes
        self.estimator = cloned
        self.X_val = X_val
        self.y_val = y_val
        self.sample_weight_val = sample_weight_val
    def __call__(self, coef, intercept):
        model = self.estimator
        # Install the current parameters, then score on the validation split.
        model.coef_ = coef.reshape(1, -1)
        model.intercept_ = np.atleast_1d(intercept)
        return model.score(self.X_val, self.y_val, self.sample_weight_val)
class BaseSGD(SparseCoefMixin, BaseEstimator, metaclass=ABCMeta):
    """Base class for SGD classification and regression."""
    def __init__(self, loss, penalty='l2', alpha=0.0001, C=1.0,
                 l1_ratio=0.15, fit_intercept=True, max_iter=1000, tol=1e-3,
                 shuffle=True, verbose=0, epsilon=0.1, random_state=None,
                 learning_rate="optimal", eta0=0.0, power_t=0.5,
                 early_stopping=False, validation_fraction=0.1,
                 n_iter_no_change=5, warm_start=False, average=False):
        """Store the hyper-parameters and validate them eagerly."""
        self.loss = loss
        self.penalty = penalty
        self.learning_rate = learning_rate
        self.epsilon = epsilon
        self.alpha = alpha
        self.C = C
        self.l1_ratio = l1_ratio
        self.fit_intercept = fit_intercept
        self.shuffle = shuffle
        self.random_state = random_state
        self.verbose = verbose
        self.eta0 = eta0
        self.power_t = power_t
        self.early_stopping = early_stopping
        self.validation_fraction = validation_fraction
        self.n_iter_no_change = n_iter_no_change
        self.warm_start = warm_start
        self.average = average
        self.max_iter = max_iter
        self.tol = tol
        # current tests expect init to do parameter validation
        # but we are not allowed to set attributes
        self._validate_params()
    def set_params(self, **kwargs):
        """Set parameters and re-run validation on the new values."""
        super().set_params(**kwargs)
        self._validate_params()
        return self
    @abstractmethod
    def fit(self, X, y):
        """Fit model."""
    def _validate_params(self, for_partial_fit=False):
        """Validate input params. """
        if not isinstance(self.shuffle, bool):
            raise ValueError("shuffle must be either True or False")
        if not isinstance(self.early_stopping, bool):
            raise ValueError("early_stopping must be either True or False")
        if self.early_stopping and for_partial_fit:
            raise ValueError("early_stopping should be False with partial_fit")
        if self.max_iter is not None and self.max_iter <= 0:
            raise ValueError("max_iter must be > zero. Got %f" % self.max_iter)
        if not (0.0 <= self.l1_ratio <= 1.0):
            raise ValueError("l1_ratio must be in [0, 1]")
        if self.alpha < 0.0:
            raise ValueError("alpha must be >= 0")
        if self.n_iter_no_change < 1:
            raise ValueError("n_iter_no_change must be >= 1")
        if not (0.0 < self.validation_fraction < 1.0):
            raise ValueError("validation_fraction must be in range (0, 1)")
        if self.learning_rate in ("constant", "invscaling", "adaptive"):
            # These schedules use eta0 as the (initial) step size.
            if self.eta0 <= 0.0:
                raise ValueError("eta0 must be > 0")
        if self.learning_rate == "optimal" and self.alpha == 0:
            raise ValueError("alpha must be > 0 since "
                             "learning_rate is 'optimal'. alpha is used "
                             "to compute the optimal learning rate.")
        # raises ValueError if not registered
        self._get_penalty_type(self.penalty)
        self._get_learning_rate_type(self.learning_rate)
        if self.loss not in self.loss_functions:
            raise ValueError("The loss %s is not supported. " % self.loss)
    def _get_loss_function(self, loss):
        """Get concrete ``LossFunction`` object for str ``loss``. """
        try:
            # loss_functions maps name -> (loss class, *default ctor args).
            loss_ = self.loss_functions[loss]
            loss_class, args = loss_[0], loss_[1:]
            if loss in ('huber', 'epsilon_insensitive',
                        'squared_epsilon_insensitive'):
                # Epsilon-based losses take the user-supplied epsilon instead
                # of the registered default.
                args = (self.epsilon, )
            return loss_class(*args)
        except KeyError:
            raise ValueError("The loss %s is not supported. " % loss)
    def _get_learning_rate_type(self, learning_rate):
        """Map a learning-rate schedule name to its integer code."""
        try:
            return LEARNING_RATE_TYPES[learning_rate]
        except KeyError:
            raise ValueError("learning rate %s "
                             "is not supported. " % learning_rate)
    def _get_penalty_type(self, penalty):
        """Map a penalty name (case-insensitive) to its integer code."""
        penalty = str(penalty).lower()
        try:
            return PENALTY_TYPES[penalty]
        except KeyError:
            raise ValueError("Penalty %s is not supported. " % penalty)
    def _allocate_parameter_mem(self, n_classes, n_features, coef_init=None,
                                intercept_init=None):
        """Allocate mem for parameters; initialize if provided."""
        if n_classes > 2:
            # allocate coef_ for multi-class
            if coef_init is not None:
                coef_init = np.asarray(coef_init, order="C")
                if coef_init.shape != (n_classes, n_features):
                    raise ValueError("Provided ``coef_`` does not match "
                                     "dataset. ")
                self.coef_ = coef_init
            else:
                self.coef_ = np.zeros((n_classes, n_features),
                                      dtype=np.float64, order="C")
            # allocate intercept_ for multi-class
            if intercept_init is not None:
                intercept_init = np.asarray(intercept_init, order="C")
                if intercept_init.shape != (n_classes, ):
                    raise ValueError("Provided intercept_init "
                                     "does not match dataset.")
                self.intercept_ = intercept_init
            else:
                self.intercept_ = np.zeros(n_classes, dtype=np.float64,
                                           order="C")
        else:
            # allocate coef_ for binary problem
            if coef_init is not None:
                coef_init = np.asarray(coef_init, dtype=np.float64,
                                       order="C")
                coef_init = coef_init.ravel()
                if coef_init.shape != (n_features,):
                    raise ValueError("Provided coef_init does not "
                                     "match dataset.")
                self.coef_ = coef_init
            else:
                self.coef_ = np.zeros(n_features,
                                      dtype=np.float64,
                                      order="C")
            # allocate intercept_ for binary problem
            if intercept_init is not None:
                intercept_init = np.asarray(intercept_init, dtype=np.float64)
                if intercept_init.shape != (1,) and intercept_init.shape != ():
                    raise ValueError("Provided intercept_init "
                                     "does not match dataset.")
                self.intercept_ = intercept_init.reshape(1,)
            else:
                self.intercept_ = np.zeros(1, dtype=np.float64, order="C")
        # initialize average parameters
        if self.average > 0:
            # standard_* alias the live buffers; average_* accumulate the
            # averaged-SGD parameters in separate zeroed buffers.
            self.standard_coef_ = self.coef_
            self.standard_intercept_ = self.intercept_
            self.average_coef_ = np.zeros(self.coef_.shape,
                                          dtype=np.float64,
                                          order="C")
            self.average_intercept_ = np.zeros(self.standard_intercept_.shape,
                                               dtype=np.float64,
                                               order="C")
    def _make_validation_split(self, y):
        """Split the dataset between training set and validation set.
        Parameters
        ----------
        y : array, shape (n_samples, )
            Target values.
        Returns
        -------
        validation_mask : array, shape (n_samples, )
            Equal to 1 on the validation set, 0 on the training set.
        """
        n_samples = y.shape[0]
        validation_mask = np.zeros(n_samples, dtype=np.uint8)
        if not self.early_stopping:
            # use the full set for training, with an empty validation set
            return validation_mask
        # Stratify the split for classifiers to keep class balance.
        if is_classifier(self):
            splitter_type = StratifiedShuffleSplit
        else:
            splitter_type = ShuffleSplit
        cv = splitter_type(test_size=self.validation_fraction,
                           random_state=self.random_state)
        idx_train, idx_val = next(cv.split(np.zeros(shape=(y.shape[0], 1)), y))
        if idx_train.shape[0] == 0 or idx_val.shape[0] == 0:
            raise ValueError(
                "Splitting %d samples into a train set and a validation set "
                "with validation_fraction=%r led to an empty set (%d and %d "
                "samples). Please either change validation_fraction, increase "
                "number of samples, or disable early_stopping."
                % (n_samples, self.validation_fraction, idx_train.shape[0],
                   idx_val.shape[0]))
        validation_mask[idx_val] = 1
        return validation_mask
    def _make_validation_score_cb(self, validation_mask, X, y, sample_weight,
                                  classes=None):
        """Build the early-stopping score callback, or None when disabled."""
        if not self.early_stopping:
            return None
        return _ValidationScoreCallback(
            self, X[validation_mask], y[validation_mask],
            sample_weight[validation_mask], classes=classes)
def _prepare_fit_binary(est, y, i):
    """Initialization for fit_binary.

    Builds the +/-1 target vector for the one-vs-rest problem where class
    ``i`` is positive, and selects the (possibly averaged) coefficient and
    intercept buffers for that problem.

    Returns y_i, coef, intercept, average_coef, average_intercept.
    """
    # +1 for the positive class i, -1 for everything else.
    y_i = np.ones(y.shape, dtype=np.float64, order="C")
    y_i[y != est.classes_[i]] = -1.0

    # average_* stay at their defaults unless averaging is enabled.
    average_coef = None
    average_intercept = 0

    if len(est.classes_) == 2:
        # Binary problem: a single (row) coefficient vector is used.
        if est.average:
            coef = est.standard_coef_.ravel()
            intercept = est.standard_intercept_[0]
            average_coef = est.average_coef_.ravel()
            average_intercept = est.average_intercept_[0]
        else:
            coef = est.coef_.ravel()
            intercept = est.intercept_[0]
    else:
        # One-vs-rest: pick out row i of the parameter matrices.
        if est.average:
            coef = est.standard_coef_[i]
            intercept = est.standard_intercept_[i]
            average_coef = est.average_coef_[i]
            average_intercept = est.average_intercept_[i]
        else:
            coef = est.coef_[i]
            intercept = est.intercept_[i]

    return y_i, coef, intercept, average_coef, average_intercept
def fit_binary(est, i, X, y, alpha, C, learning_rate, max_iter,
               pos_weight, neg_weight, sample_weight, validation_mask=None,
               random_state=None):
    """Fit a single binary classifier.
    The i'th class is considered the "positive" class.
    Parameters
    ----------
    est : Estimator object
        The estimator to fit
    i : int
        Index of the positive class
    X : numpy array or sparse matrix of shape [n_samples,n_features]
        Training data
    y : numpy array of shape [n_samples, ]
        Target values
    alpha : float
        The regularization parameter
    C : float
        Maximum step size for passive aggressive
    learning_rate : string
        The learning rate. Accepted values are 'constant', 'optimal',
        'invscaling', 'pa1' and 'pa2'.
    max_iter : int
        The maximum number of iterations (epochs)
    pos_weight : float
        The weight of the positive class
    neg_weight : float
        The weight of the negative class
    sample_weight : numpy array of shape [n_samples, ]
        The weight of each sample
    validation_mask : numpy array of shape [n_samples, ] or None
        Precomputed validation mask in case _fit_binary is called in the
        context of a one-vs-rest reduction.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    """
    # if average is not true, average_coef, and average_intercept will be
    # unused
    y_i, coef, intercept, average_coef, average_intercept = \
        _prepare_fit_binary(est, y, i)
    assert y_i.shape[0] == y.shape[0] == sample_weight.shape[0]
    random_state = check_random_state(random_state)
    # Wrap (X, y_i, sample_weight) into the sequential dataset consumed by
    # the Cython SGD routines.
    dataset, intercept_decay = make_dataset(
        X, y_i, sample_weight, random_state=random_state)
    penalty_type = est._get_penalty_type(est.penalty)
    learning_rate_type = est._get_learning_rate_type(learning_rate)
    if validation_mask is None:
        validation_mask = est._make_validation_split(y_i)
    classes = np.array([-1, 1], dtype=y_i.dtype)
    validation_score_cb = est._make_validation_score_cb(
        validation_mask, X, y_i, sample_weight, classes=classes)
    # numpy mtrand expects a C long which is a signed 32 bit integer under
    # Windows
    seed = random_state.randint(MAX_INT)
    # tol=None disables the convergence check (-inf is never reached).
    tol = est.tol if est.tol is not None else -np.inf
    if not est.average:
        # Plain SGD: returns (coef, intercept, n_iter_).
        result = plain_sgd(coef, intercept, est.loss_function_,
                           penalty_type, alpha, C, est.l1_ratio,
                           dataset, validation_mask, est.early_stopping,
                           validation_score_cb, int(est.n_iter_no_change),
                           max_iter, tol, int(est.fit_intercept),
                           int(est.verbose), int(est.shuffle), seed,
                           pos_weight, neg_weight,
                           learning_rate_type, est.eta0,
                           est.power_t, est.t_, intercept_decay)
    else:
        # Averaged SGD: the averaged buffers are updated in place; the
        # averaged intercept is written back onto the estimator here.
        standard_coef, standard_intercept, average_coef, average_intercept, \
            n_iter_ = average_sgd(coef, intercept, average_coef,
                                  average_intercept, est.loss_function_,
                                  penalty_type, alpha, C, est.l1_ratio,
                                  dataset, validation_mask, est.early_stopping,
                                  validation_score_cb,
                                  int(est.n_iter_no_change), max_iter, tol,
                                  int(est.fit_intercept), int(est.verbose),
                                  int(est.shuffle), seed, pos_weight,
                                  neg_weight, learning_rate_type, est.eta0,
                                  est.power_t, est.t_, intercept_decay,
                                  est.average)
        if len(est.classes_) == 2:
            est.average_intercept_[0] = average_intercept
        else:
            est.average_intercept_[i] = average_intercept
        result = standard_coef, standard_intercept, n_iter_
    return result
class BaseSGDClassifier(LinearClassifierMixin, BaseSGD, metaclass=ABCMeta):
    """Base class for linear classifiers trained with SGD."""
    # Map from loss-name string to (concrete ``LossFunction`` class,
    # *default constructor args).  For the epsilon-based losses,
    # ``_get_loss_function`` substitutes ``self.epsilon`` for the default.
    loss_functions = {
        "hinge": (Hinge, 1.0),
        "squared_hinge": (SquaredHinge, 1.0),
        "perceptron": (Hinge, 0.0),
        "log": (Log, ),
        "modified_huber": (ModifiedHuber, ),
        "squared_loss": (SquaredLoss, ),
        "huber": (Huber, DEFAULT_EPSILON),
        "epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
        "squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
                                        DEFAULT_EPSILON),
    }
@abstractmethod
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, max_iter=1000, tol=1e-3,
shuffle=True, verbose=0, epsilon=DEFAULT_EPSILON, n_jobs=None,
random_state=None, learning_rate="optimal", eta0=0.0,
power_t=0.5, early_stopping=False,
validation_fraction=0.1, n_iter_no_change=5,
class_weight=None, warm_start=False, average=False):
super().__init__(
loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept, max_iter=max_iter, tol=tol,
shuffle=shuffle, verbose=verbose, epsilon=epsilon,
random_state=random_state, learning_rate=learning_rate, eta0=eta0,
power_t=power_t, early_stopping=early_stopping,
validation_fraction=validation_fraction,
n_iter_no_change=n_iter_no_change, warm_start=warm_start,
average=average)
self.class_weight = class_weight
self.n_jobs = n_jobs
    def _partial_fit(self, X, y, alpha, C,
                     loss, learning_rate, max_iter,
                     classes, sample_weight,
                     coef_init, intercept_init):
        """Shared incremental-fit routine backing both fit and partial_fit.

        Validates the input, (re)allocates the parameter buffers when needed
        and dispatches to the binary or multiclass training procedure.
        """
        X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C",
                         accept_large_sparse=False)
        n_samples, n_features = X.shape
        _check_partial_fit_first_call(self, classes)
        n_classes = self.classes_.shape[0]
        # Allocate datastructures from input arguments
        self._expanded_class_weight = compute_class_weight(self.class_weight,
                                                           self.classes_, y)
        sample_weight = _check_sample_weight(sample_weight, X)
        if getattr(self, "coef_", None) is None or coef_init is not None:
            # First call (or explicit re-initialization): allocate buffers.
            self._allocate_parameter_mem(n_classes, n_features,
                                         coef_init, intercept_init)
        elif n_features != self.coef_.shape[-1]:
            raise ValueError("Number of features %d does not match previous "
                             "data %d." % (n_features, self.coef_.shape[-1]))
        self.loss_function_ = self._get_loss_function(loss)
        if not hasattr(self, "t_"):
            # Global iteration counter, carried across partial_fit calls.
            self.t_ = 1.0
        # delegate to concrete training procedure
        if n_classes > 2:
            self._fit_multiclass(X, y, alpha=alpha, C=C,
                                 learning_rate=learning_rate,
                                 sample_weight=sample_weight,
                                 max_iter=max_iter)
        elif n_classes == 2:
            self._fit_binary(X, y, alpha=alpha, C=C,
                             learning_rate=learning_rate,
                             sample_weight=sample_weight,
                             max_iter=max_iter)
        else:
            raise ValueError(
                "The number of classes has to be greater than one;"
                " got %d class" % n_classes)
        return self
    def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
             intercept_init=None, sample_weight=None):
        # Full fit: reset class and iteration state, then run _partial_fit
        # for up to self.max_iter epochs.
        self._validate_params()
        if hasattr(self, "classes_"):
            # Forget previously seen classes so that np.unique(y) below
            # redefines them via _check_partial_fit_first_call.
            self.classes_ = None
        X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C",
                         accept_large_sparse=False)
        # labels can be encoded as float, int, or string literals
        # np.unique sorts in asc order; largest class id is positive class
        classes = np.unique(y)
        if self.warm_start and hasattr(self, "coef_"):
            # Warm start: reuse the previous solution unless explicit
            # initial values were passed in.
            if coef_init is None:
                coef_init = self.coef_
            if intercept_init is None:
                intercept_init = self.intercept_
        else:
            self.coef_ = None
            self.intercept_ = None
        if self.average > 0:
            # Averaged SGD tracks both the plain ("standard") and averaged
            # weights; reset the averaged ones for this fresh fit.
            self.standard_coef_ = self.coef_
            self.standard_intercept_ = self.intercept_
            self.average_coef_ = None
            self.average_intercept_ = None
        # Clear iteration count for multiple call to fit.
        self.t_ = 1.0
        self._partial_fit(X, y, alpha, C, loss, learning_rate, self.max_iter,
                          classes, sample_weight, coef_init, intercept_init)
        # Using every allotted epoch with a finite tol suggests the run
        # stopped before converging.
        if (self.tol is not None and self.tol > -np.inf
                and self.n_iter_ == self.max_iter):
            warnings.warn("Maximum number of iteration reached before "
                          "convergence. Consider increasing max_iter to "
                          "improve the fit.",
                          ConvergenceWarning)
        return self
    def _fit_binary(self, X, y, alpha, C, sample_weight,
                    learning_rate, max_iter):
        """Fit a binary classifier on X and y. """
        # fit_binary trains a single weight vector; class index 1 is the
        # positive class, and the expanded class weights for the positive
        # and negative classes are passed explicitly.
        coef, intercept, n_iter_ = fit_binary(self, 1, X, y, alpha, C,
                                              learning_rate, max_iter,
                                              self._expanded_class_weight[1],
                                              self._expanded_class_weight[0],
                                              sample_weight,
                                              random_state=self.random_state)
        # t_ counts total weight updates (one per sample per epoch).
        self.t_ += n_iter_ * X.shape[0]
        self.n_iter_ = n_iter_
        # need to be 2d
        if self.average > 0:
            if self.average <= self.t_ - 1:
                # Averaging has kicked in: expose the averaged weights.
                self.coef_ = self.average_coef_.reshape(1, -1)
                self.intercept_ = self.average_intercept_
            else:
                # Averaging has not started yet: expose plain SGD weights.
                self.coef_ = self.standard_coef_.reshape(1, -1)
                self.standard_intercept_ = np.atleast_1d(intercept)
                self.intercept_ = self.standard_intercept_
        else:
            self.coef_ = coef.reshape(1, -1)
            # intercept is a float, need to convert it to an array of length 1
            self.intercept_ = np.atleast_1d(intercept)
    def _fit_multiclass(self, X, y, alpha, C, learning_rate,
                        sample_weight, max_iter):
        """Fit a multi-class classifier by combining binary classifiers
        Each binary classifier predicts one class versus all others. This
        strategy is called OvA (One versus All) or OvR (One versus Rest).
        """
        # Precompute the validation split using the multiclass labels
        # to ensure proper balancing of the classes.
        validation_mask = self._make_validation_split(y)
        # Use joblib to fit OvA in parallel.
        # Pick the random seed for each job outside of fit_binary to avoid
        # sharing the estimator random state between threads which could lead
        # to non-deterministic behavior
        random_state = check_random_state(self.random_state)
        seeds = random_state.randint(MAX_INT, size=len(self.classes_))
        # require="sharedmem" forces a threading backend so workers can
        # update this estimator's coefficient rows in place.
        result = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
                          **_joblib_parallel_args(require="sharedmem"))(
            delayed(fit_binary)(self, i, X, y, alpha, C, learning_rate,
                                max_iter, self._expanded_class_weight[i],
                                1., sample_weight,
                                validation_mask=validation_mask,
                                random_state=seed)
            for i, seed in enumerate(seeds))
        # take the maximum of n_iter_ over every binary fit
        n_iter_ = 0.
        for i, (_, intercept, n_iter_i) in enumerate(result):
            self.intercept_[i] = intercept
            n_iter_ = max(n_iter_, n_iter_i)
        # t_ counts total weight updates (one per sample per epoch).
        self.t_ += n_iter_ * X.shape[0]
        self.n_iter_ = n_iter_
        if self.average > 0:
            if self.average <= self.t_ - 1.0:
                # Averaging has kicked in: expose the averaged weights.
                self.coef_ = self.average_coef_
                self.intercept_ = self.average_intercept_
            else:
                # Averaging has not started yet: expose plain SGD weights.
                self.coef_ = self.standard_coef_
                self.standard_intercept_ = np.atleast_1d(self.intercept_)
                self.intercept_ = self.standard_intercept_
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Perform one epoch of stochastic gradient descent on given samples.
Internally, this method uses ``max_iter = 1``. Therefore, it is not
guaranteed that a minimum of the cost function is reached after calling
it once. Matters such as objective convergence and early stopping
should be handled by the user.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of the training data
y : numpy array, shape (n_samples,)
Subset of the target values
classes : array, shape (n_classes,)
Classes across all calls to partial_fit.
Can be obtained by via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
self._validate_params(for_partial_fit=True)
if self.class_weight in ['balanced']:
raise ValueError("class_weight '{0}' is not supported for "
"partial_fit. In order to use 'balanced' weights,"
" use compute_class_weight('{0}', classes, y). "
"In place of y you can us a large enough sample "
"of the full training set target to properly "
"estimate the class frequency distributions. "
"Pass the resulting weights as the class_weight "
"parameter.".format(self.class_weight))
return self._partial_fit(X, y, alpha=self.alpha, C=1.0, loss=self.loss,
learning_rate=self.learning_rate, max_iter=1,
classes=classes, sample_weight=sample_weight,
coef_init=None, intercept_init=None)
def fit(self, X, y, coef_init=None, intercept_init=None,
sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
coef_init : array, shape (n_classes, n_features)
The initial coefficients to warm-start the optimization.
intercept_init : array, shape (n_classes,)
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed. These weights will
be multiplied with class_weight (passed through the
constructor) if class_weight is specified
Returns
-------
self : returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init, intercept_init=intercept_init,
sample_weight=sample_weight)
class SGDClassifier(BaseSGDClassifier):
    """Linear classifiers (SVM, logistic regression, a.o.) with SGD training.
    This estimator implements regularized linear models with stochastic
    gradient descent (SGD) learning: the gradient of the loss is estimated
    each sample at a time and the model is updated along the way with a
    decreasing strength schedule (aka learning rate). SGD allows minibatch
    (online/out-of-core) learning, see the partial_fit method.
    For best results using the default learning rate schedule, the data should
    have zero mean and unit variance.
    This implementation works with data represented as dense or sparse arrays
    of floating point values for the features. The model it fits can be
    controlled with the loss parameter; by default, it fits a linear support
    vector machine (SVM).
    The regularizer is a penalty added to the loss function that shrinks model
    parameters towards the zero vector using either the squared euclidean norm
    L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
    parameter update crosses the 0.0 value because of the regularizer, the
    update is truncated to 0.0 to allow for learning sparse models and achieve
    online feature selection.
    Read more in the :ref:`User Guide <sgd>`.
    Parameters
    ----------
    loss : str, default: 'hinge'
        The loss function to be used. Defaults to 'hinge', which gives a
        linear SVM.
        The possible options are 'hinge', 'log', 'modified_huber',
        'squared_hinge', 'perceptron', or a regression loss: 'squared_loss',
        'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
        The 'log' loss gives logistic regression, a probabilistic classifier.
        'modified_huber' is another smooth loss that brings tolerance to
        outliers as well as probability estimates.
        'squared_hinge' is like hinge but is quadratically penalized.
        'perceptron' is the linear loss used by the perceptron algorithm.
        The other losses are designed for regression but can be useful in
        classification as well; see SGDRegressor for a description.
    penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
        The penalty (aka regularization term) to be used. Defaults to 'l2'
        which is the standard regularizer for linear SVM models. 'l1' and
        'elasticnet' might bring sparsity to the model (feature selection)
        not achievable with 'l2'.
    alpha : float
        Constant that multiplies the regularization term. Defaults to 0.0001.
        Also used to compute learning_rate when set to 'optimal'.
    l1_ratio : float
        The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
        l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
        Defaults to 0.15.
    fit_intercept : bool
        Whether the intercept should be estimated or not. If False, the
        data is assumed to be already centered. Defaults to True.
    max_iter : int, optional (default=1000)
        The maximum number of passes over the training data (aka epochs).
        It only impacts the behavior in the ``fit`` method, and not the
        :meth:`partial_fit` method.
        .. versionadded:: 0.19
    tol : float or None, optional (default=1e-3)
        The stopping criterion. If it is not None, the iterations will stop
        when (loss > best_loss - tol) for ``n_iter_no_change`` consecutive
        epochs.
        .. versionadded:: 0.19
    shuffle : bool, optional
        Whether or not the training data should be shuffled after each epoch.
        Defaults to True.
    verbose : integer, default=0
        The verbosity level
    epsilon : float, default=0.1
        Epsilon in the epsilon-insensitive loss functions; only if `loss` is
        'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
        For 'huber', determines the threshold at which it becomes less
        important to get the prediction exactly right.
        For epsilon-insensitive, any differences between the current prediction
        and the correct label are ignored if they are less than this threshold.
    n_jobs : int or None, optional (default=None)
        The number of CPUs to use to do the OVA (One Versus All, for
        multi-class problems) computation.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.
    random_state : int, RandomState instance or None, optional (default=None)
        The seed of the pseudo random number generator to use when shuffling
        the data. If int, random_state is the seed used by the random number
        generator; If RandomState instance, random_state is the random number
        generator; If None, the random number generator is the RandomState
        instance used by `np.random`.
    learning_rate : string, optional
        The learning rate schedule:
        'constant':
            eta = eta0
        'optimal': [default]
            eta = 1.0 / (alpha * (t + t0))
            where t0 is chosen by a heuristic proposed by Leon Bottou.
        'invscaling':
            eta = eta0 / pow(t, power_t)
        'adaptive':
            eta = eta0, as long as the training keeps decreasing.
            Each time n_iter_no_change consecutive epochs fail to decrease the
            training loss by tol or fail to increase validation score by tol if
            early_stopping is True, the current learning rate is divided by 5.
    eta0 : double
        The initial learning rate for the 'constant', 'invscaling' or
        'adaptive' schedules. The default value is 0.0 as eta0 is not used by
        the default schedule 'optimal'.
    power_t : double
        The exponent for inverse scaling learning rate [default 0.5].
    early_stopping : bool, default=False
        Whether to use early stopping to terminate training when validation
        score is not improving. If set to True, it will automatically set aside
        a stratified fraction of training data as validation and terminate
        training when validation score is not improving by at least tol for
        n_iter_no_change consecutive epochs.
        .. versionadded:: 0.20
    validation_fraction : float, default=0.1
        The proportion of training data to set aside as validation set for
        early stopping. Must be between 0 and 1.
        Only used if early_stopping is True.
        .. versionadded:: 0.20
    n_iter_no_change : int, default=5
        Number of iterations with no improvement to wait before early stopping.
        .. versionadded:: 0.20
    class_weight : dict, {class_label: weight} or "balanced" or None, optional
        Preset for the class_weight fit parameter.
        Weights associated with classes. If not given, all classes
        are supposed to have weight one.
        The "balanced" mode uses the values of y to automatically adjust
        weights inversely proportional to class frequencies in the input data
        as ``n_samples / (n_classes * np.bincount(y))``
    warm_start : bool, default=False
        When set to True, reuse the solution of the previous call to fit as
        initialization, otherwise, just erase the previous solution.
        See :term:`the Glossary <warm_start>`.
        Repeatedly calling fit or partial_fit when warm_start is True can
        result in a different solution than when calling fit a single time
        because of the way the data is shuffled.
        If a dynamic learning rate is used, the learning rate is adapted
        depending on the number of samples already seen. Calling ``fit`` resets
        this counter, while ``partial_fit`` will result in increasing the
        existing counter.
    average : bool or int, default=False
        When set to True, computes the averaged SGD weights and stores the
        result in the ``coef_`` attribute. If set to an int greater than 1,
        averaging will begin once the total number of samples seen reaches
        average. So ``average=10`` will begin averaging after seeing 10
        samples.
    Attributes
    ----------
    coef_ : array, shape (1, n_features) if n_classes == 2 else (n_classes,\
            n_features)
        Weights assigned to the features.
    intercept_ : array, shape (1,) if n_classes == 2 else (n_classes,)
        Constants in decision function.
    n_iter_ : int
        The actual number of iterations to reach the stopping criterion.
        For multiclass fits, it is the maximum over every binary fit.
    loss_function_ : concrete ``LossFunction``
    classes_ : array of shape (n_classes,)
    t_ : int
        Number of weight updates performed during training.
        Same as ``(n_iter_ * n_samples)``.
    Examples
    --------
    >>> import numpy as np
    >>> from sklearn import linear_model
    >>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
    >>> Y = np.array([1, 1, 2, 2])
    >>> clf = linear_model.SGDClassifier(max_iter=1000, tol=1e-3)
    >>> clf.fit(X, Y)
    SGDClassifier()
    >>> print(clf.predict([[-0.8, -1]]))
    [1]
    See also
    --------
    sklearn.svm.LinearSVC, LogisticRegression, Perceptron
    """
    def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
                 fit_intercept=True, max_iter=1000, tol=1e-3, shuffle=True,
                 verbose=0, epsilon=DEFAULT_EPSILON, n_jobs=None,
                 random_state=None, learning_rate="optimal", eta0=0.0,
                 power_t=0.5, early_stopping=False, validation_fraction=0.1,
                 n_iter_no_change=5, class_weight=None, warm_start=False,
                 average=False):
        # All hyper-parameters are stored by the base class; this subclass
        # adds no state of its own.
        super().__init__(
            loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,
            fit_intercept=fit_intercept, max_iter=max_iter, tol=tol,
            shuffle=shuffle, verbose=verbose, epsilon=epsilon, n_jobs=n_jobs,
            random_state=random_state, learning_rate=learning_rate, eta0=eta0,
            power_t=power_t, early_stopping=early_stopping,
            validation_fraction=validation_fraction,
            n_iter_no_change=n_iter_no_change, class_weight=class_weight,
            warm_start=warm_start, average=average)
    def _check_proba(self):
        # Probability estimates are only defined for losses that can be
        # mapped to class-membership probabilities.
        if self.loss not in ("log", "modified_huber"):
            raise AttributeError("probability estimates are not available for"
                                 " loss=%r" % self.loss)
    @property
    def predict_proba(self):
        """Probability estimates.
        This method is only available for log loss and modified Huber loss.
        Multiclass probability estimates are derived from binary (one-vs.-rest)
        estimates by simple normalization, as recommended by Zadrozny and
        Elkan.
        Binary probability estimates for loss="modified_huber" are given by
        (clip(decision_function(X), -1, 1) + 1) / 2. For other loss functions
        it is necessary to perform proper probability calibration by wrapping
        the classifier with
        :class:`sklearn.calibration.CalibratedClassifierCV` instead.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Returns
        -------
        array, shape (n_samples, n_classes)
            Returns the probability of the sample for each class in the model,
            where classes are ordered as they are in `self.classes_`.
        References
        ----------
        Zadrozny and Elkan, "Transforming classifier scores into multiclass
        probability estimates", SIGKDD'02,
        http://www.research.ibm.com/people/z/zadrozny/kdd2002-Transf.pdf
        The justification for the formula in the loss="modified_huber"
        case is in the appendix B in:
        http://jmlr.csail.mit.edu/papers/volume2/zhang02c/zhang02c.pdf
        """
        # Raises AttributeError for unsupported losses so that hasattr()
        # checks on predict_proba behave correctly.
        self._check_proba()
        return self._predict_proba
    def _predict_proba(self, X):
        check_is_fitted(self)
        if self.loss == "log":
            return self._predict_proba_lr(X)
        elif self.loss == "modified_huber":
            binary = (len(self.classes_) == 2)
            scores = self.decision_function(X)
            if binary:
                # In the binary case only the positive-class column is
                # computed; prob aliases column 1 of prob2.
                prob2 = np.ones((scores.shape[0], 2))
                prob = prob2[:, 1]
            else:
                prob = scores
            # Map scores into [0, 1] via (clip(s, -1, 1) + 1) / 2, in place.
            np.clip(scores, -1, 1, prob)
            prob += 1.
            prob /= 2.
            if binary:
                prob2[:, 0] -= prob
                prob = prob2
            else:
                # the above might assign zero to all classes, which doesn't
                # normalize neatly; work around this to produce uniform
                # probabilities
                prob_sum = prob.sum(axis=1)
                all_zero = (prob_sum == 0)
                if np.any(all_zero):
                    prob[all_zero, :] = 1
                    prob_sum[all_zero] = len(self.classes_)
                # normalize
                prob /= prob_sum.reshape((prob.shape[0], -1))
            return prob
        else:
            raise NotImplementedError("predict_(log_)proba only supported when"
                                      " loss='log' or loss='modified_huber' "
                                      "(%r given)" % self.loss)
    @property
    def predict_log_proba(self):
        """Log of probability estimates.
        This method is only available for log loss and modified Huber loss.
        When loss="modified_huber", probability estimates may be hard zeros
        and ones, so taking the logarithm is not possible.
        See ``predict_proba`` for details.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
        Returns
        -------
        T : array-like, shape (n_samples, n_classes)
            Returns the log-probability of the sample for each class in the
            model, where classes are ordered as they are in
            `self.classes_`.
        """
        self._check_proba()
        return self._predict_log_proba
    def _predict_log_proba(self, X):
        # Element-wise log of predict_proba; may emit -inf for hard zeros.
        return np.log(self.predict_proba(X))
class BaseSGDRegressor(RegressorMixin, BaseSGD):
    """Base class for linear regression models fitted with SGD."""
    # Map loss names to (loss class, default constructor args) used by
    # _get_loss_function.
    loss_functions = {
        "squared_loss": (SquaredLoss, ),
        "huber": (Huber, DEFAULT_EPSILON),
        "epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
        "squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
                                        DEFAULT_EPSILON),
    }
    @abstractmethod
    def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
                 l1_ratio=0.15, fit_intercept=True, max_iter=1000, tol=1e-3,
                 shuffle=True, verbose=0, epsilon=DEFAULT_EPSILON,
                 random_state=None, learning_rate="invscaling", eta0=0.01,
                 power_t=0.25, early_stopping=False, validation_fraction=0.1,
                 n_iter_no_change=5, warm_start=False, average=False):
        # Abstract: concrete regressors forward their hyper-parameters here.
        super().__init__(
            loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,
            fit_intercept=fit_intercept, max_iter=max_iter, tol=tol,
            shuffle=shuffle, verbose=verbose, epsilon=epsilon,
            random_state=random_state, learning_rate=learning_rate, eta0=eta0,
            power_t=power_t, early_stopping=early_stopping,
            validation_fraction=validation_fraction,
            n_iter_no_change=n_iter_no_change, warm_start=warm_start,
            average=average)
    def _partial_fit(self, X, y, alpha, C, loss, learning_rate,
                     max_iter, sample_weight, coef_init, intercept_init):
        """Validate input, (re)allocate parameters, and run the regressor
        training loop for up to ``max_iter`` epochs."""
        X, y = check_X_y(X, y, "csr", copy=False, order='C', dtype=np.float64,
                         accept_large_sparse=False)
        y = y.astype(np.float64, copy=False)
        n_samples, n_features = X.shape
        sample_weight = _check_sample_weight(sample_weight, X)
        # Allocate datastructures from input arguments
        if getattr(self, "coef_", None) is None:
            # Regression fits a single weight vector (one "class").
            self._allocate_parameter_mem(1, n_features, coef_init,
                                         intercept_init)
        elif n_features != self.coef_.shape[-1]:
            raise ValueError("Number of features %d does not match previous "
                             "data %d." % (n_features, self.coef_.shape[-1]))
        if self.average > 0 and getattr(self, "average_coef_", None) is None:
            # Lazily allocate the averaged-weight buffers on first use.
            self.average_coef_ = np.zeros(n_features,
                                          dtype=np.float64,
                                          order="C")
            self.average_intercept_ = np.zeros(1, dtype=np.float64, order="C")
        self._fit_regressor(X, y, alpha, C, loss, learning_rate,
                            sample_weight, max_iter)
        return self
    def partial_fit(self, X, y, sample_weight=None):
        """Perform one epoch of stochastic gradient descent on given samples.
        Internally, this method uses ``max_iter = 1``. Therefore, it is not
        guaranteed that a minimum of the cost function is reached after calling
        it once. Matters such as objective convergence and early stopping
        should be handled by the user.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Subset of training data
        y : numpy array of shape (n_samples,)
            Subset of target values
        sample_weight : array-like, shape (n_samples,), optional
            Weights applied to individual samples.
            If not provided, uniform weights are assumed.
        Returns
        -------
        self : returns an instance of self.
        """
        self._validate_params(for_partial_fit=True)
        return self._partial_fit(X, y, self.alpha, C=1.0,
                                 loss=self.loss,
                                 learning_rate=self.learning_rate, max_iter=1,
                                 sample_weight=sample_weight, coef_init=None,
                                 intercept_init=None)
    def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
             intercept_init=None, sample_weight=None):
        # Full fit: reset iteration/averaging state, then run _partial_fit
        # for up to self.max_iter epochs.
        self._validate_params()
        if self.warm_start and getattr(self, "coef_", None) is not None:
            # Warm start: reuse the previous solution unless explicit
            # initial values were passed in.
            if coef_init is None:
                coef_init = self.coef_
            if intercept_init is None:
                intercept_init = self.intercept_
        else:
            self.coef_ = None
            self.intercept_ = None
        if self.average > 0:
            # Averaged SGD tracks both the plain ("standard") and averaged
            # weights; reset the averaged ones for this fresh fit.
            self.standard_intercept_ = self.intercept_
            self.standard_coef_ = self.coef_
            self.average_coef_ = None
            self.average_intercept_ = None
        # Clear iteration count for multiple call to fit.
        self.t_ = 1.0
        self._partial_fit(X, y, alpha, C, loss, learning_rate,
                          self.max_iter, sample_weight, coef_init,
                          intercept_init)
        # Using every allotted epoch with a finite tol suggests the run
        # stopped before converging.
        if (self.tol is not None and self.tol > -np.inf
                and self.n_iter_ == self.max_iter):
            warnings.warn("Maximum number of iteration reached before "
                          "convergence. Consider increasing max_iter to "
                          "improve the fit.",
                          ConvergenceWarning)
        return self
    def fit(self, X, y, coef_init=None, intercept_init=None,
            sample_weight=None):
        """Fit linear model with Stochastic Gradient Descent.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training data
        y : numpy array, shape (n_samples,)
            Target values
        coef_init : array, shape (n_features,)
            The initial coefficients to warm-start the optimization.
        intercept_init : array, shape (1,)
            The initial intercept to warm-start the optimization.
        sample_weight : array-like, shape (n_samples,), optional
            Weights applied to individual samples (1. for unweighted).
        Returns
        -------
        self : returns an instance of self.
        """
        return self._fit(X, y, alpha=self.alpha, C=1.0,
                         loss=self.loss, learning_rate=self.learning_rate,
                         coef_init=coef_init,
                         intercept_init=intercept_init,
                         sample_weight=sample_weight)
    def _decision_function(self, X):
        """Predict using the linear model
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Returns
        -------
        array, shape (n_samples,)
           Predicted target values per element in X.
        """
        check_is_fitted(self)
        X = check_array(X, accept_sparse='csr')
        # X @ coef_.T + intercept_, with sparse-aware matmul.
        scores = safe_sparse_dot(X, self.coef_.T,
                                 dense_output=True) + self.intercept_
        return scores.ravel()
    def predict(self, X):
        """Predict using the linear model
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Returns
        -------
        array, shape (n_samples,)
           Predicted target values per element in X.
        """
        return self._decision_function(X)
    def _fit_regressor(self, X, y, alpha, C, loss, learning_rate,
                       sample_weight, max_iter):
        """Run the Cython SGD training loop (plain or averaged)."""
        dataset, intercept_decay = make_dataset(X, y, sample_weight)
        loss_function = self._get_loss_function(loss)
        penalty_type = self._get_penalty_type(self.penalty)
        learning_rate_type = self._get_learning_rate_type(learning_rate)
        # t_ counts weight updates across calls; start at 1 on the first one.
        if not hasattr(self, "t_"):
            self.t_ = 1.0
        validation_mask = self._make_validation_split(y)
        validation_score_cb = self._make_validation_score_cb(
            validation_mask, X, y, sample_weight)
        random_state = check_random_state(self.random_state)
        # numpy mtrand expects a C long which is a signed 32 bit integer under
        # Windows
        seed = random_state.randint(0, np.iinfo(np.int32).max)
        # -inf disables the tol-based stopping criterion.
        tol = self.tol if self.tol is not None else -np.inf
        if self.average > 0:
            self.standard_coef_, self.standard_intercept_, \
                self.average_coef_, self.average_intercept_, self.n_iter_ =\
                average_sgd(self.standard_coef_,
                            self.standard_intercept_[0],
                            self.average_coef_,
                            self.average_intercept_[0],
                            loss_function,
                            penalty_type,
                            alpha, C,
                            self.l1_ratio,
                            dataset,
                            validation_mask, self.early_stopping,
                            validation_score_cb,
                            int(self.n_iter_no_change),
                            max_iter, tol,
                            int(self.fit_intercept),
                            int(self.verbose),
                            int(self.shuffle),
                            seed,
                            1.0, 1.0,
                            learning_rate_type,
                            self.eta0, self.power_t, self.t_,
                            intercept_decay, self.average)
            self.average_intercept_ = np.atleast_1d(self.average_intercept_)
            self.standard_intercept_ = np.atleast_1d(self.standard_intercept_)
            self.t_ += self.n_iter_ * X.shape[0]
            # Expose averaged weights once averaging has kicked in, plain
            # SGD weights otherwise.
            if self.average <= self.t_ - 1.0:
                self.coef_ = self.average_coef_
                self.intercept_ = self.average_intercept_
            else:
                self.coef_ = self.standard_coef_
                self.intercept_ = self.standard_intercept_
        else:
            self.coef_, self.intercept_, self.n_iter_ = \
                plain_sgd(self.coef_,
                          self.intercept_[0],
                          loss_function,
                          penalty_type,
                          alpha, C,
                          self.l1_ratio,
                          dataset,
                          validation_mask, self.early_stopping,
                          validation_score_cb,
                          int(self.n_iter_no_change),
                          max_iter, tol,
                          int(self.fit_intercept),
                          int(self.verbose),
                          int(self.shuffle),
                          seed,
                          1.0, 1.0,
                          learning_rate_type,
                          self.eta0, self.power_t, self.t_,
                          intercept_decay)
            self.t_ += self.n_iter_ * X.shape[0]
            self.intercept_ = np.atleast_1d(self.intercept_)
class SGDRegressor(BaseSGDRegressor):
"""Linear model fitted by minimizing a regularized empirical loss with SGD
SGD stands for Stochastic Gradient Descent: the gradient of the loss is
estimated each sample at a time and the model is updated along the way with
a decreasing strength schedule (aka learning rate).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
This implementation works with data represented as dense numpy arrays of
floating point values for the features.
Read more in the :ref:`User Guide <sgd>`.
Parameters
----------
loss : str, default: 'squared_loss'
The loss function to be used. The possible values are 'squared_loss',
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'
The 'squared_loss' refers to the ordinary least squares fit.
'huber' modifies 'squared_loss' to focus less on getting outliers
correct by switching from squared to linear loss past a distance of
epsilon. 'epsilon_insensitive' ignores errors less than epsilon and is
linear past that; this is the loss function used in SVR.
'squared_epsilon_insensitive' is the same but becomes squared loss past
a tolerance of epsilon.
penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001
Also used to compute learning_rate when set to 'optimal'.
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
max_iter : int, optional (default=1000)
The maximum number of passes over the training data (aka epochs).
It only impacts the behavior in the ``fit`` method, and not the
:meth:`partial_fit` method.
.. versionadded:: 0.19
tol : float or None, optional (default=1e-3)
The stopping criterion. If it is not None, the iterations will stop
when (loss > best_loss - tol) for ``n_iter_no_change`` consecutive
epochs.
.. versionadded:: 0.19
shuffle : bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to True.
verbose : integer, default=0
The verbosity level.
epsilon : float, default=0.1
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
random_state : int, RandomState instance or None, optional (default=None)
The seed of the pseudo random number generator to use when shuffling
the data. If int, random_state is the seed used by the random number
generator; If RandomState instance, random_state is the random number
generator; If None, the random number generator is the RandomState
instance used by `np.random`.
learning_rate : string, optional
The learning rate schedule:
'constant':
eta = eta0
'optimal':
eta = 1.0 / (alpha * (t + t0))
where t0 is chosen by a heuristic proposed by Leon Bottou.
'invscaling': [default]
eta = eta0 / pow(t, power_t)
'adaptive':
eta = eta0, as long as the training keeps decreasing.
Each time n_iter_no_change consecutive epochs fail to decrease the
training loss by tol or fail to increase validation score by tol if
early_stopping is True, the current learning rate is divided by 5.
eta0 : double
The initial learning rate for the 'constant', 'invscaling' or
'adaptive' schedules. The default value is 0.01.
power_t : double
The exponent for inverse scaling learning rate [default 0.25].
early_stopping : bool, default=False
Whether to use early stopping to terminate training when validation
score is not improving. If set to True, it will automatically set aside
a fraction of training data as validation and terminate
training when validation score is not improving by at least tol for
n_iter_no_change consecutive epochs.
.. versionadded:: 0.20
validation_fraction : float, default=0.1
The proportion of training data to set aside as validation set for
early stopping. Must be between 0 and 1.
Only used if early_stopping is True.
.. versionadded:: 0.20
n_iter_no_change : int, default=5
Number of iterations with no improvement to wait before early stopping.
.. versionadded:: 0.20
warm_start : bool, default=False
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
See :term:`the Glossary <warm_start>`.
Repeatedly calling fit or partial_fit when warm_start is True can
result in a different solution than when calling fit a single time
because of the way the data is shuffled.
If a dynamic learning rate is used, the learning rate is adapted
depending on the number of samples already seen. Calling ``fit`` resets
this counter, while ``partial_fit`` will result in increasing the
existing counter.
average : bool or int, default=False
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
average. So ``average=10`` will begin averaging after seeing 10
samples.
Attributes
----------
coef_ : array, shape (n_features,)
Weights assigned to the features.
intercept_ : array, shape (1,)
The intercept term.
average_coef_ : array, shape (n_features,)
Averaged weights assigned to the features.
average_intercept_ : array, shape (1,)
The averaged intercept term.
n_iter_ : int
The actual number of iterations to reach the stopping criterion.
t_ : int
Number of weight updates performed during training.
Same as ``(n_iter_ * n_samples)``.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> n_samples, n_features = 10, 5
>>> rng = np.random.RandomState(0)
>>> y = rng.randn(n_samples)
>>> X = rng.randn(n_samples, n_features)
>>> clf = linear_model.SGDRegressor(max_iter=1000, tol=1e-3)
>>> clf.fit(X, y)
SGDRegressor()
See also
--------
Ridge, ElasticNet, Lasso, sklearn.svm.SVR
"""
    def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
                 l1_ratio=0.15, fit_intercept=True, max_iter=1000, tol=1e-3,
                 shuffle=True, verbose=0, epsilon=DEFAULT_EPSILON,
                 random_state=None, learning_rate="invscaling", eta0=0.01,
                 power_t=0.25, early_stopping=False, validation_fraction=0.1,
                 n_iter_no_change=5, warm_start=False, average=False):
        # Pure pass-through constructor: every hyper-parameter is forwarded
        # unchanged to the base class, which stores it and validates it.
        super().__init__(
            loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,
            fit_intercept=fit_intercept, max_iter=max_iter, tol=tol,
            shuffle=shuffle, verbose=verbose, epsilon=epsilon,
            random_state=random_state, learning_rate=learning_rate, eta0=eta0,
            power_t=power_t, early_stopping=early_stopping,
            validation_fraction=validation_fraction,
            n_iter_no_change=n_iter_no_change, warm_start=warm_start,
            average=average)
| 41.254748 | 79 | 0.604308 |
import numpy as np
import warnings
from abc import ABCMeta, abstractmethod
from joblib import Parallel, delayed
from ..base import clone, is_classifier
from ._base import LinearClassifierMixin, SparseCoefMixin
from ._base import make_dataset
from ..base import BaseEstimator, RegressorMixin
from ..utils import check_array, check_random_state, check_X_y
from ..utils.extmath import safe_sparse_dot
from ..utils.multiclass import _check_partial_fit_first_call
from ..utils.validation import check_is_fitted, _check_sample_weight
from ..exceptions import ConvergenceWarning
from ..model_selection import StratifiedShuffleSplit, ShuffleSplit
from ._sgd_fast import plain_sgd, average_sgd
from ..utils import compute_class_weight
from ._sgd_fast import Hinge
from ._sgd_fast import SquaredHinge
from ._sgd_fast import Log
from ._sgd_fast import ModifiedHuber
from ._sgd_fast import SquaredLoss
from ._sgd_fast import Huber
from ._sgd_fast import EpsilonInsensitive
from ._sgd_fast import SquaredEpsilonInsensitive
from ..utils.fixes import _joblib_parallel_args
# Integer codes for the learning-rate schedules, as expected by the Cython
# SGD routines (plain_sgd / average_sgd).
LEARNING_RATE_TYPES = {"constant": 1, "optimal": 2, "invscaling": 3,
                       "adaptive": 4, "pa1": 5, "pa2": 6}

# Integer codes for the penalty (regularization) types.
PENALTY_TYPES = {"none": 0, "l2": 2, "l1": 1, "elasticnet": 3}

# Default epsilon used by the huber / epsilon-insensitive losses.
DEFAULT_EPSILON = 0.1

# Upper bound for RNG seeds handed to the Cython routines (fits a C int).
MAX_INT = np.iinfo(np.int32).max
class _ValidationScoreCallback:
def __init__(self, estimator, X_val, y_val, sample_weight_val,
classes=None):
self.estimator = clone(estimator)
self.estimator.t_ = 1
if classes is not None:
self.estimator.classes_ = classes
self.X_val = X_val
self.y_val = y_val
self.sample_weight_val = sample_weight_val
def __call__(self, coef, intercept):
est = self.estimator
est.coef_ = coef.reshape(1, -1)
est.intercept_ = np.atleast_1d(intercept)
return est.score(self.X_val, self.y_val, self.sample_weight_val)
class BaseSGD(SparseCoefMixin, BaseEstimator, metaclass=ABCMeta):
    """Base class for SGD classification and regression.

    Holds the shared hyper-parameters, their validation, parameter-memory
    allocation and the early-stopping validation-split helpers.
    """

    def __init__(self, loss, penalty='l2', alpha=0.0001, C=1.0,
                 l1_ratio=0.15, fit_intercept=True, max_iter=1000, tol=1e-3,
                 shuffle=True, verbose=0, epsilon=0.1, random_state=None,
                 learning_rate="optimal", eta0=0.0, power_t=0.5,
                 early_stopping=False, validation_fraction=0.1,
                 n_iter_no_change=5, warm_start=False, average=False):
        self.loss = loss
        self.penalty = penalty
        self.learning_rate = learning_rate
        self.epsilon = epsilon
        self.alpha = alpha
        self.C = C
        self.l1_ratio = l1_ratio
        self.fit_intercept = fit_intercept
        self.shuffle = shuffle
        self.random_state = random_state
        self.verbose = verbose
        self.eta0 = eta0
        self.power_t = power_t
        self.early_stopping = early_stopping
        self.validation_fraction = validation_fraction
        self.n_iter_no_change = n_iter_no_change
        self.warm_start = warm_start
        self.average = average
        self.max_iter = max_iter
        self.tol = tol
        # Current tests expect init to do parameter validation as well.
        self._validate_params()

    def set_params(self, **kwargs):
        """Set estimator parameters and re-run validation."""
        super().set_params(**kwargs)
        self._validate_params()
        return self

    @abstractmethod
    def fit(self, X, y):
        """Fit model."""
        # NOTE: this docstring is the method body; without it the abstract
        # method has no statement and the module fails to parse.

    def _validate_params(self, for_partial_fit=False):
        """Validate input params."""
        if not isinstance(self.shuffle, bool):
            raise ValueError("shuffle must be either True or False")
        if not isinstance(self.early_stopping, bool):
            raise ValueError("early_stopping must be either True or False")
        if self.early_stopping and for_partial_fit:
            raise ValueError("early_stopping should be False with partial_fit")
        if self.max_iter is not None and self.max_iter <= 0:
            raise ValueError("max_iter must be > zero. Got %f" % self.max_iter)
        if not (0.0 <= self.l1_ratio <= 1.0):
            raise ValueError("l1_ratio must be in [0, 1]")
        if self.alpha < 0.0:
            raise ValueError("alpha must be >= 0")
        if self.n_iter_no_change < 1:
            raise ValueError("n_iter_no_change must be >= 1")
        if not (0.0 < self.validation_fraction < 1.0):
            raise ValueError("validation_fraction must be in range (0, 1)")
        if self.learning_rate in ("constant", "invscaling", "adaptive"):
            if self.eta0 <= 0.0:
                raise ValueError("eta0 must be > 0")
        if self.learning_rate == "optimal" and self.alpha == 0:
            raise ValueError("alpha must be > 0 since "
                             "learning_rate is 'optimal'. alpha is used "
                             "to compute the optimal learning rate.")

        # raises ValueError if not registered
        self._get_penalty_type(self.penalty)
        self._get_learning_rate_type(self.learning_rate)

        if self.loss not in self.loss_functions:
            raise ValueError("The loss %s is not supported. " % self.loss)

    def _get_loss_function(self, loss):
        """Get concrete ``LossFunction`` object for str ``loss``."""
        try:
            loss_ = self.loss_functions[loss]
            loss_class, args = loss_[0], loss_[1:]
            # epsilon-parameterized losses read the current ``self.epsilon``
            # rather than the registered default
            if loss in ('huber', 'epsilon_insensitive',
                        'squared_epsilon_insensitive'):
                args = (self.epsilon, )
            return loss_class(*args)
        except KeyError:
            raise ValueError("The loss %s is not supported. " % loss)

    def _get_learning_rate_type(self, learning_rate):
        # Map the schedule name to its integer code for the Cython loop.
        try:
            return LEARNING_RATE_TYPES[learning_rate]
        except KeyError:
            raise ValueError("learning rate %s "
                             "is not supported. " % learning_rate)

    def _get_penalty_type(self, penalty):
        # Map the penalty name (case-insensitive) to its integer code.
        penalty = str(penalty).lower()
        try:
            return PENALTY_TYPES[penalty]
        except KeyError:
            raise ValueError("Penalty %s is not supported. " % penalty)

    def _allocate_parameter_mem(self, n_classes, n_features, coef_init=None,
                                intercept_init=None):
        """Allocate mem for parameters; initialize if provided."""
        if n_classes > 2:
            # allocate coef_ for multi-class (one row per class)
            if coef_init is not None:
                coef_init = np.asarray(coef_init, order="C")
                if coef_init.shape != (n_classes, n_features):
                    raise ValueError("Provided ``coef_`` does not match "
                                     "dataset. ")
                self.coef_ = coef_init
            else:
                self.coef_ = np.zeros((n_classes, n_features),
                                      dtype=np.float64, order="C")

            # allocate intercept_ for multi-class
            if intercept_init is not None:
                intercept_init = np.asarray(intercept_init, order="C")
                if intercept_init.shape != (n_classes, ):
                    raise ValueError("Provided intercept_init "
                                     "does not match dataset.")
                self.intercept_ = intercept_init
            else:
                self.intercept_ = np.zeros(n_classes, dtype=np.float64,
                                           order="C")
        else:
            # allocate coef_ for binary problem (single weight vector)
            if coef_init is not None:
                coef_init = np.asarray(coef_init, dtype=np.float64,
                                       order="C")
                coef_init = coef_init.ravel()
                if coef_init.shape != (n_features,):
                    raise ValueError("Provided coef_init does not "
                                     "match dataset.")
                self.coef_ = coef_init
            else:
                self.coef_ = np.zeros(n_features,
                                      dtype=np.float64,
                                      order="C")

            # allocate intercept_ for binary problem
            if intercept_init is not None:
                intercept_init = np.asarray(intercept_init, dtype=np.float64)
                if intercept_init.shape != (1,) and intercept_init.shape != ():
                    raise ValueError("Provided intercept_init "
                                     "does not match dataset.")
                self.intercept_ = intercept_init.reshape(1,)
            else:
                self.intercept_ = np.zeros(1, dtype=np.float64, order="C")

        # initialize average parameters; standard_* keep the plain SGD
        # solution while average_* accumulate the running average
        if self.average > 0:
            self.standard_coef_ = self.coef_
            self.standard_intercept_ = self.intercept_
            self.average_coef_ = np.zeros(self.coef_.shape,
                                          dtype=np.float64,
                                          order="C")
            self.average_intercept_ = np.zeros(self.standard_intercept_.shape,
                                               dtype=np.float64,
                                               order="C")

    def _make_validation_split(self, y):
        """Split the dataset between training set and validation set.

        Returns a uint8 mask with 1 on validation samples (all zeros when
        early stopping is disabled).
        """
        n_samples = y.shape[0]
        validation_mask = np.zeros(n_samples, dtype=np.uint8)
        if not self.early_stopping:
            # use the full set for training, with an empty validation set
            return validation_mask

        # stratify the split for classifiers to keep class balance
        if is_classifier(self):
            splitter_type = StratifiedShuffleSplit
        else:
            splitter_type = ShuffleSplit
        cv = splitter_type(test_size=self.validation_fraction,
                           random_state=self.random_state)
        idx_train, idx_val = next(cv.split(np.zeros(shape=(y.shape[0], 1)), y))
        if idx_train.shape[0] == 0 or idx_val.shape[0] == 0:
            raise ValueError(
                "Splitting %d samples into a train set and a validation set "
                "with validation_fraction=%r led to an empty set (%d and %d "
                "samples). Please either change validation_fraction, increase "
                "number of samples, or disable early_stopping."
                % (n_samples, self.validation_fraction, idx_train.shape[0],
                   idx_val.shape[0]))

        validation_mask[idx_val] = 1
        return validation_mask

    def _make_validation_score_cb(self, validation_mask, X, y, sample_weight,
                                  classes=None):
        # Build the scoring callback used by the Cython loop; None disables
        # early-stopping score tracking entirely.
        if not self.early_stopping:
            return None

        return _ValidationScoreCallback(
            self, X[validation_mask], y[validation_mask],
            sample_weight[validation_mask], classes=classes)
def _prepare_fit_binary(est, y, i):
y_i = np.ones(y.shape, dtype=np.float64, order="C")
y_i[y != est.classes_[i]] = -1.0
average_intercept = 0
average_coef = None
if len(est.classes_) == 2:
if not est.average:
coef = est.coef_.ravel()
intercept = est.intercept_[0]
else:
coef = est.standard_coef_.ravel()
intercept = est.standard_intercept_[0]
average_coef = est.average_coef_.ravel()
average_intercept = est.average_intercept_[0]
else:
if not est.average:
coef = est.coef_[i]
intercept = est.intercept_[i]
else:
coef = est.standard_coef_[i]
intercept = est.standard_intercept_[i]
average_coef = est.average_coef_[i]
average_intercept = est.average_intercept_[i]
return y_i, coef, intercept, average_coef, average_intercept
def fit_binary(est, i, X, y, alpha, C, learning_rate, max_iter,
               pos_weight, neg_weight, sample_weight, validation_mask=None,
               random_state=None):
    """Fit a single binary (one-vs-all) classifier for class ``i``.

    Dispatches to the Cython ``plain_sgd`` / ``average_sgd`` routines; the
    argument order in those calls must not be changed.
    Returns ``(coef, intercept, n_iter_)``.
    """
    # if average is not true, average_coef, and average_intercept will be
    # unused
    y_i, coef, intercept, average_coef, average_intercept = \
        _prepare_fit_binary(est, y, i)
    assert y_i.shape[0] == y.shape[0] == sample_weight.shape[0]

    random_state = check_random_state(random_state)
    dataset, intercept_decay = make_dataset(
        X, y_i, sample_weight, random_state=random_state)

    penalty_type = est._get_penalty_type(est.penalty)
    learning_rate_type = est._get_learning_rate_type(learning_rate)

    if validation_mask is None:
        validation_mask = est._make_validation_split(y_i)
    classes = np.array([-1, 1], dtype=y_i.dtype)
    validation_score_cb = est._make_validation_score_cb(
        validation_mask, X, y_i, sample_weight, classes=classes)

    # numpy mtrand expects a C long which is a signed 32 bit integer under
    # Windows
    seed = random_state.randint(MAX_INT)

    tol = est.tol if est.tol is not None else -np.inf

    if not est.average:
        result = plain_sgd(coef, intercept, est.loss_function_,
                           penalty_type, alpha, C, est.l1_ratio,
                           dataset, validation_mask, est.early_stopping,
                           validation_score_cb, int(est.n_iter_no_change),
                           max_iter, tol, int(est.fit_intercept),
                           int(est.verbose), int(est.shuffle), seed,
                           pos_weight, neg_weight,
                           learning_rate_type, est.eta0,
                           est.power_t, est.t_, intercept_decay)

    else:
        standard_coef, standard_intercept, average_coef, average_intercept, \
            n_iter_ = average_sgd(coef, intercept, average_coef,
                                  average_intercept, est.loss_function_,
                                  penalty_type, alpha, C, est.l1_ratio,
                                  dataset, validation_mask, est.early_stopping,
                                  validation_score_cb,
                                  int(est.n_iter_no_change), max_iter, tol,
                                  int(est.fit_intercept), int(est.verbose),
                                  int(est.shuffle), seed, pos_weight,
                                  neg_weight, learning_rate_type, est.eta0,
                                  est.power_t, est.t_, intercept_decay,
                                  est.average)

        # the averaged intercept is written back onto the estimator directly
        if len(est.classes_) == 2:
            est.average_intercept_[0] = average_intercept
        else:
            est.average_intercept_[i] = average_intercept

        result = standard_coef, standard_intercept, n_iter_

    return result
class BaseSGDClassifier(LinearClassifierMixin, BaseSGD, metaclass=ABCMeta):
    """Shared fitting machinery for SGD-based linear classifiers."""

    # registry: loss name -> (loss class, *constructor args)
    loss_functions = {
        "hinge": (Hinge, 1.0),
        "squared_hinge": (SquaredHinge, 1.0),
        "perceptron": (Hinge, 0.0),
        "log": (Log, ),
        "modified_huber": (ModifiedHuber, ),
        "squared_loss": (SquaredLoss, ),
        "huber": (Huber, DEFAULT_EPSILON),
        "epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
        "squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
                                        DEFAULT_EPSILON),
    }

    @abstractmethod
    def __init__(self, loss="hinge", penalty='l2', alpha=0.0001,
                 l1_ratio=0.15, fit_intercept=True, max_iter=1000, tol=1e-3,
                 shuffle=True, verbose=0, epsilon=DEFAULT_EPSILON, n_jobs=None,
                 random_state=None, learning_rate="optimal", eta0=0.0,
                 power_t=0.5, early_stopping=False,
                 validation_fraction=0.1, n_iter_no_change=5,
                 class_weight=None, warm_start=False, average=False):

        super().__init__(
            loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,
            fit_intercept=fit_intercept, max_iter=max_iter, tol=tol,
            shuffle=shuffle, verbose=verbose, epsilon=epsilon,
            random_state=random_state, learning_rate=learning_rate, eta0=eta0,
            power_t=power_t, early_stopping=early_stopping,
            validation_fraction=validation_fraction,
            n_iter_no_change=n_iter_no_change, warm_start=warm_start,
            average=average)
        self.class_weight = class_weight
        self.n_jobs = n_jobs

    def _partial_fit(self, X, y, alpha, C,
                     loss, learning_rate, max_iter,
                     classes, sample_weight,
                     coef_init, intercept_init):
        # Core incremental fitting: validates input, (re)allocates parameter
        # memory if needed and dispatches to the binary/multiclass solver.
        X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C",
                         accept_large_sparse=False)

        n_samples, n_features = X.shape

        _check_partial_fit_first_call(self, classes)

        n_classes = self.classes_.shape[0]

        # Allocate datastructures from input arguments
        self._expanded_class_weight = compute_class_weight(self.class_weight,
                                                           self.classes_, y)
        sample_weight = _check_sample_weight(sample_weight, X)

        if getattr(self, "coef_", None) is None or coef_init is not None:
            self._allocate_parameter_mem(n_classes, n_features,
                                         coef_init, intercept_init)
        elif n_features != self.coef_.shape[-1]:
            raise ValueError("Number of features %d does not match previous "
                             "data %d." % (n_features, self.coef_.shape[-1]))

        self.loss_function_ = self._get_loss_function(loss)
        if not hasattr(self, "t_"):
            self.t_ = 1.0

        # delegate to concrete training procedure
        if n_classes > 2:
            self._fit_multiclass(X, y, alpha=alpha, C=C,
                                 learning_rate=learning_rate,
                                 sample_weight=sample_weight,
                                 max_iter=max_iter)
        elif n_classes == 2:
            self._fit_binary(X, y, alpha=alpha, C=C,
                             learning_rate=learning_rate,
                             sample_weight=sample_weight,
                             max_iter=max_iter)
        else:
            raise ValueError(
                "The number of classes has to be greater than one;"
                " got %d class" % n_classes)

        return self

    def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
             intercept_init=None, sample_weight=None):
        # Full fit: resets state (unless warm-starting) then runs
        # ``_partial_fit`` for ``max_iter`` epochs.
        self._validate_params()
        if hasattr(self, "classes_"):
            self.classes_ = None

        X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C",
                         accept_large_sparse=False)

        # labels can be encoded as float, int, or string literals
        classes = np.unique(y)

        if self.warm_start and hasattr(self, "coef_"):
            if coef_init is None:
                coef_init = self.coef_
            if intercept_init is None:
                intercept_init = self.intercept_
        else:
            self.coef_ = None
            self.intercept_ = None

        if self.average > 0:
            self.standard_coef_ = self.coef_
            self.standard_intercept_ = self.intercept_
            self.average_coef_ = None
            self.average_intercept_ = None

        # Clear iteration count for multiple call to fit.
        self.t_ = 1.0

        self._partial_fit(X, y, alpha, C, loss, learning_rate, self.max_iter,
                          classes, sample_weight, coef_init, intercept_init)

        if (self.tol is not None and self.tol > -np.inf
                and self.n_iter_ == self.max_iter):
            warnings.warn("Maximum number of iteration reached before "
                          "convergence. Consider increasing max_iter to "
                          "improve the fit.",
                          ConvergenceWarning)
        return self

    def _fit_binary(self, X, y, alpha, C, sample_weight,
                    learning_rate, max_iter):
        """Fit a binary classifier on X and y."""
        coef, intercept, n_iter_ = fit_binary(self, 1, X, y, alpha, C,
                                              learning_rate, max_iter,
                                              self._expanded_class_weight[1],
                                              self._expanded_class_weight[0],
                                              sample_weight,
                                              random_state=self.random_state)

        self.t_ += n_iter_ * X.shape[0]
        self.n_iter_ = n_iter_

        # need to be 2d for LinearClassifierMixin.decision_function
        if self.average > 0:
            if self.average <= self.t_ - 1:
                self.coef_ = self.average_coef_.reshape(1, -1)
                self.intercept_ = self.average_intercept_
            else:
                self.coef_ = self.standard_coef_.reshape(1, -1)
                self.standard_intercept_ = np.atleast_1d(intercept)
                self.intercept_ = self.standard_intercept_
        else:
            self.coef_ = coef.reshape(1, -1)
            # intercept is a float, need to convert it to an array of length 1
            self.intercept_ = np.atleast_1d(intercept)

    def _fit_multiclass(self, X, y, alpha, C, learning_rate,
                        sample_weight, max_iter):
        """Fit a multi-class classifier by combining binary classifiers.

        Each binary classifier predicts one class versus all others (OvA).
        """
        # Precompute the validation split using the multiclass labels
        # to ensure proper balancing of the classes.
        validation_mask = self._make_validation_split(y)

        # Use joblib to fit OvA in parallel.
        # Pick the random seed for each job outside of fit_binary to avoid
        # sharing the estimator random state between threads.
        random_state = check_random_state(self.random_state)
        seeds = random_state.randint(MAX_INT, size=len(self.classes_))
        result = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
                          **_joblib_parallel_args(require="sharedmem"))(
            delayed(fit_binary)(self, i, X, y, alpha, C, learning_rate,
                                max_iter, self._expanded_class_weight[i],
                                1., sample_weight,
                                validation_mask=validation_mask,
                                random_state=seed)
            for i, seed in enumerate(seeds))

        # take the maximum of n_iter_ over every binary fit
        n_iter_ = 0.
        for i, (_, intercept, n_iter_i) in enumerate(result):
            self.intercept_[i] = intercept
            n_iter_ = max(n_iter_, n_iter_i)

        self.t_ += n_iter_ * X.shape[0]
        self.n_iter_ = n_iter_

        if self.average > 0:
            if self.average <= self.t_ - 1.0:
                self.coef_ = self.average_coef_
                self.intercept_ = self.average_intercept_
            else:
                self.coef_ = self.standard_coef_
                self.standard_intercept_ = np.atleast_1d(self.intercept_)
                self.intercept_ = self.standard_intercept_

    def partial_fit(self, X, y, classes=None, sample_weight=None):
        """Perform one epoch of stochastic gradient descent on given samples.

        ``classes`` is required on the first call; 'balanced' class weights
        cannot be used because the full class distribution is unknown.
        """
        self._validate_params(for_partial_fit=True)
        if self.class_weight in ['balanced']:
            raise ValueError("class_weight '{0}' is not supported for "
                             "partial_fit. In order to use 'balanced' weights,"
                             " use compute_class_weight('{0}', classes, y). "
                             "In place of y you can us a large enough sample "
                             "of the full training set target to properly "
                             "estimate the class frequency distributions. "
                             "Pass the resulting weights as the class_weight "
                             "parameter.".format(self.class_weight))
        return self._partial_fit(X, y, alpha=self.alpha, C=1.0, loss=self.loss,
                                 learning_rate=self.learning_rate, max_iter=1,
                                 classes=classes, sample_weight=sample_weight,
                                 coef_init=None, intercept_init=None)

    def fit(self, X, y, coef_init=None, intercept_init=None,
            sample_weight=None):
        """Fit linear model with Stochastic Gradient Descent."""
        return self._fit(X, y, alpha=self.alpha, C=1.0,
                         loss=self.loss, learning_rate=self.learning_rate,
                         coef_init=coef_init, intercept_init=intercept_init,
                         sample_weight=sample_weight)
class SGDClassifier(BaseSGDClassifier):
    """Linear classifiers (SVM, logistic regression, ...) with SGD training."""

    def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
                 fit_intercept=True, max_iter=1000, tol=1e-3, shuffle=True,
                 verbose=0, epsilon=DEFAULT_EPSILON, n_jobs=None,
                 random_state=None, learning_rate="optimal", eta0=0.0,
                 power_t=0.5, early_stopping=False, validation_fraction=0.1,
                 n_iter_no_change=5, class_weight=None, warm_start=False,
                 average=False):
        # Pass-through constructor; all parameters are stored on the base.
        super().__init__(
            loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,
            fit_intercept=fit_intercept, max_iter=max_iter, tol=tol,
            shuffle=shuffle, verbose=verbose, epsilon=epsilon, n_jobs=n_jobs,
            random_state=random_state, learning_rate=learning_rate, eta0=eta0,
            power_t=power_t, early_stopping=early_stopping,
            validation_fraction=validation_fraction,
            n_iter_no_change=n_iter_no_change, class_weight=class_weight,
            warm_start=warm_start, average=average)

    def _check_proba(self):
        # Probability estimates only exist for probabilistic losses.
        if self.loss not in ("log", "modified_huber"):
            raise AttributeError("probability estimates are not available for"
                                 " loss=%r" % self.loss)

    @property
    def predict_proba(self):
        """Probability estimates (only for loss='log' or 'modified_huber')."""
        # exposed as a property so attribute access raises for other losses
        self._check_proba()
        return self._predict_proba

    def _predict_proba(self, X):
        check_is_fitted(self)

        if self.loss == "log":
            return self._predict_proba_lr(X)

        elif self.loss == "modified_huber":
            binary = (len(self.classes_) == 2)
            scores = self.decision_function(X)

            if binary:
                prob2 = np.ones((scores.shape[0], 2))
                # ``prob`` is a view into column 1 of prob2 — updates below
                # write through to prob2
                prob = prob2[:, 1]
            else:
                prob = scores

            # map clipped scores from [-1, 1] to [0, 1]
            np.clip(scores, -1, 1, prob)
            prob += 1.
            prob /= 2.

            if binary:
                prob2[:, 0] -= prob
                prob = prob2
            else:
                # the above might assign zero to all classes, which doesn't
                # normalize neatly; work around this to produce uniform
                # probabilities
                prob_sum = prob.sum(axis=1)
                all_zero = (prob_sum == 0)
                if np.any(all_zero):
                    prob[all_zero, :] = 1
                    prob_sum[all_zero] = len(self.classes_)

                # normalize
                prob /= prob_sum.reshape((prob.shape[0], -1))

            return prob

        else:
            raise NotImplementedError("predict_(log_)proba only supported when"
                                      " loss='log' or loss='modified_huber' "
                                      "(%r given)" % self.loss)

    @property
    def predict_log_proba(self):
        """Log of probability estimates."""
        self._check_proba()
        return self._predict_log_proba

    def _predict_log_proba(self, X):
        return np.log(self.predict_proba(X))
class BaseSGDRegressor(RegressorMixin, BaseSGD):
    """Shared fitting machinery for SGD-based linear regressors."""

    # registry: loss name -> (loss class, *constructor args)
    loss_functions = {
        "squared_loss": (SquaredLoss, ),
        "huber": (Huber, DEFAULT_EPSILON),
        "epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
        "squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
                                        DEFAULT_EPSILON),
    }

    @abstractmethod
    def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
                 l1_ratio=0.15, fit_intercept=True, max_iter=1000, tol=1e-3,
                 shuffle=True, verbose=0, epsilon=DEFAULT_EPSILON,
                 random_state=None, learning_rate="invscaling", eta0=0.01,
                 power_t=0.25, early_stopping=False, validation_fraction=0.1,
                 n_iter_no_change=5, warm_start=False, average=False):
        super().__init__(
            loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,
            fit_intercept=fit_intercept, max_iter=max_iter, tol=tol,
            shuffle=shuffle, verbose=verbose, epsilon=epsilon,
            random_state=random_state, learning_rate=learning_rate, eta0=eta0,
            power_t=power_t, early_stopping=early_stopping,
            validation_fraction=validation_fraction,
            n_iter_no_change=n_iter_no_change, warm_start=warm_start,
            average=average)

    def _partial_fit(self, X, y, alpha, C, loss, learning_rate,
                     max_iter, sample_weight, coef_init, intercept_init):
        # Core incremental fitting: validates input, allocates parameter
        # memory on first call, then runs the regression SGD loop.
        X, y = check_X_y(X, y, "csr", copy=False, order='C', dtype=np.float64,
                         accept_large_sparse=False)
        y = y.astype(np.float64, copy=False)

        n_samples, n_features = X.shape

        sample_weight = _check_sample_weight(sample_weight, X)

        # Allocate datastructures from input arguments
        if getattr(self, "coef_", None) is None:
            self._allocate_parameter_mem(1, n_features, coef_init,
                                         intercept_init)
        elif n_features != self.coef_.shape[-1]:
            raise ValueError("Number of features %d does not match previous "
                             "data %d." % (n_features, self.coef_.shape[-1]))
        if self.average > 0 and getattr(self, "average_coef_", None) is None:
            self.average_coef_ = np.zeros(n_features,
                                          dtype=np.float64,
                                          order="C")
            self.average_intercept_ = np.zeros(1, dtype=np.float64, order="C")

        self._fit_regressor(X, y, alpha, C, loss, learning_rate,
                            sample_weight, max_iter)

        return self

    def partial_fit(self, X, y, sample_weight=None):
        """Perform one epoch of stochastic gradient descent on given samples."""
        self._validate_params(for_partial_fit=True)
        return self._partial_fit(X, y, self.alpha, C=1.0,
                                 loss=self.loss,
                                 learning_rate=self.learning_rate, max_iter=1,
                                 sample_weight=sample_weight, coef_init=None,
                                 intercept_init=None)

    def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
             intercept_init=None, sample_weight=None):
        # Full fit: resets state (unless warm-starting) then runs
        # ``_partial_fit`` for ``max_iter`` epochs.
        self._validate_params()
        if self.warm_start and getattr(self, "coef_", None) is not None:
            if coef_init is None:
                coef_init = self.coef_
            if intercept_init is None:
                intercept_init = self.intercept_
        else:
            self.coef_ = None
            self.intercept_ = None

        if self.average > 0:
            self.standard_intercept_ = self.intercept_
            self.standard_coef_ = self.coef_
            self.average_coef_ = None
            self.average_intercept_ = None

        # Clear iteration count for multiple call to fit.
        self.t_ = 1.0

        self._partial_fit(X, y, alpha, C, loss, learning_rate,
                          self.max_iter, sample_weight, coef_init,
                          intercept_init)

        if (self.tol is not None and self.tol > -np.inf
                and self.n_iter_ == self.max_iter):
            warnings.warn("Maximum number of iteration reached before "
                          "convergence. Consider increasing max_iter to "
                          "improve the fit.",
                          ConvergenceWarning)

        return self

    def fit(self, X, y, coef_init=None, intercept_init=None,
            sample_weight=None):
        """Fit linear model with Stochastic Gradient Descent."""
        return self._fit(X, y, alpha=self.alpha, C=1.0,
                         loss=self.loss, learning_rate=self.learning_rate,
                         coef_init=coef_init,
                         intercept_init=intercept_init,
                         sample_weight=sample_weight)

    def _decision_function(self, X):
        """Predict using the linear model: X @ coef_.T + intercept_."""
        check_is_fitted(self)

        X = check_array(X, accept_sparse='csr')

        scores = safe_sparse_dot(X, self.coef_.T,
                                 dense_output=True) + self.intercept_
        return scores.ravel()

    def predict(self, X):
        """Predict using the linear model."""
        return self._decision_function(X)

    def _fit_regressor(self, X, y, alpha, C, loss, learning_rate,
                       sample_weight, max_iter):
        # Dispatches to the Cython ``plain_sgd`` / ``average_sgd`` routines;
        # the positional argument order there must not be changed.
        dataset, intercept_decay = make_dataset(X, y, sample_weight)

        loss_function = self._get_loss_function(loss)
        penalty_type = self._get_penalty_type(self.penalty)
        learning_rate_type = self._get_learning_rate_type(learning_rate)

        if not hasattr(self, "t_"):
            self.t_ = 1.0

        validation_mask = self._make_validation_split(y)
        validation_score_cb = self._make_validation_score_cb(
            validation_mask, X, y, sample_weight)

        random_state = check_random_state(self.random_state)
        # numpy mtrand expects a C long which is a signed 32 bit integer under
        # Windows
        seed = random_state.randint(0, np.iinfo(np.int32).max)

        tol = self.tol if self.tol is not None else -np.inf

        if self.average > 0:
            self.standard_coef_, self.standard_intercept_, \
                self.average_coef_, self.average_intercept_, self.n_iter_ =\
                average_sgd(self.standard_coef_,
                            self.standard_intercept_[0],
                            self.average_coef_,
                            self.average_intercept_[0],
                            loss_function,
                            penalty_type,
                            alpha, C,
                            self.l1_ratio,
                            dataset,
                            validation_mask, self.early_stopping,
                            validation_score_cb,
                            int(self.n_iter_no_change),
                            max_iter, tol,
                            int(self.fit_intercept),
                            int(self.verbose),
                            int(self.shuffle),
                            seed,
                            1.0, 1.0,
                            learning_rate_type,
                            self.eta0, self.power_t, self.t_,
                            intercept_decay, self.average)

            self.average_intercept_ = np.atleast_1d(self.average_intercept_)
            self.standard_intercept_ = np.atleast_1d(self.standard_intercept_)
            self.t_ += self.n_iter_ * X.shape[0]

            # expose averaged weights once enough samples have been seen
            if self.average <= self.t_ - 1.0:
                self.coef_ = self.average_coef_
                self.intercept_ = self.average_intercept_
            else:
                self.coef_ = self.standard_coef_
                self.intercept_ = self.standard_intercept_

        else:
            self.coef_, self.intercept_, self.n_iter_ = \
                plain_sgd(self.coef_,
                          self.intercept_[0],
                          loss_function,
                          penalty_type,
                          alpha, C,
                          self.l1_ratio,
                          dataset,
                          validation_mask, self.early_stopping,
                          validation_score_cb,
                          int(self.n_iter_no_change),
                          max_iter, tol,
                          int(self.fit_intercept),
                          int(self.verbose),
                          int(self.shuffle),
                          seed,
                          1.0, 1.0,
                          learning_rate_type,
                          self.eta0, self.power_t, self.t_,
                          intercept_decay)

            self.t_ += self.n_iter_ * X.shape[0]
            self.intercept_ = np.atleast_1d(self.intercept_)
class SGDRegressor(BaseSGDRegressor):
    """Linear model fitted by minimizing a regularized loss with SGD."""

    def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
                 l1_ratio=0.15, fit_intercept=True, max_iter=1000, tol=1e-3,
                 shuffle=True, verbose=0, epsilon=DEFAULT_EPSILON,
                 random_state=None, learning_rate="invscaling", eta0=0.01,
                 power_t=0.25, early_stopping=False, validation_fraction=0.1,
                 n_iter_no_change=5, warm_start=False, average=False):
        # Pass-through constructor; all parameters are stored on the base.
        super().__init__(
            loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,
            fit_intercept=fit_intercept, max_iter=max_iter, tol=tol,
            shuffle=shuffle, verbose=verbose, epsilon=epsilon,
            random_state=random_state, learning_rate=learning_rate, eta0=eta0,
            power_t=power_t, early_stopping=early_stopping,
            validation_fraction=validation_fraction,
            n_iter_no_change=n_iter_no_change, warm_start=warm_start,
            average=average)
| true | true |
f7256edb1cd981f4d5a110f018a377b55aa4f7c7 | 1,246 | py | Python | Model prediction/app.py | choudhury722k/English-to-French-translator | e792ce92adbdd3100d73d9d8aebc109cc7c560d7 | [
"MIT"
] | null | null | null | Model prediction/app.py | choudhury722k/English-to-French-translator | e792ce92adbdd3100d73d9d8aebc109cc7c560d7 | [
"MIT"
] | null | null | null | Model prediction/app.py | choudhury722k/English-to-French-translator | e792ce92adbdd3100d73d9d8aebc109cc7c560d7 | [
"MIT"
] | null | null | null | from re import X
from flask import Flask,render_template,url_for,request
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras import models
import numpy as np
import pickle

# Fitted tokenizers produced at training time: map words <-> integer ids.
french_tokenizer = pickle.load(open('french_tokenizer.pickle', 'rb'))
english_tokenizer = pickle.load(open('english_tokenizer.pickle', 'rb'))
# Trained English->French translation model.
model = models.load_model("translator_model.h5")

# Inverse French vocabulary: token id -> word; id 0 is the padding token.
y_id_to_word = {value: key for key, value in french_tokenizer.word_index.items()}
y_id_to_word[0] = '<PAD>'

app = Flask(__name__)
@app.route('/')
def hello_World():
    # Simple liveness check for the service root.
    return "Hello Soumya"
@app.route('/translator', methods = ['GET', 'POST'])
def eng_to_french():
    """Translate the English sentence in the ``message`` query parameter.

    Returns the greedily decoded French sentence with padding stripped.
    """
    message = request.args.get("message")
    # Bug fix: without this guard a missing parameter crashed with
    # AttributeError on ``None.split()``.
    if not message:
        return "Missing 'message' query parameter", 400
    # Bug fix: unknown words used to raise KeyError; skip them instead.
    word_index = english_tokenizer.word_index
    sentence = [word_index[word] for word in message.split()
                if word in word_index]
    # Pad/truncate to the fixed encoder length used at training time.
    sentence = pad_sequences([sentence], maxlen=15, padding='post')
    sentences = np.array([sentence[0]])
    predictions = model.predict(sentences, len(sentences))
    # Greedy decode: most probable French token at every position.
    x = ' '.join([y_id_to_word[np.argmax(step)] for step in predictions[0]])
    if '<PAD>' in x:
        x = x.replace('<PAD>', '')
    print(x)
    return x
if __name__ == '__main__':
    # Development server only; use a WSGI server (gunicorn etc.) in production.
    app.run(debug=True)
| 31.948718 | 81 | 0.719904 | from re import X
from flask import Flask,render_template,url_for,request
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras import models
import numpy as np
import pickle

# Fitted tokenizers produced at training time: map words <-> integer ids.
french_tokenizer = pickle.load(open('french_tokenizer.pickle', 'rb'))
english_tokenizer = pickle.load(open('english_tokenizer.pickle', 'rb'))
# Trained English->French translation model.
model = models.load_model("translator_model.h5")
# Inverse French vocabulary: token id -> word; id 0 is the padding token.
y_id_to_word = {value: key for key, value in french_tokenizer.word_index.items()}
y_id_to_word[0] = '<PAD>'
app = Flask(__name__)
@app.route('/')
def hello_World():
    # Simple liveness check for the service root.
    return "Hello Soumya"
@app.route('/translator', methods = ['GET', 'POST'])
def eng_to_french():
    """Translate the English sentence in the ``message`` query parameter.

    Returns the greedily decoded French sentence with padding stripped.
    """
    message = request.args.get("message")
    # Bug fix: without this guard a missing parameter crashed with
    # AttributeError on ``None.split()``.
    if not message:
        return "Missing 'message' query parameter", 400
    # Bug fix: unknown words used to raise KeyError; skip them instead.
    word_index = english_tokenizer.word_index
    sentence = [word_index[word] for word in message.split()
                if word in word_index]
    # Pad/truncate to the fixed encoder length used at training time.
    sentence = pad_sequences([sentence], maxlen=15, padding='post')
    sentences = np.array([sentence[0]])
    predictions = model.predict(sentences, len(sentences))
    # Greedy decode: most probable French token at every position.
    x = ' '.join([y_id_to_word[np.argmax(step)] for step in predictions[0]])
    if '<PAD>' in x:
        x = x.replace('<PAD>', '')
    print(x)
    return x
if __name__ == '__main__':
    # Development server only; use a WSGI server (gunicorn etc.) in production.
    app.run(debug=True)
| true | true |
f7256eedcf3a758fc0b86618617827425e34c972 | 438 | py | Python | carpyncho2/carpyncho/steps/prepare_pawprint_to_sync.py | carpyncho/yeolde_carpyncho | fba72ebf9d4a3e4e4ea18160310058c6812a0457 | [
"BSD-3-Clause"
] | null | null | null | carpyncho2/carpyncho/steps/prepare_pawprint_to_sync.py | carpyncho/yeolde_carpyncho | fba72ebf9d4a3e4e4ea18160310058c6812a0457 | [
"BSD-3-Clause"
] | 2 | 2020-06-05T19:37:26.000Z | 2020-06-05T19:40:38.000Z | carpyncho2/carpyncho/steps/prepare_pawprint_to_sync.py | carpyncho/yeolde_carpyncho | fba72ebf9d4a3e4e4ea18160310058c6812a0457 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from corral import run
from ..models import PawprintXTile
class PreparePawprintToSync(run.Step):
    """Queue raw pawprint-tile links whose tile and pawprint are both loaded.

    Corral step: selects up to ``limit`` PawprintXTile rows matching
    ``conditions`` and flips their status so the 'match' stage picks them up.
    """

    model = PawprintXTile
    conditions = [
        PawprintXTile.status == "raw",
        PawprintXTile.tile.has(status="loaded"),
        PawprintXTile.pawprint.has(status="loaded")
    ]
    limit = 500
    groups = ["match"]

    def process(self, pxt):
        # Hand the link over to the matching stage.
        pxt.status = "pending"
| 19.909091 | 51 | 0.630137 |
from corral import run
from ..models import PawprintXTile
class PreparePawprintToSync(run.Step):
    """Queue raw pawprint-tile links whose tile and pawprint are both loaded.

    Corral step: selects up to ``limit`` PawprintXTile rows matching
    ``conditions`` and flips their status so the 'match' stage picks them up.
    """

    model = PawprintXTile
    conditions = [
        PawprintXTile.status == "raw",
        PawprintXTile.tile.has(status="loaded"),
        PawprintXTile.pawprint.has(status="loaded")
    ]
    limit = 500
    groups = ["match"]

    def process(self, pxt):
        # Hand the link over to the matching stage.
        pxt.status = "pending"
| true | true |
f7256fad4dd4f8677f2d6bac3cf8110e20ecf681 | 423 | py | Python | mysite/mysite/development_settings.py | timmahrt/gamecorpus | 6ce170f3d590475a320410c9d937039555207ee9 | [
"MIT"
] | null | null | null | mysite/mysite/development_settings.py | timmahrt/gamecorpus | 6ce170f3d590475a320410c9d937039555207ee9 | [
"MIT"
] | null | null | null | mysite/mysite/development_settings.py | timmahrt/gamecorpus | 6ce170f3d590475a320410c9d937039555207ee9 | [
"MIT"
] | null | null | null | from mysite.common_settings import *
SECRET_KEY = "aje#lg$7!t!tc5*i%ittn(to%5#5%vjvi*oc=ib25wx%+##_b+"
DEBUG = True
ALLOWED_HOSTS = ["*"]
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.mysql",
"NAME": "game_corpus_db",
"HOST": "127.0.0.1",
"USER": "tmahrt",
"PASSWORD": "12345678",
}
} | 21.15 | 65 | 0.591017 | from mysite.common_settings import *
# Development-only settings: hard-coded secret key / DB password and the
# permissive DEBUG / ALLOWED_HOSTS values must never be used in production.
SECRET_KEY = "aje#lg$7!t!tc5*i%ittn(to%5#5%vjvi*oc=ib25wx%+##_b+"

DEBUG = True

ALLOWED_HOSTS = ["*"]

# Bug fix: the setting name had been truncated to ``S`` — Django only reads
# ``DATABASES``, so the MySQL configuration was silently ignored.
DATABASES = {
    "default": {
        "ENGINE": "django.db.backends.mysql",
        "NAME": "game_corpus_db",
        "HOST": "127.0.0.1",
        "USER": "tmahrt",
        "PASSWORD": "12345678",
    }
}
f725707d1175e051257c22b78d93284af9b6061b | 4,819 | py | Python | catalyst/engines/tests/test_parallel.py | alxmamaev/catalyst | d05120c68fbc5174ff74297d29c0fc00d7e94924 | [
"Apache-2.0"
] | 1 | 2021-03-02T12:06:32.000Z | 2021-03-02T12:06:32.000Z | catalyst/engines/tests/test_parallel.py | alxmamaev/catalyst | d05120c68fbc5174ff74297d29c0fc00d7e94924 | [
"Apache-2.0"
] | null | null | null | catalyst/engines/tests/test_parallel.py | alxmamaev/catalyst | d05120c68fbc5174ff74297d29c0fc00d7e94924 | [
"Apache-2.0"
] | null | null | null | # flake8: noqa
from typing import Any, Dict, List
import logging
from tempfile import TemporaryDirectory
from pytest import mark
import torch
from torch.utils.data import DataLoader
from catalyst.callbacks import CheckpointCallback, CriterionCallback, OptimizerCallback
from catalyst.core.runner import IRunner
from catalyst.engines import DataParallelEngine
from catalyst.engines.torch import DeviceEngine
from catalyst.loggers import ConsoleLogger, CSVLogger
from catalyst.runners.config import SupervisedConfigRunner
from catalyst.settings import IS_CUDA_AVAILABLE, NUM_CUDA_DEVICES
from .misc import DataParallelTypeChecker, DummyDataset, DummyModel, LossMinimizationCallback
logger = logging.getLogger(__name__)
class CustomRunner(IRunner):
    """Minimal IRunner that trains DummyModel under DataParallelEngine.

    Used to verify that the engine wraps the model in
    ``torch.nn.DataParallel`` (via DataParallelTypeChecker) and that the
    loss actually decreases (via LossMinimizationCallback).
    """
    def __init__(self, logdir):
        super().__init__()
        # directory for checkpoints and CSV logs
        self._logdir = logdir
    def get_engine(self):
        """Return the engine under test."""
        return DataParallelEngine()
    def get_callbacks(self, stage: str):
        """Return training callbacks plus the two test-assertion callbacks."""
        return {
            "criterion": CriterionCallback(
                metric_key="loss", input_key="logits", target_key="targets"
            ),
            "optimizer": OptimizerCallback(metric_key="loss"),
            # "scheduler": dl.SchedulerCallback(loader_key="valid", metric_key="loss"),
            "checkpoint": CheckpointCallback(
                self._logdir, loader_key="valid", metric_key="loss", minimize=True, save_n_best=3
            ),
            "test_nn_parallel_data_parallel": DataParallelTypeChecker(),
            "test_loss_minimization": LossMinimizationCallback("loss", logger=logger),
        }
    @property
    def stages(self) -> "Iterable[str]":
        # single-stage experiment
        return ["train"]
    def get_stage_len(self, stage: str) -> int:
        # number of epochs per stage
        return 10
    def get_loaders(self, stage: str) -> "OrderedDict[str, DataLoader]":
        """Train and valid share one small synthetic dataset."""
        dataset = DummyDataset(6)
        loader = DataLoader(dataset, batch_size=4)
        return {"train": loader, "valid": loader}
    def get_model(self, stage: str):
        # 4 input features -> 2 outputs, matching DummyDataset
        return DummyModel(4, 2)
    def get_criterion(self, stage: str):
        return torch.nn.MSELoss()
    def get_optimizer(self, model, stage: str):
        return torch.optim.Adam(model.parameters())
    def get_scheduler(self, optimizer, stage: str):
        # no LR schedule for this test
        return None
    def get_trial(self):
        return None
    def get_loggers(self):
        """Log to stdout and to CSV files under the logdir."""
        return {"console": ConsoleLogger(), "csv": CSVLogger(logdir=self._logdir)}
    def handle_batch(self, batch):
        """Forward pass; expose features/targets/logits for the callbacks."""
        x, y = batch
        logits = self.model(x)
        self.batch = {"features": x, "targets": y, "logits": logits}
def train_from_runner():
    """Run the programmatic CustomRunner experiment in a throwaway logdir."""
    with TemporaryDirectory() as temporary_logdir:
        CustomRunner(temporary_logdir).run()
def train_from_config():
    """Run the same DataParallel experiment through the config-based runner.

    Builds a ``SupervisedConfigRunner`` whose config mirrors ``CustomRunner``
    and patches ``get_datasets`` so train and valid share one dummy dataset.
    """
    with TemporaryDirectory() as logdir:
        dataset = DummyDataset(6)
        runner = SupervisedConfigRunner(
            config={
                # BUG FIX: the dict literal previously listed "args" twice;
                # duplicate keys silently overwrite, so only one entry is kept.
                "args": {"logdir": logdir},
                "model": {"_target_": "DummyModel", "in_features": 4, "out_features": 2},
                "engine": {"_target_": "DataParallelEngine"},
                "stages": {
                    "stage1": {
                        "num_epochs": 10,
                        "loaders": {"batch_size": 4, "num_workers": 0},
                        "criterion": {"_target_": "MSELoss"},
                        "optimizer": {"_target_": "Adam", "lr": 1e-3},
                        "callbacks": {
                            "criterion": {
                                "_target_": "CriterionCallback",
                                "metric_key": "loss",
                                "input_key": "logits",
                                "target_key": "targets",
                            },
                            "optimizer": {"_target_": "OptimizerCallback", "metric_key": "loss"},
                            # asserts the model is wrapped in nn.DataParallel
                            "test_nn_parallel_data_parallel": {
                                "_target_": "DataParallelTypeChecker"
                            },
                            # asserts the loss decreases over epochs
                            "test_loss_minimization": {
                                "_target_": "LossMinimizationCallback",
                                "key": "loss",
                            },
                        },
                    },
                },
            }
        )
        runner.get_datasets = lambda *args, **kwargs: {
            "train": dataset,
            "valid": dataset,
        }
        runner.run()
@mark.skipif(not IS_CUDA_AVAILABLE, reason="CUDA device is not available")
def test_experiment_parallel_engine_with_cuda():
    """End-to-end check of DataParallelEngine via the IRunner API."""
    train_from_runner()
# @mark.skip("Config experiment is in development phase!")
@mark.skipif(not IS_CUDA_AVAILABLE, reason="CUDA device is not available")
def test_config_experiment_engine_with_cuda():
    """End-to-end check of DataParallelEngine via the config runner."""
    train_from_config()
| 34.421429 | 97 | 0.573563 |
from typing import Any, Dict, List
import logging
from tempfile import TemporaryDirectory
from pytest import mark
import torch
from torch.utils.data import DataLoader
from catalyst.callbacks import CheckpointCallback, CriterionCallback, OptimizerCallback
from catalyst.core.runner import IRunner
from catalyst.engines import DataParallelEngine
from catalyst.engines.torch import DeviceEngine
from catalyst.loggers import ConsoleLogger, CSVLogger
from catalyst.runners.config import SupervisedConfigRunner
from catalyst.settings import IS_CUDA_AVAILABLE, NUM_CUDA_DEVICES
from .misc import DataParallelTypeChecker, DummyDataset, DummyModel, LossMinimizationCallback
logger = logging.getLogger(__name__)
class CustomRunner(IRunner):
def __init__(self, logdir):
super().__init__()
self._logdir = logdir
def get_engine(self):
return DataParallelEngine()
def get_callbacks(self, stage: str):
return {
"criterion": CriterionCallback(
metric_key="loss", input_key="logits", target_key="targets"
),
"optimizer": OptimizerCallback(metric_key="loss"),
"checkpoint": CheckpointCallback(
self._logdir, loader_key="valid", metric_key="loss", minimize=True, save_n_best=3
),
"test_nn_parallel_data_parallel": DataParallelTypeChecker(),
"test_loss_minimization": LossMinimizationCallback("loss", logger=logger),
}
@property
def stages(self) -> "Iterable[str]":
return ["train"]
def get_stage_len(self, stage: str) -> int:
return 10
def get_loaders(self, stage: str) -> "OrderedDict[str, DataLoader]":
dataset = DummyDataset(6)
loader = DataLoader(dataset, batch_size=4)
return {"train": loader, "valid": loader}
def get_model(self, stage: str):
return DummyModel(4, 2)
def get_criterion(self, stage: str):
return torch.nn.MSELoss()
def get_optimizer(self, model, stage: str):
return torch.optim.Adam(model.parameters())
def get_scheduler(self, optimizer, stage: str):
return None
def get_trial(self):
return None
def get_loggers(self):
return {"console": ConsoleLogger(), "csv": CSVLogger(logdir=self._logdir)}
def handle_batch(self, batch):
x, y = batch
logits = self.model(x)
self.batch = {"features": x, "targets": y, "logits": logits}
def train_from_runner():
with TemporaryDirectory() as logdir:
runner = CustomRunner(logdir)
runner.run()
def train_from_config():
with TemporaryDirectory() as logdir:
dataset = DummyDataset(6)
runner = SupervisedConfigRunner(
config={
"args": {"logdir": logdir},
"model": {"_target_": "DummyModel", "in_features": 4, "out_features": 2},
"engine": {"_target_": "DataParallelEngine"},
"args": {"logdir": logdir},
"stages": {
"stage1": {
"num_epochs": 10,
"loaders": {"batch_size": 4, "num_workers": 0},
"criterion": {"_target_": "MSELoss"},
"optimizer": {"_target_": "Adam", "lr": 1e-3},
"callbacks": {
"criterion": {
"_target_": "CriterionCallback",
"metric_key": "loss",
"input_key": "logits",
"target_key": "targets",
},
"optimizer": {"_target_": "OptimizerCallback", "metric_key": "loss"},
"test_nn_parallel_data_parallel": {
"_target_": "DataParallelTypeChecker"
},
"test_loss_minimization": {
"_target_": "LossMinimizationCallback",
"key": "loss",
},
},
},
},
}
)
runner.get_datasets = lambda *args, **kwargs: {
"train": dataset,
"valid": dataset,
}
runner.run()
@mark.skipif(not IS_CUDA_AVAILABLE, reason="CUDA device is not available")
def test_experiment_parallel_engine_with_cuda():
train_from_runner()
@mark.skipif(not IS_CUDA_AVAILABLE, reason="CUDA device is not available")
def test_config_experiment_engine_with_cuda():
train_from_config()
| true | true |
f72572a5ded8e8384c8775f3509841aff1d8e01a | 1,939 | py | Python | tests/test_issues/test_issue_25.py | cmungall/PyShEx | 43026c4b0393362e770b868794c5d9071e691d6f | [
"CC0-1.0"
] | 25 | 2018-01-11T10:59:16.000Z | 2021-07-02T03:44:02.000Z | tests/test_issues/test_issue_25.py | cmungall/PyShEx | 43026c4b0393362e770b868794c5d9071e691d6f | [
"CC0-1.0"
] | 66 | 2018-03-12T01:12:02.000Z | 2022-03-18T07:56:31.000Z | tests/test_issues/test_issue_25.py | cmungall/PyShEx | 43026c4b0393362e770b868794c5d9071e691d6f | [
"CC0-1.0"
] | 12 | 2018-04-06T11:29:40.000Z | 2021-12-17T22:48:07.000Z | import os
import unittest
from contextlib import redirect_stdout, redirect_stderr
from io import StringIO
from pyshex.shex_evaluator import evaluate_cli
data_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'data'))
validation_dir = os.path.join(data_dir, 'validation')
rdffile = os.path.join(validation_dir, 'simple.ttl')
shexfile = os.path.join(validation_dir, 'simple.shex')
class Issue25TestCase(unittest.TestCase):
    """Regression tests for issue #25: CLI behavior for focus/start nodes.

    The expected strings below are compared byte-for-byte against the CLI
    output, so they must not be reformatted.
    """
    def test_nostart(self):
        """-A against a schema with no START reports a clear error."""
        outf = StringIO()
        with(redirect_stdout(outf)):
            evaluate_cli(f"{rdffile} {shexfile} -A".split())
        self.assertEqual("""Errors:
Focus: None
Start: None
Reason: START node is not specified""", outf.getvalue().strip())
    def test_all_nodes(self):
        """-s without focus nodes errors; -A -s evaluates every node."""
        outf = StringIO()
        with(redirect_stderr(outf)):
            evaluate_cli(f"{rdffile} {shexfile} -s http://example.org/shapes/S".split())
        self.assertEqual('Error: You must specify one or more graph focus nodes, supply a SPARQL query, '
                         'or use the "-A" option',
                         outf.getvalue().strip())
        outf = StringIO()
        with(redirect_stdout(outf)):
            evaluate_cli(f"{rdffile} {shexfile} -A -s http://example.org/shapes/S".split())
        self.assertEqual("""Errors:
Focus: http://a.example/s1
Start: http://example.org/shapes/S
Reason: Testing :s1 against shape http://example.org/shapes/S
No matching triples found for predicate :s4
Focus: http://a.example/s2
Start: http://example.org/shapes/S
Reason: Testing :s2 against shape http://example.org/shapes/S
No matching triples found for predicate :s4
Focus: http://a.example/s3
Start: http://example.org/shapes/S
Reason: Testing :s3 against shape http://example.org/shapes/S
No matching triples found for predicate :s4""", outf.getvalue().strip())
# Allow running this test module directly without pytest.
if __name__ == '__main__':
    unittest.main()
| 35.254545 | 105 | 0.66426 | import os
import unittest
from contextlib import redirect_stdout, redirect_stderr
from io import StringIO
from pyshex.shex_evaluator import evaluate_cli
data_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'data'))
validation_dir = os.path.join(data_dir, 'validation')
rdffile = os.path.join(validation_dir, 'simple.ttl')
shexfile = os.path.join(validation_dir, 'simple.shex')
class Issue25TestCase(unittest.TestCase):
def test_nostart(self):
outf = StringIO()
with(redirect_stdout(outf)):
evaluate_cli(f"{rdffile} {shexfile} -A".split())
self.assertEqual("""Errors:
Focus: None
Start: None
Reason: START node is not specified""", outf.getvalue().strip())
def test_all_nodes(self):
outf = StringIO()
with(redirect_stderr(outf)):
evaluate_cli(f"{rdffile} {shexfile} -s http://example.org/shapes/S".split())
self.assertEqual('Error: You must specify one or more graph focus nodes, supply a SPARQL query, '
'or use the "-A" option',
outf.getvalue().strip())
outf = StringIO()
with(redirect_stdout(outf)):
evaluate_cli(f"{rdffile} {shexfile} -A -s http://example.org/shapes/S".split())
self.assertEqual("""Errors:
Focus: http://a.example/s1
Start: http://example.org/shapes/S
Reason: Testing :s1 against shape http://example.org/shapes/S
No matching triples found for predicate :s4
Focus: http://a.example/s2
Start: http://example.org/shapes/S
Reason: Testing :s2 against shape http://example.org/shapes/S
No matching triples found for predicate :s4
Focus: http://a.example/s3
Start: http://example.org/shapes/S
Reason: Testing :s3 against shape http://example.org/shapes/S
No matching triples found for predicate :s4""", outf.getvalue().strip())
if __name__ == '__main__':
unittest.main()
| true | true |
f725742c34d7363a4d55d7cc29936dc1953858f7 | 3,975 | py | Python | cpp/build-support/lint_cpp_cli.py | LuoZijun/arrow | 8219a8b878d9344fe73e07def34a18a71a8f85a8 | [
"CC-BY-3.0",
"Apache-2.0",
"CC0-1.0"
] | 1 | 2020-09-15T16:47:08.000Z | 2020-09-15T16:47:08.000Z | cpp/build-support/lint_cpp_cli.py | LuoZijun/arrow | 8219a8b878d9344fe73e07def34a18a71a8f85a8 | [
"CC-BY-3.0",
"Apache-2.0",
"CC0-1.0"
] | 3 | 2018-10-25T13:52:14.000Z | 2018-10-27T08:44:27.000Z | cpp/build-support/lint_cpp_cli.py | LuoZijun/arrow | 8219a8b878d9344fe73e07def34a18a71a8f85a8 | [
"CC-BY-3.0",
"Apache-2.0",
"CC0-1.0"
] | 10 | 2019-03-18T08:19:16.000Z | 2020-09-15T09:05:39.000Z | #!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
import re
import os
# Command line: one positional argument pointing at the C++ source tree.
parser = argparse.ArgumentParser(
    description="Check for illegal headers for C++/CLI applications")
parser.add_argument("source_path",
                    help="Path to source code")
# NOTE(review): arguments are parsed at import time, so importing this
# module without the expected argv will exit -- confirm it is script-only.
arguments = parser.parse_args()
# Everything on a line before a "//" comment marker.
_STRIP_COMMENT_REGEX = re.compile('(.+)?(?=//)')
# Lines using the C++11 nullptr keyword (not allowed in C++/CLI code).
_NULLPTR_REGEX = re.compile(r'.*\bnullptr\b.*')
# Bare macros that headers must replace with their ARROW_-prefixed forms.
_RETURN_NOT_OK_REGEX = re.compile(r'.*\sRETURN_NOT_OK.*')
_ASSIGN_OR_RAISE_REGEX = re.compile(r'.*\sASSIGN_OR_RAISE.*')
def _paths(paths):
    """Split a newline-delimited path list into native-separator paths."""
    native_paths = []
    for raw_line in paths.splitlines():
        native_paths.append(raw_line.strip().replace('/', os.path.sep))
    return native_paths
def _strip_comments(line):
    """Return *line* truncated just before its ``//`` comment marker.

    Lines without a ``//`` marker are returned unchanged.
    """
    match = _STRIP_COMMENT_REGEX.match(line)
    return match.group(0) if match else line
def lint_file(path):
    """Yield ``(path, reason, line_index, line)`` for each violation in *path*.

    Each rule is a ``(predicate, message, exclusions)`` triple; a rule does
    not apply to files whose path contains any exclusion fragment.
    Comments are stripped before predicates run so commented-out code does
    not trigger false positives.  ``line_index`` is zero-based.
    """
    fail_rules = [
        # rule, error message, rule-specific exclusions list
        (lambda x: '<mutex>' in x, 'Uses <mutex>', []),
        (lambda x: '<iostream>' in x, 'Uses <iostream>', []),
        (lambda x: re.match(_NULLPTR_REGEX, x), 'Uses nullptr', []),
        (lambda x: re.match(_RETURN_NOT_OK_REGEX, x),
         'Use ARROW_RETURN_NOT_OK in header files', _paths('''\
arrow/status.h
test
arrow/util/hash.h
arrow/python/util''')),
        (lambda x: re.match(_ASSIGN_OR_RAISE_REGEX, x),
         'Use ARROW_ASSIGN_OR_RAISE in header files', _paths('''\
arrow/result_internal.h
test
'''))
    ]
    # The exclusion check depends only on the file path, so filter the rule
    # set once per file instead of once per line (the old code also built a
    # throwaway list inside any(); a generator expression avoids that).
    applicable_rules = [
        (rule, why) for rule, why, rule_exclusions in fail_rules
        if not any(excl in path for excl in rule_exclusions)
    ]
    with open(path) as f:
        for i, line in enumerate(f):
            stripped_line = _strip_comments(line)
            for rule, why in applicable_rules:
                if rule(stripped_line):
                    yield path, why, i, line
EXCLUSIONS = _paths('''\
arrow/python/iterators.h
arrow/util/hashing.h
arrow/util/macros.h
arrow/util/parallel.h
arrow/vendored
arrow/visitor_inline.h
gandiva/cache.h
gandiva/jni
jni/
test
internal
_generated''')
def lint_files():
    """Walk ``arguments.source_path`` and yield every lint failure.

    Skips globally excluded paths, flags hyphenated source file names, and
    runs :func:`lint_file` on each header file.
    """
    for dirpath, _, filenames in os.walk(arguments.source_path):
        for filename in filenames:
            full_path = os.path.join(dirpath, filename)
            # Globally excluded paths -- any() replaces the old manual
            # flag-and-break loop with identical semantics.
            if any(exclusion in full_path for exclusion in EXCLUSIONS):
                continue
            # Lint file name, except for pkgconfig templates
            if not filename.endswith('.pc.in'):
                if '-' in filename:
                    # BUG FIX: message previously read "Please user underscores".
                    why = ("Please use underscores, not hyphens, "
                           "in source file names")
                    yield full_path, why, 0, full_path
            # Only run on header files
            if filename.endswith('.h'):
                for failure in lint_file(full_path):
                    yield failure
if __name__ == '__main__':
    # Collect every failure first so we can both print them all and use the
    # list's truthiness for the exit status.
    failures = list(lint_files())
    for path, why, i, line in failures:
        # i is zero-based; report one-based line numbers to the user.
        print('File {0} failed C++/CLI lint check: {1}\n'
              'Line {2}: {3}'.format(path, why, i + 1, line))
    if failures:
        exit(1)
| 31.054688 | 76 | 0.610063 |
import argparse
import re
import os
parser = argparse.ArgumentParser(
description="Check for illegal headers for C++/CLI applications")
parser.add_argument("source_path",
help="Path to source code")
arguments = parser.parse_args()
_STRIP_COMMENT_REGEX = re.compile('(.+)?(?=//)')
_NULLPTR_REGEX = re.compile(r'.*\bnullptr\b.*')
_RETURN_NOT_OK_REGEX = re.compile(r'.*\sRETURN_NOT_OK.*')
_ASSIGN_OR_RAISE_REGEX = re.compile(r'.*\sASSIGN_OR_RAISE.*')
def _paths(paths):
return [p.strip().replace('/', os.path.sep) for p in paths.splitlines()]
def _strip_comments(line):
m = _STRIP_COMMENT_REGEX.match(line)
if not m:
return line
else:
return m.group(0)
def lint_file(path):
fail_rules = [
(lambda x: '<mutex>' in x, 'Uses <mutex>', []),
(lambda x: '<iostream>' in x, 'Uses <iostream>', []),
(lambda x: re.match(_NULLPTR_REGEX, x), 'Uses nullptr', []),
(lambda x: re.match(_RETURN_NOT_OK_REGEX, x),
'Use ARROW_RETURN_NOT_OK in header files', _paths('''\
arrow/status.h
test
arrow/util/hash.h
arrow/python/util''')),
(lambda x: re.match(_ASSIGN_OR_RAISE_REGEX, x),
'Use ARROW_ASSIGN_OR_RAISE in header files', _paths('''\
arrow/result_internal.h
test
'''))
]
with open(path) as f:
for i, line in enumerate(f):
stripped_line = _strip_comments(line)
for rule, why, rule_exclusions in fail_rules:
if any([True for excl in rule_exclusions if excl in path]):
continue
if rule(stripped_line):
yield path, why, i, line
EXCLUSIONS = _paths('''\
arrow/python/iterators.h
arrow/util/hashing.h
arrow/util/macros.h
arrow/util/parallel.h
arrow/vendored
arrow/visitor_inline.h
gandiva/cache.h
gandiva/jni
jni/
test
internal
_generated''')
def lint_files():
for dirpath, _, filenames in os.walk(arguments.source_path):
for filename in filenames:
full_path = os.path.join(dirpath, filename)
exclude = False
for exclusion in EXCLUSIONS:
if exclusion in full_path:
exclude = True
break
if exclude:
continue
if not filename.endswith('.pc.in'):
if '-' in filename:
why = ("Please user underscores, not hyphens, "
"in source file names")
yield full_path, why, 0, full_path
if filename.endswith('.h'):
for _ in lint_file(full_path):
yield _
if __name__ == '__main__':
failures = list(lint_files())
for path, why, i, line in failures:
print('File {0} failed C++/CLI lint check: {1}\n'
'Line {2}: {3}'.format(path, why, i + 1, line))
if failures:
exit(1)
| true | true |
f72574687e5a2e271c479e18ac1e881bdf5eae75 | 578 | py | Python | azure-mgmt-consumption/azure/mgmt/consumption/models/consumption_management_client_enums.py | v-Ajnava/azure-sdk-for-python | a1f6f80eb5869c5b710e8bfb66146546697e2a6f | [
"MIT"
] | 4 | 2016-06-17T23:25:29.000Z | 2022-03-30T22:37:45.000Z | azure-mgmt-consumption/azure/mgmt/consumption/models/consumption_management_client_enums.py | v-Ajnava/azure-sdk-for-python | a1f6f80eb5869c5b710e8bfb66146546697e2a6f | [
"MIT"
] | 2 | 2016-09-30T21:40:24.000Z | 2017-11-10T18:16:18.000Z | azure-mgmt-consumption/azure/mgmt/consumption/models/consumption_management_client_enums.py | v-Ajnava/azure-sdk-for-python | a1f6f80eb5869c5b710e8bfb66146546697e2a6f | [
"MIT"
] | 3 | 2016-05-03T20:49:46.000Z | 2017-10-05T21:05:27.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from enum import Enum
class Datagrain(Enum):
    """Granularity of usage-detail aggregation in the Consumption API."""

    # Usage aggregated per day.
    daily_grain = "daily"
    # Usage aggregated per calendar month.
    monthly_grain = "monthly"
| 30.421053 | 76 | 0.550173 |
from enum import Enum
class Datagrain(Enum):
daily_grain = "daily"
monthly_grain = "monthly"
| true | true |
f725747bf99d90df726266e21ecdd3c5dc730fc5 | 16,227 | py | Python | pyfarm/models/core/mixins.py | guidow/pyfarm-master | d41c8f1eb5bfefb8400d400bcecadf197bcfb80a | [
"Apache-2.0"
] | null | null | null | pyfarm/models/core/mixins.py | guidow/pyfarm-master | d41c8f1eb5bfefb8400d400bcecadf197bcfb80a | [
"Apache-2.0"
] | null | null | null | pyfarm/models/core/mixins.py | guidow/pyfarm-master | d41c8f1eb5bfefb8400d400bcecadf197bcfb80a | [
"Apache-2.0"
] | null | null | null | # No shebang line, this module is meant to be imported
#
# Copyright 2013 Oliver Palmer
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Mixin Classes
=============
Module containing mixins which can be used by multiple models.
"""
from datetime import datetime
from collections import namedtuple
try:
from httplib import INTERNAL_SERVER_ERROR
except ImportError:
from http.client import INTERNAL_SERVER_ERROR
from sqlalchemy.orm import validates, class_mapper
from pyfarm.core.enums import _WorkState, Values, PY2
from pyfarm.core.logger import getLogger
from pyfarm.models.core.types import IPAddress
from pyfarm.master.config import config
logger = getLogger("models.mixin")
# stores information about a model's columns
# and relationships
# (see UtilityMixins.types() for how each field is populated)
ModelTypes = namedtuple(
    "ModelTypes",
    ("primary_keys", "autoincrementing", "columns", "required",
     "relationships", "mappings"))
class ValidatePriorityMixin(object):
    """
    Mixin that validates the ``priority`` and ``attempts`` columns
    against the configured queue limits.

    (The previous docstring incorrectly described a ``state`` column;
    this mixin only validates priority and attempts.)
    """
    # inclusive bounds read from the application config at class creation
    MIN_PRIORITY = config.get("queue_min_priority")
    MAX_PRIORITY = config.get("queue_max_priority")
    # fail fast at import time if the configuration is inconsistent
    if MAX_PRIORITY <= MIN_PRIORITY:
        raise AssertionError(
            "`queue_min_priority` must be <= `queue_max_priority`")
    @validates("priority")
    def validate_priority(self, key, value):
        """ensures the value provided to priority is valid"""
        if value is None or self.MIN_PRIORITY <= value <= self.MAX_PRIORITY:
            return value
        err_args = (key, self.MIN_PRIORITY, self.MAX_PRIORITY, value)
        raise ValueError(
            "%s must be between %s and %s, got %s instead" % err_args)
    @validates("attempts")
    def validate_attempts(self, key, value):
        """ensures the number of attempts provided is valid"""
        if value is None or value >= 0:
            return value
        raise ValueError("%s cannot be less than zero" % key)
class ValidateWorkStateMixin(object):
    """
    Mixin which validates the ``state`` column against a class level
    ``STATE_ENUM`` attribute that subclasses must provide.
    """
    STATE_ENUM = NotImplemented
    def validate_state(self, key, value):
        """Ensures that ``value`` is a member of ``STATE_ENUM``"""
        assert self.STATE_ENUM is not NotImplemented
        if value not in self.STATE_ENUM:
            raise ValueError("`%s` is not a valid state" % value)
        return value
    @validates("state")
    def validate_state_column(self, key, value):
        """validates the state column"""
        return self.validate_state(key, value)
class WorkStateChangedMixin(object):
    """
    Mixin which adds a static method to be used when the model
    state changes
    """
    @staticmethod
    def state_changed(target, new_value, old_value, initiator):
        """update the datetime objects depending on the new value"""
        if (new_value == _WorkState.RUNNING and
                (old_value not in [_WorkState.RUNNING, _WorkState.PAUSED] or
                 target.time_started is None)):  # was `== None`; `target` is
                                                 # an instance, so identity
                                                 # comparison is correct here
            # work is (re)starting: stamp the start time and clear any
            # previous finish time
            target.time_started = datetime.utcnow()
            target.time_finished = None
        elif new_value in (_WorkState.DONE, _WorkState.FAILED):
            target.time_finished = datetime.utcnow()
class UtilityMixins(object):
    """
    Mixins which can be used to produce dictionaries
    of existing data.

    :const dict DICT_CONVERT_COLUMN:
        A dictionary containing key value pairs of attribute names
        and a function to retrieve the attribute.  The function should
        take a single input and return the value itself.  Optionally,
        you can also use the ``NotImplemented`` object to exclude
        some columns from the results.
    """
    DICT_CONVERT_COLUMN = {}
    def _to_dict_column(self, name):
        """
        Default method used by :meth:`.to_dict` to convert a column to
        a standard value.
        """
        value = getattr(self, name)
        # enum members serialize as their string form, IP addresses as str
        if isinstance(value, Values):
            return value.str
        elif isinstance(value, IPAddress):
            return str(value)
        else:
            return value
    def _to_dict_relationship(self, name):
        """
        Default method used by :meth:`.to_dict` to convert a relationship
        to a standard value.  In the event this method does not know
        how to unpack a relationship it will raise a ``NotImplementedError``
        """
        relation = getattr(self.__class__, name)
        relation_object = getattr(self, name)
        if relation_object is None:
            return
        # one-to-many / many-to-many relationships produce a list
        if relation.property.uselist:
            out = []
            for relationship in relation_object:
                if name == "tags":
                    out.append(relationship.tag)
                elif name == "projects":
                    out.append(relationship.name)
                elif name == "software":
                    out.append(relationship.name)
                elif name == "versions":
                    out.append({"id": relationship.id,
                                "version": relationship.version,
                                "rank": relationship.rank})
                elif name == "software_versions":
                    out.append({"id": relationship.id,
                                "software": relationship.software.software,
                                "version": relationship.version,
                                "rank": relationship.rank})
                elif name in ("jobs", "agents"):
                    out.append(relationship.id)
                elif name == "software_requirements":
                    out.append({"software_id": relationship.software_id,
                                "software": relationship.software.software,
                                "min_version_id": relationship.min_version_id,
                                "min_version":
                                    (relationship.min_version.version
                                     if relationship.min_version else None),
                                "max_version_id": relationship.max_version_id,
                                "max_version":
                                    (relationship.max_version.version
                                     if relationship.max_version else None)})
                elif name in ("tasks", "tasks_queued", "tasks_done",
                              "tasks_failed"):
                    out.append({"id": relationship.id,
                                "frame": relationship.frame,
                                "state": str(relationship.state)})
                elif name == "notified_users":
                    out.append({"id": relationship.user_id,
                                "username": relationship.user.username,
                                "email": relationship.user.email,
                                "on_success": relationship.on_success,
                                "on_failure": relationship.on_failure,
                                "on_deletion": relationship.on_deletion})
                elif name == "parents":
                    out.append({"id": relationship.id,
                                "title": relationship.title})
                elif name == "children":
                    out.append({"id": relationship.id,
                                "title": relationship.title})
                elif name == "tag_requirements":
                    out.append({"tag": relationship.tag.tag,
                                "negate": relationship.negate})
                elif name == "gpus":
                    out.append({"fullname": relationship.fullname})
                elif name == "disks":
                    out.append({"mountpoint": relationship.mountpoint,
                                "size": relationship.size,
                                "free": relationship.free})
                else:
                    raise NotImplementedError(
                        "don't know how to unpack relationships for `%s`" % name)
        # scalar (many-to-one / one-to-one) relationships
        else:
            if name == "software":
                out = {"software": relation_object.software,
                       "id": relation_object.id}
            elif name == "jobtype_version":
                out = {"version": relation_object.version,
                       "jobtype": relation_object.jobtype.name}
            elif name in ("min_version", "max_version"):
                out = {"id": relation_object.id,
                       "version": relation_object.version}
            elif name == "job":
                out = {"id": relation_object.id,
                       "title": relation_object.title}
            elif name == "agent":
                out = {"id": relation_object.id,
                       "hostname": relation_object.hostname,
                       "remote_ip": str(relation_object.remote_ip),
                       "port": relation_object.port}
            elif name == "parent":
                out = {"id": relation_object.id,
                       "name": relation_object.name,
                       "priority": relation_object.priority,
                       "weight": relation_object.weight,
                       "maximum_agents": relation_object.maximum_agents,
                       "minimum_agents": relation_object.minimum_agents}
            elif name == "user":
                out = relation_object.username
            elif name == "main_jobtype":
                out = relation_object.name
            else:
                raise NotImplementedError(
                    "don't know how to unpack relationships for `%s`" % name)
        return out
    def to_dict(self, unpack_relationships=True):
        """
        Produce a dictionary of existing data in the table

        :type unpack_relationships: list, tuple, set, bool
        :param unpack_relationships:
            If ``True`` then unpack all relationships.  If
            ``unpack_relationships`` is an iterable such as a list or
            tuple object then only unpack those relationships.
        """
        if not isinstance(self.DICT_CONVERT_COLUMN, dict):
            raise TypeError(
                "expected %s.DICT_CONVERT_COLUMN to "
                "be a dictionary" % self.__class__.__name__)
        results = {}
        types = self.types()
        # first convert all the non-relationship columns
        for name in types.columns:
            converter = self.DICT_CONVERT_COLUMN.get(
                name, self._to_dict_column)
            # NotImplemented excludes the column from the output entirely
            if converter is NotImplemented:
                continue
            elif not callable(converter):
                raise TypeError(
                    "converter function for %s was not callable" % name)
            else:
                results[name] = converter(name)
        # unpack all relationships
        if unpack_relationships is True:
            relationships = types.relationships
        # unpack the intersection of the requested relationships
        # and the real relationships
        elif isinstance(unpack_relationships, (list, set, tuple)):
            relationships = set(unpack_relationships) & types.relationships
        else:
            relationships = set()
        for name in relationships:
            converter = self.DICT_CONVERT_COLUMN.get(
                name, self._to_dict_relationship)
            if converter is NotImplemented:
                continue
            elif not callable(converter):
                raise TypeError(
                    "converter function for %s was not callable" % name)
            else:
                results[name] = converter(name)
        return results
    @classmethod
    def to_schema(cls):
        """
        Produce a dictionary which represents the
        table's schema in a basic format
        """
        result = {}
        for name in cls.types().columns:
            column = cls.__table__.c[name]
            try:
                # custom column types may not implement python_type
                column.type.python_type
            except NotImplementedError:
                result[name] = column.type.__class__.__name__
            else:
                result[name] = str(column.type)
        return result
    @classmethod
    def types(cls):
        """
        A classmethod that constructs a ``namedtuple`` object with four
        attributes:

            * primary_keys - set of all primary key(s) names
            * autoincrementing - set of all columns which have autoincrement set
            * columns - set of all column names
            * required - set of all required columns (non-nullable wo/defaults)
            * relationships - not columns themselves but do store relationships
            * mappings - contains a dictionary with each field mapping to a
              Python type
        """
        mapper = class_mapper(cls)
        primary_keys = set()
        autoincrementing = set()
        columns = set()
        required = set()
        relationships = set(
            name for name, column in mapper.relationships.items())
        # TODO: it's possible though unlikely, based on our current tables,
        # that a relationship this could be some other than a list
        type_mapping = dict((name, list) for name in relationships)
        # create sets for all true columns, primary keys,
        # and required columns
        for name, column in mapper.c.items():
            columns.add(name)
            if column.primary_key:
                primary_keys.add(name)
            if column.autoincrement:
                autoincrementing.add(name)
            if column.primary_key and not column.autoincrement:
                required.add(name)
            if not column.nullable and column.default is None:
                required.add(name)
            # get the Python type(s)
            try:
                python_types = column.type.python_type
            except NotImplementedError:  # custom type object
                python_types = column.type.json_types
            # if we're using Python 2.x be sure that we include
            # a couple of extra types that could potentially
            # come in with a request
            if PY2 and python_types is str:
                # pylint: disable=undefined-variable
                python_types = (python_types, unicode)
            elif PY2 and python_types is int:
                # pylint: disable=undefined-variable
                python_types = (python_types, long)
            type_mapping[name] = python_types
        return ModelTypes(
            primary_keys=primary_keys,
            autoincrementing=autoincrementing,
            columns=columns,
            required=required,
            relationships=relationships,
            mappings=type_mapping)
class ReprMixin(object):
    """
    Mixin giving model classes a readable ``repr()`` built from a chosen
    subset of their columns.

    :cvar tuple REPR_COLUMNS:
        the columns to convert
    :cvar dict REPR_CONVERT_COLUMN:
        optional dictionary containing columns names and functions
        for converting to a more readable string format
    """
    REPR_COLUMNS = NotImplemented
    REPR_CONVERT_COLUMN = {}
    def __repr__(self):
        # subclasses that never declared columns fall back to object.__repr__
        if self.REPR_COLUMNS is NotImplemented:
            return super(ReprMixin, self).__repr__()
        parts = []
        for column_name in self.REPR_COLUMNS:
            formatter = self.REPR_CONVERT_COLUMN.get(column_name, repr)
            try:
                parts.append(
                    "%s=%s" % (column_name,
                               formatter(getattr(self, column_name))))
            except AttributeError:
                # log and keep going rather than failing the whole repr
                logger.warning("%s has no such column %s" % (
                    self.__class__.__name__, repr(column_name)))
        return "%s(%s)" % (self.__class__.__name__, ", ".join(parts))
| 37.21789 | 81 | 0.570038 |
from datetime import datetime
from collections import namedtuple
try:
from httplib import INTERNAL_SERVER_ERROR
except ImportError:
from http.client import INTERNAL_SERVER_ERROR
from sqlalchemy.orm import validates, class_mapper
from pyfarm.core.enums import _WorkState, Values, PY2
from pyfarm.core.logger import getLogger
from pyfarm.models.core.types import IPAddress
from pyfarm.master.config import config
logger = getLogger("models.mixin")
# and relationships
ModelTypes = namedtuple(
"ModelTypes",
("primary_keys", "autoincrementing", "columns", "required",
"relationships", "mappings"))
class ValidatePriorityMixin(object):
MIN_PRIORITY = config.get("queue_min_priority")
MAX_PRIORITY = config.get("queue_max_priority")
if MAX_PRIORITY <= MIN_PRIORITY:
raise AssertionError(
"`queue_min_priority` must be <= `queue_max_priority`")
@validates("priority")
def validate_priority(self, key, value):
if value is None or self.MIN_PRIORITY <= value <= self.MAX_PRIORITY:
return value
err_args = (key, self.MIN_PRIORITY, self.MAX_PRIORITY, value)
raise ValueError(
"%s must be between %s and %s, got %s instead" % err_args)
@validates("attempts")
def validate_attempts(self, key, value):
if value is None or value >= 0:
return value
raise ValueError("%s cannot be less than zero" % key)
class ValidateWorkStateMixin(object):
STATE_ENUM = NotImplemented
def validate_state(self, key, value):
assert self.STATE_ENUM is not NotImplemented
if value not in self.STATE_ENUM:
raise ValueError("`%s` is not a valid state" % value)
return value
@validates("state")
def validate_state_column(self, key, value):
return self.validate_state(key, value)
class WorkStateChangedMixin(object):
@staticmethod
def state_changed(target, new_value, old_value, initiator):
if (new_value == _WorkState.RUNNING and
(old_value not in [_WorkState.RUNNING, _WorkState.PAUSED] or
target.time_started == None)):
target.time_started = datetime.utcnow()
target.time_finished = None
elif new_value in (_WorkState.DONE, _WorkState.FAILED):
target.time_finished = datetime.utcnow()
class UtilityMixins(object):
DICT_CONVERT_COLUMN = {}
def _to_dict_column(self, name):
value = getattr(self, name)
if isinstance(value, Values):
return value.str
elif isinstance(value, IPAddress):
return str(value)
else:
return value
def _to_dict_relationship(self, name):
relation = getattr(self.__class__, name)
relation_object = getattr(self, name)
if relation_object is None:
return
if relation.property.uselist:
out = []
for relationship in relation_object:
if name == "tags":
out.append(relationship.tag)
elif name == "projects":
out.append(relationship.name)
elif name == "software":
out.append(relationship.name)
elif name == "versions":
out.append({"id": relationship.id,
"version": relationship.version,
"rank": relationship.rank})
elif name == "software_versions":
out.append({"id": relationship.id,
"software": relationship.software.software,
"version": relationship.version,
"rank": relationship.rank})
elif name in ("jobs", "agents"):
out.append(relationship.id)
elif name == "software_requirements":
out.append({"software_id": relationship.software_id,
"software": relationship.software.software,
"min_version_id": relationship.min_version_id,
"min_version":
(relationship.min_version.version
if relationship.min_version else None),
"max_version_id": relationship.max_version_id,
"max_version":
(relationship.max_version.version
if relationship.max_version else None)})
elif name in ("tasks", "tasks_queued", "tasks_done",
"tasks_failed"):
out.append({"id": relationship.id,
"frame": relationship.frame,
"state": str(relationship.state)})
elif name == "notified_users":
out.append({"id": relationship.user_id,
"username": relationship.user.username,
"email": relationship.user.email,
"on_success": relationship.on_success,
"on_failure": relationship.on_failure,
"on_deletion": relationship.on_deletion})
elif name == "parents":
out.append({"id": relationship.id,
"title": relationship.title})
elif name == "children":
out.append({"id": relationship.id,
"title": relationship.title})
elif name == "tag_requirements":
out.append({"tag": relationship.tag.tag,
"negate": relationship.negate})
elif name == "gpus":
out.append({"fullname": relationship.fullname})
elif name == "disks":
out.append({"mountpoint": relationship.mountpoint,
"size": relationship.size,
"free": relationship.free})
else:
raise NotImplementedError(
"don't know how to unpack relationships for `%s`" % name)
else:
if name == "software":
out = {"software": relation_object.software,
"id": relation_object.id}
elif name == "jobtype_version":
out = {"version": relation_object.version,
"jobtype": relation_object.jobtype.name}
elif name in ("min_version", "max_version"):
out = {"id": relation_object.id,
"version": relation_object.version}
elif name == "job":
out = {"id": relation_object.id,
"title": relation_object.title}
elif name == "agent":
out = {"id": relation_object.id,
"hostname": relation_object.hostname,
"remote_ip": str(relation_object.remote_ip),
"port": relation_object.port}
elif name == "parent":
out = {"id": relation_object.id,
"name": relation_object.name,
"priority": relation_object.priority,
"weight": relation_object.weight,
"maximum_agents": relation_object.maximum_agents,
"minimum_agents": relation_object.minimum_agents}
elif name == "user":
out = relation_object.username
elif name == "main_jobtype":
out = relation_object.name
else:
raise NotImplementedError(
"don't know how to unpack relationships for `%s`" % name)
return out
def to_dict(self, unpack_relationships=True):
if not isinstance(self.DICT_CONVERT_COLUMN, dict):
raise TypeError(
"expected %s.DICT_CONVERT_COLUMN to "
"be a dictionary" % self.__class__.__name__)
results = {}
types = self.types()
# first convert all the non-relationship columns
for name in types.columns:
converter = self.DICT_CONVERT_COLUMN.get(
name, self._to_dict_column)
if converter is NotImplemented:
continue
elif not callable(converter):
raise TypeError(
"converter function for %s was not callable" % name)
else:
results[name] = converter(name)
# unpack all relationships
if unpack_relationships is True:
relationships = types.relationships
# unpack the intersection of the requested relationships
# and the real relationships
elif isinstance(unpack_relationships, (list, set, tuple)):
relationships = set(unpack_relationships) & types.relationships
else:
relationships = set()
for name in relationships:
converter = self.DICT_CONVERT_COLUMN.get(
name, self._to_dict_relationship)
if converter is NotImplemented:
continue
elif not callable(converter):
raise TypeError(
"converter function for %s was not callable" % name)
else:
results[name] = converter(name)
return results
@classmethod
def to_schema(cls):
result = {}
for name in cls.types().columns:
column = cls.__table__.c[name]
try:
column.type.python_type
except NotImplementedError:
result[name] = column.type.__class__.__name__
else:
result[name] = str(column.type)
return result
@classmethod
def types(cls):
mapper = class_mapper(cls)
primary_keys = set()
autoincrementing = set()
columns = set()
required = set()
relationships = set(
name for name, column in mapper.relationships.items())
# TODO: it's possible though unlikely, based on our current tables,
type_mapping = dict((name, list) for name in relationships)
for name, column in mapper.c.items():
columns.add(name)
if column.primary_key:
primary_keys.add(name)
if column.autoincrement:
autoincrementing.add(name)
if column.primary_key and not column.autoincrement:
required.add(name)
if not column.nullable and column.default is None:
required.add(name)
try:
python_types = column.type.python_type
except NotImplementedError:
python_types = column.type.json_types
# a couple of extra types that could potentially
# come in with a request
if PY2 and python_types is str:
# pylint: disable=undefined-variable
python_types = (python_types, unicode)
elif PY2 and python_types is int:
# pylint: disable=undefined-variable
python_types = (python_types, long)
type_mapping[name] = python_types
return ModelTypes(
primary_keys=primary_keys,
autoincrementing=autoincrementing,
columns=columns,
required=required,
relationships=relationships,
mappings=type_mapping)
class ReprMixin(object):
REPR_COLUMNS = NotImplemented
REPR_CONVERT_COLUMN = {}
def __repr__(self):
if self.REPR_COLUMNS is NotImplemented:
return super(ReprMixin, self).__repr__()
column_data = []
for name in self.REPR_COLUMNS:
convert = self.REPR_CONVERT_COLUMN.get(name, repr)
try:
column_data.append(
"%s=%s" % (name, convert(getattr(self, name))))
except AttributeError:
logger.warning("%s has no such column %s" % (
self.__class__.__name__, repr(name)))
return "%s(%s)" % (self.__class__.__name__, ", ".join(column_data))
| true | true |
f7257517d39b2f8675741bc3a48ae828e67d5468 | 495 | py | Python | src/commercetools/services/abstract.py | labd/commercetools-python-sdk | d8ec285f08d56ede2e4cad45c74833f5b609ab5c | [
"MIT"
] | 15 | 2018-11-02T14:35:52.000Z | 2022-03-16T07:51:44.000Z | src/commercetools/services/abstract.py | lime-green/commercetools-python-sdk | 63b77f6e5abe43e2b3ebbf3cdbbe00c7cf80dca6 | [
"MIT"
] | 84 | 2018-11-02T12:50:32.000Z | 2022-03-22T01:25:54.000Z | src/commercetools/services/abstract.py | lime-green/commercetools-python-sdk | 63b77f6e5abe43e2b3ebbf3cdbbe00c7cf80dca6 | [
"MIT"
] | 13 | 2019-01-03T09:16:50.000Z | 2022-02-15T18:37:19.000Z | import typing
from marshmallow.base import SchemaABC
if typing.TYPE_CHECKING:
from commercetools.client import Client
class AbstractService:
    """Base class for API services.

    Holds the shared client and a per-service cache of instantiated
    marshmallow schemas so each schema class is constructed only once.
    """

    def __init__(self, client: "Client") -> None:
        self._client = client
        # Schema instances cached by their schema class.
        self._schemas: typing.Dict[str, SchemaABC] = {}

    def _serialize_params(self, params, schema) -> typing.Dict[str, str]:
        """Dump *params* through *schema*, reusing a cached instance."""
        try:
            instance = self._schemas[schema]
        except KeyError:
            instance = self._schemas[schema] = schema()
        return instance.dump(params)
| 27.5 | 73 | 0.684848 | import typing
from marshmallow.base import SchemaABC
if typing.TYPE_CHECKING:
from commercetools.client import Client
class AbstractService:
def __init__(self, client: "Client") -> None:
self._client = client
self._schemas: typing.Dict[str, SchemaABC] = {}
def _serialize_params(self, params, schema) -> typing.Dict[str, str]:
if schema not in self._schemas:
self._schemas[schema] = schema()
return self._schemas[schema].dump(params)
| true | true |
f72576ba10ecc45859e387a54131b2e173076e5e | 13,744 | py | Python | pinpayments/tests/models.py | branchup/django-pinpayments | e342970c6309facf35b804de9994d326abaa094f | [
"Unlicense"
] | null | null | null | pinpayments/tests/models.py | branchup/django-pinpayments | e342970c6309facf35b804de9994d326abaa094f | [
"Unlicense"
] | null | null | null | pinpayments/tests/models.py | branchup/django-pinpayments | e342970c6309facf35b804de9994d326abaa094f | [
"Unlicense"
] | null | null | null | """ Ensure that the models work as intended """
import json
from django.conf import settings
from django.contrib.auth.models import User
from django.test import TestCase
from django.test.utils import override_settings
from mock import patch
from pinpayments.models import (
ConfigError,
CustomerToken,
PinError,
PinTransaction
)
from requests import Response
ENV_MISSING_SECRET = {
'test': {
'key': 'key1',
'host': 'test-api.pin.net.au',
},
}
ENV_MISSING_HOST = {
'test': {
'key': 'key1',
'secret': 'secret1',
},
}
class FakeResponse(Response):
    """A canned :class:`requests.Response` used to stub out HTTP calls.

    Tests patch ``requests.post`` to return one of these, controlling
    exactly what the Pin API appears to respond with.
    """
    def __init__(self, status_code, content):
        super(FakeResponse, self).__init__()
        self.status_code = status_code
        # ``Response`` exposes this private attribute through its public
        # ``.content`` / ``.text`` / ``.json()`` accessors.
        self._content = content
class CustomerTokenTests(TestCase):
    """Tests for :class:`CustomerToken` environment defaulting."""

    # Need to override the setting so we can delete it, not sure why.
    @override_settings(PIN_DEFAULT_ENVIRONMENT=None)
    def test_default_environment(self):
        """
        Unset PIN_DEFAULT_ENVIRONMENT to test that the environment defaults
        to 'test'.
        """
        del settings.PIN_DEFAULT_ENVIRONMENT
        token = CustomerToken()
        token.user = User.objects.create()
        # Explicitly clear the field so save() must fill in the default.
        token.environment = None
        token.save()
        self.assertEqual(token.environment, 'test')
class CreateFromCardTokenTests(TestCase):
""" Test the creation of customer tokens from card tokens """
def setUp(self):
""" Common setup for methods """
super(CreateFromCardTokenTests, self).setUp()
self.user = User.objects.create()
self.response_data = json.dumps({
'response': {
'token': '1234',
'email': 'test@example.com',
'created_at': '2012-06-22T06:27:33Z',
'card': {
'token': '54321',
'display_number': 'XXXX-XXXX-XXXX-0000',
'scheme': 'master',
'expiry_month': 6,
'expiry_year': 2017,
'name': 'Roland Robot',
'address_line1': '42 Sevenoaks St',
'address_line2': None,
'address_city': 'Lathlain',
'address_postcode': '6454',
'address_state': 'WA',
'address_country': 'Australia',
'primary': None,
}
}
})
self.response_error = json.dumps({
'error': 'invalid_resource',
'error_description':
'One or more parameters were missing or invalid.'
})
@patch('requests.post')
def test_default_environment(self, mock_request):
""" return a default environment """
mock_request.return_value = FakeResponse(200, self.response_data)
token = CustomerToken.create_from_card_token('1234', self.user)
self.assertEqual(token.environment, 'test')
@override_settings(PIN_ENVIRONMENTS={})
@patch('requests.post')
def test_valid_environment(self, mock_request):
""" Check errors are raised with no environments """
mock_request.return_value = FakeResponse(200, self.response_data)
with self.assertRaises(ConfigError):
CustomerToken.create_from_card_token(
'1234', self.user, environment='test'
)
@override_settings(PIN_ENVIRONMENTS=ENV_MISSING_SECRET)
@patch('requests.post')
def test_secret_set(self, mock_request):
""" Check errors are raised when the secret is not set """
mock_request.return_value = FakeResponse(200, self.response_data)
with self.assertRaises(ConfigError):
CustomerToken.create_from_card_token(
'1234', self.user, environment='test'
)
@override_settings(PIN_ENVIRONMENTS=ENV_MISSING_HOST)
@patch('requests.post')
def test_host_set(self, mock_request):
""" Check errors are raised when the host is not set """
mock_request.return_value = FakeResponse(200, self.response_data)
with self.assertRaises(ConfigError):
CustomerToken.create_from_card_token(
'1234', self.user, environment='test'
)
@patch('requests.post')
def test_response_not_json(self, mock_request):
""" Validate non-json response """
mock_request.return_value = FakeResponse(200, '')
with self.assertRaises(PinError):
CustomerToken.create_from_card_token(
'1234', self.user, environment='test'
)
@patch('requests.post')
def test_response_error(self, mock_request):
""" Validate generic error response """
mock_request.return_value = FakeResponse(200, self.response_error)
with self.assertRaises(PinError):
CustomerToken.create_from_card_token(
'1234', self.user, environment='test'
)
@patch('requests.post')
def test_response_success(self, mock_request):
""" Validate successful response """
mock_request.return_value = FakeResponse(200, self.response_data)
customer = CustomerToken.create_from_card_token(
'1234', self.user, environment='test'
)
self.assertIsInstance(customer, CustomerToken)
self.assertEqual(customer.user, self.user)
self.assertEqual(customer.token, '1234')
self.assertEqual(customer.environment, 'test')
self.assertEqual(customer.card_number, 'XXXX-XXXX-XXXX-0000')
self.assertEqual(customer.card_type, 'master')
class PinTransactionTests(TestCase):
    """ Transaction construction/init related tests """

    def setUp(self):
        """ Common setup for methods """
        super(PinTransactionTests, self).setUp()
        # A minimally-valid, unsaved transaction shared by the tests below.
        self.transaction = PinTransaction()
        self.transaction.card_token = '12345'
        self.transaction.ip_address = '127.0.0.1'
        self.transaction.amount = 500
        self.transaction.currency = 'AUD'
        self.transaction.email_address = 'test@example.com'
        self.transaction.environment = 'test'

    # Need to override the setting so we can delete it, not sure why.
    @override_settings(PIN_DEFAULT_ENVIRONMENT=None)
    def test_save_defaults(self):
        """
        Unset PIN_DEFAULT_ENVIRONMENT to test that the environment defaults
        to 'test'.
        """
        del settings.PIN_DEFAULT_ENVIRONMENT
        self.transaction.environment = None
        self.transaction.save()
        self.assertEqual(self.transaction.environment, 'test')
        # save() should also stamp the transaction date.
        self.assertTrue(self.transaction.date)

    def test_save_notokens(self):
        """
        Check that an error is thrown if neither card nor customer token
        are provided to the transaction
        """
        self.transaction.card_token = None
        self.transaction.customer_token = None
        self.assertRaises(PinError, self.transaction.save)

    def test_valid_environment(self):
        """
        Check that errors are thrown when a fake environment is requested
        """
        self.transaction.environment = 'this should not exist'
        self.assertRaises(PinError, self.transaction.save)
class ProcessTransactionsTests(TestCase):
    """ Transaction processing related tests """

    def setUp(self):
        """ Common setup for methods """
        super(ProcessTransactionsTests, self).setUp()
        # A saved (but unprocessed) transaction shared by the tests below.
        self.transaction = PinTransaction()
        self.transaction.card_token = '12345'
        self.transaction.ip_address = '127.0.0.1'
        self.transaction.amount = 500
        self.transaction.currency = 'AUD'
        self.transaction.email_address = 'test@example.com'
        self.transaction.environment = 'test'
        self.transaction.save()
        # Canned successful charge payload mirroring Pin's charges API.
        self.response_data = json.dumps({
            'response': {
                'token': '12345',
                'success': True,
                'amount': 500,
                'total_fees': 500,
                'currency': 'AUD',
                'description': 'test charge',
                'email': 'test@example.com',
                'ip_address': '127.0.0.1',
                'created_at': '2012-06-20T03:10:49Z',
                'status_message': 'Success!',
                'error_message': None,
                'card': {
                    'token': 'card_nytGw7koRg23EEp9NTmz9w',
                    'display_number': 'XXXX-XXXX-XXXX-0000',
                    'scheme': 'master',
                    'expiry_month': 6,
                    'expiry_year': 2017,
                    'name': 'Roland Robot',
                    'address_line1': '42 Sevenoaks St',
                    'address_line2': None,
                    'address_city': 'Lathlain',
                    'address_postcode': '6454',
                    'address_state': 'WA',
                    'address_country': 'Australia',
                    'primary': None,
                },
                'transfer': None
            }
        })
        # Error payload carrying per-parameter validation messages.
        self.response_error = json.dumps({
            'error': 'invalid_resource',
            'error_description':
                'One or more parameters were missing or invalid.',
            # Should there really be a charge token?
            'charge_token': '1234',
            'messages': [{
                'code': 'description_invalid',
                'message': 'Description can\'t be blank',
                'param': 'description'
            }]
        })
        # Same error payload but without the 'messages' detail list.
        self.response_error_no_messages = json.dumps({
            'error': 'invalid_resource',
            'error_description':
                'One or more parameters were missing or invalid.',
            # Should there really be a charge token?
            'charge_token': '1234'
        })

    @patch('requests.post')
    def test_only_process_once(self, mock_request):
        """ Check that transactions are processed exactly once """
        mock_request.return_value = FakeResponse(200, self.response_data)
        # Shouldn't be marked as processed before process_transaction is called
        # for the first time.
        self.assertFalse(self.transaction.processed)
        # Should be marked after the first call.
        result = self.transaction.process_transaction()
        self.assertTrue(self.transaction.processed)
        # Shouldn't process anything the second time
        result = self.transaction.process_transaction()
        self.assertIsNone(result)

    @override_settings(PIN_ENVIRONMENTS={})
    @patch('requests.post')
    def test_valid_environment(self, mock_request):
        """ Check that an error is thrown with no environment """
        mock_request.return_value = FakeResponse(200, self.response_data)
        self.assertRaises(PinError, self.transaction.process_transaction)

    @override_settings(PIN_ENVIRONMENTS=ENV_MISSING_SECRET)
    @patch('requests.post')
    def test_secret_set(self, mock_request):
        """ Check that an error is thrown with no secret """
        mock_request.return_value = FakeResponse(200, self.response_data)
        self.assertRaises(ConfigError, self.transaction.process_transaction)

    @override_settings(PIN_ENVIRONMENTS=ENV_MISSING_HOST)
    @patch('requests.post')
    def test_host_set(self, mock_request):
        """ Check that an error is thrown with no host """
        mock_request.return_value = FakeResponse(200, self.response_data)
        self.assertRaises(ConfigError, self.transaction.process_transaction)

    @patch('requests.post')
    def test_response_not_json(self, mock_request):
        """ Check that failure is returned for non-JSON responses """
        mock_request.return_value = FakeResponse(200, '')
        response = self.transaction.process_transaction()
        self.assertEqual(response, 'Failure.')

    @patch('requests.post')
    def test_response_badparam(self, mock_request):
        """ Check that a specific error is thrown for invalid parameters """
        mock_request.return_value = FakeResponse(200, self.response_error)
        response = self.transaction.process_transaction()
        self.assertEqual(response, 'Failure: Description can\'t be blank')

    @patch('requests.post')
    def test_response_noparam(self, mock_request):
        """ Check that a specific error is thrown for missing parameters """
        mock_request.return_value = FakeResponse(
            200, self.response_error_no_messages
        )
        response = self.transaction.process_transaction()
        self.assertEqual(
            response,
            'Failure: One or more parameters were missing or invalid.'
        )

    @patch('requests.post')
    def test_response_success(self, mock_request):
        """ Check that the success response is correctly processed """
        mock_request.return_value = FakeResponse(200, self.response_data)
        response = self.transaction.process_transaction()
        self.assertEqual(response, 'Success!')
        self.assertTrue(self.transaction.succeeded)
        self.assertEqual(self.transaction.transaction_token, '12345')
        # total_fees of 500 (cents) is stored on the model as 5.0 (dollars).
        self.assertEqual(self.transaction.fees, 5.0)
        self.assertEqual(self.transaction.pin_response, 'Success!')
        self.assertEqual(self.transaction.card_address1, '42 Sevenoaks St')
        self.assertIsNone(self.transaction.card_address2)
        self.assertEqual(self.transaction.card_city, 'Lathlain')
        self.assertEqual(self.transaction.card_state, 'WA')
        self.assertEqual(self.transaction.card_postcode, '6454')
        self.assertEqual(self.transaction.card_country, 'Australia')
        self.assertEqual(self.transaction.card_number, 'XXXX-XXXX-XXXX-0000')
        self.assertEqual(self.transaction.card_type, 'master')
| 39.608069 | 79 | 0.621799 | import json
from django.conf import settings
from django.contrib.auth.models import User
from django.test import TestCase
from django.test.utils import override_settings
from mock import patch
from pinpayments.models import (
ConfigError,
CustomerToken,
PinError,
PinTransaction
)
from requests import Response
ENV_MISSING_SECRET = {
'test': {
'key': 'key1',
'host': 'test-api.pin.net.au',
},
}
ENV_MISSING_HOST = {
'test': {
'key': 'key1',
'secret': 'secret1',
},
}
class FakeResponse(Response):
def __init__(self, status_code, content):
super(FakeResponse, self).__init__()
self.status_code = status_code
self._content = content
class CustomerTokenTests(TestCase):
@override_settings(PIN_DEFAULT_ENVIRONMENT=None)
def test_default_environment(self):
del settings.PIN_DEFAULT_ENVIRONMENT
token = CustomerToken()
token.user = User.objects.create()
token.environment = None
token.save()
self.assertEqual(token.environment, 'test')
class CreateFromCardTokenTests(TestCase):
def setUp(self):
super(CreateFromCardTokenTests, self).setUp()
self.user = User.objects.create()
self.response_data = json.dumps({
'response': {
'token': '1234',
'email': 'test@example.com',
'created_at': '2012-06-22T06:27:33Z',
'card': {
'token': '54321',
'display_number': 'XXXX-XXXX-XXXX-0000',
'scheme': 'master',
'expiry_month': 6,
'expiry_year': 2017,
'name': 'Roland Robot',
'address_line1': '42 Sevenoaks St',
'address_line2': None,
'address_city': 'Lathlain',
'address_postcode': '6454',
'address_state': 'WA',
'address_country': 'Australia',
'primary': None,
}
}
})
self.response_error = json.dumps({
'error': 'invalid_resource',
'error_description':
'One or more parameters were missing or invalid.'
})
@patch('requests.post')
def test_default_environment(self, mock_request):
mock_request.return_value = FakeResponse(200, self.response_data)
token = CustomerToken.create_from_card_token('1234', self.user)
self.assertEqual(token.environment, 'test')
@override_settings(PIN_ENVIRONMENTS={})
@patch('requests.post')
def test_valid_environment(self, mock_request):
mock_request.return_value = FakeResponse(200, self.response_data)
with self.assertRaises(ConfigError):
CustomerToken.create_from_card_token(
'1234', self.user, environment='test'
)
@override_settings(PIN_ENVIRONMENTS=ENV_MISSING_SECRET)
@patch('requests.post')
def test_secret_set(self, mock_request):
mock_request.return_value = FakeResponse(200, self.response_data)
with self.assertRaises(ConfigError):
CustomerToken.create_from_card_token(
'1234', self.user, environment='test'
)
@override_settings(PIN_ENVIRONMENTS=ENV_MISSING_HOST)
@patch('requests.post')
def test_host_set(self, mock_request):
mock_request.return_value = FakeResponse(200, self.response_data)
with self.assertRaises(ConfigError):
CustomerToken.create_from_card_token(
'1234', self.user, environment='test'
)
@patch('requests.post')
def test_response_not_json(self, mock_request):
mock_request.return_value = FakeResponse(200, '')
with self.assertRaises(PinError):
CustomerToken.create_from_card_token(
'1234', self.user, environment='test'
)
@patch('requests.post')
def test_response_error(self, mock_request):
mock_request.return_value = FakeResponse(200, self.response_error)
with self.assertRaises(PinError):
CustomerToken.create_from_card_token(
'1234', self.user, environment='test'
)
@patch('requests.post')
def test_response_success(self, mock_request):
mock_request.return_value = FakeResponse(200, self.response_data)
customer = CustomerToken.create_from_card_token(
'1234', self.user, environment='test'
)
self.assertIsInstance(customer, CustomerToken)
self.assertEqual(customer.user, self.user)
self.assertEqual(customer.token, '1234')
self.assertEqual(customer.environment, 'test')
self.assertEqual(customer.card_number, 'XXXX-XXXX-XXXX-0000')
self.assertEqual(customer.card_type, 'master')
class PinTransactionTests(TestCase):
def setUp(self):
super(PinTransactionTests, self).setUp()
self.transaction = PinTransaction()
self.transaction.card_token = '12345'
self.transaction.ip_address = '127.0.0.1'
self.transaction.amount = 500
self.transaction.currency = 'AUD'
self.transaction.email_address = 'test@example.com'
self.transaction.environment = 'test'
@override_settings(PIN_DEFAULT_ENVIRONMENT=None)
def test_save_defaults(self):
del settings.PIN_DEFAULT_ENVIRONMENT
self.transaction.environment = None
self.transaction.save()
self.assertEqual(self.transaction.environment, 'test')
self.assertTrue(self.transaction.date)
def test_save_notokens(self):
self.transaction.card_token = None
self.transaction.customer_token = None
self.assertRaises(PinError, self.transaction.save)
def test_valid_environment(self):
self.transaction.environment = 'this should not exist'
self.assertRaises(PinError, self.transaction.save)
class ProcessTransactionsTests(TestCase):
def setUp(self):
super(ProcessTransactionsTests, self).setUp()
self.transaction = PinTransaction()
self.transaction.card_token = '12345'
self.transaction.ip_address = '127.0.0.1'
self.transaction.amount = 500
self.transaction.currency = 'AUD'
self.transaction.email_address = 'test@example.com'
self.transaction.environment = 'test'
self.transaction.save()
self.response_data = json.dumps({
'response': {
'token': '12345',
'success': True,
'amount': 500,
'total_fees': 500,
'currency': 'AUD',
'description': 'test charge',
'email': 'test@example.com',
'ip_address': '127.0.0.1',
'created_at': '2012-06-20T03:10:49Z',
'status_message': 'Success!',
'error_message': None,
'card': {
'token': 'card_nytGw7koRg23EEp9NTmz9w',
'display_number': 'XXXX-XXXX-XXXX-0000',
'scheme': 'master',
'expiry_month': 6,
'expiry_year': 2017,
'name': 'Roland Robot',
'address_line1': '42 Sevenoaks St',
'address_line2': None,
'address_city': 'Lathlain',
'address_postcode': '6454',
'address_state': 'WA',
'address_country': 'Australia',
'primary': None,
},
'transfer': None
}
})
self.response_error = json.dumps({
'error': 'invalid_resource',
'error_description':
'One or more parameters were missing or invalid.',
'charge_token': '1234',
'messages': [{
'code': 'description_invalid',
'message': 'Description can\'t be blank',
'param': 'description'
}]
})
self.response_error_no_messages = json.dumps({
'error': 'invalid_resource',
'error_description':
'One or more parameters were missing or invalid.',
# Should there really be a charge token?
'charge_token': '1234'
})
@patch('requests.post')
def test_only_process_once(self, mock_request):
mock_request.return_value = FakeResponse(200, self.response_data)
# Shouldn't be marked as processed before process_transaction is called
self.assertFalse(self.transaction.processed)
result = self.transaction.process_transaction()
self.assertTrue(self.transaction.processed)
result = self.transaction.process_transaction()
self.assertIsNone(result)
@override_settings(PIN_ENVIRONMENTS={})
@patch('requests.post')
def test_valid_environment(self, mock_request):
mock_request.return_value = FakeResponse(200, self.response_data)
self.assertRaises(PinError, self.transaction.process_transaction)
@override_settings(PIN_ENVIRONMENTS=ENV_MISSING_SECRET)
@patch('requests.post')
def test_secret_set(self, mock_request):
mock_request.return_value = FakeResponse(200, self.response_data)
self.assertRaises(ConfigError, self.transaction.process_transaction)
@override_settings(PIN_ENVIRONMENTS=ENV_MISSING_HOST)
@patch('requests.post')
def test_host_set(self, mock_request):
mock_request.return_value = FakeResponse(200, self.response_data)
self.assertRaises(ConfigError, self.transaction.process_transaction)
@patch('requests.post')
def test_response_not_json(self, mock_request):
mock_request.return_value = FakeResponse(200, '')
response = self.transaction.process_transaction()
self.assertEqual(response, 'Failure.')
@patch('requests.post')
def test_response_badparam(self, mock_request):
mock_request.return_value = FakeResponse(200, self.response_error)
response = self.transaction.process_transaction()
self.assertEqual(response, 'Failure: Description can\'t be blank')
@patch('requests.post')
def test_response_noparam(self, mock_request):
mock_request.return_value = FakeResponse(
200, self.response_error_no_messages
)
response = self.transaction.process_transaction()
self.assertEqual(
response,
'Failure: One or more parameters were missing or invalid.'
)
@patch('requests.post')
def test_response_success(self, mock_request):
mock_request.return_value = FakeResponse(200, self.response_data)
response = self.transaction.process_transaction()
self.assertEqual(response, 'Success!')
self.assertTrue(self.transaction.succeeded)
self.assertEqual(self.transaction.transaction_token, '12345')
self.assertEqual(self.transaction.fees, 5.0)
self.assertEqual(self.transaction.pin_response, 'Success!')
self.assertEqual(self.transaction.card_address1, '42 Sevenoaks St')
self.assertIsNone(self.transaction.card_address2)
self.assertEqual(self.transaction.card_city, 'Lathlain')
self.assertEqual(self.transaction.card_state, 'WA')
self.assertEqual(self.transaction.card_postcode, '6454')
self.assertEqual(self.transaction.card_country, 'Australia')
self.assertEqual(self.transaction.card_number, 'XXXX-XXXX-XXXX-0000')
self.assertEqual(self.transaction.card_type, 'master')
| true | true |
f72576c2d1acd4d4c339871d110e259e22d8e73b | 13,114 | py | Python | sdk/python/pulumi_azure_native/network/v20200501/express_route_circuit_authorization.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/network/v20200501/express_route_circuit_authorization.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/network/v20200501/express_route_circuit_authorization.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from ._enums import *
__all__ = ['ExpressRouteCircuitAuthorization']
class ExpressRouteCircuitAuthorization(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
authorization_key: Optional[pulumi.Input[str]] = None,
authorization_name: Optional[pulumi.Input[str]] = None,
authorization_use_status: Optional[pulumi.Input[Union[str, 'AuthorizationUseStatus']]] = None,
circuit_name: Optional[pulumi.Input[str]] = None,
id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Authorization in an ExpressRouteCircuit resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] authorization_key: The authorization key.
:param pulumi.Input[str] authorization_name: The name of the authorization.
:param pulumi.Input[Union[str, 'AuthorizationUseStatus']] authorization_use_status: The authorization use status.
:param pulumi.Input[str] circuit_name: The name of the express route circuit.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[str] name: The name of the resource that is unique within a resource group. This name can be used to access the resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['authorization_key'] = authorization_key
__props__['authorization_name'] = authorization_name
__props__['authorization_use_status'] = authorization_use_status
if circuit_name is None and not opts.urn:
raise TypeError("Missing required property 'circuit_name'")
__props__['circuit_name'] = circuit_name
__props__['id'] = id
__props__['name'] = name
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['etag'] = None
__props__['provisioning_state'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network/v20200501:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-native:network:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-native:network/latest:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/latest:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-native:network/v20150501preview:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20150501preview:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-native:network/v20150615:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20150615:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-native:network/v20160330:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20160330:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-native:network/v20160601:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20160601:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-native:network/v20160901:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20160901:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-native:network/v20161201:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20161201:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-native:network/v20170301:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20170301:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-native:network/v20170601:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20170601:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-native:network/v20170801:ExpressRouteCircuitAuthorization"), 
pulumi.Alias(type_="azure-nextgen:network/v20170801:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-native:network/v20170901:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20170901:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-native:network/v20171001:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20171001:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-native:network/v20171101:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20171101:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-native:network/v20180101:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20180101:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-native:network/v20180201:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20180201:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-native:network/v20180401:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20180401:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-native:network/v20180601:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20180601:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-native:network/v20180701:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20180701:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-native:network/v20180801:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20180801:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-native:network/v20181001:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20181001:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-native:network/v20181101:ExpressRouteCircuitAuthorization"), 
pulumi.Alias(type_="azure-nextgen:network/v20181101:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-native:network/v20181201:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20181201:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-native:network/v20190201:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20190201:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-native:network/v20190401:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20190401:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-native:network/v20190601:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20190601:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-native:network/v20190701:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20190701:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-native:network/v20190801:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20190801:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-native:network/v20190901:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20190901:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-native:network/v20191101:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20191101:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-native:network/v20191201:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20191201:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-native:network/v20200301:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20200301:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-native:network/v20200401:ExpressRouteCircuitAuthorization"), 
pulumi.Alias(type_="azure-nextgen:network/v20200401:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-native:network/v20200601:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20200601:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-native:network/v20200701:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20200701:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-native:network/v20200801:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20200801:ExpressRouteCircuitAuthorization")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(ExpressRouteCircuitAuthorization, __self__).__init__(
'azure-native:network/v20200501:ExpressRouteCircuitAuthorization',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'ExpressRouteCircuitAuthorization':
"""
Get an existing ExpressRouteCircuitAuthorization resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["authorization_key"] = None
__props__["authorization_use_status"] = None
__props__["etag"] = None
__props__["name"] = None
__props__["provisioning_state"] = None
__props__["type"] = None
return ExpressRouteCircuitAuthorization(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="authorizationKey")
def authorization_key(self) -> pulumi.Output[Optional[str]]:
"""
The authorization key.
"""
return pulumi.get(self, "authorization_key")
@property
@pulumi.getter(name="authorizationUseStatus")
def authorization_use_status(self) -> pulumi.Output[Optional[str]]:
"""
The authorization use status.
"""
return pulumi.get(self, "authorization_use_status")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def name(self) -> pulumi.Output[Optional[str]]:
"""
The name of the resource that is unique within a resource group. This name can be used to access the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The provisioning state of the authorization resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Type of the resource.
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 82.477987 | 6,429 | 0.753317 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from ._enums import *
__all__ = ['ExpressRouteCircuitAuthorization']
class ExpressRouteCircuitAuthorization(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
authorization_key: Optional[pulumi.Input[str]] = None,
authorization_name: Optional[pulumi.Input[str]] = None,
authorization_use_status: Optional[pulumi.Input[Union[str, 'AuthorizationUseStatus']]] = None,
circuit_name: Optional[pulumi.Input[str]] = None,
id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['authorization_key'] = authorization_key
__props__['authorization_name'] = authorization_name
__props__['authorization_use_status'] = authorization_use_status
if circuit_name is None and not opts.urn:
raise TypeError("Missing required property 'circuit_name'")
__props__['circuit_name'] = circuit_name
__props__['id'] = id
__props__['name'] = name
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['etag'] = None
__props__['provisioning_state'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network/v20200501:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-native:network:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-native:network/latest:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/latest:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-native:network/v20150501preview:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20150501preview:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-native:network/v20150615:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20150615:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-native:network/v20160330:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20160330:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-native:network/v20160601:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20160601:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-native:network/v20160901:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20160901:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-native:network/v20161201:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20161201:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-native:network/v20170301:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20170301:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-native:network/v20170601:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20170601:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-native:network/v20170801:ExpressRouteCircuitAuthorization"), 
pulumi.Alias(type_="azure-nextgen:network/v20170801:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-native:network/v20170901:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20170901:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-native:network/v20171001:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20171001:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-native:network/v20171101:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20171101:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-native:network/v20180101:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20180101:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-native:network/v20180201:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20180201:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-native:network/v20180401:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20180401:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-native:network/v20180601:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20180601:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-native:network/v20180701:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20180701:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-native:network/v20180801:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20180801:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-native:network/v20181001:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20181001:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-native:network/v20181101:ExpressRouteCircuitAuthorization"), 
pulumi.Alias(type_="azure-nextgen:network/v20181101:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-native:network/v20181201:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20181201:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-native:network/v20190201:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20190201:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-native:network/v20190401:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20190401:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-native:network/v20190601:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20190601:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-native:network/v20190701:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20190701:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-native:network/v20190801:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20190801:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-native:network/v20190901:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20190901:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-native:network/v20191101:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20191101:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-native:network/v20191201:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20191201:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-native:network/v20200301:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20200301:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-native:network/v20200401:ExpressRouteCircuitAuthorization"), 
pulumi.Alias(type_="azure-nextgen:network/v20200401:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-native:network/v20200601:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20200601:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-native:network/v20200701:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20200701:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-native:network/v20200801:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20200801:ExpressRouteCircuitAuthorization")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(ExpressRouteCircuitAuthorization, __self__).__init__(
'azure-native:network/v20200501:ExpressRouteCircuitAuthorization',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'ExpressRouteCircuitAuthorization':
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["authorization_key"] = None
__props__["authorization_use_status"] = None
__props__["etag"] = None
__props__["name"] = None
__props__["provisioning_state"] = None
__props__["type"] = None
return ExpressRouteCircuitAuthorization(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="authorizationKey")
def authorization_key(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "authorization_key")
@property
@pulumi.getter(name="authorizationUseStatus")
def authorization_use_status(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "authorization_use_status")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
return pulumi.get(self, "etag")
@property
@pulumi.getter
def name(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| true | true |
f72577237e5fb89fd2d8fbec32f4f79d18801dae | 2,616 | py | Python | lib/ohdevtools/commands/create-content-xml.py | LukiLeu/loadify | 740532324e7f94c7ad1f94f78bdce9bc2575811a | [
"MIT"
] | null | null | null | lib/ohdevtools/commands/create-content-xml.py | LukiLeu/loadify | 740532324e7f94c7ad1f94f78bdce9bc2575811a | [
"MIT"
] | null | null | null | lib/ohdevtools/commands/create-content-xml.py | LukiLeu/loadify | 740532324e7f94c7ad1f94f78bdce9bc2575811a | [
"MIT"
] | null | null | null | from antglob import ant_glob
import json
import sys
import os
import ntpath
from xml.sax.saxutils import escape, quoteattr
description = "Generates MSBuild fragments for embedding content."
# Hide from "go help" for now: not relevant to most projects.
command_hidden = True
usage_text = """
Creates XML to insert in a csproj file to embed content.
Usage:
create-content-xml config.json
Configuration file should be pure JSON, and contain one object
with these fields:
basedir - Directory that contains the csproj file.
files - List of file specification objects.
File specification objects have these fields:
source - Relative path from the project directory to the
tree of files to add.
target - Location to output the tree of files during a build.
patterns - List of glob patterns to select content files.
For example:
{ "basedir":"src/ChatFormsTouch",
"files":[
{ "source":"../../build/main/http/ohj",
"target":"http/ohj",
"patterns":["**/*"]
},
{ "source":"../../build/apps/OpenHome.ChatForms/http",
"target":"app/http",
"patterns":["**/*"]
},
{ "source":"../../build/main/modules",
"target":"modules",
"patterns":["**/*"]
}
]
}
""".strip()
def main():
if len(sys.argv) <= 1 or sys.argv[1] == '--help':
print usage_text
return
with open(sys.argv[1]) as f:
config = json.load(f)
basedir = os.path.abspath(config['basedir'])
os.chdir(basedir)
output_directories = set()
output_files = []
for fileset in config['files']:
os.chdir(basedir)
os.chdir(fileset['source'])
for pattern in fileset['patterns']:
files = ant_glob(pattern)
for filename in files:
frompath = ntpath.normpath(ntpath.join(fileset['source'], filename))
topath = ntpath.normpath(ntpath.join(fileset['target'], filename))
output_directories.update(topath[:index+1] for (index, ch) in enumerate(topath) if ch=='\\')
output_files.append((frompath, topath))
print " <ItemGroup>"
for dirname in sorted(output_directories):
print " <Folder Include={0} />".format(quoteattr(dirname))
print " </ItemGroup>"
print " <ItemGroup>"
for (frompath, topath) in output_files:
print " <Content Include=\"{0}\">\n <Link>{1}</Link>\n </Content>".format(escape(frompath), escape(topath))
print " </ItemGroup>"
if __name__ == "__main__":
main()
| 30.418605 | 126 | 0.60474 | from antglob import ant_glob
import json
import sys
import os
import ntpath
from xml.sax.saxutils import escape, quoteattr
description = "Generates MSBuild fragments for embedding content."
command_hidden = True
usage_text = """
Creates XML to insert in a csproj file to embed content.
Usage:
create-content-xml config.json
Configuration file should be pure JSON, and contain one object
with these fields:
basedir - Directory that contains the csproj file.
files - List of file specification objects.
File specification objects have these fields:
source - Relative path from the project directory to the
tree of files to add.
target - Location to output the tree of files during a build.
patterns - List of glob patterns to select content files.
For example:
{ "basedir":"src/ChatFormsTouch",
"files":[
{ "source":"../../build/main/http/ohj",
"target":"http/ohj",
"patterns":["**/*"]
},
{ "source":"../../build/apps/OpenHome.ChatForms/http",
"target":"app/http",
"patterns":["**/*"]
},
{ "source":"../../build/main/modules",
"target":"modules",
"patterns":["**/*"]
}
]
}
""".strip()
def main():
if len(sys.argv) <= 1 or sys.argv[1] == '--help':
print usage_text
return
with open(sys.argv[1]) as f:
config = json.load(f)
basedir = os.path.abspath(config['basedir'])
os.chdir(basedir)
output_directories = set()
output_files = []
for fileset in config['files']:
os.chdir(basedir)
os.chdir(fileset['source'])
for pattern in fileset['patterns']:
files = ant_glob(pattern)
for filename in files:
frompath = ntpath.normpath(ntpath.join(fileset['source'], filename))
topath = ntpath.normpath(ntpath.join(fileset['target'], filename))
output_directories.update(topath[:index+1] for (index, ch) in enumerate(topath) if ch=='\\')
output_files.append((frompath, topath))
print " <ItemGroup>"
for dirname in sorted(output_directories):
print " <Folder Include={0} />".format(quoteattr(dirname))
print " </ItemGroup>"
print " <ItemGroup>"
for (frompath, topath) in output_files:
print " <Content Include=\"{0}\">\n <Link>{1}</Link>\n </Content>".format(escape(frompath), escape(topath))
print " </ItemGroup>"
if __name__ == "__main__":
main()
| false | true |
f725772c43f53c9873559775e2e56aabd5ec8fda | 13,185 | py | Python | aps/loader/simu.py | LvHang/aps | 3e9c8b247e0526481970c28e8af1a6a93cc7f2cc | [
"Apache-2.0"
] | 5 | 2021-07-05T12:21:44.000Z | 2021-11-23T08:09:45.000Z | aps/loader/simu.py | LvHang/aps | 3e9c8b247e0526481970c28e8af1a6a93cc7f2cc | [
"Apache-2.0"
] | null | null | null | aps/loader/simu.py | LvHang/aps | 3e9c8b247e0526481970c28e8af1a6a93cc7f2cc | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Copyright 2020 Jian Wu
# License: Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""
Adopt from my another project: https://github.com/funcwj/setk
See https://github.com/funcwj/setk/tree/master/doc/data_simu for command line usage
"""
import argparse
import numpy as np
from aps.loader.audio import read_audio, add_room_response
from aps.opts import StrToBoolAction
from aps.const import EPSILON
def coeff_snr(sig_pow, ref_pow, snr):
"""
For
mix = Sa + alpha*Sb
Given
SNR = 10*log10[Pa/(Pb * alpha^2)]
we got
alpha = Pa/[Pb*10^(SNR/10)]^0.5
"""
return (ref_pow / (sig_pow * 10**(snr / 10) + EPSILON))**0.5
def add_speaker(mix_nsamps,
src_spk,
src_begin,
sdr,
src_rir=None,
channel=-1,
sr=16000):
"""
Mix source speakers
"""
spk_image, spk_power = [], []
for i, spk in enumerate(src_spk):
if src_rir is None:
src = spk[None, ...] if spk.ndim == 1 else spk
spk_image.append(src)
spk_power.append(np.mean(src[0]**2))
else:
rir = src_rir[i]
if rir.ndim == 1:
rir = rir[None, ...]
if channel >= 0:
if rir.ndim == 2:
rir = rir[channel:channel + 1]
revb, p = add_room_response(spk, rir, sr=sr)
spk_image.append(revb)
spk_power.append(p)
# make mix
N, _ = spk_image[0].shape
mix = [np.zeros([N, mix_nsamps], dtype=np.float32) for _ in src_spk]
# start mixing
ref_power = spk_power[0]
for i, image in enumerate(spk_image):
dur = image.shape[-1]
beg = src_begin[i]
coeff = 1 if i == 0 else coeff_snr(spk_power[i], ref_power, sdr[i])
mix[i][..., beg:beg + dur] += coeff * image
return mix
def add_point_noise(mix_nsamps,
ref_power,
noise,
noise_begin,
snr,
noise_rir=None,
channel=-1,
repeat=False,
sr=16000):
"""
Add pointsource noises
"""
image = []
image_power = []
for i, noise in enumerate(noise):
beg = noise_begin[i]
if not repeat:
dur = min(noise.shape[-1], mix_nsamps - beg)
else:
dur = mix_nsamps - beg
# if short, then padding
if noise.shape[-1] < dur:
noise = np.pad(noise, (0, dur - noise.shape[-1]), mode="wrap")
if noise_rir is None:
src = noise[None, ...] if noise.ndim == 1 else noise
image.append(src)
image_power.append(np.mean(src[0, :dur]**2) if dur > 0 else 0)
else:
rir = noise_rir[i]
if rir.ndim == 1:
rir = rir[None, ...]
if channel >= 0:
if rir.ndim == 2:
rir = rir[channel:channel + 1]
revb, revb_power = add_room_response(noise[:dur], rir, sr=sr)
image.append(revb)
image_power.append(revb_power)
# make noise mix
N, _ = image[0].shape
mix = np.zeros([N, mix_nsamps], dtype=np.float32)
# start mixing
for i, img in enumerate(image):
beg = noise_begin[i]
coeff = coeff_snr(image_power[i], ref_power, snr[i])
mix[..., beg:beg + dur] += coeff * img[..., :dur]
return mix
def load_audio(src_args, beg=None, end=None, sr=16000):
"""
Load audio from args.xxx
"""
if src_args:
src_path = src_args.split(",")
beg_int = [None for _ in src_path]
end_int = [None for _ in src_path]
if beg:
beg_int = [int(v) for v in beg.split(",")]
if end:
end_int = [int(v) for v in end.split(",")]
return [
read_audio(s, sr=sr, beg=b, end=e)
for s, b, e in zip(src_path, beg_int, end_int)
]
else:
return None
def run_simu(args):
    """
    Run one data-simulation pass driven by the command line arguments.

    Mixes the source speakers (optionally convolved with their RIRs), adds
    pointsource noises and an isotropic noise at the requested SNRs and
    normalizes the final mixture.

    Args:
        args: parsed namespace produced by make_argparse()
    Return:
        (mix, spk, noise) where mix is the normalized mixture, spk the list
        of scaled single-channel speaker images and noise the scaled noise
        track (None if no noise was added)
    """

    def arg_float(src_args):
        # parse a comma separated list of floats ("" => None)
        return [float(s) for s in src_args.split(",")] if src_args else None

    src_spk = load_audio(args.src_spk, sr=args.sr)
    src_rir = load_audio(args.src_rir, sr=args.sr)
    if src_rir:
        if len(src_rir) != len(src_spk):
            raise RuntimeError(
                f"Number of --src-rir={args.src_rir} do not match with " +
                f"--src-spk={args.src_spk} option")
    sdr = arg_float(args.src_sdr)
    if len(src_spk) > 1 and not sdr:
        raise RuntimeError("--src-sdr need to be assigned for " +
                           f"--src-spk={args.src_spk}")
    if sdr:
        if len(src_spk) - 1 != len(sdr):
            # message fixed: this check is about --src-sdr, not --src-snr
            raise RuntimeError("Number of --src-sdr - 1 do not match with " +
                               "--src-sdr option")
        # the first speaker is the 0 dB reference
        sdr = [0] + sdr
    src_begin = arg_float(args.src_begin)
    if src_begin:
        src_begin = [int(v) for v in src_begin]
    else:
        src_begin = [0 for _ in src_spk]
    # number samples of the mixture
    mix_nsamps = max([b + s.size for b, s in zip(src_begin, src_spk)])
    point_noise_rir = load_audio(args.point_noise_rir, sr=args.sr)
    # bug fix: offsets are comma separated like every other list option,
    # so split on "," (and guard the empty string) instead of whitespace
    point_noise_end = [
        str(int(v) + mix_nsamps)
        for v in (args.point_noise_offset.split(",")
                  if args.point_noise_offset else [])
    ]
    point_noise = load_audio(args.point_noise,
                             beg=args.point_noise_offset,
                             end=",".join(point_noise_end),
                             sr=args.sr)
    if args.point_noise:
        if point_noise_rir:
            if len(point_noise) != len(point_noise_rir):
                raise RuntimeError(
                    f"Number of --point-noise-rir={args.point_noise_rir} do not match with "
                    + f"--point-noise={args.point_noise} option")
        point_snr = arg_float(args.point_noise_snr)
        if not point_snr:
            raise RuntimeError("--point-noise-snr need to be assigned for " +
                               f"--point-noise={args.point_noise}")
        if len(point_noise) != len(point_snr):
            raise RuntimeError(
                f"Number of --point-noise-snr={args.point_noise_snr} do not match with "
                + f"--point-noise={args.point_noise} option")
        point_begin = arg_float(args.point_noise_begin)
        if point_begin:
            point_begin = [int(v) for v in point_begin]
        else:
            point_begin = [0 for _ in point_noise]
    # isotropic noise: load exactly the mixture-length segment at the offset
    isotropic_noise = load_audio(args.isotropic_noise,
                                 beg=str(args.isotropic_noise_offset),
                                 end=str(args.isotropic_noise_offset +
                                         mix_nsamps),
                                 sr=args.sr)
    if isotropic_noise:
        isotropic_noise = isotropic_noise[0]
        isotropic_snr = arg_float(args.isotropic_noise_snr)
        if not isotropic_snr:
            raise RuntimeError(
                "--isotropic-snr need to be assigned for " +
                f"--isotropic-noise={args.isotropic_noise} option")
        isotropic_snr = isotropic_snr[0]
    else:
        isotropic_snr = None
    # add speakers (list of per-speaker images placed on the timeline)
    spk = add_speaker(mix_nsamps,
                      src_spk,
                      src_begin,
                      sdr,
                      src_rir=src_rir,
                      channel=args.dump_channel,
                      sr=args.sr)
    spk_utt = sum(spk)
    mix = spk_utt.copy()
    # power of channel 0 of the summed speakers: SNR reference for noises
    spk_power = np.mean(spk_utt[0]**2)
    if point_noise:
        noise = add_point_noise(mix_nsamps,
                                spk_power,
                                point_noise,
                                point_begin,
                                point_snr,
                                noise_rir=point_noise_rir,
                                channel=args.dump_channel,
                                repeat=args.point_noise_repeat,
                                sr=args.sr)
        num_channels = spk_utt.shape[0]
        if num_channels != noise.shape[0]:
            if num_channels == 1:
                # mono mixture vs multi-channel noise: keep the first channel
                noise = noise[0:1]
            else:
                raise RuntimeError("Channel mismatch between source speaker " +
                                   "configuration and pointsource noise's, " +
                                   f"{num_channels} vs {noise.shape[0]}")
        mix = spk_utt + noise
    else:
        noise = None
    ch = args.dump_channel
    if isotropic_noise is not None:
        N, _ = spk_utt.shape
        if N == 1:
            if isotropic_noise.ndim == 1:
                isotropic_noise = isotropic_noise[None, ...]
            else:
                if ch >= 0:
                    isotropic_noise = isotropic_noise[ch:ch + 1]
                else:
                    raise RuntimeError(
                        "Single channel mixture vs multi-channel "
                        "isotropic noise")
        else:
            if isotropic_noise.shape[0] != N:
                raise RuntimeError(
                    "Channel number mismatch between mixture and isotropic noise, "
                    + f"{N} vs {isotropic_noise.shape[0]}")
        dur = min(mix_nsamps, isotropic_noise.shape[-1])
        isotropic_chunk = isotropic_noise[0, :dur]
        power = np.mean(isotropic_chunk**2)
        coeff = coeff_snr(power, spk_power, isotropic_snr)
        mix[..., :dur] += coeff * isotropic_chunk
        if noise is None:
            noise = coeff * isotropic_chunk
        else:
            noise[..., :dur] += coeff * isotropic_chunk
    # peak-normalize the mixture and apply the same gain to all tracks
    factor = args.norm_factor / (np.max(np.abs(mix)) + EPSILON)
    mix = mix.squeeze() * factor
    spk = [s[0] * factor for s in spk]
    if noise is None:
        return mix, spk, None
    else:
        return mix, spk, noise[0] * factor
def make_argparse():
    """
    Build the command line argument parser for the simulation script.

    Return:
        argparse.ArgumentParser configured with all simulation options
    """
    parser = argparse.ArgumentParser(
        description="Command to do audio data simulation",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument("--src-spk",
                        type=str,
                        required=True,
                        help="Source speakers, e.g., spk1.wav,spk2.wav")
    parser.add_argument("--src-rir",
                        type=str,
                        default="",
                        help="RIRs for each source speakers")
    parser.add_argument("--src-sdr",
                        type=str,
                        default="",
                        help="SDR for each speakers (if needed)")
    parser.add_argument("--src-begin",
                        type=str,
                        default="",
                        # typo fixed: "Begining" -> "Beginning"
                        help="Beginning samples on the mixture utterances")
    parser.add_argument("--point-noise",
                        type=str,
                        default="",
                        help="Add pointsource noises")
    parser.add_argument("--point-noise-rir",
                        type=str,
                        default="",
                        help="RIRs of the pointsource noises (if needed)")
    parser.add_argument("--point-noise-snr",
                        type=str,
                        default="",
                        help="SNR of the pointsource noises")
    parser.add_argument("--point-noise-begin",
                        type=str,
                        default="",
                        help="Beginning samples of the "
                        "pointsource noises on the mixture "
                        "utterances (if needed)")
    parser.add_argument("--point-noise-offset",
                        type=str,
                        default="",
                        help="Add from the offset position "
                        "of the pointsource noise")
    parser.add_argument("--point-noise-repeat",
                        action=StrToBoolAction,
                        default=False,
                        help="Repeat the pointsource noise or not")
    parser.add_argument("--isotropic-noise",
                        type=str,
                        default="",
                        help="Add isotropic noises")
    parser.add_argument("--isotropic-noise-snr",
                        type=str,
                        default="",
                        help="SNR of the isotropic noises")
    parser.add_argument("--isotropic-noise-offset",
                        type=int,
                        default=0,
                        help="Add noise from the offset position "
                        "of the isotropic noise")
    parser.add_argument("--dump-channel",
                        type=int,
                        default=-1,
                        help="Index of the channel to dump out (-1 means all)")
    parser.add_argument("--norm-factor",
                        type=float,
                        default=0.9,
                        help="Normalization factor of the final output")
    parser.add_argument("--sr",
                        type=int,
                        default=16000,
                        help="Value of the sample rate")
    return parser
| 36.222527 | 92 | 0.498597 |
import argparse
import numpy as np
from aps.loader.audio import read_audio, add_room_response
from aps.opts import StrToBoolAction
from aps.const import EPSILON
def coeff_snr(sig_pow, ref_pow, snr):
return (ref_pow / (sig_pow * 10**(snr / 10) + EPSILON))**0.5
def add_speaker(mix_nsamps,
src_spk,
src_begin,
sdr,
src_rir=None,
channel=-1,
sr=16000):
spk_image, spk_power = [], []
for i, spk in enumerate(src_spk):
if src_rir is None:
src = spk[None, ...] if spk.ndim == 1 else spk
spk_image.append(src)
spk_power.append(np.mean(src[0]**2))
else:
rir = src_rir[i]
if rir.ndim == 1:
rir = rir[None, ...]
if channel >= 0:
if rir.ndim == 2:
rir = rir[channel:channel + 1]
revb, p = add_room_response(spk, rir, sr=sr)
spk_image.append(revb)
spk_power.append(p)
N, _ = spk_image[0].shape
mix = [np.zeros([N, mix_nsamps], dtype=np.float32) for _ in src_spk]
ref_power = spk_power[0]
for i, image in enumerate(spk_image):
dur = image.shape[-1]
beg = src_begin[i]
coeff = 1 if i == 0 else coeff_snr(spk_power[i], ref_power, sdr[i])
mix[i][..., beg:beg + dur] += coeff * image
return mix
def add_point_noise(mix_nsamps,
ref_power,
noise,
noise_begin,
snr,
noise_rir=None,
channel=-1,
repeat=False,
sr=16000):
image = []
image_power = []
for i, noise in enumerate(noise):
beg = noise_begin[i]
if not repeat:
dur = min(noise.shape[-1], mix_nsamps - beg)
else:
dur = mix_nsamps - beg
if noise.shape[-1] < dur:
noise = np.pad(noise, (0, dur - noise.shape[-1]), mode="wrap")
if noise_rir is None:
src = noise[None, ...] if noise.ndim == 1 else noise
image.append(src)
image_power.append(np.mean(src[0, :dur]**2) if dur > 0 else 0)
else:
rir = noise_rir[i]
if rir.ndim == 1:
rir = rir[None, ...]
if channel >= 0:
if rir.ndim == 2:
rir = rir[channel:channel + 1]
revb, revb_power = add_room_response(noise[:dur], rir, sr=sr)
image.append(revb)
image_power.append(revb_power)
N, _ = image[0].shape
mix = np.zeros([N, mix_nsamps], dtype=np.float32)
for i, img in enumerate(image):
beg = noise_begin[i]
coeff = coeff_snr(image_power[i], ref_power, snr[i])
mix[..., beg:beg + dur] += coeff * img[..., :dur]
return mix
def load_audio(src_args, beg=None, end=None, sr=16000):
if src_args:
src_path = src_args.split(",")
beg_int = [None for _ in src_path]
end_int = [None for _ in src_path]
if beg:
beg_int = [int(v) for v in beg.split(",")]
if end:
end_int = [int(v) for v in end.split(",")]
return [
read_audio(s, sr=sr, beg=b, end=e)
for s, b, e in zip(src_path, beg_int, end_int)
]
else:
return None
def run_simu(args):
def arg_float(src_args):
return [float(s) for s in src_args.split(",")] if src_args else None
src_spk = load_audio(args.src_spk, sr=args.sr)
src_rir = load_audio(args.src_rir, sr=args.sr)
if src_rir:
if len(src_rir) != len(src_spk):
raise RuntimeError(
f"Number of --src-rir={args.src_rir} do not match with " +
f"--src-spk={args.src_spk} option")
sdr = arg_float(args.src_sdr)
if len(src_spk) > 1 and not sdr:
raise RuntimeError("--src-sdr need to be assigned for " +
f"--src-spk={args.src_spk}")
if sdr:
if len(src_spk) - 1 != len(sdr):
raise RuntimeError("Number of --src-snr - 1 do not match with " +
"--src-snr option")
sdr = [0] + sdr
src_begin = arg_float(args.src_begin)
if src_begin:
src_begin = [int(v) for v in src_begin]
else:
src_begin = [0 for _ in src_spk]
mix_nsamps = max([b + s.size for b, s in zip(src_begin, src_spk)])
point_noise_rir = load_audio(args.point_noise_rir, sr=args.sr)
point_noise_end = [
str(int(v) + mix_nsamps) for v in args.point_noise_offset.split()
]
point_noise = load_audio(args.point_noise,
beg=args.point_noise_offset,
end=",".join(point_noise_end),
sr=args.sr)
if args.point_noise:
if point_noise_rir:
if len(point_noise) != len(point_noise_rir):
raise RuntimeError(
f"Number of --point-noise-rir={args.point_noise_rir} do not match with "
+ f"--point-noise={args.point_noise} option")
point_snr = arg_float(args.point_noise_snr)
if not point_snr:
raise RuntimeError("--point-noise-snr need to be assigned for " +
f"--point-noise={args.point_noise}")
if len(point_noise) != len(point_snr):
raise RuntimeError(
f"Number of --point-noise-snr={args.point_noise_snr} do not match with "
+ f"--point-noise={args.point_noise} option")
point_begin = arg_float(args.point_noise_begin)
if point_begin:
point_begin = [int(v) for v in point_begin]
else:
point_begin = [0 for _ in point_noise]
isotropic_noise = load_audio(args.isotropic_noise,
beg=str(args.isotropic_noise_offset),
end=str(args.isotropic_noise_offset +
mix_nsamps),
sr=args.sr)
if isotropic_noise:
isotropic_noise = isotropic_noise[0]
isotropic_snr = arg_float(args.isotropic_noise_snr)
if not isotropic_snr:
raise RuntimeError(
"--isotropic-snr need to be assigned for " +
f"--isotropic-noise={args.isotropic_noise} option")
isotropic_snr = isotropic_snr[0]
else:
isotropic_snr = None
spk = add_speaker(mix_nsamps,
src_spk,
src_begin,
sdr,
src_rir=src_rir,
channel=args.dump_channel,
sr=args.sr)
spk_utt = sum(spk)
mix = spk_utt.copy()
spk_power = np.mean(spk_utt[0]**2)
if point_noise:
noise = add_point_noise(mix_nsamps,
spk_power,
point_noise,
point_begin,
point_snr,
noise_rir=point_noise_rir,
channel=args.dump_channel,
repeat=args.point_noise_repeat,
sr=args.sr)
num_channels = spk_utt.shape[0]
if num_channels != noise.shape[0]:
if num_channels == 1:
noise = noise[0:1]
else:
raise RuntimeError("Channel mismatch between source speaker " +
"configuration and pointsource noise's, " +
f"{num_channels} vs {noise.shape[0]}")
mix = spk_utt + noise
else:
noise = None
ch = args.dump_channel
if isotropic_noise is not None:
N, _ = spk_utt.shape
if N == 1:
if isotropic_noise.ndim == 1:
isotropic_noise = isotropic_noise[None, ...]
else:
if ch >= 0:
isotropic_noise = isotropic_noise[ch:ch + 1]
else:
raise RuntimeError(
"Single channel mixture vs multi-channel "
"isotropic noise")
else:
if isotropic_noise.shape[0] != N:
raise RuntimeError(
"Channel number mismatch between mixture and isotropic noise, "
+ f"{N} vs {isotropic_noise.shape[0]}")
dur = min(mix_nsamps, isotropic_noise.shape[-1])
isotropic_chunk = isotropic_noise[0, :dur]
power = np.mean(isotropic_chunk**2)
coeff = coeff_snr(power, spk_power, isotropic_snr)
mix[..., :dur] += coeff * isotropic_chunk
if noise is None:
noise = coeff * isotropic_chunk
else:
noise[..., :dur] += coeff * isotropic_chunk
factor = args.norm_factor / (np.max(np.abs(mix)) + EPSILON)
mix = mix.squeeze() * factor
spk = [s[0] * factor for s in spk]
if noise is None:
return mix, spk, None
else:
return mix, spk, noise[0] * factor
def make_argparse():
parser = argparse.ArgumentParser(
description="Command to do audio data simulation",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--src-spk",
type=str,
required=True,
help="Source speakers, e.g., spk1.wav,spk2.wav")
parser.add_argument("--src-rir",
type=str,
default="",
help="RIRs for each source speakers")
parser.add_argument("--src-sdr",
type=str,
default="",
help="SDR for each speakers (if needed)")
parser.add_argument("--src-begin",
type=str,
default="",
help="Begining samples on the mixture utterances")
parser.add_argument("--point-noise",
type=str,
default="",
help="Add pointsource noises")
parser.add_argument("--point-noise-rir",
type=str,
default="",
help="RIRs of the pointsource noises (if needed)")
parser.add_argument("--point-noise-snr",
type=str,
default="",
help="SNR of the pointsource noises")
parser.add_argument("--point-noise-begin",
type=str,
default="",
help="Begining samples of the "
"pointsource noises on the mixture "
"utterances (if needed)")
parser.add_argument("--point-noise-offset",
type=str,
default="",
help="Add from the offset position "
"of the pointsource noise")
parser.add_argument("--point-noise-repeat",
action=StrToBoolAction,
default=False,
help="Repeat the pointsource noise or not")
parser.add_argument("--isotropic-noise",
type=str,
default="",
help="Add isotropic noises")
parser.add_argument("--isotropic-noise-snr",
type=str,
default="",
help="SNR of the isotropic noises")
parser.add_argument("--isotropic-noise-offset",
type=int,
default=0,
help="Add noise from the offset position "
"of the isotropic noise")
parser.add_argument("--dump-channel",
type=int,
default=-1,
help="Index of the channel to dump out (-1 means all)")
parser.add_argument('--norm-factor',
type=float,
default=0.9,
help="Normalization factor of the final output")
parser.add_argument("--sr",
type=int,
default=16000,
help="Value of the sample rate")
return parser
| true | true |
f7257753ce0834da08cef9848377fb031be44fc6 | 2,553 | py | Python | tests/test_parametric_printer_coverage.py | ka3bhy/wexpect | 14a4279579a740ce15743db44228f3b0cf4ee8f4 | [
"MIT"
] | 52 | 2019-04-24T14:38:43.000Z | 2022-03-08T22:03:11.000Z | tests/test_parametric_printer_coverage.py | ka3bhy/wexpect | 14a4279579a740ce15743db44228f3b0cf4ee8f4 | [
"MIT"
] | 51 | 2019-05-13T12:15:09.000Z | 2021-12-15T14:00:15.000Z | tests/test_parametric_printer_coverage.py | ka3bhy/wexpect | 14a4279579a740ce15743db44228f3b0cf4ee8f4 | [
"MIT"
] | 20 | 2019-07-15T15:48:31.000Z | 2022-03-27T08:55:17.000Z | import wexpect
import unittest
import sys
import os
import time
from tests import PexpectTestCase
@unittest.skipIf(wexpect.spawn_class_name == 'legacy_wexpect', "legacy unsupported")
class TestCaseParametricPrinter(PexpectTestCase.PexpectTestCase):
def test_all_line_length (self):
here = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, here)
# With quotes (C:\Program Files\Python37\python.exe needs quotes)
python_executable = '"' + sys.executable + '" '
child_script = here + '\\parametric_printer.py'
self.prompt = '> '
# Start the child process
self.p = wexpect.spawn(python_executable + ' ' + child_script, coverage_console_reader=True)
# Wait for prompt
self.p.expect(self.prompt)
self._test(['a'], range(1,200), [1], [0])
self.p.terminate()
def test_long_console(self):
here = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, here)
# With quotes (C:\Program Files\Python37\python.exe needs quotes)
python_executable = '"' + sys.executable + '" '
child_script = here + '\\parametric_printer.py'
self.prompt = '> '
# Start the child process
self.p = wexpect.spawn(python_executable + ' ' + child_script, coverage_console_reader=True)
# Wait for prompt
self.p.expect(self.prompt)
self._test(['a', 'b', 'c', 'd', 'e', 'f'], [8, 16, 32, 64], [64, 128, 256], [-1, 0])
self.p.terminate()
def _test(self, character_list, character_count_list, line_count_list, speed_ms_list):
# print(f'character_list: {character_list} character_count_list: {character_count_list} line_count_list: {line_count_list} speed_ms_list: {speed_ms_list}')
for character in character_list:
for character_count in character_count_list:
for line_count in line_count_list:
for speed_ms in speed_ms_list:
command = f'{character},{character_count},{line_count},{speed_ms}'
self.p.sendline(command)
self.p.expect(self.prompt)
expected = [character*character_count] * line_count
try:
self.assertEqual(self.p.before.splitlines()[1:-1], expected)
except:
raise
if __name__ == '__main__':
unittest.main()
suite = unittest.makeSuite(TestCaseParametricPrinter,'test')
| 36.471429 | 166 | 0.613396 | import wexpect
import unittest
import sys
import os
import time
from tests import PexpectTestCase
@unittest.skipIf(wexpect.spawn_class_name == 'legacy_wexpect', "legacy unsupported")
class TestCaseParametricPrinter(PexpectTestCase.PexpectTestCase):
def test_all_line_length (self):
here = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, here)
python_executable = '"' + sys.executable + '" '
child_script = here + '\\parametric_printer.py'
self.prompt = '> '
self.p = wexpect.spawn(python_executable + ' ' + child_script, coverage_console_reader=True)
self.p.expect(self.prompt)
self._test(['a'], range(1,200), [1], [0])
self.p.terminate()
def test_long_console(self):
here = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, here)
python_executable = '"' + sys.executable + '" '
child_script = here + '\\parametric_printer.py'
self.prompt = '> '
self.p = wexpect.spawn(python_executable + ' ' + child_script, coverage_console_reader=True)
self.p.expect(self.prompt)
self._test(['a', 'b', 'c', 'd', 'e', 'f'], [8, 16, 32, 64], [64, 128, 256], [-1, 0])
self.p.terminate()
def _test(self, character_list, character_count_list, line_count_list, speed_ms_list):
for character in character_list:
for character_count in character_count_list:
for line_count in line_count_list:
for speed_ms in speed_ms_list:
command = f'{character},{character_count},{line_count},{speed_ms}'
self.p.sendline(command)
self.p.expect(self.prompt)
expected = [character*character_count] * line_count
try:
self.assertEqual(self.p.before.splitlines()[1:-1], expected)
except:
raise
if __name__ == '__main__':
unittest.main()
suite = unittest.makeSuite(TestCaseParametricPrinter,'test')
| true | true |
f725787c40eac809defe6a07fbbbbed9170067b9 | 3,731 | py | Python | contrib/macdeploy/custom_dsstore.py | BufferUnderwhelm/asspennies | 919be76d6d4be42fea02af1194df2875b91c85dc | [
"MIT"
] | null | null | null | contrib/macdeploy/custom_dsstore.py | BufferUnderwhelm/asspennies | 919be76d6d4be42fea02af1194df2875b91c85dc | [
"MIT"
] | null | null | null | contrib/macdeploy/custom_dsstore.py | BufferUnderwhelm/asspennies | 919be76d6d4be42fea02af1194df2875b91c85dc | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2013-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import biplist
from ds_store import DSStore
from mac_alias import Alias
import sys
output_file = sys.argv[1]
package_name_ns = sys.argv[2]
ds = DSStore.open(output_file, 'w+')
ds['.']['bwsp'] = {
'ShowStatusBar': False,
'WindowBounds': '{{300, 280}, {500, 343}}',
'ContainerShowSidebar': False,
'SidebarWidth': 0,
'ShowTabView': False,
'PreviewPaneVisibility': False,
'ShowToolbar': False,
'ShowSidebar': False,
'ShowPathbar': True
}
icvp = {
'gridOffsetX': 0.0,
'textSize': 12.0,
'viewOptionsVersion': 1,
'backgroundImageAlias': b'\x00\x00\x00\x00\x02\x1e\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xd1\x94\\\xb0H+\x00\x05\x00\x00\x00\x98\x0fbackground.tiff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x99\xd19\xb0\xf8\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\r\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b.background\x00\x00\x10\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x11\x00\x08\x00\x00\xd19\xb0\xf8\x00\x00\x00\x01\x00\x04\x00\x00\x00\x98\x00\x0e\x00 \x00\x0f\x00b\x00a\x00c\x00k\x00g\x00r\x00o\x00u\x00n\x00d\x00.\x00t\x00i\x00f\x00f\x00\x0f\x00\x02\x00\x00\x00\x12\x00\x1c/.background/background.tiff\x00\x14\x01\x06\x00\x00\x00\x00\x01\x06\x00\x02\x00\x00\x0cMacintosh HD\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xce\x97\xab\xc3H+\x00\x00\x01\x88[\x88\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02u\xab\x8d\xd1\x94\\\xb0devrddsk\xff\xff\xff\xff\x00\x00\t \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07bitcoin\x00\x00\x10\x00\x08\x00\x00\xce\x97\xab\xc3\x00\x00\x00\x11\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x01\x00\x14\x01\x88[\x88\x00\x16\xa9\t\x00\x08\xfaR\x00\x08\xfaQ\x00\x02d\x8e\x00\x0e\x00\x02\x00\x00\x00\x0f\x00\x1a\x00\x0c\x00M\x00a\x00c\x00i\x00n\x00t\x00o\x00s\x00h\x00 \x00H\x00D\x00\x13\x00\x01/\x00\x00\x15\x00\x02\x00\x14\xff\xff\x00\x00\xff\xff\x00\x00',
'backgroundColorBlue': 1.0,
'iconSize': 96.0,
'backgroundColorGreen': 1.0,
'arrangeBy': 'none',
'showIconPreview': True,
'gridSpacing': 100.0,
'gridOffsetY': 0.0,
'showItemInfo': False,
'labelOnBottom': True,
'backgroundType': 2,
'backgroundColorRed': 1.0
}
alias = Alias.from_bytes(icvp['backgroundImageAlias'])
alias.volume.name = package_name_ns
alias.volume.posix_path = '/Volumes/' + package_name_ns
alias.volume.disk_image_alias.target.filename = package_name_ns + '.temp.dmg'
alias.volume.disk_image_alias.target.carbon_path = 'Macintosh HD:Users:\x00asspenniesuser:\x00Documents:\x00asspennies:\x00asspennies:\x00' + package_name_ns + '.temp.dmg'
alias.volume.disk_image_alias.target.posix_path = 'Users/asspenniesuser/Documents/asspennies/asspennies/' + package_name_ns + '.temp.dmg'
alias.target.carbon_path = package_name_ns + ':.background:\x00background.tiff'
icvp['backgroundImageAlias'] = biplist.Data(alias.to_bytes())
ds['.']['icvp'] = icvp
ds['.']['vSrn'] = ('long', 1)
ds['Applications']['Iloc'] = (370, 156)
ds['AssPennies-Qt.app']['Iloc'] = (128, 156)
ds.flush()
ds.close()
| 62.183333 | 1,817 | 0.725811 |
import biplist
from ds_store import DSStore
from mac_alias import Alias
import sys
output_file = sys.argv[1]
package_name_ns = sys.argv[2]
ds = DSStore.open(output_file, 'w+')
ds['.']['bwsp'] = {
'ShowStatusBar': False,
'WindowBounds': '{{300, 280}, {500, 343}}',
'ContainerShowSidebar': False,
'SidebarWidth': 0,
'ShowTabView': False,
'PreviewPaneVisibility': False,
'ShowToolbar': False,
'ShowSidebar': False,
'ShowPathbar': True
}
icvp = {
'gridOffsetX': 0.0,
'textSize': 12.0,
'viewOptionsVersion': 1,
'backgroundImageAlias': b'\x00\x00\x00\x00\x02\x1e\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xd1\x94\\\xb0H+\x00\x05\x00\x00\x00\x98\x0fbackground.tiff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x99\xd19\xb0\xf8\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\r\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b.background\x00\x00\x10\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x11\x00\x08\x00\x00\xd19\xb0\xf8\x00\x00\x00\x01\x00\x04\x00\x00\x00\x98\x00\x0e\x00 \x00\x0f\x00b\x00a\x00c\x00k\x00g\x00r\x00o\x00u\x00n\x00d\x00.\x00t\x00i\x00f\x00f\x00\x0f\x00\x02\x00\x00\x00\x12\x00\x1c/.background/background.tiff\x00\x14\x01\x06\x00\x00\x00\x00\x01\x06\x00\x02\x00\x00\x0cMacintosh HD\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xce\x97\xab\xc3H+\x00\x00\x01\x88[\x88\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02u\xab\x8d\xd1\x94\\\xb0devrddsk\xff\xff\xff\xff\x00\x00\t \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07bitcoin\x00\x00\x10\x00\x08\x00\x00\xce\x97\xab\xc3\x00\x00\x00\x11\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x01\x00\x14\x01\x88[\x88\x00\x16\xa9\t\x00\x08\xfaR\x00\x08\xfaQ\x00\x02d\x8e\x00\x0e\x00\x02\x00\x00\x00\x0f\x00\x1a\x00\x0c\x00M\x00a\x00c\x00i\x00n\x00t\x00o\x00s\x00h\x00 \x00H\x00D\x00\x13\x00\x01/\x00\x00\x15\x00\x02\x00\x14\xff\xff\x00\x00\xff\xff\x00\x00',
'backgroundColorBlue': 1.0,
'iconSize': 96.0,
'backgroundColorGreen': 1.0,
'arrangeBy': 'none',
'showIconPreview': True,
'gridSpacing': 100.0,
'gridOffsetY': 0.0,
'showItemInfo': False,
'labelOnBottom': True,
'backgroundType': 2,
'backgroundColorRed': 1.0
}
alias = Alias.from_bytes(icvp['backgroundImageAlias'])
alias.volume.name = package_name_ns
alias.volume.posix_path = '/Volumes/' + package_name_ns
alias.volume.disk_image_alias.target.filename = package_name_ns + '.temp.dmg'
alias.volume.disk_image_alias.target.carbon_path = 'Macintosh HD:Users:\x00asspenniesuser:\x00Documents:\x00asspennies:\x00asspennies:\x00' + package_name_ns + '.temp.dmg'
alias.volume.disk_image_alias.target.posix_path = 'Users/asspenniesuser/Documents/asspennies/asspennies/' + package_name_ns + '.temp.dmg'
alias.target.carbon_path = package_name_ns + ':.background:\x00background.tiff'
icvp['backgroundImageAlias'] = biplist.Data(alias.to_bytes())
ds['.']['icvp'] = icvp
ds['.']['vSrn'] = ('long', 1)
ds['Applications']['Iloc'] = (370, 156)
ds['AssPennies-Qt.app']['Iloc'] = (128, 156)
ds.flush()
ds.close()
| true | true |
f72579efb9020b0aeda91f6744f1f71d26ad9971 | 380 | py | Python | yandex_checkout/domain/models/payment_data/payment_data_factory.py | pavel52rus/yandex-checkout-sdk-python | 10c8b0ce12712bca675254f2a230f9fc0e1cb9b4 | [
"MIT"
] | null | null | null | yandex_checkout/domain/models/payment_data/payment_data_factory.py | pavel52rus/yandex-checkout-sdk-python | 10c8b0ce12712bca675254f2a230f9fc0e1cb9b4 | [
"MIT"
] | null | null | null | yandex_checkout/domain/models/payment_data/payment_data_factory.py | pavel52rus/yandex-checkout-sdk-python | 10c8b0ce12712bca675254f2a230f9fc0e1cb9b4 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from yandex_checkout.domain.common.type_factory import TypeFactory
from yandex_checkout.domain.models.payment_data.payment_data_class_map import PaymentDataClassMap
class PaymentDataFactory(TypeFactory):
"""
Factory for payment data objects
"""
def __init__(self):
super(PaymentDataFactory, self).__init__(PaymentDataClassMap())
| 29.230769 | 97 | 0.765789 |
from yandex_checkout.domain.common.type_factory import TypeFactory
from yandex_checkout.domain.models.payment_data.payment_data_class_map import PaymentDataClassMap
class PaymentDataFactory(TypeFactory):
def __init__(self):
super(PaymentDataFactory, self).__init__(PaymentDataClassMap())
| true | true |
f7257ab79e200ce2c0c75e0ae6d7b38cf586e521 | 4,649 | py | Python | ee/clickhouse/queries/paths/path_event_query.py | thinhnguyenuit/posthog | 4758e66790485587d29a617174158d07341342f8 | [
"MIT"
] | null | null | null | ee/clickhouse/queries/paths/path_event_query.py | thinhnguyenuit/posthog | 4758e66790485587d29a617174158d07341342f8 | [
"MIT"
] | null | null | null | ee/clickhouse/queries/paths/path_event_query.py | thinhnguyenuit/posthog | 4758e66790485587d29a617174158d07341342f8 | [
"MIT"
] | null | null | null | from typing import Any, Dict, Tuple
from ee.clickhouse.models.property import get_property_string_expr
from ee.clickhouse.queries.event_query import ClickhouseEventQuery
from posthog.constants import AUTOCAPTURE_EVENT, PAGEVIEW_EVENT, SCREEN_EVENT
from posthog.models.filters.path_filter import PathFilter
class PathEventQuery(ClickhouseEventQuery):
FUNNEL_PERSONS_ALIAS = "funnel_persons"
_filter: PathFilter
def __init__(
self,
filter: PathFilter,
team_id: int,
round_interval=False,
should_join_distinct_ids=False,
should_join_persons=False,
**kwargs,
) -> None:
super().__init__(filter, team_id, round_interval, should_join_distinct_ids, should_join_persons, **kwargs)
def get_query(self) -> Tuple[str, Dict[str, Any]]:
# TODO: ColumnOptimizer with options like self._filter.include_pageviews, self._filter.include_screenviews,
funnel_paths_timestamp = ""
funnel_paths_join = ""
funnel_paths_filter = ""
if self._filter.funnel_paths:
funnel_paths_timestamp = f"{self.FUNNEL_PERSONS_ALIAS}.timestamp as min_timestamp"
funnel_paths_join = f"JOIN {self.FUNNEL_PERSONS_ALIAS} ON {self.FUNNEL_PERSONS_ALIAS}.person_id = {self.DISTINCT_ID_TABLE_ALIAS}.person_id"
funnel_paths_filter = f"AND {self.EVENT_TABLE_ALIAS}.timestamp >= min_timestamp"
_fields = [
f"{self.EVENT_TABLE_ALIAS}.timestamp AS timestamp",
(
f"if(event = '{SCREEN_EVENT}', {self._get_screen_name_parsing()}, "
f"if({self.EVENT_TABLE_ALIAS}.event = '{PAGEVIEW_EVENT}', {self._get_current_url_parsing()}, "
f"if({self.EVENT_TABLE_ALIAS}.event = '{AUTOCAPTURE_EVENT}', concat('autocapture:', {self.EVENT_TABLE_ALIAS}.elements_chain), "
f"{self.EVENT_TABLE_ALIAS}.event))) AS path_item"
),
f"{self.DISTINCT_ID_TABLE_ALIAS}.person_id as person_id" if self._should_join_distinct_ids else "",
funnel_paths_timestamp,
]
_fields = list(filter(None, _fields))
date_query, date_params = self._get_date_filter()
self.params.update(date_params)
prop_filters = self._filter.properties
prop_query, prop_params = self._get_props(prop_filters)
self.params.update(prop_params)
event_query, event_params = self._get_event_query()
self.params.update(event_params)
query = f"""
SELECT {','.join(_fields)} FROM events {self.EVENT_TABLE_ALIAS}
{self._get_disintct_id_query()}
{self._get_person_query()}
{funnel_paths_join}
WHERE team_id = %(team_id)s
{event_query}
{date_query}
{prop_query}
{funnel_paths_filter}
ORDER BY {self.DISTINCT_ID_TABLE_ALIAS}.person_id, {self.EVENT_TABLE_ALIAS}.timestamp
"""
return query, self.params
def _determine_should_join_distinct_ids(self) -> None:
self._should_join_distinct_ids = True
def _get_current_url_parsing(self):
path_type, _ = get_property_string_expr("events", "$current_url", "'$current_url'", "properties")
return f"if(length({path_type}) > 1, trim( TRAILING '/' FROM {path_type}), {path_type})"
def _get_screen_name_parsing(self):
path_type, _ = get_property_string_expr("events", "$screen_name", "'$screen_name'", "properties")
return path_type
def _get_event_query(self) -> Tuple[str, Dict[str, Any]]:
params: Dict[str, Any] = {}
conditions = []
or_conditions = []
if self._filter.include_pageviews:
or_conditions.append(f"event = '{PAGEVIEW_EVENT}'")
if self._filter.include_screenviews:
or_conditions.append(f"event = '{SCREEN_EVENT}'")
if self._filter.include_autocaptures:
or_conditions.append(f"event = '{AUTOCAPTURE_EVENT}'")
if self._filter.include_all_custom_events:
or_conditions.append(f"NOT event LIKE '$%%'")
if self._filter.custom_events:
or_conditions.append(f"event IN %(custom_events)s")
params["custom_events"] = self._filter.custom_events
if or_conditions:
conditions.append(f"({' OR '.join(or_conditions)})")
if self._filter.exclude_events:
conditions.append(f"NOT event IN %(exclude_events)s")
params["exclude_events"] = self._filter.exclude_events
if conditions:
return f" AND {' AND '.join(conditions)}", params
return "", {}
| 39.735043 | 151 | 0.655195 | from typing import Any, Dict, Tuple
from ee.clickhouse.models.property import get_property_string_expr
from ee.clickhouse.queries.event_query import ClickhouseEventQuery
from posthog.constants import AUTOCAPTURE_EVENT, PAGEVIEW_EVENT, SCREEN_EVENT
from posthog.models.filters.path_filter import PathFilter
class PathEventQuery(ClickhouseEventQuery):
    """Builds the ClickHouse events query feeding PostHog path analysis.

    NOTE(review): comment-stripped duplicate copy of PathEventQuery; documentation
    restored here, code tokens unchanged.
    """

    # Alias of the subquery holding the persons who completed the funnel.
    FUNNEL_PERSONS_ALIAS = "funnel_persons"
    _filter: PathFilter

    def __init__(
        self,
        filter: PathFilter,
        team_id: int,
        round_interval=False,
        should_join_distinct_ids=False,
        should_join_persons=False,
        **kwargs,
    ) -> None:
        super().__init__(filter, team_id, round_interval, should_join_distinct_ids, should_join_persons, **kwargs)

    def get_query(self) -> Tuple[str, Dict[str, Any]]:
        """Return the SQL string and bound parameters for the path events query."""
        funnel_paths_timestamp = ""
        funnel_paths_join = ""
        funnel_paths_filter = ""
        if self._filter.funnel_paths:
            # Restrict paths to events at/after each person's funnel completion time.
            funnel_paths_timestamp = f"{self.FUNNEL_PERSONS_ALIAS}.timestamp as min_timestamp"
            funnel_paths_join = f"JOIN {self.FUNNEL_PERSONS_ALIAS} ON {self.FUNNEL_PERSONS_ALIAS}.person_id = {self.DISTINCT_ID_TABLE_ALIAS}.person_id"
            funnel_paths_filter = f"AND {self.EVENT_TABLE_ALIAS}.timestamp >= min_timestamp"

        # path_item: screen name for screen events, trimmed URL for pageviews,
        # "autocapture:<elements_chain>" for autocaptures, else the raw event name.
        _fields = [
            f"{self.EVENT_TABLE_ALIAS}.timestamp AS timestamp",
            (
                f"if(event = '{SCREEN_EVENT}', {self._get_screen_name_parsing()}, "
                f"if({self.EVENT_TABLE_ALIAS}.event = '{PAGEVIEW_EVENT}', {self._get_current_url_parsing()}, "
                f"if({self.EVENT_TABLE_ALIAS}.event = '{AUTOCAPTURE_EVENT}', concat('autocapture:', {self.EVENT_TABLE_ALIAS}.elements_chain), "
                f"{self.EVENT_TABLE_ALIAS}.event))) AS path_item"
            ),
            f"{self.DISTINCT_ID_TABLE_ALIAS}.person_id as person_id" if self._should_join_distinct_ids else "",
            funnel_paths_timestamp,
        ]
        # Drop entries that resolved to empty strings above.
        _fields = list(filter(None, _fields))

        date_query, date_params = self._get_date_filter()
        self.params.update(date_params)

        prop_filters = self._filter.properties
        prop_query, prop_params = self._get_props(prop_filters)
        self.params.update(prop_params)

        event_query, event_params = self._get_event_query()
        self.params.update(event_params)

        query = f"""
            SELECT {','.join(_fields)} FROM events {self.EVENT_TABLE_ALIAS}
            {self._get_disintct_id_query()}
            {self._get_person_query()}
            {funnel_paths_join}
            WHERE team_id = %(team_id)s
            {event_query}
            {date_query}
            {prop_query}
            {funnel_paths_filter}
            ORDER BY {self.DISTINCT_ID_TABLE_ALIAS}.person_id, {self.EVENT_TABLE_ALIAS}.timestamp
        """
        return query, self.params

    def _determine_should_join_distinct_ids(self) -> None:
        # Path queries always join distinct ids: results are grouped per person.
        self._should_join_distinct_ids = True

    def _get_current_url_parsing(self):
        """ClickHouse expression for $current_url with any trailing '/' trimmed (length guard spares "/")."""
        path_type, _ = get_property_string_expr("events", "$current_url", "'$current_url'", "properties")
        return f"if(length({path_type}) > 1, trim( TRAILING '/' FROM {path_type}), {path_type})"

    def _get_screen_name_parsing(self):
        """ClickHouse expression extracting the $screen_name property."""
        path_type, _ = get_property_string_expr("events", "$screen_name", "'$screen_name'", "properties")
        return path_type

    def _get_event_query(self) -> Tuple[str, Dict[str, Any]]:
        """SQL fragment (and params) restricting which events enter the path query.

        Inclusion flags are OR'd together; exclusions are AND'd on top.
        """
        params: Dict[str, Any] = {}
        conditions = []
        or_conditions = []
        if self._filter.include_pageviews:
            or_conditions.append(f"event = '{PAGEVIEW_EVENT}'")
        if self._filter.include_screenviews:
            or_conditions.append(f"event = '{SCREEN_EVENT}'")
        if self._filter.include_autocaptures:
            or_conditions.append(f"event = '{AUTOCAPTURE_EVENT}'")
        if self._filter.include_all_custom_events:
            # '$%%' -> literal '$%' after %-substitution: custom events are those
            # whose names do not start with '$'.
            or_conditions.append(f"NOT event LIKE '$%%'")
        if self._filter.custom_events:
            or_conditions.append(f"event IN %(custom_events)s")
            params["custom_events"] = self._filter.custom_events
        if or_conditions:
            conditions.append(f"({' OR '.join(or_conditions)})")
        if self._filter.exclude_events:
            conditions.append(f"NOT event IN %(exclude_events)s")
            params["exclude_events"] = self._filter.exclude_events
        if conditions:
            # Leading " AND " lets the caller splice this after its WHERE clause.
            return f" AND {' AND '.join(conditions)}", params
        return "", {}
| true | true |
f7257ab8f76526d2fc5780943a2451a7b5e04d54 | 4,946 | py | Python | example_scenes/basic.py | Pow3r5/manim | 2972a64342aa5ae72977b444f653b05250ab1f8f | [
"MIT"
] | 2 | 2022-03-31T08:31:00.000Z | 2022-03-31T08:31:43.000Z | example_scenes/basic.py | Pow3r5/manim | 2972a64342aa5ae72977b444f653b05250ab1f8f | [
"MIT"
] | null | null | null | example_scenes/basic.py | Pow3r5/manim | 2972a64342aa5ae72977b444f653b05250ab1f8f | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from manim import *
# To watch one of these scenes, run the following:
# manim --quality m -p example_scenes.py SquareToCircle
#
# Use the flag --quality l for a faster rendering at a lower quality.
# Use -s to skip to the end and just save the final frame
# Use the -p to have preview of the animation (or image, if -s was
# used) pop up once done.
# Use -n <number> to skip ahead to the nth animation of a scene.
# Use -r <number> to specify a resolution (for example, -r 1920,1080
# for a 1920x1080 video)
class OpeningManim(Scene):
    """Demonstrate LaTeX rendering, transforms, and a non-linear grid warp."""

    def construct(self):
        # Title and the Basel-problem formula, stacked vertically.
        title = Tex(r"This is some \LaTeX")
        basel = MathTex(r"\sum_{n=1}^\infty \frac{1}{n^2} = \frac{\pi^2}{6}")
        VGroup(title, basel).arrange(DOWN)
        self.play(
            Write(title),
            FadeIn(basel, shift=DOWN),
        )
        self.wait()

        # Morph the title into a corner caption while the formula drops away
        # piece by piece (LaggedStart staggers the per-submobject fade-outs).
        transform_title = Tex("That was a transform")
        transform_title.to_corner(UP + LEFT)
        self.play(
            Transform(title, transform_title),
            LaggedStart(*(FadeOut(obj, shift=DOWN) for obj in basel)),
        )
        self.wait()

        grid = NumberPlane()
        grid_title = Tex("This is a grid", font_size=72)
        grid_title.move_to(transform_title)
        self.add(grid, grid_title)  # Make sure title is on top of grid
        self.play(
            FadeOut(title),
            FadeIn(grid_title, shift=UP),
            Create(grid, run_time=3, lag_ratio=0.1),
        )
        self.wait()

        grid_transform_title = Tex(
            r"That was a non-linear function \\ applied to the grid",
        )
        grid_transform_title.move_to(grid_title, UL)
        grid.prepare_for_nonlinear_transform()
        # Warp every grid point by (x, y) -> (x + sin(y), y + sin(x)).
        self.play(
            grid.animate.apply_function(
                lambda p: p
                + np.array(
                    [
                        np.sin(p[1]),
                        np.sin(p[0]),
                        0,
                    ],
                ),
            ),
            run_time=3,
        )
        self.wait()
        self.play(Transform(grid_title, grid_transform_title))
        self.wait()
class SquareToCircle(Scene):
    """Morph a flipped, tilted square into a translucent pink circle, then fade it out."""

    def construct(self):
        # Starting shape: a square mirrored about RIGHT and rotated by -3/8 turn.
        source = Square()
        source.flip(RIGHT)
        source.rotate(-3 * TAU / 8)

        # Target shape: a circle with a half-opaque pink fill.
        target = Circle()
        target.set_fill(PINK, opacity=0.5)

        # Draw, morph, remove.
        self.play(Create(source))
        self.play(Transform(source, target))
        self.play(FadeOut(source))
class WarpSquare(Scene):
    """Apply the complex exponential pointwise to a square's outline."""

    def construct(self):
        def warp(point):
            # View the 3D point as a complex number, exponentiate, map back.
            return complex_to_R3(np.exp(R3_to_complex(point)))

        self.play(
            ApplyPointwiseFunction(
                warp,
                Square(),
            ),
        )
        self.wait()
class WriteStuff(Scene):
    """Write a colored Tex caption and a MathTex formula, stacked and fit to the frame."""

    def construct(self):
        caption = Tex("This is a some text", tex_to_color_map={"text": YELLOW})
        formula = MathTex(
            "\\sum_{k=1}^\\infty {1 \\over k^2} = {\\pi^2 \\over 6}",
        )

        stack = VGroup(caption, formula)
        stack.arrange(DOWN)
        # Fit the pair to the frame, leaving a LARGE_BUFF margin on each side.
        stack.width = config["frame_width"] - 2 * LARGE_BUFF

        # Write the caption first, then the formula.
        for mobject in (caption, formula):
            self.play(Write(mobject))
        self.wait()
class UpdatersExample(Scene):
    """A decimal readout that tracks a square's vertical position via updaters."""

    def construct(self):
        readout = DecimalNumber(
            0,
            show_ellipsis=True,
            num_decimal_places=3,
            include_sign=True,
        )
        box = Square().to_edge(UP)

        # Keep the readout glued to the square's right side...
        def stay_beside_box(mob):
            mob.next_to(box, RIGHT)

        # ...and showing the square's current y coordinate.
        def show_box_height(mob):
            mob.set_value(box.get_center()[1])

        readout.add_updater(stay_beside_box)
        readout.add_updater(show_box_height)

        self.add(box, readout)
        self.play(
            box.animate.to_edge(DOWN),
            rate_func=there_and_back,
            run_time=5,
        )
        self.wait()
class SpiralInExample(Scene):
    """Spiral several colored shapes and a pi symbol into view, then fade them out."""

    def construct(self):
        # Manim-logo color palette.
        logo_green = "#81b29a"
        logo_blue = "#454866"
        logo_red = "#e07a5f"
        font_color = "#ece6e2"

        pi = MathTex(r"\pi").scale(7).set_color(font_color)
        pi.shift(2.25 * LEFT + 1.5 * UP)

        # Stroke-less filled shapes, offset around the frame.
        circle = Circle(color=logo_green, fill_opacity=0.7, stroke_width=0).shift(LEFT)
        square = Square(color=logo_blue, fill_opacity=0.8, stroke_width=0).shift(UP)
        triangle = Triangle(color=logo_red, fill_opacity=0.9, stroke_width=0).shift(
            RIGHT
        )
        # Regular pentagon: five unit-circle vertices at angles 2*pi*i/5.
        pentagon = Polygon(
            *[
                [np.cos(2 * np.pi / 5 * i), np.sin(2 * np.pi / 5 * i), 0]
                for i in range(5)
            ],
            color=PURPLE_B,
            fill_opacity=1,
            stroke_width=0
        ).shift(UP + 2 * RIGHT)

        shapes = VGroup(triangle, square, circle, pentagon, pi)
        self.play(SpiralIn(shapes, fade_in_fraction=0.9))
        self.wait()
        self.play(FadeOut(shapes))
# See many more examples at https://docs.manim.community/en/stable/examples.html
| 29.975758 | 87 | 0.558229 |
from manim import *
class OpeningManim(Scene):
def construct(self):
title = Tex(r"This is some \LaTeX")
basel = MathTex(r"\sum_{n=1}^\infty \frac{1}{n^2} = \frac{\pi^2}{6}")
VGroup(title, basel).arrange(DOWN)
self.play(
Write(title),
FadeIn(basel, shift=DOWN),
)
self.wait()
transform_title = Tex("That was a transform")
transform_title.to_corner(UP + LEFT)
self.play(
Transform(title, transform_title),
LaggedStart(*(FadeOut(obj, shift=DOWN) for obj in basel)),
)
self.wait()
grid = NumberPlane()
grid_title = Tex("This is a grid", font_size=72)
grid_title.move_to(transform_title)
self.add(grid, grid_title)
self.play(
FadeOut(title),
FadeIn(grid_title, shift=UP),
Create(grid, run_time=3, lag_ratio=0.1),
)
self.wait()
grid_transform_title = Tex(
r"That was a non-linear function \\ applied to the grid",
)
grid_transform_title.move_to(grid_title, UL)
grid.prepare_for_nonlinear_transform()
self.play(
grid.animate.apply_function(
lambda p: p
+ np.array(
[
np.sin(p[1]),
np.sin(p[0]),
0,
],
),
),
run_time=3,
)
self.wait()
self.play(Transform(grid_title, grid_transform_title))
self.wait()
class SquareToCircle(Scene):
def construct(self):
circle = Circle()
square = Square()
square.flip(RIGHT)
square.rotate(-3 * TAU / 8)
circle.set_fill(PINK, opacity=0.5)
self.play(Create(square))
self.play(Transform(square, circle))
self.play(FadeOut(square))
class WarpSquare(Scene):
def construct(self):
square = Square()
self.play(
ApplyPointwiseFunction(
lambda point: complex_to_R3(np.exp(R3_to_complex(point))),
square,
),
)
self.wait()
class WriteStuff(Scene):
def construct(self):
example_text = Tex("This is a some text", tex_to_color_map={"text": YELLOW})
example_tex = MathTex(
"\\sum_{k=1}^\\infty {1 \\over k^2} = {\\pi^2 \\over 6}",
)
group = VGroup(example_text, example_tex)
group.arrange(DOWN)
group.width = config["frame_width"] - 2 * LARGE_BUFF
self.play(Write(example_text))
self.play(Write(example_tex))
self.wait()
class UpdatersExample(Scene):
def construct(self):
decimal = DecimalNumber(
0,
show_ellipsis=True,
num_decimal_places=3,
include_sign=True,
)
square = Square().to_edge(UP)
decimal.add_updater(lambda d: d.next_to(square, RIGHT))
decimal.add_updater(lambda d: d.set_value(square.get_center()[1]))
self.add(square, decimal)
self.play(
square.animate.to_edge(DOWN),
rate_func=there_and_back,
run_time=5,
)
self.wait()
class SpiralInExample(Scene):
def construct(self):
logo_green = "#81b29a"
logo_blue = "#454866"
logo_red = "#e07a5f"
font_color = "#ece6e2"
pi = MathTex(r"\pi").scale(7).set_color(font_color)
pi.shift(2.25 * LEFT + 1.5 * UP)
circle = Circle(color=logo_green, fill_opacity=0.7, stroke_width=0).shift(LEFT)
square = Square(color=logo_blue, fill_opacity=0.8, stroke_width=0).shift(UP)
triangle = Triangle(color=logo_red, fill_opacity=0.9, stroke_width=0).shift(
RIGHT
)
pentagon = Polygon(
*[
[np.cos(2 * np.pi / 5 * i), np.sin(2 * np.pi / 5 * i), 0]
for i in range(5)
],
color=PURPLE_B,
fill_opacity=1,
stroke_width=0
).shift(UP + 2 * RIGHT)
shapes = VGroup(triangle, square, circle, pentagon, pi)
self.play(SpiralIn(shapes, fade_in_fraction=0.9))
self.wait()
self.play(FadeOut(shapes))
| true | true |
f7257ab9bbdab0ca1a82849244a703ef451ec023 | 87,342 | py | Python | venv/lib/python3.8/site-packages/vsts/work_item_tracking/v4_1/work_item_tracking_client.py | amcclead7336/Enterprise_Data_Science_Final | ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28 | [
"Unlicense",
"MIT"
] | null | null | null | venv/lib/python3.8/site-packages/vsts/work_item_tracking/v4_1/work_item_tracking_client.py | amcclead7336/Enterprise_Data_Science_Final | ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28 | [
"Unlicense",
"MIT"
] | null | null | null | venv/lib/python3.8/site-packages/vsts/work_item_tracking/v4_1/work_item_tracking_client.py | amcclead7336/Enterprise_Data_Science_Final | ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28 | [
"Unlicense",
"MIT"
] | 2 | 2021-05-23T16:46:31.000Z | 2021-05-26T23:51:09.000Z | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest import Serializer, Deserializer
from ...vss_client import VssClient
from . import models
class WorkItemTrackingClient(VssClient):
"""WorkItemTracking
:param str base_url: Service URL
:param Authentication creds: Authenticated credentials.
"""
def __init__(self, base_url=None, creds=None):
    """Wire up the REST pipeline and msrest (de)serializers for this client."""
    super(WorkItemTrackingClient, self).__init__(base_url, creds)
    # Collect every model class exported by the generated models module so
    # msrest can map wire payloads to/from typed objects by type name.
    client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
    self._serialize = Serializer(client_models)
    self._deserialize = Deserializer(client_models)

# GUID of the work-item-tracking resource area, used for request routing.
resource_area_identifier = '5264459e-e5e0-4bd8-b118-0985e68a4ec5'
def get_work_artifact_link_types(self):
    """GetWorkArtifactLinkTypes.
    [Preview API] Get the list of work item tracking outbound artifact link types.
    :rtype: [WorkArtifactLink]
    """
    # No route or query parameters; the collection wrapper is unwrapped before
    # deserializing into the model list.
    response = self._send(http_method='GET',
                          location_id='1a31de40-e318-41cd-a6c6-881077df52e3',
                          version='4.1-preview.1')
    return self._deserialize('[WorkArtifactLink]', self._unwrap_collection(response))
def query_work_items_for_artifact_uris(self, artifact_uri_query, project=None):
    """QueryWorkItemsForArtifactUris.
    [Preview API] Queries work items linked to a given list of artifact URI.
    :param :class:`<ArtifactUriQuery> <work-item-tracking.v4_1.models.ArtifactUriQuery>` artifact_uri_query: Defines a list of artifact URI for querying work items.
    :param str project: Project ID or project name
    :rtype: :class:`<ArtifactUriQueryResult> <work-item-tracking.v4_1.models.ArtifactUriQueryResult>`
    """
    route_values = {}
    if project is not None:
        route_values['project'] = self._serialize.url('project', project, 'str')
    # The query document is serialized into the POST body.
    content = self._serialize.body(artifact_uri_query, 'ArtifactUriQuery')
    response = self._send(http_method='POST',
                          location_id='a9a9aa7a-8c09-44d3-ad1b-46e855c1e3d3',
                          version='4.1-preview.1',
                          route_values=route_values,
                          content=content)
    return self._deserialize('ArtifactUriQueryResult', response)
def create_attachment(self, upload_stream, project=None, file_name=None, upload_type=None, area_path=None, **kwargs):
    """CreateAttachment.
    Uploads an attachment.
    :param object upload_stream: Stream to upload
    :param str project: Project ID or project name
    :param str file_name: The name of the file
    :param str upload_type: Attachment upload type: Simple or Chunked
    :param str area_path: Target project Area Path
    :rtype: :class:`<AttachmentReference> <work-item-tracking.v4_1.models.AttachmentReference>`
    """
    route_values = {}
    if project is not None:
        route_values['project'] = self._serialize.url('project', project, 'str')
    query_parameters = {}
    if file_name is not None:
        query_parameters['fileName'] = self._serialize.query('file_name', file_name, 'str')
    if upload_type is not None:
        query_parameters['uploadType'] = self._serialize.query('upload_type', upload_type, 'str')
    if area_path is not None:
        query_parameters['areaPath'] = self._serialize.query('area_path', area_path, 'str')
    # Optional progress callback for the streamed upload.
    if "callback" in kwargs:
        callback = kwargs["callback"]
    else:
        callback = None
    content = self._client.stream_upload(upload_stream, callback=callback)
    # The body is sent as raw bytes (octet-stream), not JSON.
    response = self._send(http_method='POST',
                          location_id='e07b5fa4-1499-494d-a496-64b860fd64ff',
                          version='4.1',
                          route_values=route_values,
                          query_parameters=query_parameters,
                          content=content,
                          media_type='application/octet-stream')
    return self._deserialize('AttachmentReference', response)
def get_attachment_content(self, id, project=None, file_name=None, download=None, **kwargs):
    """GetAttachmentContent.
    Downloads an attachment.
    :param str id: Attachment ID
    :param str project: Project ID or project name
    :param str file_name: Name of the file
    :param bool download: If set to <c>true</c> always download attachment
    :rtype: object
    """
    route_values = {}
    if project is not None:
        route_values['project'] = self._serialize.url('project', project, 'str')
    if id is not None:
        route_values['id'] = self._serialize.url('id', id, 'str')
    query_parameters = {}
    if file_name is not None:
        query_parameters['fileName'] = self._serialize.query('file_name', file_name, 'str')
    if download is not None:
        query_parameters['download'] = self._serialize.query('download', download, 'bool')
    # Request the raw bytes of the attachment.
    response = self._send(http_method='GET',
                          location_id='e07b5fa4-1499-494d-a496-64b860fd64ff',
                          version='4.1',
                          route_values=route_values,
                          query_parameters=query_parameters,
                          accept_media_type='application/octet-stream')
    if "callback" in kwargs:
        callback = kwargs["callback"]
    else:
        callback = None
    # Returns a streamed download (chunks), not a fully buffered body.
    return self._client.stream_download(response, callback=callback)
def get_attachment_zip(self, id, project=None, file_name=None, download=None, **kwargs):
    """GetAttachmentZip.
    Downloads an attachment.
    :param str id: Attachment ID
    :param str project: Project ID or project name
    :param str file_name: Name of the file
    :param bool download: If set to <c>true</c> always download attachment
    :rtype: object
    """
    route_values = {}
    if project is not None:
        route_values['project'] = self._serialize.url('project', project, 'str')
    if id is not None:
        route_values['id'] = self._serialize.url('id', id, 'str')
    query_parameters = {}
    if file_name is not None:
        query_parameters['fileName'] = self._serialize.query('file_name', file_name, 'str')
    if download is not None:
        query_parameters['download'] = self._serialize.query('download', download, 'bool')
    # Same endpoint as get_attachment_content, but requests a zip payload.
    response = self._send(http_method='GET',
                          location_id='e07b5fa4-1499-494d-a496-64b860fd64ff',
                          version='4.1',
                          route_values=route_values,
                          query_parameters=query_parameters,
                          accept_media_type='application/zip')
    if "callback" in kwargs:
        callback = kwargs["callback"]
    else:
        callback = None
    # Returns a streamed download (chunks), not a fully buffered body.
    return self._client.stream_download(response, callback=callback)
def get_classification_nodes(self, project, ids, depth=None, error_policy=None):
    """GetClassificationNodes.
    Gets root classification nodes or list of classification nodes for a given list of nodes ids, for a given project. In case ids parameter is supplied you will get list of classification nodes for those ids. Otherwise you will get root classification nodes for this project.
    :param str project: Project ID or project name
    :param [int] ids: Comma seperated integer classification nodes ids. It's not required, if you want root nodes.
    :param int depth: Depth of children to fetch.
    :param str error_policy: Flag to handle errors in getting some nodes. Possible options are Fail and Omit.
    :rtype: [WorkItemClassificationNode]
    """
    route_values = {}
    if project is not None:
        route_values['project'] = self._serialize.url('project', project, 'str')
    query_parameters = {}
    if ids is not None:
        # The service expects the ids as one comma-separated string.
        ids = ",".join(map(str, ids))
        query_parameters['ids'] = self._serialize.query('ids', ids, 'str')
    if depth is not None:
        query_parameters['$depth'] = self._serialize.query('depth', depth, 'int')
    if error_policy is not None:
        query_parameters['errorPolicy'] = self._serialize.query('error_policy', error_policy, 'str')
    response = self._send(http_method='GET',
                          location_id='a70579d1-f53a-48ee-a5be-7be8659023b9',
                          version='4.1',
                          route_values=route_values,
                          query_parameters=query_parameters)
    return self._deserialize('[WorkItemClassificationNode]', self._unwrap_collection(response))
def get_root_nodes(self, project, depth=None):
    """GetRootNodes.
    Gets root classification nodes under the project.
    :param str project: Project ID or project name
    :param int depth: Depth of children to fetch.
    :rtype: [WorkItemClassificationNode]
    """
    route_values = {}
    if project is not None:
        route_values['project'] = self._serialize.url('project', project, 'str')
    query_parameters = {}
    if depth is not None:
        query_parameters['$depth'] = self._serialize.query('depth', depth, 'int')
    # Same resource as get_classification_nodes, with no ids supplied.
    response = self._send(http_method='GET',
                          location_id='a70579d1-f53a-48ee-a5be-7be8659023b9',
                          version='4.1',
                          route_values=route_values,
                          query_parameters=query_parameters)
    return self._deserialize('[WorkItemClassificationNode]', self._unwrap_collection(response))
def create_or_update_classification_node(self, posted_node, project, structure_group, path=None):
    """CreateOrUpdateClassificationNode.
    Create new or update an existing classification node.
    :param :class:`<WorkItemClassificationNode> <work-item-tracking.v4_1.models.WorkItemClassificationNode>` posted_node: Node to create or update.
    :param str project: Project ID or project name
    :param TreeStructureGroup structure_group: Structure group of the classification node, area or iteration.
    :param str path: Path of the classification node.
    :rtype: :class:`<WorkItemClassificationNode> <work-item-tracking.v4_1.models.WorkItemClassificationNode>`
    """
    route_values = {}
    if project is not None:
        route_values['project'] = self._serialize.url('project', project, 'str')
    if structure_group is not None:
        route_values['structureGroup'] = self._serialize.url('structure_group', structure_group, 'TreeStructureGroup')
    if path is not None:
        route_values['path'] = self._serialize.url('path', path, 'str')
    # POST with the node document as the body; the saved node is returned.
    content = self._serialize.body(posted_node, 'WorkItemClassificationNode')
    response = self._send(http_method='POST',
                          location_id='5a172953-1b41-49d3-840a-33f79c3ce89f',
                          version='4.1',
                          route_values=route_values,
                          content=content)
    return self._deserialize('WorkItemClassificationNode', response)
def delete_classification_node(self, project, structure_group, path=None, reclassify_id=None):
    """DeleteClassificationNode.
    Delete an existing classification node.
    :param str project: Project ID or project name
    :param TreeStructureGroup structure_group: Structure group of the classification node, area or iteration.
    :param str path: Path of the classification node.
    :param int reclassify_id: Id of the target classification node for reclassification.
    """
    route_values = {}
    if project is not None:
        route_values['project'] = self._serialize.url('project', project, 'str')
    if structure_group is not None:
        route_values['structureGroup'] = self._serialize.url('structure_group', structure_group, 'TreeStructureGroup')
    if path is not None:
        route_values['path'] = self._serialize.url('path', path, 'str')
    query_parameters = {}
    if reclassify_id is not None:
        query_parameters['$reclassifyId'] = self._serialize.query('reclassify_id', reclassify_id, 'int')
    # DELETE has no response body; nothing is returned.
    self._send(http_method='DELETE',
               location_id='5a172953-1b41-49d3-840a-33f79c3ce89f',
               version='4.1',
               route_values=route_values,
               query_parameters=query_parameters)
def get_classification_node(self, project, structure_group, path=None, depth=None):
    """GetClassificationNode.
    Gets the classification node for a given node path.
    :param str project: Project ID or project name
    :param TreeStructureGroup structure_group: Structure group of the classification node, area or iteration.
    :param str path: Path of the classification node.
    :param int depth: Depth of children to fetch.
    :rtype: :class:`<WorkItemClassificationNode> <work-item-tracking.v4_1.models.WorkItemClassificationNode>`
    """
    route_values = {}
    if project is not None:
        route_values['project'] = self._serialize.url('project', project, 'str')
    if structure_group is not None:
        route_values['structureGroup'] = self._serialize.url('structure_group', structure_group, 'TreeStructureGroup')
    if path is not None:
        route_values['path'] = self._serialize.url('path', path, 'str')
    query_parameters = {}
    if depth is not None:
        query_parameters['$depth'] = self._serialize.query('depth', depth, 'int')
    response = self._send(http_method='GET',
                          location_id='5a172953-1b41-49d3-840a-33f79c3ce89f',
                          version='4.1',
                          route_values=route_values,
                          query_parameters=query_parameters)
    return self._deserialize('WorkItemClassificationNode', response)
def update_classification_node(self, posted_node, project, structure_group, path=None):
    """UpdateClassificationNode.
    Update an existing classification node.
    :param :class:`<WorkItemClassificationNode> <work-item-tracking.v4_1.models.WorkItemClassificationNode>` posted_node: Node to create or update.
    :param str project: Project ID or project name
    :param TreeStructureGroup structure_group: Structure group of the classification node, area or iteration.
    :param str path: Path of the classification node.
    :rtype: :class:`<WorkItemClassificationNode> <work-item-tracking.v4_1.models.WorkItemClassificationNode>`
    """
    route_values = {}
    if project is not None:
        route_values['project'] = self._serialize.url('project', project, 'str')
    if structure_group is not None:
        route_values['structureGroup'] = self._serialize.url('structure_group', structure_group, 'TreeStructureGroup')
    if path is not None:
        route_values['path'] = self._serialize.url('path', path, 'str')
    content = self._serialize.body(posted_node, 'WorkItemClassificationNode')
    # PATCH (partial update), unlike create_or_update_classification_node's POST.
    response = self._send(http_method='PATCH',
                          location_id='5a172953-1b41-49d3-840a-33f79c3ce89f',
                          version='4.1',
                          route_values=route_values,
                          content=content)
    return self._deserialize('WorkItemClassificationNode', response)
def get_comment(self, id, revision, project=None):
    """GetComment.
    [Preview API] Gets a comment for a work item at the specified revision.
    :param int id: Work item id
    :param int revision: Revision for which the comment need to be fetched
    :param str project: Project ID or project name
    :rtype: :class:`<WorkItemComment> <work-item-tracking.v4_1.models.WorkItemComment>`
    """
    route_values = {}
    if project is not None:
        route_values['project'] = self._serialize.url('project', project, 'str')
    if id is not None:
        route_values['id'] = self._serialize.url('id', id, 'int')
    if revision is not None:
        route_values['revision'] = self._serialize.url('revision', revision, 'int')
    response = self._send(http_method='GET',
                          location_id='19335ae7-22f7-4308-93d8-261f9384b7cf',
                          version='4.1-preview.2',
                          route_values=route_values)
    return self._deserialize('WorkItemComment', response)
def get_comments(self, id, project=None, from_revision=None, top=None, order=None):
    """GetComments.
    [Preview API] Gets the specified number of comments for a work item from the specified revision.
    :param int id: Work item id
    :param str project: Project ID or project name
    :param int from_revision: Revision from which comments are to be fetched (default is 1)
    :param int top: The number of comments to return (default is 200)
    :param str order: Ascending or descending by revision id (default is ascending)
    :rtype: :class:`<WorkItemComments> <work-item-tracking.v4_1.models.WorkItemComments>`
    """
    route_values = {}
    if project is not None:
        route_values['project'] = self._serialize.url('project', project, 'str')
    if id is not None:
        route_values['id'] = self._serialize.url('id', id, 'int')
    # Paging/ordering are query parameters; defaults are applied server-side.
    query_parameters = {}
    if from_revision is not None:
        query_parameters['fromRevision'] = self._serialize.query('from_revision', from_revision, 'int')
    if top is not None:
        query_parameters['$top'] = self._serialize.query('top', top, 'int')
    if order is not None:
        query_parameters['order'] = self._serialize.query('order', order, 'str')
    response = self._send(http_method='GET',
                          location_id='19335ae7-22f7-4308-93d8-261f9384b7cf',
                          version='4.1-preview.2',
                          route_values=route_values,
                          query_parameters=query_parameters)
    return self._deserialize('WorkItemComments', response)
def delete_field(self, field_name_or_ref_name, project=None):
    """DeleteField.
    Deletes the field.
    :param str field_name_or_ref_name: Field simple name or reference name
    :param str project: Project ID or project name
    """
    route_values = {}
    if project is not None:
        route_values['project'] = self._serialize.url('project', project, 'str')
    if field_name_or_ref_name is not None:
        route_values['fieldNameOrRefName'] = self._serialize.url('field_name_or_ref_name', field_name_or_ref_name, 'str')
    # DELETE has no response body; nothing is returned.
    self._send(http_method='DELETE',
               location_id='b51fd764-e5c2-4b9b-aaf7-3395cf4bdd94',
               version='4.1',
               route_values=route_values)
def get_field(self, field_name_or_ref_name, project=None):
    """GetField.
    Gets information on a specific field.
    :param str field_name_or_ref_name: Field simple name or reference name
    :param str project: Project ID or project name
    :rtype: :class:`<WorkItemField> <work-item-tracking.v4_1.models.WorkItemField>`
    """
    route_values = {}
    if project is not None:
        route_values['project'] = self._serialize.url('project', project, 'str')
    if field_name_or_ref_name is not None:
        route_values['fieldNameOrRefName'] = self._serialize.url('field_name_or_ref_name', field_name_or_ref_name, 'str')
    response = self._send(http_method='GET',
                          location_id='b51fd764-e5c2-4b9b-aaf7-3395cf4bdd94',
                          version='4.1',
                          route_values=route_values)
    return self._deserialize('WorkItemField', response)
def get_fields(self, project=None, expand=None):
    """GetFields.
    Returns information for all fields.
    :param str project: Project ID or project name
    :param str expand: Use ExtensionFields to include extension fields, otherwise exclude them. Unless the feature flag for this parameter is enabled, extension fields are always included.
    :rtype: [WorkItemField]
    """
    route_values = {}
    if project is not None:
        route_values['project'] = self._serialize.url('project', project, 'str')
    query_parameters = {}
    if expand is not None:
        query_parameters['$expand'] = self._serialize.query('expand', expand, 'str')
    response = self._send(http_method='GET',
                          location_id='b51fd764-e5c2-4b9b-aaf7-3395cf4bdd94',
                          version='4.1',
                          route_values=route_values,
                          query_parameters=query_parameters)
    return self._deserialize('[WorkItemField]', self._unwrap_collection(response))
def update_field(self, work_item_field, field_name_or_ref_name, project=None):
    """UpdateField.
    Updates the field.
    :param :class:`<WorkItemField> <work-item-tracking.v4_1.models.WorkItemField>` work_item_field: New field definition
    :param str field_name_or_ref_name: Field simple name or reference name
    :param str project: Project ID or project name
    """
    route_values = {}
    if project is not None:
        route_values['project'] = self._serialize.url('project', project, 'str')
    if field_name_or_ref_name is not None:
        route_values['fieldNameOrRefName'] = self._serialize.url('field_name_or_ref_name', field_name_or_ref_name, 'str')
    content = self._serialize.body(work_item_field, 'WorkItemField')
    # PATCH with no response body; nothing is returned.
    self._send(http_method='PATCH',
               location_id='b51fd764-e5c2-4b9b-aaf7-3395cf4bdd94',
               version='4.1',
               route_values=route_values,
               content=content)
def create_query(self, posted_query, project, query):
    """CreateQuery.
    Creates a query, or moves a query.
    :param :class:`<QueryHierarchyItem> <work-item-tracking.v4_1.models.QueryHierarchyItem>` posted_query: The query to create.
    :param str project: Project ID or project name
    :param str query: The parent path for the query to create.
    :rtype: :class:`<QueryHierarchyItem> <work-item-tracking.v4_1.models.QueryHierarchyItem>`
    """
    route_values = {}
    if project is not None:
        route_values['project'] = self._serialize.url('project', project, 'str')
    if query is not None:
        # Note: the route value is the PARENT path; the new query is in the body.
        route_values['query'] = self._serialize.url('query', query, 'str')
    content = self._serialize.body(posted_query, 'QueryHierarchyItem')
    response = self._send(http_method='POST',
                          location_id='a67d190c-c41f-424b-814d-0e906f659301',
                          version='4.1',
                          route_values=route_values,
                          content=content)
    return self._deserialize('QueryHierarchyItem', response)
def delete_query(self, project, query):
    """DeleteQuery.
    Delete a query or a folder. This deletes any permission change on the deleted query or folder and any of its descendants if it is a folder. It is important to note that the deleted permission changes cannot be recovered upon undeleting the query or folder.
    :param str project: Project ID or project name
    :param str query: ID or path of the query or folder to delete.
    """
    route_values = {}
    if project is not None:
        route_values['project'] = self._serialize.url('project', project, 'str')
    if query is not None:
        route_values['query'] = self._serialize.url('query', query, 'str')
    # DELETE has no response body; nothing is returned.
    self._send(http_method='DELETE',
               location_id='a67d190c-c41f-424b-814d-0e906f659301',
               version='4.1',
               route_values=route_values)
def get_queries(self, project, expand=None, depth=None, include_deleted=None):
"""GetQueries.
Gets the root queries and their children
:param str project: Project ID or project name
:param str expand: Include the query string (wiql), clauses, query result columns, and sort options in the results.
:param int depth: In the folder of queries, return child queries and folders to this depth.
:param bool include_deleted: Include deleted queries and folders
:rtype: [QueryHierarchyItem]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
query_parameters = {}
if expand is not None:
query_parameters['$expand'] = self._serialize.query('expand', expand, 'str')
if depth is not None:
query_parameters['$depth'] = self._serialize.query('depth', depth, 'int')
if include_deleted is not None:
query_parameters['$includeDeleted'] = self._serialize.query('include_deleted', include_deleted, 'bool')
response = self._send(http_method='GET',
location_id='a67d190c-c41f-424b-814d-0e906f659301',
version='4.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[QueryHierarchyItem]', self._unwrap_collection(response))
def get_query(self, project, query, expand=None, depth=None, include_deleted=None):
"""GetQuery.
Retrieves an individual query and its children
:param str project: Project ID or project name
:param str query:
:param str expand: Include the query string (wiql), clauses, query result columns, and sort options in the results.
:param int depth: In the folder of queries, return child queries and folders to this depth.
:param bool include_deleted: Include deleted queries and folders
:rtype: :class:`<QueryHierarchyItem> <work-item-tracking.v4_1.models.QueryHierarchyItem>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if query is not None:
route_values['query'] = self._serialize.url('query', query, 'str')
query_parameters = {}
if expand is not None:
query_parameters['$expand'] = self._serialize.query('expand', expand, 'str')
if depth is not None:
query_parameters['$depth'] = self._serialize.query('depth', depth, 'int')
if include_deleted is not None:
query_parameters['$includeDeleted'] = self._serialize.query('include_deleted', include_deleted, 'bool')
response = self._send(http_method='GET',
location_id='a67d190c-c41f-424b-814d-0e906f659301',
version='4.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('QueryHierarchyItem', response)
def search_queries(self, project, filter, top=None, expand=None, include_deleted=None):
"""SearchQueries.
Searches all queries the user has access to in the current project
:param str project: Project ID or project name
:param str filter: The text to filter the queries with.
:param int top: The number of queries to return (Default is 50 and maximum is 200).
:param str expand:
:param bool include_deleted: Include deleted queries and folders
:rtype: :class:`<QueryHierarchyItemsResult> <work-item-tracking.v4_1.models.QueryHierarchyItemsResult>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
query_parameters = {}
if filter is not None:
query_parameters['$filter'] = self._serialize.query('filter', filter, 'str')
if top is not None:
query_parameters['$top'] = self._serialize.query('top', top, 'int')
if expand is not None:
query_parameters['$expand'] = self._serialize.query('expand', expand, 'str')
if include_deleted is not None:
query_parameters['$includeDeleted'] = self._serialize.query('include_deleted', include_deleted, 'bool')
response = self._send(http_method='GET',
location_id='a67d190c-c41f-424b-814d-0e906f659301',
version='4.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('QueryHierarchyItemsResult', response)
def update_query(self, query_update, project, query, undelete_descendants=None):
"""UpdateQuery.
Update a query or a folder. This allows you to update, rename and move queries and folders.
:param :class:`<QueryHierarchyItem> <work-item-tracking.v4_1.models.QueryHierarchyItem>` query_update: The query to update.
:param str project: Project ID or project name
:param str query: The path for the query to update.
:param bool undelete_descendants: Undelete the children of this folder. It is important to note that this will not bring back the permission changes that were previously applied to the descendants.
:rtype: :class:`<QueryHierarchyItem> <work-item-tracking.v4_1.models.QueryHierarchyItem>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if query is not None:
route_values['query'] = self._serialize.url('query', query, 'str')
query_parameters = {}
if undelete_descendants is not None:
query_parameters['$undeleteDescendants'] = self._serialize.query('undelete_descendants', undelete_descendants, 'bool')
content = self._serialize.body(query_update, 'QueryHierarchyItem')
response = self._send(http_method='PATCH',
location_id='a67d190c-c41f-424b-814d-0e906f659301',
version='4.1',
route_values=route_values,
query_parameters=query_parameters,
content=content)
return self._deserialize('QueryHierarchyItem', response)
def destroy_work_item(self, id, project=None):
"""DestroyWorkItem.
Destroys the specified work item permanently from the Recycle Bin. This action can not be undone.
:param int id: ID of the work item to be destroyed permanently
:param str project: Project ID or project name
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if id is not None:
route_values['id'] = self._serialize.url('id', id, 'int')
self._send(http_method='DELETE',
location_id='b70d8d39-926c-465e-b927-b1bf0e5ca0e0',
version='4.1',
route_values=route_values)
def get_deleted_work_item(self, id, project=None):
"""GetDeletedWorkItem.
Gets a deleted work item from Recycle Bin.
:param int id: ID of the work item to be returned
:param str project: Project ID or project name
:rtype: :class:`<WorkItemDelete> <work-item-tracking.v4_1.models.WorkItemDelete>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if id is not None:
route_values['id'] = self._serialize.url('id', id, 'int')
response = self._send(http_method='GET',
location_id='b70d8d39-926c-465e-b927-b1bf0e5ca0e0',
version='4.1',
route_values=route_values)
return self._deserialize('WorkItemDelete', response)
def get_deleted_work_items(self, ids, project=None):
"""GetDeletedWorkItems.
Gets the work items from the recycle bin, whose IDs have been specified in the parameters
:param [int] ids: Comma separated list of IDs of the deleted work items to be returned
:param str project: Project ID or project name
:rtype: [WorkItemDeleteReference]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
query_parameters = {}
if ids is not None:
ids = ",".join(map(str, ids))
query_parameters['ids'] = self._serialize.query('ids', ids, 'str')
response = self._send(http_method='GET',
location_id='b70d8d39-926c-465e-b927-b1bf0e5ca0e0',
version='4.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[WorkItemDeleteReference]', self._unwrap_collection(response))
def get_deleted_work_item_shallow_references(self, project=None):
"""GetDeletedWorkItemShallowReferences.
Gets a list of the IDs and the URLs of the deleted the work items in the Recycle Bin.
:param str project: Project ID or project name
:rtype: [WorkItemDeleteShallowReference]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
response = self._send(http_method='GET',
location_id='b70d8d39-926c-465e-b927-b1bf0e5ca0e0',
version='4.1',
route_values=route_values)
return self._deserialize('[WorkItemDeleteShallowReference]', self._unwrap_collection(response))
def restore_work_item(self, payload, id, project=None):
"""RestoreWorkItem.
Restores the deleted work item from Recycle Bin.
:param :class:`<WorkItemDeleteUpdate> <work-item-tracking.v4_1.models.WorkItemDeleteUpdate>` payload: Paylod with instructions to update the IsDeleted flag to false
:param int id: ID of the work item to be restored
:param str project: Project ID or project name
:rtype: :class:`<WorkItemDelete> <work-item-tracking.v4_1.models.WorkItemDelete>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if id is not None:
route_values['id'] = self._serialize.url('id', id, 'int')
content = self._serialize.body(payload, 'WorkItemDeleteUpdate')
response = self._send(http_method='PATCH',
location_id='b70d8d39-926c-465e-b927-b1bf0e5ca0e0',
version='4.1',
route_values=route_values,
content=content)
return self._deserialize('WorkItemDelete', response)
def get_revision(self, id, revision_number, expand=None):
"""GetRevision.
Returns a fully hydrated work item for the requested revision
:param int id:
:param int revision_number:
:param str expand:
:rtype: :class:`<WorkItem> <work-item-tracking.v4_1.models.WorkItem>`
"""
route_values = {}
if id is not None:
route_values['id'] = self._serialize.url('id', id, 'int')
if revision_number is not None:
route_values['revisionNumber'] = self._serialize.url('revision_number', revision_number, 'int')
query_parameters = {}
if expand is not None:
query_parameters['$expand'] = self._serialize.query('expand', expand, 'str')
response = self._send(http_method='GET',
location_id='a00c85a5-80fa-4565-99c3-bcd2181434bb',
version='4.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('WorkItem', response)
def get_revisions(self, id, top=None, skip=None, expand=None):
"""GetRevisions.
Returns the list of fully hydrated work item revisions, paged.
:param int id:
:param int top:
:param int skip:
:param str expand:
:rtype: [WorkItem]
"""
route_values = {}
if id is not None:
route_values['id'] = self._serialize.url('id', id, 'int')
query_parameters = {}
if top is not None:
query_parameters['$top'] = self._serialize.query('top', top, 'int')
if skip is not None:
query_parameters['$skip'] = self._serialize.query('skip', skip, 'int')
if expand is not None:
query_parameters['$expand'] = self._serialize.query('expand', expand, 'str')
response = self._send(http_method='GET',
location_id='a00c85a5-80fa-4565-99c3-bcd2181434bb',
version='4.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[WorkItem]', self._unwrap_collection(response))
def create_template(self, template, team_context):
"""CreateTemplate.
[Preview API] Creates a template
:param :class:`<WorkItemTemplate> <work-item-tracking.v4_1.models.WorkItemTemplate>` template: Template contents
:param :class:`<TeamContext> <work-item-tracking.v4_1.models.TeamContext>` team_context: The team context for the operation
:rtype: :class:`<WorkItemTemplate> <work-item-tracking.v4_1.models.WorkItemTemplate>`
"""
project = None
team = None
if team_context is not None:
if team_context.project_id:
project = team_context.project_id
else:
project = team_context.project
if team_context.team_id:
team = team_context.team_id
else:
team = team_context.team
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'string')
if team is not None:
route_values['team'] = self._serialize.url('team', team, 'string')
content = self._serialize.body(template, 'WorkItemTemplate')
response = self._send(http_method='POST',
location_id='6a90345f-a676-4969-afce-8e163e1d5642',
version='4.1-preview.1',
route_values=route_values,
content=content)
return self._deserialize('WorkItemTemplate', response)
def get_templates(self, team_context, workitemtypename=None):
"""GetTemplates.
[Preview API] Gets template
:param :class:`<TeamContext> <work-item-tracking.v4_1.models.TeamContext>` team_context: The team context for the operation
:param str workitemtypename: Optional, When specified returns templates for given Work item type.
:rtype: [WorkItemTemplateReference]
"""
project = None
team = None
if team_context is not None:
if team_context.project_id:
project = team_context.project_id
else:
project = team_context.project
if team_context.team_id:
team = team_context.team_id
else:
team = team_context.team
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'string')
if team is not None:
route_values['team'] = self._serialize.url('team', team, 'string')
query_parameters = {}
if workitemtypename is not None:
query_parameters['workitemtypename'] = self._serialize.query('workitemtypename', workitemtypename, 'str')
response = self._send(http_method='GET',
location_id='6a90345f-a676-4969-afce-8e163e1d5642',
version='4.1-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[WorkItemTemplateReference]', self._unwrap_collection(response))
def delete_template(self, team_context, template_id):
"""DeleteTemplate.
[Preview API] Deletes the template with given id
:param :class:`<TeamContext> <work-item-tracking.v4_1.models.TeamContext>` team_context: The team context for the operation
:param str template_id: Template id
"""
project = None
team = None
if team_context is not None:
if team_context.project_id:
project = team_context.project_id
else:
project = team_context.project
if team_context.team_id:
team = team_context.team_id
else:
team = team_context.team
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'string')
if team is not None:
route_values['team'] = self._serialize.url('team', team, 'string')
if template_id is not None:
route_values['templateId'] = self._serialize.url('template_id', template_id, 'str')
self._send(http_method='DELETE',
location_id='fb10264a-8836-48a0-8033-1b0ccd2748d5',
version='4.1-preview.1',
route_values=route_values)
def get_template(self, team_context, template_id):
"""GetTemplate.
[Preview API] Gets the template with specified id
:param :class:`<TeamContext> <work-item-tracking.v4_1.models.TeamContext>` team_context: The team context for the operation
:param str template_id: Template Id
:rtype: :class:`<WorkItemTemplate> <work-item-tracking.v4_1.models.WorkItemTemplate>`
"""
project = None
team = None
if team_context is not None:
if team_context.project_id:
project = team_context.project_id
else:
project = team_context.project
if team_context.team_id:
team = team_context.team_id
else:
team = team_context.team
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'string')
if team is not None:
route_values['team'] = self._serialize.url('team', team, 'string')
if template_id is not None:
route_values['templateId'] = self._serialize.url('template_id', template_id, 'str')
response = self._send(http_method='GET',
location_id='fb10264a-8836-48a0-8033-1b0ccd2748d5',
version='4.1-preview.1',
route_values=route_values)
return self._deserialize('WorkItemTemplate', response)
def replace_template(self, template_content, team_context, template_id):
"""ReplaceTemplate.
[Preview API] Replace template contents
:param :class:`<WorkItemTemplate> <work-item-tracking.v4_1.models.WorkItemTemplate>` template_content: Template contents to replace with
:param :class:`<TeamContext> <work-item-tracking.v4_1.models.TeamContext>` team_context: The team context for the operation
:param str template_id: Template id
:rtype: :class:`<WorkItemTemplate> <work-item-tracking.v4_1.models.WorkItemTemplate>`
"""
project = None
team = None
if team_context is not None:
if team_context.project_id:
project = team_context.project_id
else:
project = team_context.project
if team_context.team_id:
team = team_context.team_id
else:
team = team_context.team
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'string')
if team is not None:
route_values['team'] = self._serialize.url('team', team, 'string')
if template_id is not None:
route_values['templateId'] = self._serialize.url('template_id', template_id, 'str')
content = self._serialize.body(template_content, 'WorkItemTemplate')
response = self._send(http_method='PUT',
location_id='fb10264a-8836-48a0-8033-1b0ccd2748d5',
version='4.1-preview.1',
route_values=route_values,
content=content)
return self._deserialize('WorkItemTemplate', response)
def get_update(self, id, update_number):
"""GetUpdate.
Returns a single update for a work item
:param int id:
:param int update_number:
:rtype: :class:`<WorkItemUpdate> <work-item-tracking.v4_1.models.WorkItemUpdate>`
"""
route_values = {}
if id is not None:
route_values['id'] = self._serialize.url('id', id, 'int')
if update_number is not None:
route_values['updateNumber'] = self._serialize.url('update_number', update_number, 'int')
response = self._send(http_method='GET',
location_id='6570bf97-d02c-4a91-8d93-3abe9895b1a9',
version='4.1',
route_values=route_values)
return self._deserialize('WorkItemUpdate', response)
def get_updates(self, id, top=None, skip=None):
"""GetUpdates.
Returns a the deltas between work item revisions
:param int id:
:param int top:
:param int skip:
:rtype: [WorkItemUpdate]
"""
route_values = {}
if id is not None:
route_values['id'] = self._serialize.url('id', id, 'int')
query_parameters = {}
if top is not None:
query_parameters['$top'] = self._serialize.query('top', top, 'int')
if skip is not None:
query_parameters['$skip'] = self._serialize.query('skip', skip, 'int')
response = self._send(http_method='GET',
location_id='6570bf97-d02c-4a91-8d93-3abe9895b1a9',
version='4.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[WorkItemUpdate]', self._unwrap_collection(response))
def query_by_wiql(self, wiql, team_context=None, time_precision=None, top=None):
"""QueryByWiql.
Gets the results of the query given its WIQL.
:param :class:`<Wiql> <work-item-tracking.v4_1.models.Wiql>` wiql: The query containing the WIQL.
:param :class:`<TeamContext> <work-item-tracking.v4_1.models.TeamContext>` team_context: The team context for the operation
:param bool time_precision: Whether or not to use time precision.
:param int top: The max number of results to return.
:rtype: :class:`<WorkItemQueryResult> <work-item-tracking.v4_1.models.WorkItemQueryResult>`
"""
project = None
team = None
if team_context is not None:
if team_context.project_id:
project = team_context.project_id
else:
project = team_context.project
if team_context.team_id:
team = team_context.team_id
else:
team = team_context.team
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'string')
if team is not None:
route_values['team'] = self._serialize.url('team', team, 'string')
query_parameters = {}
if time_precision is not None:
query_parameters['timePrecision'] = self._serialize.query('time_precision', time_precision, 'bool')
if top is not None:
query_parameters['$top'] = self._serialize.query('top', top, 'int')
content = self._serialize.body(wiql, 'Wiql')
response = self._send(http_method='POST',
location_id='1a9c53f7-f243-4447-b110-35ef023636e4',
version='4.1',
route_values=route_values,
query_parameters=query_parameters,
content=content)
return self._deserialize('WorkItemQueryResult', response)
def get_query_result_count(self, id, team_context=None, time_precision=None):
"""GetQueryResultCount.
Gets the results of the query given the query ID.
:param str id: The query ID.
:param :class:`<TeamContext> <work-item-tracking.v4_1.models.TeamContext>` team_context: The team context for the operation
:param bool time_precision: Whether or not to use time precision.
:rtype: int
"""
project = None
team = None
if team_context is not None:
if team_context.project_id:
project = team_context.project_id
else:
project = team_context.project
if team_context.team_id:
team = team_context.team_id
else:
team = team_context.team
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'string')
if team is not None:
route_values['team'] = self._serialize.url('team', team, 'string')
if id is not None:
route_values['id'] = self._serialize.url('id', id, 'str')
query_parameters = {}
if time_precision is not None:
query_parameters['timePrecision'] = self._serialize.query('time_precision', time_precision, 'bool')
response = self._send(http_method='HEAD',
location_id='a02355f5-5f8a-4671-8e32-369d23aac83d',
version='4.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('int', response)
def query_by_id(self, id, team_context=None, time_precision=None):
"""QueryById.
Gets the results of the query given the query ID.
:param str id: The query ID.
:param :class:`<TeamContext> <work-item-tracking.v4_1.models.TeamContext>` team_context: The team context for the operation
:param bool time_precision: Whether or not to use time precision.
:rtype: :class:`<WorkItemQueryResult> <work-item-tracking.v4_1.models.WorkItemQueryResult>`
"""
project = None
team = None
if team_context is not None:
if team_context.project_id:
project = team_context.project_id
else:
project = team_context.project
if team_context.team_id:
team = team_context.team_id
else:
team = team_context.team
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'string')
if team is not None:
route_values['team'] = self._serialize.url('team', team, 'string')
if id is not None:
route_values['id'] = self._serialize.url('id', id, 'str')
query_parameters = {}
if time_precision is not None:
query_parameters['timePrecision'] = self._serialize.query('time_precision', time_precision, 'bool')
response = self._send(http_method='GET',
location_id='a02355f5-5f8a-4671-8e32-369d23aac83d',
version='4.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('WorkItemQueryResult', response)
def get_work_item_icon_json(self, icon, color=None, v=None):
"""GetWorkItemIconJson.
[Preview API] Get a work item icon given the friendly name and icon color.
:param str icon: The name of the icon
:param str color: The 6-digit hex color for the icon
:param int v: The version of the icon (used only for cache invalidation)
:rtype: :class:`<WorkItemIcon> <work-item-tracking.v4_1.models.WorkItemIcon>`
"""
route_values = {}
if icon is not None:
route_values['icon'] = self._serialize.url('icon', icon, 'str')
query_parameters = {}
if color is not None:
query_parameters['color'] = self._serialize.query('color', color, 'str')
if v is not None:
query_parameters['v'] = self._serialize.query('v', v, 'int')
response = self._send(http_method='GET',
location_id='4e1eb4a5-1970-4228-a682-ec48eb2dca30',
version='4.1-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('WorkItemIcon', response)
def get_work_item_icons(self):
"""GetWorkItemIcons.
[Preview API] Get a list of all work item icons.
:rtype: [WorkItemIcon]
"""
response = self._send(http_method='GET',
location_id='4e1eb4a5-1970-4228-a682-ec48eb2dca30',
version='4.1-preview.1')
return self._deserialize('[WorkItemIcon]', self._unwrap_collection(response))
def get_work_item_icon_svg(self, icon, color=None, v=None, **kwargs):
"""GetWorkItemIconSvg.
[Preview API] Get a work item icon given the friendly name and icon color.
:param str icon: The name of the icon
:param str color: The 6-digit hex color for the icon
:param int v: The version of the icon (used only for cache invalidation)
:rtype: object
"""
route_values = {}
if icon is not None:
route_values['icon'] = self._serialize.url('icon', icon, 'str')
query_parameters = {}
if color is not None:
query_parameters['color'] = self._serialize.query('color', color, 'str')
if v is not None:
query_parameters['v'] = self._serialize.query('v', v, 'int')
response = self._send(http_method='GET',
location_id='4e1eb4a5-1970-4228-a682-ec48eb2dca30',
version='4.1-preview.1',
route_values=route_values,
query_parameters=query_parameters,
accept_media_type='image/svg+xml')
if "callback" in kwargs:
callback = kwargs["callback"]
else:
callback = None
return self._client.stream_download(response, callback=callback)
def get_reporting_links_by_link_type(self, project=None, link_types=None, types=None, continuation_token=None, start_date_time=None):
"""GetReportingLinksByLinkType.
Get a batch of work item links
:param str project: Project ID or project name
:param [str] link_types: A list of types to filter the results to specific link types. Omit this parameter to get work item links of all link types.
:param [str] types: A list of types to filter the results to specific work item types. Omit this parameter to get work item links of all work item types.
:param str continuation_token: Specifies the continuationToken to start the batch from. Omit this parameter to get the first batch of links.
:param datetime start_date_time: Date/time to use as a starting point for link changes. Only link changes that occurred after that date/time will be returned. Cannot be used in conjunction with 'watermark' parameter.
:rtype: :class:`<ReportingWorkItemLinksBatch> <work-item-tracking.v4_1.models.ReportingWorkItemLinksBatch>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
query_parameters = {}
if link_types is not None:
link_types = ",".join(link_types)
query_parameters['linkTypes'] = self._serialize.query('link_types', link_types, 'str')
if types is not None:
types = ",".join(types)
query_parameters['types'] = self._serialize.query('types', types, 'str')
if continuation_token is not None:
query_parameters['continuationToken'] = self._serialize.query('continuation_token', continuation_token, 'str')
if start_date_time is not None:
query_parameters['startDateTime'] = self._serialize.query('start_date_time', start_date_time, 'iso-8601')
response = self._send(http_method='GET',
location_id='b5b5b6d0-0308-40a1-b3f4-b9bb3c66878f',
version='4.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('ReportingWorkItemLinksBatch', response)
def get_relation_type(self, relation):
"""GetRelationType.
Gets the work item relation type definition.
:param str relation: The relation name
:rtype: :class:`<WorkItemRelationType> <work-item-tracking.v4_1.models.WorkItemRelationType>`
"""
route_values = {}
if relation is not None:
route_values['relation'] = self._serialize.url('relation', relation, 'str')
response = self._send(http_method='GET',
location_id='f5d33bc9-5b49-4a3c-a9bd-f3cd46dd2165',
version='4.1',
route_values=route_values)
return self._deserialize('WorkItemRelationType', response)
def get_relation_types(self):
"""GetRelationTypes.
Gets the work item relation types.
:rtype: [WorkItemRelationType]
"""
response = self._send(http_method='GET',
location_id='f5d33bc9-5b49-4a3c-a9bd-f3cd46dd2165',
version='4.1')
return self._deserialize('[WorkItemRelationType]', self._unwrap_collection(response))
def read_reporting_revisions_get(self, project=None, fields=None, types=None, continuation_token=None, start_date_time=None, include_identity_ref=None, include_deleted=None, include_tag_ref=None, include_latest_only=None, expand=None, include_discussion_changes_only=None, max_page_size=None):
"""ReadReportingRevisionsGet.
Get a batch of work item revisions with the option of including deleted items
:param str project: Project ID or project name
:param [str] fields: A list of fields to return in work item revisions. Omit this parameter to get all reportable fields.
:param [str] types: A list of types to filter the results to specific work item types. Omit this parameter to get work item revisions of all work item types.
:param str continuation_token: Specifies the watermark to start the batch from. Omit this parameter to get the first batch of revisions.
:param datetime start_date_time: Date/time to use as a starting point for revisions, all revisions will occur after this date/time. Cannot be used in conjunction with 'watermark' parameter.
:param bool include_identity_ref: Return an identity reference instead of a string value for identity fields.
:param bool include_deleted: Specify if the deleted item should be returned.
:param bool include_tag_ref: Specify if the tag objects should be returned for System.Tags field.
:param bool include_latest_only: Return only the latest revisions of work items, skipping all historical revisions
:param str expand: Return all the fields in work item revisions, including long text fields which are not returned by default
:param bool include_discussion_changes_only: Return only the those revisions of work items, where only history field was changed
:param int max_page_size: The maximum number of results to return in this batch
:rtype: :class:`<ReportingWorkItemRevisionsBatch> <work-item-tracking.v4_1.models.ReportingWorkItemRevisionsBatch>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
query_parameters = {}
if fields is not None:
fields = ",".join(fields)
query_parameters['fields'] = self._serialize.query('fields', fields, 'str')
if types is not None:
types = ",".join(types)
query_parameters['types'] = self._serialize.query('types', types, 'str')
if continuation_token is not None:
query_parameters['continuationToken'] = self._serialize.query('continuation_token', continuation_token, 'str')
if start_date_time is not None:
query_parameters['startDateTime'] = self._serialize.query('start_date_time', start_date_time, 'iso-8601')
if include_identity_ref is not None:
query_parameters['includeIdentityRef'] = self._serialize.query('include_identity_ref', include_identity_ref, 'bool')
if include_deleted is not None:
query_parameters['includeDeleted'] = self._serialize.query('include_deleted', include_deleted, 'bool')
if include_tag_ref is not None:
query_parameters['includeTagRef'] = self._serialize.query('include_tag_ref', include_tag_ref, 'bool')
if include_latest_only is not None:
query_parameters['includeLatestOnly'] = self._serialize.query('include_latest_only', include_latest_only, 'bool')
if expand is not None:
query_parameters['$expand'] = self._serialize.query('expand', expand, 'str')
if include_discussion_changes_only is not None:
query_parameters['includeDiscussionChangesOnly'] = self._serialize.query('include_discussion_changes_only', include_discussion_changes_only, 'bool')
if max_page_size is not None:
query_parameters['$maxPageSize'] = self._serialize.query('max_page_size', max_page_size, 'int')
response = self._send(http_method='GET',
location_id='f828fe59-dd87-495d-a17c-7a8d6211ca6c',
version='4.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('ReportingWorkItemRevisionsBatch', response)
def read_reporting_revisions_post(self, filter, project=None, continuation_token=None, start_date_time=None, expand=None):
"""ReadReportingRevisionsPost.
Get a batch of work item revisions. This request may be used if your list of fields is large enough that it may run the URL over the length limit.
:param :class:`<ReportingWorkItemRevisionsFilter> <work-item-tracking.v4_1.models.ReportingWorkItemRevisionsFilter>` filter: An object that contains request settings: field filter, type filter, identity format
:param str project: Project ID or project name
:param str continuation_token: Specifies the watermark to start the batch from. Omit this parameter to get the first batch of revisions.
:param datetime start_date_time: Date/time to use as a starting point for revisions, all revisions will occur after this date/time. Cannot be used in conjunction with 'watermark' parameter.
:param str expand:
:rtype: :class:`<ReportingWorkItemRevisionsBatch> <work-item-tracking.v4_1.models.ReportingWorkItemRevisionsBatch>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
query_parameters = {}
if continuation_token is not None:
query_parameters['continuationToken'] = self._serialize.query('continuation_token', continuation_token, 'str')
if start_date_time is not None:
query_parameters['startDateTime'] = self._serialize.query('start_date_time', start_date_time, 'iso-8601')
if expand is not None:
query_parameters['$expand'] = self._serialize.query('expand', expand, 'str')
content = self._serialize.body(filter, 'ReportingWorkItemRevisionsFilter')
response = self._send(http_method='POST',
location_id='f828fe59-dd87-495d-a17c-7a8d6211ca6c',
version='4.1',
route_values=route_values,
query_parameters=query_parameters,
content=content)
return self._deserialize('ReportingWorkItemRevisionsBatch', response)
def create_work_item(self, document, project, type, validate_only=None, bypass_rules=None, suppress_notifications=None):
"""CreateWorkItem.
Creates a single work item.
:param :class:`<[JsonPatchOperation]> <work-item-tracking.v4_1.models.[JsonPatchOperation]>` document: The JSON Patch document representing the work item
:param str project: Project ID or project name
:param str type: The work item type of the work item to create
:param bool validate_only: Indicate if you only want to validate the changes without saving the work item
:param bool bypass_rules: Do not enforce the work item type rules on this update
:param bool suppress_notifications: Do not fire any notifications for this change
:rtype: :class:`<WorkItem> <work-item-tracking.v4_1.models.WorkItem>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if type is not None:
route_values['type'] = self._serialize.url('type', type, 'str')
query_parameters = {}
if validate_only is not None:
query_parameters['validateOnly'] = self._serialize.query('validate_only', validate_only, 'bool')
if bypass_rules is not None:
query_parameters['bypassRules'] = self._serialize.query('bypass_rules', bypass_rules, 'bool')
if suppress_notifications is not None:
query_parameters['suppressNotifications'] = self._serialize.query('suppress_notifications', suppress_notifications, 'bool')
content = self._serialize.body(document, '[JsonPatchOperation]')
response = self._send(http_method='POST',
location_id='62d3d110-0047-428c-ad3c-4fe872c91c74',
version='4.1',
route_values=route_values,
query_parameters=query_parameters,
content=content,
media_type='application/json-patch+json')
return self._deserialize('WorkItem', response)
def get_work_item_template(self, project, type, fields=None, as_of=None, expand=None):
"""GetWorkItemTemplate.
Returns a single work item from a template.
:param str project: Project ID or project name
:param str type: The work item type name
:param str fields: Comma-separated list of requested fields
:param datetime as_of: AsOf UTC date time string
:param str expand: The expand parameters for work item attributes. Possible options are { None, Relations, Fields, Links, All }.
:rtype: :class:`<WorkItem> <work-item-tracking.v4_1.models.WorkItem>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if type is not None:
route_values['type'] = self._serialize.url('type', type, 'str')
query_parameters = {}
if fields is not None:
query_parameters['fields'] = self._serialize.query('fields', fields, 'str')
if as_of is not None:
query_parameters['asOf'] = self._serialize.query('as_of', as_of, 'iso-8601')
if expand is not None:
query_parameters['$expand'] = self._serialize.query('expand', expand, 'str')
response = self._send(http_method='GET',
location_id='62d3d110-0047-428c-ad3c-4fe872c91c74',
version='4.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('WorkItem', response)
def delete_work_item(self, id, project=None, destroy=None):
"""DeleteWorkItem.
Deletes the specified work item and sends it to the Recycle Bin, so that it can be restored back, if required. Optionally, if the destroy parameter has been set to true, it destroys the work item permanently.
:param int id: ID of the work item to be deleted
:param str project: Project ID or project name
:param bool destroy: Optional parameter, if set to true, the work item is deleted permanently
:rtype: :class:`<WorkItemDelete> <work-item-tracking.v4_1.models.WorkItemDelete>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if id is not None:
route_values['id'] = self._serialize.url('id', id, 'int')
query_parameters = {}
if destroy is not None:
query_parameters['destroy'] = self._serialize.query('destroy', destroy, 'bool')
response = self._send(http_method='DELETE',
location_id='72c7ddf8-2cdc-4f60-90cd-ab71c14a399b',
version='4.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('WorkItemDelete', response)
def get_work_item(self, id, project=None, fields=None, as_of=None, expand=None):
"""GetWorkItem.
Returns a single work item.
:param int id: The work item id
:param str project: Project ID or project name
:param [str] fields: Comma-separated list of requested fields
:param datetime as_of: AsOf UTC date time string
:param str expand: The expand parameters for work item attributes. Possible options are { None, Relations, Fields, Links, All }.
:rtype: :class:`<WorkItem> <work-item-tracking.v4_1.models.WorkItem>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if id is not None:
route_values['id'] = self._serialize.url('id', id, 'int')
query_parameters = {}
if fields is not None:
fields = ",".join(fields)
query_parameters['fields'] = self._serialize.query('fields', fields, 'str')
if as_of is not None:
query_parameters['asOf'] = self._serialize.query('as_of', as_of, 'iso-8601')
if expand is not None:
query_parameters['$expand'] = self._serialize.query('expand', expand, 'str')
response = self._send(http_method='GET',
location_id='72c7ddf8-2cdc-4f60-90cd-ab71c14a399b',
version='4.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('WorkItem', response)
def get_work_items(self, ids, project=None, fields=None, as_of=None, expand=None, error_policy=None):
"""GetWorkItems.
Returns a list of work items.
:param [int] ids: The comma-separated list of requested work item ids
:param str project: Project ID or project name
:param [str] fields: Comma-separated list of requested fields
:param datetime as_of: AsOf UTC date time string
:param str expand: The expand parameters for work item attributes. Possible options are { None, Relations, Fields, Links, All }.
:param str error_policy: The flag to control error policy in a bulk get work items request. Possible options are {Fail, Omit}.
:rtype: [WorkItem]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
query_parameters = {}
if ids is not None:
ids = ",".join(map(str, ids))
query_parameters['ids'] = self._serialize.query('ids', ids, 'str')
if fields is not None:
fields = ",".join(fields)
query_parameters['fields'] = self._serialize.query('fields', fields, 'str')
if as_of is not None:
query_parameters['asOf'] = self._serialize.query('as_of', as_of, 'iso-8601')
if expand is not None:
query_parameters['$expand'] = self._serialize.query('expand', expand, 'str')
if error_policy is not None:
query_parameters['errorPolicy'] = self._serialize.query('error_policy', error_policy, 'str')
response = self._send(http_method='GET',
location_id='72c7ddf8-2cdc-4f60-90cd-ab71c14a399b',
version='4.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[WorkItem]', self._unwrap_collection(response))
def update_work_item(self, document, id, project=None, validate_only=None, bypass_rules=None, suppress_notifications=None):
"""UpdateWorkItem.
Updates a single work item.
:param :class:`<[JsonPatchOperation]> <work-item-tracking.v4_1.models.[JsonPatchOperation]>` document: The JSON Patch document representing the update
:param int id: The id of the work item to update
:param str project: Project ID or project name
:param bool validate_only: Indicate if you only want to validate the changes without saving the work item
:param bool bypass_rules: Do not enforce the work item type rules on this update
:param bool suppress_notifications: Do not fire any notifications for this change
:rtype: :class:`<WorkItem> <work-item-tracking.v4_1.models.WorkItem>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if id is not None:
route_values['id'] = self._serialize.url('id', id, 'int')
query_parameters = {}
if validate_only is not None:
query_parameters['validateOnly'] = self._serialize.query('validate_only', validate_only, 'bool')
if bypass_rules is not None:
query_parameters['bypassRules'] = self._serialize.query('bypass_rules', bypass_rules, 'bool')
if suppress_notifications is not None:
query_parameters['suppressNotifications'] = self._serialize.query('suppress_notifications', suppress_notifications, 'bool')
content = self._serialize.body(document, '[JsonPatchOperation]')
response = self._send(http_method='PATCH',
location_id='72c7ddf8-2cdc-4f60-90cd-ab71c14a399b',
version='4.1',
route_values=route_values,
query_parameters=query_parameters,
content=content,
media_type='application/json-patch+json')
return self._deserialize('WorkItem', response)
def get_work_item_next_states_on_checkin_action(self, ids, action=None):
"""GetWorkItemNextStatesOnCheckinAction.
[Preview API] Returns the next state on the given work item IDs.
:param [int] ids: list of work item ids
:param str action: possible actions. Currently only supports checkin
:rtype: [WorkItemNextStateOnTransition]
"""
query_parameters = {}
if ids is not None:
ids = ",".join(map(str, ids))
query_parameters['ids'] = self._serialize.query('ids', ids, 'str')
if action is not None:
query_parameters['action'] = self._serialize.query('action', action, 'str')
response = self._send(http_method='GET',
location_id='afae844b-e2f6-44c2-8053-17b3bb936a40',
version='4.1-preview.1',
query_parameters=query_parameters)
return self._deserialize('[WorkItemNextStateOnTransition]', self._unwrap_collection(response))
def get_work_item_type_categories(self, project):
"""GetWorkItemTypeCategories.
Get all work item type categories.
:param str project: Project ID or project name
:rtype: [WorkItemTypeCategory]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
response = self._send(http_method='GET',
location_id='9b9f5734-36c8-415e-ba67-f83b45c31408',
version='4.1',
route_values=route_values)
return self._deserialize('[WorkItemTypeCategory]', self._unwrap_collection(response))
def get_work_item_type_category(self, project, category):
"""GetWorkItemTypeCategory.
Get specific work item type category by name.
:param str project: Project ID or project name
:param str category: The category name
:rtype: :class:`<WorkItemTypeCategory> <work-item-tracking.v4_1.models.WorkItemTypeCategory>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if category is not None:
route_values['category'] = self._serialize.url('category', category, 'str')
response = self._send(http_method='GET',
location_id='9b9f5734-36c8-415e-ba67-f83b45c31408',
version='4.1',
route_values=route_values)
return self._deserialize('WorkItemTypeCategory', response)
def get_work_item_type(self, project, type):
"""GetWorkItemType.
Returns a work item type definition.
:param str project: Project ID or project name
:param str type: Work item type name
:rtype: :class:`<WorkItemType> <work-item-tracking.v4_1.models.WorkItemType>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if type is not None:
route_values['type'] = self._serialize.url('type', type, 'str')
response = self._send(http_method='GET',
location_id='7c8d7a76-4a09-43e8-b5df-bd792f4ac6aa',
version='4.1',
route_values=route_values)
return self._deserialize('WorkItemType', response)
def get_work_item_types(self, project):
"""GetWorkItemTypes.
Returns the list of work item types
:param str project: Project ID or project name
:rtype: [WorkItemType]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
response = self._send(http_method='GET',
location_id='7c8d7a76-4a09-43e8-b5df-bd792f4ac6aa',
version='4.1',
route_values=route_values)
return self._deserialize('[WorkItemType]', self._unwrap_collection(response))
def get_work_item_type_fields_with_references(self, project, type, expand=None):
"""GetWorkItemTypeFieldsWithReferences.
Get a list of fields for a work item type with detailed references.
:param str project: Project ID or project name
:param str type: Work item type.
:param str expand: Expand level for the API response. Properties: to include allowedvalues, default value, isRequired etc. as a part of response; None: to skip these properties.
:rtype: [WorkItemTypeFieldWithReferences]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if type is not None:
route_values['type'] = self._serialize.url('type', type, 'str')
query_parameters = {}
if expand is not None:
query_parameters['$expand'] = self._serialize.query('expand', expand, 'str')
response = self._send(http_method='GET',
location_id='bd293ce5-3d25-4192-8e67-e8092e879efb',
version='4.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[WorkItemTypeFieldWithReferences]', self._unwrap_collection(response))
def get_work_item_type_field_with_references(self, project, type, field, expand=None):
"""GetWorkItemTypeFieldWithReferences.
Get a field for a work item type with detailed references.
:param str project: Project ID or project name
:param str type: Work item type.
:param str field:
:param str expand: Expand level for the API response. Properties: to include allowedvalues, default value, isRequired etc. as a part of response; None: to skip these properties.
:rtype: :class:`<WorkItemTypeFieldWithReferences> <work-item-tracking.v4_1.models.WorkItemTypeFieldWithReferences>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if type is not None:
route_values['type'] = self._serialize.url('type', type, 'str')
if field is not None:
route_values['field'] = self._serialize.url('field', field, 'str')
query_parameters = {}
if expand is not None:
query_parameters['$expand'] = self._serialize.query('expand', expand, 'str')
response = self._send(http_method='GET',
location_id='bd293ce5-3d25-4192-8e67-e8092e879efb',
version='4.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('WorkItemTypeFieldWithReferences', response)
def get_work_item_type_states(self, project, type):
"""GetWorkItemTypeStates.
[Preview API] Returns the state names and colors for a work item type.
:param str project: Project ID or project name
:param str type: The state name
:rtype: [WorkItemStateColor]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if type is not None:
route_values['type'] = self._serialize.url('type', type, 'str')
response = self._send(http_method='GET',
location_id='7c9d7a76-4a09-43e8-b5df-bd792f4ac6aa',
version='4.1-preview.1',
route_values=route_values)
return self._deserialize('[WorkItemStateColor]', self._unwrap_collection(response))
| 56.096339 | 298 | 0.605722 |
from msrest import Serializer, Deserializer
from ...vss_client import VssClient
from . import models
class WorkItemTrackingClient(VssClient):
"""WorkItemTracking
:param str base_url: Service URL
:param Authentication creds: Authenticated credentials.
"""
def __init__(self, base_url=None, creds=None):
super(WorkItemTrackingClient, self).__init__(base_url, creds)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
resource_area_identifier = '5264459e-e5e0-4bd8-b118-0985e68a4ec5'
def get_work_artifact_link_types(self):
"""GetWorkArtifactLinkTypes.
[Preview API] Get the list of work item tracking outbound artifact link types.
:rtype: [WorkArtifactLink]
"""
response = self._send(http_method='GET',
location_id='1a31de40-e318-41cd-a6c6-881077df52e3',
version='4.1-preview.1')
return self._deserialize('[WorkArtifactLink]', self._unwrap_collection(response))
def query_work_items_for_artifact_uris(self, artifact_uri_query, project=None):
"""QueryWorkItemsForArtifactUris.
[Preview API] Queries work items linked to a given list of artifact URI.
:param :class:`<ArtifactUriQuery> <work-item-tracking.v4_1.models.ArtifactUriQuery>` artifact_uri_query: Defines a list of artifact URI for querying work items.
:param str project: Project ID or project name
:rtype: :class:`<ArtifactUriQueryResult> <work-item-tracking.v4_1.models.ArtifactUriQueryResult>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
content = self._serialize.body(artifact_uri_query, 'ArtifactUriQuery')
response = self._send(http_method='POST',
location_id='a9a9aa7a-8c09-44d3-ad1b-46e855c1e3d3',
version='4.1-preview.1',
route_values=route_values,
content=content)
return self._deserialize('ArtifactUriQueryResult', response)
def create_attachment(self, upload_stream, project=None, file_name=None, upload_type=None, area_path=None, **kwargs):
"""CreateAttachment.
Uploads an attachment.
:param object upload_stream: Stream to upload
:param str project: Project ID or project name
:param str file_name: The name of the file
:param str upload_type: Attachment upload type: Simple or Chunked
:param str area_path: Target project Area Path
:rtype: :class:`<AttachmentReference> <work-item-tracking.v4_1.models.AttachmentReference>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
query_parameters = {}
if file_name is not None:
query_parameters['fileName'] = self._serialize.query('file_name', file_name, 'str')
if upload_type is not None:
query_parameters['uploadType'] = self._serialize.query('upload_type', upload_type, 'str')
if area_path is not None:
query_parameters['areaPath'] = self._serialize.query('area_path', area_path, 'str')
if "callback" in kwargs:
callback = kwargs["callback"]
else:
callback = None
content = self._client.stream_upload(upload_stream, callback=callback)
response = self._send(http_method='POST',
location_id='e07b5fa4-1499-494d-a496-64b860fd64ff',
version='4.1',
route_values=route_values,
query_parameters=query_parameters,
content=content,
media_type='application/octet-stream')
return self._deserialize('AttachmentReference', response)
def get_attachment_content(self, id, project=None, file_name=None, download=None, **kwargs):
"""GetAttachmentContent.
Downloads an attachment.
:param str id: Attachment ID
:param str project: Project ID or project name
:param str file_name: Name of the file
:param bool download: If set to <c>true</c> always download attachment
:rtype: object
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if id is not None:
route_values['id'] = self._serialize.url('id', id, 'str')
query_parameters = {}
if file_name is not None:
query_parameters['fileName'] = self._serialize.query('file_name', file_name, 'str')
if download is not None:
query_parameters['download'] = self._serialize.query('download', download, 'bool')
response = self._send(http_method='GET',
location_id='e07b5fa4-1499-494d-a496-64b860fd64ff',
version='4.1',
route_values=route_values,
query_parameters=query_parameters,
accept_media_type='application/octet-stream')
if "callback" in kwargs:
callback = kwargs["callback"]
else:
callback = None
return self._client.stream_download(response, callback=callback)
def get_attachment_zip(self, id, project=None, file_name=None, download=None, **kwargs):
"""GetAttachmentZip.
Downloads an attachment.
:param str id: Attachment ID
:param str project: Project ID or project name
:param str file_name: Name of the file
:param bool download: If set to <c>true</c> always download attachment
:rtype: object
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if id is not None:
route_values['id'] = self._serialize.url('id', id, 'str')
query_parameters = {}
if file_name is not None:
query_parameters['fileName'] = self._serialize.query('file_name', file_name, 'str')
if download is not None:
query_parameters['download'] = self._serialize.query('download', download, 'bool')
response = self._send(http_method='GET',
location_id='e07b5fa4-1499-494d-a496-64b860fd64ff',
version='4.1',
route_values=route_values,
query_parameters=query_parameters,
accept_media_type='application/zip')
if "callback" in kwargs:
callback = kwargs["callback"]
else:
callback = None
return self._client.stream_download(response, callback=callback)
def get_classification_nodes(self, project, ids, depth=None, error_policy=None):
"""GetClassificationNodes.
Gets root classification nodes or list of classification nodes for a given list of nodes ids, for a given project. In case ids parameter is supplied you will get list of classification nodes for those ids. Otherwise you will get root classification nodes for this project.
:param str project: Project ID or project name
:param [int] ids: Comma seperated integer classification nodes ids. It's not required, if you want root nodes.
:param int depth: Depth of children to fetch.
:param str error_policy: Flag to handle errors in getting some nodes. Possible options are Fail and Omit.
:rtype: [WorkItemClassificationNode]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
query_parameters = {}
if ids is not None:
ids = ",".join(map(str, ids))
query_parameters['ids'] = self._serialize.query('ids', ids, 'str')
if depth is not None:
query_parameters['$depth'] = self._serialize.query('depth', depth, 'int')
if error_policy is not None:
query_parameters['errorPolicy'] = self._serialize.query('error_policy', error_policy, 'str')
response = self._send(http_method='GET',
location_id='a70579d1-f53a-48ee-a5be-7be8659023b9',
version='4.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[WorkItemClassificationNode]', self._unwrap_collection(response))
def get_root_nodes(self, project, depth=None):
"""GetRootNodes.
Gets root classification nodes under the project.
:param str project: Project ID or project name
:param int depth: Depth of children to fetch.
:rtype: [WorkItemClassificationNode]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
query_parameters = {}
if depth is not None:
query_parameters['$depth'] = self._serialize.query('depth', depth, 'int')
response = self._send(http_method='GET',
location_id='a70579d1-f53a-48ee-a5be-7be8659023b9',
version='4.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[WorkItemClassificationNode]', self._unwrap_collection(response))
def create_or_update_classification_node(self, posted_node, project, structure_group, path=None):
"""CreateOrUpdateClassificationNode.
Create new or update an existing classification node.
:param :class:`<WorkItemClassificationNode> <work-item-tracking.v4_1.models.WorkItemClassificationNode>` posted_node: Node to create or update.
:param str project: Project ID or project name
:param TreeStructureGroup structure_group: Structure group of the classification node, area or iteration.
:param str path: Path of the classification node.
:rtype: :class:`<WorkItemClassificationNode> <work-item-tracking.v4_1.models.WorkItemClassificationNode>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if structure_group is not None:
route_values['structureGroup'] = self._serialize.url('structure_group', structure_group, 'TreeStructureGroup')
if path is not None:
route_values['path'] = self._serialize.url('path', path, 'str')
content = self._serialize.body(posted_node, 'WorkItemClassificationNode')
response = self._send(http_method='POST',
location_id='5a172953-1b41-49d3-840a-33f79c3ce89f',
version='4.1',
route_values=route_values,
content=content)
return self._deserialize('WorkItemClassificationNode', response)
def delete_classification_node(self, project, structure_group, path=None, reclassify_id=None):
"""DeleteClassificationNode.
Delete an existing classification node.
:param str project: Project ID or project name
:param TreeStructureGroup structure_group: Structure group of the classification node, area or iteration.
:param str path: Path of the classification node.
:param int reclassify_id: Id of the target classification node for reclassification.
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if structure_group is not None:
route_values['structureGroup'] = self._serialize.url('structure_group', structure_group, 'TreeStructureGroup')
if path is not None:
route_values['path'] = self._serialize.url('path', path, 'str')
query_parameters = {}
if reclassify_id is not None:
query_parameters['$reclassifyId'] = self._serialize.query('reclassify_id', reclassify_id, 'int')
self._send(http_method='DELETE',
location_id='5a172953-1b41-49d3-840a-33f79c3ce89f',
version='4.1',
route_values=route_values,
query_parameters=query_parameters)
def get_classification_node(self, project, structure_group, path=None, depth=None):
"""GetClassificationNode.
Gets the classification node for a given node path.
:param str project: Project ID or project name
:param TreeStructureGroup structure_group: Structure group of the classification node, area or iteration.
:param str path: Path of the classification node.
:param int depth: Depth of children to fetch.
:rtype: :class:`<WorkItemClassificationNode> <work-item-tracking.v4_1.models.WorkItemClassificationNode>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if structure_group is not None:
route_values['structureGroup'] = self._serialize.url('structure_group', structure_group, 'TreeStructureGroup')
if path is not None:
route_values['path'] = self._serialize.url('path', path, 'str')
query_parameters = {}
if depth is not None:
query_parameters['$depth'] = self._serialize.query('depth', depth, 'int')
response = self._send(http_method='GET',
location_id='5a172953-1b41-49d3-840a-33f79c3ce89f',
version='4.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('WorkItemClassificationNode', response)
def update_classification_node(self, posted_node, project, structure_group, path=None):
"""UpdateClassificationNode.
Update an existing classification node.
:param :class:`<WorkItemClassificationNode> <work-item-tracking.v4_1.models.WorkItemClassificationNode>` posted_node: Node to create or update.
:param str project: Project ID or project name
:param TreeStructureGroup structure_group: Structure group of the classification node, area or iteration.
:param str path: Path of the classification node.
:rtype: :class:`<WorkItemClassificationNode> <work-item-tracking.v4_1.models.WorkItemClassificationNode>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if structure_group is not None:
route_values['structureGroup'] = self._serialize.url('structure_group', structure_group, 'TreeStructureGroup')
if path is not None:
route_values['path'] = self._serialize.url('path', path, 'str')
content = self._serialize.body(posted_node, 'WorkItemClassificationNode')
response = self._send(http_method='PATCH',
location_id='5a172953-1b41-49d3-840a-33f79c3ce89f',
version='4.1',
route_values=route_values,
content=content)
return self._deserialize('WorkItemClassificationNode', response)
def get_comment(self, id, revision, project=None):
"""GetComment.
[Preview API] Gets a comment for a work item at the specified revision.
:param int id: Work item id
:param int revision: Revision for which the comment need to be fetched
:param str project: Project ID or project name
:rtype: :class:`<WorkItemComment> <work-item-tracking.v4_1.models.WorkItemComment>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if id is not None:
route_values['id'] = self._serialize.url('id', id, 'int')
if revision is not None:
route_values['revision'] = self._serialize.url('revision', revision, 'int')
response = self._send(http_method='GET',
location_id='19335ae7-22f7-4308-93d8-261f9384b7cf',
version='4.1-preview.2',
route_values=route_values)
return self._deserialize('WorkItemComment', response)
def get_comments(self, id, project=None, from_revision=None, top=None, order=None):
"""GetComments.
[Preview API] Gets the specified number of comments for a work item from the specified revision.
:param int id: Work item id
:param str project: Project ID or project name
:param int from_revision: Revision from which comments are to be fetched (default is 1)
:param int top: The number of comments to return (default is 200)
:param str order: Ascending or descending by revision id (default is ascending)
:rtype: :class:`<WorkItemComments> <work-item-tracking.v4_1.models.WorkItemComments>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if id is not None:
route_values['id'] = self._serialize.url('id', id, 'int')
query_parameters = {}
if from_revision is not None:
query_parameters['fromRevision'] = self._serialize.query('from_revision', from_revision, 'int')
if top is not None:
query_parameters['$top'] = self._serialize.query('top', top, 'int')
if order is not None:
query_parameters['order'] = self._serialize.query('order', order, 'str')
response = self._send(http_method='GET',
location_id='19335ae7-22f7-4308-93d8-261f9384b7cf',
version='4.1-preview.2',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('WorkItemComments', response)
def delete_field(self, field_name_or_ref_name, project=None):
"""DeleteField.
Deletes the field.
:param str field_name_or_ref_name: Field simple name or reference name
:param str project: Project ID or project name
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if field_name_or_ref_name is not None:
route_values['fieldNameOrRefName'] = self._serialize.url('field_name_or_ref_name', field_name_or_ref_name, 'str')
self._send(http_method='DELETE',
location_id='b51fd764-e5c2-4b9b-aaf7-3395cf4bdd94',
version='4.1',
route_values=route_values)
def get_field(self, field_name_or_ref_name, project=None):
"""GetField.
Gets information on a specific field.
:param str field_name_or_ref_name: Field simple name or reference name
:param str project: Project ID or project name
:rtype: :class:`<WorkItemField> <work-item-tracking.v4_1.models.WorkItemField>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if field_name_or_ref_name is not None:
route_values['fieldNameOrRefName'] = self._serialize.url('field_name_or_ref_name', field_name_or_ref_name, 'str')
response = self._send(http_method='GET',
location_id='b51fd764-e5c2-4b9b-aaf7-3395cf4bdd94',
version='4.1',
route_values=route_values)
return self._deserialize('WorkItemField', response)
def get_fields(self, project=None, expand=None):
"""GetFields.
Returns information for all fields.
:param str project: Project ID or project name
:param str expand: Use ExtensionFields to include extension fields, otherwise exclude them. Unless the feature flag for this parameter is enabled, extension fields are always included.
:rtype: [WorkItemField]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
query_parameters = {}
if expand is not None:
query_parameters['$expand'] = self._serialize.query('expand', expand, 'str')
response = self._send(http_method='GET',
location_id='b51fd764-e5c2-4b9b-aaf7-3395cf4bdd94',
version='4.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[WorkItemField]', self._unwrap_collection(response))
def update_field(self, work_item_field, field_name_or_ref_name, project=None):
"""UpdateField.
Updates the field.
:param :class:`<WorkItemField> <work-item-tracking.v4_1.models.WorkItemField>` work_item_field: New field definition
:param str field_name_or_ref_name: Field simple name or reference name
:param str project: Project ID or project name
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if field_name_or_ref_name is not None:
route_values['fieldNameOrRefName'] = self._serialize.url('field_name_or_ref_name', field_name_or_ref_name, 'str')
content = self._serialize.body(work_item_field, 'WorkItemField')
self._send(http_method='PATCH',
location_id='b51fd764-e5c2-4b9b-aaf7-3395cf4bdd94',
version='4.1',
route_values=route_values,
content=content)
def create_query(self, posted_query, project, query):
"""CreateQuery.
Creates a query, or moves a query.
:param :class:`<QueryHierarchyItem> <work-item-tracking.v4_1.models.QueryHierarchyItem>` posted_query: The query to create.
:param str project: Project ID or project name
:param str query: The parent path for the query to create.
:rtype: :class:`<QueryHierarchyItem> <work-item-tracking.v4_1.models.QueryHierarchyItem>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if query is not None:
route_values['query'] = self._serialize.url('query', query, 'str')
content = self._serialize.body(posted_query, 'QueryHierarchyItem')
response = self._send(http_method='POST',
location_id='a67d190c-c41f-424b-814d-0e906f659301',
version='4.1',
route_values=route_values,
content=content)
return self._deserialize('QueryHierarchyItem', response)
def delete_query(self, project, query):
"""DeleteQuery.
Delete a query or a folder. This deletes any permission change on the deleted query or folder and any of its descendants if it is a folder. It is important to note that the deleted permission changes cannot be recovered upon undeleting the query or folder.
:param str project: Project ID or project name
:param str query: ID or path of the query or folder to delete.
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if query is not None:
route_values['query'] = self._serialize.url('query', query, 'str')
self._send(http_method='DELETE',
location_id='a67d190c-c41f-424b-814d-0e906f659301',
version='4.1',
route_values=route_values)
def get_queries(self, project, expand=None, depth=None, include_deleted=None):
"""GetQueries.
Gets the root queries and their children
:param str project: Project ID or project name
:param str expand: Include the query string (wiql), clauses, query result columns, and sort options in the results.
:param int depth: In the folder of queries, return child queries and folders to this depth.
:param bool include_deleted: Include deleted queries and folders
:rtype: [QueryHierarchyItem]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
query_parameters = {}
if expand is not None:
query_parameters['$expand'] = self._serialize.query('expand', expand, 'str')
if depth is not None:
query_parameters['$depth'] = self._serialize.query('depth', depth, 'int')
if include_deleted is not None:
query_parameters['$includeDeleted'] = self._serialize.query('include_deleted', include_deleted, 'bool')
response = self._send(http_method='GET',
location_id='a67d190c-c41f-424b-814d-0e906f659301',
version='4.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[QueryHierarchyItem]', self._unwrap_collection(response))
def get_query(self, project, query, expand=None, depth=None, include_deleted=None):
"""GetQuery.
Retrieves an individual query and its children
:param str project: Project ID or project name
:param str query:
:param str expand: Include the query string (wiql), clauses, query result columns, and sort options in the results.
:param int depth: In the folder of queries, return child queries and folders to this depth.
:param bool include_deleted: Include deleted queries and folders
:rtype: :class:`<QueryHierarchyItem> <work-item-tracking.v4_1.models.QueryHierarchyItem>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if query is not None:
route_values['query'] = self._serialize.url('query', query, 'str')
query_parameters = {}
if expand is not None:
query_parameters['$expand'] = self._serialize.query('expand', expand, 'str')
if depth is not None:
query_parameters['$depth'] = self._serialize.query('depth', depth, 'int')
if include_deleted is not None:
query_parameters['$includeDeleted'] = self._serialize.query('include_deleted', include_deleted, 'bool')
response = self._send(http_method='GET',
location_id='a67d190c-c41f-424b-814d-0e906f659301',
version='4.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('QueryHierarchyItem', response)
def search_queries(self, project, filter, top=None, expand=None, include_deleted=None):
"""SearchQueries.
Searches all queries the user has access to in the current project
:param str project: Project ID or project name
:param str filter: The text to filter the queries with.
:param int top: The number of queries to return (Default is 50 and maximum is 200).
:param str expand:
:param bool include_deleted: Include deleted queries and folders
:rtype: :class:`<QueryHierarchyItemsResult> <work-item-tracking.v4_1.models.QueryHierarchyItemsResult>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
query_parameters = {}
if filter is not None:
query_parameters['$filter'] = self._serialize.query('filter', filter, 'str')
if top is not None:
query_parameters['$top'] = self._serialize.query('top', top, 'int')
if expand is not None:
query_parameters['$expand'] = self._serialize.query('expand', expand, 'str')
if include_deleted is not None:
query_parameters['$includeDeleted'] = self._serialize.query('include_deleted', include_deleted, 'bool')
response = self._send(http_method='GET',
location_id='a67d190c-c41f-424b-814d-0e906f659301',
version='4.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('QueryHierarchyItemsResult', response)
def update_query(self, query_update, project, query, undelete_descendants=None):
"""UpdateQuery.
Update a query or a folder. This allows you to update, rename and move queries and folders.
:param :class:`<QueryHierarchyItem> <work-item-tracking.v4_1.models.QueryHierarchyItem>` query_update: The query to update.
:param str project: Project ID or project name
:param str query: The path for the query to update.
:param bool undelete_descendants: Undelete the children of this folder. It is important to note that this will not bring back the permission changes that were previously applied to the descendants.
:rtype: :class:`<QueryHierarchyItem> <work-item-tracking.v4_1.models.QueryHierarchyItem>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if query is not None:
route_values['query'] = self._serialize.url('query', query, 'str')
query_parameters = {}
if undelete_descendants is not None:
query_parameters['$undeleteDescendants'] = self._serialize.query('undelete_descendants', undelete_descendants, 'bool')
content = self._serialize.body(query_update, 'QueryHierarchyItem')
response = self._send(http_method='PATCH',
location_id='a67d190c-c41f-424b-814d-0e906f659301',
version='4.1',
route_values=route_values,
query_parameters=query_parameters,
content=content)
return self._deserialize('QueryHierarchyItem', response)
def destroy_work_item(self, id, project=None):
"""DestroyWorkItem.
Destroys the specified work item permanently from the Recycle Bin. This action can not be undone.
:param int id: ID of the work item to be destroyed permanently
:param str project: Project ID or project name
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if id is not None:
route_values['id'] = self._serialize.url('id', id, 'int')
self._send(http_method='DELETE',
location_id='b70d8d39-926c-465e-b927-b1bf0e5ca0e0',
version='4.1',
route_values=route_values)
def get_deleted_work_item(self, id, project=None):
"""GetDeletedWorkItem.
Gets a deleted work item from Recycle Bin.
:param int id: ID of the work item to be returned
:param str project: Project ID or project name
:rtype: :class:`<WorkItemDelete> <work-item-tracking.v4_1.models.WorkItemDelete>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if id is not None:
route_values['id'] = self._serialize.url('id', id, 'int')
response = self._send(http_method='GET',
location_id='b70d8d39-926c-465e-b927-b1bf0e5ca0e0',
version='4.1',
route_values=route_values)
return self._deserialize('WorkItemDelete', response)
def get_deleted_work_items(self, ids, project=None):
"""GetDeletedWorkItems.
Gets the work items from the recycle bin, whose IDs have been specified in the parameters
:param [int] ids: Comma separated list of IDs of the deleted work items to be returned
:param str project: Project ID or project name
:rtype: [WorkItemDeleteReference]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
query_parameters = {}
if ids is not None:
ids = ",".join(map(str, ids))
query_parameters['ids'] = self._serialize.query('ids', ids, 'str')
response = self._send(http_method='GET',
location_id='b70d8d39-926c-465e-b927-b1bf0e5ca0e0',
version='4.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[WorkItemDeleteReference]', self._unwrap_collection(response))
def get_deleted_work_item_shallow_references(self, project=None):
"""GetDeletedWorkItemShallowReferences.
Gets a list of the IDs and the URLs of the deleted the work items in the Recycle Bin.
:param str project: Project ID or project name
:rtype: [WorkItemDeleteShallowReference]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
response = self._send(http_method='GET',
location_id='b70d8d39-926c-465e-b927-b1bf0e5ca0e0',
version='4.1',
route_values=route_values)
return self._deserialize('[WorkItemDeleteShallowReference]', self._unwrap_collection(response))
def restore_work_item(self, payload, id, project=None):
"""RestoreWorkItem.
Restores the deleted work item from Recycle Bin.
:param :class:`<WorkItemDeleteUpdate> <work-item-tracking.v4_1.models.WorkItemDeleteUpdate>` payload: Paylod with instructions to update the IsDeleted flag to false
:param int id: ID of the work item to be restored
:param str project: Project ID or project name
:rtype: :class:`<WorkItemDelete> <work-item-tracking.v4_1.models.WorkItemDelete>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if id is not None:
route_values['id'] = self._serialize.url('id', id, 'int')
content = self._serialize.body(payload, 'WorkItemDeleteUpdate')
response = self._send(http_method='PATCH',
location_id='b70d8d39-926c-465e-b927-b1bf0e5ca0e0',
version='4.1',
route_values=route_values,
content=content)
return self._deserialize('WorkItemDelete', response)
def get_revision(self, id, revision_number, expand=None):
"""GetRevision.
Returns a fully hydrated work item for the requested revision
:param int id:
:param int revision_number:
:param str expand:
:rtype: :class:`<WorkItem> <work-item-tracking.v4_1.models.WorkItem>`
"""
route_values = {}
if id is not None:
route_values['id'] = self._serialize.url('id', id, 'int')
if revision_number is not None:
route_values['revisionNumber'] = self._serialize.url('revision_number', revision_number, 'int')
query_parameters = {}
if expand is not None:
query_parameters['$expand'] = self._serialize.query('expand', expand, 'str')
response = self._send(http_method='GET',
location_id='a00c85a5-80fa-4565-99c3-bcd2181434bb',
version='4.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('WorkItem', response)
def get_revisions(self, id, top=None, skip=None, expand=None):
"""GetRevisions.
Returns the list of fully hydrated work item revisions, paged.
:param int id:
:param int top:
:param int skip:
:param str expand:
:rtype: [WorkItem]
"""
route_values = {}
if id is not None:
route_values['id'] = self._serialize.url('id', id, 'int')
query_parameters = {}
if top is not None:
query_parameters['$top'] = self._serialize.query('top', top, 'int')
if skip is not None:
query_parameters['$skip'] = self._serialize.query('skip', skip, 'int')
if expand is not None:
query_parameters['$expand'] = self._serialize.query('expand', expand, 'str')
response = self._send(http_method='GET',
location_id='a00c85a5-80fa-4565-99c3-bcd2181434bb',
version='4.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[WorkItem]', self._unwrap_collection(response))
def create_template(self, template, team_context):
"""CreateTemplate.
[Preview API] Creates a template
:param :class:`<WorkItemTemplate> <work-item-tracking.v4_1.models.WorkItemTemplate>` template: Template contents
:param :class:`<TeamContext> <work-item-tracking.v4_1.models.TeamContext>` team_context: The team context for the operation
:rtype: :class:`<WorkItemTemplate> <work-item-tracking.v4_1.models.WorkItemTemplate>`
"""
project = None
team = None
if team_context is not None:
if team_context.project_id:
project = team_context.project_id
else:
project = team_context.project
if team_context.team_id:
team = team_context.team_id
else:
team = team_context.team
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'string')
if team is not None:
route_values['team'] = self._serialize.url('team', team, 'string')
content = self._serialize.body(template, 'WorkItemTemplate')
response = self._send(http_method='POST',
location_id='6a90345f-a676-4969-afce-8e163e1d5642',
version='4.1-preview.1',
route_values=route_values,
content=content)
return self._deserialize('WorkItemTemplate', response)
def get_templates(self, team_context, workitemtypename=None):
"""GetTemplates.
[Preview API] Gets template
:param :class:`<TeamContext> <work-item-tracking.v4_1.models.TeamContext>` team_context: The team context for the operation
:param str workitemtypename: Optional, When specified returns templates for given Work item type.
:rtype: [WorkItemTemplateReference]
"""
project = None
team = None
if team_context is not None:
if team_context.project_id:
project = team_context.project_id
else:
project = team_context.project
if team_context.team_id:
team = team_context.team_id
else:
team = team_context.team
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'string')
if team is not None:
route_values['team'] = self._serialize.url('team', team, 'string')
query_parameters = {}
if workitemtypename is not None:
query_parameters['workitemtypename'] = self._serialize.query('workitemtypename', workitemtypename, 'str')
response = self._send(http_method='GET',
location_id='6a90345f-a676-4969-afce-8e163e1d5642',
version='4.1-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[WorkItemTemplateReference]', self._unwrap_collection(response))
def delete_template(self, team_context, template_id):
"""DeleteTemplate.
[Preview API] Deletes the template with given id
:param :class:`<TeamContext> <work-item-tracking.v4_1.models.TeamContext>` team_context: The team context for the operation
:param str template_id: Template id
"""
project = None
team = None
if team_context is not None:
if team_context.project_id:
project = team_context.project_id
else:
project = team_context.project
if team_context.team_id:
team = team_context.team_id
else:
team = team_context.team
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'string')
if team is not None:
route_values['team'] = self._serialize.url('team', team, 'string')
if template_id is not None:
route_values['templateId'] = self._serialize.url('template_id', template_id, 'str')
self._send(http_method='DELETE',
location_id='fb10264a-8836-48a0-8033-1b0ccd2748d5',
version='4.1-preview.1',
route_values=route_values)
def get_template(self, team_context, template_id):
"""GetTemplate.
[Preview API] Gets the template with specified id
:param :class:`<TeamContext> <work-item-tracking.v4_1.models.TeamContext>` team_context: The team context for the operation
:param str template_id: Template Id
:rtype: :class:`<WorkItemTemplate> <work-item-tracking.v4_1.models.WorkItemTemplate>`
"""
project = None
team = None
if team_context is not None:
if team_context.project_id:
project = team_context.project_id
else:
project = team_context.project
if team_context.team_id:
team = team_context.team_id
else:
team = team_context.team
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'string')
if team is not None:
route_values['team'] = self._serialize.url('team', team, 'string')
if template_id is not None:
route_values['templateId'] = self._serialize.url('template_id', template_id, 'str')
response = self._send(http_method='GET',
location_id='fb10264a-8836-48a0-8033-1b0ccd2748d5',
version='4.1-preview.1',
route_values=route_values)
return self._deserialize('WorkItemTemplate', response)
def replace_template(self, template_content, team_context, template_id):
"""ReplaceTemplate.
[Preview API] Replace template contents
:param :class:`<WorkItemTemplate> <work-item-tracking.v4_1.models.WorkItemTemplate>` template_content: Template contents to replace with
:param :class:`<TeamContext> <work-item-tracking.v4_1.models.TeamContext>` team_context: The team context for the operation
:param str template_id: Template id
:rtype: :class:`<WorkItemTemplate> <work-item-tracking.v4_1.models.WorkItemTemplate>`
"""
project = None
team = None
if team_context is not None:
if team_context.project_id:
project = team_context.project_id
else:
project = team_context.project
if team_context.team_id:
team = team_context.team_id
else:
team = team_context.team
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'string')
if team is not None:
route_values['team'] = self._serialize.url('team', team, 'string')
if template_id is not None:
route_values['templateId'] = self._serialize.url('template_id', template_id, 'str')
content = self._serialize.body(template_content, 'WorkItemTemplate')
response = self._send(http_method='PUT',
location_id='fb10264a-8836-48a0-8033-1b0ccd2748d5',
version='4.1-preview.1',
route_values=route_values,
content=content)
return self._deserialize('WorkItemTemplate', response)
def get_update(self, id, update_number):
"""GetUpdate.
Returns a single update for a work item
:param int id:
:param int update_number:
:rtype: :class:`<WorkItemUpdate> <work-item-tracking.v4_1.models.WorkItemUpdate>`
"""
route_values = {}
if id is not None:
route_values['id'] = self._serialize.url('id', id, 'int')
if update_number is not None:
route_values['updateNumber'] = self._serialize.url('update_number', update_number, 'int')
response = self._send(http_method='GET',
location_id='6570bf97-d02c-4a91-8d93-3abe9895b1a9',
version='4.1',
route_values=route_values)
return self._deserialize('WorkItemUpdate', response)
def get_updates(self, id, top=None, skip=None):
"""GetUpdates.
Returns a the deltas between work item revisions
:param int id:
:param int top:
:param int skip:
:rtype: [WorkItemUpdate]
"""
route_values = {}
if id is not None:
route_values['id'] = self._serialize.url('id', id, 'int')
query_parameters = {}
if top is not None:
query_parameters['$top'] = self._serialize.query('top', top, 'int')
if skip is not None:
query_parameters['$skip'] = self._serialize.query('skip', skip, 'int')
response = self._send(http_method='GET',
location_id='6570bf97-d02c-4a91-8d93-3abe9895b1a9',
version='4.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[WorkItemUpdate]', self._unwrap_collection(response))
def query_by_wiql(self, wiql, team_context=None, time_precision=None, top=None):
"""QueryByWiql.
Gets the results of the query given its WIQL.
:param :class:`<Wiql> <work-item-tracking.v4_1.models.Wiql>` wiql: The query containing the WIQL.
:param :class:`<TeamContext> <work-item-tracking.v4_1.models.TeamContext>` team_context: The team context for the operation
:param bool time_precision: Whether or not to use time precision.
:param int top: The max number of results to return.
:rtype: :class:`<WorkItemQueryResult> <work-item-tracking.v4_1.models.WorkItemQueryResult>`
"""
project = None
team = None
if team_context is not None:
if team_context.project_id:
project = team_context.project_id
else:
project = team_context.project
if team_context.team_id:
team = team_context.team_id
else:
team = team_context.team
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'string')
if team is not None:
route_values['team'] = self._serialize.url('team', team, 'string')
query_parameters = {}
if time_precision is not None:
query_parameters['timePrecision'] = self._serialize.query('time_precision', time_precision, 'bool')
if top is not None:
query_parameters['$top'] = self._serialize.query('top', top, 'int')
content = self._serialize.body(wiql, 'Wiql')
response = self._send(http_method='POST',
location_id='1a9c53f7-f243-4447-b110-35ef023636e4',
version='4.1',
route_values=route_values,
query_parameters=query_parameters,
content=content)
return self._deserialize('WorkItemQueryResult', response)
def get_query_result_count(self, id, team_context=None, time_precision=None):
"""GetQueryResultCount.
Gets the results of the query given the query ID.
:param str id: The query ID.
:param :class:`<TeamContext> <work-item-tracking.v4_1.models.TeamContext>` team_context: The team context for the operation
:param bool time_precision: Whether or not to use time precision.
:rtype: int
"""
project = None
team = None
if team_context is not None:
if team_context.project_id:
project = team_context.project_id
else:
project = team_context.project
if team_context.team_id:
team = team_context.team_id
else:
team = team_context.team
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'string')
if team is not None:
route_values['team'] = self._serialize.url('team', team, 'string')
if id is not None:
route_values['id'] = self._serialize.url('id', id, 'str')
query_parameters = {}
if time_precision is not None:
query_parameters['timePrecision'] = self._serialize.query('time_precision', time_precision, 'bool')
response = self._send(http_method='HEAD',
location_id='a02355f5-5f8a-4671-8e32-369d23aac83d',
version='4.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('int', response)
def query_by_id(self, id, team_context=None, time_precision=None):
"""QueryById.
Gets the results of the query given the query ID.
:param str id: The query ID.
:param :class:`<TeamContext> <work-item-tracking.v4_1.models.TeamContext>` team_context: The team context for the operation
:param bool time_precision: Whether or not to use time precision.
:rtype: :class:`<WorkItemQueryResult> <work-item-tracking.v4_1.models.WorkItemQueryResult>`
"""
project = None
team = None
if team_context is not None:
if team_context.project_id:
project = team_context.project_id
else:
project = team_context.project
if team_context.team_id:
team = team_context.team_id
else:
team = team_context.team
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'string')
if team is not None:
route_values['team'] = self._serialize.url('team', team, 'string')
if id is not None:
route_values['id'] = self._serialize.url('id', id, 'str')
query_parameters = {}
if time_precision is not None:
query_parameters['timePrecision'] = self._serialize.query('time_precision', time_precision, 'bool')
response = self._send(http_method='GET',
location_id='a02355f5-5f8a-4671-8e32-369d23aac83d',
version='4.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('WorkItemQueryResult', response)
def get_work_item_icon_json(self, icon, color=None, v=None):
"""GetWorkItemIconJson.
[Preview API] Get a work item icon given the friendly name and icon color.
:param str icon: The name of the icon
:param str color: The 6-digit hex color for the icon
:param int v: The version of the icon (used only for cache invalidation)
:rtype: :class:`<WorkItemIcon> <work-item-tracking.v4_1.models.WorkItemIcon>`
"""
route_values = {}
if icon is not None:
route_values['icon'] = self._serialize.url('icon', icon, 'str')
query_parameters = {}
if color is not None:
query_parameters['color'] = self._serialize.query('color', color, 'str')
if v is not None:
query_parameters['v'] = self._serialize.query('v', v, 'int')
response = self._send(http_method='GET',
location_id='4e1eb4a5-1970-4228-a682-ec48eb2dca30',
version='4.1-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('WorkItemIcon', response)
def get_work_item_icons(self):
"""GetWorkItemIcons.
[Preview API] Get a list of all work item icons.
:rtype: [WorkItemIcon]
"""
response = self._send(http_method='GET',
location_id='4e1eb4a5-1970-4228-a682-ec48eb2dca30',
version='4.1-preview.1')
return self._deserialize('[WorkItemIcon]', self._unwrap_collection(response))
def get_work_item_icon_svg(self, icon, color=None, v=None, **kwargs):
"""GetWorkItemIconSvg.
[Preview API] Get a work item icon given the friendly name and icon color.
:param str icon: The name of the icon
:param str color: The 6-digit hex color for the icon
:param int v: The version of the icon (used only for cache invalidation)
:rtype: object
"""
route_values = {}
if icon is not None:
route_values['icon'] = self._serialize.url('icon', icon, 'str')
query_parameters = {}
if color is not None:
query_parameters['color'] = self._serialize.query('color', color, 'str')
if v is not None:
query_parameters['v'] = self._serialize.query('v', v, 'int')
response = self._send(http_method='GET',
location_id='4e1eb4a5-1970-4228-a682-ec48eb2dca30',
version='4.1-preview.1',
route_values=route_values,
query_parameters=query_parameters,
accept_media_type='image/svg+xml')
if "callback" in kwargs:
callback = kwargs["callback"]
else:
callback = None
return self._client.stream_download(response, callback=callback)
def get_reporting_links_by_link_type(self, project=None, link_types=None, types=None, continuation_token=None, start_date_time=None):
"""GetReportingLinksByLinkType.
Get a batch of work item links
:param str project: Project ID or project name
:param [str] link_types: A list of types to filter the results to specific link types. Omit this parameter to get work item links of all link types.
:param [str] types: A list of types to filter the results to specific work item types. Omit this parameter to get work item links of all work item types.
:param str continuation_token: Specifies the continuationToken to start the batch from. Omit this parameter to get the first batch of links.
:param datetime start_date_time: Date/time to use as a starting point for link changes. Only link changes that occurred after that date/time will be returned. Cannot be used in conjunction with 'watermark' parameter.
:rtype: :class:`<ReportingWorkItemLinksBatch> <work-item-tracking.v4_1.models.ReportingWorkItemLinksBatch>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
query_parameters = {}
if link_types is not None:
link_types = ",".join(link_types)
query_parameters['linkTypes'] = self._serialize.query('link_types', link_types, 'str')
if types is not None:
types = ",".join(types)
query_parameters['types'] = self._serialize.query('types', types, 'str')
if continuation_token is not None:
query_parameters['continuationToken'] = self._serialize.query('continuation_token', continuation_token, 'str')
if start_date_time is not None:
query_parameters['startDateTime'] = self._serialize.query('start_date_time', start_date_time, 'iso-8601')
response = self._send(http_method='GET',
location_id='b5b5b6d0-0308-40a1-b3f4-b9bb3c66878f',
version='4.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('ReportingWorkItemLinksBatch', response)
def get_relation_type(self, relation):
"""GetRelationType.
Gets the work item relation type definition.
:param str relation: The relation name
:rtype: :class:`<WorkItemRelationType> <work-item-tracking.v4_1.models.WorkItemRelationType>`
"""
route_values = {}
if relation is not None:
route_values['relation'] = self._serialize.url('relation', relation, 'str')
response = self._send(http_method='GET',
location_id='f5d33bc9-5b49-4a3c-a9bd-f3cd46dd2165',
version='4.1',
route_values=route_values)
return self._deserialize('WorkItemRelationType', response)
def get_relation_types(self):
"""GetRelationTypes.
Gets the work item relation types.
:rtype: [WorkItemRelationType]
"""
response = self._send(http_method='GET',
location_id='f5d33bc9-5b49-4a3c-a9bd-f3cd46dd2165',
version='4.1')
return self._deserialize('[WorkItemRelationType]', self._unwrap_collection(response))
def read_reporting_revisions_get(self, project=None, fields=None, types=None, continuation_token=None, start_date_time=None, include_identity_ref=None, include_deleted=None, include_tag_ref=None, include_latest_only=None, expand=None, include_discussion_changes_only=None, max_page_size=None):
"""ReadReportingRevisionsGet.
Get a batch of work item revisions with the option of including deleted items
:param str project: Project ID or project name
:param [str] fields: A list of fields to return in work item revisions. Omit this parameter to get all reportable fields.
:param [str] types: A list of types to filter the results to specific work item types. Omit this parameter to get work item revisions of all work item types.
:param str continuation_token: Specifies the watermark to start the batch from. Omit this parameter to get the first batch of revisions.
:param datetime start_date_time: Date/time to use as a starting point for revisions, all revisions will occur after this date/time. Cannot be used in conjunction with 'watermark' parameter.
:param bool include_identity_ref: Return an identity reference instead of a string value for identity fields.
:param bool include_deleted: Specify if the deleted item should be returned.
:param bool include_tag_ref: Specify if the tag objects should be returned for System.Tags field.
:param bool include_latest_only: Return only the latest revisions of work items, skipping all historical revisions
:param str expand: Return all the fields in work item revisions, including long text fields which are not returned by default
:param bool include_discussion_changes_only: Return only the those revisions of work items, where only history field was changed
:param int max_page_size: The maximum number of results to return in this batch
:rtype: :class:`<ReportingWorkItemRevisionsBatch> <work-item-tracking.v4_1.models.ReportingWorkItemRevisionsBatch>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
query_parameters = {}
if fields is not None:
fields = ",".join(fields)
query_parameters['fields'] = self._serialize.query('fields', fields, 'str')
if types is not None:
types = ",".join(types)
query_parameters['types'] = self._serialize.query('types', types, 'str')
if continuation_token is not None:
query_parameters['continuationToken'] = self._serialize.query('continuation_token', continuation_token, 'str')
if start_date_time is not None:
query_parameters['startDateTime'] = self._serialize.query('start_date_time', start_date_time, 'iso-8601')
if include_identity_ref is not None:
query_parameters['includeIdentityRef'] = self._serialize.query('include_identity_ref', include_identity_ref, 'bool')
if include_deleted is not None:
query_parameters['includeDeleted'] = self._serialize.query('include_deleted', include_deleted, 'bool')
if include_tag_ref is not None:
query_parameters['includeTagRef'] = self._serialize.query('include_tag_ref', include_tag_ref, 'bool')
if include_latest_only is not None:
query_parameters['includeLatestOnly'] = self._serialize.query('include_latest_only', include_latest_only, 'bool')
if expand is not None:
query_parameters['$expand'] = self._serialize.query('expand', expand, 'str')
if include_discussion_changes_only is not None:
query_parameters['includeDiscussionChangesOnly'] = self._serialize.query('include_discussion_changes_only', include_discussion_changes_only, 'bool')
if max_page_size is not None:
query_parameters['$maxPageSize'] = self._serialize.query('max_page_size', max_page_size, 'int')
response = self._send(http_method='GET',
location_id='f828fe59-dd87-495d-a17c-7a8d6211ca6c',
version='4.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('ReportingWorkItemRevisionsBatch', response)
def read_reporting_revisions_post(self, filter, project=None, continuation_token=None, start_date_time=None, expand=None):
"""ReadReportingRevisionsPost.
Get a batch of work item revisions. This request may be used if your list of fields is large enough that it may run the URL over the length limit.
:param :class:`<ReportingWorkItemRevisionsFilter> <work-item-tracking.v4_1.models.ReportingWorkItemRevisionsFilter>` filter: An object that contains request settings: field filter, type filter, identity format
:param str project: Project ID or project name
:param str continuation_token: Specifies the watermark to start the batch from. Omit this parameter to get the first batch of revisions.
:param datetime start_date_time: Date/time to use as a starting point for revisions, all revisions will occur after this date/time. Cannot be used in conjunction with 'watermark' parameter.
:param str expand:
:rtype: :class:`<ReportingWorkItemRevisionsBatch> <work-item-tracking.v4_1.models.ReportingWorkItemRevisionsBatch>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
query_parameters = {}
if continuation_token is not None:
query_parameters['continuationToken'] = self._serialize.query('continuation_token', continuation_token, 'str')
if start_date_time is not None:
query_parameters['startDateTime'] = self._serialize.query('start_date_time', start_date_time, 'iso-8601')
if expand is not None:
query_parameters['$expand'] = self._serialize.query('expand', expand, 'str')
content = self._serialize.body(filter, 'ReportingWorkItemRevisionsFilter')
response = self._send(http_method='POST',
location_id='f828fe59-dd87-495d-a17c-7a8d6211ca6c',
version='4.1',
route_values=route_values,
query_parameters=query_parameters,
content=content)
return self._deserialize('ReportingWorkItemRevisionsBatch', response)
def create_work_item(self, document, project, type, validate_only=None, bypass_rules=None, suppress_notifications=None):
"""CreateWorkItem.
Creates a single work item.
:param :class:`<[JsonPatchOperation]> <work-item-tracking.v4_1.models.[JsonPatchOperation]>` document: The JSON Patch document representing the work item
:param str project: Project ID or project name
:param str type: The work item type of the work item to create
:param bool validate_only: Indicate if you only want to validate the changes without saving the work item
:param bool bypass_rules: Do not enforce the work item type rules on this update
:param bool suppress_notifications: Do not fire any notifications for this change
:rtype: :class:`<WorkItem> <work-item-tracking.v4_1.models.WorkItem>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if type is not None:
route_values['type'] = self._serialize.url('type', type, 'str')
query_parameters = {}
if validate_only is not None:
query_parameters['validateOnly'] = self._serialize.query('validate_only', validate_only, 'bool')
if bypass_rules is not None:
query_parameters['bypassRules'] = self._serialize.query('bypass_rules', bypass_rules, 'bool')
if suppress_notifications is not None:
query_parameters['suppressNotifications'] = self._serialize.query('suppress_notifications', suppress_notifications, 'bool')
content = self._serialize.body(document, '[JsonPatchOperation]')
response = self._send(http_method='POST',
location_id='62d3d110-0047-428c-ad3c-4fe872c91c74',
version='4.1',
route_values=route_values,
query_parameters=query_parameters,
content=content,
media_type='application/json-patch+json')
return self._deserialize('WorkItem', response)
def get_work_item_template(self, project, type, fields=None, as_of=None, expand=None):
"""GetWorkItemTemplate.
Returns a single work item from a template.
:param str project: Project ID or project name
:param str type: The work item type name
:param str fields: Comma-separated list of requested fields
:param datetime as_of: AsOf UTC date time string
:param str expand: The expand parameters for work item attributes. Possible options are { None, Relations, Fields, Links, All }.
:rtype: :class:`<WorkItem> <work-item-tracking.v4_1.models.WorkItem>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if type is not None:
route_values['type'] = self._serialize.url('type', type, 'str')
query_parameters = {}
if fields is not None:
query_parameters['fields'] = self._serialize.query('fields', fields, 'str')
if as_of is not None:
query_parameters['asOf'] = self._serialize.query('as_of', as_of, 'iso-8601')
if expand is not None:
query_parameters['$expand'] = self._serialize.query('expand', expand, 'str')
response = self._send(http_method='GET',
location_id='62d3d110-0047-428c-ad3c-4fe872c91c74',
version='4.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('WorkItem', response)
def delete_work_item(self, id, project=None, destroy=None):
"""DeleteWorkItem.
Deletes the specified work item and sends it to the Recycle Bin, so that it can be restored back, if required. Optionally, if the destroy parameter has been set to true, it destroys the work item permanently.
:param int id: ID of the work item to be deleted
:param str project: Project ID or project name
:param bool destroy: Optional parameter, if set to true, the work item is deleted permanently
:rtype: :class:`<WorkItemDelete> <work-item-tracking.v4_1.models.WorkItemDelete>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if id is not None:
route_values['id'] = self._serialize.url('id', id, 'int')
query_parameters = {}
if destroy is not None:
query_parameters['destroy'] = self._serialize.query('destroy', destroy, 'bool')
response = self._send(http_method='DELETE',
location_id='72c7ddf8-2cdc-4f60-90cd-ab71c14a399b',
version='4.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('WorkItemDelete', response)
def get_work_item(self, id, project=None, fields=None, as_of=None, expand=None):
"""GetWorkItem.
Returns a single work item.
:param int id: The work item id
:param str project: Project ID or project name
:param [str] fields: Comma-separated list of requested fields
:param datetime as_of: AsOf UTC date time string
:param str expand: The expand parameters for work item attributes. Possible options are { None, Relations, Fields, Links, All }.
:rtype: :class:`<WorkItem> <work-item-tracking.v4_1.models.WorkItem>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if id is not None:
route_values['id'] = self._serialize.url('id', id, 'int')
query_parameters = {}
if fields is not None:
fields = ",".join(fields)
query_parameters['fields'] = self._serialize.query('fields', fields, 'str')
if as_of is not None:
query_parameters['asOf'] = self._serialize.query('as_of', as_of, 'iso-8601')
if expand is not None:
query_parameters['$expand'] = self._serialize.query('expand', expand, 'str')
response = self._send(http_method='GET',
location_id='72c7ddf8-2cdc-4f60-90cd-ab71c14a399b',
version='4.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('WorkItem', response)
def get_work_items(self, ids, project=None, fields=None, as_of=None, expand=None, error_policy=None):
"""GetWorkItems.
Returns a list of work items.
:param [int] ids: The comma-separated list of requested work item ids
:param str project: Project ID or project name
:param [str] fields: Comma-separated list of requested fields
:param datetime as_of: AsOf UTC date time string
:param str expand: The expand parameters for work item attributes. Possible options are { None, Relations, Fields, Links, All }.
:param str error_policy: The flag to control error policy in a bulk get work items request. Possible options are {Fail, Omit}.
:rtype: [WorkItem]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
query_parameters = {}
if ids is not None:
ids = ",".join(map(str, ids))
query_parameters['ids'] = self._serialize.query('ids', ids, 'str')
if fields is not None:
fields = ",".join(fields)
query_parameters['fields'] = self._serialize.query('fields', fields, 'str')
if as_of is not None:
query_parameters['asOf'] = self._serialize.query('as_of', as_of, 'iso-8601')
if expand is not None:
query_parameters['$expand'] = self._serialize.query('expand', expand, 'str')
if error_policy is not None:
query_parameters['errorPolicy'] = self._serialize.query('error_policy', error_policy, 'str')
response = self._send(http_method='GET',
location_id='72c7ddf8-2cdc-4f60-90cd-ab71c14a399b',
version='4.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[WorkItem]', self._unwrap_collection(response))
def update_work_item(self, document, id, project=None, validate_only=None, bypass_rules=None, suppress_notifications=None):
"""UpdateWorkItem.
Updates a single work item.
:param :class:`<[JsonPatchOperation]> <work-item-tracking.v4_1.models.[JsonPatchOperation]>` document: The JSON Patch document representing the update
:param int id: The id of the work item to update
:param str project: Project ID or project name
:param bool validate_only: Indicate if you only want to validate the changes without saving the work item
:param bool bypass_rules: Do not enforce the work item type rules on this update
:param bool suppress_notifications: Do not fire any notifications for this change
:rtype: :class:`<WorkItem> <work-item-tracking.v4_1.models.WorkItem>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if id is not None:
route_values['id'] = self._serialize.url('id', id, 'int')
query_parameters = {}
if validate_only is not None:
query_parameters['validateOnly'] = self._serialize.query('validate_only', validate_only, 'bool')
if bypass_rules is not None:
query_parameters['bypassRules'] = self._serialize.query('bypass_rules', bypass_rules, 'bool')
if suppress_notifications is not None:
query_parameters['suppressNotifications'] = self._serialize.query('suppress_notifications', suppress_notifications, 'bool')
content = self._serialize.body(document, '[JsonPatchOperation]')
response = self._send(http_method='PATCH',
location_id='72c7ddf8-2cdc-4f60-90cd-ab71c14a399b',
version='4.1',
route_values=route_values,
query_parameters=query_parameters,
content=content,
media_type='application/json-patch+json')
return self._deserialize('WorkItem', response)
def get_work_item_next_states_on_checkin_action(self, ids, action=None):
"""GetWorkItemNextStatesOnCheckinAction.
[Preview API] Returns the next state on the given work item IDs.
:param [int] ids: list of work item ids
:param str action: possible actions. Currently only supports checkin
:rtype: [WorkItemNextStateOnTransition]
"""
query_parameters = {}
if ids is not None:
ids = ",".join(map(str, ids))
query_parameters['ids'] = self._serialize.query('ids', ids, 'str')
if action is not None:
query_parameters['action'] = self._serialize.query('action', action, 'str')
response = self._send(http_method='GET',
location_id='afae844b-e2f6-44c2-8053-17b3bb936a40',
version='4.1-preview.1',
query_parameters=query_parameters)
return self._deserialize('[WorkItemNextStateOnTransition]', self._unwrap_collection(response))
def get_work_item_type_categories(self, project):
"""GetWorkItemTypeCategories.
Get all work item type categories.
:param str project: Project ID or project name
:rtype: [WorkItemTypeCategory]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
response = self._send(http_method='GET',
location_id='9b9f5734-36c8-415e-ba67-f83b45c31408',
version='4.1',
route_values=route_values)
return self._deserialize('[WorkItemTypeCategory]', self._unwrap_collection(response))
def get_work_item_type_category(self, project, category):
"""GetWorkItemTypeCategory.
Get specific work item type category by name.
:param str project: Project ID or project name
:param str category: The category name
:rtype: :class:`<WorkItemTypeCategory> <work-item-tracking.v4_1.models.WorkItemTypeCategory>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if category is not None:
route_values['category'] = self._serialize.url('category', category, 'str')
response = self._send(http_method='GET',
location_id='9b9f5734-36c8-415e-ba67-f83b45c31408',
version='4.1',
route_values=route_values)
return self._deserialize('WorkItemTypeCategory', response)
def get_work_item_type(self, project, type):
"""GetWorkItemType.
Returns a work item type definition.
:param str project: Project ID or project name
:param str type: Work item type name
:rtype: :class:`<WorkItemType> <work-item-tracking.v4_1.models.WorkItemType>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if type is not None:
route_values['type'] = self._serialize.url('type', type, 'str')
response = self._send(http_method='GET',
location_id='7c8d7a76-4a09-43e8-b5df-bd792f4ac6aa',
version='4.1',
route_values=route_values)
return self._deserialize('WorkItemType', response)
def get_work_item_types(self, project):
"""GetWorkItemTypes.
Returns the list of work item types
:param str project: Project ID or project name
:rtype: [WorkItemType]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
response = self._send(http_method='GET',
location_id='7c8d7a76-4a09-43e8-b5df-bd792f4ac6aa',
version='4.1',
route_values=route_values)
return self._deserialize('[WorkItemType]', self._unwrap_collection(response))
def get_work_item_type_fields_with_references(self, project, type, expand=None):
"""GetWorkItemTypeFieldsWithReferences.
Get a list of fields for a work item type with detailed references.
:param str project: Project ID or project name
:param str type: Work item type.
:param str expand: Expand level for the API response. Properties: to include allowedvalues, default value, isRequired etc. as a part of response; None: to skip these properties.
:rtype: [WorkItemTypeFieldWithReferences]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if type is not None:
route_values['type'] = self._serialize.url('type', type, 'str')
query_parameters = {}
if expand is not None:
query_parameters['$expand'] = self._serialize.query('expand', expand, 'str')
response = self._send(http_method='GET',
location_id='bd293ce5-3d25-4192-8e67-e8092e879efb',
version='4.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[WorkItemTypeFieldWithReferences]', self._unwrap_collection(response))
def get_work_item_type_field_with_references(self, project, type, field, expand=None):
"""GetWorkItemTypeFieldWithReferences.
Get a field for a work item type with detailed references.
:param str project: Project ID or project name
:param str type: Work item type.
:param str field:
:param str expand: Expand level for the API response. Properties: to include allowedvalues, default value, isRequired etc. as a part of response; None: to skip these properties.
:rtype: :class:`<WorkItemTypeFieldWithReferences> <work-item-tracking.v4_1.models.WorkItemTypeFieldWithReferences>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if type is not None:
route_values['type'] = self._serialize.url('type', type, 'str')
if field is not None:
route_values['field'] = self._serialize.url('field', field, 'str')
query_parameters = {}
if expand is not None:
query_parameters['$expand'] = self._serialize.query('expand', expand, 'str')
response = self._send(http_method='GET',
location_id='bd293ce5-3d25-4192-8e67-e8092e879efb',
version='4.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('WorkItemTypeFieldWithReferences', response)
def get_work_item_type_states(self, project, type):
"""GetWorkItemTypeStates.
[Preview API] Returns the state names and colors for a work item type.
:param str project: Project ID or project name
:param str type: The state name
:rtype: [WorkItemStateColor]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if type is not None:
route_values['type'] = self._serialize.url('type', type, 'str')
response = self._send(http_method='GET',
location_id='7c9d7a76-4a09-43e8-b5df-bd792f4ac6aa',
version='4.1-preview.1',
route_values=route_values)
return self._deserialize('[WorkItemStateColor]', self._unwrap_collection(response))
| false | true |
f7257af7f5369ac97b09687baeec3f79676d59fb | 17,484 | py | Python | SE4TeC_demo/GUI_function.py | JingweiZuo/SE2TeC | f2aab845aa648e366d0f6917a5d8abfd4d556d13 | [
"Apache-2.0"
] | 1 | 2020-05-10T11:23:11.000Z | 2020-05-10T11:23:11.000Z | SE4TeC_demo/GUI_function.py | JingweiZuo/SE4TeC | f2aab845aa648e366d0f6917a5d8abfd4d556d13 | [
"Apache-2.0"
] | null | null | null | SE4TeC_demo/GUI_function.py | JingweiZuo/SE4TeC | f2aab845aa648e366d0f6917a5d8abfd4d556d13 | [
"Apache-2.0"
] | null | null | null | import time
import tkinter as tk
from tkinter import *
import tkinter.filedialog as filedialog
from tkinter.filedialog import askopenfilename
import utils.utils as util
import utils.similarity_measures as sm
import SMAP.MatrixProfile as mp
import matplotlib
matplotlib.use("TkAgg")
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk
from matplotlib.figure import Figure
from matplotlib import pyplot as plt
import pandas as pd
import numpy as np
LARGE_FONT= ("Verdana", 12)
class gui_function:
def __init__(self, master):
self.filename = 'file name'
self.training_filename = 'choose training set'
self.testing_filename = 'choose testing set'
#transfer the main test part to the class
self.master = master
self.dataset = util.Dataset()
self.testdataset = util.Dataset()
self.dataset_name = None
self.shapeletList1 = []
self.shapeletList2 = []
def add_dataset(self):
self.dataset_name = askopenfilename(parent=self.master, title="Choose a file")
array_tsdict = util.load_dataset(self.dataset_name)
dir = self.dataset_name.split("/")
datasetname = dir[-1]
self.dataset.update(array_tsdict, datasetname)
self.master.v_dsname.set(self.dataset.name)
self.master.v_tslength.set(self.dataset.tslength)
self.master.v_tsnbr.set(self.dataset.size)
self.master.v_classnbr.set(len(self.dataset.ClassList))
self.master.show_frame(self.master.frame2, "SMAPPage")
def add_testing_file(self):
self.testfile_name = askopenfilename(parent=self.master, title="Choose a file")
array_tsdict = util.load_dataset(self.testfile_name)
dir = self.testfile_name.split("/")
datasetname = dir[-1]
self.testdataset.update(array_tsdict, datasetname)
self.master.v_testdsname.set(self.testdataset.name)
self.master.v_testtslength.set(self.testdataset.tslength)
self.master.v_testtsnbr.set(self.testdataset.size)
self.master.v_testclassnbr.set(len(self.testdataset.ClassList))
self.master.testdataset= self.testdataset
def ShowAlgoFrame(self, algorithm):
self.master.frame2_1[algorithm].tkraise()
self.master.frame2_1[algorithm].grid(row=0, column=0, sticky=W)
    def extractDP(self, master):
        """Compute the Distance Profile of a user-selected query against the
        target time series, plot the profile on axis 3, and highlight the
        query's nearest neighbor on the target plot (axis 2).

        :param master: the main GUI window holding the user's selections
        """
        # resolve the source/target TS objects chosen in the GUI
        self.nbr_source = master.v_source.get()
        self.nbr_target = master.v_target.get()
        dataset = master.dataset
        hash_source = dataset.tsNameDir[self.nbr_source]
        hash_target = dataset.tsNameDir[self.nbr_target]
        self.source = dataset.tsObjectDir[hash_source]
        self.target = dataset.tsObjectDir[hash_target]
        self.m = master.v_queryL.get()  # query length
        index_start = master.v_queryI.get()  # query start offset in the source TS
        data = self.target.timeseries
        index_end = index_start + self.m
        query = self.source.timeseries[index_start:index_end]
        # alternative distance-profile backends, kept for reference:
        #DP = sm.mass_v2(data, query)
        #DP = sm.mass_v1(query, data)
        DP = sm.euclidean_distance_unequal_lengths(data, query)
        # display the figures on the CANVAS of the GUI
        # CANVAS
        # remove the axis_x of "self.axe2"
        plt.setp(self.master.ax2.get_xaxis(), visible=False)
        self.master.ax2.spines['bottom'].set_visible(False)
        self.master.ax3.clear() # clear the previous plot at the same position
        x = range(len(DP))
        self.master.ax3.spines['top'].set_visible(False)
        self.master.ax3.spines['right'].set_visible(False)
        self.master.ax3.set_ylabel("Distance Profile")
        self.master.ax3.plot(x, DP, linewidth=0.5, label="D. P. of Query in " +self.nbr_target)
        self.master.ax3.legend()
        self.master.canvas.draw()
        # show the Nearest Neighbor in target TS: the minimum of the DP marks
        # the best-matching subsequence position
        DP_list = DP.tolist()
        index_inValue = DP_list.index(min(DP_list))
        # NOTE(review): uses master.m here but self.m above — presumably equal; confirm
        index_end = index_inValue + master.m
        NearestN = self.target.timeseries[index_inValue:index_end]
        x_target = range(len(self.target.timeseries))
        x_NearestN = range(index_inValue, index_end)
        self.ax2 = self.master.ax2
        self.ax2.clear()
        self.ax2.plot(x_target, self.target.timeseries, linewidth=0.5, label=self.nbr_target)
        self.ax2.plot(x_NearestN, NearestN, linewidth=2, label="Nearest Neighbor of Query")
        self.ax2.spines['top'].set_visible(False)
        self.ax2.spines['right'].set_visible(False)
        self.ax2.set_ylabel("Target TS")
        self.ax2.legend(loc="upper right")
        self.master.canvas.draw()
    def extractMP(self, master):
        """Compute the Matrix Profile of the source TS against the target TS,
        plot it on axis 3, and highlight the nearest-matching subsequence pair
        on the source (axis 1) and target (axis 2) plots.

        :param master: the main GUI window holding the user's selections
        """
        # resolve the source/target TS objects chosen in the GUI
        self.nbr_source = master.v_source.get()
        self.nbr_target = master.v_target.get()
        dataset = master.dataset
        hash_source = dataset.tsNameDir[self.nbr_source]
        hash_target = dataset.tsNameDir[self.nbr_target]
        self.source = dataset.tsObjectDir[hash_source]
        self.target = dataset.tsObjectDir[hash_target]
        self.m = master.v_queryL.get()  # subsequence length
        dp_all, MP= mp.computeMP(self.source, self.target, self.m, "mass_v2")
        # CANVAS
        # remove the axis_x of "self.axe2"
        plt.setp(self.master.ax2.get_xaxis(), visible=False)
        self.master.ax2.spines['bottom'].set_visible(False)
        self.master.ax3.clear() # clear the previous plot at the same position
        x = range(len(MP))
        self.master.ax3.spines['top'].set_visible(False)
        self.master.ax3.spines['right'].set_visible(False)
        self.master.ax3.set_ylabel("Matrix Profile")
        self.master.ax3.plot(x, MP, linewidth=0.5, label="M. P. of "+self.nbr_source+" towards " +self.nbr_target)
        self.master.ax3.legend()
        self.master.canvas.draw()
        # show the matching pair in Source and Target TS: the MP minimum gives
        # the source offset, then a distance profile locates its match in target
        index_source = MP.index(min(MP))
        index_source_end = index_source + self.m
        x_pair_source = range(index_source, index_source_end)
        pair_source = self.source.timeseries[index_source:index_source_end]
        DP = sm.euclidean_distance_unequal_lengths(self.target.timeseries, pair_source)
        index_target = DP.tolist().index(min(DP.tolist()))
        index_target_end = index_target + self.m
        x_pair_target = range(index_target, index_target_end)
        pair_target = self.target.timeseries[index_target:index_target_end]
        # redraw the source TS (axis 1) with its half of the pair highlighted
        self.master.ax1.clear()
        x = range(len(self.source.timeseries))
        self.master.ax1.spines['top'].set_visible(False)
        self.master.ax1.spines['right'].set_visible(False)
        self.master.ax1.set_ylabel("Source TS")
        self.master.ax1.plot(x_pair_source, pair_source, linewidth=2, color="red", label="Nearest Pair in source")
        self.master.ax1.plot(x, self.source.timeseries, linewidth=0.5, label=self.nbr_source)
        self.master.ax1.legend()
        self.master.canvas.draw()
        # redraw the target TS (axis 2) with its half of the pair highlighted
        self.master.ax2.clear()
        x = range(len(self.target.timeseries))
        self.master.ax2.spines['top'].set_visible(False)
        self.master.ax2.spines['right'].set_visible(False)
        self.master.ax2.set_ylabel("Target TS")
        self.master.ax2.plot(x_pair_target, pair_target, linewidth=2, color="red", label="Nearest Pair in target")
        self.master.ax2.plot(x, self.target.timeseries, linewidth=0.5, label=self.nbr_target)
        self.master.ax2.legend()
        self.master.canvas.draw()
def extractLB(self):
return 0
def extractRP(self, master):
source = master.source
input_class = str(master.v_class.get())
start = time.clock()
DP_all, mp_all, self.dist_differ, dist_threshold, self.dist_side_C, self.dist_side_nonC = mp.computeDistDiffer(source, master.dataset.tsObjectDir, self.m)
end = time.clock()
self.SMAP_time = round(end - start, 2)
if str(source.class_timeseries) == input_class:
RP = self.dist_side_C
else:
RP = self.dist_side_nonC
# CANVAS
# Configire the axis looking (start)
self.master.ax3.clear()
if (self.master.ax2.get_ylabel()!="Rep. Profile"):
self.master.ax2.clear()
plt.setp(self.master.ax2.get_xaxis(), visible=True)
self.master.ax2.spines['bottom'].set_visible(True)
self.master.ax3.axis("off")
# Configire the axis looking (end)
x = range(len(RP))
self.master.ax2.set_ylabel("Rep. Profile")
label = "Rep. P. of " + self.nbr_source + " in class " + input_class
self.master.ax2.plot(x, RP, linewidth=0.5, label=label)
self.master.ax2.legend()
self.master.canvas.draw()
# remove the Query in Source TS
self.master.ax1.clear()
x = range(len(self.source.timeseries))
self.master.ax1.spines['top'].set_visible(False)
self.master.ax1.spines['right'].set_visible(False)
self.master.ax1.set_ylabel("Source TS")
self.master.ax1.plot(x, self.source.timeseries, linewidth=0.5, label=self.nbr_source)
self.master.ax1.legend()
self.master.canvas.draw()
    def extractDiscP(self, master):
        """Plot the Discriminative Profile computed earlier by extractRP and
        highlight the candidate shapelet (the profile's maximum) on the source
        TS plot.

        Requires ``self.dist_differ`` and ``self.SMAP_time`` to have been set
        by a prior call to :meth:`extractRP`.

        :param master: the main GUI window holding the user's selections
        """
        DiscP = self.dist_differ
        # CANVAS
        # Configure the axis looking (start)
        plt.setp(self.master.ax2.get_xaxis(), visible=False)
        self.master.ax2.spines['bottom'].set_visible(False)
        self.master.ax3.axis("on")
        # Configure the axis looking (end)
        x = range(len(DiscP))
        self.master.ax3.set_ylabel("Discm. Profile")
        label = "Discm. P. of " + self.nbr_source
        self.master.ax3.plot(x, DiscP, linewidth=0.5, label=label)
        self.master.ax3.legend()
        self.master.canvas.draw()
        # show the pattern found in source TS: the profile maximum marks the
        # most discriminative subsequence start
        discP_list = DiscP.tolist()
        index_maxValue = discP_list.index(max(discP_list))
        # NOTE(review): uses master.m for the subsequence length here while
        # other methods use self.m — presumably equal; confirm
        index_end = index_maxValue + master.m
        source = master.source.timeseries
        pattern = source[index_maxValue:index_end]
        x_source = range(len(source))
        x_pattern = range(index_maxValue, index_end)
        # CANVAS
        self.ax1 = self.master.ax1
        self.ax1.clear()
        self.ax1.plot(x_source, source, linewidth=0.5, label="Source TS")
        self.ax1.plot(x_pattern, pattern, linewidth=2, color="red", label="Candidate Shaplet in "+ master.v_source.get())
        self.ax1.spines['top'].set_visible(False)
        self.ax1.spines['right'].set_visible(False)
        self.ax1.set_ylabel("Source TS")
        self.ax1.legend(loc="upper right")
        self.master.canvas.draw()
        self.master.v_timeSMAP.set(self.SMAP_time)
def extractDiscP_LB(self, master):
self.master.v_timeSMAPLB.set(0)
def drawShapelet(self, path, filename):
testFile = pd.read_csv(path + filename, header=None)
Class = testFile[0][0]
shapData = testFile[1][0]
shapData = shapData.strip('()').replace('[', '').replace(']', '')
shapeletList = []
# shapObjectList: DD, Thresh
shapObjectList = shapData.split("),(")
for shapObject in shapObjectList:
shap = Shapelet()
shapObject = shapObject.split(',')
shap.DD = float(shapObject[0])
shap.thresh = float(shapObject[1])
shap.Class = Class
shap.subseq = [float(s) for s in shapObject[2:]]
shapeletList.append(shap)
return shapeletList
def drawTS(self, path, filename):
tsObjectList1 = []
tsObjectList2 = []
testFile = pd.read_csv(path + filename, header=None)
tsClass1 = testFile[testFile[1] == 1]
tsClass2 = testFile[testFile[1] == -1]
for i in tsClass1.index:
ts = timeseries()
row = tsClass1.loc[i]
ts.id = row[0]
ts.Class = row[1]
ts.seq = row[2].split(',')
ts.seq = [float(val) for val in ts.seq]
tsObjectList1.append(ts)
for i in tsClass2.index:
ts = timeseries()
row = tsClass2.loc[i]
ts.id = row[0]
ts.Class = row[1]
ts.seq = row[2].split(',')
ts.seq = [float(val) for val in ts.seq]
tsObjectList2.append(ts)
return tsObjectList1, tsObjectList2
    def showTSset(self):
        """Plot sample training series of the selected class on the shared figure.

        Class "1.0" series go to the top subplot (211), class "-1.0" to the
        bottom subplot (212).
        """
        # HACK: dataset location is hard-coded to the author's machine —
        # TODO make this configurable.
        path_ECG = "/Users/Jingwei/Desktop/PhD_study/Done/EDBTdemo2018/SMAP_results/ECG200/TS_raw/"
        file_ECG = "TS.csv"
        path = path_ECG
        filename = file_ECG
        tsObjectC1, tsObjectC2 = self.drawTS(path, filename)
        self.fig = self.master.figure
        # Only the class-"1.0" draw clears the whole figure (presumably it is
        # expected to run first — confirm against the GUI workflow).
        if self.master.v_class.get()=="1.0":
            self.fig.clf()
        self.ax1 = self.fig.add_subplot(211)
        self.ax2 = self.fig.add_subplot(212)
        input_class = self.master.v_class.get()
        if input_class == "1.0":
            self.ax1.clear()
            # Plot ten class-1 series (list indices 11..20).
            for ts in tsObjectC1[11:21]:
                seq = ts.seq
                self.ax1.set_ylabel("TS data class 1")
                X = range(0, len(seq))
                self.ax1.plot(X, seq, color='green', linewidth=0.5)
        elif input_class == "-1.0":
            self.ax2.clear()
            # Plot the first ten class -1 series.
            for ts in tsObjectC2[0:10]:
                seq = ts.seq
                self.ax2.set_xlabel("index/offset")
                self.ax2.set_ylabel("TS data class -1.0")
                X = range(0, len(seq))
                self.ax2.plot(X, seq, color='green', linewidth=0.5)
        self.master.canvas.draw()
def extractShapeletCandidate(self):
path_ECG = "/Users/Jingwei/Desktop/PhD_study/Done/EDBTdemo2018/SMAP_results/ECG200/Shapelets/"
f1_ECG = "part-00043-956f02be-ab81-45db-9679-0bfd9150f5e8.csv" # 1
f2_ECG = "part-00013-956f02be-ab81-45db-9679-0bfd9150f5e8.csv" # -1
path = path_ECG
filename1 = f1_ECG
filename2 = f2_ECG
self.shapeletList1 = self.drawShapelet(path, filename1)
self.shapeletList2 = self.drawShapelet(path, filename2)
input_k = self.master.v_k.get()
input_c = self.master.v_class.get()
self.fig = self.master.figure
if input_c == "1.0":
i = 0
for shap in self.shapeletList1[:input_k]:
self.subaxe = self.fig.add_subplot(211)
shapdata = shap.subseq
# add a shift of 10 for shapelets
X = range(10, len(shapdata)+10)
self.subaxe.plot(X, shapdata, color='red', linewidth=2)
i = i + 0.1
elif input_c == "-1.0":
i = 0
for shap in self.shapeletList2[:input_k]:
self.subaxe = self.fig.add_subplot(212)
shapdata = shap.subseq
X = range(0, len(shapdata))
self.subaxe.plot(X, shapdata, color='blue', linewidth=2)
self.master.canvas.draw()
# canvas._tkcanvas.pack(side=tkinter.TOP, fill=tkinter.BOTH, expand=True)
    def extractED(self):
        """Stub: Euclidean-distance extraction is not implemented; always returns 0."""
        return 0
    def extractEDMatrix(self, master):
        """Stub: ED-matrix extraction is not implemented; only resets the USE
        timing display to 0."""
        self.master.v_timeUSE.set(0)
def predict(self, master):
#list of Shapelet from different class
testdataset = master.guiFunc.testdataset
nbr_testTS = master.v_testInstance.get()
print("---callback predict---")
print(nbr_testTS)
if nbr_testTS!="select":
hash_testTS = testdataset.tsNameDir[nbr_testTS]
self.testTS = testdataset.tsObjectDir[hash_testTS]
testTS = self.testTS.timeseries
min_dist = float('inf')
index_target = None
predict_class = '0'
match_shapelet = None
print("length os shapeletList1 is " + str(len(self.shapeletList1)))
for shap in self.shapeletList1 + self.shapeletList2:
DP = sm.euclidean_distance_unequal_lengths(testTS, shap.subseq)
DP = DP.tolist()
DP_min = min(DP)
if min_dist > DP_min:
min_dist = DP_min
index_target = DP.index(DP_min)
match_shapelet = shap
self.testTS = testdataset.tsObjectDir[hash_testTS]
# CANVAS
x = range(len(testTS))
shap_data = match_shapelet.subseq
x_shap = range(index_target, index_target + len(shap_data))
self.master.figure.clf()
self.ax = self.master.figure.add_subplot(111)
self.ax.spines['top'].set_visible(False)
self.ax.spines['right'].set_visible(False)
self.ax.plot(x, testTS, linewidth=0.5, label="testing TS: " + nbr_testTS)
self.ax.plot(x_shap, shap_data, linewidth=2, label="Matching Shapelet")
self.ax.set_ylabel("Testing TS")
self.ax.set_title("Real class: " + str(self.testTS.class_timeseries) + "; Prediction: " + str(match_shapelet.Class))
self.ax.legend(loc="upper right")
self.master.canvas.draw()
class Shapelet(object):
    """A shapelet candidate: a subsequence plus its discriminative statistics."""

    def __init__(self):
        """Initialise an empty shapelet record."""
        self.Class = ''      # class label the shapelet discriminates
        self.id = 0.0        # numeric identifier
        self.DD = 0.0        # discriminative distance score
        self.thresh = 0.0    # distance threshold for matching
        self.subseq = None   # the subsequence itself (list of floats)
class timeseries(object):
    """A labelled time-series row loaded from a CSV file."""

    def __init__(self):
        """Initialise an empty time-series record."""
        self.Class = ''    # class label
        self.id = None     # row identifier
        self.seq = None    # list of float values
| 42.643902 | 162 | 0.623256 | import time
import tkinter as tk
from tkinter import *
import tkinter.filedialog as filedialog
from tkinter.filedialog import askopenfilename
import utils.utils as util
import utils.similarity_measures as sm
import SMAP.MatrixProfile as mp
import matplotlib
matplotlib.use("TkAgg")
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk
from matplotlib.figure import Figure
from matplotlib import pyplot as plt
import pandas as pd
import numpy as np
LARGE_FONT= ("Verdana", 12)
class gui_function:
def __init__(self, master):
self.filename = 'file name'
self.training_filename = 'choose training set'
self.testing_filename = 'choose testing set'
self.master = master
self.dataset = util.Dataset()
self.testdataset = util.Dataset()
self.dataset_name = None
self.shapeletList1 = []
self.shapeletList2 = []
def add_dataset(self):
self.dataset_name = askopenfilename(parent=self.master, title="Choose a file")
array_tsdict = util.load_dataset(self.dataset_name)
dir = self.dataset_name.split("/")
datasetname = dir[-1]
self.dataset.update(array_tsdict, datasetname)
self.master.v_dsname.set(self.dataset.name)
self.master.v_tslength.set(self.dataset.tslength)
self.master.v_tsnbr.set(self.dataset.size)
self.master.v_classnbr.set(len(self.dataset.ClassList))
self.master.show_frame(self.master.frame2, "SMAPPage")
def add_testing_file(self):
self.testfile_name = askopenfilename(parent=self.master, title="Choose a file")
array_tsdict = util.load_dataset(self.testfile_name)
dir = self.testfile_name.split("/")
datasetname = dir[-1]
self.testdataset.update(array_tsdict, datasetname)
self.master.v_testdsname.set(self.testdataset.name)
self.master.v_testtslength.set(self.testdataset.tslength)
self.master.v_testtsnbr.set(self.testdataset.size)
self.master.v_testclassnbr.set(len(self.testdataset.ClassList))
self.master.testdataset= self.testdataset
def ShowAlgoFrame(self, algorithm):
self.master.frame2_1[algorithm].tkraise()
self.master.frame2_1[algorithm].grid(row=0, column=0, sticky=W)
def extractDP(self, master):
self.nbr_source = master.v_source.get()
self.nbr_target = master.v_target.get()
dataset = master.dataset
hash_source = dataset.tsNameDir[self.nbr_source]
hash_target = dataset.tsNameDir[self.nbr_target]
self.source = dataset.tsObjectDir[hash_source]
self.target = dataset.tsObjectDir[hash_target]
self.m = master.v_queryL.get()
index_start = master.v_queryI.get()
data = self.target.timeseries
index_end = index_start + self.m
query = self.source.timeseries[index_start:index_end]
DP = sm.euclidean_distance_unequal_lengths(data, query)
plt.setp(self.master.ax2.get_xaxis(), visible=False)
self.master.ax2.spines['bottom'].set_visible(False)
self.master.ax3.clear()
x = range(len(DP))
self.master.ax3.spines['top'].set_visible(False)
self.master.ax3.spines['right'].set_visible(False)
self.master.ax3.set_ylabel("Distance Profile")
self.master.ax3.plot(x, DP, linewidth=0.5, label="D. P. of Query in " +self.nbr_target)
self.master.ax3.legend()
self.master.canvas.draw()
DP_list = DP.tolist()
index_inValue = DP_list.index(min(DP_list))
index_end = index_inValue + master.m
NearestN = self.target.timeseries[index_inValue:index_end]
x_target = range(len(self.target.timeseries))
x_NearestN = range(index_inValue, index_end)
self.ax2 = self.master.ax2
self.ax2.clear()
self.ax2.plot(x_target, self.target.timeseries, linewidth=0.5, label=self.nbr_target)
self.ax2.plot(x_NearestN, NearestN, linewidth=2, label="Nearest Neighbor of Query")
self.ax2.spines['top'].set_visible(False)
self.ax2.spines['right'].set_visible(False)
self.ax2.set_ylabel("Target TS")
self.ax2.legend(loc="upper right")
self.master.canvas.draw()
def extractMP(self, master):
self.nbr_source = master.v_source.get()
self.nbr_target = master.v_target.get()
dataset = master.dataset
hash_source = dataset.tsNameDir[self.nbr_source]
hash_target = dataset.tsNameDir[self.nbr_target]
self.source = dataset.tsObjectDir[hash_source]
self.target = dataset.tsObjectDir[hash_target]
self.m = master.v_queryL.get()
dp_all, MP= mp.computeMP(self.source, self.target, self.m, "mass_v2")
plt.setp(self.master.ax2.get_xaxis(), visible=False)
self.master.ax2.spines['bottom'].set_visible(False)
self.master.ax3.clear()
x = range(len(MP))
self.master.ax3.spines['top'].set_visible(False)
self.master.ax3.spines['right'].set_visible(False)
self.master.ax3.set_ylabel("Matrix Profile")
self.master.ax3.plot(x, MP, linewidth=0.5, label="M. P. of "+self.nbr_source+" towards " +self.nbr_target)
self.master.ax3.legend()
self.master.canvas.draw()
index_source = MP.index(min(MP))
index_source_end = index_source + self.m
x_pair_source = range(index_source, index_source_end)
pair_source = self.source.timeseries[index_source:index_source_end]
DP = sm.euclidean_distance_unequal_lengths(self.target.timeseries, pair_source)
index_target = DP.tolist().index(min(DP.tolist()))
index_target_end = index_target + self.m
x_pair_target = range(index_target, index_target_end)
pair_target = self.target.timeseries[index_target:index_target_end]
self.master.ax1.clear()
x = range(len(self.source.timeseries))
self.master.ax1.spines['top'].set_visible(False)
self.master.ax1.spines['right'].set_visible(False)
self.master.ax1.set_ylabel("Source TS")
self.master.ax1.plot(x_pair_source, pair_source, linewidth=2, color="red", label="Nearest Pair in source")
self.master.ax1.plot(x, self.source.timeseries, linewidth=0.5, label=self.nbr_source)
self.master.ax1.legend()
self.master.canvas.draw()
self.master.ax2.clear()
x = range(len(self.target.timeseries))
self.master.ax2.spines['top'].set_visible(False)
self.master.ax2.spines['right'].set_visible(False)
self.master.ax2.set_ylabel("Target TS")
self.master.ax2.plot(x_pair_target, pair_target, linewidth=2, color="red", label="Nearest Pair in target")
self.master.ax2.plot(x, self.target.timeseries, linewidth=0.5, label=self.nbr_target)
self.master.ax2.legend()
self.master.canvas.draw()
def extractLB(self):
return 0
def extractRP(self, master):
source = master.source
input_class = str(master.v_class.get())
start = time.clock()
DP_all, mp_all, self.dist_differ, dist_threshold, self.dist_side_C, self.dist_side_nonC = mp.computeDistDiffer(source, master.dataset.tsObjectDir, self.m)
end = time.clock()
self.SMAP_time = round(end - start, 2)
if str(source.class_timeseries) == input_class:
RP = self.dist_side_C
else:
RP = self.dist_side_nonC
self.master.ax3.clear()
if (self.master.ax2.get_ylabel()!="Rep. Profile"):
self.master.ax2.clear()
plt.setp(self.master.ax2.get_xaxis(), visible=True)
self.master.ax2.spines['bottom'].set_visible(True)
self.master.ax3.axis("off")
x = range(len(RP))
self.master.ax2.set_ylabel("Rep. Profile")
label = "Rep. P. of " + self.nbr_source + " in class " + input_class
self.master.ax2.plot(x, RP, linewidth=0.5, label=label)
self.master.ax2.legend()
self.master.canvas.draw()
self.master.ax1.clear()
x = range(len(self.source.timeseries))
self.master.ax1.spines['top'].set_visible(False)
self.master.ax1.spines['right'].set_visible(False)
self.master.ax1.set_ylabel("Source TS")
self.master.ax1.plot(x, self.source.timeseries, linewidth=0.5, label=self.nbr_source)
self.master.ax1.legend()
self.master.canvas.draw()
def extractDiscP(self, master):
DiscP = self.dist_differ
plt.setp(self.master.ax2.get_xaxis(), visible=False)
self.master.ax2.spines['bottom'].set_visible(False)
self.master.ax3.axis("on")
x = range(len(DiscP))
self.master.ax3.set_ylabel("Discm. Profile")
label = "Discm. P. of " + self.nbr_source
self.master.ax3.plot(x, DiscP, linewidth=0.5, label=label)
self.master.ax3.legend()
self.master.canvas.draw()
discP_list = DiscP.tolist()
index_maxValue = discP_list.index(max(discP_list))
index_end = index_maxValue + master.m
source = master.source.timeseries
pattern = source[index_maxValue:index_end]
x_source = range(len(source))
x_pattern = range(index_maxValue, index_end)
self.ax1 = self.master.ax1
self.ax1.clear()
self.ax1.plot(x_source, source, linewidth=0.5, label="Source TS")
self.ax1.plot(x_pattern, pattern, linewidth=2, color="red", label="Candidate Shaplet in "+ master.v_source.get())
self.ax1.spines['top'].set_visible(False)
self.ax1.spines['right'].set_visible(False)
self.ax1.set_ylabel("Source TS")
self.ax1.legend(loc="upper right")
self.master.canvas.draw()
self.master.v_timeSMAP.set(self.SMAP_time)
def extractDiscP_LB(self, master):
self.master.v_timeSMAPLB.set(0)
def drawShapelet(self, path, filename):
testFile = pd.read_csv(path + filename, header=None)
Class = testFile[0][0]
shapData = testFile[1][0]
shapData = shapData.strip('()').replace('[', '').replace(']', '')
shapeletList = []
shapObjectList = shapData.split("),(")
for shapObject in shapObjectList:
shap = Shapelet()
shapObject = shapObject.split(',')
shap.DD = float(shapObject[0])
shap.thresh = float(shapObject[1])
shap.Class = Class
shap.subseq = [float(s) for s in shapObject[2:]]
shapeletList.append(shap)
return shapeletList
def drawTS(self, path, filename):
tsObjectList1 = []
tsObjectList2 = []
testFile = pd.read_csv(path + filename, header=None)
tsClass1 = testFile[testFile[1] == 1]
tsClass2 = testFile[testFile[1] == -1]
for i in tsClass1.index:
ts = timeseries()
row = tsClass1.loc[i]
ts.id = row[0]
ts.Class = row[1]
ts.seq = row[2].split(',')
ts.seq = [float(val) for val in ts.seq]
tsObjectList1.append(ts)
for i in tsClass2.index:
ts = timeseries()
row = tsClass2.loc[i]
ts.id = row[0]
ts.Class = row[1]
ts.seq = row[2].split(',')
ts.seq = [float(val) for val in ts.seq]
tsObjectList2.append(ts)
return tsObjectList1, tsObjectList2
def showTSset(self):
path_ECG = "/Users/Jingwei/Desktop/PhD_study/Done/EDBTdemo2018/SMAP_results/ECG200/TS_raw/"
file_ECG = "TS.csv"
path = path_ECG
filename = file_ECG
tsObjectC1, tsObjectC2 = self.drawTS(path, filename)
self.fig = self.master.figure
if self.master.v_class.get()=="1.0":
self.fig.clf()
self.ax1 = self.fig.add_subplot(211)
self.ax2 = self.fig.add_subplot(212)
input_class = self.master.v_class.get()
if input_class == "1.0":
self.ax1.clear()
for ts in tsObjectC1[11:21]:
seq = ts.seq
self.ax1.set_ylabel("TS data class 1")
X = range(0, len(seq))
self.ax1.plot(X, seq, color='green', linewidth=0.5)
elif input_class == "-1.0":
self.ax2.clear()
for ts in tsObjectC2[0:10]:
seq = ts.seq
self.ax2.set_xlabel("index/offset")
self.ax2.set_ylabel("TS data class -1.0")
X = range(0, len(seq))
self.ax2.plot(X, seq, color='green', linewidth=0.5)
self.master.canvas.draw()
def extractShapeletCandidate(self):
path_ECG = "/Users/Jingwei/Desktop/PhD_study/Done/EDBTdemo2018/SMAP_results/ECG200/Shapelets/"
f1_ECG = "part-00043-956f02be-ab81-45db-9679-0bfd9150f5e8.csv"
f2_ECG = "part-00013-956f02be-ab81-45db-9679-0bfd9150f5e8.csv"
path = path_ECG
filename1 = f1_ECG
filename2 = f2_ECG
self.shapeletList1 = self.drawShapelet(path, filename1)
self.shapeletList2 = self.drawShapelet(path, filename2)
input_k = self.master.v_k.get()
input_c = self.master.v_class.get()
self.fig = self.master.figure
if input_c == "1.0":
i = 0
for shap in self.shapeletList1[:input_k]:
self.subaxe = self.fig.add_subplot(211)
shapdata = shap.subseq
X = range(10, len(shapdata)+10)
self.subaxe.plot(X, shapdata, color='red', linewidth=2)
i = i + 0.1
elif input_c == "-1.0":
i = 0
for shap in self.shapeletList2[:input_k]:
self.subaxe = self.fig.add_subplot(212)
shapdata = shap.subseq
X = range(0, len(shapdata))
self.subaxe.plot(X, shapdata, color='blue', linewidth=2)
self.master.canvas.draw()
def extractED(self):
return 0
def extractEDMatrix(self, master):
self.master.v_timeUSE.set(0)
def predict(self, master):
testdataset = master.guiFunc.testdataset
nbr_testTS = master.v_testInstance.get()
print("---callback predict---")
print(nbr_testTS)
if nbr_testTS!="select":
hash_testTS = testdataset.tsNameDir[nbr_testTS]
self.testTS = testdataset.tsObjectDir[hash_testTS]
testTS = self.testTS.timeseries
min_dist = float('inf')
index_target = None
predict_class = '0'
match_shapelet = None
print("length os shapeletList1 is " + str(len(self.shapeletList1)))
for shap in self.shapeletList1 + self.shapeletList2:
DP = sm.euclidean_distance_unequal_lengths(testTS, shap.subseq)
DP = DP.tolist()
DP_min = min(DP)
if min_dist > DP_min:
min_dist = DP_min
index_target = DP.index(DP_min)
match_shapelet = shap
self.testTS = testdataset.tsObjectDir[hash_testTS]
x = range(len(testTS))
shap_data = match_shapelet.subseq
x_shap = range(index_target, index_target + len(shap_data))
self.master.figure.clf()
self.ax = self.master.figure.add_subplot(111)
self.ax.spines['top'].set_visible(False)
self.ax.spines['right'].set_visible(False)
self.ax.plot(x, testTS, linewidth=0.5, label="testing TS: " + nbr_testTS)
self.ax.plot(x_shap, shap_data, linewidth=2, label="Matching Shapelet")
self.ax.set_ylabel("Testing TS")
self.ax.set_title("Real class: " + str(self.testTS.class_timeseries) + "; Prediction: " + str(match_shapelet.Class))
self.ax.legend(loc="upper right")
self.master.canvas.draw()
class Shapelet(object):
def __init__(self):
self.id = 0.0
self.Class = ''
self.subseq = None
self.DD = 0.0
self.thresh = 0.0
class timeseries(object):
def __init__(self):
self.id = None
self.Class = ''
self.seq = None
| true | true |
f7257b07ee9ab3353bab98ac28e4ae6d47c6bf1c | 2,171 | py | Python | cifar10/models/repnet.py | NNHieu/loss-landscape | dfe517f23993ffbafea99272026d09e074e50b4f | [
"MIT"
] | null | null | null | cifar10/models/repnet.py | NNHieu/loss-landscape | dfe517f23993ffbafea99272026d09e074e50b4f | [
"MIT"
] | null | null | null | cifar10/models/repnet.py | NNHieu/loss-landscape | dfe517f23993ffbafea99272026d09e074e50b4f | [
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
import torch.autograd as autograd
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
import torch.optim as optim
import os
import argparse
class ResNetLayer(nn.Module):
    """Weight-tied residual block used as the repeated function f(z, x).

    When ``z`` is None the block acts on the injected input ``x`` alone;
    otherwise it mixes the current iterate ``z`` with ``x`` inside the
    residual branch. Spatial size is preserved by the padding choice.
    """

    def __init__(self, n_channels, n_inner_channels, kernel_size=3, num_groups=8):
        super().__init__()
        pad = kernel_size // 2  # "same" padding keeps H and W unchanged
        self.conv1 = nn.Conv2d(n_channels, n_inner_channels, kernel_size, padding=pad, bias=False)
        self.conv2 = nn.Conv2d(n_inner_channels, n_channels, kernel_size, padding=pad, bias=False)
        self.norm1 = nn.GroupNorm(num_groups, n_inner_channels)
        self.norm2 = nn.GroupNorm(num_groups, n_channels)
        self.norm3 = nn.GroupNorm(num_groups, n_channels)
        # Small-variance init keeps the repeated map tame at the start.
        self.conv1.weight.data.normal_(0, 0.01)
        self.conv2.weight.data.normal_(0, 0.01)

    def forward(self, z, x):
        """One application of the layer; ``z`` may be None on the first call."""
        if z is None:
            hidden = self.norm1(F.relu(self.conv1(x)))
            return self.norm3(F.relu(x + self.norm2(self.conv2(hidden))))
        hidden = self.norm1(F.relu(self.conv1(z)))
        return self.norm3(F.relu(z + self.norm2(x + self.conv2(hidden))))
class RepeatConvLayer(nn.Module):
    """Applies a two-argument function f(z, x) repeatedly with a fixed injection.

    ``f`` is first called with ``z=None`` to produce an initial iterate, then
    applied ``num_repeat`` more times, so it runs ``num_repeat + 1`` times in
    total, with the same ``x`` injected at every step.
    """

    def __init__(self, f, num_repeat):
        super().__init__()
        self.f = f
        self.num_repeat = num_repeat

    def forward(self, x):
        """Iterate ``f`` starting from z=None, injecting ``x`` at every step."""
        state = self.f(None, x)
        for _ in range(self.num_repeat):
            state = self.f(state, x)
        return state
def repeatNet(num_repeat):
    """Build a 10-class classifier whose middle block repeats one weight-tied layer.

    ``num_repeat`` controls how many extra times the shared ResNetLayer is
    applied (the layer runs ``num_repeat + 1`` times in total).
    """
    channels = 48
    shared_layer = ResNetLayer(channels, 64, kernel_size=3)
    return nn.Sequential(
        nn.Conv2d(3, channels, kernel_size=3, bias=True, padding=1),
        nn.BatchNorm2d(channels),
        RepeatConvLayer(shared_layer, num_repeat),
        nn.BatchNorm2d(channels),
        nn.AvgPool2d(8, 8),
        nn.Flatten(),
        # 4x4 spatial map assumed after the 8x8 pooling, i.e. 32x32 input
        # (CIFAR-10) — confirm against the training pipeline.
        nn.Linear(channels * 4 * 4, 10),
    )
def repeatNet5():
    """repeatNet variant with 5 repeats of the shared layer."""
    return repeatNet(5)
def repeatNet10():
    """repeatNet variant with 10 repeats of the shared layer."""
    return repeatNet(10)
def repeatNet17():
    """repeatNet variant with 17 repeats of the shared layer."""
    return repeatNet(17)
| 30.577465 | 109 | 0.607094 |
import torch
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
import torch.autograd as autograd
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
import torch.optim as optim
import os
import argparse
class ResNetLayer(nn.Module):
def __init__(self, n_channels, n_inner_channels, kernel_size=3, num_groups=8):
super().__init__()
self.conv1 = nn.Conv2d(n_channels, n_inner_channels, kernel_size, padding=kernel_size//2, bias=False)
self.conv2 = nn.Conv2d(n_inner_channels, n_channels, kernel_size, padding=kernel_size//2, bias=False)
self.norm1 = nn.GroupNorm(num_groups, n_inner_channels)
self.norm2 = nn.GroupNorm(num_groups, n_channels)
self.norm3 = nn.GroupNorm(num_groups, n_channels)
self.conv1.weight.data.normal_(0, 0.01)
self.conv2.weight.data.normal_(0, 0.01)
def forward(self, z, x):
if z is None:
y = self.norm1(F.relu(self.conv1(x)))
return self.norm3(F.relu(x + self.norm2(self.conv2(y))))
else:
y = self.norm1(F.relu(self.conv1(z)))
return self.norm3(F.relu(z + self.norm2(x + self.conv2(y))))
class RepeatConvLayer(nn.Module):
def __init__(self, f, num_repeat):
super().__init__()
self.f = f
self.num_repeat = num_repeat
def forward(self, x):
z = self.f(None, x)
for i in range(self.num_repeat):
z = self.f(z, x)
return z
def repeatNet(num_repeat):
chan = 48
f = ResNetLayer(chan, 64, kernel_size=3)
model = nn.Sequential(nn.Conv2d(3,chan, kernel_size=3, bias=True, padding=1),
nn.BatchNorm2d(chan),
RepeatConvLayer(f, num_repeat),
nn.BatchNorm2d(chan),
nn.AvgPool2d(8,8),
nn.Flatten(),
nn.Linear(chan*4*4,10))
return model
def repeatNet5():
return repeatNet(5)
def repeatNet10():
return repeatNet(10)
def repeatNet17():
return repeatNet(17)
| true | true |
f7257b2056fac589a4b126844ed598c5e63b20c6 | 635 | py | Python | manage.py | nrsharip/iss-web | e8ca66ade3933dfac4795ba7c44e067c26a079e2 | [
"MIT"
] | 1 | 2020-09-08T21:47:50.000Z | 2020-09-08T21:47:50.000Z | manage.py | nrsharip/iss-web | e8ca66ade3933dfac4795ba7c44e067c26a079e2 | [
"MIT"
] | null | null | null | manage.py | nrsharip/iss-web | e8ca66ade3933dfac4795ba7c44e067c26a079e2 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run Django administrative tasks for the web_server_moex project."""
    # Point Django at the project settings unless the caller already set it.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'web_server_moex.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
    main()
| 28.863636 | 79 | 0.686614 |
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'web_server_moex.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| true | true |
f7257b772a4fb5328778dcbc7b3b0d836de83e58 | 20,431 | py | Python | beta/spreadtrading/stEngine.py | black0144/vnpy | 0d0ea30dad14a0150f7500ff9a62528030321426 | [
"MIT"
] | 18 | 2019-02-21T05:42:41.000Z | 2022-03-31T10:17:51.000Z | beta/spreadtrading/stEngine.py | black0144/vnpy | 0d0ea30dad14a0150f7500ff9a62528030321426 | [
"MIT"
] | 1 | 2018-06-12T10:08:24.000Z | 2018-06-12T10:08:24.000Z | beta/spreadtrading/stEngine.py | black0144/vnpy | 0d0ea30dad14a0150f7500ff9a62528030321426 | [
"MIT"
] | 5 | 2017-12-20T09:57:17.000Z | 2021-08-01T19:47:14.000Z | # encoding: UTF-8
import json
import traceback
import shelve
import parser
import re
from vnpy.event import Event
from vnpy.trader.vtFunction import getJsonPath, getTempPath
from vnpy.trader.vtEvent import (EVENT_TICK, EVENT_TRADE, EVENT_POSITION,
EVENT_TIMER, EVENT_ORDER)
from vnpy.trader.vtObject import (VtSubscribeReq, VtOrderReq,
VtCancelOrderReq, VtLogData)
from vnpy.trader.vtConstant import (DIRECTION_LONG, DIRECTION_SHORT,
OFFSET_OPEN, OFFSET_CLOSE,
PRICETYPE_LIMITPRICE)
from .stBase import (StLeg, StSpread, EVENT_SPREADTRADING_TICK,
EVENT_SPREADTRADING_POS, EVENT_SPREADTRADING_LOG,
EVENT_SPREADTRADING_ALGO, EVENT_SPREADTRADING_ALGOLOG)
from .stAlgo import SniperAlgo
########################################################################
class StDataEngine(object):
    """Spread data engine: maintains legs/spreads and recomputes spread
    prices and positions from tick, trade and position events."""
    settingFileName = 'ST_setting.json'
    settingFilePath = getJsonPath(settingFileName, __file__)
    #----------------------------------------------------------------------
    def __init__(self, mainEngine, eventEngine):
        """Constructor"""
        self.mainEngine = mainEngine
        self.eventEngine = eventEngine
        # Leg / spread lookup dictionaries
        self.legDict = {}                   # vtSymbol:StLeg
        self.spreadDict = {}                # name:StSpread
        self.vtSymbolSpreadDict = {}        # vtSymbol:StSpread
        self.registerEvent()
    #----------------------------------------------------------------------
    def loadSetting(self):
        """Load spread definitions from the JSON setting file."""
        try:
            with open(self.settingFilePath) as f:
                l = json.load(f)
                for setting in l:
                    result, msg = self.createSpread(setting)
                    self.writeLog(msg)
                self.writeLog(u'价差配置加载完成')
        except:
            # NOTE(review): bare except hides all error types; the traceback
            # is at least captured in the log message below.
            content = u'价差配置加载出错,原因:' + traceback.format_exc()
            self.writeLog(content)
    #----------------------------------------------------------------------
    def saveSetting(self):
        """Save settings (stub: the file is opened but nothing is written)."""
        with open(self.settingFilePath) as f:
            pass
    #----------------------------------------------------------------------
    def createSpread(self, setting):
        """Create a spread (and its legs) from one setting dict.

        Returns (result, msg): result is True on success, msg is a
        human-readable status string (Chinese, shown in the log monitor).
        """
        result = False
        msg = ''
        # Reject duplicate spread names
        if setting['name'] in self.spreadDict:
            msg = u'%s价差存在重名' %setting['name']
            return result, msg
        # Reject legs that already belong to another spread
        l = []
        l.append(setting['activeLeg']['vtSymbol'])
        for d in setting['passiveLegs']:
            l.append(d['vtSymbol'])
        for vtSymbol in l:
            if vtSymbol in self.vtSymbolSpreadDict:
                existingSpread = self.vtSymbolSpreadDict[vtSymbol]
                msg = u'%s合约已经存在于%s价差中' %(vtSymbol, existingSpread.name)
                return result, msg
        # Create the spread object and validate/compile its price formula
        spread = StSpread()
        spread.name = setting['name']
        spread.formula = setting['formula']
        formula = spread.formula
        if not re.match("[0-9A-Z\/\+\-\*\(\) ].*", formula) :
            msg = u'%s价差存在公式问题请重新编写 %s' % (setting['name'] , spread.formula)
            return result, msg
        try :
            # NOTE(review): the stdlib `parser` module is deprecated since
            # Python 3.9 and removed in 3.10; compile(formula, '<spread>',
            # 'eval') would be the modern equivalent — confirm before porting.
            spread.code = parser.expr(formula).compile()
        except :
            msg = u'%s价差存在公式问题请重新编写 %s' % (setting['name'] , spread.formula)
            return result, msg
        self.spreadDict[spread.name] = spread
        # Create the active leg
        activeSetting = setting['activeLeg']
        activeLeg = StLeg()
        activeLeg.vtSymbol = str(activeSetting['vtSymbol'])
        activeLeg.ratio = float(activeSetting['ratio'])
        activeLeg.payup = int(activeSetting['payup'])
        activeLeg.legname = str(activeSetting['legname'])
        spread.addActiveLeg(activeLeg)
        self.legDict[activeLeg.vtSymbol] = activeLeg
        self.vtSymbolSpreadDict[activeLeg.vtSymbol] = spread
        self.subscribeMarketData(activeLeg.vtSymbol)
        # Create the passive legs
        passiveSettingList = setting['passiveLegs']
        passiveLegList = []
        for d in passiveSettingList:
            passiveLeg = StLeg()
            passiveLeg.vtSymbol = str(d['vtSymbol'])
            passiveLeg.ratio = float(d['ratio'])
            passiveLeg.payup = int(d['payup'])
            passiveLeg.legname = str(d['legname'])
            spread.addPassiveLeg(passiveLeg)
            self.legDict[passiveLeg.vtSymbol] = passiveLeg
            self.vtSymbolSpreadDict[passiveLeg.vtSymbol] = spread
            self.subscribeMarketData(passiveLeg.vtSymbol)
        # Initialise the spread and publish its first tick/position snapshots
        spread.initSpread()
        self.putSpreadTickEvent(spread)
        self.putSpreadPosEvent(spread)
        # Report success
        result = True
        msg = u'%s价差创建成功' %spread.name
        return result, msg
    #----------------------------------------------------------------------
    def processTickEvent(self, event):
        """Handle a tick push: update the leg's quotes and the spread price."""
        # Ignore ticks for symbols that are not part of any spread
        tick = event.dict_['data']
        if tick.vtSymbol not in self.legDict:
            return
        # Update the leg's best bid/ask price and volume
        leg = self.legDict[tick.vtSymbol]
        leg.bidPrice = tick.bidPrice1
        leg.askPrice = tick.askPrice1
        leg.bidVolume = tick.bidVolume1
        leg.askVolume = tick.askVolume1
        # Recompute the spread price
        spread = self.vtSymbolSpreadDict[tick.vtSymbol]
        spread.calculatePrice()
        # Publish the updated spread tick
        self.putSpreadTickEvent(spread)
    #----------------------------------------------------------------------
    def putSpreadTickEvent(self, spread):
        """Publish a spread tick update (per-spread and generic events)."""
        event1 = Event(EVENT_SPREADTRADING_TICK+spread.name)
        event1.dict_['data'] = spread
        self.eventEngine.put(event1)
        event2 = Event(EVENT_SPREADTRADING_TICK)
        event2.dict_['data'] = spread
        self.eventEngine.put(event2)
    #----------------------------------------------------------------------
    def processTradeEvent(self, event):
        """Handle a trade push: incrementally update leg and spread positions."""
        # Ignore trades for symbols that are not part of any spread
        trade = event.dict_['data']
        if trade.vtSymbol not in self.legDict:
            return
        # Update the leg's position from the trade direction/offset
        leg = self.legDict[trade.vtSymbol]
        direction = trade.direction
        offset = trade.offset
        if direction == DIRECTION_LONG:
            if offset == OFFSET_OPEN:
                leg.longPos += trade.volume
            else:
                leg.shortPos -= trade.volume
        else:
            if offset == OFFSET_OPEN:
                leg.shortPos += trade.volume
            else:
                leg.longPos -= trade.volume
        leg.netPos = leg.longPos - leg.shortPos
        # Recompute the spread position
        spread = self.vtSymbolSpreadDict[trade.vtSymbol]
        spread.calculatePos()
        # Publish the spread position update
        event1 = Event(EVENT_SPREADTRADING_POS+spread.name)
        event1.dict_['data'] = spread
        self.eventEngine.put(event1)
        event2 = Event(EVENT_SPREADTRADING_POS)
        event2.dict_['data'] = spread
        self.eventEngine.put(event2)
    #----------------------------------------------------------------------
    def processPosEvent(self, event):
        """Handle a position push: overwrite the leg position, update the spread."""
        # Ignore positions for symbols that are not part of any spread
        pos = event.dict_['data']
        if pos.vtSymbol not in self.legDict:
            return
        # Overwrite the leg's long/short position from the gateway snapshot
        leg = self.legDict[pos.vtSymbol]
        direction = pos.direction
        if direction == DIRECTION_LONG:
            leg.longPos = pos.position
        else:
            leg.shortPos = pos.position
        leg.netPos = leg.longPos - leg.shortPos
        # Recompute the spread position
        spread = self.vtSymbolSpreadDict[pos.vtSymbol]
        spread.calculatePos()
        # Publish the spread position update
        self.putSpreadPosEvent(spread)
    #----------------------------------------------------------------------
    def putSpreadPosEvent(self, spread):
        """Publish a spread position update (per-spread and generic events)."""
        event1 = Event(EVENT_SPREADTRADING_POS+spread.name)
        event1.dict_['data'] = spread
        self.eventEngine.put(event1)
        event2 = Event(EVENT_SPREADTRADING_POS)
        event2.dict_['data'] = spread
        self.eventEngine.put(event2)
    #----------------------------------------------------------------------
    def registerEvent(self):
        """Register tick/trade/position event handlers."""
        self.eventEngine.register(EVENT_TICK, self.processTickEvent)
        self.eventEngine.register(EVENT_TRADE, self.processTradeEvent)
        self.eventEngine.register(EVENT_POSITION, self.processPosEvent)
    #----------------------------------------------------------------------
    def subscribeMarketData(self, vtSymbol):
        """Subscribe to market data for one leg's contract."""
        contract = self.mainEngine.getContract(vtSymbol)
        if not contract:
            self.writeLog(u'订阅行情失败,找不到该合约%s' %vtSymbol)
            return
        req = VtSubscribeReq()
        req.symbol = contract.symbol
        req.exchange = contract.exchange
        self.mainEngine.subscribe(req, contract.gatewayName)
    #----------------------------------------------------------------------
    def writeLog(self, content):
        """Emit a spread-trading log event carrying the given text."""
        log = VtLogData()
        log.logContent = content
        event = Event(EVENT_SPREADTRADING_LOG)
        event.dict_['data'] = log
        self.eventEngine.put(event)
    #----------------------------------------------------------------------
    def getAllSpreads(self):
        """Return all configured spreads."""
        return self.spreadDict.values()
########################################################################
class StAlgoEngine(object):
"""价差算法交易引擎"""
algoFileName = 'SpreadTradingAlgo.vt'
algoFilePath = getTempPath(algoFileName)
#----------------------------------------------------------------------
    def __init__(self, dataEngine, mainEngine, eventEngine):
        """Constructor"""
        self.dataEngine = dataEngine
        self.mainEngine = mainEngine
        self.eventEngine = eventEngine
        # Algo lookup dictionaries
        self.algoDict = {}                  # spreadName:algo
        self.vtSymbolAlgoDict = {}          # vtSymbol:algo
        self.registerEvent()
#----------------------------------------------------------------------
    def registerEvent(self):
        """Register event listeners."""
        self.eventEngine.register(EVENT_SPREADTRADING_TICK, self.processSpreadTickEvent)
        self.eventEngine.register(EVENT_SPREADTRADING_POS, self.processSpreadPosEvent)
        self.eventEngine.register(EVENT_TRADE, self.processTradeEvent)
        self.eventEngine.register(EVENT_ORDER, self.processOrderEvent)
        self.eventEngine.register(EVENT_TIMER, self.processTimerEvent)
#----------------------------------------------------------------------
    def processSpreadTickEvent(self, event):
        """Forward a spread tick update to the algo trading that spread."""
        spread = event.dict_['data']
        algo = self.algoDict.get(spread.name, None)
        if algo:
            algo.updateSpreadTick(spread)
    #----------------------------------------------------------------------
    def processSpreadPosEvent(self, event):
        """Forward a spread position update to the algo trading that spread."""
        spread = event.dict_['data']
        algo = self.algoDict.get(spread.name, None)
        if algo:
            algo.updateSpreadPos(spread)
    #----------------------------------------------------------------------
    def processTradeEvent(self, event):
        """Forward a trade push to the algo trading the traded symbol."""
        trade = event.dict_['data']
        algo = self.vtSymbolAlgoDict.get(trade.vtSymbol, None)
        if algo:
            algo.updateTrade(trade)
    #----------------------------------------------------------------------
    def processOrderEvent(self, event):
        """Forward an order push to the algo trading the order's symbol."""
        order = event.dict_['data']
        algo = self.vtSymbolAlgoDict.get(order.vtSymbol, None)
        if algo:
            algo.updateOrder(order)
    #----------------------------------------------------------------------
    def processTimerEvent(self, event):
        """Forward the periodic timer event to every registered algo."""
        for algo in self.algoDict.values():
            algo.updateTimer()
#----------------------------------------------------------------------
def sendOrder(self, vtSymbol, direction, offset, price, volume, payup=0):
"""发单"""
contract = self.mainEngine.getContract(vtSymbol)
if not contract:
return ''
req = VtOrderReq()
req.symbol = contract.symbol
req.exchange = contract.exchange
req.vtSymbol = contract.vtSymbol
req.direction = direction
req.offset = offset
req.volume = int(volume)
req.priceType = PRICETYPE_LIMITPRICE
if direction == DIRECTION_LONG:
req.price = price + payup * contract.priceTick
else:
req.price = price - payup * contract.priceTick
# 委托转换
reqList = self.mainEngine.convertOrderReq(req)
vtOrderIDList = []
for req in reqList:
vtOrderID = self.mainEngine.sendOrder(req, contract.gatewayName)
vtOrderIDList.append(vtOrderID)
return vtOrderIDList
#----------------------------------------------------------------------
def cancelOrder(self, vtOrderID):
"""撤单"""
order = self.mainEngine.getOrder(vtOrderID)
if not order:
return
req = VtCancelOrderReq()
req.symbol = order.symbol
req.exchange = order.exchange
req.frontID = order.frontID
req.sessionID = order.sessionID
req.orderID = order.orderID
self.mainEngine.cancelOrder(req, order.gatewayName)
#----------------------------------------------------------------------
def buy(self, vtSymbol, price, volume, payup=0):
"""买入"""
l = self.sendOrder(vtSymbol, DIRECTION_LONG, OFFSET_OPEN, price, volume, payup)
return l
#----------------------------------------------------------------------
def sell(self, vtSymbol, price, volume, payup=0):
"""卖出"""
l = self.sendOrder(vtSymbol, DIRECTION_SHORT, OFFSET_CLOSE, price, volume, payup)
return l
#----------------------------------------------------------------------
def short(self, vtSymbol, price, volume, payup=0):
"""卖空"""
l = self.sendOrder(vtSymbol, DIRECTION_SHORT, OFFSET_OPEN, price, volume, payup)
return l
#----------------------------------------------------------------------
def cover(self, vtSymbol, price, volume, payup=0):
"""平空"""
l = self.sendOrder(vtSymbol, DIRECTION_LONG, OFFSET_CLOSE, price, volume, payup)
return l
#----------------------------------------------------------------------
def putAlgoEvent(self, algo):
"""发出算法状态更新事件"""
event = Event(EVENT_SPREADTRADING_ALGO+algo.name)
self.eventEngine.put(event)
#----------------------------------------------------------------------
def writeLog(self, content):
"""输出日志"""
log = VtLogData()
log.logContent = content
event = Event(EVENT_SPREADTRADING_ALGOLOG)
event.dict_['data'] = log
self.eventEngine.put(event)
#----------------------------------------------------------------------
def saveSetting(self):
"""保存算法配置"""
setting = {}
for algo in self.algoDict.values():
setting[algo.spreadName] = algo.getAlgoParams()
f = shelve.open(self.algoFilePath)
f['setting'] = setting
f.close()
#----------------------------------------------------------------------
def loadSetting(self):
"""加载算法配置"""
# 创建算法对象
l = self.dataEngine.getAllSpreads()
for spread in l:
algo = SniperAlgo(self, spread)
self.algoDict[spread.name] = algo
# 保存腿代码和算法对象的映射
for leg in spread.allLegs:
self.vtSymbolAlgoDict[leg.vtSymbol] = algo
# 加载配置
f = shelve.open(self.algoFilePath)
setting = f.get('setting', None)
f.close()
if not setting:
return
for algo in self.algoDict.values():
if algo.spreadName in setting:
d = setting[algo.spreadName]
algo.setAlgoParams(d)
#----------------------------------------------------------------------
def stopAll(self):
"""停止全部算法"""
for algo in self.algoDict.values():
algo.stop()
#----------------------------------------------------------------------
def startAlgo(self, spreadName):
"""启动算法"""
algo = self.algoDict[spreadName]
algoActive = algo.start()
return algoActive
#----------------------------------------------------------------------
def stopAlgo(self, spreadName):
"""停止算法"""
algo = self.algoDict[spreadName]
algoActive = algo.stop()
return algoActive
#----------------------------------------------------------------------
def getAllAlgoParams(self):
"""获取所有算法的参数"""
return [algo.getAlgoParams() for algo in self.algoDict.values()]
#----------------------------------------------------------------------
def setAlgoBuyPrice(self, spreadName, buyPrice):
"""设置算法买开价格"""
algo = self.algoDict[spreadName]
algo.setBuyPrice(buyPrice)
#----------------------------------------------------------------------
def setAlgoSellPrice(self, spreadName, sellPrice):
"""设置算法卖平价格"""
algo = self.algoDict[spreadName]
algo.setSellPrice(sellPrice)
#----------------------------------------------------------------------
def setAlgoShortPrice(self, spreadName, shortPrice):
"""设置算法卖开价格"""
algo = self.algoDict[spreadName]
algo.setShortPrice(shortPrice)
#----------------------------------------------------------------------
def setAlgoCoverPrice(self, spreadName, coverPrice):
"""设置算法买平价格"""
algo = self.algoDict[spreadName]
algo.setCoverPrice(coverPrice)
#----------------------------------------------------------------------
def setAlgoMode(self, spreadName, mode):
"""设置算法工作模式"""
algo = self.algoDict[spreadName]
algo.setMode(mode)
#----------------------------------------------------------------------
def setAlgoMaxOrderSize(self, spreadName, maxOrderSize):
"""设置算法单笔委托限制"""
algo = self.algoDict[spreadName]
algo.setMaxOrderSize(maxOrderSize)
#----------------------------------------------------------------------
def setAlgoMaxPosSize(self, spreadName, maxPosSize):
"""设置算法持仓限制"""
algo = self.algoDict[spreadName]
algo.setMaxPosSize(maxPosSize)
########################################################################
class StEngine(object):
    """Top-level spread trading engine: wires the data engine and the algo engine together."""
    #----------------------------------------------------------------------
    def __init__(self, mainEngine, eventEngine):
        """Constructor.  Note: algoEngine is built on top of dataEngine, so creation order matters."""
        self.mainEngine = mainEngine
        self.eventEngine = eventEngine
        self.dataEngine = StDataEngine(mainEngine, eventEngine)
        self.algoEngine = StAlgoEngine(self.dataEngine, mainEngine, eventEngine)
    #----------------------------------------------------------------------
    def init(self):
        """Initialise: load persisted spread definitions first, then persisted algo parameters."""
        self.dataEngine.loadSetting()
        self.algoEngine.loadSetting()
    #----------------------------------------------------------------------
    def stop(self):
        """Shut down: save spread settings, stop every algo, then save algo parameters."""
        self.dataEngine.saveSetting()
        self.algoEngine.stopAll()
        self.algoEngine.saveSetting()
| 34.805792 | 89 | 0.467623 |
import json
import traceback
import shelve
import parser
import re
from vnpy.event import Event
from vnpy.trader.vtFunction import getJsonPath, getTempPath
from vnpy.trader.vtEvent import (EVENT_TICK, EVENT_TRADE, EVENT_POSITION,
EVENT_TIMER, EVENT_ORDER)
from vnpy.trader.vtObject import (VtSubscribeReq, VtOrderReq,
VtCancelOrderReq, VtLogData)
from vnpy.trader.vtConstant import (DIRECTION_LONG, DIRECTION_SHORT,
OFFSET_OPEN, OFFSET_CLOSE,
PRICETYPE_LIMITPRICE)
from .stBase import (StLeg, StSpread, EVENT_SPREADTRADING_TICK,
EVENT_SPREADTRADING_POS, EVENT_SPREADTRADING_LOG,
EVENT_SPREADTRADING_ALGO, EVENT_SPREADTRADING_ALGOLOG)
from .stAlgo import SniperAlgo
ctiveLeg.legname = str(activeSetting['legname'])
spread.addActiveLeg(activeLeg)
self.legDict[activeLeg.vtSymbol] = activeLeg
self.vtSymbolSpreadDict[activeLeg.vtSymbol] = spread
self.subscribeMarketData(activeLeg.vtSymbol)
passiveSettingList = setting['passiveLegs']
passiveLegList = []
for d in passiveSettingList:
passiveLeg = StLeg()
passiveLeg.vtSymbol = str(d['vtSymbol'])
passiveLeg.ratio = float(d['ratio'])
passiveLeg.payup = int(d['payup'])
passiveLeg.legname = str(d['legname'])
spread.addPassiveLeg(passiveLeg)
self.legDict[passiveLeg.vtSymbol] = passiveLeg
self.vtSymbolSpreadDict[passiveLeg.vtSymbol] = spread
self.subscribeMarketData(passiveLeg.vtSymbol)
spread.initSpread()
self.putSpreadTickEvent(spread)
self.putSpreadPosEvent(spread)
result = True
msg = u'%s价差创建成功' %spread.name
return result, msg
    def processTickEvent(self, event):
        """Handle a raw tick: refresh the leg's quotes, then recalculate and publish the spread price."""
        tick = event.dict_['data']
        # Ignore ticks for symbols that are not a leg of any registered spread.
        if tick.vtSymbol not in self.legDict:
            return
        leg = self.legDict[tick.vtSymbol]
        leg.bidPrice = tick.bidPrice1
        leg.askPrice = tick.askPrice1
        leg.bidVolume = tick.bidVolume1
        leg.askVolume = tick.askVolume1
        spread = self.vtSymbolSpreadDict[tick.vtSymbol]
        spread.calculatePrice()
        self.putSpreadTickEvent(spread)
    def putSpreadTickEvent(self, spread):
        """Publish a spread tick on both the spread-specific channel and the general channel."""
        event1 = Event(EVENT_SPREADTRADING_TICK+spread.name)
        event1.dict_['data'] = spread
        self.eventEngine.put(event1)
        event2 = Event(EVENT_SPREADTRADING_TICK)
        event2.dict_['data'] = spread
        self.eventEngine.put(event2)
    def processTradeEvent(self, event):
        """Handle a trade fill: adjust the traded leg's positions, then recalc and publish the spread position."""
        trade = event.dict_['data']
        # Ignore fills on symbols that are not a leg of any registered spread.
        if trade.vtSymbol not in self.legDict:
            return
        leg = self.legDict[trade.vtSymbol]
        direction = trade.direction
        offset = trade.offset
        # LONG+OPEN adds to longPos, LONG+CLOSE reduces shortPos; mirrored for SHORT.
        if direction == DIRECTION_LONG:
            if offset == OFFSET_OPEN:
                leg.longPos += trade.volume
            else:
                leg.shortPos -= trade.volume
        else:
            if offset == OFFSET_OPEN:
                leg.shortPos += trade.volume
            else:
                leg.longPos -= trade.volume
        leg.netPos = leg.longPos - leg.shortPos
        spread = self.vtSymbolSpreadDict[trade.vtSymbol]
        spread.calculatePos()
        # Publish on both the spread-specific and the general position channels.
        event1 = Event(EVENT_SPREADTRADING_POS+spread.name)
        event1.dict_['data'] = spread
        self.eventEngine.put(event1)
        event2 = Event(EVENT_SPREADTRADING_POS)
        event2.dict_['data'] = spread
        self.eventEngine.put(event2)
    def processPosEvent(self, event):
        """Handle a position snapshot: overwrite the leg's long or short position from the gateway."""
        pos = event.dict_['data']
        if pos.vtSymbol not in self.legDict:
            return
        leg = self.legDict[pos.vtSymbol]
        direction = pos.direction
        # Snapshots replace (rather than increment) the side they report.
        if direction == DIRECTION_LONG:
            leg.longPos = pos.position
        else:
            leg.shortPos = pos.position
        leg.netPos = leg.longPos - leg.shortPos
        spread = self.vtSymbolSpreadDict[pos.vtSymbol]
        spread.calculatePos()
        self.putSpreadPosEvent(spread)
    def putSpreadPosEvent(self, spread):
        """Publish a spread position update on both the spread-specific and general channels."""
        event1 = Event(EVENT_SPREADTRADING_POS+spread.name)
        event1.dict_['data'] = spread
        self.eventEngine.put(event1)
        event2 = Event(EVENT_SPREADTRADING_POS)
        event2.dict_['data'] = spread
        self.eventEngine.put(event2)
    def registerEvent(self):
        """Subscribe to the raw tick, trade and position event streams."""
        self.eventEngine.register(EVENT_TICK, self.processTickEvent)
        self.eventEngine.register(EVENT_TRADE, self.processTradeEvent)
        self.eventEngine.register(EVENT_POSITION, self.processPosEvent)
    def subscribeMarketData(self, vtSymbol):
        """Subscribe to market data for one leg; logs and returns if the contract is unknown."""
        contract = self.mainEngine.getContract(vtSymbol)
        if not contract:
            self.writeLog(u'订阅行情失败,找不到该合约%s' %vtSymbol)
            return
        req = VtSubscribeReq()
        req.symbol = contract.symbol
        req.exchange = contract.exchange
        self.mainEngine.subscribe(req, contract.gatewayName)
    def writeLog(self, content):
        """Emit a spread-trading log event carrying *content*."""
        log = VtLogData()
        log.logContent = content
        event = Event(EVENT_SPREADTRADING_LOG)
        event.dict_['data'] = log
        self.eventEngine.put(event)
    def getAllSpreads(self):
        """Return a view of every StSpread object managed by this data engine."""
        return self.spreadDict.values()
q)
vtOrderIDList = []
for req in reqList:
vtOrderID = self.mainEngine.sendOrder(req, contract.gatewayName)
vtOrderIDList.append(vtOrderID)
return vtOrderIDList
    def cancelOrder(self, vtOrderID):
        """Cancel a working order by vtOrderID; unknown IDs are silently ignored."""
        order = self.mainEngine.getOrder(vtOrderID)
        if not order:
            return
        req = VtCancelOrderReq()
        req.symbol = order.symbol
        req.exchange = order.exchange
        req.frontID = order.frontID
        req.sessionID = order.sessionID
        req.orderID = order.orderID
        self.mainEngine.cancelOrder(req, order.gatewayName)
    def buy(self, vtSymbol, price, volume, payup=0):
        """Open a long position (LONG + OPEN)."""
        l = self.sendOrder(vtSymbol, DIRECTION_LONG, OFFSET_OPEN, price, volume, payup)
        return l
    def sell(self, vtSymbol, price, volume, payup=0):
        """Close a long position (SHORT + CLOSE)."""
        l = self.sendOrder(vtSymbol, DIRECTION_SHORT, OFFSET_CLOSE, price, volume, payup)
        return l
    def short(self, vtSymbol, price, volume, payup=0):
        """Open a short position (SHORT + OPEN)."""
        l = self.sendOrder(vtSymbol, DIRECTION_SHORT, OFFSET_OPEN, price, volume, payup)
        return l
    def cover(self, vtSymbol, price, volume, payup=0):
        """Close a short position (LONG + CLOSE)."""
        l = self.sendOrder(vtSymbol, DIRECTION_LONG, OFFSET_CLOSE, price, volume, payup)
        return l
    def putAlgoEvent(self, algo):
        """Publish an algo state-changed notification (no payload; the algo name is in the event type)."""
        event = Event(EVENT_SPREADTRADING_ALGO+algo.name)
        self.eventEngine.put(event)
    def writeLog(self, content):
        """Emit an algo log event carrying *content*."""
        log = VtLogData()
        log.logContent = content
        event = Event(EVENT_SPREADTRADING_ALGOLOG)
        event.dict_['data'] = log
        self.eventEngine.put(event)
    def saveSetting(self):
        """Persist every algo's parameters to the shelve file, keyed by spread name."""
        setting = {}
        for algo in self.algoDict.values():
            setting[algo.spreadName] = algo.getAlgoParams()
        f = shelve.open(self.algoFilePath)
        f['setting'] = setting
        f.close()
    def loadSetting(self):
        """Create one algo per spread, then restore persisted parameters if available."""
        l = self.dataEngine.getAllSpreads()
        for spread in l:
            algo = SniperAlgo(self, spread)
            self.algoDict[spread.name] = algo
            # Map every leg symbol to its algo so trade/order events can be routed.
            for leg in spread.allLegs:
                self.vtSymbolAlgoDict[leg.vtSymbol] = algo
        f = shelve.open(self.algoFilePath)
        setting = f.get('setting', None)
        f.close()
        if not setting:
            return
        for algo in self.algoDict.values():
            if algo.spreadName in setting:
                d = setting[algo.spreadName]
                algo.setAlgoParams(d)
    def stopAll(self):
        """Stop every running algo."""
        for algo in self.algoDict.values():
            algo.stop()
    def startAlgo(self, spreadName):
        """Start the algo for *spreadName*; return its new active flag."""
        algo = self.algoDict[spreadName]
        algoActive = algo.start()
        return algoActive
    def stopAlgo(self, spreadName):
        """Stop the algo for *spreadName*; return its new active flag."""
        algo = self.algoDict[spreadName]
        algoActive = algo.stop()
        return algoActive
    def getAllAlgoParams(self):
        """Return the parameter dict of every algo."""
        return [algo.getAlgoParams() for algo in self.algoDict.values()]
    def setAlgoBuyPrice(self, spreadName, buyPrice):
        """Set the buy (open long) trigger price of one algo."""
        algo = self.algoDict[spreadName]
        algo.setBuyPrice(buyPrice)
    def setAlgoSellPrice(self, spreadName, sellPrice):
        """Set the sell (close long) trigger price of one algo."""
        algo = self.algoDict[spreadName]
        algo.setSellPrice(sellPrice)
    def setAlgoShortPrice(self, spreadName, shortPrice):
        """Set the short (open short) trigger price of one algo."""
        algo = self.algoDict[spreadName]
        algo.setShortPrice(shortPrice)
    def setAlgoCoverPrice(self, spreadName, coverPrice):
        """Set the cover (close short) trigger price of one algo."""
        algo = self.algoDict[spreadName]
        algo.setCoverPrice(coverPrice)
    def setAlgoMode(self, spreadName, mode):
        """Set the working mode of one algo."""
        algo = self.algoDict[spreadName]
        algo.setMode(mode)
    def setAlgoMaxOrderSize(self, spreadName, maxOrderSize):
        """Set the per-order size limit of one algo."""
        algo = self.algoDict[spreadName]
        algo.setMaxOrderSize(maxOrderSize)
    def setAlgoMaxPosSize(self, spreadName, maxPosSize):
        """Set the position size limit of one algo."""
        algo = self.algoDict[spreadName]
        algo.setMaxPosSize(maxPosSize)
| true | true |
f7257c269b0dd8b39aab1647dd5c4b9c17e4563f | 13,360 | py | Python | server/models/postgis/user.py | rustyb/osm-ireland-tasking-manager | 958c232ba50ca02e5adbc7541a4b9efa7186bfdc | [
"BSD-2-Clause"
] | null | null | null | server/models/postgis/user.py | rustyb/osm-ireland-tasking-manager | 958c232ba50ca02e5adbc7541a4b9efa7186bfdc | [
"BSD-2-Clause"
] | 4 | 2020-03-24T17:47:26.000Z | 2021-06-02T00:32:15.000Z | server/models/postgis/user.py | rustyb/osm-ireland-tasking-manager | 958c232ba50ca02e5adbc7541a4b9efa7186bfdc | [
"BSD-2-Clause"
] | 1 | 2021-01-30T20:12:18.000Z | 2021-01-30T20:12:18.000Z | import geojson
import datetime
import dateutil.parser
from server import db
from sqlalchemy import desc, text
from server.models.dtos.user_dto import UserDTO, UserMappedProjectsDTO, MappedProject, UserFilterDTO, Pagination, \
UserSearchQuery, UserSearchDTO, ProjectParticipantUser, ListedUser
from server.models.postgis.licenses import License, users_licenses_table
from server.models.postgis.project_info import ProjectInfo
from server.models.postgis.statuses import MappingLevel, ProjectStatus, UserRole
from server.models.postgis.utils import NotFound, timestamp
class User(db.Model):
    """ Describes a user account and their mapping/validation activity """
    __tablename__ = "users"

    id = db.Column(db.BigInteger, primary_key=True, index=True)
    validation_message = db.Column(db.Boolean, default=True, nullable=False)
    username = db.Column(db.String, unique=True)
    role = db.Column(db.Integer, default=0, nullable=False)           # UserRole enum value
    mapping_level = db.Column(db.Integer, default=1, nullable=False)  # MappingLevel enum value
    tasks_mapped = db.Column(db.Integer, default=0, nullable=False)
    tasks_validated = db.Column(db.Integer, default=0, nullable=False)
    tasks_invalidated = db.Column(db.Integer, default=0, nullable=False)
    # Fix: projects_mapped was declared twice (first as an Integer with default=1,
    # then as this array).  Only the later array declaration was ever mapped, so
    # the shadowed Integer declaration has been removed; behaviour is unchanged.
    projects_mapped = db.Column(db.ARRAY(db.Integer))
    email_address = db.Column(db.String)
    is_email_verified = db.Column(db.Boolean, default=False)
    is_expert = db.Column(db.Boolean, default=False)
    twitter_id = db.Column(db.String)
    facebook_id = db.Column(db.String)
    linkedin_id = db.Column(db.String)
    date_registered = db.Column(db.DateTime, default=timestamp)
    # Represents the date the user last had one of their tasks validated
    last_validation_date = db.Column(db.DateTime, default=timestamp)

    # Relationships
    accepted_licenses = db.relationship("License", secondary=users_licenses_table)

    def create(self):
        """ Creates and saves the current model to the DB """
        db.session.add(self)
        db.session.commit()

    def save(self):
        """ Commits any pending changes on the current session """
        db.session.commit()

    def get_by_id(self, user_id: int):
        """ Return the user for the specified id, or None if not found """
        return User.query.get(user_id)

    def get_by_username(self, username: str):
        """ Return the user for the specified username, or None if not found """
        return User.query.filter_by(username=username).one_or_none()

    def update_username(self, username: str):
        """ Update the username """
        self.username = username
        db.session.commit()

    def update(self, user_dto: UserDTO):
        """ Update the user details from the DTO; social IDs are stored lower-cased """
        self.email_address = user_dto.email_address.lower() if user_dto.email_address else None
        self.twitter_id = user_dto.twitter_id.lower() if user_dto.twitter_id else None
        self.facebook_id = user_dto.facebook_id.lower() if user_dto.facebook_id else None
        self.linkedin_id = user_dto.linkedin_id.lower() if user_dto.linkedin_id else None
        self.validation_message = user_dto.validation_message
        db.session.commit()

    def set_email_verified_status(self, is_verified: bool):
        """ Updates email verified flag on successfully verified emails """
        self.is_email_verified = is_verified
        db.session.commit()

    def set_is_expert(self, is_expert: bool):
        """ Enables or disables expert mode on the user """
        self.is_expert = is_expert
        db.session.commit()

    @staticmethod
    def get_all_users(query: UserSearchQuery) -> UserSearchDTO:
        """ Search and filter all users """
        # Base query that applies to all searches
        base = db.session.query(User.id, User.username, User.mapping_level, User.role)

        # Add filter to query as required
        if query.mapping_level:
            base = base.filter(User.mapping_level == MappingLevel[query.mapping_level.upper()].value)
        if query.username:
            base = base.filter(User.username.ilike(query.username.lower() + '%'))
        if query.role:
            base = base.filter(User.role == UserRole[query.role.upper()].value)

        results = base.order_by(User.username).paginate(query.page, 20, True)

        dto = UserSearchDTO()
        for result in results.items:
            listed_user = ListedUser()
            listed_user.id = result.id
            listed_user.mapping_level = MappingLevel(result.mapping_level).name
            listed_user.username = result.username
            listed_user.role = UserRole(result.role).name
            dto.users.append(listed_user)

        dto.pagination = Pagination(results)
        return dto

    @staticmethod
    def get_all_users_not_pagainated():
        """ Get all user ids in DB (method-name typo kept for backwards compatibility) """
        return db.session.query(User.id).all()

    @staticmethod
    def filter_users(user_filter: str, project_id: int, page: int,
                     is_project_manager: bool=False) -> UserFilterDTO:
        """ Finds users that matches first characters, for auto-complete.

        Users who have participated (mapped or validated) in the project, if given, will be
        returned ahead of those who have not.
        """
        # Note that the projects_mapped column includes both mapped and validated projects.
        query = db.session.query(User.username, User.projects_mapped.any(project_id).label("participant")) \
            .filter(User.username.ilike(user_filter.lower() + '%')) \
            .order_by(desc("participant").nullslast(), User.username)

        if is_project_manager:
            query = query.filter(User.role.in_([UserRole.ADMIN.value, UserRole.PROJECT_MANAGER.value]))

        results = query.paginate(page, 20, True)
        if results.total == 0:
            raise NotFound()

        dto = UserFilterDTO()
        for result in results.items:
            dto.usernames.append(result.username)
            if project_id is not None:
                participant = ProjectParticipantUser()
                participant.username = result.username
                participant.project_id = project_id
                participant.is_participant = bool(result.participant)
                dto.users.append(participant)

        dto.pagination = Pagination(results)
        return dto

    @staticmethod
    def upsert_mapped_projects(user_id: int, project_id: int):
        """ Adds projects to mapped_projects if it doesn't exist """
        # NOTE(review): the :project_id token sits inside a quoted array literal —
        # confirm the bind parameter is applied as intended by SQLAlchemy text().
        sql = "select * from users where id = :user_id and projects_mapped @> '{{:project_id}}'"
        result = db.engine.execute(text(sql), user_id=user_id, project_id=project_id)
        if result.rowcount > 0:
            return  # User has previously mapped this project so return

        sql = '''update users
                    set projects_mapped = array_append(projects_mapped, :project_id)
                  where id = :user_id'''
        db.engine.execute(text(sql), project_id=project_id, user_id=user_id)

    @staticmethod
    def get_mapped_projects(user_id: int, preferred_locale: str) -> UserMappedProjectsDTO:
        """ Get all projects a user has mapped on """
        # This query looks scary, but we're really just creating an outer join between the query that gets the
        # counts of all mapped tasks and the query that gets counts of all validated tasks. This is necessary to
        # handle cases where users have only validated tasks on a project, or only mapped on a project.
        sql = '''SELECT p.id,
                        p.status,
                        p.default_locale,
                        c.mapped,
                        c.validated,
                        st_asgeojson(p.centroid)
                   FROM projects p,
                        (SELECT coalesce(v.project_id, m.project_id) project_id,
                                coalesce(v.validated, 0) validated,
                                coalesce(m.mapped, 0) mapped
                          FROM (SELECT t.project_id,
                                       count (t.validated_by) validated
                                  FROM tasks t
                                 WHERE t.project_id IN (SELECT unnest(projects_mapped) FROM users WHERE id = :user_id)
                                   AND t.validated_by = :user_id
                                 GROUP BY t.project_id, t.validated_by) v
                         FULL OUTER JOIN
                               (SELECT t.project_id,
                                       count(t.mapped_by) mapped
                                  FROM tasks t
                                 WHERE t.project_id IN (SELECT unnest(projects_mapped) FROM users WHERE id = :user_id)
                                   AND t.mapped_by = :user_id
                                 GROUP BY t.project_id, t.mapped_by) m
                            ON v.project_id = m.project_id) c
                  WHERE p.id = c.project_id ORDER BY p.id DESC'''

        results = db.engine.execute(text(sql), user_id=user_id)
        if results.rowcount == 0:
            raise NotFound()

        mapped_projects_dto = UserMappedProjectsDTO()
        for row in results:
            mapped_project = MappedProject()
            mapped_project.project_id = row[0]
            mapped_project.status = ProjectStatus(row[1]).name
            mapped_project.tasks_mapped = row[3]
            mapped_project.tasks_validated = row[4]
            mapped_project.centroid = geojson.loads(row[5])

            # Resolve the project name in the caller's preferred locale.
            project_info = ProjectInfo.get_dto_for_locale(row[0], preferred_locale, row[2])
            mapped_project.name = project_info.name

            mapped_projects_dto.mapped_projects.append(mapped_project)

        return mapped_projects_dto

    def set_user_role(self, role: UserRole):
        """ Sets the supplied role on the user """
        self.role = role.value
        db.session.commit()

    def set_mapping_level(self, level: MappingLevel):
        """ Sets the supplied level on the user """
        self.mapping_level = level.value
        db.session.commit()

    def accept_license_terms(self, license_id: int):
        """ Associate the user in scope with the supplied license """
        image_license = License.get_by_id(license_id)
        self.accepted_licenses.append(image_license)
        db.session.commit()

    def has_user_accepted_licence(self, license_id: int):
        """ Test to see if the user has accepted the terms of the specified license """
        image_license = License.get_by_id(license_id)
        return image_license in self.accepted_licenses

    def delete(self):
        """ Delete the user in scope from DB """
        db.session.delete(self)
        db.session.commit()

    def as_dto(self, logged_in_username: str) -> UserDTO:
        """ Create DTO object from user in scope; email fields only for the user's own profile """
        user_dto = UserDTO()
        user_dto.id = self.id
        user_dto.username = self.username
        user_dto.role = UserRole(self.role).name
        user_dto.mapping_level = MappingLevel(self.mapping_level).name
        user_dto.is_expert = self.is_expert or False
        user_dto.date_registered = str(self.date_registered)
        try:
            user_dto.projects_mapped = len(self.projects_mapped)
        except TypeError:
            # Fix: was a bare 'except:'.  projects_mapped is NULL (None) for users
            # that haven't touched a project yet, which makes len() raise TypeError;
            # other exceptions should not be silently swallowed.
            user_dto.projects_mapped = 0
        user_dto.tasks_mapped = self.tasks_mapped
        user_dto.tasks_validated = self.tasks_validated
        user_dto.tasks_invalidated = self.tasks_invalidated
        user_dto.twitter_id = self.twitter_id
        user_dto.linkedin_id = self.linkedin_id
        user_dto.facebook_id = self.facebook_id
        user_dto.validation_message = self.validation_message

        user_dto.total_time_spent = 0
        user_dto.time_spent_mapping = 0
        user_dto.time_spent_validating = 0

        # Total time spent locked for validation (SUM yields a single row whose
        # value is an interval, or None when there is no history).
        sql = """SELECT SUM(TO_TIMESTAMP(action_text, 'HH24:MI:SS')::TIME) FROM task_history
                 WHERE action='LOCKED_FOR_VALIDATION'
                 and user_id = :user_id;"""
        total_validation_time = db.engine.execute(text(sql), user_id=self.id)
        for row in total_validation_time:
            total_validation_time = row[0]
            if total_validation_time:
                total_validation_seconds = total_validation_time.total_seconds()
                user_dto.time_spent_validating = total_validation_seconds
                user_dto.total_time_spent += user_dto.time_spent_validating

        # Total time spent locked for mapping.
        sql = """SELECT SUM(TO_TIMESTAMP(action_text, 'HH24:MI:SS')::TIME) FROM task_history
                 WHERE action='LOCKED_FOR_MAPPING'
                 and user_id = :user_id;"""
        total_mapping_time = db.engine.execute(text(sql), user_id=self.id)
        for row in total_mapping_time:
            total_mapping_time = row[0]
            if total_mapping_time:
                total_mapping_seconds = total_mapping_time.total_seconds()
                user_dto.time_spent_mapping = total_mapping_seconds
                user_dto.total_time_spent += user_dto.time_spent_mapping

        if self.username == logged_in_username:
            # Only return email address when logged in user is looking at their own profile
            user_dto.email_address = self.email_address
            user_dto.is_email_verified = self.is_email_verified
        return user_dto
| 44.385382 | 118 | 0.643114 | import geojson
import datetime
import dateutil.parser
from server import db
from sqlalchemy import desc, text
from server.models.dtos.user_dto import UserDTO, UserMappedProjectsDTO, MappedProject, UserFilterDTO, Pagination, \
UserSearchQuery, UserSearchDTO, ProjectParticipantUser, ListedUser
from server.models.postgis.licenses import License, users_licenses_table
from server.models.postgis.project_info import ProjectInfo
from server.models.postgis.statuses import MappingLevel, ProjectStatus, UserRole
from server.models.postgis.utils import NotFound, timestamp
class User(db.Model):
__tablename__ = "users"
id = db.Column(db.BigInteger, primary_key=True, index=True)
validation_message = db.Column(db.Boolean, default=True, nullable=False)
username = db.Column(db.String, unique=True)
role = db.Column(db.Integer, default=0, nullable=False)
mapping_level = db.Column(db.Integer, default=1, nullable=False)
projects_mapped = db.Column(db.Integer, default=1, nullable=False)
tasks_mapped = db.Column(db.Integer, default=0, nullable=False)
tasks_validated = db.Column(db.Integer, default=0, nullable=False)
tasks_invalidated = db.Column(db.Integer, default=0, nullable=False)
projects_mapped = db.Column(db.ARRAY(db.Integer))
email_address = db.Column(db.String)
is_email_verified = db.Column(db.Boolean, default=False)
is_expert = db.Column(db.Boolean, default=False)
twitter_id = db.Column(db.String)
facebook_id = db.Column(db.String)
linkedin_id = db.Column(db.String)
date_registered = db.Column(db.DateTime, default=timestamp)
last_validation_date = db.Column(db.DateTime, default=timestamp)
accepted_licenses = db.relationship("License", secondary=users_licenses_table)
def create(self):
db.session.add(self)
db.session.commit()
def save(self):
db.session.commit()
def get_by_id(self, user_id: int):
return User.query.get(user_id)
def get_by_username(self, username: str):
return User.query.filter_by(username=username).one_or_none()
def update_username(self, username: str):
self.username = username
db.session.commit()
def update(self, user_dto: UserDTO):
self.email_address = user_dto.email_address.lower() if user_dto.email_address else None
self.twitter_id = user_dto.twitter_id.lower() if user_dto.twitter_id else None
self.facebook_id = user_dto.facebook_id.lower() if user_dto.facebook_id else None
self.linkedin_id = user_dto.linkedin_id.lower() if user_dto.linkedin_id else None
self.validation_message = user_dto.validation_message
db.session.commit()
def set_email_verified_status(self, is_verified: bool):
self.is_email_verified = is_verified
db.session.commit()
def set_is_expert(self, is_expert: bool):
self.is_expert = is_expert
db.session.commit()
@staticmethod
def get_all_users(query: UserSearchQuery) -> UserSearchDTO:
base = db.session.query(User.id, User.username, User.mapping_level, User.role)
if query.mapping_level:
base = base.filter(User.mapping_level == MappingLevel[query.mapping_level.upper()].value)
if query.username:
base = base.filter(User.username.ilike(query.username.lower() + '%'))
if query.role:
base = base.filter(User.role == UserRole[query.role.upper()].value)
results = base.order_by(User.username).paginate(query.page, 20, True)
dto = UserSearchDTO()
for result in results.items:
listed_user = ListedUser()
listed_user.id = result.id
listed_user.mapping_level = MappingLevel(result.mapping_level).name
listed_user.username = result.username
listed_user.role = UserRole(result.role).name
dto.users.append(listed_user)
dto.pagination = Pagination(results)
return dto
@staticmethod
def get_all_users_not_pagainated():
return db.session.query(User.id).all()
@staticmethod
def filter_users(user_filter: str, project_id: int, page: int,
is_project_manager:bool=False) -> UserFilterDTO:
query = db.session.query(User.username, User.projects_mapped.any(project_id).label("participant")) \
.filter(User.username.ilike(user_filter.lower() + '%')) \
.order_by(desc("participant").nullslast(), User.username)
if is_project_manager:
query = query.filter(User.role.in_([UserRole.ADMIN.value, UserRole.PROJECT_MANAGER.value]))
results = query.paginate(page, 20, True)
if results.total == 0:
raise NotFound()
dto = UserFilterDTO()
for result in results.items:
dto.usernames.append(result.username)
if project_id is not None:
participant = ProjectParticipantUser()
participant.username = result.username
participant.project_id = project_id
participant.is_participant = bool(result.participant)
dto.users.append(participant)
dto.pagination = Pagination(results)
return dto
@staticmethod
def upsert_mapped_projects(user_id: int, project_id: int):
sql = "select * from users where id = :user_id and projects_mapped @> '{{:project_id}}'"
result = db.engine.execute(text(sql), user_id=user_id, project_id=project_id)
if result.rowcount > 0:
return
sql = '''update users
set projects_mapped = array_append(projects_mapped, :project_id)
where id = :user_id'''
db.engine.execute(text(sql), project_id=project_id, user_id=user_id)
@staticmethod
def get_mapped_projects(user_id: int, preferred_locale: str) -> UserMappedProjectsDTO:
# counts of all mapped tasks and the query that gets counts of all validated tasks. This is necessary to
# handle cases where users have only validated tasks on a project, or only mapped on a project.
sql = '''SELECT p.id,
p.status,
p.default_locale,
c.mapped,
c.validated,
st_asgeojson(p.centroid)
FROM projects p,
(SELECT coalesce(v.project_id, m.project_id) project_id,
coalesce(v.validated, 0) validated,
coalesce(m.mapped, 0) mapped
FROM (SELECT t.project_id,
count (t.validated_by) validated
FROM tasks t
WHERE t.project_id IN (SELECT unnest(projects_mapped) FROM users WHERE id = :user_id)
AND t.validated_by = :user_id
GROUP BY t.project_id, t.validated_by) v
FULL OUTER JOIN
(SELECT t.project_id,
count(t.mapped_by) mapped
FROM tasks t
WHERE t.project_id IN (SELECT unnest(projects_mapped) FROM users WHERE id = :user_id)
AND t.mapped_by = :user_id
GROUP BY t.project_id, t.mapped_by) m
ON v.project_id = m.project_id) c
WHERE p.id = c.project_id ORDER BY p.id DESC'''
results = db.engine.execute(text(sql), user_id=user_id)
if results.rowcount == 0:
raise NotFound()
mapped_projects_dto = UserMappedProjectsDTO()
for row in results:
mapped_project = MappedProject()
mapped_project.project_id = row[0]
mapped_project.status = ProjectStatus(row[1]).name
mapped_project.tasks_mapped = row[3]
mapped_project.tasks_validated = row[4]
mapped_project.centroid = geojson.loads(row[5])
project_info = ProjectInfo.get_dto_for_locale(row[0], preferred_locale, row[2])
mapped_project.name = project_info.name
mapped_projects_dto.mapped_projects.append(mapped_project)
return mapped_projects_dto
def set_user_role(self, role: UserRole):
    """Persists a new role for the user."""
    self.role = role.value
    db.session.commit()
def set_mapping_level(self, level: MappingLevel):
    """Persists a new mapping level for the user."""
    self.mapping_level = level.value
    db.session.commit()
def accept_license_terms(self, license_id: int):
    """Records that the user has accepted the specified license."""
    accepted = License.get_by_id(license_id)
    self.accepted_licenses.append(accepted)
    db.session.commit()
def has_user_accepted_licence(self, license_id: int):
    """Checks if the user has accepted the specified license.

    :param license_id: id of the license of interest
    :return: True if the license is in the user's accepted licenses
    """
    image_license = License.get_by_id(license_id)
    # Return the membership test directly instead of the redundant
    # `if ...: return True / return False` chain.
    return image_license in self.accepted_licenses
def delete(self):
    """Removes the user from the database."""
    db.session.delete(self)
    db.session.commit()
def _sum_task_lock_seconds(self, action: str):
    """Totals the time the user has had tasks locked for the given action.

    :param action: task history action, e.g. 'LOCKED_FOR_MAPPING'
    :return: total seconds, or 0 if the user has no history for the action
    """
    # Parameterizing the action dedupes the two near-identical queries the
    # previous version of as_dto embedded inline.
    sql = """SELECT SUM(TO_TIMESTAMP(action_text, 'HH24:MI:SS')::TIME) FROM task_history
                  WHERE action = :action
                  and user_id = :user_id;"""
    result = db.engine.execute(text(sql), action=action, user_id=self.id)

    total_time = None
    for row in result:
        total_time = row[0]  # single aggregate row

    if total_time:
        return total_time.total_seconds()
    return 0

def as_dto(self, logged_in_username: str) -> UserDTO:
    """Creates a DTO for the user.

    Private fields (email address, verification state) are only included
    when the user is requesting their own details.

    :param logged_in_username: username of the currently logged-in user
    """
    user_dto = UserDTO()
    user_dto.id = self.id
    user_dto.username = self.username
    user_dto.role = UserRole(self.role).name
    user_dto.mapping_level = MappingLevel(self.mapping_level).name
    user_dto.is_expert = self.is_expert or False
    user_dto.date_registered = str(self.date_registered)
    try:
        user_dto.projects_mapped = len(self.projects_mapped)
    except TypeError:
        # projects_mapped is NULL until the user maps their first project;
        # narrowed from a bare except, which also hid unrelated errors.
        user_dto.projects_mapped = 0
    user_dto.tasks_mapped = self.tasks_mapped
    user_dto.tasks_validated = self.tasks_validated
    user_dto.tasks_invalidated = self.tasks_invalidated
    user_dto.twitter_id = self.twitter_id
    user_dto.linkedin_id = self.linkedin_id
    user_dto.facebook_id = self.facebook_id
    user_dto.validation_message = self.validation_message

    user_dto.time_spent_validating = self._sum_task_lock_seconds('LOCKED_FOR_VALIDATION')
    user_dto.time_spent_mapping = self._sum_task_lock_seconds('LOCKED_FOR_MAPPING')
    user_dto.total_time_spent = (
        user_dto.time_spent_validating + user_dto.time_spent_mapping)

    if self.username == logged_in_username:
        # Only expose private contact details to the user themselves.
        user_dto.email_address = self.email_address
        user_dto.is_email_verified = self.is_email_verified

    return user_dto
| true | true |
f7257d2547d1644fe9f677f8883223e1b992288c | 7,585 | py | Python | mmdet/models/detectors/maskformer.py | ayulockin/mmdetection | 6b87ac22b8d9dea8cc28b9ce84909e6c311e6268 | [
"Apache-2.0"
] | 2 | 2021-11-27T03:30:42.000Z | 2022-01-01T05:14:18.000Z | mmdet/models/detectors/maskformer.py | Bella-ing/mmdetection | 70f6d9cfade4a2f0b198e4f64776521d181b28be | [
"Apache-2.0"
] | 1 | 2020-05-20T08:13:44.000Z | 2020-05-20T08:13:44.000Z | mmdet/models/detectors/maskformer.py | Bella-ing/mmdetection | 70f6d9cfade4a2f0b198e4f64776521d181b28be | [
"Apache-2.0"
] | null | null | null | # Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import numpy as np
from mmdet.core import INSTANCE_OFFSET
from mmdet.core.visualization import imshow_det_bboxes
from ..builder import DETECTORS, build_backbone, build_head, build_neck
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class MaskFormer(SingleStageDetector):
    r"""Implementation of `Per-Pixel Classification is
    NOT All You Need for Semantic Segmentation
    <https://arxiv.org/pdf/2107.06278>`_."""

    def __init__(self,
                 backbone,
                 neck=None,
                 panoptic_head=None,
                 train_cfg=None,
                 test_cfg=None,
                 init_cfg=None):
        # Deliberately bypass SingleStageDetector.__init__: MaskFormer builds
        # a panoptic head instead of the usual bbox head.
        super(SingleStageDetector, self).__init__(init_cfg=init_cfg)
        self.backbone = build_backbone(backbone)
        if neck is not None:
            self.neck = build_neck(neck)

        # The head needs both train and test configs at build time.
        panoptic_head.update(train_cfg=train_cfg)
        panoptic_head.update(test_cfg=test_cfg)
        self.panoptic_head = build_head(panoptic_head)

        self.num_things_classes = self.panoptic_head.num_things_classes
        self.num_stuff_classes = self.panoptic_head.num_stuff_classes
        self.num_classes = self.panoptic_head.num_classes

        self.train_cfg = train_cfg
        self.test_cfg = test_cfg

    def forward_dummy(self, img, img_metas):
        """Used for computing network flops. See
        `mmdetection/tools/analysis_tools/get_flops.py`

        Args:
            img (Tensor): of shape (N, C, H, W) encoding input images.
                Typically these should be mean centered and std scaled.
            img_metas (list[Dict]): list of image info dict where each dict
                has: 'img_shape', 'scale_factor', 'flip', and may also contain
                'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
                For details on the values of these keys see
                `mmdet/datasets/pipelines/formatting.py:Collect`.

        Returns:
            outputs of the panoptic head.
        """
        super(SingleStageDetector, self).forward_train(img, img_metas)
        x = self.extract_feat(img)
        outs = self.panoptic_head(x, img_metas)
        return outs

    def forward_train(self,
                      img,
                      img_metas,
                      gt_bboxes,
                      gt_labels,
                      gt_masks,
                      gt_semantic_seg,
                      gt_bboxes_ignore=None,
                      **kwargs):
        """
        Args:
            img (Tensor): of shape (N, C, H, W) encoding input images.
                Typically these should be mean centered and std scaled.
            img_metas (list[Dict]): list of image info dict where each dict
                has: 'img_shape', 'scale_factor', 'flip', and may also contain
                'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
                For details on the values of these keys see
                `mmdet/datasets/pipelines/formatting.py:Collect`.
            gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
            gt_labels (list[Tensor]): class indices corresponding to each box.
            gt_masks (list[BitmapMasks]): true segmentation masks for each box
                used if the architecture supports a segmentation task.
            gt_semantic_seg (list[tensor]): semantic segmentation mask for
                images.
            gt_bboxes_ignore (list[Tensor]): specify which bounding
                boxes can be ignored when computing the loss.
                Defaults to None.

        Returns:
            dict[str, Tensor]: a dictionary of loss components
        """
        # The parent call adds batch_input_shape to each img_meta.
        # NOTE: the catch-all was previously misspelled ``**kargs``; renamed
        # to the conventional ``**kwargs`` (invisible to callers).
        super(SingleStageDetector, self).forward_train(img, img_metas)
        x = self.extract_feat(img)
        losses = self.panoptic_head.forward_train(x, img_metas, gt_bboxes,
                                                  gt_labels, gt_masks,
                                                  gt_semantic_seg,
                                                  gt_bboxes_ignore)

        return losses

    def simple_test(self, img, img_metas, **kwargs):
        """Test without augmentation.

        Returns:
            list[dict]: one dict per image with key ``pan_results`` holding
                the panoptic segmentation map as a numpy array.
        """
        feat = self.extract_feat(img)
        mask_results = self.panoptic_head.simple_test(feat, img_metas,
                                                      **kwargs)

        results = []
        for mask in mask_results:
            result = {'pan_results': mask.detach().cpu().numpy()}
            results.append(result)

        return results

    def aug_test(self, imgs, img_metas, **kwargs):
        """Test-time augmentation is not supported."""
        raise NotImplementedError

    def onnx_export(self, img, img_metas):
        """ONNX export is not supported."""
        raise NotImplementedError

    def show_result(self,
                    img,
                    result,
                    score_thr=0.3,
                    bbox_color=(72, 101, 241),
                    text_color=(72, 101, 241),
                    mask_color=None,
                    thickness=2,
                    font_size=13,
                    win_name='',
                    show=False,
                    wait_time=0,
                    out_file=None):
        """Draw `result` over `img`.

        Args:
            img (str or Tensor): The image to be displayed.
            result (dict): The results, with key ``pan_results``.
            score_thr (float, optional): Kept for API compatibility with other
                detectors. NOTE(review): it is unused by this method.
            bbox_color (str or tuple(int) or :obj:`Color`): Color of bbox
                lines. The tuple of color should be in BGR order.
            text_color (str or tuple(int) or :obj:`Color`): Color of texts.
                The tuple of color should be in BGR order.
            mask_color (None or str or tuple(int) or :obj:`Color`):
                Color of masks. The tuple of color should be in BGR order.
                Default: None.
            thickness (int): Thickness of lines. Default: 2.
            font_size (int): Font size of texts. Default: 13.
            win_name (str): The window name. Default: ''.
            show (bool): Whether to show the image. Default: False.
            wait_time (float): Value of waitKey param. Default: 0.
            out_file (str or None): The filename to write the image.
                Default: None.

        Returns:
            img (Tensor): Only if not `show` or `out_file` (otherwise the
                method implicitly returns None).
        """
        img = mmcv.imread(img)
        img = img.copy()
        pan_results = result['pan_results']
        # keep objects ahead (draw higher, i.e. instance, ids first)
        ids = np.unique(pan_results)[::-1]
        legal_indices = ids != self.num_classes  # for VOID label
        ids = ids[legal_indices]
        labels = np.array([id % INSTANCE_OFFSET for id in ids], dtype=np.int64)
        segms = (pan_results[None] == ids[:, None, None])

        # if out_file specified, do not show image in window
        if out_file is not None:
            show = False
        # draw bounding boxes
        img = imshow_det_bboxes(
            img,
            segms=segms,
            labels=labels,
            class_names=self.CLASSES,
            bbox_color=bbox_color,
            text_color=text_color,
            mask_color=mask_color,
            thickness=thickness,
            font_size=font_size,
            win_name=win_name,
            show=show,
            wait_time=wait_time,
            out_file=out_file)

        if not (show or out_file):
            return img
| 39.921053 | 79 | 0.562162 |
import mmcv
import numpy as np
from mmdet.core import INSTANCE_OFFSET
from mmdet.core.visualization import imshow_det_bboxes
from ..builder import DETECTORS, build_backbone, build_head, build_neck
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class MaskFormer(SingleStageDetector):
def __init__(self,
backbone,
neck=None,
panoptic_head=None,
train_cfg=None,
test_cfg=None,
init_cfg=None):
super(SingleStageDetector, self).__init__(init_cfg=init_cfg)
self.backbone = build_backbone(backbone)
if neck is not None:
self.neck = build_neck(neck)
panoptic_head.update(train_cfg=train_cfg)
panoptic_head.update(test_cfg=test_cfg)
self.panoptic_head = build_head(panoptic_head)
self.num_things_classes = self.panoptic_head.num_things_classes
self.num_stuff_classes = self.panoptic_head.num_stuff_classes
self.num_classes = self.panoptic_head.num_classes
self.train_cfg = train_cfg
self.test_cfg = test_cfg
def forward_dummy(self, img, img_metas):
super(SingleStageDetector, self).forward_train(img, img_metas)
x = self.extract_feat(img)
outs = self.panoptic_head(x, img_metas)
return outs
def forward_train(self,
img,
img_metas,
gt_bboxes,
gt_labels,
gt_masks,
gt_semantic_seg,
gt_bboxes_ignore=None,
**kargs):
super(SingleStageDetector, self).forward_train(img, img_metas)
x = self.extract_feat(img)
losses = self.panoptic_head.forward_train(x, img_metas, gt_bboxes,
gt_labels, gt_masks,
gt_semantic_seg,
gt_bboxes_ignore)
return losses
def simple_test(self, img, img_metas, **kwargs):
feat = self.extract_feat(img)
mask_results = self.panoptic_head.simple_test(feat, img_metas,
**kwargs)
results = []
for mask in mask_results:
result = {'pan_results': mask.detach().cpu().numpy()}
results.append(result)
return results
def aug_test(self, imgs, img_metas, **kwargs):
raise NotImplementedError
def onnx_export(self, img, img_metas):
raise NotImplementedError
def show_result(self,
img,
result,
score_thr=0.3,
bbox_color=(72, 101, 241),
text_color=(72, 101, 241),
mask_color=None,
thickness=2,
font_size=13,
win_name='',
show=False,
wait_time=0,
out_file=None):
img = mmcv.imread(img)
img = img.copy()
pan_results = result['pan_results']
ids = np.unique(pan_results)[::-1]
legal_indices = ids != self.num_classes
ids = ids[legal_indices]
labels = np.array([id % INSTANCE_OFFSET for id in ids], dtype=np.int64)
segms = (pan_results[None] == ids[:, None, None])
if out_file is not None:
show = False
img = imshow_det_bboxes(
img,
segms=segms,
labels=labels,
class_names=self.CLASSES,
bbox_color=bbox_color,
text_color=text_color,
mask_color=mask_color,
thickness=thickness,
font_size=font_size,
win_name=win_name,
show=show,
wait_time=wait_time,
out_file=out_file)
if not (show or out_file):
return img
| true | true |
f7257da3da9f350d02208ba89525fe652376caa1 | 19,232 | py | Python | yaldevtools/source_code.py | libyal/libyal | 407e4710c9c11000dc45427d72bbdbdc2861c51a | [
"Apache-2.0"
] | 176 | 2015-01-11T01:57:37.000Z | 2022-03-30T05:31:33.000Z | yaldevtools/source_code.py | libyal/libyal | 407e4710c9c11000dc45427d72bbdbdc2861c51a | [
"Apache-2.0"
] | 79 | 2015-01-07T19:05:32.000Z | 2022-01-25T15:19:29.000Z | yaldevtools/source_code.py | libyal/libyal | 407e4710c9c11000dc45427d72bbdbdc2861c51a | [
"Apache-2.0"
] | 25 | 2015-07-16T13:29:00.000Z | 2022-02-12T08:15:19.000Z | # -*- coding: utf-8 -*-
"""The source code classes."""
import collections
from yaldevtools import definitions
class EnumDeclaration(object):
  """Enumeration type declaration.

  Attributes:
    name (str): name of the enumeration.
    constants (dict[str, str]): constant value per constant name, kept in
        declaration order.
  """

  def __init__(self, name):
    """Initializes an enumeration type declaration.

    Args:
      name (str): name of the enumeration.
    """
    super(EnumDeclaration, self).__init__()
    self.name = name
    # OrderedDict preserves the order in which constants were declared.
    self.constants = collections.OrderedDict()
class FunctionArgument(object):
  """Function argument that can span multiple source lines."""

  def __init__(self, argument_string):
    """Initializes a function argument.

    Args:
      argument_string (str): first line of the function argument.
    """
    super(FunctionArgument, self).__init__()
    self._strings = [argument_string]

  def AddArgumentString(self, argument_string):
    """Adds another line to the function argument.

    Args:
      argument_string (str): function argument.
    """
    self._strings.append(argument_string)

  def CopyToString(self):
    """Copies the function argument to a string.

    Returns:
      str: function argument.
    """
    if not self._strings:
      return ''
    if len(self._strings) == 1:
      return self._strings[0]
    # NOTE(review): the first string is concatenated without a separator;
    # presumably it already ends with an opening delimiter — confirm.
    return '{0:s}{1:s}'.format(
        self._strings[0], ', '.join(self._strings[1:]))
class FunctionPrototype(object):
  """C function prototype.

  Attributes:
    arguments (list[FunctionArgument]): function arguments.
    have_bfio (bool): True if the function prototype is defined if BFIO is
        defined.
    have_debug_output (bool): True if the function prototype is defined if
        debug output is defined.
    have_extern (bool): True if the function prototype is defined as
        externally available (API).
    have_wide_character_type (bool): True if the function prototype is
        defined if the wide character type is defined.
    name (str): name.
    return_type (str): return type.
    return_values (set[str]): return values or None if the function does not
        return values.
    value_description (str): description of the value.
  """

  def __init__(self, name, return_type):
    """Initializes a function prototype.

    Args:
      name (str): name.
      return_type (str): return type.
    """
    super(FunctionPrototype, self).__init__()
    self._parsed_value = False
    self._value_name = None
    self._value_type = None
    self.arguments = []
    self.have_bfio = False
    self.have_debug_output = False
    self.have_extern = False
    self.have_wide_character_type = False
    self.name = name
    self.return_type = return_type
    self.return_values = None
    self.value_description = None

  def _ParseValue(self):
    """Parses the value name and type from the function name."""
    # Function names have the form: <library>_<type>_<function>; strip the
    # first two underscore-separated components.
    _, _, function_name = self.name.partition('_')
    _, _, function_name = function_name.partition('_')

    value_name = None
    value_type = None
    number_of_arguments = len(self.arguments)

    if function_name.startswith('get_utf'):
      # For example get_utf8_name => name.
      if number_of_arguments in (3, 4):
        _, _, value_name = function_name.partition('_')
        _, _, value_name = value_name.partition('_')

    elif function_name.startswith('get_'):
      # TODO: handle by_index, by_path getters
      if number_of_arguments == 3:
        _, _, value_name = function_name.partition('_')

    self._parsed_value = True
    self._value_name = value_name
    self._value_type = value_type

  def AddArgument(self, argument):
    """Adds an argument to the function prototype.

    Args:
      argument (FunctionArgument): function argument.
    """
    self.arguments.append(argument)

  def AddArgumentString(self, argument_string):
    """Adds an argument string to the function prototype.

    Args:
      argument_string (str): function argument.
    """
    self.arguments.append(FunctionArgument(argument_string))

  def CopyToManpageString(self):
    """Copies the function prototype to a string for use in a manpage.

    Returns:
      str: function prototype with each argument double quoted.
    """
    return ' '.join(
        '"{0:s}"'.format(argument.CopyToString())
        for argument in self.arguments)

  def CopyToString(self):
    """Copies the function prototype to a string.

    Returns:
      str: comma separated function arguments.
    """
    return ', '.join(
        argument.CopyToString() for argument in self.arguments)

  def GetValueName(self):
    """Determines the value name of a getter or setter function.

    Returns:
      str: value name or None if not available.
    """
    if not self._parsed_value:
      self._ParseValue()
    return self._value_name

  def GetValueType(self):
    """Determines the value type of a getter or setter function.

    Returns:
      str: value type or None if not available.
    """
    if not self._parsed_value:
      self._ParseValue()
    return self._value_type
class PythonTypeObjectFunctionPrototype(object):
  """Python type object function prototype.

  Attributes:
    arguments (list[str]): arguments.
    data_type (str): data type.
    function_type (str): function type.
    object_type (str): object type.
    return_values (set[str]): return values or None if the function does not
        return values.
    value_description (str): description of the value.
    value_type (str): value type.
  """

  def __init__(self, python_module_name, type_name, type_function):
    """Initializes a Python type object function prototype.

    Args:
      python_module_name (str): python module name.
      type_name (str): type name.
      type_function (str): type function.
    """
    super(PythonTypeObjectFunctionPrototype, self).__init__()
    self._name = None
    self._python_module_name = python_module_name
    self._type_function = type_function
    self._type_name = type_name
    self._value_name = None
    self.arguments = []
    self.data_type = definitions.DATA_TYPE_NONE
    self.function_type = None
    self.object_type = None
    self.return_values = None
    self.value_description = None
    self.value_type = None

  @property
  def name(self):
    """str: name, cached on first access."""
    if self._name is None:
      self._name = '{0:s}_{1:s}_{2:s}'.format(
          self._python_module_name, self._type_name, self.type_function)
    return self._name

  @property
  def type_function(self):
    """str: type function, with C naming quirks mapped to Python names."""
    type_function = self._type_function

    # TODO: make overrides more generic.
    if type_function == 'set_parent_file':
      return 'set_parent'

    # copy_<value> getters are exposed as get_<value>.
    if (type_function.startswith('copy_') and
        not type_function.startswith('copy_from_')):
      return 'get_{0:s}'.format(type_function[5:])

    # Drop the "utf8_" infix: get_utf8_name => get_name.
    if (type_function.startswith('get_utf8_') or
        type_function.startswith('set_utf8_')):
      return ''.join([type_function[:4], type_function[9:]])

    if type_function.startswith('get_data_as_'):
      _, _, type_function_suffix = type_function.partition('_data_as_')

      if type_function_suffix in (
          '16bit_integer', '32bit_integer', '64bit_integer'):
        return 'get_data_as_integer'

      if type_function_suffix in ('filetime', 'floatingtime'):
        return 'get_data_as_datetime'

      if type_function_suffix == 'utf8_string':
        return 'get_data_as_string'

      return type_function

    if type_function.startswith('get_'):
      type_function_prefix, _, type_function_suffix = (
          type_function.partition('_by_'))

      # get_<value>_by_entry and get_<value>_by_index become get_<value>.
      if type_function_suffix in ('entry', 'index'):
        return type_function_prefix

      # Drop the "utf8_" infix from _by_utf8_name and _by_utf8_path.
      if type_function_suffix in ('utf8_name', 'utf8_path'):
        return ''.join([type_function[:-10], type_function[-5:]])

      if type_function.endswith('_utf8_string'):
        return ''.join([type_function[:-12], type_function[-7:]])

      if type_function.endswith('_utf8_string_size'):
        return ''.join([type_function[:-17], type_function[-12:]])

    return type_function

  @property
  def value_name(self):
    """str: value name, derived from the type function and cached."""
    if self._value_name is None:
      # TODO: make overrides more generic.
      if self.function_type == definitions.FUNCTION_TYPE_COPY:
        if self._type_function.startswith('copy_'):
          self._value_name = self._type_function[5:]

      elif self.function_type == definitions.FUNCTION_TYPE_COPY_FROM:
        if self._type_function.startswith('copy_from_'):
          self._value_name = self._type_function[10:]

      elif self.function_type == definitions.FUNCTION_TYPE_COPY_TO:
        if self._type_function.startswith('get_'):
          self._value_name = self._type_function[4:]

      elif self.function_type in (
          definitions.FUNCTION_TYPE_GET,
          definitions.FUNCTION_TYPE_GET_BY_IDENTIFIER,
          definitions.FUNCTION_TYPE_GET_BY_INDEX,
          definitions.FUNCTION_TYPE_GET_BY_NAME,
          definitions.FUNCTION_TYPE_GET_BY_PATH):
        type_function_prefix, _, _ = self._type_function.partition('_by_')

        if type_function_prefix.startswith('get_'):
          type_function_prefix = type_function_prefix[4:]

        if type_function_prefix.startswith('utf8_'):
          type_function_prefix = type_function_prefix[5:]

        self._value_name = type_function_prefix

      elif self.function_type == definitions.FUNCTION_TYPE_IS:
        if self._type_function.startswith('is_'):
          self._value_name = self._type_function[3:]

      elif self.function_type == definitions.FUNCTION_TYPE_SET:
        if self._type_function.startswith('set_utf8_'):
          self._value_name = self._type_function[9:]
        elif self._type_function.startswith('set_'):
          self._value_name = self._type_function[4:]

    return self._value_name

  def DataTypeIsDatetime(self):
    """Determines if the data type is a datetime type.

    Returns:
      bool: True if the data type is a datetime type.
    """
    return self.data_type in (
        definitions.DATA_TYPE_FAT_DATE_TIME,
        definitions.DATA_TYPE_FILETIME,
        definitions.DATA_TYPE_FLOATINGTIME,
        definitions.DATA_TYPE_POSIX_TIME)

  def DataTypeIsFloat(self):
    """Determines if the data type is a floating-point type.

    Returns:
      bool: True if the data type is a floating-point type.
    """
    return self.data_type in (
        definitions.DATA_TYPE_FLOAT,
        definitions.DATA_TYPE_DOUBLE)

  def DataTypeIsInteger(self):
    """Determines if the data type is an integer type.

    Returns:
      bool: True if the data type is an integer type.
    """
    return self.data_type in (
        definitions.DATA_TYPE_INT,
        definitions.DATA_TYPE_INT32,
        definitions.DATA_TYPE_OFF64,
        definitions.DATA_TYPE_SIZE32,
        definitions.DATA_TYPE_SIZE64,
        definitions.DATA_TYPE_UINT8,
        definitions.DATA_TYPE_UINT16,
        definitions.DATA_TYPE_UINT32,
        definitions.DATA_TYPE_UINT64)

  def GetAttributeDescription(self):
    """Retrieves the function as attribute description.

    Returns:
      str: function as attribute description.
    """
    description = ''

    type_function = self.type_function

    value_name = self.value_name
    if value_name:
      value_name = value_name.replace('_', ' ')

    if type_function == 'get_ascii_codepage':
      description = (
          'The codepage used for ASCII strings in the {0:s}.').format(
              self._type_name)

    elif type_function == 'get_data_as_boolean':
      description = 'The data as a boolean.'

    elif type_function == 'get_data_as_datetime':
      description = 'The data as a datetime object.'

    elif type_function == 'get_data_as_integer':
      description = 'The data as an integer.'

    elif type_function == 'get_data_as_floating_point':
      description = 'The data as a floating point.'

    elif type_function == 'get_data_as_string':
      description = 'The data as a string.'

    elif self.function_type == definitions.FUNCTION_TYPE_IS:
      type_name = self._type_name
      if type_name:
        type_name = type_name.replace('_', ' ')
      description = 'Indicates the {0:s} is {1:s}.'.format(
          type_name, value_name)

    elif self.value_description:
      description = 'The {0:s}.'.format(self.value_description)

    elif value_name:
      description = 'The {0:s}.'.format(value_name)

    return description

  def GetDataTypeDescription(self):
    """Retrieves the data type description.

    Returns:
      str: data type description.
    """
    if self.data_type == definitions.DATA_TYPE_BINARY_DATA:
      data_type_description = 'Binary string'
    elif self.data_type == definitions.DATA_TYPE_BOOLEAN:
      data_type_description = 'Boolean'
    elif self.DataTypeIsDatetime():
      data_type_description = 'Datetime'
    elif self.data_type == definitions.DATA_TYPE_OBJECT:
      data_type_description = 'Object'
    elif self.DataTypeIsFloat():
      data_type_description = 'Float'
    elif self.DataTypeIsInteger():
      data_type_description = 'Integer'
    elif self.data_type in (
        definitions.DATA_TYPE_GUID,
        definitions.DATA_TYPE_STRING,
        definitions.DATA_TYPE_UUID):
      data_type_description = 'Unicode string'
    elif self.data_type == definitions.DATA_TYPE_NARROW_STRING:
      data_type_description = 'String'
    elif self.data_type == definitions.DATA_TYPE_NONE:
      data_type_description = 'None'
    else:
      data_type_description = self.data_type

    if (data_type_description != 'None' and self.return_values and
        'None' in self.return_values):
      data_type_description = '{0:s} or None'.format(data_type_description)

    return data_type_description

  def GetDescription(self):
    """Retrieves the description.

    Returns:
      list[str]: lines of the description.
    """
    description = ['']

    type_function = self.type_function

    type_name = self._type_name
    if type_name:
      type_name = type_name.replace('_', ' ')

    value_name = self.value_name
    if value_name:
      value_name = value_name.replace('_', ' ')

    if type_function == 'close':
      description = ['Closes a {0:s}.'.format(type_name)]

    elif type_function == 'get_ascii_codepage':
      description = [(
          'Retrieves the codepage for ASCII strings used in '
          'the {0:s}.').format(type_name)]

    elif type_function == 'get_data_as_boolean':
      description = ['Retrieves the data as a boolean.']

    elif type_function == 'get_data_as_datetime':
      description = ['Retrieves the data as a datetime object.']

    elif type_function == 'get_data_as_integer':
      description = ['Retrieves the data as an integer.']

    elif type_function == 'get_data_as_floating_point':
      description = ['Retrieves the data as a floating point.']

    elif type_function == 'get_data_as_string':
      description = ['Retrieves the data as a string.']

    elif type_function == 'get_string':
      description = ['Retrieves the {0:s} formatted as a string.'.format(
          type_name)]

    elif type_function == 'open':
      description = ['Opens a {0:s}.'.format(type_name)]

    elif type_function == 'open_file_object':
      description = [(
          'Opens a {0:s} using a file-like object.').format(type_name)]

    elif type_function == 'read_buffer':
      if self.value_description:
        description = ['Reads a buffer of {0:s}.'.format(
            self.value_description)]
      else:
        description = ['Reads a buffer of data.']

    elif type_function == 'read_buffer_at_offset':
      if self.value_description:
        description = ['Reads a buffer of {0:s} at a specific offset.'.format(
            self.value_description)]
      else:
        description = ['Reads a buffer of data at a specific offset.']

    elif type_function == 'seek_offset':
      if self.value_description:
        description = ['Seeks an offset within the {0:s}.'.format(
            self.value_description)]
      else:
        description = ['Seeks an offset within the data.']

    elif type_function == 'set_ascii_codepage':
      description = [
          ('Sets the codepage for ASCII strings used in the '
           '{0:s}.').format(type_name),
          ('Expects the codepage to be a string containing a Python '
           'codec definition.')]

    elif type_function == 'set_parent':
      description = ['Sets the parent file.']

    elif type_function == 'signal_abort':
      description = ['Signals the {0:s} to abort the current activity.'.format(
          type_name)]

    elif self.function_type == definitions.FUNCTION_TYPE_GET_BY_INDEX:
      _, _, argument_suffix = self.arguments[0].rpartition('_')
      if self.value_description:
        description = ['Retrieves the {0:s} specified by the {1:s}.'.format(
            self.value_description, argument_suffix)]
      else:
        description = ['Retrieves the {0:s} specified by the {1:s}.'.format(
            value_name, argument_suffix)]

    elif self.function_type in (
        definitions.FUNCTION_TYPE_GET_BY_IDENTIFIER,
        definitions.FUNCTION_TYPE_GET_BY_NAME,
        definitions.FUNCTION_TYPE_GET_BY_PATH):
      _, _, type_function_suffix = type_function.partition('_by_')
      if self.value_description:
        description = ['Retrieves the {0:s} specified by the {1:s}.'.format(
            self.value_description, type_function_suffix)]
      else:
        description = ['Retrieves the {0:s} specified by the {1:s}.'.format(
            value_name, type_function_suffix)]

    elif self.function_type == definitions.FUNCTION_TYPE_COPY_FROM:
      # TODO: fix value name.
      description = ['Copies the {0:s} from the {1:s}.'.format(
          type_name, value_name)]

    elif self.function_type in (
        definitions.FUNCTION_TYPE_COPY, definitions.FUNCTION_TYPE_GET):
      if self.value_description:
        description = ['Retrieves the {0:s}.'.format(self.value_description)]
      else:
        description = ['Retrieves the {0:s}.'.format(value_name)]

    elif self.function_type == definitions.FUNCTION_TYPE_IS:
      description = ['Determines if the {0:s} is {1:s}.'.format(
          type_name, value_name)]

    elif self.function_type == definitions.FUNCTION_TYPE_SET:
      description = ['Sets the {0:s}.'.format(value_name)]

    return description

  def GetValueNameAndPrefix(self):
    """Determines the value name and its prefix.

    Returns:
      tuple[str, str]: value name and prefix.
    """
    if self.value_name:
      value_name_prefix, _, value_name = self.value_name.partition('_')
      if value_name_prefix in ('root', 'sub'):
        return value_name, value_name_prefix

    return self.value_name, None
| 31.271545 | 79 | 0.674085 |
import collections
from yaldevtools import definitions
class EnumDeclaration(object):
def __init__(self, name):
super(EnumDeclaration, self).__init__()
self.constants = collections.OrderedDict()
self.name = name
class FunctionArgument(object):
def __init__(self, argument_string):
super(FunctionArgument, self).__init__()
self._strings = [argument_string]
def AddArgumentString(self, argument_string):
self._strings.append(argument_string)
def CopyToString(self):
number_of_strings = len(self._strings)
argument_string = ''
if number_of_strings == 1:
argument_string = self._strings[0]
elif number_of_strings > 1:
argument_string = '{0:s}{1:s}'.format(
self._strings[0], ', '.join(self._strings[1:]))
return argument_string
class FunctionPrototype(object):
def __init__(self, name, return_type):
super(FunctionPrototype, self).__init__()
self._parsed_value = False
self._value_name = None
self._value_type = None
self.arguments = []
self.have_bfio = False
self.have_debug_output = False
self.have_extern = False
self.have_wide_character_type = False
self.name = name
self.return_type = return_type
self.return_values = None
self.value_description = None
def AddArgument(self, argument):
self.arguments.append(argument)
def AddArgumentString(self, argument_string):
function_argument = FunctionArgument(argument_string)
self.arguments.append(function_argument)
def CopyToManpageString(self):
argument_strings = []
for function_argument in self.arguments:
argument_string = function_argument.CopyToString()
argument_string = '"{0:s}"'.format(argument_string)
argument_strings.append(argument_string)
return ' '.join(argument_strings)
def CopyToString(self):
argument_strings = []
for function_argument in self.arguments:
argument_string = function_argument.CopyToString()
argument_strings.append(argument_string)
return ', '.join(argument_strings)
def _ParseValue(self):
_, _, function_name = self.name.partition('_')
_, _, function_name = function_name.partition('_')
value_name = None
value_type = None
number_of_arguments = len(self.arguments)
if function_name.startswith('get_utf'):
if number_of_arguments in (3, 4):
_, _, value_name = function_name.partition('_')
_, _, value_name = value_name.partition('_')
elif function_name.startswith('get_'):
if number_of_arguments == 3:
_, _, value_name = function_name.partition('_')
self._parsed_value = True
self._value_name = value_name
self._value_type = value_type
def GetValueName(self):
if not self._parsed_value:
self._ParseValue()
return self._value_name
def GetValueType(self):
if not self._parsed_value:
self._ParseValue()
return self._value_type
class PythonTypeObjectFunctionPrototype(object):
  """Python type object function prototype.

  Describes one function of a generated Python type object and provides
  helpers to derive names and human readable descriptions from the
  C-style function name.

  Attributes:
    arguments (list): function arguments.
    data_type (str): data type, one of the definitions.DATA_TYPE_* values.
    function_type (str): function type, one of the
        definitions.FUNCTION_TYPE_* values.
    object_type (str): object type.
    return_values (set|None): return values.
    value_description (str|None): description of the value.
    value_type (str|None): value type.
  """

  def __init__(self, python_module_name, type_name, type_function):
    """Initializes a Python type object function prototype.

    Args:
      python_module_name (str): name of the Python module.
      type_name (str): name of the type.
      type_function (str): name of the type function.
    """
    super(PythonTypeObjectFunctionPrototype, self).__init__()
    self._name = None
    self._python_module_name = python_module_name
    self._type_function = type_function
    self._type_name = type_name
    self._value_name = None
    self.arguments = []
    self.data_type = definitions.DATA_TYPE_NONE
    self.function_type = None
    self.object_type = None
    self.return_values = None
    self.value_description = None
    self.value_type = None

  @property
  def name(self):
    """str: name of the function, <module>_<type>_<type function>."""
    if self._name is None:
      # Note: uses the (normalized) type_function property, not the raw
      # _type_function attribute.
      self._name = '{0:s}_{1:s}_{2:s}'.format(
          self._python_module_name, self._type_name, self.type_function)

    return self._name

  @property
  def type_function(self):
    """str: type function name, normalized for the Python binding.

    Maps the C-style function name onto the name exposed in Python, e.g.
    'copy_X' => 'get_X' and UTF-8 specific variants lose their 'utf8_'
    infix.
    """
    if self._type_function == 'set_parent_file':
      return 'set_parent'

    # copy_* (but not copy_from_*) functions are exposed as getters.
    if (self._type_function.startswith('copy_') and
        not self._type_function.startswith('copy_from_')):
      return 'get_{0:s}'.format(self._type_function[5:])

    # Drop the 'utf8_' infix: 'get_utf8_name' => 'get_name'.
    if (self._type_function.startswith('get_utf8_') or
        self._type_function.startswith('set_utf8_')):
      return ''.join([self._type_function[:4], self._type_function[9:]])

    if self._type_function.startswith('get_data_as_'):
      _, _, type_function_suffix = self._type_function.partition('_data_as_')

      # Sized integer variants collapse to a single integer getter.
      if type_function_suffix in (
          '16bit_integer', '32bit_integer', '64bit_integer'):
        return 'get_data_as_integer'

      if type_function_suffix in ('filetime', 'floatingtime'):
        return 'get_data_as_datetime'

      if type_function_suffix == 'utf8_string':
        return 'get_data_as_string'

      return self._type_function

    if self._type_function.startswith('get_'):
      type_function_prefix, _, type_function_suffix = (
          self._type_function.partition('_by_'))

      # 'get_X_by_entry' and 'get_X_by_index' are exposed as 'get_X'.
      if type_function_suffix in ('entry', 'index'):
        return type_function_prefix

      # Drop 'utf8_' from 'get_X_by_utf8_name' and 'get_X_by_utf8_path'.
      if type_function_suffix in ('utf8_name', 'utf8_path'):
        return ''.join([self._type_function[:-10], self._type_function[-5:]])

      if self._type_function.endswith('_utf8_string'):
        return ''.join([self._type_function[:-12], self._type_function[-7:]])

      if self._type_function.endswith('_utf8_string_size'):
        return ''.join([self._type_function[:-17], self._type_function[-12:]])

    return self._type_function

  @property
  def value_name(self):
    """str: name of the value processed by the function, or None.

    Derived lazily from _type_function based on function_type and cached
    in _value_name.
    """
    if self._value_name is None:
      if self.function_type == definitions.FUNCTION_TYPE_COPY:
        if self._type_function.startswith('copy_'):
          self._value_name = self._type_function[5:]

      elif self.function_type == definitions.FUNCTION_TYPE_COPY_FROM:
        if self._type_function.startswith('copy_from_'):
          self._value_name = self._type_function[10:]

      elif self.function_type == definitions.FUNCTION_TYPE_COPY_TO:
        if self._type_function.startswith('get_'):
          self._value_name = self._type_function[4:]

      elif self.function_type in (
          definitions.FUNCTION_TYPE_GET,
          definitions.FUNCTION_TYPE_GET_BY_IDENTIFIER,
          definitions.FUNCTION_TYPE_GET_BY_INDEX,
          definitions.FUNCTION_TYPE_GET_BY_NAME,
          definitions.FUNCTION_TYPE_GET_BY_PATH):
        # Strip the '_by_*' suffix, then the 'get_' and 'utf8_' prefixes.
        type_function_prefix, _, _ = self._type_function.partition('_by_')

        if type_function_prefix.startswith('get_'):
          type_function_prefix = type_function_prefix[4:]

        if type_function_prefix.startswith('utf8_'):
          type_function_prefix = type_function_prefix[5:]

        self._value_name = type_function_prefix

      elif self.function_type == definitions.FUNCTION_TYPE_IS:
        if self._type_function.startswith('is_'):
          self._value_name = self._type_function[3:]

      elif self.function_type == definitions.FUNCTION_TYPE_SET:
        if self._type_function.startswith('set_utf8_'):
          self._value_name = self._type_function[9:]

        elif self._type_function.startswith('set_'):
          self._value_name = self._type_function[4:]

    return self._value_name

  def DataTypeIsDatetime(self):
    """Determines if the data type is a datetime type.

    Returns:
      bool: True if the data type is a datetime type.
    """
    return self.data_type in (
        definitions.DATA_TYPE_FAT_DATE_TIME,
        definitions.DATA_TYPE_FILETIME,
        definitions.DATA_TYPE_FLOATINGTIME,
        definitions.DATA_TYPE_POSIX_TIME)

  def DataTypeIsFloat(self):
    """Determines if the data type is a floating-point type.

    Returns:
      bool: True if the data type is a floating-point type.
    """
    return self.data_type in (
        definitions.DATA_TYPE_FLOAT,
        definitions.DATA_TYPE_DOUBLE)

  def DataTypeIsInteger(self):
    """Determines if the data type is an integer type.

    Returns:
      bool: True if the data type is an integer type.
    """
    return self.data_type in (
        definitions.DATA_TYPE_INT,
        definitions.DATA_TYPE_INT32,
        definitions.DATA_TYPE_OFF64,
        definitions.DATA_TYPE_SIZE32,
        definitions.DATA_TYPE_SIZE64,
        definitions.DATA_TYPE_UINT8,
        definitions.DATA_TYPE_UINT16,
        definitions.DATA_TYPE_UINT32,
        definitions.DATA_TYPE_UINT64)

  def GetAttributeDescription(self):
    """Retrieves the description for use as a type object attribute.

    Returns:
      str: description, or an empty string if none could be derived.
    """
    description = ''

    type_function = self.type_function

    value_name = self.value_name
    if value_name:
      value_name = value_name.replace('_', ' ')

    if type_function == 'get_ascii_codepage':
      description = (
          'The codepage used for ASCII strings in the {0:s}.').format(
              self._type_name)

    elif type_function == 'get_data_as_boolean':
      description = 'The data as a boolean.'

    elif type_function == 'get_data_as_datetime':
      description = 'The data as a datetime object.'

    elif type_function == 'get_data_as_integer':
      description = 'The data as an integer.'

    elif type_function == 'get_data_as_floating_point':
      description = 'The data as a floating point.'

    elif type_function == 'get_data_as_string':
      description = 'The data as a string.'

    elif self.function_type == definitions.FUNCTION_TYPE_IS:
      type_name = self._type_name
      if type_name:
        type_name = type_name.replace('_', ' ')

      description = 'Indicates the {0:s} is {1:s}.'.format(
          type_name, value_name)

    elif self.value_description:
      description = 'The {0:s}.'.format(self.value_description)

    elif value_name:
      description = 'The {0:s}.'.format(value_name)

    return description

  def GetDataTypeDescription(self):
    """Retrieves a human readable description of the data type.

    Returns:
      str: data type description, e.g. 'Integer' or 'Unicode string',
          with ' or None' appended when None is a possible return value.
    """
    if self.data_type == definitions.DATA_TYPE_BINARY_DATA:
      data_type_description = 'Binary string'
    elif self.data_type == definitions.DATA_TYPE_BOOLEAN:
      data_type_description = 'Boolean'
    elif self.DataTypeIsDatetime():
      data_type_description = 'Datetime'
    elif self.data_type == definitions.DATA_TYPE_OBJECT:
      data_type_description = 'Object'
    elif self.DataTypeIsFloat():
      data_type_description = 'Float'
    elif self.DataTypeIsInteger():
      data_type_description = 'Integer'
    elif self.data_type in (
        definitions.DATA_TYPE_GUID,
        definitions.DATA_TYPE_STRING,
        definitions.DATA_TYPE_UUID):
      data_type_description = 'Unicode string'
    elif self.data_type == definitions.DATA_TYPE_NARROW_STRING:
      data_type_description = 'String'
    elif self.data_type == definitions.DATA_TYPE_NONE:
      data_type_description = 'None'
    else:
      # Fall back to the raw data type value.
      data_type_description = self.data_type

    if (data_type_description != 'None' and self.return_values and
        'None' in self.return_values):
      data_type_description = '{0:s} or None'.format(data_type_description)

    return data_type_description

  def GetDescription(self):
    """Retrieves the description of the function.

    Returns:
      list[str]: lines of the description; contains a single empty string
          if no description could be derived.
    """
    description = ['']

    type_function = self.type_function

    type_name = self._type_name
    if type_name:
      type_name = type_name.replace('_', ' ')

    value_name = self.value_name
    if value_name:
      value_name = value_name.replace('_', ' ')

    if type_function == 'close':
      description = ['Closes a {0:s}.'.format(type_name)]

    elif type_function == 'get_ascii_codepage':
      description = [(
          'Retrieves the codepage for ASCII strings used in '
          'the {0:s}.').format(type_name)]

    elif type_function == 'get_data_as_boolean':
      description = ['Retrieves the data as a boolean.']

    elif type_function == 'get_data_as_datetime':
      description = ['Retrieves the data as a datetime object.']

    elif type_function == 'get_data_as_integer':
      description = ['Retrieves the data as an integer.']

    elif type_function == 'get_data_as_floating_point':
      description = ['Retrieves the data as a floating point.']

    elif type_function == 'get_data_as_string':
      description = ['Retrieves the data as a string.']

    elif type_function == 'get_string':
      description = ['Retrieves the {0:s} formatted as a string.'.format(
          type_name)]

    elif type_function == 'open':
      description = ['Opens a {0:s}.'.format(type_name)]

    elif type_function == 'open_file_object':
      description = [(
          'Opens a {0:s} using a file-like object.').format(type_name)]

    elif type_function == 'read_buffer':
      if self.value_description:
        description = ['Reads a buffer of {0:s}.'.format(
            self.value_description)]
      else:
        description = ['Reads a buffer of data.']

    elif type_function == 'read_buffer_at_offset':
      if self.value_description:
        description = ['Reads a buffer of {0:s} at a specific offset.'.format(
            self.value_description)]
      else:
        description = ['Reads a buffer of data at a specific offset.']

    elif type_function == 'seek_offset':
      if self.value_description:
        description = ['Seeks an offset within the {0:s}.'.format(
            self.value_description)]
      else:
        description = ['Seeks an offset within the data.']

    elif type_function == 'set_ascii_codepage':
      description = [
          ('Sets the codepage for ASCII strings used in the '
           '{0:s}.').format(type_name),
          ('Expects the codepage to be a string containing a Python '
           'codec definition.')]

    elif type_function == 'set_parent':
      description = ['Sets the parent file.']

    elif type_function == 'signal_abort':
      description = ['Signals the {0:s} to abort the current activity.'.format(
          type_name)]

    elif self.function_type == definitions.FUNCTION_TYPE_GET_BY_INDEX:
      # The suffix of the first argument name, e.g. 'index' of 'entry_index'.
      _, _, argument_suffix = self.arguments[0].rpartition('_')
      if self.value_description:
        description = ['Retrieves the {0:s} specified by the {1:s}.'.format(
            self.value_description, argument_suffix)]
      else:
        description = ['Retrieves the {0:s} specified by the {1:s}.'.format(
            value_name, argument_suffix)]

    elif self.function_type in (
        definitions.FUNCTION_TYPE_GET_BY_IDENTIFIER,
        definitions.FUNCTION_TYPE_GET_BY_NAME,
        definitions.FUNCTION_TYPE_GET_BY_PATH):
      _, _, type_function_suffix = type_function.partition('_by_')
      if self.value_description:
        description = ['Retrieves the {0:s} specified by the {1:s}.'.format(
            self.value_description, type_function_suffix)]
      else:
        description = ['Retrieves the {0:s} specified by the {1:s}.'.format(
            value_name, type_function_suffix)]

    elif self.function_type == definitions.FUNCTION_TYPE_COPY_FROM:
      description = ['Copies the {0:s} from the {1:s}.'.format(
          type_name, value_name)]

    elif self.function_type in (
        definitions.FUNCTION_TYPE_COPY, definitions.FUNCTION_TYPE_GET):
      if self.value_description:
        description = ['Retrieves the {0:s}.'.format(self.value_description)]
      else:
        description = ['Retrieves the {0:s}.'.format(value_name)]

    elif self.function_type == definitions.FUNCTION_TYPE_IS:
      description = ['Determines if the {0:s} is {1:s}.'.format(
          type_name, value_name)]

    elif self.function_type == definitions.FUNCTION_TYPE_SET:
      description = ['Sets the {0:s}.'.format(value_name)]

    return description

  def GetValueNameAndPrefix(self):
    """Retrieves the value name, split from a known prefix.

    Returns:
      tuple[str, str]: value name and prefix; the prefix is only split off
          when it is 'root' or 'sub', otherwise it is None.
    """
    if self.value_name:
      value_name_prefix, _, value_name = self.value_name.partition('_')
      if value_name_prefix in ('root', 'sub'):
        return value_name, value_name_prefix

    return self.value_name, None
| true | true |
f7257dbe457fadac40393d1ec3dd31766bbf6237 | 540 | py | Python | sample_app/admin.py | imimran/inline-in-fieldset | c20568904011889001d92024c8881782a84aa00c | [
"MIT"
] | null | null | null | sample_app/admin.py | imimran/inline-in-fieldset | c20568904011889001d92024c8881782a84aa00c | [
"MIT"
] | null | null | null | sample_app/admin.py | imimran/inline-in-fieldset | c20568904011889001d92024c8881782a84aa00c | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import Student, Subject
class SubjectInline(admin.TabularInline):
    """Tabular inline editor for Subject rows on the Student change form."""
    model = Subject
    # Not a standard Django InlineModelAdmin option -- presumably consumed by
    # the custom change_form template to place this inline after the 'name'
    # field; confirm against admin/custom/change_form.html.
    insert_after = 'name'
class StudentAdmin(admin.ModelAdmin):
    """Admin configuration for Student, with Subject rows edited inline."""
    # Fields shown on the Student change form, in this order.
    fields = (
        'name',
        'department',
        'gender',
    )
    inlines = [
        SubjectInline,
    ]
    # Overrides the default change form template with a custom one.
    change_form_template = 'admin/custom/change_form.html'

    class Media:
        # Extra stylesheet loaded on this admin's pages.
        css = {
            'all': (
                'css/admin.css',
            )
        }
# Register the Student model with its customized admin class.
admin.site.register(Student, StudentAdmin)
| 16.875 | 58 | 0.572222 | from django.contrib import admin
from .models import Student, Subject
class SubjectInline(admin.TabularInline):
model = Subject
insert_after = 'name'
class StudentAdmin(admin.ModelAdmin):
fields = (
'name',
'department',
'gender',
)
inlines = [
SubjectInline,
]
change_form_template = 'admin/custom/change_form.html'
class Media:
css = {
'all': (
'css/admin.css',
)
}
admin.site.register(Student, StudentAdmin)
| true | true |
f7257df7d21d92286f4cc0d13478da8406845b2f | 26,886 | py | Python | zipline/data/resample.py | Code37/zipline | de038dbf584980af4f30822f8e5d306bac2a44cb | [
"Apache-2.0"
] | 412 | 2017-04-30T14:35:47.000Z | 2022-03-29T02:58:33.000Z | zipline/data/resample.py | waijay1992/zipline | 8beba055aa4211dc2debc5c3083077cbd19d0bbc | [
"Apache-2.0"
] | 116 | 2017-05-15T04:45:45.000Z | 2020-05-30T19:09:00.000Z | zipline/data/resample.py | waijay1992/zipline | 8beba055aa4211dc2debc5c3083077cbd19d0bbc | [
"Apache-2.0"
] | 80 | 2017-05-03T13:17:33.000Z | 2021-02-08T15:42:09.000Z | # Copyright 2016 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from abc import ABCMeta, abstractmethod
import numpy as np
import pandas as pd
from six import with_metaclass
from zipline.data._resample import (
_minute_to_session_open,
_minute_to_session_high,
_minute_to_session_low,
_minute_to_session_close,
_minute_to_session_volume,
)
from zipline.data.bar_reader import NoDataOnDate
from zipline.data.minute_bars import MinuteBarReader
from zipline.data.session_bars import SessionBarReader
from zipline.utils.memoize import lazyval
# Aggregation rules used to roll minute-level OHLCV bars up into session
# (daily) bars: first open, max high, min low, last close, summed volume.
_MINUTE_TO_SESSION_OHCLV_HOW = OrderedDict((
    ('open', 'first'),
    ('high', 'max'),
    ('low', 'min'),
    ('close', 'last'),
    ('volume', 'sum'),
))
def minute_frame_to_session_frame(minute_frame, calendar):
    """
    Resample a DataFrame of minute bars into the session-level frame
    expected by a BcolzDailyBarWriter.

    Parameters
    ----------
    minute_frame : pd.DataFrame
        A DataFrame with the columns `open`, `high`, `low`, `close`,
        `volume`, and `dt` (minute dts)
    calendar : zipline.utils.calendars.trading_calendar.TradingCalendar
        A TradingCalendar on which session labels to resample from minute
        to session.

    Return
    ------
    session_frame : pd.DataFrame
        A DataFrame with the columns `open`, `high`, `low`, `close`,
        `volume`, and `day` (datetime-like).
    """
    # Keep only the aggregation rules for the columns actually present,
    # preserving the frame's column order.
    aggregations = OrderedDict(
        (column, _MINUTE_TO_SESSION_OHCLV_HOW[column])
        for column in minute_frame.columns)
    grouped_by_session = minute_frame.groupby(calendar.minute_to_session_label)
    return grouped_by_session.agg(aggregations)
def minute_to_session(column, close_locs, data, out):
    """
    Resample an array with minute data into an array with session data.

    This function assumes that the minute data is the exact length of all
    minutes in the sessions in the output.

    Parameters
    ----------
    column : str
        The `open`, `high`, `low`, `close`, or `volume` column.
    close_locs : array[intp]
        The locations in `data` which are the market close minutes.
    data : array[float64|uint32]
        The minute data to be sampled into session data.
        The first value should align with the market open of the first
        session, containing values for all minutes for all sessions. With
        the last value being the market close of the last session.
    out : array[float64|uint32]
        The output array into which to write the sampled sessions.
    """
    # Dispatch to the specialized per-column aggregator. An unrecognized
    # column leaves `out` untouched, matching the original if/elif chain.
    aggregators = {
        'open': _minute_to_session_open,
        'high': _minute_to_session_high,
        'low': _minute_to_session_low,
        'close': _minute_to_session_close,
        'volume': _minute_to_session_volume,
    }
    aggregator = aggregators.get(column)
    if aggregator is not None:
        aggregator(close_locs, data, out)
    return out
class DailyHistoryAggregator(object):
    """
    Converts minute pricing data into a daily summary, to be used for the
    last slot in a call to history with a frequency of `1d`.

    This summary is the same as a daily bar rollup of minute data, with the
    distinction that the summary is truncated to the `dt` requested.
    i.e. the aggregation slides forward during a the course of simulation day.

    Provides aggregation for `open`, `high`, `low`, `close`, and `volume`.
    The aggregation rules for each price type is documented in their
    respective aggregation methods below.
    """

    def __init__(self, market_opens, minute_reader, trading_calendar):
        # market_opens : pd.Series mapping session label -> market open
        #     (indexed into by session in _prelude).
        # minute_reader : reader providing get_value / load_raw_arrays for
        #     minute bars.
        # trading_calendar : calendar used to map minutes to session labels.
        self._market_opens = market_opens
        self._minute_reader = minute_reader
        self._trading_calendar = trading_calendar

        # The caches are structured as (date, market_open, entries), where
        # entries is a dict of asset -> (last_visited_dt, value)
        #
        # Whenever an aggregation method determines the current value,
        # the entry for the respective asset should be overwritten with a new
        # entry for the current dt.value (int) and aggregation value.
        #
        # When the requested dt's date is different from date the cache is
        # flushed, so that the cache entries do not grow unbounded.
        #
        # Example cache:
        # cache = (date(2016, 3, 17),
        #          pd.Timestamp('2016-03-17 13:31', tz='UTC'),
        #          {
        #              1: (1458221460000000000, np.nan),
        #              2: (1458221460000000000, 42.0),
        #          })
        self._caches = {
            'open': None,
            'high': None,
            'low': None,
            'close': None,
            'volume': None
        }

        # The int value is used for deltas to avoid extra computation from
        # creating new Timestamps.
        self._one_min = pd.Timedelta('1 min').value

    def _prelude(self, dt, field):
        # Shared setup for all aggregation methods: resolves the session for
        # `dt`, (re)initializes the per-field cache when the session changed,
        # and computes the previous minute's integer timestamp.
        session = self._trading_calendar.minute_to_session_label(dt)
        dt_value = dt.value
        cache = self._caches[field]
        if cache is None or cache[0] != session:
            # New session: flush this field's cache.
            market_open = self._market_opens.loc[session]
            cache = self._caches[field] = (session, market_open, {})

        _, market_open, entries = cache
        market_open = market_open.tz_localize('UTC')
        if dt != market_open:
            prev_dt = dt_value - self._one_min
        else:
            # At the market open there is no previous minute in the session.
            prev_dt = None
        return market_open, prev_dt, dt_value, entries

    def opens(self, assets, dt):
        """
        The open field's aggregation returns the first value that occurs
        for the day, if there has been no data on or before the `dt` the open
        is `nan`.

        Once the first non-nan open is seen, that value remains constant per
        asset for the remainder of the day.

        Returns
        -------
        np.array with dtype=float64, in order of assets parameter.
        """
        market_open, prev_dt, dt_value, entries = self._prelude(dt, 'open')

        opens = []
        session_label = self._trading_calendar.minute_to_session_label(dt)

        for asset in assets:
            if not asset.is_alive_for_session(session_label):
                opens.append(np.NaN)
                continue

            if prev_dt is None:
                # First minute of the session: read directly.
                val = self._minute_reader.get_value(asset, dt, 'open')
                entries[asset] = (dt_value, val)
                opens.append(val)
                continue
            else:
                try:
                    last_visited_dt, first_open = entries[asset]

                    if last_visited_dt == dt_value:
                        # Same minute revisited: return the cached value.
                        opens.append(first_open)
                        continue
                    elif not pd.isnull(first_open):
                        # First open already found; constant for the day.
                        opens.append(first_open)
                        entries[asset] = (dt_value, first_open)
                        continue
                    else:
                        # Still searching for the first non-nan open: scan
                        # the window since the last visit.
                        after_last = pd.Timestamp(
                            last_visited_dt + self._one_min, tz='UTC')
                        window = self._minute_reader.load_raw_arrays(
                            ['open'],
                            after_last,
                            dt,
                            [asset],
                        )[0]
                        nonnan = window[~pd.isnull(window)]
                        if len(nonnan):
                            val = nonnan[0]
                        else:
                            val = np.nan
                        entries[asset] = (dt_value, val)
                        opens.append(val)
                        continue
                except KeyError:
                    # Asset not yet cached: scan from the market open.
                    window = self._minute_reader.load_raw_arrays(
                        ['open'],
                        market_open,
                        dt,
                        [asset],
                    )[0]
                    nonnan = window[~pd.isnull(window)]
                    if len(nonnan):
                        val = nonnan[0]
                    else:
                        val = np.nan
                    entries[asset] = (dt_value, val)
                    opens.append(val)
                    continue
        return np.array(opens)

    def highs(self, assets, dt):
        """
        The high field's aggregation returns the largest high seen between
        the market open and the current dt.
        If there has been no data on or before the `dt` the high is `nan`.

        Returns
        -------
        np.array with dtype=float64, in order of assets parameter.
        """
        market_open, prev_dt, dt_value, entries = self._prelude(dt, 'high')

        highs = []
        session_label = self._trading_calendar.minute_to_session_label(dt)

        for asset in assets:
            if not asset.is_alive_for_session(session_label):
                highs.append(np.NaN)
                continue

            if prev_dt is None:
                val = self._minute_reader.get_value(asset, dt, 'high')
                entries[asset] = (dt_value, val)
                highs.append(val)
                continue
            else:
                try:
                    last_visited_dt, last_max = entries[asset]

                    if last_visited_dt == dt_value:
                        highs.append(last_max)
                        continue
                    elif last_visited_dt == prev_dt:
                        # Cache is one minute behind: fold in just the
                        # current minute's value, handling nans explicitly.
                        curr_val = self._minute_reader.get_value(
                            asset, dt, 'high')
                        if pd.isnull(curr_val):
                            val = last_max
                        elif pd.isnull(last_max):
                            val = curr_val
                        else:
                            val = max(last_max, curr_val)
                        entries[asset] = (dt_value, val)
                        highs.append(val)
                        continue
                    else:
                        # Cache is stale by more than a minute: re-scan the
                        # missed window and combine with the cached max.
                        after_last = pd.Timestamp(
                            last_visited_dt + self._one_min, tz='UTC')
                        window = self._minute_reader.load_raw_arrays(
                            ['high'],
                            after_last,
                            dt,
                            [asset],
                        )[0].T
                        val = np.nanmax(np.append(window, last_max))
                        entries[asset] = (dt_value, val)
                        highs.append(val)
                        continue
                except KeyError:
                    window = self._minute_reader.load_raw_arrays(
                        ['high'],
                        market_open,
                        dt,
                        [asset],
                    )[0].T
                    val = np.nanmax(window)
                    entries[asset] = (dt_value, val)
                    highs.append(val)
                    continue
        return np.array(highs)

    def lows(self, assets, dt):
        """
        The low field's aggregation returns the smallest low seen between
        the market open and the current dt.
        If there has been no data on or before the `dt` the low is `nan`.

        Returns
        -------
        np.array with dtype=float64, in order of assets parameter.
        """
        market_open, prev_dt, dt_value, entries = self._prelude(dt, 'low')

        lows = []
        session_label = self._trading_calendar.minute_to_session_label(dt)

        for asset in assets:
            if not asset.is_alive_for_session(session_label):
                lows.append(np.NaN)
                continue

            if prev_dt is None:
                val = self._minute_reader.get_value(asset, dt, 'low')
                entries[asset] = (dt_value, val)
                lows.append(val)
                continue
            else:
                try:
                    last_visited_dt, last_min = entries[asset]

                    if last_visited_dt == dt_value:
                        lows.append(last_min)
                        continue
                    elif last_visited_dt == prev_dt:
                        curr_val = self._minute_reader.get_value(
                            asset, dt, 'low')
                        # nanmin ignores a nan current value or cached min.
                        val = np.nanmin([last_min, curr_val])
                        entries[asset] = (dt_value, val)
                        lows.append(val)
                        continue
                    else:
                        after_last = pd.Timestamp(
                            last_visited_dt + self._one_min, tz='UTC')
                        window = self._minute_reader.load_raw_arrays(
                            ['low'],
                            after_last,
                            dt,
                            [asset],
                        )[0].T
                        val = np.nanmin(np.append(window, last_min))
                        entries[asset] = (dt_value, val)
                        lows.append(val)
                        continue
                except KeyError:
                    window = self._minute_reader.load_raw_arrays(
                        ['low'],
                        market_open,
                        dt,
                        [asset],
                    )[0].T
                    val = np.nanmin(window)
                    entries[asset] = (dt_value, val)
                    lows.append(val)
                    continue
        return np.array(lows)

    def closes(self, assets, dt):
        """
        The close field's aggregation returns the latest close at the given
        dt.
        If the close for the given dt is `nan`, the most recent non-nan
        `close` is used.
        If there has been no data on or before the `dt` the close is `nan`.

        Returns
        -------
        np.array with dtype=float64, in order of assets parameter.
        """
        market_open, prev_dt, dt_value, entries = self._prelude(dt, 'close')

        closes = []
        session_label = self._trading_calendar.minute_to_session_label(dt)

        def _get_filled_close(asset):
            """
            Returns the most recent non-nan close for the asset in this
            session. If there has been no data in this session on or before
            the `dt`, returns `nan`
            """
            window = self._minute_reader.load_raw_arrays(
                ['close'],
                market_open,
                dt,
                [asset],
            )[0]
            try:
                return window[~np.isnan(window)][-1]
            except IndexError:
                return np.NaN

        for asset in assets:
            if not asset.is_alive_for_session(session_label):
                closes.append(np.NaN)
                continue

            if prev_dt is None:
                val = self._minute_reader.get_value(asset, dt, 'close')
                entries[asset] = (dt_value, val)
                closes.append(val)
                continue
            else:
                try:
                    last_visited_dt, last_close = entries[asset]

                    if last_visited_dt == dt_value:
                        closes.append(last_close)
                        continue
                    elif last_visited_dt == prev_dt:
                        # Cache is one minute behind: the cached close is a
                        # valid fallback for a nan current value.
                        val = self._minute_reader.get_value(
                            asset, dt, 'close')
                        if pd.isnull(val):
                            val = last_close
                        entries[asset] = (dt_value, val)
                        closes.append(val)
                        continue
                    else:
                        # Stale cache: fall back to a full-session scan.
                        val = self._minute_reader.get_value(
                            asset, dt, 'close')
                        if pd.isnull(val):
                            val = _get_filled_close(asset)
                        entries[asset] = (dt_value, val)
                        closes.append(val)
                        continue
                except KeyError:
                    val = self._minute_reader.get_value(
                        asset, dt, 'close')
                    if pd.isnull(val):
                        val = _get_filled_close(asset)
                    entries[asset] = (dt_value, val)
                    closes.append(val)
                    continue
        return np.array(closes)

    def volumes(self, assets, dt):
        """
        The volume field's aggregation returns the sum of all volumes
        between the market open and the `dt`
        If there has been no data on or before the `dt` the volume is 0.

        Returns
        -------
        np.array with dtype=int64, in order of assets parameter.
        """
        market_open, prev_dt, dt_value, entries = self._prelude(dt, 'volume')

        volumes = []
        session_label = self._trading_calendar.minute_to_session_label(dt)

        for asset in assets:
            if not asset.is_alive_for_session(session_label):
                volumes.append(0)
                continue

            if prev_dt is None:
                val = self._minute_reader.get_value(asset, dt, 'volume')
                entries[asset] = (dt_value, val)
                volumes.append(val)
                continue
            else:
                try:
                    last_visited_dt, last_total = entries[asset]

                    if last_visited_dt == dt_value:
                        volumes.append(last_total)
                        continue
                    elif last_visited_dt == prev_dt:
                        # Add just the current minute to the running total.
                        val = self._minute_reader.get_value(
                            asset, dt, 'volume')
                        val += last_total
                        entries[asset] = (dt_value, val)
                        volumes.append(val)
                        continue
                    else:
                        # Sum the missed window and add the cached total.
                        after_last = pd.Timestamp(
                            last_visited_dt + self._one_min, tz='UTC')
                        window = self._minute_reader.load_raw_arrays(
                            ['volume'],
                            after_last,
                            dt,
                            [asset],
                        )[0]
                        val = np.nansum(window) + last_total
                        entries[asset] = (dt_value, val)
                        volumes.append(val)
                        continue
                except KeyError:
                    window = self._minute_reader.load_raw_arrays(
                        ['volume'],
                        market_open,
                        dt,
                        [asset],
                    )[0]
                    val = np.nansum(window)
                    entries[asset] = (dt_value, val)
                    volumes.append(val)
                    continue
        return np.array(volumes)
class MinuteResampleSessionBarReader(SessionBarReader):
    """
    A session bar reader which resamples, on the fly, from an underlying
    minute bar reader.

    Parameters
    ----------
    calendar : TradingCalendar
        The calendar used to map sessions to their open/close minutes.
    minute_bar_reader : MinuteBarReader
        The reader providing the minute-level data to resample.
    """

    def __init__(self, calendar, minute_bar_reader):
        self._calendar = calendar
        self._minute_bar_reader = minute_bar_reader

    def _get_resampled(self, columns, start_session, end_session, assets):
        # Loads all minutes from the open of `start_session` through the
        # close of `end_session` and rolls them up into one row per session.
        range_open = self._calendar.session_open(start_session)
        range_close = self._calendar.session_close(end_session)

        minute_data = self._minute_bar_reader.load_raw_arrays(
            columns,
            range_open,
            range_close,
            assets,
        )

        # Get the index of the close minute for each session in the range.
        # If the range contains only one session, the only close in the range
        # is the last minute in the data. Otherwise, we need to get all the
        # session closes and find their indices in the range of minutes.
        if start_session == end_session:
            close_ilocs = np.array([len(minute_data[0]) - 1], dtype=np.int64)
        else:
            minutes = self._calendar.minutes_in_range(
                range_open,
                range_close,
            )
            session_closes = self._calendar.session_closes_in_range(
                start_session,
                end_session,
            )
            close_ilocs = minutes.searchsorted(session_closes.values)

        # One output array per column: nan-filled floats for prices,
        # zero-filled uint32 for volume.
        results = []
        shape = (len(close_ilocs), len(assets))
        for col in columns:
            if col != 'volume':
                out = np.full(shape, np.nan)
            else:
                out = np.zeros(shape, dtype=np.uint32)
            results.append(out)

        # Resample each (asset, column) minute series into its session column.
        for i in range(len(assets)):
            for j, column in enumerate(columns):
                data = minute_data[j][:, i]
                minute_to_session(column, close_ilocs, data, results[j][:, i])

        return results

    @property
    def trading_calendar(self):
        return self._calendar

    def load_raw_arrays(self, columns, start_dt, end_dt, sids):
        return self._get_resampled(columns, start_dt, end_dt, sids)

    def get_value(self, sid, session, colname):
        # WARNING: This will need caching or other optimization if used in a
        # tight loop.
        # This was developed to complete interface, but has not been tuned
        # for real world use.
        return self._get_resampled([colname], session, session, [sid])[0][0][0]

    @lazyval
    def sessions(self):
        # Sessions spanned by the underlying minute reader, on this calendar.
        cal = self._calendar
        first = self._minute_bar_reader.first_trading_day
        last = cal.minute_to_session_label(
            self._minute_bar_reader.last_available_dt)
        return cal.sessions_in_range(first, last)

    @lazyval
    def last_available_dt(self):
        return self.trading_calendar.minute_to_session_label(
            self._minute_bar_reader.last_available_dt
        )

    @property
    def first_trading_day(self):
        return self._minute_bar_reader.first_trading_day

    def get_last_traded_dt(self, asset, dt):
        return self.trading_calendar.minute_to_session_label(
            self._minute_bar_reader.get_last_traded_dt(asset, dt))
class ReindexBarReader(with_metaclass(ABCMeta)):
    """
    A base class for readers which reindexes results, filling in the
    additional indices with empty data.

    Used to align the reading assets which trade on different calendars.

    Currently only supports a ``trading_calendar`` which is a superset of the
    ``reader``'s calendar.

    Parameters
    ----------
    - trading_calendar : zipline.utils.trading_calendar.TradingCalendar
       The calendar to use when indexing results from the reader.
    - reader : MinuteBarReader|SessionBarReader
       The reader which has a calendar that is a subset of the desired
       ``trading_calendar``.
    - first_trading_session : pd.Timestamp
       The first trading session the reader should provide. Must be
       specified, since the ``reader``'s first session may not exactly align
       with the desired calendar. Specifically, in the case where the first
       session on the target calendar is a holiday on the ``reader``'s
       calendar.
    - last_trading_session : pd.Timestamp
       The last trading session the reader should provide. Must be
       specified, since the ``reader``'s last session may not exactly align
       with the desired calendar. Specifically, in the case where the last
       session on the target calendar is a holiday on the ``reader``'s
       calendar.
    """

    def __init__(self,
                 trading_calendar,
                 reader,
                 first_trading_session,
                 last_trading_session):
        self._trading_calendar = trading_calendar
        self._reader = reader
        self._first_trading_session = first_trading_session
        self._last_trading_session = last_trading_session

    @property
    def last_available_dt(self):
        return self._reader.last_available_dt

    def get_last_traded_dt(self, sid, dt):
        return self._reader.get_last_traded_dt(sid, dt)

    @property
    def first_trading_day(self):
        return self._reader.first_trading_day

    def get_value(self, sid, dt, field):
        # Give an empty result if no data is present.
        try:
            return self._reader.get_value(sid, dt, field)
        except NoDataOnDate:
            if field == 'volume':
                return 0
            else:
                return np.nan

    @abstractmethod
    def _outer_dts(self, start_dt, end_dt):
        # The dts on the target (superset) calendar for the range.
        raise NotImplementedError

    @abstractmethod
    def _inner_dts(self, start_dt, end_dt):
        # The dts on the wrapped reader's (subset) calendar for the range.
        raise NotImplementedError

    @property
    def trading_calendar(self):
        return self._trading_calendar

    @lazyval
    def sessions(self):
        return self.trading_calendar.sessions_in_range(
            self._first_trading_session,
            self._last_trading_session
        )

    def load_raw_arrays(self, fields, start_dt, end_dt, sids):
        outer_dts = self._outer_dts(start_dt, end_dt)
        inner_dts = self._inner_dts(start_dt, end_dt)

        # Positions of the inner (subset) dts within the outer index; the
        # inner results are scattered into these rows, leaving the rest
        # empty (nan prices, zero volume).
        indices = outer_dts.searchsorted(inner_dts)

        shape = len(outer_dts), len(sids)

        outer_results = []

        if len(inner_dts) > 0:
            inner_results = self._reader.load_raw_arrays(
                fields, inner_dts[0], inner_dts[-1], sids)
        else:
            inner_results = None

        for i, field in enumerate(fields):
            if field != 'volume':
                out = np.full(shape, np.nan)
            else:
                out = np.zeros(shape, dtype=np.uint32)

            if inner_results is not None:
                out[indices] = inner_results[i]

            outer_results.append(out)

        return outer_results
class ReindexMinuteBarReader(ReindexBarReader, MinuteBarReader):
    """
    See: ``ReindexBarReader``
    """

    def _outer_dts(self, start_dt, end_dt):
        return self._trading_calendar.minutes_in_range(start_dt, end_dt)

    def _inner_dts(self, start_dt, end_dt):
        # NOTE(review): this reads ``self._reader.calendar`` while the
        # session variant reads ``self._reader.trading_calendar`` -- confirm
        # the minute reader really exposes its calendar as ``calendar``.
        return self._reader.calendar.minutes_in_range(start_dt, end_dt)
class ReindexSessionBarReader(ReindexBarReader, SessionBarReader):
    """
    See: ``ReindexBarReader``
    """

    def _outer_dts(self, start_dt, end_dt):
        return self.trading_calendar.sessions_in_range(start_dt, end_dt)

    def _inner_dts(self, start_dt, end_dt):
        return self._reader.trading_calendar.sessions_in_range(
            start_dt, end_dt)
| 36.6794 | 79 | 0.542699 |
from collections import OrderedDict
from abc import ABCMeta, abstractmethod
import numpy as np
import pandas as pd
from six import with_metaclass
from zipline.data._resample import (
_minute_to_session_open,
_minute_to_session_high,
_minute_to_session_low,
_minute_to_session_close,
_minute_to_session_volume,
)
from zipline.data.bar_reader import NoDataOnDate
from zipline.data.minute_bars import MinuteBarReader
from zipline.data.session_bars import SessionBarReader
from zipline.utils.memoize import lazyval
_MINUTE_TO_SESSION_OHCLV_HOW = OrderedDict((
('open', 'first'),
('high', 'max'),
('low', 'min'),
('close', 'last'),
('volume', 'sum'),
))
def minute_frame_to_session_frame(minute_frame, calendar):
how = OrderedDict((c, _MINUTE_TO_SESSION_OHCLV_HOW[c])
for c in minute_frame.columns)
return minute_frame.groupby(calendar.minute_to_session_label).agg(how)
def minute_to_session(column, close_locs, data, out):
if column == 'open':
_minute_to_session_open(close_locs, data, out)
elif column == 'high':
_minute_to_session_high(close_locs, data, out)
elif column == 'low':
_minute_to_session_low(close_locs, data, out)
elif column == 'close':
_minute_to_session_close(close_locs, data, out)
elif column == 'volume':
_minute_to_session_volume(close_locs, data, out)
return out
class DailyHistoryAggregator(object):
    """
    Aggregates minute bars into day-so-far OHLCV values as of a given minute.

    Each field keeps a per-session cache of (last visited minute, value) per
    asset, so repeated calls within the same session only read the minutes
    elapsed since the previous call instead of re-scanning from market open.
    """
    def __init__(self, market_opens, minute_reader, trading_calendar):
        # Mapping of session label -> that session's market open timestamp.
        self._market_opens = market_opens
        self._minute_reader = minute_reader
        self._trading_calendar = trading_calendar
        # One cache per field, shaped as (session, market_open, entries)
        # where ``entries`` maps asset -> (last_visited_dt_value, value).
        # When the session advances, the field's cache is replaced, i.e.
        # flushed, so that the cache entries do not grow unbounded.
        #
        # Example cache:
        # cache = (date(2016, 3, 17),
        #          pd.Timestamp('2016-03-17 13:31', tz='UTC'),
        #          {
        #              1: (1458221460000000000, np.nan),
        #              2: (1458221460000000000, 42.0),
        #          })
        self._caches = {
            'open': None,
            'high': None,
            'low': None,
            'close': None,
            'volume': None
        }
        # The int value is used for deltas to avoid extra computation from
        # creating new Timestamps.
        self._one_min = pd.Timedelta('1 min').value
    def _prelude(self, dt, field):
        """
        Resolve (and lazily reset) the cache for ``field`` at minute ``dt``.

        Returns
        -------
        (market_open, prev_dt, dt_value, entries)
            ``prev_dt`` is the integer value of the previous minute, or
            ``None`` when ``dt`` is the market open itself.
        """
        session = self._trading_calendar.minute_to_session_label(dt)
        dt_value = dt.value
        cache = self._caches[field]
        # A stale (previous-session) cache is discarded and rebuilt.
        if cache is None or cache[0] != session:
            market_open = self._market_opens.loc[session]
            cache = self._caches[field] = (session, market_open, {})
        _, market_open, entries = cache
        market_open = market_open.tz_localize('UTC')
        if dt != market_open:
            prev_dt = dt_value - self._one_min
        else:
            prev_dt = None
        return market_open, prev_dt, dt_value, entries
    def opens(self, assets, dt):
        """
        Day-so-far open for each asset as of minute ``dt``: the first
        non-NaN minute value on or after market open, else NaN.
        """
        market_open, prev_dt, dt_value, entries = self._prelude(dt, 'open')
        opens = []
        session_label = self._trading_calendar.minute_to_session_label(dt)
        for asset in assets:
            if not asset.is_alive_for_session(session_label):
                opens.append(np.NaN)
                continue
            if prev_dt is None:
                # First minute of the session: the open is this minute's value.
                val = self._minute_reader.get_value(asset, dt, 'open')
                entries[asset] = (dt_value, val)
                opens.append(val)
                continue
            else:
                try:
                    last_visited_dt, first_open = entries[asset]
                    if last_visited_dt == dt_value:
                        opens.append(first_open)
                        continue
                    elif not pd.isnull(first_open):
                        # Open already established earlier in the session.
                        opens.append(first_open)
                        entries[asset] = (dt_value, first_open)
                        continue
                    else:
                        # Scan only the minutes since the last visit for the
                        # first non-NaN value.
                        after_last = pd.Timestamp(
                            last_visited_dt + self._one_min, tz='UTC')
                        window = self._minute_reader.load_raw_arrays(
                            ['open'],
                            after_last,
                            dt,
                            [asset],
                        )[0]
                        nonnan = window[~pd.isnull(window)]
                        if len(nonnan):
                            val = nonnan[0]
                        else:
                            val = np.nan
                        entries[asset] = (dt_value, val)
                        opens.append(val)
                        continue
                except KeyError:
                    # Asset not cached yet: scan from market open.
                    window = self._minute_reader.load_raw_arrays(
                        ['open'],
                        market_open,
                        dt,
                        [asset],
                    )[0]
                    nonnan = window[~pd.isnull(window)]
                    if len(nonnan):
                        val = nonnan[0]
                    else:
                        val = np.nan
                    entries[asset] = (dt_value, val)
                    opens.append(val)
                    continue
        return np.array(opens)
    def highs(self, assets, dt):
        """
        Day-so-far high for each asset as of minute ``dt``: the running
        NaN-aware maximum of minute highs since market open.
        """
        market_open, prev_dt, dt_value, entries = self._prelude(dt, 'high')
        highs = []
        session_label = self._trading_calendar.minute_to_session_label(dt)
        for asset in assets:
            if not asset.is_alive_for_session(session_label):
                highs.append(np.NaN)
                continue
            if prev_dt is None:
                val = self._minute_reader.get_value(asset, dt, 'high')
                entries[asset] = (dt_value, val)
                highs.append(val)
                continue
            else:
                try:
                    last_visited_dt, last_max = entries[asset]
                    if last_visited_dt == dt_value:
                        highs.append(last_max)
                        continue
                    elif last_visited_dt == prev_dt:
                        # Cache is exactly one minute behind: fold in the
                        # current minute only.
                        curr_val = self._minute_reader.get_value(
                            asset, dt, 'high')
                        if pd.isnull(curr_val):
                            val = last_max
                        elif pd.isnull(last_max):
                            val = curr_val
                        else:
                            val = max(last_max, curr_val)
                        entries[asset] = (dt_value, val)
                        highs.append(val)
                        continue
                    else:
                        # Cache is several minutes behind: scan the gap.
                        after_last = pd.Timestamp(
                            last_visited_dt + self._one_min, tz='UTC')
                        window = self._minute_reader.load_raw_arrays(
                            ['high'],
                            after_last,
                            dt,
                            [asset],
                        )[0].T
                        val = np.nanmax(np.append(window, last_max))
                        entries[asset] = (dt_value, val)
                        highs.append(val)
                        continue
                except KeyError:
                    window = self._minute_reader.load_raw_arrays(
                        ['high'],
                        market_open,
                        dt,
                        [asset],
                    )[0].T
                    val = np.nanmax(window)
                    entries[asset] = (dt_value, val)
                    highs.append(val)
                    continue
        return np.array(highs)
    def lows(self, assets, dt):
        """
        Day-so-far low for each asset as of minute ``dt``: the running
        NaN-aware minimum of minute lows since market open.
        """
        market_open, prev_dt, dt_value, entries = self._prelude(dt, 'low')
        lows = []
        session_label = self._trading_calendar.minute_to_session_label(dt)
        for asset in assets:
            if not asset.is_alive_for_session(session_label):
                lows.append(np.NaN)
                continue
            if prev_dt is None:
                val = self._minute_reader.get_value(asset, dt, 'low')
                entries[asset] = (dt_value, val)
                lows.append(val)
                continue
            else:
                try:
                    last_visited_dt, last_min = entries[asset]
                    if last_visited_dt == dt_value:
                        lows.append(last_min)
                        continue
                    elif last_visited_dt == prev_dt:
                        # Cache is one minute behind: fold in this minute.
                        curr_val = self._minute_reader.get_value(
                            asset, dt, 'low')
                        val = np.nanmin([last_min, curr_val])
                        entries[asset] = (dt_value, val)
                        lows.append(val)
                        continue
                    else:
                        # Cache is several minutes behind: scan the gap.
                        after_last = pd.Timestamp(
                            last_visited_dt + self._one_min, tz='UTC')
                        window = self._minute_reader.load_raw_arrays(
                            ['low'],
                            after_last,
                            dt,
                            [asset],
                        )[0].T
                        val = np.nanmin(np.append(window, last_min))
                        entries[asset] = (dt_value, val)
                        lows.append(val)
                        continue
                except KeyError:
                    window = self._minute_reader.load_raw_arrays(
                        ['low'],
                        market_open,
                        dt,
                        [asset],
                    )[0].T
                    val = np.nanmin(window)
                    entries[asset] = (dt_value, val)
                    lows.append(val)
                    continue
        return np.array(lows)
    def closes(self, assets, dt):
        """
        Day-so-far close for each asset as of minute ``dt``: the most recent
        non-NaN minute close, forward-filled from earlier in the session.
        """
        market_open, prev_dt, dt_value, entries = self._prelude(dt, 'close')
        closes = []
        session_label = self._trading_calendar.minute_to_session_label(dt)
        def _get_filled_close(asset):
            # Full-session scan for the last non-NaN close; NaN if the
            # session has no data at all.
            window = self._minute_reader.load_raw_arrays(
                ['close'],
                market_open,
                dt,
                [asset],
            )[0]
            try:
                return window[~np.isnan(window)][-1]
            except IndexError:
                return np.NaN
        for asset in assets:
            if not asset.is_alive_for_session(session_label):
                closes.append(np.NaN)
                continue
            if prev_dt is None:
                val = self._minute_reader.get_value(asset, dt, 'close')
                entries[asset] = (dt_value, val)
                closes.append(val)
                continue
            else:
                try:
                    last_visited_dt, last_close = entries[asset]
                    if last_visited_dt == dt_value:
                        closes.append(last_close)
                        continue
                    elif last_visited_dt == prev_dt:
                        # Cache is one minute behind: forward-fill from it
                        # if this minute has no data.
                        val = self._minute_reader.get_value(
                            asset, dt, 'close')
                        if pd.isnull(val):
                            val = last_close
                        entries[asset] = (dt_value, val)
                        closes.append(val)
                        continue
                    else:
                        # Stale cache: fall back to a full-session fill.
                        val = self._minute_reader.get_value(
                            asset, dt, 'close')
                        if pd.isnull(val):
                            val = _get_filled_close(asset)
                        entries[asset] = (dt_value, val)
                        closes.append(val)
                        continue
                except KeyError:
                    val = self._minute_reader.get_value(
                        asset, dt, 'close')
                    if pd.isnull(val):
                        val = _get_filled_close(asset)
                    entries[asset] = (dt_value, val)
                    closes.append(val)
                    continue
        return np.array(closes)
    def volumes(self, assets, dt):
        """
        Day-so-far volume for each asset as of minute ``dt``: the NaN-aware
        sum of minute volumes since market open (0 for dead assets).
        """
        market_open, prev_dt, dt_value, entries = self._prelude(dt, 'volume')
        volumes = []
        session_label = self._trading_calendar.minute_to_session_label(dt)
        for asset in assets:
            if not asset.is_alive_for_session(session_label):
                volumes.append(0)
                continue
            if prev_dt is None:
                val = self._minute_reader.get_value(asset, dt, 'volume')
                entries[asset] = (dt_value, val)
                volumes.append(val)
                continue
            else:
                try:
                    last_visited_dt, last_total = entries[asset]
                    if last_visited_dt == dt_value:
                        volumes.append(last_total)
                        continue
                    elif last_visited_dt == prev_dt:
                        # Cache is one minute behind: add this minute only.
                        val = self._minute_reader.get_value(
                            asset, dt, 'volume')
                        val += last_total
                        entries[asset] = (dt_value, val)
                        volumes.append(val)
                        continue
                    else:
                        # Cache is several minutes behind: sum the gap.
                        after_last = pd.Timestamp(
                            last_visited_dt + self._one_min, tz='UTC')
                        window = self._minute_reader.load_raw_arrays(
                            ['volume'],
                            after_last,
                            dt,
                            [asset],
                        )[0]
                        val = np.nansum(window) + last_total
                        entries[asset] = (dt_value, val)
                        volumes.append(val)
                        continue
                except KeyError:
                    window = self._minute_reader.load_raw_arrays(
                        ['volume'],
                        market_open,
                        dt,
                        [asset],
                    )[0]
                    val = np.nansum(window)
                    entries[asset] = (dt_value, val)
                    volumes.append(val)
                    continue
        return np.array(volumes)
class MinuteResampleSessionBarReader(SessionBarReader):
    """
    A ``SessionBarReader`` that computes session bars on the fly by
    resampling the bars of an underlying minute bar reader.
    """
    def __init__(self, calendar, minute_bar_reader):
        self._calendar = calendar
        self._minute_bar_reader = minute_bar_reader
    def _get_resampled(self, columns, start_session, end_session, assets):
        """
        Load minute data spanning the requested sessions and aggregate it
        per session via ``minute_to_session``.  Returns one array of shape
        (num_sessions, num_assets) per requested column.
        """
        range_open = self._calendar.session_open(start_session)
        range_close = self._calendar.session_close(end_session)
        minute_data = self._minute_bar_reader.load_raw_arrays(
            columns,
            range_open,
            range_close,
            assets,
        )
        # Get the index of the close minute for each session in the range.
        # If the range contains only one session, the only close in the range
        # is the last minute in the data. Otherwise, we need to get all the
        # session closes and find their indices in the range of minutes.
        if start_session == end_session:
            close_ilocs = np.array([len(minute_data[0]) - 1], dtype=np.int64)
        else:
            minutes = self._calendar.minutes_in_range(
                range_open,
                range_close,
            )
            session_closes = self._calendar.session_closes_in_range(
                start_session,
                end_session,
            )
            close_ilocs = minutes.searchsorted(session_closes.values)
        results = []
        shape = (len(close_ilocs), len(assets))
        for col in columns:
            # Price columns default to NaN; volume defaults to 0.
            if col != 'volume':
                out = np.full(shape, np.nan)
            else:
                out = np.zeros(shape, dtype=np.uint32)
            results.append(out)
        # Aggregate each (asset, column) series into its output column.
        for i in range(len(assets)):
            for j, column in enumerate(columns):
                data = minute_data[j][:, i]
                minute_to_session(column, close_ilocs, data, results[j][:, i])
        return results
    @property
    def trading_calendar(self):
        return self._calendar
    def load_raw_arrays(self, columns, start_dt, end_dt, sids):
        return self._get_resampled(columns, start_dt, end_dt, sids)
    def get_value(self, sid, session, colname):
        # WARNING: This will need caching or other optimization if used in a
        # tight loop.
        # This was developed to complete interface, but has not been tuned
        # for real world use.
        return self._get_resampled([colname], session, session, [sid])[0][0][0]
    @lazyval
    def sessions(self):
        # Sessions spanned by the underlying minute reader's data.
        cal = self._calendar
        first = self._minute_bar_reader.first_trading_day
        last = cal.minute_to_session_label(
            self._minute_bar_reader.last_available_dt)
        return cal.sessions_in_range(first, last)
    @lazyval
    def last_available_dt(self):
        return self.trading_calendar.minute_to_session_label(
            self._minute_bar_reader.last_available_dt
        )
    @property
    def first_trading_day(self):
        return self._minute_bar_reader.first_trading_day
    def get_last_traded_dt(self, asset, dt):
        return self.trading_calendar.minute_to_session_label(
            self._minute_bar_reader.get_last_traded_dt(asset, dt))
class ReindexBarReader(with_metaclass(ABCMeta)):
    """
    Abstract wrapper that re-aligns a reader's output onto a (potentially
    wider) target trading calendar, filling dts absent from the wrapped
    reader's calendar with NaN (prices) or 0 (volume).

    Subclasses define ``_outer_dts``/``_inner_dts`` at minute or session
    frequency.
    """
    def __init__(self,
                 trading_calendar,
                 reader,
                 first_trading_session,
                 last_trading_session):
        self._trading_calendar = trading_calendar
        self._reader = reader
        self._first_trading_session = first_trading_session
        self._last_trading_session = last_trading_session
    @property
    def last_available_dt(self):
        return self._reader.last_available_dt
    def get_last_traded_dt(self, sid, dt):
        return self._reader.get_last_traded_dt(sid, dt)
    @property
    def first_trading_day(self):
        return self._reader.first_trading_day
    def get_value(self, sid, dt, field):
        # Give an empty result if no data is present.
        try:
            return self._reader.get_value(sid, dt, field)
        except NoDataOnDate:
            if field == 'volume':
                return 0
            else:
                return np.nan
    @abstractmethod
    def _outer_dts(self, start_dt, end_dt):
        # dts of the target calendar (implemented by subclasses).
        raise NotImplementedError
    @abstractmethod
    def _inner_dts(self, start_dt, end_dt):
        # dts of the wrapped reader's calendar (implemented by subclasses).
        raise NotImplementedError
    @property
    def trading_calendar(self):
        return self._trading_calendar
    @lazyval
    def sessions(self):
        return self.trading_calendar.sessions_in_range(
            self._first_trading_session,
            self._last_trading_session
        )
    def load_raw_arrays(self, fields, start_dt, end_dt, sids):
        """
        Load arrays indexed by the outer calendar's dts; positions with no
        inner data stay NaN (prices) or 0 (volume).
        """
        outer_dts = self._outer_dts(start_dt, end_dt)
        inner_dts = self._inner_dts(start_dt, end_dt)
        # Position of each inner dt within the outer index.
        indices = outer_dts.searchsorted(inner_dts)
        shape = len(outer_dts), len(sids)
        outer_results = []
        if len(inner_dts) > 0:
            inner_results = self._reader.load_raw_arrays(
                fields, inner_dts[0], inner_dts[-1], sids)
        else:
            inner_results = None
        for i, field in enumerate(fields):
            if field != 'volume':
                out = np.full(shape, np.nan)
            else:
                out = np.zeros(shape, dtype=np.uint32)
            if inner_results is not None:
                # Scatter the inner rows into their outer positions.
                out[indices] = inner_results[i]
            outer_results.append(out)
        return outer_results
class ReindexMinuteBarReader(ReindexBarReader, MinuteBarReader):
    """
    Minute-frequency reindexing reader.  See: ``ReindexBarReader``.
    """
    def _outer_dts(self, start_dt, end_dt):
        # Minutes of the target (wider) trading calendar.
        return self._trading_calendar.minutes_in_range(start_dt, end_dt)
    def _inner_dts(self, start_dt, end_dt):
        # Minutes actually covered by the wrapped reader's calendar.
        return self._reader.calendar.minutes_in_range(start_dt, end_dt)
class ReindexSessionBarReader(ReindexBarReader, SessionBarReader):
    """
    Session-frequency reindexing reader.  See: ``ReindexBarReader``.
    """
    def _outer_dts(self, start_dt, end_dt):
        # Sessions of the target (wider) trading calendar.
        return self.trading_calendar.sessions_in_range(start_dt, end_dt)
    def _inner_dts(self, start_dt, end_dt):
        # Sessions actually covered by the wrapped reader's calendar.
        return self._reader.trading_calendar.sessions_in_range(
            start_dt, end_dt)
| true | true |
f725827d5a741e222353b63795ed67c9692af41b | 1,201 | py | Python | load_testing/ial2_sign_up.locustfile.py | isabella232/identity-loadtest | d915fe5920978672246a1de46b0d9530f7d38fcc | [
"CC0-1.0"
] | null | null | null | load_testing/ial2_sign_up.locustfile.py | isabella232/identity-loadtest | d915fe5920978672246a1de46b0d9530f7d38fcc | [
"CC0-1.0"
] | 1 | 2021-02-24T02:55:22.000Z | 2021-02-24T02:55:22.000Z | load_testing/ial2_sign_up.locustfile.py | isabella232/identity-loadtest | d915fe5920978672246a1de46b0d9530f7d38fcc | [
"CC0-1.0"
] | null | null | null | from locust import HttpUser, TaskSet, task, between
from common_flows import flow_ial2_proofing, flow_sign_up, flow_helper
class IAL2SignUpLoad(TaskSet):
    """Locust task set that signs up a new user and completes IAL2 proofing.

    Notes on locust semantics: ``@task(<weight>)`` controls scheduling —
    a weight of 3 executes 3x as often as a weight of 1.  Statements inside
    a task run synchronously; tasks themselves are scheduled asynchronously.
    """

    # Preload drivers license data once at class-definition time so each
    # simulated user does not re-read the fixture files.
    license_front = flow_helper.load_fixture("mont-front.jpeg")
    license_back = flow_helper.load_fixture("mont-back.jpeg")

    def on_start(self):
        print("*** Starting Sign-Up and IAL2 proof load tests ***")

    def on_stop(self):
        print("*** Ending IAL2 Sign-Up load tests ***")

    # NOTE: the original class body held two bare string literals here as
    # pseudo-comments; they were no-op expression statements (not
    # docstrings) and have been converted into the class docstring above.
    @task(1)
    def sign_up_and_proof_load_test(self):
        """Run one full sign-up + IAL2 proofing session, then log out."""
        # Sign up flow
        flow_sign_up.do_sign_up(self)

        # Get /account page
        flow_helper.do_request(self, "get", "/account", "/account")

        # IAL2 Proofing flow
        flow_ial2_proofing.do_ial2_proofing(self)

        # Get the /account page now
        flow_helper.do_request(self, "get", "/account", "/account")

        # Now log out
        flow_helper.do_request(self, "get", "/logout", "/")
class WebsiteUser(HttpUser):
    """Locust user that runs the IAL2 sign-up/proofing task set."""
    tasks = [IAL2SignUpLoad]
    # Wait 5-9 seconds between task executions.
    wait_time = between(5, 9)
| 29.292683 | 70 | 0.654455 | from locust import HttpUser, TaskSet, task, between
from common_flows import flow_ial2_proofing, flow_sign_up, flow_helper
class IAL2SignUpLoad(TaskSet):
    """Locust task set that signs up a new user and completes IAL2 proofing."""
    # Drivers license fixtures loaded once at class-definition time.
    license_front = flow_helper.load_fixture("mont-front.jpeg")
    license_back = flow_helper.load_fixture("mont-back.jpeg")
    def on_start(self):
        print("*** Starting Sign-Up and IAL2 proof load tests ***")
    def on_stop(self):
        print("*** Ending IAL2 Sign-Up load tests ***")
    @task(1)
    def sign_up_and_proof_load_test(self):
        """Run one full sign-up + IAL2 proofing session, then log out."""
        # Sign-up flow, then fetch /account.
        flow_sign_up.do_sign_up(self)
        flow_helper.do_request(self, "get", "/account", "/account")
        # IAL2 proofing flow, then fetch /account again and log out.
        flow_ial2_proofing.do_ial2_proofing(self)
        flow_helper.do_request(self, "get", "/account", "/account")
        flow_helper.do_request(self, "get", "/logout", "/")
class WebsiteUser(HttpUser):
    """Locust user that runs the IAL2 sign-up/proofing task set."""
    tasks = [IAL2SignUpLoad]
    # Wait 5-9 seconds between task executions.
    wait_time = between(5, 9)
| true | true |
f725827e2a3d139f12b578cfd7d4e3af8491e768 | 927 | py | Python | bin/pdbqt2pdb_ref.py | gicsaw/pdbtools | 10a9441f0345d34e90ca1c454a6aa460b7da926d | [
"MIT"
] | null | null | null | bin/pdbqt2pdb_ref.py | gicsaw/pdbtools | 10a9441f0345d34e90ca1c454a6aa460b7da926d | [
"MIT"
] | null | null | null | bin/pdbqt2pdb_ref.py | gicsaw/pdbtools | 10a9441f0345d34e90ca1c454a6aa460b7da926d | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import pdbtools.ligand_tools as ligand_tools
def main():
    """Command-line entry point: convert a ligand pdbqt file to pdb,
    using a reference pdb file.

    On conversion failure the error returned by
    ``ligand_tools.pdbqt_to_pdb_ref`` is printed to stdout.
    """
    import argparse

    parser = argparse.ArgumentParser(
        description='convert pdbqt to pdb using reference pdb file')
    parser.add_argument('-i', '--input_file', required=True,
                        help='input ligand pdbqt file')
    parser.add_argument('-o', '--output_file', required=True,
                        help='output ligand pdb file')
    parser.add_argument('-r', '--ref_file', required=True,
                        help='reference ligand pdb file')
    args = parser.parse_args()

    # Returns None on success, otherwise an error description.
    error = ligand_tools.pdbqt_to_pdb_ref(
        args.input_file, args.output_file, args.ref_file)
    if error is not None:
        print(error)
# Allow use both as an importable module and as a command-line script.
if __name__ == "__main__":
    main()
| 30.9 | 76 | 0.629989 |
import pdbtools.ligand_tools as ligand_tools
def main():
    """Command-line entry point: convert a ligand pdbqt file to pdb,
    using a reference pdb file for the conversion."""
    import argparse
    title_line = 'convert pdbqt to pdb using reference pdb file'
    parser = argparse.ArgumentParser(description=title_line)
    parser.add_argument('-i', '--input_file', required=True,
                        help='input ligand pdbqt file')
    parser.add_argument('-o', '--output_file', required=True,
                        help='output ligand pdb file')
    parser.add_argument('-r', '--ref_file', required=True,
                        help='reference ligand pdb file')
    args = parser.parse_args()
    ligand_input_file = args.input_file
    ligand_output_file = args.output_file
    ref_file = args.ref_file
    # pdbqt_to_pdb_ref returns None on success, otherwise an error value.
    e = ligand_tools.pdbqt_to_pdb_ref(ligand_input_file, ligand_output_file,
                                      ref_file)
    if e is not None:
        print(e)
# Allow use both as an importable module and as a command-line script.
if __name__ == "__main__":
    main()
| true | true |
f72583f1e1e8e5ca6b7b3190c8fd9abe0893dc59 | 9,848 | py | Python | pysnmp-with-texts/CISCO-HARDWARE-IP-VERIFY-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 8 | 2019-05-09T17:04:00.000Z | 2021-06-09T06:50:51.000Z | pysnmp-with-texts/CISCO-HARDWARE-IP-VERIFY-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 4 | 2019-05-31T16:42:59.000Z | 2020-01-31T21:57:17.000Z | pysnmp-with-texts/CISCO-HARDWARE-IP-VERIFY-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z | #
# PySNMP MIB module CISCO-HARDWARE-IP-VERIFY-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CISCO-HARDWARE-IP-VERIFY-MIB
# Produced by pysmi-0.3.4 at Wed May 1 11:59:41 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, ValueSizeConstraint, SingleValueConstraint, ConstraintsUnion, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ValueSizeConstraint", "SingleValueConstraint", "ConstraintsUnion", "ValueRangeConstraint")
ciscoMgmt, = mibBuilder.importSymbols("CISCO-SMI", "ciscoMgmt")
ObjectGroup, NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "ObjectGroup", "NotificationGroup", "ModuleCompliance")
Gauge32, Integer32, Counter64, MibScalar, MibTable, MibTableRow, MibTableColumn, ModuleIdentity, NotificationType, iso, Unsigned32, Counter32, Bits, MibIdentifier, TimeTicks, ObjectIdentity, IpAddress = mibBuilder.importSymbols("SNMPv2-SMI", "Gauge32", "Integer32", "Counter64", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ModuleIdentity", "NotificationType", "iso", "Unsigned32", "Counter32", "Bits", "MibIdentifier", "TimeTicks", "ObjectIdentity", "IpAddress")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
# Module identity: registers this MIB under the Cisco management subtree
# (1.3.6.1.4.1.9.9.804) and attaches revision/contact metadata.
ciscoHardwareIpVerifyMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 9, 9, 804))
ciscoHardwareIpVerifyMIB.setRevisions(('2012-09-04 00:00',))
# setRevisionsDescriptions exists only on newer pysnmp builder versions.
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    if mibBuilder.loadTexts: ciscoHardwareIpVerifyMIB.setRevisionsDescriptions(('Initial version of this MIB module.',))
if mibBuilder.loadTexts: ciscoHardwareIpVerifyMIB.setLastUpdated('201209040000Z')
if mibBuilder.loadTexts: ciscoHardwareIpVerifyMIB.setOrganization('Cisco Systems, Inc.')
if mibBuilder.loadTexts: ciscoHardwareIpVerifyMIB.setContactInfo('Cisco Systems Customer Service Postal: 170 W Tasman Drive San Jose, CA 95134 USA Tel: +1 800 553-NETS E-mail: cs-lan-switch-snmp@cisco.com')
if mibBuilder.loadTexts: ciscoHardwareIpVerifyMIB.setDescription("This MIB module defines management objects for configuration and monitoring of the Intrusion Detection System (IDS) that checks for IP packet verification. The following terms are used throughout the MIB: IDS: Intrusion Detection System CRC: Cyclic Redundancy Check DF: Don't Fragment ")
# Top-level branches for notifications (.0), objects (.1) and conformance (.2).
ciscoHardwareIpVerifyMIBNotifs = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 804, 0))
ciscoHardwareIpVerifyMIBObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 804, 1))
ciscoHardwareIpVerifyMIBConform = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 804, 2))
# chivIpVerifyTable: one row per (IP type, IDS check type) pair.
chivIpVerifyTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 804, 1, 1), )
if mibBuilder.loadTexts: chivIpVerifyTable.setStatus('current')
if mibBuilder.loadTexts: chivIpVerifyTable.setDescription('A list of IDS check configuration and statistical information for each IP type and each IDS check type on the management device.')
# Conceptual row, indexed by (chivIpVerifyCheckIpType, chivIpVerifyCheckTypeName).
chivIpVerifyEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 804, 1, 1, 1), ).setIndexNames((0, "CISCO-HARDWARE-IP-VERIFY-MIB", "chivIpVerifyCheckIpType"), (0, "CISCO-HARDWARE-IP-VERIFY-MIB", "chivIpVerifyCheckTypeName"))
if mibBuilder.loadTexts: chivIpVerifyEntry.setStatus('current')
if mibBuilder.loadTexts: chivIpVerifyEntry.setDescription('An entry contains the IDS packet check configuration information and the associated counters.')
# Index column 1: IP address family of the check (ipv4=1, ipv6=2).
chivIpVerifyCheckIpType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 804, 1, 1, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("ipv4", 1), ("ipv6", 2))))
if mibBuilder.loadTexts: chivIpVerifyCheckIpType.setStatus('current')
if mibBuilder.loadTexts: chivIpVerifyCheckIpType.setDescription('This object indicates the IP address type for IDS packet check.')
# Index column 2: which of the 17 IDS packet-verification checks the row describes.
chivIpVerifyCheckTypeName = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 804, 1, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17))).clone(namedValues=NamedValues(("addressSrcBroadcast", 1), ("addressSrcMulticast", 2), ("addressDestZero", 3), ("addressIdentical", 4), ("addressSrcReserved", 5), ("addressClassE", 6), ("checksum", 7), ("protocol", 8), ("fragment", 9), ("lengthMinimum", 10), ("lengthConsistent", 11), ("lengthMaximumFragment", 12), ("lengthMaximumUdp", 13), ("lengthMaximumTcp", 14), ("tcpFlags", 15), ("tcpTinyFlags", 16), ("version", 17))))
if mibBuilder.loadTexts: chivIpVerifyCheckTypeName.setStatus('current')
# FIX: this single-quoted description string was broken across two physical
# lines by a raw newline (a SyntaxError in Python); rejoined into one line.
if mibBuilder.loadTexts: chivIpVerifyCheckTypeName.setDescription('This object indicates the IDS packet check type which can be configured on the device. Each check type is a specific criteria. Those IP packets that matches the certain criteria are dropped. addressSrcBroadcast(1) Drop the IPv4 packet if the source address is a broadcast IPv4 address. addressSrcMulticast(2) Drop the IPv4 packet if the source address is a multicast IPv4 address. addressDestZero(3) Drop the IPv4 packet if the destination address is 0.0.0.0. addressIdentical(4) Drop the IPv4 packet if the source IPv4 address is identical to destination IPv4 address. addressSrcReserved(5) Drop the IPv4 packet if the source address is a reserved IPv4 address. addressClassE(6) Drop the IPv4 packet if either the source address or destination address is a class E IPv4 address. checksum(7) Drops the IPv4 packet if its checksum is invalid. protocol(8) Drop the IPv4 packet if the packet fragment has an invalid IP protocol number fragment(9) Drop the IPv4 packet if the packet fragment has a nonzero offset and the DF bit is active. lengthMinimum(10) Drop the IPv4 packet if the Ethernet frame length is less than the IP packet length plus four octets (the CRC length). lengthConsistent(11) Drop the IPv4 or IPv6 packet where the Ethernet frame size is greater than or equal to the IP packet length plus the Ethernet header. lengthMaximumFragment(12) Drop the IPv4 or IPv6 packet if the maximum fragment offset is greater than 65536. lengthMaximumUdp(13) Drop the IPv4 or IPv6 packet if the IP payload length is less than the UDP packet length. lengthMaximumTcp(14) Drop the IPv4 or IPv6 packet if the TCP length is greater than the IP payload length. tcpFlags(15) Drop the IPv4 packet if verification of TCP packet header fails. tcpTinyFlags(16) Drop the IPv4 or IPv6 packet if the IP fragment offset is 1, or if the IP fragment offset is 0 and the IP payload length is less than 16. version(17) Drop the IPv4 packet if the Ethertype is not set to 4 (IPv4); and drops the IPv6 packet if the Ethertype is not set to 6 (IPv6).')
# Read-write column: enables/disables the check described by this row.
chivIpVerifyCheckStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 804, 1, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disabled", 1), ("enabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: chivIpVerifyCheckStatus.setStatus('current')
if mibBuilder.loadTexts: chivIpVerifyCheckStatus.setDescription('This object specifies the IDS packet check configuration status.')
# Read-only 64-bit counter: packets dropped by this check.
chivIpVerifyPacketsDropped = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 804, 1, 1, 1, 4), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: chivIpVerifyPacketsDropped.setStatus('current')
if mibBuilder.loadTexts: chivIpVerifyPacketsDropped.setDescription('This object indicates the number of packets which has been dropped.')
# Conformance section: compliance statement and object group.
ciscoHardwareIpVerifyMIBCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 804, 2, 1))
ciscoHardwareIpVerifyMIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 804, 2, 2))
ciscoHardwareIpVerifyMIBCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 9, 9, 804, 2, 1, 1)).setObjects(("CISCO-HARDWARE-IP-VERIFY-MIB", "ciscoHardwareIpVerifyMIBStatisticGroup"))
# setStatus with reassignment is only valid on newer pysnmp builder versions.
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    ciscoHardwareIpVerifyMIBCompliance = ciscoHardwareIpVerifyMIBCompliance.setStatus('current')
if mibBuilder.loadTexts: ciscoHardwareIpVerifyMIBCompliance.setDescription('The compliance statement for the CISCO-HARDWARE-IP-VERIFY-MIB.')
ciscoHardwareIpVerifyMIBStatisticGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 804, 2, 2, 1)).setObjects(("CISCO-HARDWARE-IP-VERIFY-MIB", "chivIpVerifyCheckStatus"), ("CISCO-HARDWARE-IP-VERIFY-MIB", "chivIpVerifyPacketsDropped"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    ciscoHardwareIpVerifyMIBStatisticGroup = ciscoHardwareIpVerifyMIBStatisticGroup.setStatus('current')
if mibBuilder.loadTexts: ciscoHardwareIpVerifyMIBStatisticGroup.setDescription('A collection of objects that provides configuration and statistical information for IDS packet check.')
# Export all public symbols for consumption by other MIB modules.
mibBuilder.exportSymbols("CISCO-HARDWARE-IP-VERIFY-MIB", ciscoHardwareIpVerifyMIBConform=ciscoHardwareIpVerifyMIBConform, chivIpVerifyCheckIpType=chivIpVerifyCheckIpType, ciscoHardwareIpVerifyMIBStatisticGroup=ciscoHardwareIpVerifyMIBStatisticGroup, ciscoHardwareIpVerifyMIBGroups=ciscoHardwareIpVerifyMIBGroups, ciscoHardwareIpVerifyMIBCompliances=ciscoHardwareIpVerifyMIBCompliances, chivIpVerifyTable=chivIpVerifyTable, chivIpVerifyCheckStatus=chivIpVerifyCheckStatus, ciscoHardwareIpVerifyMIBObjects=ciscoHardwareIpVerifyMIBObjects, PYSNMP_MODULE_ID=ciscoHardwareIpVerifyMIB, ciscoHardwareIpVerifyMIBNotifs=ciscoHardwareIpVerifyMIBNotifs, chivIpVerifyEntry=chivIpVerifyEntry, ciscoHardwareIpVerifyMIBCompliance=ciscoHardwareIpVerifyMIBCompliance, chivIpVerifyCheckTypeName=chivIpVerifyCheckTypeName, chivIpVerifyPacketsDropped=chivIpVerifyPacketsDropped, ciscoHardwareIpVerifyMIB=ciscoHardwareIpVerifyMIB)
| 172.77193 | 2,096 | 0.792141 |
Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, ValueSizeConstraint, SingleValueConstraint, ConstraintsUnion, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ValueSizeConstraint", "SingleValueConstraint", "ConstraintsUnion", "ValueRangeConstraint")
ciscoMgmt, = mibBuilder.importSymbols("CISCO-SMI", "ciscoMgmt")
ObjectGroup, NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "ObjectGroup", "NotificationGroup", "ModuleCompliance")
Gauge32, Integer32, Counter64, MibScalar, MibTable, MibTableRow, MibTableColumn, ModuleIdentity, NotificationType, iso, Unsigned32, Counter32, Bits, MibIdentifier, TimeTicks, ObjectIdentity, IpAddress = mibBuilder.importSymbols("SNMPv2-SMI", "Gauge32", "Integer32", "Counter64", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ModuleIdentity", "NotificationType", "iso", "Unsigned32", "Counter32", "Bits", "MibIdentifier", "TimeTicks", "ObjectIdentity", "IpAddress")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
ciscoHardwareIpVerifyMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 9, 9, 804))
ciscoHardwareIpVerifyMIB.setRevisions(('2012-09-04 00:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: ciscoHardwareIpVerifyMIB.setRevisionsDescriptions(('Initial version of this MIB module.',))
if mibBuilder.loadTexts: ciscoHardwareIpVerifyMIB.setLastUpdated('201209040000Z')
if mibBuilder.loadTexts: ciscoHardwareIpVerifyMIB.setOrganization('Cisco Systems, Inc.')
if mibBuilder.loadTexts: ciscoHardwareIpVerifyMIB.setContactInfo('Cisco Systems Customer Service Postal: 170 W Tasman Drive San Jose, CA 95134 USA Tel: +1 800 553-NETS E-mail: cs-lan-switch-snmp@cisco.com')
if mibBuilder.loadTexts: ciscoHardwareIpVerifyMIB.setDescription("This MIB module defines management objects for configuration and monitoring of the Intrusion Detection System (IDS) that checks for IP packet verification. The following terms are used throughout the MIB: IDS: Intrusion Detection System CRC: Cyclic Redundancy Check DF: Don't Fragment ")
ciscoHardwareIpVerifyMIBNotifs = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 804, 0))
ciscoHardwareIpVerifyMIBObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 804, 1))
ciscoHardwareIpVerifyMIBConform = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 804, 2))
chivIpVerifyTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 804, 1, 1), )
if mibBuilder.loadTexts: chivIpVerifyTable.setStatus('current')
if mibBuilder.loadTexts: chivIpVerifyTable.setDescription('A list of IDS check configuration and statistical information for each IP type and each IDS check type on the management device.')
chivIpVerifyEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 804, 1, 1, 1), ).setIndexNames((0, "CISCO-HARDWARE-IP-VERIFY-MIB", "chivIpVerifyCheckIpType"), (0, "CISCO-HARDWARE-IP-VERIFY-MIB", "chivIpVerifyCheckTypeName"))
if mibBuilder.loadTexts: chivIpVerifyEntry.setStatus('current')
if mibBuilder.loadTexts: chivIpVerifyEntry.setDescription('An entry contains the IDS packet check configuration information and the associated counters.')
chivIpVerifyCheckIpType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 804, 1, 1, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("ipv4", 1), ("ipv6", 2))))
if mibBuilder.loadTexts: chivIpVerifyCheckIpType.setStatus('current')
if mibBuilder.loadTexts: chivIpVerifyCheckIpType.setDescription('This object indicates the IP address type for IDS packet check.')
chivIpVerifyCheckTypeName = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 804, 1, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17))).clone(namedValues=NamedValues(("addressSrcBroadcast", 1), ("addressSrcMulticast", 2), ("addressDestZero", 3), ("addressIdentical", 4), ("addressSrcReserved", 5), ("addressClassE", 6), ("checksum", 7), ("protocol", 8), ("fragment", 9), ("lengthMinimum", 10), ("lengthConsistent", 11), ("lengthMaximumFragment", 12), ("lengthMaximumUdp", 13), ("lengthMaximumTcp", 14), ("tcpFlags", 15), ("tcpTinyFlags", 16), ("version", 17))))
if mibBuilder.loadTexts: chivIpVerifyCheckTypeName.setStatus('current')
if mibBuilder.loadTexts: chivIpVerifyCheckTypeName.setDescription('This object indicates the IDS packet check type which can be configured on the device. Each check type is a specific criteria. Those IP packets that matches the certain criteria are dropped. addressSrcBroadcast(1) Drop the IPv4 packet if the source address is a broadcast IPv4 address. addressSrcMulticast(2) Drop the IPv4 packet if the source address is a multicast IPv4 address. addressDestZero(3) Drop the IPv4 packet if the destination address is 0.0.0.0. addressIdentical(4) Drop the IPv4 packet if the source IPv4 address is identical to destination IPv4 address. addressSrcReserved(5) Drop the IPv4 packet if the source address is a reserved IPv4 address. addressClassE(6) Drop the IPv4 packet if either the source address or destination address is a class E IPv4 address. checksum(7) Drops the IPv4 packet if its checksum is invalid. protocol(8) Drop the IPv4 packet if the packet fragment has an invalid IP protocol number fragment(9) Drop the IPv4 packet if the packet fragment has a nonzero offset and the DF bit is active. lengthMinimum(10) Drop the IPv4 packet if the Ethernet frame length is less than the IP packet length plus four octets (the CRC length). lengthConsistent(11) Drop the IPv4 or IPv6 packet where the Ethernet frame size is greater than or equal to the IP packet length plus the Ethernet header. lengthMaximumFragment(12) Drop the IPv4 or IPv6 packet if the maximum fragment offset is greater than 65536. lengthMaximumUdp(13) Drop the IPv4 or IPv6 packet if the IP payload length is less than the UDP packet length. lengthMaximumTcp(14) Drop the IPv4 or IPv6 packet if the TCP length is greater than the IP payload length. tcpFlags(15) Drop the IPv4 packet if verification of TCP packet header fails. tcpTinyFlags(16) Drop the IPv4 or IPv6 packet if the IP fragment offset is 1, or if the IP fragment offset is 0 and the IP payload length is less than 16. 
version(17) Drop the IPv4 packet if the Ethertype is not set to 4 (IPv4); and drops the IPv6 packet if the Ethertype is not set to 6 (IPv6).')
chivIpVerifyCheckStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 804, 1, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disabled", 1), ("enabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: chivIpVerifyCheckStatus.setStatus('current')
if mibBuilder.loadTexts: chivIpVerifyCheckStatus.setDescription('This object specifies the IDS packet check configuration status.')
chivIpVerifyPacketsDropped = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 804, 1, 1, 1, 4), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: chivIpVerifyPacketsDropped.setStatus('current')
if mibBuilder.loadTexts: chivIpVerifyPacketsDropped.setDescription('This object indicates the number of packets which has been dropped.')
ciscoHardwareIpVerifyMIBCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 804, 2, 1))
ciscoHardwareIpVerifyMIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 804, 2, 2))
ciscoHardwareIpVerifyMIBCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 9, 9, 804, 2, 1, 1)).setObjects(("CISCO-HARDWARE-IP-VERIFY-MIB", "ciscoHardwareIpVerifyMIBStatisticGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoHardwareIpVerifyMIBCompliance = ciscoHardwareIpVerifyMIBCompliance.setStatus('current')
if mibBuilder.loadTexts: ciscoHardwareIpVerifyMIBCompliance.setDescription('The compliance statement for the CISCO-HARDWARE-IP-VERIFY-MIB.')
ciscoHardwareIpVerifyMIBStatisticGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 804, 2, 2, 1)).setObjects(("CISCO-HARDWARE-IP-VERIFY-MIB", "chivIpVerifyCheckStatus"), ("CISCO-HARDWARE-IP-VERIFY-MIB", "chivIpVerifyPacketsDropped"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoHardwareIpVerifyMIBStatisticGroup = ciscoHardwareIpVerifyMIBStatisticGroup.setStatus('current')
if mibBuilder.loadTexts: ciscoHardwareIpVerifyMIBStatisticGroup.setDescription('A collection of objects that provides configuration and statistical information for IDS packet check.')
mibBuilder.exportSymbols("CISCO-HARDWARE-IP-VERIFY-MIB", ciscoHardwareIpVerifyMIBConform=ciscoHardwareIpVerifyMIBConform, chivIpVerifyCheckIpType=chivIpVerifyCheckIpType, ciscoHardwareIpVerifyMIBStatisticGroup=ciscoHardwareIpVerifyMIBStatisticGroup, ciscoHardwareIpVerifyMIBGroups=ciscoHardwareIpVerifyMIBGroups, ciscoHardwareIpVerifyMIBCompliances=ciscoHardwareIpVerifyMIBCompliances, chivIpVerifyTable=chivIpVerifyTable, chivIpVerifyCheckStatus=chivIpVerifyCheckStatus, ciscoHardwareIpVerifyMIBObjects=ciscoHardwareIpVerifyMIBObjects, PYSNMP_MODULE_ID=ciscoHardwareIpVerifyMIB, ciscoHardwareIpVerifyMIBNotifs=ciscoHardwareIpVerifyMIBNotifs, chivIpVerifyEntry=chivIpVerifyEntry, ciscoHardwareIpVerifyMIBCompliance=ciscoHardwareIpVerifyMIBCompliance, chivIpVerifyCheckTypeName=chivIpVerifyCheckTypeName, chivIpVerifyPacketsDropped=chivIpVerifyPacketsDropped, ciscoHardwareIpVerifyMIB=ciscoHardwareIpVerifyMIB)
| true | true |
f72583f3abd30086f322b31d478f793386b37978 | 1,402 | py | Python | plugins/infocenteruri.py | LandGrey/taoman | 3ad45823e7af0a8a9ee6e1296446c3a6aab43fe4 | [
"MIT"
] | 207 | 2017-05-03T10:31:45.000Z | 2022-03-26T09:49:03.000Z | plugins/infocenteruri.py | the8robot/taoman | 3ad45823e7af0a8a9ee6e1296446c3a6aab43fe4 | [
"MIT"
] | 1 | 2020-03-29T03:28:29.000Z | 2020-03-29T04:41:03.000Z | plugins/infocenteruri.py | the8robot/taoman | 3ad45823e7af0a8a9ee6e1296446c3a6aab43fe4 | [
"MIT"
] | 59 | 2017-05-06T02:43:36.000Z | 2022-01-22T12:39:07.000Z | #!/usr/bin/env python
# coding:utf-8
#
"""
Copyright (c) 2017 LandGrey (https://github.com/LandGrey/taoman)
License: MIT
"""
import urllib
import requests
from lib.fun import crawl_link_handle
from lib.config import baidu_base_url, get_head, timeout, baidu_first_pattern, self_pattern, intranet_ip_pattern, \
ip_simple_pattern
def crawlinfocenter(domain):
    """Collect subdomains of ``domain`` via its information/network-center page.

    Searches Baidu for ``site:<domain> 信息化|网络中心``, follows the first hit,
    and harvests links from that page which either mention the target domain
    or are bare, non-intranet IP addresses.

    Args:
        domain: root domain to enumerate subdomains for.

    Returns:
        list of discovered domain/IP strings (possibly empty).
    """
    domains = []
    query = {'wd': 'site:{0} 信息化|网络中心'.format(domain)}
    requests.packages.urllib3.disable_warnings()
    # Python 2 style urlencode (matches the rest of this project).
    req = requests.get(baidu_base_url + urllib.urlencode(query),
                       headers=get_head(), timeout=timeout, verify=False)
    match = baidu_first_pattern.findall(req.text)
    if match:
        info_center_url = crawl_link_handle(match[0][0])
        reqs = requests.get('http://' + info_center_url,
                            headers=get_head(), timeout=timeout, verify=False)
        for m in self_pattern.findall(reqs.text):
            link = crawl_link_handle(m)
            if domain in m:
                domains.append(link)
            # Bug fix: the original nested conditional appended '' for every
            # non-matching link, polluting the result list with empty strings.
            # Now only links that are public (non-intranet) IPs are kept.
            elif (not intranet_ip_pattern.findall(link)
                    and ip_simple_pattern.findall(link)):
                domains.append(link)
    return domains
| 38.944444 | 118 | 0.599857 |
import urllib
import requests
from lib.fun import crawl_link_handle
from lib.config import baidu_base_url, get_head, timeout, baidu_first_pattern, self_pattern, intranet_ip_pattern, \
ip_simple_pattern
def crawlinfocenter(domain):
    """Collect subdomains of *domain* from its information/network-center page.

    Searches Baidu for 'site:<domain> 信息化|网络中心', follows the first result,
    and harvests links that contain the domain or look like public IPs.
    NOTE(review): non-matching links append '' to the result list — callers
    presumably filter empties; confirm before relying on the raw list.
    """
    domains = []
    data = {'wd': 'site:{0} 信息化|网络中心'.format(domain)}
    requests.packages.urllib3.disable_warnings()
    # Baidu search for the domain's info-center page (Python 2 urllib.urlencode).
    req = requests.get(baidu_base_url + urllib.urlencode(data), headers=get_head(), timeout=timeout, verify=False)
    content = req.text
    match = baidu_first_pattern.findall(content)
    if match:
        # Follow only the first search result.
        info_center_url = crawl_link_handle(match[0][0])
        reqs = requests.get('http://' + info_center_url, headers=get_head(), timeout=timeout, verify=False)
        matchs = self_pattern.findall(reqs.text)
        for m in matchs:
            # Keep the link if it mentions the domain, or if it is a bare IP
            # that is not an intranet address; otherwise '' is appended.
            domains.append(crawl_link_handle(m)
                if domain in m
                else (crawl_link_handle(m)
                    if ip_simple_pattern.findall(crawl_link_handle(m)
                    if not intranet_ip_pattern.findall(crawl_link_handle(m))
                    else '')
                    else ''))
    return domains
| true | true |
f725848078385894b2570cd95107b891abee18ba | 7,574 | py | Python | file_hash.py | XVicarious/file_hash | ebab0151dbbd2d162742008d9088ad03a38f495e | [
"MIT"
] | null | null | null | file_hash.py | XVicarious/file_hash | ebab0151dbbd2d162742008d9088ad03a38f495e | [
"MIT"
] | 1 | 2018-10-27T09:02:13.000Z | 2018-10-27T09:02:13.000Z | file_hash.py | XVicarious/file_hash | ebab0151dbbd2d162742008d9088ad03a38f495e | [
"MIT"
] | null | null | null | """Hash your files for easy identification."""
import hashlib
import logging
import os
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
from typing import Dict
from flexget import plugin
from flexget.event import event
from flexget.logger import FlexGetLogger
from .cunit import IECUnit
PLUGIN_ID = 'file_hash'
log: FlexGetLogger = logging.getLogger(PLUGIN_ID)
class FileHashPlugin(object):
    """
    Task class that does the hashing.

    By default file_hash will:
    - Use blake2b if it is available, otherwise it will use MD5
    - Start at 50MiB into the file
        - If the file is less than 50MiB, it starts at the beginning
    - Hashes 25MiB of the file after the starting point
        - If the file does not have 25MiB after the starting point, it will hash from the starting point to the end
    - Choose MAX two 'size', 'start', 'stop'

    Examples:
        # Use file_hash with the default settings.
        file_hash: yes
        # Use sha1 with the rest of the default settings
        file_hash: sha1
        # Hash 1MiB, 25MiB into the file with algorithm SHA256
        file_hash:
            algorithm: sha256
            size: 1
            start: 25
        # Hash from 25MiB in to 35MiB in
        file_hash:
            start: 25
            stop: 35
    """

    @staticmethod
    def __default_algo():
        """Prefer blake2b (fast, modern) and fall back to md5."""
        return 'blake2b' if 'blake2b' in hashlib.algorithms_available else 'md5'

    # Defaults, expressed in MiB.
    hash_size_default = 25
    hash_start_default = 50

    schema = {
        'oneOf': [
            {'type': 'boolean'},
            {'type': 'string',
             'enum': list(hashlib.algorithms_available)},
            {'type': 'object',
             'properties': {
                 'algorithm': {
                     'type': 'string',
                     'enum': list(hashlib.algorithms_available)},
                 'size': {'type': 'integer', 'default': hash_size_default},
                 'start': {'type': 'integer', 'default': hash_start_default},
                 # NOTE(review): 'default': 'boolean' looks like a typo for an
                 # actual boolean default — confirm the intended value.
                 'stop': {'type': 'integer'},
                 'time': {'type': 'boolean', 'default': 'boolean'}}},
        ],
    }

    plugin_fields = {'file_hash_type', 'file_hash_hash', 'file_hash_modified', 'file_hash_bytes'}

    @staticmethod
    def __strict_boolean(check):
        """True only for a literal boolean True (not merely truthy values)."""
        return isinstance(check, bool) and check

    @staticmethod
    def _as_dict(config):
        """Normalize the plugin config (bool | str | dict) to a plain dict.

        A bare string is the algorithm name; booleans mean "all defaults".
        """
        if isinstance(config, dict):
            return config
        if isinstance(config, str):
            return {'algorithm': config}
        return {}

    def __get_algo(self, config):
        # Bug fix: the configured algorithm was previously ignored and the
        # default was always used, making the 'algorithm' option dead.
        return self._as_dict(config).get('algorithm') or self.__default_algo()

    def _hash_portion(self, config):
        """Resolve config into byte-based hashing parameters."""
        conf = self._as_dict(config)
        return {
            'algorithm': self.__get_algo(conf),
            'size': IECUnit.MiB * conf.get('size', self.hash_size_default),
            'start': IECUnit.MiB * conf.get('start', self.hash_start_default),
            # -1 (scaled) is the "no explicit stop" sentinel.
            'stop': IECUnit.MiB * conf.get('stop', -1),
        }

    def compare_entry(self, entry, config):
        """Return True when the entry already carries a hash computed with the
        same settings on an unmodified file, so re-hashing can be skipped."""
        if 'file_hash' not in entry:
            return False
        file_hash = entry['file_hash']
        portion = self._hash_portion(config)
        location = entry['location']
        # Bug fixes: FileHash exposes `size` (the original read a nonexistent
        # `file_size` attribute, raising AttributeError on re-runs), the
        # modification-time match was computed but never used, and stored
        # byte-scaled offsets were compared against raw MiB config values.
        return (file_hash.algorithm == portion['algorithm']
                and file_hash.size == os.path.getsize(location)
                and file_hash.modified == os.path.getmtime(location)
                and file_hash.start == portion['start']
                and file_hash.stop == portion['stop']
                and file_hash.chunk_size == portion['size'])

    def on_task_metainfo(self, task, config):
        """Hash a portion of each entry's file and attach a FileHash to it."""
        log.info('Starting file_hash')
        hash_portion = self._hash_portion(config)
        hasher = hashlib.new(hash_portion['algorithm'])
        log.verbose('Hasing with algorithm: %s', hash_portion['algorithm'])
        log.debug('Hashing %s MiB of each file.', hash_portion['size'])
        log.debug('Hashing starting %s MiB into file.', hash_portion['start'])
        log.debug('Hashing ending at %s MiB.', hash_portion['stop'])
        len_entries = len(task.entries)
        for idx, entry in enumerate(task.entries, start=1):
            location = entry['location']
            file_size = os.path.getsize(location)
            if self.compare_entry(entry, config):
                log.verbose('This file seems to be unmodified, skipping')
                continue
            log.verbose('%s/%s: Hasing %s', idx, len_entries, location)
            # Copy the template hasher so every file starts from a clean state.
            current_hasher = hasher.copy()
            start = hash_portion['start']
            if file_size < start:
                log.debug('The file is only %s MiB, adjusting start location.', float(file_size / IECUnit.MiB))
                if file_size < hash_portion['size']:
                    # Too small even for one chunk: hash from the beginning.
                    log.debug('The file is less than the set size to to hash, setting start position to 0')
                    start = 0
                else:
                    # Hash the trailing chunk of the file.
                    start = file_size - hash_portion['size']
                    log.debug('The size of the file is greater than the set size to hash, '
                              'setting start position to %s MiB', start)
            with open(location, 'rb') as to_hash:
                to_hash.seek(start)
                current_hasher.update(to_hash.read(hash_portion['size']))
            filehash = FileHash(hash_portion, current_hasher.hexdigest(),
                                os.path.getmtime(location), file_size)
            entry['file_hash'] = filehash
            log.debug(filehash)
class FileHash(object):
    """Record of how, when and on what a file hash was computed.

    Attributes:
        algorithm:  name of the hashlib algorithm used.
        file_hash:  hex digest of the hashed portion.
        modified:   file modification time (epoch seconds) at hashing time.
        start:      byte offset where hashing started.
        stop:       byte offset where hashing stopped (negative = sentinel).
        chunk_size: number of bytes hashed.
        size:       total file size in bytes.
    """

    # Class-level defaults kept for backward compatibility with code that
    # inspects attributes before __init__ runs.
    algorithm = None
    file_hash = None
    modified = None
    start = None
    stop = None
    chunk_size = None
    size = None

    def __init__(self, config_settings: Dict, file_hash, modified, size):
        """
        Initialize a FileHash object.

        config_settings -- dict with 'algorithm', 'start', 'stop' and 'size'
        file_hash -- the hash of the file
        modified -- last time the file was modified
        size -- size of the file in bytes
        """
        self.algorithm = config_settings['algorithm']
        self.start = config_settings['start']
        self.stop = config_settings['stop']
        self.chunk_size = config_settings['size']
        self.file_hash = file_hash
        self.modified = modified
        self.size = size

    def __eq__(self, other):
        # Equality is deliberately digest-based: same algorithm + same hash.
        # Offsets and metadata are not compared.
        return (isinstance(other, FileHash)
                and self.algorithm == other.algorithm
                and self.file_hash == other.file_hash)

    def __repr__(self):
        """Represent a FileHash."""
        # Bug fix: the original format string never closed the '<...>'.
        return ('<FileHash: algorithm={0}, start={1}, stop={2}, '
                'chunk_size={3}, file_hash={4}, modified={5}, '
                'size={6}>').format(
            self.algorithm,
            self.start, self.stop,
            self.chunk_size,
            self.file_hash,
            self.modified,
            self.size)
@event('plugin.register')
def register_plugin():
    """Register FileHashPlugin with FlexGet under the 'file_hash' plugin id."""
    plugin.register(FileHashPlugin, PLUGIN_ID, api_ver=2, interfaces=['task', 'series_metainfo', 'movie_metainfo'])
| 37.127451 | 115 | 0.591761 |
import hashlib
import logging
import os
from builtins import *
from typing import Dict
from flexget import plugin
from flexget.event import event
from flexget.logger import FlexGetLogger
from .cunit import IECUnit
PLUGIN_ID = 'file_hash'
log: FlexGetLogger = logging.getLogger(PLUGIN_ID)
class FileHashPlugin(object):
@staticmethod
def __default_algo():
return 'blake2b' if 'blake2b' in hashlib.algorithms_available else 'md5'
hash_size_default = 25
hash_start_default = 50
schema = {
'oneOf': [
{'type': 'boolean'},
{'type': 'string',
'enum': list(hashlib.algorithms_available)},
{'type': 'object',
'properties': {
'algorithm': {
'type': 'string',
'enum': list(hashlib.algorithms_available)},
'size': {'type': 'integer', 'default': hash_size_default},
'start': {'type': 'integer', 'default': hash_start_default},
'stop': {'type': 'integer'},
'time': {'type': 'boolean', 'default': 'boolean'}}},
],
}
plugin_fields = {'file_hash_type', 'file_hash_hash', 'file_hash_modified', 'file_hash_bytes'}
@staticmethod
def __strict_boolean(check):
if isinstance(check, bool) and check:
return True
return False
def __get_algo(self, config):
return self.__default_algo()
def compare_entry(self, entry, config):
if 'file_hash' in entry:
file_hash = entry['file_hash']
match_algo = file_hash.algorithm == self.__get_algo(config)
match_file_size = file_hash.file_size == os.path.getsize(entry['location'])
match_modified = file_hash.modified == os.path.getmtime(entry['location'])
match_start = file_hash.start == config.get('start')
match_stop = file_hash.stop == config.get('stop')
match_chunk_size = file_hash.chunk_size == config.get('size')
match_strict = match_file_size and match_start and match_stop and match_chunk_size
if match_algo and match_strict:
return True
return False
def on_task_metainfo(self, task, config):
log.info('Starting file_hash')
if self.__strict_boolean(config):
config = {True}
hash_portion = {
'algorithm': self.__get_algo(config),
'size': IECUnit.MiB * (config['size'] if 'size' in config else self.hash_size_default),
'start': IECUnit.MiB * (config['start'] if 'start' in config else self.hash_start_default),
'stop': IECUnit.MiB * (config['stop'] if 'stop' in config else -1),
}
hasher = hashlib.new(hash_portion['algorithm'])
log.verbose('Hasing with algorithm: %s', hash_portion['algorithm'])
log.debug('Hashing %s MiB of each file.', hash_portion['size'])
log.debug('Hashing starting %s MiB into file.', hash_portion['start'])
log.debug('Hashing ending at %s MiB.', hash_portion['stop'])
len_entries = len(task.entries)
idx = 0
for entry in task.entries:
idx += 1
file_size = os.path.getsize(entry['location'])
if self.compare_entry(entry, config):
log.verbose('This file seems to be unmodified, skipping')
continue
log.verbose('%s/%s: Hasing %s', idx, len_entries, entry['location'])
current_hasher = hasher.copy()
tmp_hash_portion_start = -1
if file_size < hash_portion['start']:
log.debug('The file is only %s MiB, adjusting start location.', float(file_size / IECUnit.MiB))
if file_size < hash_portion['size']:
log.debug('The file is less than the set size to to hash, setting start position to 0')
tmp_hash_portion_start = 0
else:
tmp_hash_portion_start = file_size - hash_portion['size']
log.debug('The size of the file is greater than the set size to hash, \
setting start position to %s MiB', tmp_hash_portion_start)
with open(entry['location'], 'rb') as to_hash:
to_hash.seek(tmp_hash_portion_start if tmp_hash_portion_start > -1 else hash_portion['start'])
piece = to_hash.read(hash_portion['size'])
current_hasher.update(piece)
file_digest = current_hasher.hexdigest()
file_modified = os.path.getmtime(entry['location'])
filehash = FileHash(hash_portion, file_digest, file_modified, file_size)
entry['file_hash'] = filehash
log.debug(filehash)
to_hash.close()
class FileHash(object):
algorithm = None
file_hash = None
modified = None
start = None
stop = None
chunk_size = None
size = None
def __init__(self, config_settings: Dict, file_hash, modified, size):
self.algorithm = config_settings['algorithm']
self.start = config_settings['start']
self.stop = config_settings['stop']
self.chunk_size = config_settings['size']
self.file_hash = file_hash
self.modified = modified
self.size = size
def __eq__(self, other):
return isinstance(other, FileHash) and\
self.algorithm == other.algorithm and\
self.file_hash == other.file_hash
def __repr__(self):
return """<FileHash: \
algorithm={0}, \
start={1}, stop={2}, \
chunk_size={3}, \
file_hash={4}, \
modified={5}, \
size={6}""".format(
self.algorithm,
self.start, self.stop,
self.chunk_size,
self.file_hash,
self.modified,
self.size)
@event('plugin.register')
def register_plugin():
plugin.register(FileHashPlugin, PLUGIN_ID, api_ver=2, interfaces=['task', 'series_metainfo', 'movie_metainfo'])
| true | true |
f72585050c810ea13194c609b96660990583ebbd | 2,046 | py | Python | Day-04/part2.py | archanpatkar/advent2021 | 8e0780cd28b5825af092e4ba8e3d9cd1059bce92 | [
"MIT"
] | null | null | null | Day-04/part2.py | archanpatkar/advent2021 | 8e0780cd28b5825af092e4ba8e3d9cd1059bce92 | [
"MIT"
] | null | null | null | Day-04/part2.py | archanpatkar/advent2021 | 8e0780cd28b5825af092e4ba8e3d9cd1059bce92 | [
"MIT"
] | null | null | null | import sys
sys.path.append("..")
from common import *
def parse(d):
    """Parse bingo input: draw numbers on line one, boards separated by blanks.

    Returns a (draws, boards) pair where draws is a tuple of ints and boards
    is a list of row-tuples (a blank line after the draws yields an empty
    leading board entry).
    """
    lines = d.strip().split("\n")
    draws = tuple(int(token) for token in lines[0].strip().split(","))
    boards = []
    current = []
    print(lines)
    for line in lines[1:]:
        if len(line) == 0:
            # Blank line terminates the board collected so far.
            boards.append(tuple(current))
            current = []
        else:
            row = tuple(int(token) for token in line.split(" ") if len(token) > 0)
            current.append(row)
    boards.append(tuple(current))
    print(draws)
    print(boards)
    return (draws, boards)
# Run the bingo simulation over the puzzle input (aoci applies `parse`).
data = aoci(parse)
# Drop the empty board produced by the parser's leading blank line.
boards = [b for b in data[1] if len(b) > 0]
print("boards")
print(boards)
print("-----------------------------")
# pos[board] maps (row, col) -> drawn number for every marked cell.
pos = {b:{} for b in boards}
winners = []   # (board, counts, marks, winning_draw) in the order boards win
winners2 = []  # boards that have already won (membership checks)
winner = None
i = 0
draw = None
found = None
for draw in data[0]:
    print("Draw:",draw)
    # Mark the drawn number on every board containing it (first match per row).
    for board in boards:
        for l in range(len(board)):
            if draw in board[l]:
                if not pos[board].get((l,board[l].index(draw))):
                    pos[board][(l,board[l].index(draw))] = []
                pos[board][(l,board[l].index(draw))] = draw
    # nd[board] tallies marks per row under (row, 0) and per column under
    # (0, col); column 0 uses the separate key (0, 0, "C") so it does not
    # collide with row 0's counter. A tally of 5 means bingo.
    nd = {b:{} for b in pos}
    for b in boards:
        if(not(b in winners2)):
            for i in range(5):
                nd[b][(i,0)] = 0
                nd[b][(0,i)] = 0
            nd[b][(0,0,"C")] = 0
            for k in pos[b]:
                nd[b][(k[0],0)] += 1
                if(k[1] == 0):
                    nd[b][(0,0,"C")] += 1
                else: nd[b][(0,k[1])] += 1
            for s in nd[b]:
                if(nd[b][s] >= 5):
                    # This board just completed a row/column: record it once.
                    found = (b,nd[b],pos[b],draw)
                    winners.append(found)
                    winners2.append(b)
                    break
    # Part 2: stop once every board has won; the last winner gives the answer.
    if len(winners) == len(boards):
        break
# `winner` is never set in the loop, so this picks the last board to win.
if(winner == None):
    winner = winners[-1]
pprint(winner,indent=4)
# Score = sum of all unmarked numbers on the last winning board...
sum = 0
for i in range(5):
    for j in range(5):
        if not((i,j) in winner[2]):
            print(winner[0][i][j])
            sum += winner[0][i][j]
print("output")
print(sum)
print(draw)
# ...multiplied by the draw that completed it (winner[-1]).
print(sum * winner[-1])
sys.path.append("..")
from common import *
def parse(d):
temp = d.strip().split("\n")
first = tuple([int(n) for n in temp[0].strip().split(",")])
second = []
temp2 = []
print(temp)
for r in temp[1:]:
if(len(r) == 0):
second.append(tuple(temp2))
temp2 = []
else:
temp2.append(tuple([int(n) for n in r.split(" ") if(len(n) > 0)]))
second.append(tuple(temp2))
print(first)
print(second)
return (first,second)
data = aoci(parse)
boards = [b for b in data[1] if len(b) > 0]
print("boards")
print(boards)
print("-----------------------------")
pos = {b:{} for b in boards}
winners = []
winners2 = []
winner = None
i = 0
draw = None
found = None
for draw in data[0]:
print("Draw:",draw)
for board in boards:
for l in range(len(board)):
if draw in board[l]:
if not pos[board].get((l,board[l].index(draw))):
pos[board][(l,board[l].index(draw))] = []
pos[board][(l,board[l].index(draw))] = draw
nd = {b:{} for b in pos}
for b in boards:
if(not(b in winners2)):
for i in range(5):
nd[b][(i,0)] = 0
nd[b][(0,i)] = 0
nd[b][(0,0,"C")] = 0
for k in pos[b]:
nd[b][(k[0],0)] += 1
if(k[1] == 0):
nd[b][(0,0,"C")] += 1
else: nd[b][(0,k[1])] += 1
for s in nd[b]:
if(nd[b][s] >= 5):
found = (b,nd[b],pos[b],draw)
winners.append(found)
winners2.append(b)
break
if len(winners) == len(boards):
break
if(winner == None):
winner = winners[-1]
pprint(winner,indent=4)
sum = 0
for i in range(5):
for j in range(5):
if not((i,j) in winner[2]):
print(winner[0][i][j])
sum += winner[0][i][j]
print("output")
print(sum)
print(draw)
print(sum * winner[-1]) | true | true |
f7258523558bf650489ce11f89d5a70fe2400656 | 1,042 | py | Python | dts_test_project/dts_test_app/migrations/0001_initial.py | pvandegeer/django-tenant-schemas | 20c72782cee51a33fd5c56a0af7b2c653c1b6770 | [
"MIT"
] | 2 | 2018-08-14T07:37:06.000Z | 2018-09-27T11:20:54.000Z | dts_test_project/dts_test_app/migrations/0001_initial.py | pvandegeer/django-tenant-schemas | 20c72782cee51a33fd5c56a0af7b2c653c1b6770 | [
"MIT"
] | 1 | 2021-01-25T09:48:27.000Z | 2021-01-25T09:48:27.000Z | dts_test_project/dts_test_app/migrations/0001_initial.py | pvandegeer/django-tenant-schemas | 20c72782cee51a33fd5c56a0af7b2c653c1b6770 | [
"MIT"
] | 3 | 2018-08-14T07:37:08.000Z | 2021-04-24T07:52:00.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
    """Initial migration: creates DummyModel and ModelWithFkToPublicUser."""
    dependencies = [
        # Depend on whichever user model the project has configured.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='DummyModel',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=100)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='ModelWithFkToPublicUser',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                # No on_delete: targets Django < 2.0, where ForeignKey
                # defaulted to CASCADE.
                ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
    ]
| 28.944444 | 114 | 0.56238 |
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
    """Initial migration: creates DummyModel and ModelWithFkToPublicUser."""
    dependencies = [
        # Depend on whichever user model the project has configured.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='DummyModel',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=100)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='ModelWithFkToPublicUser',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                # No on_delete: targets Django < 2.0, where ForeignKey
                # defaulted to CASCADE.
                ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
    ]
| true | true |
f7258581d63de21fec44899869d8d610223ca22b | 2,803 | py | Python | src/redgrease/cluster.py | jsam/redgrease | 245755b34bce287c63abb6624478cdf8189816b6 | [
"MIT"
] | null | null | null | src/redgrease/cluster.py | jsam/redgrease | 245755b34bce287c63abb6624478cdf8189816b6 | [
"MIT"
] | null | null | null | src/redgrease/cluster.py | jsam/redgrease | 245755b34bce287c63abb6624478cdf8189816b6 | [
"MIT"
] | null | null | null | from typing import Callable, Dict
import rediscluster
import rediscluster.exceptions
import redgrease.client
import redgrease.data
import redgrease.utils
class RedisCluster(rediscluster.RedisCluster):
    """RedisCluster client class, with support for gears features

    Behaves exactly like the rediscluster.RedisCluster client, but is extended
    with a 'gears' property for executing Gears commands.

    Attributes:
        gears (redgrease.client.Gears):
            Gears command client.
    """

    # States how target node is selected for cluster commands:
    # - blocked : command is not allowed - Raises a RedisClusterException
    # - random : executed on one randomly selected node
    # - all-masters : executed on all master nodes
    # - all-nodes : executed on all nodes
    # - slot-id : executed on the node defined by the second argument
    NODES_FLAGS = {
        **rediscluster.RedisCluster.NODES_FLAGS,
        **{
            "RG.INFOCLUSTER": "random",
            "RG.PYSTATS": "all-nodes",
            "RG.PYDUMPREQS": "random",
            "RG.REFRESHCLUSTER": "all-nodes",
            "RG.DUMPEXECUTIONS": "random",
            "RG.DUMPREGISTRATIONS": "random",
        },
    }
    # Not to be confused with redis.Redis.RESPONSE_CALLBACKS
    # RESULT_CALLBACKS is special to rediscluster.RedisCluster.
    # It decides how results of commands defined in `NODES_FLAGS` are aggregated
    # into a final response, **after** redis.Redis.RESPONSE_CALLBACKS has been
    # applied to each response individually.
    # NOTE(review): RG.DUMPEXECUTIONS / RG.DUMPREGISTRATIONS appear in
    # NODES_FLAGS but have no aggregator here — confirm the base class default
    # aggregation is the intended behavior for them.
    RESULT_CALLBACKS = {
        **rediscluster.RedisCluster.RESULT_CALLBACKS,
        **{
            "RG.INFOCLUSTER": lambda _, res: next(iter(res.values())),
            "RG.PYSTATS": lambda _, res: res,
            "RG.PYDUMPREQS": lambda _, res: next(iter(res.values())),
            "RG.REFRESHCLUSTER": lambda _, res: all(res.values()),
        },
    }
    # Per-command response parsing: merge gears parsers over the base set.
    RESPONSE_CALLBACKS: Dict[str, Callable] = {
        **rediscluster.RedisCluster.RESPONSE_CALLBACKS,
        **redgrease.client.Gears.RESPONSE_CALLBACKS,
    }
    def __init__(self, *args, **kwargs):
        """Instantiate a redis cluster client, with gears features."""
        self._gears = None  # lazily created by the `gears` property
        self.connection = None
        super().__init__(*args, **kwargs)
    @property
    def gears(self) -> redgrease.client.Gears:
        """Gears client, exposing gears commands (created on first access).

        Returns:
            Gears:
                Gears client
        """
        if not self._gears:
            self._gears = redgrease.client.Gears(self)
        return self._gears
def RedisGears(*args, **kwargs):
    """Create a cluster Gears client, falling back to a single-node client.

    Tries RedisCluster first; if cluster construction fails (e.g. the target
    server is not running in cluster mode), returns a plain redgrease Redis
    client built with the same arguments.
    """
    try:
        return RedisCluster(*args, **kwargs)
    except (AttributeError, rediscluster.exceptions.RedisClusterException):
        return redgrease.client.Redis(*args, **kwargs)
| 32.593023 | 85 | 0.642526 | from typing import Callable, Dict
import rediscluster
import rediscluster.exceptions
import redgrease.client
import redgrease.data
import redgrease.utils
class RedisCluster(rediscluster.RedisCluster):
NODES_FLAGS = {
**rediscluster.RedisCluster.NODES_FLAGS,
**{
"RG.INFOCLUSTER": "random",
"RG.PYSTATS": "all-nodes",
"RG.PYDUMPREQS": "random",
"RG.REFRESHCLUSTER": "all-nodes",
"RG.DUMPEXECUTIONS": "random",
"RG.DUMPREGISTRATIONS": "random",
},
}
RESULT_CALLBACKS = {
**rediscluster.RedisCluster.RESULT_CALLBACKS,
**{
"RG.INFOCLUSTER": lambda _, res: next(iter(res.values())),
"RG.PYSTATS": lambda _, res: res,
"RG.PYDUMPREQS": lambda _, res: next(iter(res.values())),
"RG.REFRESHCLUSTER": lambda _, res: all(res.values()),
},
}
RESPONSE_CALLBACKS: Dict[str, Callable] = {
**rediscluster.RedisCluster.RESPONSE_CALLBACKS,
**redgrease.client.Gears.RESPONSE_CALLBACKS,
}
def __init__(self, *args, **kwargs):
self._gears = None
self.connection = None
super().__init__(*args, **kwargs)
@property
def gears(self) -> redgrease.client.Gears:
if not self._gears:
self._gears = redgrease.client.Gears(self)
return self._gears
def RedisGears(*args, **kwargs):
try:
return RedisCluster(*args, **kwargs)
except (AttributeError, rediscluster.exceptions.RedisClusterException):
return redgrease.client.Redis(*args, **kwargs)
| true | true |
f72587485c282a74d27d67de475830aad98af691 | 1,316 | py | Python | elpips/util.py | niopeng/elpips | 385012a2ee614c75a1631546c391039af85744f4 | [
"BSD-2-Clause"
] | 88 | 2019-06-13T10:42:26.000Z | 2022-03-30T07:36:20.000Z | elpips/util.py | niopeng/elpips | 385012a2ee614c75a1631546c391039af85744f4 | [
"BSD-2-Clause"
] | 4 | 2019-11-13T23:11:33.000Z | 2021-07-21T11:04:08.000Z | elpips/util.py | niopeng/elpips | 385012a2ee614c75a1631546c391039af85744f4 | [
"BSD-2-Clause"
] | 18 | 2019-06-10T16:31:10.000Z | 2022-01-04T03:48:57.000Z | import tensorflow as tf
import numpy as np
def switch_case_cond(cases, default_case):
    """Build a nested tf.cond chain: the first true condition's effect wins.

    cases: list of (condition, effect) pairs — `condition` a scalar boolean
    tensor, `effect` a zero-arg callable returning that branch's value.
    default_case: zero-arg callable evaluated when no condition holds.
    """
    if cases:
        condition, effect = cases[0]
        # Recurse on the remaining cases inside the false branch.
        return tf.cond(condition, effect, lambda: switch_case_cond(cases[1:], default_case))
    return default_case()
def switch_case_where(cases, default_case):
    """Elementwise analogue of switch_case_cond built from nested tf.where.

    cases: list of (condition, effect) tensor pairs, checked in order.
    default_case: a tensor (not a callable) used where no condition holds.
    """
    if cases:
        condition, effect = cases[0]
        return tf.where(condition, effect, switch_case_where(cases[1:], default_case))
    return default_case
def np_dtype(tf_dtype):
    """Map a TensorFlow float dtype to its NumPy equivalent.

    Only tf.float32 and tf.float64 are supported; anything else raises.
    """
    if tf_dtype == tf.float32:
        return np.float32
    if tf_dtype == tf.float64:
        return np.float64
    raise Exception('Unsupported dtype')
def f32_to_dtype(x, dtype):
    """Cast a float32 tensor `x` to `dtype`, skipping the no-op cast."""
    if dtype == tf.float32:
        return x
    return tf.cast(x, dtype)
def as_tuple(x):
    """Format x as a tuple: returned as-is if it already is one, otherwise
    wrapped into a 1-tuple."""
    return x if isinstance(x, tuple) else (x,)
def for_each(x, func):
    """Apply 'func' to x, or to each element when x is a tuple; the result
    keeps the same format as the input."""
    if isinstance(x, tuple):
        return tuple(map(func, x))
    return func(x)
def for_tuple(x, func):
    """Apply 'func' to x coerced to a tuple and return the result in x's
    original format. Assumes 'func' returns a tuple when given a tuple."""
    if isinstance(x, tuple):
        return func(x)
    # Non-tuple input: wrap, apply, then unwrap the single result.
    return func((x,))[0]
| 25.803922 | 126 | 0.715046 | import tensorflow as tf
import numpy as np
def switch_case_cond(cases, default_case):
if cases:
condition, effect = cases[0]
return tf.cond(condition, effect, lambda: switch_case_cond(cases[1:], default_case))
return default_case()
def switch_case_where(cases, default_case):
if cases:
condition, effect = cases[0]
return tf.where(condition, effect, switch_case_where(cases[1:], default_case))
return default_case
def np_dtype(tf_dtype):
if tf_dtype == tf.float32:
return np.float32
if tf_dtype == tf.float64:
return np.float64
raise Exception('Unsupported dtype')
def f32_to_dtype(x, dtype):
if dtype == tf.float32:
return x
return tf.cast(x, dtype)
def as_tuple(x):
if isinstance(x, tuple):
return x
else:
return (x,)
def for_each(x, func):
if isinstance(x, tuple):
return tuple((func(s) for s in x))
else:
return func(x)
def for_tuple(x, func):
if isinstance(x, tuple):
return func(x)
else:
return func(as_tuple(x))[0]
| true | true |
f72587bcc4673fe9fd1a8a3ffec34ab4d833f88b | 5,113 | py | Python | src/estimagic/benchmarking/run_benchmark.py | PaulBehler/estimagic | c14f743986262d508e55738c90737cb504fe987b | [
"MIT"
] | 7 | 2019-05-11T07:19:46.000Z | 2019-05-31T07:03:13.000Z | src/estimagic/benchmarking/run_benchmark.py | PaulBehler/estimagic | c14f743986262d508e55738c90737cb504fe987b | [
"MIT"
] | 14 | 2019-05-04T14:15:52.000Z | 2019-06-10T11:45:27.000Z | src/estimagic/benchmarking/run_benchmark.py | PaulBehler/estimagic | c14f743986262d508e55738c90737cb504fe987b | [
"MIT"
] | 1 | 2019-05-21T08:44:37.000Z | 2019-05-21T08:44:37.000Z | """Functions to create, run and visualize optimization benchmarks.
TO-DO:
- Add other benchmark sets:
- finish medium scale problems from https://arxiv.org/pdf/1710.11005.pdf, Page 34.
- add scalar problems from https://github.com/AxelThevenot
- Add option for deterministic noise or wiggle.
"""
from pathlib import Path
import numpy as np
from estimagic import batch_evaluators
from estimagic.logging.read_log import read_optimization_histories
from estimagic.optimization.optimize import minimize
def run_benchmark(
    problems,
    optimize_options,
    logging_directory,
    *,
    batch_evaluator="joblib",
    n_cores=1,
    error_handling="continue",
    fast_logging=True,
    seed=None,
):
    """Run problems with different optimize options.

    Args:
        problems (dict): Nested dictionary with benchmark problems of the structure:
            {"name": {"inputs": {...}, "solution": {...}, "info": {...}}}
            where "inputs" are keyword arguments for ``minimize`` such as the criterion
            function and start parameters. "solution" contains the entries "params" and
            "value" and "info" might contain information about the test problem.
        optimize_options (list or dict): Either a list of algorithms or a Nested
            dictionary that maps a name for optimizer settings
            (e.g. ``"lbfgsb_strict_criterion"``) to a dictionary of keyword arguments
            for arguments for ``minimize`` (e.g. ``{"algorithm": "scipy_lbfgsb",
            "algo_options": {"convergence.relative_criterion_tolerance": 1e-12}}``).
            Alternatively, the values can just be an algorithm which is then benchmarked
            at default settings.
        batch_evaluator (str or callable): See :ref:`batch_evaluators`.
        logging_directory (pathlib.Path): Directory in which the log databases are
            saved.
        n_cores (int): Number of optimizations that is run in parallel. Note that in
            addition to that an optimizer might parallelize.
        error_handling (str): One of "raise", "continue".
        fast_logging (bool): Whether the slightly unsafe but much faster database
            configuration is chosen.
        seed (int or None): Seed for numpy's global random number generator;
            None leaves the RNG state unseeded.

    Returns:
        dict: Nested Dictionary with information on the benchmark run. The outer keys
            are tuples where the first entry is the name of the problem and the second
            the name of the optimize options. The values are dicts with the entries:
            "runtime", "params_history", "criterion_history", "solution"

    """
    # Seed numpy's *global* RNG so stochastic criteria/optimizers are
    # reproducible across benchmark runs.
    np.random.seed(seed)
    logging_directory = Path(logging_directory)
    logging_directory.mkdir(parents=True, exist_ok=True)
    # Resolve a string like "joblib" to batch_evaluators.joblib_batch_evaluator.
    if isinstance(batch_evaluator, str):
        batch_evaluator = getattr(
            batch_evaluators, f"{batch_evaluator}_batch_evaluator"
        )
    opt_options = _process_optimize_options(optimize_options)
    log_options = {"fast_logging": fast_logging, "if_table_exists": "replace"}
    # Build one minimize() call per (problem, optimizer-settings) pair.
    # Merge order matters: problem "inputs" override colliding keys from the
    # optimize options. Each pair logs to its own database file.
    kwargs_list = []
    names = []
    for prob_name, problem in problems.items():
        for option_name, options in opt_options.items():
            kwargs = {
                **options,
                **problem["inputs"],
                "logging": logging_directory / f"{prob_name}_{option_name}.db",
                "log_options": log_options,
            }
            kwargs_list.append(kwargs)
            names.append((prob_name, option_name))
    log_paths = [kwargs["logging"] for kwargs in kwargs_list]
    raw_results = batch_evaluator(
        func=minimize,
        arguments=kwargs_list,
        n_cores=n_cores,
        error_handling=error_handling,
        unpack_symbol="**",
    )
    # Read back the logged histories; "runtime" is derived from the logged
    # evaluation timestamps, not from wall time of this function.
    results = {}
    for name, result, log_path in zip(names, raw_results, log_paths):
        histories = read_optimization_histories(log_path)
        stop = histories["metadata"]["timestamps"].max()
        start = histories["metadata"]["timestamps"].min()
        runtime = (stop - start).total_seconds()
        results[name] = {
            "params_history": histories["params"],
            "criterion_history": histories["values"],
            "time_history": histories["metadata"]["timestamps"] - start,
            "solution": result,
            "runtime": runtime,
        }
    return results
def _process_optimize_options(raw_options):
if not isinstance(raw_options, dict):
dict_options = {}
for option in raw_options:
if isinstance(option, str):
dict_options[option] = option
else:
dict_options[option.__name__] = option
else:
dict_options = raw_options
out_options = {}
for name, option in dict_options.items():
if not isinstance(option, dict):
option = {"algorithm": option}
if "log_options" in option:
raise ValueError(
"Log options cannot be specified as part of optimize_options. Logging "
"behavior is configured by the run_benchmark function."
)
out_options[name] = option
return out_options
| 37.050725 | 88 | 0.643067 | from pathlib import Path
import numpy as np
from estimagic import batch_evaluators
from estimagic.logging.read_log import read_optimization_histories
from estimagic.optimization.optimize import minimize
def run_benchmark(
problems,
optimize_options,
logging_directory,
*,
batch_evaluator="joblib",
n_cores=1,
error_handling="continue",
fast_logging=True,
seed=None,
):
np.random.seed(seed)
logging_directory = Path(logging_directory)
logging_directory.mkdir(parents=True, exist_ok=True)
if isinstance(batch_evaluator, str):
batch_evaluator = getattr(
batch_evaluators, f"{batch_evaluator}_batch_evaluator"
)
opt_options = _process_optimize_options(optimize_options)
log_options = {"fast_logging": fast_logging, "if_table_exists": "replace"}
kwargs_list = []
names = []
for prob_name, problem in problems.items():
for option_name, options in opt_options.items():
kwargs = {
**options,
**problem["inputs"],
"logging": logging_directory / f"{prob_name}_{option_name}.db",
"log_options": log_options,
}
kwargs_list.append(kwargs)
names.append((prob_name, option_name))
log_paths = [kwargs["logging"] for kwargs in kwargs_list]
raw_results = batch_evaluator(
func=minimize,
arguments=kwargs_list,
n_cores=n_cores,
error_handling=error_handling,
unpack_symbol="**",
)
results = {}
for name, result, log_path in zip(names, raw_results, log_paths):
histories = read_optimization_histories(log_path)
stop = histories["metadata"]["timestamps"].max()
start = histories["metadata"]["timestamps"].min()
runtime = (stop - start).total_seconds()
results[name] = {
"params_history": histories["params"],
"criterion_history": histories["values"],
"time_history": histories["metadata"]["timestamps"] - start,
"solution": result,
"runtime": runtime,
}
return results
def _process_optimize_options(raw_options):
if not isinstance(raw_options, dict):
dict_options = {}
for option in raw_options:
if isinstance(option, str):
dict_options[option] = option
else:
dict_options[option.__name__] = option
else:
dict_options = raw_options
out_options = {}
for name, option in dict_options.items():
if not isinstance(option, dict):
option = {"algorithm": option}
if "log_options" in option:
raise ValueError(
"Log options cannot be specified as part of optimize_options. Logging "
"behavior is configured by the run_benchmark function."
)
out_options[name] = option
return out_options
| true | true |
f725895b361675e864ed1a5ce6a8bc79a831c085 | 6,172 | py | Python | Data_Handling/Data_analyze.py | KristofferLM96/TsetlinMachine-GO | 926091fc70042abe5a67230932398bdab2c46328 | [
"MIT"
] | 2 | 2020-02-27T16:22:08.000Z | 2020-03-22T11:04:35.000Z | Data_Handling/Data_analyze.py | KristofferLM96/TsetlinMachine-GO | 926091fc70042abe5a67230932398bdab2c46328 | [
"MIT"
] | null | null | null | Data_Handling/Data_analyze.py | KristofferLM96/TsetlinMachine-GO | 926091fc70042abe5a67230932398bdab2c46328 | [
"MIT"
] | null | null | null | # -----------------------------------------------
# ................. LIBRARIES ...................
# -----------------------------------------------
import glob
import os
import time
import numpy as np
# -----------------------------------------------
# ............. GLOBAL VARIABLES ................
# -----------------------------------------------
name = "100_9x9Aya"  # dataset tag: 9x9Natsukaze || 9x9Aya || x_9x9Aya .. x = amount moves
file_name = name + "_binary.txt"  # binary-encoded dataset file derived from `name`
binary_path = "Data/Binary/" + file_name  # location of the binary-encoded games
original_path = "/home/kristoffer/Documents/Data/Original/9x9_10k_r104_144x20k/*"  # glob over the raw SGF games
encoding = "UTF-8"  # text encoding of the binary file: ISO-8859-1 / UTF-8
multiple_files = True  # whether `original_path` refers to many files (glob) or one
unique_list = []  # filled by check_duplication(): deduplicated rows of the binary file
original_list = []  # filled by check_duplication(): all rows of the binary file
# run_programs flags enable, in order:
# [check_handicap(), check_duplication(), get_result_ratio(), check_moves(), remove_empty_lines()]
run_programs = [0, 0, 1, 0, 0]
# -----------------------------------------------
# ................. FUNCTIONS ...................
# -----------------------------------------------
def remove_empty_lines():
    """Copy the binary data file, dropping blank lines.

    Reads ``binary_path`` and writes every non-blank line to
    ``Data/Binary/<name>_binary_1.txt``.
    """
    # BUGFIX: the original leaked the output file handle (it was never
    # closed). Context managers close both files even on error.
    with open(binary_path, "r") as source_file, \
            open("Data/Binary/" + name + "_binary_1.txt", "w") as output_file:
        for line in source_file:
            if not line.isspace():
                output_file.write(line)
def check_handicap():
    """Print the handicap value from the "HA[n]" tag in an SGF file header.

    NOTE(review): ``original_path`` is configured above as a glob pattern
    (ends in "/*"); ``open()`` cannot read a glob, so this only works when it
    is pointed at a single .sgf file — confirm the intended usage.
    """
    with open(original_path, 'r', encoding=encoding) as file:
        header = file.readline()
    _handicap = header.split("HA[")
    print(_handicap)
    # Guard: the original raised IndexError when the header had no "HA[" tag.
    if len(_handicap) > 1:
        handicap = _handicap[1][0]
        print(handicap)
def check_duplication():
    """Report how many duplicate rows the binary data file contains.

    Side effect: fills the module-level ``original_list`` (all stripped rows)
    and ``unique_list`` (their unique values).
    """
    global original_list
    global unique_list
    # Context manager instead of manual open/close.
    with open(binary_path, 'r', encoding=encoding) as file:
        original_list = [line.strip() for line in file]
    original_length = len(original_list)
    print("Original List Length:", original_length)
    unique_list = np.unique(original_list)
    unique_length = len(unique_list)
    print("Unique List Length:", unique_length)
    print("Original - Unique:", original_length - unique_length, "\n")
def get_result_ratio():
    """Print win/loss/draw game counts and each outcome's share of the total.

    Reads the three per-outcome results files for the configured ``name``
    (one game per line in each file).
    """
    counts = {}
    for outcome in ("win", "loss", "draw"):
        # Context manager replaces the original manual open/close triplets.
        with open("Data/Results-Split/" + name + "_" + outcome + ".txt", 'r') as results_file:
            counts[outcome] = len(results_file.readlines())
    total_amount = counts["win"] + counts["loss"] + counts["draw"]
    print("Total Amount:", total_amount)
    if total_amount == 0:
        # Guard: the original raised ZeroDivisionError when all files were empty.
        return
    print("Amount of wins:", counts["win"], ",", round(((counts["win"] * 100) / total_amount), 2), "%")
    print("Amount of loss:", counts["loss"], ",", round(((counts["loss"] * 100) / total_amount), 2), "%")
    print("Amount of draw:", counts["draw"], ",", round(((counts["draw"] * 100) / total_amount), 2), "%")
def check_moves():
    """Tally how many moves each SGF game under ``original_path`` contains.

    Prints a per-file progress line while parsing, then the distribution of
    game lengths (amount and percentage per unique move count).
    """
    total_pos = 19  # coordinate index of "t": SGF pass moves are "[tt]"

    def translate(i):
        """Map an SGF coordinate letter 'a'..'t' to its 0-based index.

        Returns None for any other character, matching the original
        if-chain's fall-through behavior.
        """
        if 'a' <= i <= 't':
            return ord(i) - ord('a')
        return None

    def get_moves(_game_lines):
        """Extract the [colour, x, y] move list from one game's SGF lines."""
        if "HA[" in _game_lines[0]:
            handicap = int(_game_lines[0].split("HA[")[1][0])
        else:
            handicap = 0
        _move_list = []
        const = 4  # character stride between consecutive "AB[xy]" coordinates
        for row in _game_lines[1:-1]:
            x = translate(row[3])
            y = translate(row[4])
            if row[1] + row[2] == "AB":
                # Handicap placement line: `handicap` black stones on one row.
                for i in range(handicap):
                    x = translate(row[4 + (i * const)])
                    y = translate(row[5 + (i * const)])
                    if x != total_pos and y != total_pos:
                        _move_list.append(["b", x, y])
            elif row[1] == "B" or row[1] == "W":
                # Regular move; "tt" (== total_pos) encodes a pass and is skipped.
                # BUGFIX: the original also entered this branch for rows that
                # were neither "B" nor "W" and then appended a stale ``_move``
                # from a previous iteration (or raised NameError on the first).
                if x != total_pos and y != total_pos:
                    _move_list.append([row[1].lower(), x, y])
        return _move_list

    moves_list = []
    # Glob once; the original globbed twice (for the count and the loop).
    sgf_files = glob.glob(os.path.join(original_path, '*.sgf'))
    total_files = len(sgf_files)
    for counter, infile in enumerate(sgf_files, start=1):
        start_time = time.time()
        with open(infile, 'r', encoding="ISO-8859-1") as file:
            file_lines = file.readlines()
        moves_list.append(len(get_moves(file_lines)))
        print(infile)
        print("Getting moves from file ", counter, "out of", total_files,
              "files. ............................................... ",
              round((counter / total_files * 100), 2), "% ............................................... ",
              round((time.time() - start_time) * 1000, 2), "ms", "\n")
    unique_moves_list, unique_moves_list_count = np.unique(moves_list, return_counts=True)
    print(unique_moves_list, "\n")
    print(unique_moves_list_count, "\n")
    total_data = sum(unique_moves_list_count)
    for x, y in np.nditer([unique_moves_list, unique_moves_list_count]):
        print("Moves: %d : Amount: %d, %d %%" % (int(x), int(y), ((int(y) * 100) / total_data)))
    print("\n")
    print("Unique Move lengths:", len(unique_moves_list))
# -----------------------------------------------
# .................. MAIN .......................
# -----------------------------------------------
# Table-driven dispatch: run_programs holds 0/1 flags in the same order as
# this tuple of analysis steps.
_steps = (check_handicap, check_duplication, get_result_ratio, check_moves,
          remove_empty_lines)
for _flag, _step in zip(run_programs, _steps):
    if _flag:
        _step()
| 31.814433 | 108 | 0.487362 |
import glob
import os
import time
import numpy as np
name = "100_9x9Aya"
file_name = name + "_binary.txt"
binary_path = "Data/Binary/" + file_name
original_path = "/home/kristoffer/Documents/Data/Original/9x9_10k_r104_144x20k/*"
encoding = "UTF-8"
multiple_files = True
unique_list = []
original_list = []
run_programs = [0, 0, 1, 0, 0]
def remove_empty_lines():
output_file = open("Data/Binary/" + name + "_binary_1.txt", "w+")
with open(binary_path, "r") as file:
for line in file:
if not line.isspace():
output_file.write(line)
def check_handicap():
file = open(original_path, 'r', encoding=encoding)
file_lines = file.readlines()
_handicap = file_lines[0].split("HA[")
print(_handicap)
handicap = _handicap[1][0]
print(handicap)
file.close()
def check_duplication():
file = open(binary_path, 'r', encoding=encoding)
global original_list
global unique_list
original_list = [line.strip() for line in file]
print("Original List Length:", len(original_list))
original_length = len(original_list)
unique_list = np.unique(original_list)
unique_length = len(unique_list)
print("Unique List Length:", unique_length)
print("Original - Unique:", original_length - unique_length, "\n")
file.close()
def get_result_ratio():
win = open("Data/Results-Split/" + name + "_win.txt", 'r')
loss = open("Data/Results-Split/" + name + "_loss.txt", 'r')
draw = open("Data/Results-Split/" + name + "_draw.txt", 'r')
win_amount = len(win.readlines())
loss_amount = len(loss.readlines())
draw_amount = len(draw.readlines())
total_amount = win_amount + loss_amount + draw_amount
print("Total Amount:", total_amount)
print("Amount of wins:", win_amount, ",", round(((win_amount * 100) / total_amount), 2), "%")
print("Amount of loss:", loss_amount, ",", round(((loss_amount * 100) / total_amount), 2), "%")
print("Amount of draw:", draw_amount, ",", round(((draw_amount * 100) / total_amount), 2), "%")
win.close()
loss.close()
draw.close()
def check_moves():
total_pos = 19
moves_list = []
def get_moves(_game_lines):
if "HA[" in _game_lines[0]:
handicap = int(_game_lines[0].split("HA[")[1][0])
else:
handicap = 0
_move_list = []
const = 4
for row in _game_lines[1:-1]:
x = translate(row[3])
y = translate(row[4])
if row[1] + row[2] == "AB":
for i in range(handicap):
x = translate(row[4 + (i * const)])
y = translate(row[5 + (i * const)])
_move = ["b", x, y]
if x != total_pos and y != total_pos:
_move_list.append(_move)
else:
if row[1] == "B":
_move = ["b", x, y]
if row[1] == "W":
_move = ["w", x, y]
if x != total_pos and y != total_pos:
_move_list.append(_move)
return _move_list
def translate(i):
if i == "a":
return 0
if i == "b":
return 1
if i == "c":
return 2
if i == "d":
return 3
if i == "e":
return 4
if i == "f":
return 5
if i == "g":
return 6
if i == "h":
return 7
if i == "i":
return 8
if i == "j":
return 9
if i == "k":
return 10
if i == "l":
return 11
if i == "m":
return 12
if i == "n":
return 13
if i == "o":
return 14
if i == "p":
return 15
if i == "q":
return 16
if i == "r":
return 17
if i == "s":
return 18
if i == "t":
return 19
counter = 1
total_files = len(glob.glob(os.path.join(original_path, '*.sgf')))
for infile in glob.glob(os.path.join(original_path, '*.sgf')):
start_time = time.time()
file = open(infile, 'r', encoding="ISO-8859-1")
file_lines = file.readlines()
moves_list.append(len(get_moves(file_lines)))
print(infile)
print("Getting moves from file ", counter, "out of", total_files,
"files. ............................................... ",
round((counter / total_files * 100), 2), "% ............................................... ",
round((time.time() - start_time) * 1000, 2), "ms", "\n")
counter = counter + 1
file.close()
unique_moves_list, unique_moves_list_count = np.unique(moves_list, return_counts=True)
print(unique_moves_list, "\n")
print(unique_moves_list_count, "\n")
total_data = sum(unique_moves_list_count)
for x, y in np.nditer([unique_moves_list, unique_moves_list_count]):
print("Moves: %d : Amount: %d, %d %%" % (int(x), int(y), ((int(y)*100)/total_data)))
print("\n")
print("Unique Move lengths:", len(unique_moves_list))
if run_programs[0]:
check_handicap()
if run_programs[1]:
check_duplication()
if run_programs[2]:
get_result_ratio()
if run_programs[3]:
check_moves()
if run_programs[4]:
remove_empty_lines()
| true | true |
f725898cfbcf257a6620878c85d91d75a4f7b6bd | 11,001 | py | Python | SBaaS_rnasequencing/stage01_rnasequencing_genesCountTable_io.py | dmccloskey/SBaaS_rnasequencing | 521ad0b671b0bca02e9cebfc1b372f2265955418 | [
"MIT"
] | null | null | null | SBaaS_rnasequencing/stage01_rnasequencing_genesCountTable_io.py | dmccloskey/SBaaS_rnasequencing | 521ad0b671b0bca02e9cebfc1b372f2265955418 | [
"MIT"
] | null | null | null | SBaaS_rnasequencing/stage01_rnasequencing_genesCountTable_io.py | dmccloskey/SBaaS_rnasequencing | 521ad0b671b0bca02e9cebfc1b372f2265955418 | [
"MIT"
] | null | null | null | #system
import json
#sbaas
from .stage01_rnasequencing_genesCountTable_query import stage01_rnasequencing_genesCountTable_query
from .stage01_rnasequencing_analysis_query import stage01_rnasequencing_analysis_query
from SBaaS_base.sbaas_template_io import sbaas_template_io
# Resources
from io_utilities.base_importData import base_importData
from io_utilities.base_exportData import base_exportData
from sequencing_analysis.genes_countFPKMattr_table import genes_countFPKMattr_table
from ddt_python.ddt_container_filterMenuAndChart2dAndTable import ddt_container_filterMenuAndChart2dAndTable
from ddt_python.ddt_container import ddt_container
from listDict.listDict import listDict
from math import log2
class stage01_rnasequencing_genesCountTable_io(
stage01_rnasequencing_genesCountTable_query,
stage01_rnasequencing_analysis_query,
sbaas_template_io):
    def import_dataStage01RNASequencingGenesCountTable_add(
        self,genes_count_table_dir,genes_fpkm_table_dir,
        genes_attr_table_dir,
        analysis_id_I,experiment_ids_I,samples_host_dirs_I,sample_names_I):
        '''Import count/FPKM/attribute tables, align them, and add the rows
        to the genes count table.

        Args:
            genes_count_table_dir (str): path to the genes count table file
            genes_fpkm_table_dir (str): path to the genes FPKM table file
            genes_attr_table_dir (str): path to the genes attribute table file
            analysis_id_I (str): analysis id
            experiment_ids_I (str): comma-separated experiment ids, one per
                sample name
            samples_host_dirs_I (str): '|'-separated groups (one group per
                sample name) of comma-separated replicate file paths
                (.bam/.fastq)
            sample_names_I (str): comma-separated sample names
        '''
        countFPKMattr = genes_countFPKMattr_table();
        countFPKMattr.import_countTable(
            filename_I=genes_count_table_dir,);
        countFPKMattr.import_fpkmTable(
            filename_I=genes_fpkm_table_dir,);
        countFPKMattr.import_attrTable(
            filename_I=genes_attr_table_dir,);
        #parse the filenames and samplenames:
        # sna2sns_I maps sample_name -> replicate short names (file basenames
        # without the .bam/.fastq extension);
        # sna2experimentID_I maps sample_name -> experiment_id
        sna2sns_I={};
        sna2experimentID_I={};
        sample_names_lst = sample_names_I.split(',');
        experiment_ids_lst = experiment_ids_I.split(',');
        for cnt,sample_replicates in enumerate(samples_host_dirs_I.split('|')):
            sna2sns_I[sample_names_lst[cnt]] = [];
            sna2experimentID_I[sample_names_lst[cnt]] = experiment_ids_lst[cnt];
            for sample in sample_replicates.split(','):
                # strip the directory and the sequencing-file extension
                filename = sample.split('/')[-1].replace('.bam','').replace('.fastq','');
                sna2sns_I[sample_names_lst[cnt]].append(filename);
        genesCountTable = countFPKMattr.alignAndReformat_countFPKMattrTables(
            analysis_id_I = analysis_id_I,
            sna2experimentID_I = sna2experimentID_I,
            sna2sns_I = sna2sns_I)
        self.add_dataStage01RNASequencingGenesCountTable(genesCountTable);
def import_dataStage01RNASequencingGenesCountTable_update(self, filename):
'''table adds'''
data = base_importData();
data.read_csv(filename);
data.format_data();
self.update_dataStage01RNASequencingGenesCountTable(data.data);
data.clear_data();
def export_dataStage01RNASequencingGenesCountTable_js(self,analysis_id_I,data_dir_I='tmp'):
'''Export data for a box and whiskers plot'''
# get the analysis information
experiment_ids,sample_names = [],[];
experiment_ids,sample_names = self.get_experimentIDAndSampleName_analysisID_dataStage01RNASequencingAnalysis(analysis_id_I);
data_O = [];
for sample_name_cnt,sample_name in enumerate(sample_names):
# query fpkm data:
fpkms = [];
fpkms = self.get_rows_experimentIDAndSampleName_dataStage01RNASequencingGenesCountTable(experiment_ids[sample_name_cnt],sample_name);
data_O.extend(fpkms);
# dump chart parameters to a js files
data1_keys = ['experiment_id','sample_name','gene_short_name'
];
data1_nestkeys = ['gene_short_name'];
data1_keymap = {'xdata':'gene_short_name',
'ydatamean':'FPKM',
'ydatalb':'FPKM_conf_lo',
'ydataub':'FPKM_conf_hi',
'serieslabel':'sample_name',
'featureslabel':'gene_short_name'};
# make the data object
dataobject_O = [{"data":data_O,"datakeys":data1_keys,"datanestkeys":data1_nestkeys}];
# make the tile parameter objects
formtileparameters_O = {'tileheader':'Filter menu','tiletype':'html','tileid':"filtermenu1",'rowid':"row1",'colid':"col1",
'tileclass':"panel panel-default",'rowclass':"row",'colclass':"col-sm-4"};
formparameters_O = {'htmlid':'filtermenuform1',"htmltype":'form_01',"formsubmitbuttonidtext":{'id':'submit1','text':'submit'},"formresetbuttonidtext":{'id':'reset1','text':'reset'},"formupdatebuttonidtext":{'id':'update1','text':'update'}};
formtileparameters_O.update(formparameters_O);
svgparameters_O = {"svgtype":'boxandwhiskersplot2d_02',"svgkeymap":[data1_keymap],
'svgid':'svg1',
"svgmargin":{ 'top': 50, 'right': 150, 'bottom': 50, 'left': 50 },
"svgwidth":500,"svgheight":350,
"svgx1axislabel":"gene","svgy1axislabel":"FPKM",
'svgformtileid':'filtermenu1','svgresetbuttonid':'reset1','svgsubmitbuttonid':'submit1'};
svgtileparameters_O = {'tileheader':'Custom box and whiskers plot','tiletype':'svg','tileid':"tile2",'rowid':"row1",'colid':"col2",
'tileclass':"panel panel-default",'rowclass':"row",'colclass':"col-sm-8"};
svgtileparameters_O.update(svgparameters_O);
tableparameters_O = {"tabletype":'responsivetable_01',
'tableid':'table1',
"tablefilters":None,
"tableclass":"table table-condensed table-hover",
'tableformtileid':'filtermenu1','tableresetbuttonid':'reset1','tablesubmitbuttonid':'submit1'};
tabletileparameters_O = {'tileheader':'FPKM','tiletype':'table','tileid':"tile3",'rowid':"row2",'colid':"col1",
'tileclass':"panel panel-default",'rowclass':"row",'colclass':"col-sm-12"};
tabletileparameters_O.update(tableparameters_O);
parametersobject_O = [formtileparameters_O,svgtileparameters_O,tabletileparameters_O];
tile2datamap_O = {"filtermenu1":[0],"tile2":[0],"tile3":[0]};
# dump the data to a json file
ddtutilities = ddt_container(parameters_I = parametersobject_O,data_I = dataobject_O,tile2datamap_I = tile2datamap_O,filtermenu_I = None);
if data_dir_I=='tmp':
filename_str = self.settings['visualization_data'] + '/tmp/ddt_data.js'
elif data_dir_I=='data_json':
data_json_O = ddtutilities.get_allObjects_js();
return data_json_O;
with open(filename_str,'w') as file:
file.write(ddtutilities.get_allObjects());
def export_dataStage01RNASequencingGenesCountTable_pairWisePlot_js(self,analysis_id_I,log2normalization_I=True,data_dir_I='tmp'):
'''Export data for a pairwise scatter plot
INPUT:
analysis_id = String, analysis_id
log2normalization_I = Boolean, apply a log2 normalization the FPKM values (default: True)
data_dir_I = string, data directory
OUTPUT:
'''
# get the analysis information
experiment_ids,sample_names = [],[];
experiment_ids,sample_names = self.get_experimentIDAndSampleName_analysisID_dataStage01RNASequencingAnalysis(analysis_id_I);
data_O = [];
for sample_name_cnt,sample_name in enumerate(sample_names):
# query fpkm data:
fpkms = [];
fpkms = self.get_rows_experimentIDAndSampleName_dataStage01RNASequencingGenesCountTable(experiment_ids[sample_name_cnt],sample_name);
if log2normalization_I:
for f in fpkms:
if f['FPKM'] == 0.0: f['FPKM'] = 0.0;
else: f['FPKM'] = log2(f['FPKM']);
data_O.extend(fpkms);
# reorganize the data
listdict = listDict(data_O);
data_O,columnValueHeader_O = listdict.convert_listDict2ColumnGroupListDict(
#value_labels_I = ['FPKM','FPKM_conf_lo','FPKM_conf_hi'],
value_labels_I = ['FPKM',],
column_labels_I = ['experiment_id','sample_name'],
feature_labels_I = ['gene_id','gene_short_name'],
na_str_I=0.0,
columnValueConnector_str_I='_',
);
# make the tile object
#data1 = filtermenu/table
data1_keymap_table = {
'xdata':'svd_method',
'ydata':'singular_value_index',
'zdata':'d_vector',
'rowslabel':'svd_method',
'columnslabel':'singular_value_index',
};
#data2 = svg
#if single plot, data2 = filter menu, data2, and table
data1_keys = ['gene_id','gene_short_name'
];
data1_nestkeys = ['gene_short_name'];
data1_keymap_svg = [];
svgtype = [];
svgtile2datamap = [];
data_svg_keymap = [];
for cnt1,column1 in enumerate(columnValueHeader_O):
for cnt2,column2 in enumerate(columnValueHeader_O[cnt1+1:]):
keymap = {
'xdata':column1,
'ydata':column2,
'serieslabel':'',
'featureslabel':'gene_short_name',
'tooltipdata':'gene_short_name',
};
data1_keymap_svg.append([keymap]);
data_svg_keymap.append(keymap);
svgtype.append('pcaplot2d_scores_01');
svgtile2datamap.append([0]);
nsvgtable = ddt_container_filterMenuAndChart2dAndTable();
nsvgtable.make_filterMenuAndChart2dAndTable(
data_filtermenu=data_O,
data_filtermenu_keys=data1_keys,
data_filtermenu_nestkeys=data1_nestkeys,
data_filtermenu_keymap=data1_keymap_table,
data_svg_keys=data1_keys,
data_svg_nestkeys=data1_nestkeys,
data_svg_keymap=data_svg_keymap,
data_table_keys=data1_keys,
data_table_nestkeys=data1_nestkeys,
data_table_keymap=data1_keymap_table,
data_svg=None,
data_table=None,
svgtype=svgtype,
tabletype='responsivetable_01',
svgx1axislabel='',
svgy1axislabel='',
tablekeymap = [data1_keymap_table],
svgkeymap = data1_keymap_svg,
formtile2datamap=[0],
tabletile2datamap=[0],
svgtile2datamap=svgtile2datamap,
svgfilters=None,
svgtileheader='Pair-wise scatter plot',
tablefilters=None,
tableheaders=None
);
if data_dir_I=='tmp':
filename_str = self.settings['visualization_data'] + '/tmp/ddt_data.js'
elif data_dir_I=='data_json':
data_json_O = nsvgtable.get_allObjects_js();
return data_json_O;
with open(filename_str,'w') as file:
file.write(nsvgtable.get_allObjects()); | 50.463303 | 248 | 0.634942 |
import json
from .stage01_rnasequencing_genesCountTable_query import stage01_rnasequencing_genesCountTable_query
from .stage01_rnasequencing_analysis_query import stage01_rnasequencing_analysis_query
from SBaaS_base.sbaas_template_io import sbaas_template_io
from io_utilities.base_importData import base_importData
from io_utilities.base_exportData import base_exportData
from sequencing_analysis.genes_countFPKMattr_table import genes_countFPKMattr_table
from ddt_python.ddt_container_filterMenuAndChart2dAndTable import ddt_container_filterMenuAndChart2dAndTable
from ddt_python.ddt_container import ddt_container
from listDict.listDict import listDict
from math import log2
class stage01_rnasequencing_genesCountTable_io(
stage01_rnasequencing_genesCountTable_query,
stage01_rnasequencing_analysis_query,
sbaas_template_io):
def import_dataStage01RNASequencingGenesCountTable_add(
self,genes_count_table_dir,genes_fpkm_table_dir,
genes_attr_table_dir,
analysis_id_I,experiment_ids_I,samples_host_dirs_I,sample_names_I):
countFPKMattr = genes_countFPKMattr_table();
countFPKMattr.import_countTable(
filename_I=genes_count_table_dir,);
countFPKMattr.import_fpkmTable(
filename_I=genes_fpkm_table_dir,);
countFPKMattr.import_attrTable(
filename_I=genes_attr_table_dir,);
sna2sns_I={};
sna2experimentID_I={};
sample_names_lst = sample_names_I.split(',');
experiment_ids_lst = experiment_ids_I.split(',');
for cnt,sample_replicates in enumerate(samples_host_dirs_I.split('|')):
sna2sns_I[sample_names_lst[cnt]] = [];
sna2experimentID_I[sample_names_lst[cnt]] = experiment_ids_lst[cnt];
for sample in sample_replicates.split(','):
filename = sample.split('/')[-1].replace('.bam','').replace('.fastq','');
sna2sns_I[sample_names_lst[cnt]].append(filename);
genesCountTable = countFPKMattr.alignAndReformat_countFPKMattrTables(
analysis_id_I = analysis_id_I,
sna2experimentID_I = sna2experimentID_I,
sna2sns_I = sna2sns_I)
self.add_dataStage01RNASequencingGenesCountTable(genesCountTable);
def import_dataStage01RNASequencingGenesCountTable_update(self, filename):
data = base_importData();
data.read_csv(filename);
data.format_data();
self.update_dataStage01RNASequencingGenesCountTable(data.data);
data.clear_data();
def export_dataStage01RNASequencingGenesCountTable_js(self,analysis_id_I,data_dir_I='tmp'):
experiment_ids,sample_names = [],[];
experiment_ids,sample_names = self.get_experimentIDAndSampleName_analysisID_dataStage01RNASequencingAnalysis(analysis_id_I);
data_O = [];
for sample_name_cnt,sample_name in enumerate(sample_names):
fpkms = [];
fpkms = self.get_rows_experimentIDAndSampleName_dataStage01RNASequencingGenesCountTable(experiment_ids[sample_name_cnt],sample_name);
data_O.extend(fpkms);
data1_keys = ['experiment_id','sample_name','gene_short_name'
];
data1_nestkeys = ['gene_short_name'];
data1_keymap = {'xdata':'gene_short_name',
'ydatamean':'FPKM',
'ydatalb':'FPKM_conf_lo',
'ydataub':'FPKM_conf_hi',
'serieslabel':'sample_name',
'featureslabel':'gene_short_name'};
dataobject_O = [{"data":data_O,"datakeys":data1_keys,"datanestkeys":data1_nestkeys}];
formtileparameters_O = {'tileheader':'Filter menu','tiletype':'html','tileid':"filtermenu1",'rowid':"row1",'colid':"col1",
'tileclass':"panel panel-default",'rowclass':"row",'colclass':"col-sm-4"};
formparameters_O = {'htmlid':'filtermenuform1',"htmltype":'form_01',"formsubmitbuttonidtext":{'id':'submit1','text':'submit'},"formresetbuttonidtext":{'id':'reset1','text':'reset'},"formupdatebuttonidtext":{'id':'update1','text':'update'}};
formtileparameters_O.update(formparameters_O);
svgparameters_O = {"svgtype":'boxandwhiskersplot2d_02',"svgkeymap":[data1_keymap],
'svgid':'svg1',
"svgmargin":{ 'top': 50, 'right': 150, 'bottom': 50, 'left': 50 },
"svgwidth":500,"svgheight":350,
"svgx1axislabel":"gene","svgy1axislabel":"FPKM",
'svgformtileid':'filtermenu1','svgresetbuttonid':'reset1','svgsubmitbuttonid':'submit1'};
svgtileparameters_O = {'tileheader':'Custom box and whiskers plot','tiletype':'svg','tileid':"tile2",'rowid':"row1",'colid':"col2",
'tileclass':"panel panel-default",'rowclass':"row",'colclass':"col-sm-8"};
svgtileparameters_O.update(svgparameters_O);
tableparameters_O = {"tabletype":'responsivetable_01',
'tableid':'table1',
"tablefilters":None,
"tableclass":"table table-condensed table-hover",
'tableformtileid':'filtermenu1','tableresetbuttonid':'reset1','tablesubmitbuttonid':'submit1'};
tabletileparameters_O = {'tileheader':'FPKM','tiletype':'table','tileid':"tile3",'rowid':"row2",'colid':"col1",
'tileclass':"panel panel-default",'rowclass':"row",'colclass':"col-sm-12"};
tabletileparameters_O.update(tableparameters_O);
parametersobject_O = [formtileparameters_O,svgtileparameters_O,tabletileparameters_O];
tile2datamap_O = {"filtermenu1":[0],"tile2":[0],"tile3":[0]};
ddtutilities = ddt_container(parameters_I = parametersobject_O,data_I = dataobject_O,tile2datamap_I = tile2datamap_O,filtermenu_I = None);
if data_dir_I=='tmp':
filename_str = self.settings['visualization_data'] + '/tmp/ddt_data.js'
elif data_dir_I=='data_json':
data_json_O = ddtutilities.get_allObjects_js();
return data_json_O;
with open(filename_str,'w') as file:
file.write(ddtutilities.get_allObjects());
def export_dataStage01RNASequencingGenesCountTable_pairWisePlot_js(self,analysis_id_I,log2normalization_I=True,data_dir_I='tmp'):
experiment_ids,sample_names = [],[];
experiment_ids,sample_names = self.get_experimentIDAndSampleName_analysisID_dataStage01RNASequencingAnalysis(analysis_id_I);
data_O = [];
for sample_name_cnt,sample_name in enumerate(sample_names):
fpkms = [];
fpkms = self.get_rows_experimentIDAndSampleName_dataStage01RNASequencingGenesCountTable(experiment_ids[sample_name_cnt],sample_name);
if log2normalization_I:
for f in fpkms:
if f['FPKM'] == 0.0: f['FPKM'] = 0.0;
else: f['FPKM'] = log2(f['FPKM']);
data_O.extend(fpkms);
listdict = listDict(data_O);
data_O,columnValueHeader_O = listdict.convert_listDict2ColumnGroupListDict(
value_labels_I = ['FPKM',],
column_labels_I = ['experiment_id','sample_name'],
feature_labels_I = ['gene_id','gene_short_name'],
na_str_I=0.0,
columnValueConnector_str_I='_',
);
data1_keymap_table = {
'xdata':'svd_method',
'ydata':'singular_value_index',
'zdata':'d_vector',
'rowslabel':'svd_method',
'columnslabel':'singular_value_index',
};
data1_keys = ['gene_id','gene_short_name'
];
data1_nestkeys = ['gene_short_name'];
data1_keymap_svg = [];
svgtype = [];
svgtile2datamap = [];
data_svg_keymap = [];
for cnt1,column1 in enumerate(columnValueHeader_O):
for cnt2,column2 in enumerate(columnValueHeader_O[cnt1+1:]):
keymap = {
'xdata':column1,
'ydata':column2,
'serieslabel':'',
'featureslabel':'gene_short_name',
'tooltipdata':'gene_short_name',
};
data1_keymap_svg.append([keymap]);
data_svg_keymap.append(keymap);
svgtype.append('pcaplot2d_scores_01');
svgtile2datamap.append([0]);
nsvgtable = ddt_container_filterMenuAndChart2dAndTable();
nsvgtable.make_filterMenuAndChart2dAndTable(
data_filtermenu=data_O,
data_filtermenu_keys=data1_keys,
data_filtermenu_nestkeys=data1_nestkeys,
data_filtermenu_keymap=data1_keymap_table,
data_svg_keys=data1_keys,
data_svg_nestkeys=data1_nestkeys,
data_svg_keymap=data_svg_keymap,
data_table_keys=data1_keys,
data_table_nestkeys=data1_nestkeys,
data_table_keymap=data1_keymap_table,
data_svg=None,
data_table=None,
svgtype=svgtype,
tabletype='responsivetable_01',
svgx1axislabel='',
svgy1axislabel='',
tablekeymap = [data1_keymap_table],
svgkeymap = data1_keymap_svg,
formtile2datamap=[0],
tabletile2datamap=[0],
svgtile2datamap=svgtile2datamap,
svgfilters=None,
svgtileheader='Pair-wise scatter plot',
tablefilters=None,
tableheaders=None
);
if data_dir_I=='tmp':
filename_str = self.settings['visualization_data'] + '/tmp/ddt_data.js'
elif data_dir_I=='data_json':
data_json_O = nsvgtable.get_allObjects_js();
return data_json_O;
with open(filename_str,'w') as file:
file.write(nsvgtable.get_allObjects()); | true | true |
f7258a45bcc7583012e19070d9419ded10110a6e | 2,480 | py | Python | redunlive/utils.py | nmaekawa/redunlive | 3f1830d605c46a300d028a32b564d803964f2384 | [
"Apache-2.0"
] | null | null | null | redunlive/utils.py | nmaekawa/redunlive | 3f1830d605c46a300d028a32b564d803964f2384 | [
"Apache-2.0"
] | null | null | null | redunlive/utils.py | nmaekawa/redunlive | 3f1830d605c46a300d028a32b564d803964f2384 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import re
import sys
import platform
import requests
from requests.auth import HTTPBasicAuth
from . import __version__
from . import log
def clean_name( name ):
""" replaces non-alpha with underscores '_'
and set the string to lower case
"""
return re.sub( '[^0-9a-zA-Z]+', '_', name ).lower()
def pull_data( url, creds=None):
""" reads a text file from given url
if basic auth needed, pass args creds['user'] and creds['pwd']
"""
headers = {
'User-Agent': default_useragent(),
'Accept-Encoding': 'gzip, deflate',
'Accept': 'text/html, text/*'
}
au = None
if not creds is None:
if 'user' in creds.keys() and 'pwd' in creds.keys():
au = HTTPBasicAuth( creds['user'], creds['pwd'] )
headers.update( {'X-REQUESTED-AUTH': 'Basic'} )
try:
response = requests.get( url, headers=headers, auth=au )
except requests.HTTPError as e:
log.warning("data from url(%s) is unavailable. Error: %s" % ( url, e ) )
return None
else:
return response.text
def default_useragent():
"""Return a string representing the default user agent."""
_implementation = platform.python_implementation()
if _implementation == 'CPython':
_implementation_version = platform.python_version()
elif _implementation == 'PyPy':
_implementation_version = '%s.%s.%s' % (sys.pypy_version_info.major,
sys.pypy_version_info.minor,
sys.pypy_version_info.micro)
if sys.pypy_version_info.releaselevel != 'final':
_implementation_version = ''.join([_implementation_version, \
sys.pypy_version_info.releaselevel])
elif _implementation == 'Jython':
_implementation_version = platform.python_version() # Complete Guess
elif _implementation == 'IronPython':
_implementation_version = platform.python_version() # Complete Guess
else:
_implementation_version = 'Unknown'
try:
p_system = platform.system()
p_release = platform.release()
except IOError:
p_system = 'Unknown'
p_release = 'Unknown'
return " ".join(['%s/%s' % (__name__, __version__),
'%s/%s' % (_implementation, _implementation_version),
'%s/%s' % (p_system, p_release)])
| 32.207792 | 80 | 0.594355 |
import re
import sys
import platform
import requests
from requests.auth import HTTPBasicAuth
from . import __version__
from . import log
def clean_name( name ):
return re.sub( '[^0-9a-zA-Z]+', '_', name ).lower()
def pull_data( url, creds=None):
headers = {
'User-Agent': default_useragent(),
'Accept-Encoding': 'gzip, deflate',
'Accept': 'text/html, text/*'
}
au = None
if not creds is None:
if 'user' in creds.keys() and 'pwd' in creds.keys():
au = HTTPBasicAuth( creds['user'], creds['pwd'] )
headers.update( {'X-REQUESTED-AUTH': 'Basic'} )
try:
response = requests.get( url, headers=headers, auth=au )
except requests.HTTPError as e:
log.warning("data from url(%s) is unavailable. Error: %s" % ( url, e ) )
return None
else:
return response.text
def default_useragent():
_implementation = platform.python_implementation()
if _implementation == 'CPython':
_implementation_version = platform.python_version()
elif _implementation == 'PyPy':
_implementation_version = '%s.%s.%s' % (sys.pypy_version_info.major,
sys.pypy_version_info.minor,
sys.pypy_version_info.micro)
if sys.pypy_version_info.releaselevel != 'final':
_implementation_version = ''.join([_implementation_version, \
sys.pypy_version_info.releaselevel])
elif _implementation == 'Jython':
_implementation_version = platform.python_version()
elif _implementation == 'IronPython':
_implementation_version = platform.python_version()
else:
_implementation_version = 'Unknown'
try:
p_system = platform.system()
p_release = platform.release()
except IOError:
p_system = 'Unknown'
p_release = 'Unknown'
return " ".join(['%s/%s' % (__name__, __version__),
'%s/%s' % (_implementation, _implementation_version),
'%s/%s' % (p_system, p_release)])
| true | true |
f7258af14dcf0fa82efc690560fcea365ab3ef09 | 4,999 | py | Python | cameras_to_albums.py | brandoconnor/flickr-album-per-camera | 358c90cded12c6c9ecb9f8ae289e005f172d30e2 | [
"Apache-2.0"
] | null | null | null | cameras_to_albums.py | brandoconnor/flickr-album-per-camera | 358c90cded12c6c9ecb9f8ae289e005f172d30e2 | [
"Apache-2.0"
] | 1 | 2016-02-12T02:25:19.000Z | 2016-02-13T00:07:46.000Z | cameras_to_albums.py | brandoconnor/flickr-album-per-camera | 358c90cded12c6c9ecb9f8ae289e005f172d30e2 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""
A simple script to organize photos into albums named by their camera source.
For me, this is useful for cycling through only high quality photos on my TV
hooked up to a chromecast.
"""
import argparse
import flickrapi
def auth_flickr(api_key, api_secret):
"""Authenticate user to flickr API."""
flickr = flickrapi.FlickrAPI(api_key, api_secret, format='parsed-json')
flickr.authenticate_via_browser(perms='write')
return flickr
def get_user_id(flickr, user):
"""Get the user_id from the username."""
user_data = flickr.people.findByUsername(username=user)
return user_data['user']['id']
def get_photoset_dict(flickr, user_id):
"""construct a photoset dict of album name to album/set id."""
print('Gathering dictionary of all photos in all photosets...')
init_photoset = flickr.photosets.getList(user_id=user_id)
photoset_dict = dict()
set_num = 1
for set_page in range(1, init_photoset['photosets']['pages'] + 1):
photoset = flickr.photosets.getList(user_id=user_id, page=set_page)
for pset in photoset['photosets']['photoset']:
print('processing photoset %s of %s' %
(set_num, init_photoset['photosets']['total']))
set_num += 1
# a_dict = {'thing': {'this': ['list, 'values']} }
photoset_dict[pset['title']['_content']] = {pset['id']: []}
init_photoset_object = flickr.photosets.getPhotos(
photoset_id=pset['id'])
for photoset_page in range(1, init_photoset_object['photoset']['pages'] + 1):
photoset_object = flickr.photosets.getPhotos(
user_id=user_id, photoset_id=pset['id'], page=photoset_page)
photoset_dict[pset['title']['_content']][
pset['id']] += [p['id'] for p in photoset_object['photoset']['photo']]
return photoset_dict
def main(args):
"""Main code block. Do all the things."""
flickr = auth_flickr(args.api_key, args.api_secret)
user_id = get_user_id(flickr, user=args.username)
album_dict = get_photoset_dict(flickr, user_id)
init_photos = flickr.people.getPhotos(user_id=user_id)
total = init_photos['photos']['total']
photo_num = args.initial_photo / 100 * 100 # TODO ensure less than total photos
if photo_num > total:
raise('Trying to start at photo %s but only %s total. Exiting.' %
(args.initial_photo, total))
init_page = args.initial_photo / 100 + 1 # 100 photos per page
for page_num in range(init_page, init_photos['photos']['pages'] + 1):
photo_batch = flickr.people.getPhotos(user_id=user_id, page=page_num)
for photo in photo_batch['photos']['photo']:
photo_num += 1
photo_id = photo['id']
print('processing photo %s of %s: %s' %
(photo_num, total, photo_id))
photo_data = flickr.photos.getExif(photo_id=photo_id)
camera_name = photo_data['photo']['camera']
if len(camera_name) > 0:
if camera_name not in album_dict.keys():
print('adding camera album "%s"' % camera_name)
new_set = flickr.photosets.create(title=camera_name,
primary_photo_id=photo_id,
description='All photos taken behind a %s' % camera_name)
album_dict[camera_name] = {
new_set['photoset']['id']: [photo_id]}
continue
elif photo_id not in [p for p in album_dict[camera_name].values()[0]]:
# if this photo is not in the appropriate album, add it
print('Adding photo to camera album.')
flickr.photosets.addPhoto(photoset_id=album_dict[
camera_name].keys()[0], photo_id=photo_id)
album_dict[camera_name].values()[0].append(photo_id)
else:
print('Photo is already in the appropriate set')
continue
else:
print('Skipping photo with insufficient metadata.')
if __name__ in '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--dry_run', action='store_true',
default=False, help="Verbose minus action. Default=False")
parser.add_argument('-i', '--initial_photo', help='approximate initial photo. Rounds down to nearest hundred',
type=int, default=0)
parser.add_argument('-k', '--api_key',
help='flickr API key', required=True)
parser.add_argument('-s', '--api_secret',
help='flickr API secret', required=True)
parser.add_argument('-u', '--username',
help='your flickr username', required=True)
exit(main(parser.parse_args()))
| 46.719626 | 114 | 0.595319 |
import argparse
import flickrapi
def auth_flickr(api_key, api_secret):
flickr = flickrapi.FlickrAPI(api_key, api_secret, format='parsed-json')
flickr.authenticate_via_browser(perms='write')
return flickr
def get_user_id(flickr, user):
user_data = flickr.people.findByUsername(username=user)
return user_data['user']['id']
def get_photoset_dict(flickr, user_id):
print('Gathering dictionary of all photos in all photosets...')
init_photoset = flickr.photosets.getList(user_id=user_id)
photoset_dict = dict()
set_num = 1
for set_page in range(1, init_photoset['photosets']['pages'] + 1):
photoset = flickr.photosets.getList(user_id=user_id, page=set_page)
for pset in photoset['photosets']['photoset']:
print('processing photoset %s of %s' %
(set_num, init_photoset['photosets']['total']))
set_num += 1
photoset_dict[pset['title']['_content']] = {pset['id']: []}
init_photoset_object = flickr.photosets.getPhotos(
photoset_id=pset['id'])
for photoset_page in range(1, init_photoset_object['photoset']['pages'] + 1):
photoset_object = flickr.photosets.getPhotos(
user_id=user_id, photoset_id=pset['id'], page=photoset_page)
photoset_dict[pset['title']['_content']][
pset['id']] += [p['id'] for p in photoset_object['photoset']['photo']]
return photoset_dict
def main(args):
flickr = auth_flickr(args.api_key, args.api_secret)
user_id = get_user_id(flickr, user=args.username)
album_dict = get_photoset_dict(flickr, user_id)
init_photos = flickr.people.getPhotos(user_id=user_id)
total = init_photos['photos']['total']
photo_num = args.initial_photo / 100 * 100 # TODO ensure less than total photos
if photo_num > total:
raise('Trying to start at photo %s but only %s total. Exiting.' %
(args.initial_photo, total))
init_page = args.initial_photo / 100 + 1 # 100 photos per page
for page_num in range(init_page, init_photos['photos']['pages'] + 1):
photo_batch = flickr.people.getPhotos(user_id=user_id, page=page_num)
for photo in photo_batch['photos']['photo']:
photo_num += 1
photo_id = photo['id']
print('processing photo %s of %s: %s' %
(photo_num, total, photo_id))
photo_data = flickr.photos.getExif(photo_id=photo_id)
camera_name = photo_data['photo']['camera']
if len(camera_name) > 0:
if camera_name not in album_dict.keys():
print('adding camera album "%s"' % camera_name)
new_set = flickr.photosets.create(title=camera_name,
primary_photo_id=photo_id,
description='All photos taken behind a %s' % camera_name)
album_dict[camera_name] = {
new_set['photoset']['id']: [photo_id]}
continue
elif photo_id not in [p for p in album_dict[camera_name].values()[0]]:
# if this photo is not in the appropriate album, add it
print('Adding photo to camera album.')
flickr.photosets.addPhoto(photoset_id=album_dict[
camera_name].keys()[0], photo_id=photo_id)
album_dict[camera_name].values()[0].append(photo_id)
else:
print('Photo is already in the appropriate set')
continue
else:
print('Skipping photo with insufficient metadata.')
if __name__ in '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--dry_run', action='store_true',
default=False, help="Verbose minus action. Default=False")
parser.add_argument('-i', '--initial_photo', help='approximate initial photo. Rounds down to nearest hundred',
type=int, default=0)
parser.add_argument('-k', '--api_key',
help='flickr API key', required=True)
parser.add_argument('-s', '--api_secret',
help='flickr API secret', required=True)
parser.add_argument('-u', '--username',
help='your flickr username', required=True)
exit(main(parser.parse_args()))
| true | true |
f7258c968558839fb8aa4aaba0600d037324ede1 | 2,204 | py | Python | cheritest/trunk/tests/alu/test_msub_ex.py | tupipa/beri | cef1b41d52592cfa7454ddf59f9f2994e447cd66 | [
"Apache-2.0"
] | 36 | 2015-05-29T16:47:19.000Z | 2022-02-08T21:16:26.000Z | cheritest/trunk/tests/alu/test_msub_ex.py | tupipa/beri | cef1b41d52592cfa7454ddf59f9f2994e447cd66 | [
"Apache-2.0"
] | 1 | 2015-10-14T13:05:21.000Z | 2015-10-19T20:34:03.000Z | cheritest/trunk/tests/alu/test_msub_ex.py | tupipa/beri | cef1b41d52592cfa7454ddf59f9f2994e447cd66 | [
"Apache-2.0"
] | 15 | 2015-06-11T07:10:58.000Z | 2021-06-18T05:14:54.000Z | #-
# Copyright (c) 2015 Michael Roe
# All rights reserved.
#
# This software was developed by the University of Cambridge Computer
# Laboratory as part of the Rigorous Engineering of Mainstream Systems (REMS)
# project, funded by EPSRC grant EP/K008528/1.
#
# @BERI_LICENSE_HEADER_START@
#
# Licensed to BERI Open Systems C.I.C. (BERI) under one or more contributor
# license agreements. See the NOTICE file distributed with this work for
# additional information regarding copyright ownership. BERI licenses this
# file to you under the BERI Hardware-Software License, Version 1.0 (the
# "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at:
#
# http://www.beri-open-systems.org/legal/license-1-0.txt
#
# Unless required by applicable law or agreed to in writing, Work distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# @BERI_LICENSE_HEADER_END@
#
from beritest_tools import BaseBERITestCase
from nose.plugins.attrib import attr
class test_msub_ex(BaseBERITestCase):
@attr('ignorebadex')
@attr('madd')
def test_msub_ex_1(self):
self.assertRegisterEqual(self.MIPS.a0, 0xfffffffffffffffa, "MSUB of a value that was not a valid sign extention of a 32-bit value gave an unexpected result.")
@attr('ignorebadex')
@attr('madd')
def test_msub_ex_2(self):
self.assertRegisterEqual(self.MIPS.a1, 0xffffffffffffffff, "MSUB of a value that was not a valid sign extention of a 32-bit value gave an unexpected result.")
@attr('ignorebadex')
@attr('madd')
def test_msub_ex_3(self):
self.assertRegisterEqual(self.MIPS.a2, 0xfffffffffffffffa, "MSUB of a value that was not a valid sign extention of a 32-bit value gave an unexpected result.")
@attr('ignorebadex')
@attr('madd')
def test_msub_ex_4(self):
self.assertRegisterEqual(self.MIPS.a3, 0xffffffffffffffff, "MSUB of a value that was not a valid sign extention of a 32-bit value gave an unexpected result.")
| 41.584906 | 166 | 0.748185 |
from beritest_tools import BaseBERITestCase
from nose.plugins.attrib import attr
class test_msub_ex(BaseBERITestCase):
@attr('ignorebadex')
@attr('madd')
def test_msub_ex_1(self):
self.assertRegisterEqual(self.MIPS.a0, 0xfffffffffffffffa, "MSUB of a value that was not a valid sign extention of a 32-bit value gave an unexpected result.")
@attr('ignorebadex')
@attr('madd')
def test_msub_ex_2(self):
self.assertRegisterEqual(self.MIPS.a1, 0xffffffffffffffff, "MSUB of a value that was not a valid sign extention of a 32-bit value gave an unexpected result.")
@attr('ignorebadex')
@attr('madd')
def test_msub_ex_3(self):
self.assertRegisterEqual(self.MIPS.a2, 0xfffffffffffffffa, "MSUB of a value that was not a valid sign extention of a 32-bit value gave an unexpected result.")
@attr('ignorebadex')
@attr('madd')
def test_msub_ex_4(self):
self.assertRegisterEqual(self.MIPS.a3, 0xffffffffffffffff, "MSUB of a value that was not a valid sign extention of a 32-bit value gave an unexpected result.")
| true | true |
f7258cce35cf6a9ce718c2c71ffc2151fcba6ee2 | 498 | py | Python | contentsummary/urls.py | Bobstin/itcsummary | 259d8f64e415a1c7cbc926752c717e307c09953f | [
"MIT"
] | null | null | null | contentsummary/urls.py | Bobstin/itcsummary | 259d8f64e415a1c7cbc926752c717e307c09953f | [
"MIT"
] | 5 | 2021-02-27T13:23:58.000Z | 2021-09-22T17:39:19.000Z | contentsummary/urls.py | Bobstin/itcsummary | 259d8f64e415a1c7cbc926752c717e307c09953f | [
"MIT"
] | null | null | null | from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^next/(?P<priornumber>[0-9]+)/$', views.nextSession, name='nextSession'),
url(r'^all/$', views.allSessions, name='allSessions'),
url(r'^allpt1/$', views.allSessionspt1, name='allSessionspt1'),
url(r'^allpt2/$', views.allSessionspt2, name='allSessionspt2'),
url(r'^singleSession/(?P<session_number>[0-9]+)/$', views.singleSession, name='singleSession'),
url(r'^$', views.example, name='example'),
] | 41.5 | 99 | 0.670683 | from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^next/(?P<priornumber>[0-9]+)/$', views.nextSession, name='nextSession'),
url(r'^all/$', views.allSessions, name='allSessions'),
url(r'^allpt1/$', views.allSessionspt1, name='allSessionspt1'),
url(r'^allpt2/$', views.allSessionspt2, name='allSessionspt2'),
url(r'^singleSession/(?P<session_number>[0-9]+)/$', views.singleSession, name='singleSession'),
url(r'^$', views.example, name='example'),
] | true | true |
f7258cd44fa3033315e86b9b007b80c34183601d | 71,980 | py | Python | nova/compute/resource_tracker.py | aspiers/nova | e8b6b0bc78ec229803d1d27f8a4706e2c425bd77 | [
"Apache-2.0"
] | 1 | 2021-12-27T00:47:30.000Z | 2021-12-27T00:47:30.000Z | nova/compute/resource_tracker.py | aspiers/nova | e8b6b0bc78ec229803d1d27f8a4706e2c425bd77 | [
"Apache-2.0"
] | null | null | null | nova/compute/resource_tracker.py | aspiers/nova | e8b6b0bc78ec229803d1d27f8a4706e2c425bd77 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Track resources like memory and disk for a compute host. Provides the
scheduler with useful information about availability through the ComputeNode
model.
"""
import collections
import copy
from keystoneauth1 import exceptions as ks_exc
import os_resource_classes as orc
from oslo_log import log as logging
from oslo_serialization import jsonutils
import retrying
from nova.compute import claims
from nova.compute import monitors
from nova.compute import stats as compute_stats
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
import nova.conf
from nova import exception
from nova.i18n import _
from nova import objects
from nova.objects import base as obj_base
from nova.objects import migration as migration_obj
from nova.pci import manager as pci_manager
from nova.pci import request as pci_request
from nova import rpc
from nova.scheduler.client import report
from nova import utils
from nova.virt import hardware
CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
# Name of the lock serializing every mutation of tracked resources; the
# claim/abort/update methods below all take it via @utils.synchronized.
COMPUTE_RESOURCE_SEMAPHORE = "compute_resources"
def _instance_in_resize_state(instance):
    """Return True if the instance is in one of the resizing states.

    Covers both an already-resized instance (vm_state RESIZED) and an
    ACTIVE/STOPPED instance whose task_state indicates an in-progress
    resize or rebuild.

    :param instance: `nova.objects.Instance` object
    """
    if instance.vm_state == vm_states.RESIZED:
        return True
    moving_tasks = task_states.resizing_states + task_states.rebuild_states
    return (instance.vm_state in (vm_states.ACTIVE, vm_states.STOPPED) and
            instance.task_state in moving_tasks)
def _is_trackable_migration(migration):
# Only look at resize/migrate migration and evacuation records
# NOTE(danms): RT should probably examine live migration
# records as well and do something smart. However, ignore
# those for now to avoid them being included in below calculations.
return migration.migration_type in ('resize', 'migration',
'evacuation')
def _normalize_inventory_from_cn_obj(inv_data, cn):
    """Helper function that injects various information from a compute node
    object into the inventory dict returned from the virt driver's
    get_inventory() method. This function allows us to marry information like
    *_allocation_ratio and reserved memory amounts that are in the
    compute_nodes DB table and that the virt driver doesn't know about with the
    information the virt driver *does* know about.

    Note that if the supplied inv_data contains allocation_ratio, reserved or
    other fields, we DO NOT override the value with that of the compute node.
    This is to ensure that the virt driver is the single source of truth
    regarding inventory information. For instance, the Ironic virt driver will
    always return a very specific inventory with allocation_ratios pinned to
    1.0.

    :param inv_data: Dict, keyed by resource class, of inventory information
                     returned from virt driver's get_inventory() method
    :param cn: `objects.ComputeNode` describing the compute node
    """
    def _fill_defaults(rc, allocation_ratio, reserved):
        # Only fill in values the virt driver did not supply; the driver
        # remains the single source of truth for anything it reports.
        inv = inv_data.get(rc)
        if inv is not None:
            inv.setdefault('allocation_ratio', allocation_ratio)
            inv.setdefault('reserved', reserved)

    _fill_defaults(orc.VCPU, cn.cpu_allocation_ratio,
                   CONF.reserved_host_cpus)
    _fill_defaults(orc.MEMORY_MB, cn.ram_allocation_ratio,
                   CONF.reserved_host_memory_mb)
    # TODO(johngarbutt) We should either move to reserved_host_disk_gb
    # or start tracking DISK_MB.
    reserved_disk_gb = compute_utils.convert_mb_to_ceil_gb(
        CONF.reserved_host_disk_mb)
    _fill_defaults(orc.DISK_GB, cn.disk_allocation_ratio, reserved_disk_gb)
class ResourceTracker(object):
"""Compute helper class for keeping track of resource usage as instances
are built and destroyed.
"""
def __init__(self, host, driver):
self.host = host
self.driver = driver
self.pci_tracker = None
# Dict of objects.ComputeNode objects, keyed by nodename
self.compute_nodes = {}
# Dict of Stats objects, keyed by nodename
self.stats = collections.defaultdict(compute_stats.Stats)
# Set of UUIDs of instances tracked on this host.
self.tracked_instances = set()
self.tracked_migrations = {}
self.is_bfv = {} # dict, keyed by instance uuid, to is_bfv boolean
monitor_handler = monitors.MonitorHandler(self)
self.monitors = monitor_handler.monitors
self.old_resources = collections.defaultdict(objects.ComputeNode)
self.reportclient = report.SchedulerReportClient()
self.ram_allocation_ratio = CONF.ram_allocation_ratio
self.cpu_allocation_ratio = CONF.cpu_allocation_ratio
self.disk_allocation_ratio = CONF.disk_allocation_ratio
    @utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
    def instance_claim(self, context, instance, nodename, limits=None):
        """Indicate that some resources are needed for an upcoming compute
        instance build operation.

        This should be called before the compute node is about to perform
        an instance build operation that will consume additional resources.

        :param context: security context
        :param instance: instance to reserve resources for.
        :type instance: nova.objects.instance.Instance object
        :param nodename: The Ironic nodename selected by the scheduler
        :param limits: Dict of oversubscription limits for memory, disk,
                       and CPUs.
        :returns: A Claim ticket representing the reserved resources. It can
                  be used to revert the resource usage if an error occurs
                  during the instance build.
        """
        if self.disabled(nodename):
            # instance_claim() was called before update_available_resource()
            # (which ensures that a compute node exists for nodename). We
            # shouldn't get here but in case we do, just set the instance's
            # host and nodename attribute (probably incorrect) and return a
            # NoopClaim.
            # TODO(jaypipes): Remove all the disabled junk from the resource
            # tracker. Servicegroup API-level active-checking belongs in the
            # nova-compute manager.
            self._set_instance_host_and_node(instance, nodename)
            return claims.NopClaim()
        # sanity checks: host/node must be unset until the claim succeeds;
        # a pre-populated value means the caller jumped ahead of this path.
        if instance.host:
            LOG.warning("Host field should not be set on the instance "
                        "until resources have been claimed.",
                        instance=instance)
        if instance.node:
            LOG.warning("Node field should not be set on the instance "
                        "until resources have been claimed.",
                        instance=instance)
        # get the overhead required to build this instance:
        overhead = self.driver.estimate_instance_overhead(instance)
        LOG.debug("Memory overhead for %(flavor)d MB instance; %(overhead)d "
                  "MB", {'flavor': instance.flavor.memory_mb,
                         'overhead': overhead['memory_mb']})
        LOG.debug("Disk overhead for %(flavor)d GB instance; %(overhead)d "
                  "GB", {'flavor': instance.flavor.root_gb,
                         'overhead': overhead.get('disk_gb', 0)})
        LOG.debug("CPU overhead for %(flavor)d vCPUs instance; %(overhead)d "
                  "vCPU(s)", {'flavor': instance.flavor.vcpus,
                              'overhead': overhead.get('vcpus', 0)})
        # disabled() returned False above, so update_available_resource()
        # has already created a ComputeNode record for this nodename.
        cn = self.compute_nodes[nodename]
        pci_requests = objects.InstancePCIRequests.get_by_instance_uuid(
            context, instance.uuid)
        # NOTE(review): constructing the Claim performs the actual resource
        # test against 'limits' (see nova.compute.claims); presumably raises
        # if the claim cannot be satisfied -- confirm against claims module.
        claim = claims.Claim(context, instance, nodename, self, cn,
                             pci_requests, overhead=overhead, limits=limits)
        # self._set_instance_host_and_node() will save instance to the DB
        # so set instance.numa_topology first. We need to make sure
        # that numa_topology is saved while under COMPUTE_RESOURCE_SEMAPHORE
        # so that the resource audit knows about any cpus we've pinned.
        instance_numa_topology = claim.claimed_numa_topology
        instance.numa_topology = instance_numa_topology
        self._set_instance_host_and_node(instance, nodename)
        if self.pci_tracker:
            # NOTE(jaypipes): ComputeNode.pci_device_pools is set below
            # in _update_usage_from_instance().
            self.pci_tracker.claim_instance(context, pci_requests,
                                            instance_numa_topology)
        # Mark resources in-use and update stats
        self._update_usage_from_instance(context, instance, nodename)
        elevated = context.elevated()
        # persist changes to the compute node (using an elevated context
        # for the DB update):
        self._update(elevated, cn)
        return claim
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
def rebuild_claim(self, context, instance, nodename, limits=None,
image_meta=None, migration=None):
"""Create a claim for a rebuild operation."""
instance_type = instance.flavor
return self._move_claim(context, instance, instance_type, nodename,
migration, move_type='evacuation',
limits=limits, image_meta=image_meta)
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
def resize_claim(self, context, instance, instance_type, nodename,
migration, image_meta=None, limits=None):
"""Create a claim for a resize or cold-migration move.
Note that this code assumes ``instance.new_flavor`` is set when
resizing with a new flavor.
"""
return self._move_claim(context, instance, instance_type, nodename,
migration, image_meta=image_meta,
limits=limits)
    def _move_claim(self, context, instance, new_instance_type, nodename,
                    migration, move_type=None, image_meta=None, limits=None):
        """Indicate that resources are needed for a move to this host.

        Move can be either a migrate/resize, live-migrate or an
        evacuate/rebuild operation.

        :param context: security context
        :param instance: instance object to reserve resources for
        :param new_instance_type: new instance_type being resized to
        :param nodename: The Ironic nodename selected by the scheduler
        :param image_meta: instance image metadata
        :param move_type: move type - can be one of 'migration', 'resize',
                          'live-migration', 'evacuate'
        :param limits: Dict of oversubscription limits for memory, disk,
                       and CPUs
        :param migration: A migration object if one was already created
                          elsewhere for this operation (otherwise None)
        :returns: A Claim ticket representing the reserved resources. It
                  should be used to finalize the resource claim or free the
                  resources after the compute operation is finished.
        """
        image_meta = image_meta or {}
        # Reuse the migration record created by the caller, or make one now
        # so the move is visible to the resource audit.
        if migration:
            self._claim_existing_migration(migration, nodename)
        else:
            migration = self._create_migration(context, instance,
                                               new_instance_type,
                                               nodename, move_type)
        if self.disabled(nodename):
            # compute_driver doesn't support resource tracking, just
            # generate the migration record and continue the resize:
            return claims.NopClaim(migration=migration)
        # get memory overhead required to build this instance:
        overhead = self.driver.estimate_instance_overhead(new_instance_type)
        LOG.debug("Memory overhead for %(flavor)d MB instance; %(overhead)d "
                  "MB", {'flavor': new_instance_type.memory_mb,
                         'overhead': overhead['memory_mb']})
        LOG.debug("Disk overhead for %(flavor)d GB instance; %(overhead)d "
                  "GB", {'flavor': instance.flavor.root_gb,
                         'overhead': overhead.get('disk_gb', 0)})
        LOG.debug("CPU overhead for %(flavor)d vCPUs instance; %(overhead)d "
                  "vCPU(s)", {'flavor': instance.flavor.vcpus,
                              'overhead': overhead.get('vcpus', 0)})
        cn = self.compute_nodes[nodename]
        # TODO(moshele): we are recreating the pci requests even if
        # there was no change on resize. This will cause allocating
        # the old/new pci device in the resize phase. In the future
        # we would like to optimise this.
        new_pci_requests = pci_request.get_pci_requests_from_flavor(
            new_instance_type)
        new_pci_requests.instance_uuid = instance.uuid
        # PCI requests come from two sources: instance flavor and
        # SR-IOV ports. SR-IOV ports pci_request don't have an alias_name.
        # On resize merge the SR-IOV ports pci_requests with the new
        # instance flavor pci_requests.
        if instance.pci_requests:
            for request in instance.pci_requests.requests:
                if request.alias_name is None:
                    new_pci_requests.requests.append(request)
        claim = claims.MoveClaim(context, instance, nodename,
                                 new_instance_type, image_meta, self, cn,
                                 new_pci_requests, overhead=overhead,
                                 limits=limits)
        # Attach the migration record to the returned claim.
        claim.migration = migration
        claimed_pci_devices_objs = []
        if self.pci_tracker:
            # NOTE(jaypipes): ComputeNode.pci_device_pools is set below
            # in _update_usage_from_instance().
            claimed_pci_devices_objs = self.pci_tracker.claim_instance(
                context, new_pci_requests, claim.claimed_numa_topology)
        claimed_pci_devices = objects.PciDeviceList(
            objects=claimed_pci_devices_objs)
        # TODO(jaypipes): Move claimed_numa_topology out of the Claim's
        # constructor flow so the Claim constructor only tests whether
        # resources can be claimed, not consume the resources directly.
        # Record old/new NUMA, PCI device and PCI request state for the
        # move so it can be swapped or reverted later.
        mig_context = objects.MigrationContext(
            context=context, instance_uuid=instance.uuid,
            migration_id=migration.id,
            old_numa_topology=instance.numa_topology,
            new_numa_topology=claim.claimed_numa_topology,
            old_pci_devices=instance.pci_devices,
            new_pci_devices=claimed_pci_devices,
            old_pci_requests=instance.pci_requests,
            new_pci_requests=new_pci_requests)
        instance.migration_context = mig_context
        # Persist the migration context before accounting usage below.
        instance.save()
        # Mark the resources in-use for the resize landing on this
        # compute host:
        self._update_usage_from_migration(context, instance, migration,
                                          nodename)
        elevated = context.elevated()
        self._update(elevated, cn)
        return claim
def _create_migration(self, context, instance, new_instance_type,
nodename, move_type=None):
"""Create a migration record for the upcoming resize. This should
be done while the COMPUTE_RESOURCES_SEMAPHORE is held so the resource
claim will not be lost if the audit process starts.
"""
migration = objects.Migration(context=context.elevated())
migration.dest_compute = self.host
migration.dest_node = nodename
migration.dest_host = self.driver.get_host_ip_addr()
migration.old_instance_type_id = instance.flavor.id
migration.new_instance_type_id = new_instance_type.id
migration.status = 'pre-migrating'
migration.instance_uuid = instance.uuid
migration.source_compute = instance.host
migration.source_node = instance.node
if move_type:
migration.migration_type = move_type
else:
migration.migration_type = migration_obj.determine_migration_type(
migration)
migration.create()
return migration
def _claim_existing_migration(self, migration, nodename):
"""Make an existing migration record count for resource tracking.
If a migration record was created already before the request made
it to this compute host, only set up the migration so it's included in
resource tracking. This should be done while the
COMPUTE_RESOURCES_SEMAPHORE is held.
"""
migration.dest_compute = self.host
migration.dest_node = nodename
migration.dest_host = self.driver.get_host_ip_addr()
migration.status = 'pre-migrating'
migration.save()
def _set_instance_host_and_node(self, instance, nodename):
"""Tag the instance as belonging to this host. This should be done
while the COMPUTE_RESOURCES_SEMAPHORE is held so the resource claim
will not be lost if the audit process starts.
"""
instance.host = self.host
instance.launched_on = self.host
instance.node = nodename
instance.save()
def _unset_instance_host_and_node(self, instance):
"""Untag the instance so it no longer belongs to the host.
This should be done while the COMPUTE_RESOURCES_SEMAPHORE is held so
the resource claim will not be lost if the audit process starts.
"""
instance.host = None
instance.node = None
instance.save()
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
def abort_instance_claim(self, context, instance, nodename):
"""Remove usage from the given instance."""
self._update_usage_from_instance(context, instance, nodename,
is_removed=True)
instance.clear_numa_topology()
self._unset_instance_host_and_node(instance)
self._update(context.elevated(), self.compute_nodes[nodename])
def _drop_pci_devices(self, instance, nodename, prefix):
if self.pci_tracker:
# free old/new allocated pci devices
pci_devices = self._get_migration_context_resource(
'pci_devices', instance, prefix=prefix)
if pci_devices:
for pci_device in pci_devices:
self.pci_tracker.free_device(pci_device, instance)
dev_pools_obj = self.pci_tracker.stats.to_device_pools_obj()
self.compute_nodes[nodename].pci_device_pools = dev_pools_obj
    @utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
    def drop_move_claim(self, context, instance, nodename,
                        instance_type=None, prefix='new_'):
        """Remove usage for an incoming/outgoing migration.

        :param context: Security context.
        :param instance: The instance whose usage is to be removed.
        :param nodename: Host on which to remove usage. If the migration
                         completed successfully, this is normally the source.
                         If it did not complete successfully (failed or
                         reverted), this is normally the destination.
        :param instance_type: The flavor that determines the usage to remove.
                              If the migration completed successfully, this is
                              the old flavor to be removed from the source. If
                              the migration did not complete successfully, this
                              is the new flavor to be removed from the
                              destination.
        :param prefix: Prefix to use when accessing migration context
                       attributes. 'old_' or 'new_', with 'new_' being the
                       default.
        """
        # Remove usage for an instance that is tracked in migrations, such as
        # on the dest node during revert resize.
        if instance['uuid'] in self.tracked_migrations:
            migration = self.tracked_migrations.pop(instance['uuid'])
            # Derive the flavor from the migration record when the caller
            # did not supply one explicitly.
            if not instance_type:
                instance_type = self._get_instance_type(instance, prefix,
                                                        migration)
        # Remove usage for an instance that is not tracked in migrations (such
        # as on the source node after a migration).
        # NOTE(lbeliveau): On resize on the same node, the instance is
        # included in both tracked_migrations and tracked_instances.
        elif instance['uuid'] in self.tracked_instances:
            self.tracked_instances.remove(instance['uuid'])
        # Only subtract usage when a flavor is known; otherwise the
        # bookkeeping above (untracking) is all we can do.
        if instance_type is not None:
            numa_topology = self._get_migration_context_resource(
                'numa_topology', instance, prefix=prefix)
            usage = self._get_usage_dict(
                instance_type, instance, numa_topology=numa_topology)
            self._drop_pci_devices(instance, nodename, prefix)
            self._update_usage(usage, nodename, sign=-1)
        # Persist the (possibly reduced) usage for this node.
        ctxt = context.elevated()
        self._update(ctxt, self.compute_nodes[nodename])
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
def update_usage(self, context, instance, nodename):
"""Update the resource usage and stats after a change in an
instance
"""
if self.disabled(nodename):
return
uuid = instance['uuid']
# don't update usage for this instance unless it submitted a resource
# claim first:
if uuid in self.tracked_instances:
self._update_usage_from_instance(context, instance, nodename)
self._update(context.elevated(), self.compute_nodes[nodename])
def disabled(self, nodename):
return (nodename not in self.compute_nodes or
not self.driver.node_is_available(nodename))
    def _check_for_nodes_rebalance(self, context, resources, nodename):
        """Check if nodes rebalance has happened.

        The ironic driver maintains a hash ring mapping bare metal nodes
        to compute nodes. If a compute dies, the hash ring is rebuilt, and
        some of its bare metal nodes (more precisely, those not in ACTIVE
        state) are assigned to other computes.

        This method checks for this condition and adjusts the database
        accordingly.

        :param context: security context
        :param resources: initial values
        :param nodename: node name
        :returns: True if a suitable compute node record was found, else False
        """
        if not self.driver.rebalances_nodes:
            return False

        # It's possible ironic just did a node re-balance, so let's
        # check if there is a compute node that already has the correct
        # hypervisor_hostname. We can re-use that rather than create a
        # new one and have to move existing placement allocations
        cn_candidates = objects.ComputeNodeList.get_by_hypervisor(
            context, nodename)

        if len(cn_candidates) == 1:
            cn = cn_candidates[0]
            LOG.info("ComputeNode %(name)s moving from %(old)s to %(new)s",
                     {"name": nodename, "old": cn.host, "new": self.host})
            # Take ownership of the record and rebuild the local caches,
            # mirroring what _init_compute_node does for an existing node.
            cn.host = self.host
            self.compute_nodes[nodename] = cn
            self._copy_resources(cn, resources)
            self._setup_pci_tracker(context, cn, resources)
            self._update(context, cn)
            return True
        elif len(cn_candidates) > 1:
            # Ambiguous: refuse to guess which record to adopt and ask
            # the operator to clean up duplicates.
            LOG.error(
                "Found more than one ComputeNode for nodename %s. "
                "Please clean up the orphaned ComputeNode records in your DB.",
                nodename)

        return False
    def _init_compute_node(self, context, resources):
        """Initialize the compute node if it does not already exist.

        The resource tracker will be inoperable if compute_node
        is not defined. The compute_node will remain undefined if
        we fail to create it or if there is no associated service
        registered.

        If this method has to create a compute node it needs initial
        values - these come from resources.

        :param context: security context
        :param resources: initial values
        :returns: True if a new compute_nodes table record was created,
            False otherwise
        """
        nodename = resources['hypervisor_hostname']

        # if there is already a compute node just use resources
        # to initialize
        if nodename in self.compute_nodes:
            cn = self.compute_nodes[nodename]
            self._copy_resources(cn, resources)
            self._setup_pci_tracker(context, cn, resources)
            return False

        # now try to get the compute node record from the
        # database. If we get one we use resources to initialize
        cn = self._get_compute_node(context, nodename)
        if cn:
            self.compute_nodes[nodename] = cn
            self._copy_resources(cn, resources)
            self._setup_pci_tracker(context, cn, resources)
            return False

        # A rebalanced (ironic) node may already have a record owned by
        # another host; adopt it instead of creating a duplicate.
        if self._check_for_nodes_rebalance(context, resources, nodename):
            return False

        # there was no local copy and none in the database
        # so we need to create a new compute node. This needs
        # to be initialized with resource values.
        cn = objects.ComputeNode(context)
        cn.host = self.host
        self._copy_resources(cn, resources, initial=True)
        self.compute_nodes[nodename] = cn
        cn.create()
        LOG.info('Compute node record created for '
                 '%(host)s:%(node)s with uuid: %(uuid)s',
                 {'host': self.host, 'node': nodename, 'uuid': cn.uuid})

        self._setup_pci_tracker(context, cn, resources)
        return True
    def _setup_pci_tracker(self, context, compute_node, resources):
        """Lazily create the PCI device tracker and publish PCI pools.

        On first call, instantiates a PciDevTracker bound to
        *compute_node*, seeds it from any 'pci_passthrough_devices' the
        virt driver reported (popping that key out of *resources*), and
        copies the resulting device pools onto the compute node. On
        subsequent calls (tracker already present) this is a no-op.
        """
        if not self.pci_tracker:
            n_id = compute_node.id
            self.pci_tracker = pci_manager.PciDevTracker(context, node_id=n_id)
            if 'pci_passthrough_devices' in resources:
                dev_json = resources.pop('pci_passthrough_devices')
                self.pci_tracker.update_devices_from_hypervisor_resources(
                        dev_json)

            dev_pools_obj = self.pci_tracker.stats.to_device_pools_obj()
            compute_node.pci_device_pools = dev_pools_obj
    def _copy_resources(self, compute_node, resources, initial=False):
        """Copy resource values to supplied compute_node.

        :param compute_node: ComputeNode object updated in place
        :param resources: resource snapshot from the virt driver
        :param initial: True when the compute node record is being created,
            selecting CONF.initial_*_allocation_ratio over the tracker's
            configured ratios
        """
        nodename = resources['hypervisor_hostname']
        stats = self.stats[nodename]
        # purge old stats and init with anything passed in by the driver
        # NOTE(danms): Preserve 'failed_builds' across the stats clearing,
        # as that is not part of resources
        # TODO(danms): Stop doing this when we get a column to store this
        # directly
        prev_failed_builds = stats.get('failed_builds', 0)
        stats.clear()
        stats['failed_builds'] = prev_failed_builds
        stats.digest_stats(resources.get('stats'))
        compute_node.stats = stats

        # Update the allocation ratios for the related ComputeNode object
        # but only if the configured values are not the default; the
        # ComputeNode._from_db_object method takes care of providing default
        # allocation ratios when the config is left at the default, so
        # we'll really end up with something like a
        # ComputeNode.cpu_allocation_ratio of 16.0. We want to avoid
        # resetting the ComputeNode fields to None because that will make
        # the _resource_change method think something changed when really it
        # didn't.
        # NOTE(yikun): The CONF.initial_(cpu|ram|disk)_allocation_ratio would
        # be used when we initialize the compute node object, that means the
        # ComputeNode.(cpu|ram|disk)_allocation_ratio will be set to
        # CONF.initial_(cpu|ram|disk)_allocation_ratio when initial flag is
        # True.
        for res in ('cpu', 'disk', 'ram'):
            attr = '%s_allocation_ratio' % res
            if initial:
                conf_alloc_ratio = getattr(CONF, 'initial_%s' % attr)
            else:
                conf_alloc_ratio = getattr(self, attr)
            # NOTE(yikun): In Stein version, we change the default value of
            # (cpu|ram|disk)_allocation_ratio from 0.0 to None, but we still
            # should allow 0.0 to keep compatibility, and this 0.0 condition
            # will be removed in the next version (T version).
            if conf_alloc_ratio not in (0.0, None):
                setattr(compute_node, attr, conf_alloc_ratio)

        # now copy rest to compute_node
        compute_node.update_from_virt_driver(resources)
def remove_node(self, nodename):
"""Handle node removal/rebalance.
Clean up any stored data about a compute node no longer
managed by this host.
"""
self.stats.pop(nodename, None)
self.compute_nodes.pop(nodename, None)
self.old_resources.pop(nodename, None)
def _get_host_metrics(self, context, nodename):
"""Get the metrics from monitors and
notify information to message bus.
"""
metrics = objects.MonitorMetricList()
metrics_info = {}
for monitor in self.monitors:
try:
monitor.populate_metrics(metrics)
except NotImplementedError:
LOG.debug("The compute driver doesn't support host "
"metrics for %(mon)s", {'mon': monitor})
except Exception as exc:
LOG.warning("Cannot get the metrics from %(mon)s; "
"error: %(exc)s",
{'mon': monitor, 'exc': exc})
# TODO(jaypipes): Remove this when compute_node.metrics doesn't need
# to be populated as a JSONified string.
metric_list = metrics.to_list()
if len(metric_list):
metrics_info['nodename'] = nodename
metrics_info['metrics'] = metric_list
metrics_info['host'] = self.host
metrics_info['host_ip'] = CONF.my_ip
notifier = rpc.get_notifier(service='compute', host=nodename)
notifier.info(context, 'compute.metrics.update', metrics_info)
compute_utils.notify_about_metrics_update(
context, self.host, CONF.my_ip, nodename, metrics)
return metric_list
def update_available_resource(self, context, nodename, startup=False):
"""Override in-memory calculations of compute node resource usage based
on data audited from the hypervisor layer.
Add in resource claims in progress to account for operations that have
declared a need for resources, but not necessarily retrieved them from
the hypervisor layer yet.
:param nodename: Temporary parameter representing the Ironic resource
node. This parameter will be removed once Ironic
baremetal resource nodes are handled like any other
resource in the system.
:param startup: Boolean indicating whether we're running this on
on startup (True) or periodic (False).
"""
LOG.debug("Auditing locally available compute resources for "
"%(host)s (node: %(node)s)",
{'node': nodename,
'host': self.host})
resources = self.driver.get_available_resource(nodename)
# NOTE(jaypipes): The resources['hypervisor_hostname'] field now
# contains a non-None value, even for non-Ironic nova-compute hosts. It
# is this value that will be populated in the compute_nodes table.
resources['host_ip'] = CONF.my_ip
# We want the 'cpu_info' to be None from the POV of the
# virt driver, but the DB requires it to be non-null so
# just force it to empty string
if "cpu_info" not in resources or resources["cpu_info"] is None:
resources["cpu_info"] = ''
self._verify_resources(resources)
self._report_hypervisor_resource_view(resources)
self._update_available_resource(context, resources, startup=startup)
def _pair_instances_to_migrations(self, migrations, instance_by_uuid):
for migration in migrations:
try:
migration.instance = instance_by_uuid[migration.instance_uuid]
except KeyError:
# NOTE(danms): If this happens, we don't set it here, and
# let the code either fail or lazy-load the instance later
# which is what happened before we added this optimization.
# NOTE(tdurakov) this situation is possible for resize/cold
# migration when migration is finished but haven't yet
# confirmed/reverted in that case instance already changed host
# to destination and no matching happens
LOG.debug('Migration for instance %(uuid)s refers to '
'another host\'s instance!',
{'uuid': migration.instance_uuid})
    @utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
    def _update_available_resource(self, context, resources, startup=False):
        """Synchronized worker behind update_available_resource().

        Rebuilds this node's usage from the instances and in-progress
        migrations recorded in the database, reconciles stale allocations
        and PCI usage, and persists the refreshed compute node record.

        :param context: security context
        :param resources: resource snapshot from the virt driver
        :param startup: True when running during compute service startup
        """
        # initialize the compute node object, creating it
        # if it does not already exist.
        is_new_compute_node = self._init_compute_node(context, resources)

        nodename = resources['hypervisor_hostname']

        # if we could not init the compute node the tracker will be
        # disabled and we should quit now
        if self.disabled(nodename):
            return

        # Grab all instances assigned to this node:
        instances = objects.InstanceList.get_by_host_and_node(
            context, self.host, nodename,
            expected_attrs=['system_metadata',
                            'numa_topology',
                            'flavor', 'migration_context'])

        # Now calculate usage based on instance utilization:
        instance_by_uuid = self._update_usage_from_instances(
            context, instances, nodename)

        # Grab all in-progress migrations:
        migrations = objects.MigrationList.get_in_progress_by_host_and_node(
            context, self.host, nodename)

        self._pair_instances_to_migrations(migrations, instance_by_uuid)
        self._update_usage_from_migrations(context, migrations, nodename)

        # A new compute node means there won't be a resource provider yet since
        # that would be created via the _update() call below, and if there is
        # no resource provider then there are no allocations against it.
        if not is_new_compute_node:
            self._remove_deleted_instances_allocations(
                context, self.compute_nodes[nodename], migrations,
                instance_by_uuid)

        # Detect and account for orphaned instances that may exist on the
        # hypervisor, but are not in the DB:
        orphans = self._find_orphaned_instances()
        self._update_usage_from_orphans(orphans, nodename)

        cn = self.compute_nodes[nodename]

        # NOTE(yjiang5): Because pci device tracker status is not cleared in
        # this periodic task, and also because the resource tracker is not
        # notified when instances are deleted, we need remove all usages
        # from deleted instances.
        self.pci_tracker.clean_usage(instances, migrations, orphans)
        dev_pools_obj = self.pci_tracker.stats.to_device_pools_obj()
        cn.pci_device_pools = dev_pools_obj

        self._report_final_resource_view(nodename)

        metrics = self._get_host_metrics(context, nodename)
        # TODO(pmurray): metrics should not be a json string in ComputeNode,
        # but it is. This should be changed in ComputeNode
        cn.metrics = jsonutils.dumps(metrics)

        # update the compute_node
        self._update(context, cn, startup=startup)
        LOG.debug('Compute_service record updated for %(host)s:%(node)s',
                  {'host': self.host, 'node': nodename})
def _get_compute_node(self, context, nodename):
"""Returns compute node for the host and nodename."""
try:
return objects.ComputeNode.get_by_host_and_nodename(
context, self.host, nodename)
except exception.NotFound:
LOG.warning("No compute node record for %(host)s:%(node)s",
{'host': self.host, 'node': nodename})
def _report_hypervisor_resource_view(self, resources):
"""Log the hypervisor's view of free resources.
This is just a snapshot of resource usage recorded by the
virt driver.
The following resources are logged:
- free memory
- free disk
- free CPUs
- assignable PCI devices
"""
nodename = resources['hypervisor_hostname']
free_ram_mb = resources['memory_mb'] - resources['memory_mb_used']
free_disk_gb = resources['local_gb'] - resources['local_gb_used']
vcpus = resources['vcpus']
if vcpus:
free_vcpus = vcpus - resources['vcpus_used']
else:
free_vcpus = 'unknown'
pci_devices = resources.get('pci_passthrough_devices')
LOG.debug("Hypervisor/Node resource view: "
"name=%(node)s "
"free_ram=%(free_ram)sMB "
"free_disk=%(free_disk)sGB "
"free_vcpus=%(free_vcpus)s "
"pci_devices=%(pci_devices)s",
{'node': nodename,
'free_ram': free_ram_mb,
'free_disk': free_disk_gb,
'free_vcpus': free_vcpus,
'pci_devices': pci_devices})
def _report_final_resource_view(self, nodename):
"""Report final calculate of physical memory, used virtual memory,
disk, usable vCPUs, used virtual CPUs and PCI devices,
including instance calculations and in-progress resource claims. These
values will be exposed via the compute node table to the scheduler.
"""
cn = self.compute_nodes[nodename]
vcpus = cn.vcpus
if vcpus:
tcpu = vcpus
ucpu = cn.vcpus_used
LOG.debug("Total usable vcpus: %(tcpu)s, "
"total allocated vcpus: %(ucpu)s",
{'tcpu': vcpus,
'ucpu': ucpu})
else:
tcpu = 0
ucpu = 0
pci_stats = (list(cn.pci_device_pools) if
cn.pci_device_pools else [])
LOG.debug("Final resource view: "
"name=%(node)s "
"phys_ram=%(phys_ram)sMB "
"used_ram=%(used_ram)sMB "
"phys_disk=%(phys_disk)sGB "
"used_disk=%(used_disk)sGB "
"total_vcpus=%(total_vcpus)s "
"used_vcpus=%(used_vcpus)s "
"pci_stats=%(pci_stats)s",
{'node': nodename,
'phys_ram': cn.memory_mb,
'used_ram': cn.memory_mb_used,
'phys_disk': cn.local_gb,
'used_disk': cn.local_gb_used,
'total_vcpus': tcpu,
'used_vcpus': ucpu,
'pci_stats': pci_stats})
def _resource_change(self, compute_node):
"""Check to see if any resources have changed."""
nodename = compute_node.hypervisor_hostname
old_compute = self.old_resources[nodename]
if not obj_base.obj_equal_prims(
compute_node, old_compute, ['updated_at']):
self.old_resources[nodename] = copy.deepcopy(compute_node)
return True
return False
def _get_traits(self, nodename, provider_tree):
# Get the traits from the ProviderTree which will be the set
# of virt-owned traits plus any externally defined traits set
# on the provider that aren't owned by the virt driver.
traits = provider_tree.data(nodename).traits
# Now get the driver's capabilities and add any supported
# traits that are missing, and remove any existing set traits
# that are not currently supported.
for trait, supported in self.driver.capabilities_as_traits().items():
if supported:
traits.add(trait)
elif trait in traits:
traits.remove(trait)
return list(traits)
    @retrying.retry(stop_max_attempt_number=4,
                    retry_on_exception=lambda e: isinstance(
                        e, exception.ResourceProviderUpdateConflict))
    def _update_to_placement(self, context, compute_node, startup):
        """Send resource and inventory changes to placement.

        Retried up to 4 times on ResourceProviderUpdateConflict, which
        signals a stale provider generation that a fresh read of the
        provider tree resolves.

        :param context: security context
        :param compute_node: ComputeNode whose inventory is being reported
        :param startup: True during compute service startup; enables the
            ReshapeNeeded migration path instead of raising it
        """
        # NOTE(jianghuaw): Some resources(e.g. VGPU) are not saved in the
        # object of compute_node; instead the inventory data for these
        # resource is reported by driver's get_inventory(). So even there
        # is no resource change for compute_node as above, we need proceed
        # to get inventory and use report client interfaces to update
        # inventory to placement. It's report client's responsibility to
        # ensure the update request to placement only happens when inventory
        # is changed.
        nodename = compute_node.hypervisor_hostname
        # Persist the stats to the Scheduler
        # First try update_provider_tree
        # Retrieve the provider tree associated with this compute node. If
        # it doesn't exist yet, this will create it with a (single, root)
        # provider corresponding to the compute node.
        prov_tree = self.reportclient.get_provider_tree_and_ensure_root(
            context, compute_node.uuid, name=compute_node.hypervisor_hostname)
        # Let the virt driver rearrange the provider tree and set/update
        # the inventory, traits, and aggregates throughout.
        allocs = None
        try:
            try:
                self.driver.update_provider_tree(prov_tree, nodename)
            except exception.ReshapeNeeded:
                if not startup:
                    # This isn't supposed to happen during periodic, so raise
                    # it up; the compute manager will treat it specially.
                    raise
                LOG.info("Performing resource provider inventory and "
                         "allocation data migration during compute service "
                         "startup or fast-forward upgrade.")
                # Re-drive update_provider_tree with the current allocations
                # so the driver can move them to the reshaped providers.
                allocs = self.reportclient.get_allocations_for_provider_tree(
                    context, nodename)
                self.driver.update_provider_tree(prov_tree, nodename,
                                                 allocations=allocs)
            # Inject driver capabilities traits into the provider
            # tree. We need to determine the traits that the virt
            # driver owns - so those that come from the tree itself
            # (via the virt driver) plus the compute capabilities
            # traits, and then merge those with the traits set
            # externally that the driver does not own - and remove any
            # set on the provider externally that the virt owns but
            # aren't in the current list of supported traits. For
            # example, let's say we reported multiattach support as a
            # trait at t1 and then at t2 it's not, so we need to
            # remove it. But at both t1 and t2 there is a
            # CUSTOM_VENDOR_TRAIT_X which we can't touch because it
            # was set externally on the provider.
            traits = self._get_traits(nodename, provider_tree=prov_tree)
            prov_tree.update_traits(nodename, traits)
        except NotImplementedError:
            # update_provider_tree isn't implemented yet - try get_inventory
            try:
                inv_data = self.driver.get_inventory(nodename)
                _normalize_inventory_from_cn_obj(inv_data, compute_node)
            except NotImplementedError:
                # Eventually all virt drivers will return an inventory dict in
                # the format that the placement API expects and we'll be able
                # to remove this code branch
                inv_data = compute_utils.compute_node_to_inventory_dict(
                    compute_node)
            prov_tree.update_inventory(nodename, inv_data)
        # Flush any changes. If we processed ReshapeNeeded above, allocs is not
        # None, and this will hit placement's POST /reshaper route.
        self.reportclient.update_from_provider_tree(context, prov_tree,
                                                    allocations=allocs)
def _update(self, context, compute_node, startup=False):
"""Update partial stats locally and populate them to Scheduler."""
if self._resource_change(compute_node):
# If the compute_node's resource changed, update to DB.
# NOTE(jianghuaw): Once we completely move to use get_inventory()
# for all resource provider's inv data. We can remove this check.
# At the moment we still need this check and save compute_node.
compute_node.save()
self._update_to_placement(context, compute_node, startup)
if self.pci_tracker:
self.pci_tracker.save(context)
def _update_usage(self, usage, nodename, sign=1):
mem_usage = usage['memory_mb']
disk_usage = usage.get('root_gb', 0)
vcpus_usage = usage.get('vcpus', 0)
overhead = self.driver.estimate_instance_overhead(usage)
mem_usage += overhead['memory_mb']
disk_usage += overhead.get('disk_gb', 0)
vcpus_usage += overhead.get('vcpus', 0)
cn = self.compute_nodes[nodename]
cn.memory_mb_used += sign * mem_usage
cn.local_gb_used += sign * disk_usage
cn.local_gb_used += sign * usage.get('ephemeral_gb', 0)
cn.local_gb_used += sign * usage.get('swap', 0) / 1024
cn.vcpus_used += sign * vcpus_usage
# free ram and disk may be negative, depending on policy:
cn.free_ram_mb = cn.memory_mb - cn.memory_mb_used
cn.free_disk_gb = cn.local_gb - cn.local_gb_used
stats = self.stats[nodename]
cn.running_vms = stats.num_instances
# Calculate the numa usage
free = sign == -1
updated_numa_topology = hardware.get_host_numa_usage_from_instance(
cn, usage, free)
cn.numa_topology = updated_numa_topology
def _get_migration_context_resource(self, resource, instance,
prefix='new_'):
migration_context = instance.migration_context
resource = prefix + resource
if migration_context and resource in migration_context:
return getattr(migration_context, resource)
return None
    def _update_usage_from_migration(self, context, instance, migration,
                                     nodename):
        """Update usage for a single migration. The record may
        represent an incoming or outbound migration.

        :param context: security context
        :param instance: instance the migration belongs to
        :param migration: Migration record being accounted
        :param nodename: node on this host to apply the usage to
        """
        if not _is_trackable_migration(migration):
            return

        uuid = migration.instance_uuid
        LOG.info("Updating resource usage from migration %s", migration.uuid,
                 instance_uuid=uuid)

        # Classify which side(s) of the migration this host/node plays.
        incoming = (migration.dest_compute == self.host and
                    migration.dest_node == nodename)
        outbound = (migration.source_compute == self.host and
                    migration.source_node == nodename)
        same_node = (incoming and outbound)

        tracked = uuid in self.tracked_instances
        itype = None
        numa_topology = None
        sign = 0
        if same_node:
            # Same node resize. Record usage for the 'new_' resources. This
            # is executed on resize_claim().
            if (instance['instance_type_id'] ==
                    migration.old_instance_type_id):
                itype = self._get_instance_type(instance, 'new_', migration)
                numa_topology = self._get_migration_context_resource(
                    'numa_topology', instance)
                # Allocate pci device(s) for the instance.
                sign = 1
            else:
                # The instance is already set to the new flavor (this is done
                # by the compute manager on finish_resize()), hold space for a
                # possible revert to the 'old_' resources.
                # NOTE(lbeliveau): When the periodic audit timer gets
                # triggered, the compute usage gets reset. The usage for an
                # instance that is migrated to the new flavor but not yet
                # confirmed/reverted will first get accounted for by
                # _update_usage_from_instances(). This method will then be
                # called, and we need to account for the '_old' resources
                # (just in case).
                itype = self._get_instance_type(instance, 'old_', migration)
                numa_topology = self._get_migration_context_resource(
                    'numa_topology', instance, prefix='old_')
        elif incoming and not tracked:
            # instance has not yet migrated here:
            itype = self._get_instance_type(instance, 'new_', migration)
            numa_topology = self._get_migration_context_resource(
                'numa_topology', instance)
            # Allocate pci device(s) for the instance.
            sign = 1
            LOG.debug('Starting to track incoming migration %s with flavor %s',
                      migration.uuid, itype.flavorid, instance=instance)
        elif outbound and not tracked:
            # instance migrated, but record usage for a possible revert:
            itype = self._get_instance_type(instance, 'old_', migration)
            numa_topology = self._get_migration_context_resource(
                'numa_topology', instance, prefix='old_')
            LOG.debug('Starting to track outgoing migration %s with flavor %s',
                      migration.uuid, itype.flavorid, instance=instance)

        # A flavor was resolved above only when this migration actually
        # needs accounting on this node; otherwise fall through untracked.
        if itype:
            cn = self.compute_nodes[nodename]
            usage = self._get_usage_dict(
                itype, instance, numa_topology=numa_topology)
            # sign == 1 means the PCI devices are being claimed here.
            if self.pci_tracker and sign:
                self.pci_tracker.update_pci_for_instance(
                    context, instance, sign=sign)
            self._update_usage(usage, nodename)
            if self.pci_tracker:
                obj = self.pci_tracker.stats.to_device_pools_obj()
                cn.pci_device_pools = obj
            else:
                obj = objects.PciDevicePoolList()
                cn.pci_device_pools = obj
            self.tracked_migrations[uuid] = migration
    def _update_usage_from_migrations(self, context, migrations, nodename):
        """Account usage for the in-progress migrations touching this node.

        Filters out migrations whose instance is gone or not in a resize
        state, deduplicates to the most recently updated migration per
        instance, marks migrations that disagree with the instance's
        migration context as errored, and delegates per-record accounting
        to _update_usage_from_migration().
        """
        filtered = {}
        instances = {}
        self.tracked_migrations.clear()

        # do some defensive filtering against bad migrations records in the
        # database:
        for migration in migrations:
            uuid = migration.instance_uuid

            try:
                if uuid not in instances:
                    # Lazy-loads the instance when it wasn't paired by
                    # _pair_instances_to_migrations().
                    instances[uuid] = migration.instance
            except exception.InstanceNotFound as e:
                # migration referencing deleted instance
                LOG.debug('Migration instance not found: %s', e)
                continue

            # skip migration if instance isn't in a resize state:
            if not _instance_in_resize_state(instances[uuid]):
                LOG.warning("Instance not resizing, skipping migration.",
                            instance_uuid=uuid)
                continue

            # filter to most recently updated migration for each instance:
            other_migration = filtered.get(uuid, None)
            # NOTE(claudiub): In Python 3, you cannot compare NoneTypes.
            if other_migration:
                om = other_migration
                other_time = om.updated_at or om.created_at
                migration_time = migration.updated_at or migration.created_at
                if migration_time > other_time:
                    filtered[uuid] = migration
            else:
                filtered[uuid] = migration

        for migration in filtered.values():
            instance = instances[migration.instance_uuid]
            # Skip migration (and mark it as error) if it doesn't match the
            # instance migration id.
            # This can happen if we have a stale migration record.
            # We want to proceed if instance.migration_context is None
            if (instance.migration_context is not None and
                    instance.migration_context.migration_id != migration.id):
                LOG.info("Current instance migration %(im)s doesn't match "
                         "migration %(m)s, marking migration as error. "
                         "This can occur if a previous migration for this "
                         "instance did not complete.",
                         {'im': instance.migration_context.migration_id,
                          'm': migration.id})
                migration.status = "error"
                migration.save()
                continue

            try:
                self._update_usage_from_migration(context, instance, migration,
                                                  nodename)
            except exception.FlavorNotFound:
                LOG.warning("Flavor could not be found, skipping migration.",
                            instance_uuid=instance.uuid)
                continue
    def _update_usage_from_instance(self, context, instance, nodename,
                                    is_removed=False):
        """Update usage for a single instance.

        :param context: security context
        :param instance: instance (dict-like) whose usage is applied
        :param nodename: node on this host the instance is tracked against
        :param is_removed: force treating the instance as removed (e.g.
            when aborting a claim), regardless of its vm_state
        """
        uuid = instance['uuid']
        is_new_instance = uuid not in self.tracked_instances
        # NOTE(sfinucan): Both brand new instances as well as instances that
        # are being unshelved will have is_new_instance == True
        is_removed_instance = not is_new_instance and (is_removed or
            instance['vm_state'] in vm_states.ALLOW_RESOURCE_REMOVAL)

        if is_new_instance:
            self.tracked_instances.add(uuid)
            sign = 1

        if is_removed_instance:
            self.tracked_instances.remove(uuid)
            sign = -1

        cn = self.compute_nodes[nodename]
        stats = self.stats[nodename]
        stats.update_stats_for_instance(instance, is_removed_instance)
        cn.stats = stats

        # if it's a new or deleted instance:
        if is_new_instance or is_removed_instance:
            if self.pci_tracker:
                self.pci_tracker.update_pci_for_instance(context,
                                                         instance,
                                                         sign=sign)

            # new instance, update compute node resource usage:
            self._update_usage(self._get_usage_dict(instance, instance),
                               nodename, sign=sign)

        # Stop tracking removed instances in the is_bfv cache. This needs to
        # happen *after* calling _get_usage_dict() since that relies on the
        # is_bfv cache.
        if is_removed_instance and uuid in self.is_bfv:
            del self.is_bfv[uuid]

        cn.current_workload = stats.calculate_workload()
        if self.pci_tracker:
            obj = self.pci_tracker.stats.to_device_pools_obj()
            cn.pci_device_pools = obj
        else:
            cn.pci_device_pools = objects.PciDevicePoolList()
    def _update_usage_from_instances(self, context, instances, nodename):
        """Calculate resource usage based on instance utilization. This is
        different than the hypervisor's view as it will account for all
        instances assigned to the local compute host, even if they are not
        currently powered on.

        :returns: dict mapping instance uuid -> instance for every instance
            in *instances*
        """
        self.tracked_instances.clear()
        cn = self.compute_nodes[nodename]
        # set some initial values, reserve room for host/hypervisor:
        cn.local_gb_used = CONF.reserved_host_disk_mb / 1024
        cn.memory_mb_used = CONF.reserved_host_memory_mb
        cn.vcpus_used = CONF.reserved_host_cpus
        cn.free_ram_mb = (cn.memory_mb - cn.memory_mb_used)
        cn.free_disk_gb = (cn.local_gb - cn.local_gb_used)
        cn.current_workload = 0
        cn.running_vms = 0

        instance_by_uuid = {}
        for instance in instances:
            # Instances in ALLOW_RESOURCE_REMOVAL states contribute no
            # usage, but are still included in the returned mapping.
            if instance.vm_state not in vm_states.ALLOW_RESOURCE_REMOVAL:
                self._update_usage_from_instance(context, instance, nodename)
            instance_by_uuid[instance.uuid] = instance
        return instance_by_uuid
    def _remove_deleted_instances_allocations(self, context, cn,
                                              migrations, instance_by_uuid):
        """Delete stale placement allocations against this compute node.

        Walks every allocation recorded against this node's resource
        provider; allocations for instances that are deleted are removed,
        while other mismatches (moved instances, unscheduled instances,
        untracked-but-local instances) are only logged.

        :param context: security context
        :param cn: ComputeNode object for this host/node
        :param migrations: in-progress migrations involving this node
        :param instance_by_uuid: uuid -> Instance map of instances assigned
            to this node (from _update_usage_from_instances)
        """
        # Migration records without a uuid cannot be matched to consumers.
        migration_uuids = [migration.uuid for migration in migrations
                           if 'uuid' in migration]
        # NOTE(jaypipes): All of this code sucks. It's basically dealing with
        # all the corner cases in move, local delete, unshelve and rebuild
        # operations for when allocations should be deleted when things didn't
        # happen according to the normal flow of events where the scheduler
        # always creates allocations for an instance
        try:
            # pai: report.ProviderAllocInfo namedtuple
            pai = self.reportclient.get_allocations_for_resource_provider(
                context, cn.uuid)
        except (exception.ResourceProviderAllocationRetrievalFailed,
                ks_exc.ClientException) as e:
            # Best effort: a placement hiccup only skips this cleanup pass.
            LOG.error("Skipping removal of allocations for deleted instances: "
                      "%s", e)
            return
        allocations = pai.allocations
        if not allocations:
            # The main loop below would short-circuit anyway, but this saves us
            # the (potentially expensive) context.elevated construction below.
            return
        # Deleted instances can only be loaded with read_deleted='yes'.
        read_deleted_context = context.elevated(read_deleted='yes')
        for consumer_uuid, alloc in allocations.items():
            if consumer_uuid in self.tracked_instances:
                LOG.debug("Instance %s actively managed on this compute host "
                          "and has allocations in placement: %s.",
                          consumer_uuid, alloc)
                continue
            if consumer_uuid in migration_uuids:
                LOG.debug("Migration %s is active on this compute host "
                          "and has allocations in placement: %s.",
                          consumer_uuid, alloc)
                continue
            # We know these are instances now, so proceed
            instance_uuid = consumer_uuid
            instance = instance_by_uuid.get(instance_uuid)
            if not instance:
                try:
                    instance = objects.Instance.get_by_uuid(
                        read_deleted_context, consumer_uuid,
                        expected_attrs=[])
                except exception.InstanceNotFound:
                    # The instance isn't even in the database. Either the
                    # scheduler _just_ created an allocation for it and we're
                    # racing with the creation in the cell database, or the
                    # instance was deleted and fully archived before we got a
                    # chance to run this. The former is far more likely than
                    # the latter. Avoid deleting allocations for a building
                    # instance here.
                    LOG.info("Instance %(uuid)s has allocations against this "
                             "compute host but is not found in the database.",
                             {'uuid': instance_uuid},
                             exc_info=False)
                    continue
            if instance.deleted:
                # The instance is gone, so we definitely want to remove
                # allocations associated with it.
                # NOTE(jaypipes): This will not be true if/when we support
                # cross-cell migrations...
                LOG.debug("Instance %s has been deleted (perhaps locally). "
                          "Deleting allocations that remained for this "
                          "instance against this compute host: %s.",
                          instance_uuid, alloc)
                self.reportclient.delete_allocation_for_instance(context,
                                                                 instance_uuid)
                continue
            if not instance.host:
                # Allocations related to instances being scheduled should not
                # be deleted if we already wrote the allocation previously.
                LOG.debug("Instance %s has been scheduled to this compute "
                          "host, the scheduler has made an allocation "
                          "against this compute node but the instance has "
                          "yet to start. Skipping heal of allocation: %s.",
                          instance_uuid, alloc)
                continue
            if (instance.host == cn.host and
                    instance.node == cn.hypervisor_hostname):
                # The instance is supposed to be on this compute host but is
                # not in the list of actively managed instances.
                LOG.warning("Instance %s is not being actively managed by "
                            "this compute host but has allocations "
                            "referencing this compute host: %s. Skipping "
                            "heal of allocation because we do not know "
                            "what to do.", instance_uuid, alloc)
                continue
            if instance.host != cn.host:
                # The instance has been moved to another host either via a
                # migration, evacuation or unshelve in between the time when we
                # ran InstanceList.get_by_host_and_node(), added those
                # instances to RT.tracked_instances and the above
                # Instance.get_by_uuid() call. We SHOULD attempt to remove any
                # allocations that reference this compute host if the VM is in
                # a stable terminal state (i.e. it isn't in a state of waiting
                # for resize to confirm/revert), however if the destination
                # host is an Ocata compute host, it will delete the allocation
                # that contains this source compute host information anyway and
                # recreate an allocation that only refers to itself. So we
                # don't need to do anything in that case. Just log the
                # situation here for information but don't attempt to delete or
                # change the allocation.
                LOG.warning("Instance %s has been moved to another host "
                            "%s(%s). There are allocations remaining against "
                            "the source host that might need to be removed: "
                            "%s.",
                            instance_uuid, instance.host, instance.node, alloc)
def delete_allocation_for_evacuated_instance(self, context, instance, node,
node_type='source'):
# Clean up the instance allocation from this node in placement
cn_uuid = self.compute_nodes[node].uuid
if not self.reportclient.remove_provider_tree_from_instance_allocation(
context, instance.uuid, cn_uuid):
LOG.error("Failed to clean allocation of evacuated "
"instance on the %s node %s",
node_type, cn_uuid, instance=instance)
def _find_orphaned_instances(self):
"""Given the set of instances and migrations already account for
by resource tracker, sanity check the hypervisor to determine
if there are any "orphaned" instances left hanging around.
Orphans could be consuming memory and should be accounted for in
usage calculations to guard against potential out of memory
errors.
"""
uuids1 = frozenset(self.tracked_instances)
uuids2 = frozenset(self.tracked_migrations.keys())
uuids = uuids1 | uuids2
usage = self.driver.get_per_instance_usage()
vuuids = frozenset(usage.keys())
orphan_uuids = vuuids - uuids
orphans = [usage[uuid] for uuid in orphan_uuids]
return orphans
def _update_usage_from_orphans(self, orphans, nodename):
"""Include orphaned instances in usage."""
for orphan in orphans:
memory_mb = orphan['memory_mb']
LOG.warning("Detected running orphan instance: %(uuid)s "
"(consuming %(memory_mb)s MB memory)",
{'uuid': orphan['uuid'], 'memory_mb': memory_mb})
# just record memory usage for the orphan
usage = {'memory_mb': memory_mb}
self._update_usage(usage, nodename)
def delete_allocation_for_shelve_offloaded_instance(self, context,
instance):
self.reportclient.delete_allocation_for_instance(context,
instance.uuid)
def _verify_resources(self, resources):
resource_keys = ["vcpus", "memory_mb", "local_gb", "cpu_info",
"vcpus_used", "memory_mb_used", "local_gb_used",
"numa_topology"]
missing_keys = [k for k in resource_keys if k not in resources]
if missing_keys:
reason = _("Missing keys: %s") % missing_keys
raise exception.InvalidInput(reason=reason)
def _get_instance_type(self, instance, prefix, migration):
"""Get the instance type from instance."""
stashed_flavors = migration.migration_type in ('resize',)
if stashed_flavors:
return getattr(instance, '%sflavor' % prefix)
else:
# NOTE(ndipanov): Certain migration types (all but resize)
# do not change flavors so there is no need to stash
# them. In that case - just get the instance flavor.
return instance.flavor
    def _get_usage_dict(self, object_or_dict, instance, **updates):
        """Make a usage dict _update methods expect.
        Accepts a dict or an Instance or Flavor object, and a set of updates.
        Converts the object to a dict and applies the updates.
        :param object_or_dict: instance or flavor as an object or just a dict
        :param instance: nova.objects.Instance for the related operation; this
                         is needed to determine if the instance is
                         volume-backed
        :param updates: key-value pairs to update the passed object.
                        Currently only considers 'numa_topology', all other
                        keys are ignored.
        :returns: a dict with all the information from object_or_dict updated
                  with updates
        """
        def _is_bfv():
            # Check to see if we have the is_bfv value cached; the lookup
            # can be expensive, so results are memoized per instance uuid.
            if instance.uuid in self.is_bfv:
                is_bfv = self.is_bfv[instance.uuid]
            else:
                is_bfv = compute_utils.is_volume_backed_instance(
                    instance._context, instance)
                self.is_bfv[instance.uuid] = is_bfv
            return is_bfv
        usage = {}
        if isinstance(object_or_dict, objects.Instance):
            # Boot-from-volume instances consume no local root disk.
            is_bfv = _is_bfv()
            usage = {'memory_mb': object_or_dict.flavor.memory_mb,
                     'swap': object_or_dict.flavor.swap,
                     'vcpus': object_or_dict.flavor.vcpus,
                     'root_gb': (0 if is_bfv else
                                 object_or_dict.flavor.root_gb),
                     'ephemeral_gb': object_or_dict.flavor.ephemeral_gb,
                     'numa_topology': object_or_dict.numa_topology}
        elif isinstance(object_or_dict, objects.Flavor):
            usage = obj_base.obj_to_primitive(object_or_dict)
            if _is_bfv():
                usage['root_gb'] = 0
        else:
            # Already a plain dict; copy it into the result as-is.
            usage.update(object_or_dict)
        for key in ('numa_topology',):
            if key in updates:
                usage[key] = updates[key]
        return usage
def build_failed(self, nodename):
"""Increments the failed_builds stats for the given node."""
self.stats[nodename].build_failed()
def build_succeeded(self, nodename):
"""Resets the failed_builds stats for the given node."""
self.stats[nodename].build_succeeded()
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
def claim_pci_devices(self, context, pci_requests):
"""Claim instance PCI resources
:param context: security context
:param pci_requests: a list of nova.objects.InstancePCIRequests
:returns: a list of nova.objects.PciDevice objects
"""
result = self.pci_tracker.claim_instance(
context, pci_requests, None)
self.pci_tracker.save(context)
return result
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
def allocate_pci_devices_for_instance(self, context, instance):
"""Allocate instance claimed PCI resources
:param context: security context
:param instance: instance object
"""
self.pci_tracker.allocate_instance(instance)
self.pci_tracker.save(context)
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
def free_pci_device_allocations_for_instance(self, context, instance):
"""Free instance allocated PCI resources
:param context: security context
:param instance: instance object
"""
self.pci_tracker.free_instance_allocations(context, instance)
self.pci_tracker.save(context)
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
def free_pci_device_claims_for_instance(self, context, instance):
"""Free instance claimed PCI resources
:param context: security context
:param instance: instance object
"""
self.pci_tracker.free_instance_claims(context, instance)
self.pci_tracker.save(context)
| 46.408769 | 79 | 0.621367 |
import collections
import copy
from keystoneauth1 import exceptions as ks_exc
import os_resource_classes as orc
from oslo_log import log as logging
from oslo_serialization import jsonutils
import retrying
from nova.compute import claims
from nova.compute import monitors
from nova.compute import stats as compute_stats
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
import nova.conf
from nova import exception
from nova.i18n import _
from nova import objects
from nova.objects import base as obj_base
from nova.objects import migration as migration_obj
from nova.pci import manager as pci_manager
from nova.pci import request as pci_request
from nova import rpc
from nova.scheduler.client import report
from nova import utils
from nova.virt import hardware
# Global nova configuration handle and module logger.
CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
# Name of the lock that serializes all resource-claim bookkeeping in this
# module (see the @utils.synchronized decorators below).
COMPUTE_RESOURCE_SEMAPHORE = "compute_resources"
def _instance_in_resize_state(instance):
    """Return True when *instance* is mid-resize or mid-rebuild.

    True for RESIZED vm_state, or for ACTIVE/STOPPED instances whose
    task_state is one of the resizing or rebuild task states.
    """
    if instance.vm_state == vm_states.RESIZED:
        return True
    transition_tasks = (task_states.resizing_states +
                        task_states.rebuild_states)
    return (instance.vm_state in [vm_states.ACTIVE, vm_states.STOPPED]
            and instance.task_state in transition_tasks)
def _is_trackable_migration(migration):
    """Return True for migration types whose resource usage we track.

    Resize, cold migration and evacuation consume resources on the
    destination node; other types are ignored by the tracker.
    """
    tracked_types = ('resize', 'migration', 'evacuation')
    return migration.migration_type in tracked_types
def _normalize_inventory_from_cn_obj(inv_data, cn):
    """Fill missing allocation_ratio/reserved fields in driver inventory.

    Mutates *inv_data* in place: for VCPU, MEMORY_MB and DISK_GB records
    that lack them, allocation ratios come from the ComputeNode object and
    reserved amounts from configuration.

    :param inv_data: dict of resource class -> inventory record
    :param cn: ComputeNode supplying the allocation ratios
    """
    def _fill(rc, ratio_fn, reserved_fn):
        record = inv_data.get(rc)
        if record is None:
            return
        if 'allocation_ratio' not in record:
            record['allocation_ratio'] = ratio_fn()
        if 'reserved' not in record:
            record['reserved'] = reserved_fn()

    _fill(orc.VCPU,
          lambda: cn.cpu_allocation_ratio,
          lambda: CONF.reserved_host_cpus)
    _fill(orc.MEMORY_MB,
          lambda: cn.ram_allocation_ratio,
          lambda: CONF.reserved_host_memory_mb)
    # Disk reservation is configured in MB but inventoried in GB.
    _fill(orc.DISK_GB,
          lambda: cn.disk_allocation_ratio,
          lambda: compute_utils.convert_mb_to_ceil_gb(
              CONF.reserved_host_disk_mb))
class ResourceTracker(object):
    def __init__(self, host, driver):
        """Initialize tracker state for *host* backed by virt *driver*."""
        self.host = host
        self.driver = driver
        # PCI tracker is created lazily in _setup_pci_tracker().
        self.pci_tracker = None
        # nodename -> ComputeNode for nodes this host manages.
        self.compute_nodes = {}
        # nodename -> Stats; defaultdict so new nodes appear on first use.
        self.stats = collections.defaultdict(compute_stats.Stats)
        # uuids of instances claimed/tracked on this host.
        self.tracked_instances = set()
        # instance uuid -> Migration for in-progress tracked migrations.
        self.tracked_migrations = {}
        # instance uuid -> bool cache of "is boot-from-volume" lookups.
        self.is_bfv = {}
        monitor_handler = monitors.MonitorHandler(self)
        self.monitors = monitor_handler.monitors
        # nodename -> last-seen ComputeNode, used by _resource_change().
        self.old_resources = collections.defaultdict(objects.ComputeNode)
        self.reportclient = report.SchedulerReportClient()
        # Configured allocation ratios, applied in _copy_resources().
        self.ram_allocation_ratio = CONF.ram_allocation_ratio
        self.cpu_allocation_ratio = CONF.cpu_allocation_ratio
        self.disk_allocation_ratio = CONF.disk_allocation_ratio
    @utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
    def instance_claim(self, context, instance, nodename, limits=None):
        """Claim resources on *nodename* for a new instance.

        Binds the instance to this host/node, claims CPU/RAM/disk (and PCI
        devices when a PCI tracker exists), updates in-memory usage and
        persists the compute node record.

        :param context: security context
        :param instance: the instance being built here
        :param nodename: target node
        :param limits: optional oversubscription limits enforced by Claim
        :returns: a Claim, or NopClaim when tracking is disabled
        """
        if self.disabled(nodename):
            # Tracking is off for this node; still bind the instance here.
            self._set_instance_host_and_node(instance, nodename)
            return claims.NopClaim()
        if instance.host:
            LOG.warning("Host field should not be set on the instance "
                        "until resources have been claimed.",
                        instance=instance)
        if instance.node:
            LOG.warning("Node field should not be set on the instance "
                        "until resources have been claimed.",
                        instance=instance)
        overhead = self.driver.estimate_instance_overhead(instance)
        LOG.debug("Memory overhead for %(flavor)d MB instance; %(overhead)d "
                  "MB", {'flavor': instance.flavor.memory_mb,
                         'overhead': overhead['memory_mb']})
        LOG.debug("Disk overhead for %(flavor)d GB instance; %(overhead)d "
                  "GB", {'flavor': instance.flavor.root_gb,
                         'overhead': overhead.get('disk_gb', 0)})
        LOG.debug("CPU overhead for %(flavor)d vCPUs instance; %(overhead)d "
                  "vCPU(s)", {'flavor': instance.flavor.vcpus,
                              'overhead': overhead.get('vcpus', 0)})
        cn = self.compute_nodes[nodename]
        pci_requests = objects.InstancePCIRequests.get_by_instance_uuid(
            context, instance.uuid)
        # Claim construction tests resources against limits and may raise.
        claim = claims.Claim(context, instance, nodename, self, cn,
                             pci_requests, overhead=overhead, limits=limits)
        instance_numa_topology = claim.claimed_numa_topology
        instance.numa_topology = instance_numa_topology
        self._set_instance_host_and_node(instance, nodename)
        if self.pci_tracker:
            # NOTE(jaypipes): ComputeNode.pci_device_pools is set below
            # in _update_usage_from_instance().
            self.pci_tracker.claim_instance(context, pci_requests,
                                            instance_numa_topology)
        # Mark resources in-use and update stats
        self._update_usage_from_instance(context, instance, nodename)
        elevated = context.elevated()
        # persist changes to the compute node:
        self._update(elevated, cn)
        return claim
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
def rebuild_claim(self, context, instance, nodename, limits=None,
image_meta=None, migration=None):
instance_type = instance.flavor
return self._move_claim(context, instance, instance_type, nodename,
migration, move_type='evacuation',
limits=limits, image_meta=image_meta)
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
def resize_claim(self, context, instance, instance_type, nodename,
migration, image_meta=None, limits=None):
return self._move_claim(context, instance, instance_type, nodename,
migration, image_meta=image_meta,
limits=limits)
    def _move_claim(self, context, instance, new_instance_type, nodename,
                    migration, move_type=None, image_meta=None, limits=None):
        """Claim destination resources for a move operation.

        Creates (or re-targets) the migration record, claims resources on
        *nodename* against the new flavor, stashes old/new resources in a
        MigrationContext on the instance, and updates node usage.

        :param context: security context
        :param instance: the instance being moved
        :param new_instance_type: flavor on the destination
        :param nodename: destination node
        :param migration: existing Migration, or None to create one
        :param move_type: explicit migration type when creating a migration
        :param image_meta: image metadata used by the MoveClaim
        :param limits: optional oversubscription limits
        :returns: a MoveClaim, or NopClaim when tracking is disabled
        """
        image_meta = image_meta or {}
        if migration:
            self._claim_existing_migration(migration, nodename)
        else:
            migration = self._create_migration(context, instance,
                                               new_instance_type,
                                               nodename, move_type)
        if self.disabled(nodename):
            # compute_driver doesn't support resource tracking; just
            # generate the migration record and a no-op claim.
            return claims.NopClaim(migration=migration)
        overhead = self.driver.estimate_instance_overhead(new_instance_type)
        LOG.debug("Memory overhead for %(flavor)d MB instance; %(overhead)d "
                  "MB", {'flavor': new_instance_type.memory_mb,
                         'overhead': overhead['memory_mb']})
        LOG.debug("Disk overhead for %(flavor)d GB instance; %(overhead)d "
                  "GB", {'flavor': instance.flavor.root_gb,
                         'overhead': overhead.get('disk_gb', 0)})
        LOG.debug("CPU overhead for %(flavor)d vCPUs instance; %(overhead)d "
                  "vCPU(s)", {'flavor': instance.flavor.vcpus,
                              'overhead': overhead.get('vcpus', 0)})
        cn = self.compute_nodes[nodename]
        new_pci_requests = pci_request.get_pci_requests_from_flavor(
            new_instance_type)
        new_pci_requests.instance_uuid = instance.uuid
        # On resize merge the SR-IOV ports pci_requests with the new
        # instance flavor pci_requests.
        if instance.pci_requests:
            for request in instance.pci_requests.requests:
                if request.alias_name is None:
                    new_pci_requests.requests.append(request)
        claim = claims.MoveClaim(context, instance, nodename,
                                 new_instance_type, image_meta, self, cn,
                                 new_pci_requests, overhead=overhead,
                                 limits=limits)
        claim.migration = migration
        claimed_pci_devices_objs = []
        if self.pci_tracker:
            # NOTE(jaypipes): ComputeNode.pci_device_pools is set below
            # in _update_usage_from_instance().
            claimed_pci_devices_objs = self.pci_tracker.claim_instance(
                context, new_pci_requests, claim.claimed_numa_topology)
        claimed_pci_devices = objects.PciDeviceList(
            objects=claimed_pci_devices_objs)
        # Stash old and new resources so a revert can restore the old side.
        mig_context = objects.MigrationContext(
            context=context, instance_uuid=instance.uuid,
            migration_id=migration.id,
            old_numa_topology=instance.numa_topology,
            new_numa_topology=claim.claimed_numa_topology,
            old_pci_devices=instance.pci_devices,
            new_pci_devices=claimed_pci_devices,
            old_pci_requests=instance.pci_requests,
            new_pci_requests=new_pci_requests)
        instance.migration_context = mig_context
        instance.save()
        self._update_usage_from_migration(context, instance, migration,
                                          nodename)
        elevated = context.elevated()
        self._update(elevated, cn)
        return claim
def _create_migration(self, context, instance, new_instance_type,
nodename, move_type=None):
migration = objects.Migration(context=context.elevated())
migration.dest_compute = self.host
migration.dest_node = nodename
migration.dest_host = self.driver.get_host_ip_addr()
migration.old_instance_type_id = instance.flavor.id
migration.new_instance_type_id = new_instance_type.id
migration.status = 'pre-migrating'
migration.instance_uuid = instance.uuid
migration.source_compute = instance.host
migration.source_node = instance.node
if move_type:
migration.migration_type = move_type
else:
migration.migration_type = migration_obj.determine_migration_type(
migration)
migration.create()
return migration
def _claim_existing_migration(self, migration, nodename):
migration.dest_compute = self.host
migration.dest_node = nodename
migration.dest_host = self.driver.get_host_ip_addr()
migration.status = 'pre-migrating'
migration.save()
def _set_instance_host_and_node(self, instance, nodename):
instance.host = self.host
instance.launched_on = self.host
instance.node = nodename
instance.save()
def _unset_instance_host_and_node(self, instance):
instance.host = None
instance.node = None
instance.save()
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
def abort_instance_claim(self, context, instance, nodename):
self._update_usage_from_instance(context, instance, nodename,
is_removed=True)
instance.clear_numa_topology()
self._unset_instance_host_and_node(instance)
self._update(context.elevated(), self.compute_nodes[nodename])
def _drop_pci_devices(self, instance, nodename, prefix):
if self.pci_tracker:
pci_devices = self._get_migration_context_resource(
'pci_devices', instance, prefix=prefix)
if pci_devices:
for pci_device in pci_devices:
self.pci_tracker.free_device(pci_device, instance)
dev_pools_obj = self.pci_tracker.stats.to_device_pools_obj()
self.compute_nodes[nodename].pci_device_pools = dev_pools_obj
    @utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
    def drop_move_claim(self, context, instance, nodename,
                        instance_type=None, prefix='new_'):
        """Release the resources a move claim holds on *nodename*.

        :param context: security context
        :param instance: the instance whose claim is dropped
        :param nodename: node the claim was made against
        :param instance_type: flavor whose usage should be subtracted; when
            None and the instance has a tracked migration, the flavor is
            looked up via _get_instance_type()
        :param prefix: 'new_' or 'old_' migration-context side to release
        """
        # A tracked migration supplies the flavor to release; otherwise an
        # actively tracked instance simply stops being tracked.
        if instance['uuid'] in self.tracked_migrations:
            migration = self.tracked_migrations.pop(instance['uuid'])
            if not instance_type:
                instance_type = self._get_instance_type(instance, prefix,
                                                        migration)
        elif instance['uuid'] in self.tracked_instances:
            self.tracked_instances.remove(instance['uuid'])
        if instance_type is not None:
            numa_topology = self._get_migration_context_resource(
                'numa_topology', instance, prefix=prefix)
            usage = self._get_usage_dict(
                instance_type, instance, numa_topology=numa_topology)
            self._drop_pci_devices(instance, nodename, prefix)
            # Subtract the released usage from the node.
            self._update_usage(usage, nodename, sign=-1)
        ctxt = context.elevated()
        self._update(ctxt, self.compute_nodes[nodename])
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
def update_usage(self, context, instance, nodename):
if self.disabled(nodename):
return
uuid = instance['uuid']
# claim first:
if uuid in self.tracked_instances:
self._update_usage_from_instance(context, instance, nodename)
self._update(context.elevated(), self.compute_nodes[nodename])
def disabled(self, nodename):
return (nodename not in self.compute_nodes or
not self.driver.node_is_available(nodename))
def _check_for_nodes_rebalance(self, context, resources, nodename):
if not self.driver.rebalances_nodes:
return False
# Its possible ironic just did a node re-balance, so let's
cn_candidates = objects.ComputeNodeList.get_by_hypervisor(
context, nodename)
if len(cn_candidates) == 1:
cn = cn_candidates[0]
LOG.info("ComputeNode %(name)s moving from %(old)s to %(new)s",
{"name": nodename, "old": cn.host, "new": self.host})
cn.host = self.host
self.compute_nodes[nodename] = cn
self._copy_resources(cn, resources)
self._setup_pci_tracker(context, cn, resources)
self._update(context, cn)
return True
elif len(cn_candidates) > 1:
LOG.error(
"Found more than one ComputeNode for nodename %s. "
"Please clean up the orphaned ComputeNode records in your DB.",
nodename)
return False
    def _init_compute_node(self, context, resources):
        """Ensure a ComputeNode exists for the node described by *resources*.

        Checks, in order: the in-memory cache, the database, and (for
        re-balancing drivers) nodes previously owned by another host;
        otherwise a brand-new record is created.

        :param context: security context
        :param resources: driver resource dict (keyed on hypervisor_hostname)
        :returns: True only when a new ComputeNode record was created
        """
        nodename = resources['hypervisor_hostname']
        if nodename in self.compute_nodes:
            cn = self.compute_nodes[nodename]
            self._copy_resources(cn, resources)
            self._setup_pci_tracker(context, cn, resources)
            return False
        # Not cached; look for an existing DB record for this host/node.
        cn = self._get_compute_node(context, nodename)
        if cn:
            self.compute_nodes[nodename] = cn
            self._copy_resources(cn, resources)
            self._setup_pci_tracker(context, cn, resources)
            return False
        if self._check_for_nodes_rebalance(context, resources, nodename):
            return False
        # No record anywhere: create one for this host.
        cn = objects.ComputeNode(context)
        cn.host = self.host
        self._copy_resources(cn, resources, initial=True)
        self.compute_nodes[nodename] = cn
        cn.create()
        LOG.info('Compute node record created for '
                 '%(host)s:%(node)s with uuid: %(uuid)s',
                 {'host': self.host, 'node': nodename, 'uuid': cn.uuid})
        self._setup_pci_tracker(context, cn, resources)
        return True
def _setup_pci_tracker(self, context, compute_node, resources):
if not self.pci_tracker:
n_id = compute_node.id
self.pci_tracker = pci_manager.PciDevTracker(context, node_id=n_id)
if 'pci_passthrough_devices' in resources:
dev_json = resources.pop('pci_passthrough_devices')
self.pci_tracker.update_devices_from_hypervisor_resources(
dev_json)
dev_pools_obj = self.pci_tracker.stats.to_device_pools_obj()
compute_node.pci_device_pools = dev_pools_obj
    def _copy_resources(self, compute_node, resources, initial=False):
        """Copy driver-reported *resources* onto the ComputeNode object.

        Rebuilds the node's stats (preserving the failed_builds counter)
        and applies configured allocation ratios before delegating the
        remaining fields to ComputeNode.update_from_virt_driver().

        :param compute_node: ComputeNode to update in place
        :param resources: driver resource dict
        :param initial: True when the node record is being created, which
            selects the CONF.initial_*_allocation_ratio options
        """
        nodename = resources['hypervisor_hostname']
        stats = self.stats[nodename]
        # failed_builds survives the stats rebuild below.
        prev_failed_builds = stats.get('failed_builds', 0)
        stats.clear()
        stats['failed_builds'] = prev_failed_builds
        stats.digest_stats(resources.get('stats'))
        compute_node.stats = stats
        # Only override the node's allocation ratios when the configured
        # value is meaningful (not 0.0/None); otherwise keep the existing
        # ComputeNode values so _resource_change() doesn't see a spurious
        # difference.
        for res in ('cpu', 'disk', 'ram'):
            attr = '%s_allocation_ratio' % res
            if initial:
                conf_alloc_ratio = getattr(CONF, 'initial_%s' % attr)
            else:
                conf_alloc_ratio = getattr(self, attr)
            if conf_alloc_ratio not in (0.0, None):
                setattr(compute_node, attr, conf_alloc_ratio)
        compute_node.update_from_virt_driver(resources)
def remove_node(self, nodename):
self.stats.pop(nodename, None)
self.compute_nodes.pop(nodename, None)
self.old_resources.pop(nodename, None)
def _get_host_metrics(self, context, nodename):
metrics = objects.MonitorMetricList()
metrics_info = {}
for monitor in self.monitors:
try:
monitor.populate_metrics(metrics)
except NotImplementedError:
LOG.debug("The compute driver doesn't support host "
"metrics for %(mon)s", {'mon': monitor})
except Exception as exc:
LOG.warning("Cannot get the metrics from %(mon)s; "
"error: %(exc)s",
{'mon': monitor, 'exc': exc})
# TODO(jaypipes): Remove this when compute_node.metrics doesn't need
metric_list = metrics.to_list()
if len(metric_list):
metrics_info['nodename'] = nodename
metrics_info['metrics'] = metric_list
metrics_info['host'] = self.host
metrics_info['host_ip'] = CONF.my_ip
notifier = rpc.get_notifier(service='compute', host=nodename)
notifier.info(context, 'compute.metrics.update', metrics_info)
compute_utils.notify_about_metrics_update(
context, self.host, CONF.my_ip, nodename, metrics)
return metric_list
def update_available_resource(self, context, nodename, startup=False):
LOG.debug("Auditing locally available compute resources for "
"%(host)s (node: %(node)s)",
{'node': nodename,
'host': self.host})
resources = self.driver.get_available_resource(nodename)
resources['host_ip'] = CONF.my_ip
if "cpu_info" not in resources or resources["cpu_info"] is None:
resources["cpu_info"] = ''
self._verify_resources(resources)
self._report_hypervisor_resource_view(resources)
self._update_available_resource(context, resources, startup=startup)
def _pair_instances_to_migrations(self, migrations, instance_by_uuid):
for migration in migrations:
try:
migration.instance = instance_by_uuid[migration.instance_uuid]
except KeyError:
# let the code either fail or lazy-load the instance later
# which is what happened before we added this optimization.
# NOTE(tdurakov) this situation is possible for resize/cold
# migration when migration is finished but haven't yet
LOG.debug('Migration for instance %(uuid)s refers to '
'another host\'s instance!',
{'uuid': migration.instance_uuid})
    @utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
    def _update_available_resource(self, context, resources, startup=False):
        """Rebuild this node's usage under the resource lock.

        Initializes the ComputeNode, recomputes usage from instances,
        migrations and orphans, prunes stale placement allocations, and
        persists the final view.

        :param context: security context
        :param resources: driver resource dict
        :param startup: True on compute service startup (affects placement
            reshape handling further down the call chain)
        """
        # initialize the compute node object, creating it
        # if it does not already exist.
        is_new_compute_node = self._init_compute_node(context, resources)
        nodename = resources['hypervisor_hostname']
        # if we could not init the compute node the tracker will be
        # disabled and we should quit now
        if self.disabled(nodename):
            return
        # Grab all instances assigned to this node:
        instances = objects.InstanceList.get_by_host_and_node(
            context, self.host, nodename,
            expected_attrs=['system_metadata',
                            'numa_topology',
                            'flavor', 'migration_context'])
        # Now calculate usage based on instance utilization:
        instance_by_uuid = self._update_usage_from_instances(
            context, instances, nodename)
        # Grab all in-progress migrations:
        migrations = objects.MigrationList.get_in_progress_by_host_and_node(
            context, self.host, nodename)
        self._pair_instances_to_migrations(migrations, instance_by_uuid)
        self._update_usage_from_migrations(context, migrations, nodename)
        # A new compute node means there won't be a resource provider yet since
        # that would be created via the _update() call below; skip cleanup.
        if not is_new_compute_node:
            self._remove_deleted_instances_allocations(
                context, self.compute_nodes[nodename], migrations,
                instance_by_uuid)
        orphans = self._find_orphaned_instances()
        self._update_usage_from_orphans(orphans, nodename)
        cn = self.compute_nodes[nodename]
        # PCI usage must be cleaned explicitly each audit pass.
        self.pci_tracker.clean_usage(instances, migrations, orphans)
        dev_pools_obj = self.pci_tracker.stats.to_device_pools_obj()
        cn.pci_device_pools = dev_pools_obj
        self._report_final_resource_view(nodename)
        metrics = self._get_host_metrics(context, nodename)
        cn.metrics = jsonutils.dumps(metrics)
        self._update(context, cn, startup=startup)
        LOG.debug('Compute_service record updated for %(host)s:%(node)s',
                  {'host': self.host, 'node': nodename})
def _get_compute_node(self, context, nodename):
try:
return objects.ComputeNode.get_by_host_and_nodename(
context, self.host, nodename)
except exception.NotFound:
LOG.warning("No compute node record for %(host)s:%(node)s",
{'host': self.host, 'node': nodename})
def _report_hypervisor_resource_view(self, resources):
nodename = resources['hypervisor_hostname']
free_ram_mb = resources['memory_mb'] - resources['memory_mb_used']
free_disk_gb = resources['local_gb'] - resources['local_gb_used']
vcpus = resources['vcpus']
if vcpus:
free_vcpus = vcpus - resources['vcpus_used']
else:
free_vcpus = 'unknown'
pci_devices = resources.get('pci_passthrough_devices')
LOG.debug("Hypervisor/Node resource view: "
"name=%(node)s "
"free_ram=%(free_ram)sMB "
"free_disk=%(free_disk)sGB "
"free_vcpus=%(free_vcpus)s "
"pci_devices=%(pci_devices)s",
{'node': nodename,
'free_ram': free_ram_mb,
'free_disk': free_disk_gb,
'free_vcpus': free_vcpus,
'pci_devices': pci_devices})
def _report_final_resource_view(self, nodename):
cn = self.compute_nodes[nodename]
vcpus = cn.vcpus
if vcpus:
tcpu = vcpus
ucpu = cn.vcpus_used
LOG.debug("Total usable vcpus: %(tcpu)s, "
"total allocated vcpus: %(ucpu)s",
{'tcpu': vcpus,
'ucpu': ucpu})
else:
tcpu = 0
ucpu = 0
pci_stats = (list(cn.pci_device_pools) if
cn.pci_device_pools else [])
LOG.debug("Final resource view: "
"name=%(node)s "
"phys_ram=%(phys_ram)sMB "
"used_ram=%(used_ram)sMB "
"phys_disk=%(phys_disk)sGB "
"used_disk=%(used_disk)sGB "
"total_vcpus=%(total_vcpus)s "
"used_vcpus=%(used_vcpus)s "
"pci_stats=%(pci_stats)s",
{'node': nodename,
'phys_ram': cn.memory_mb,
'used_ram': cn.memory_mb_used,
'phys_disk': cn.local_gb,
'used_disk': cn.local_gb_used,
'total_vcpus': tcpu,
'used_vcpus': ucpu,
'pci_stats': pci_stats})
def _resource_change(self, compute_node):
nodename = compute_node.hypervisor_hostname
old_compute = self.old_resources[nodename]
if not obj_base.obj_equal_prims(
compute_node, old_compute, ['updated_at']):
self.old_resources[nodename] = copy.deepcopy(compute_node)
return True
return False
def _get_traits(self, nodename, provider_tree):
traits = provider_tree.data(nodename).traits
# Now get the driver's capabilities and add any supported
for trait, supported in self.driver.capabilities_as_traits().items():
if supported:
traits.add(trait)
elif trait in traits:
traits.remove(trait)
return list(traits)
    @retrying.retry(stop_max_attempt_number=4,
                    retry_on_exception=lambda e: isinstance(
                        e, exception.ResourceProviderUpdateConflict))
    def _update_to_placement(self, context, compute_node, startup):
        """Sync this compute node's inventory/traits to placement.

        Preferred path is the driver's update_provider_tree(); legacy
        drivers fall back to get_inventory() and finally to a dict built
        from the ComputeNode itself. Retries (up to 4 attempts) on
        placement generation conflicts.

        :param context: security context
        :param compute_node: the ComputeNode being synced
        :param startup: True during service startup; only then is a
            driver-requested reshape performed here
        """
        # Even when there is no resource change for compute_node, we need
        # to get inventory and use report client interfaces to update
        # inventory to placement. It's report client's responsibility to
        # ensure the update request to placement only happens when inventory
        # is changed.
        nodename = compute_node.hypervisor_hostname
        # Persist the stats to the Scheduler
        # First try update_provider_tree
        # Retrieve the provider tree associated with this compute node. If
        # it doesn't exist yet, this will create it with a (single, root)
        prov_tree = self.reportclient.get_provider_tree_and_ensure_root(
            context, compute_node.uuid, name=compute_node.hypervisor_hostname)
        allocs = None
        try:
            try:
                self.driver.update_provider_tree(prov_tree, nodename)
            except exception.ReshapeNeeded:
                if not startup:
                    # A reshape outside of startup is not handled here; pass
                    # it up; the compute manager will treat it specially.
                    raise
                LOG.info("Performing resource provider inventory and "
                         "allocation data migration during compute service "
                         "startup or fast-forward upgrade.")
                allocs = self.reportclient.get_allocations_for_provider_tree(
                    context, nodename)
                self.driver.update_provider_tree(prov_tree, nodename,
                                                 allocations=allocs)
            # Inject driver capabilities traits into the provider
            # tree. We need to determine the traits that the virt
            # driver owns - so those that come from the tree itself
            # (via the virt driver) plus the compute capabilities
            # traits, and then merge those with the traits set
            # externally that the driver does not own - and remove any
            # set on the provider externally that the virt owns but
            # aren't in the current list of supported traits. For
            # example, a driver may support a trait at t1 but not at t2;
            # we must not clobber what was set externally on the provider.
            traits = self._get_traits(nodename, provider_tree=prov_tree)
            prov_tree.update_traits(nodename, traits)
        except NotImplementedError:
            # update_provider_tree isn't implemented yet - try get_inventory
            try:
                inv_data = self.driver.get_inventory(nodename)
                _normalize_inventory_from_cn_obj(inv_data, compute_node)
            except NotImplementedError:
                # Final fallback until all drivers implement one of the
                # above; eventually we should be able to remove this branch.
                inv_data = compute_utils.compute_node_to_inventory_dict(
                    compute_node)
            prov_tree.update_inventory(nodename, inv_data)
        # Flush any changes. If we processed ReshapeNeeded above, allocs is not
        # None, and this will hit placement's POST /reshaper route.
        self.reportclient.update_from_provider_tree(context, prov_tree,
                                                    allocations=allocs)
    def _update(self, context, compute_node, startup=False):
        """Persist compute node resource changes and sync them to placement.

        :param context: security context
        :param compute_node: the ComputeNode object being tracked
        :param startup: forwarded to _update_to_placement, which only
            performs the ReshapeNeeded migration path during service startup
        """
        if self._resource_change(compute_node):
            # NOTE(jianghuaw): Once we completely move to use get_inventory()
            # for all resource provider's inv data. We can remove this check.
            compute_node.save()
        self._update_to_placement(context, compute_node, startup)
        if self.pci_tracker:
            # Persist any PCI device state changes made during this update.
            self.pci_tracker.save(context)
    def _update_usage(self, usage, nodename, sign=1):
        """Apply one usage record to the tracked compute node, signed.

        :param usage: usage dict as produced by _get_usage_dict
            (memory_mb, vcpus, root_gb/ephemeral_gb, swap, numa_topology)
        :param nodename: key into self.compute_nodes / self.stats
        :param sign: +1 to consume the resources, -1 to free them
        """
        mem_usage = usage['memory_mb']
        disk_usage = usage.get('root_gb', 0)
        vcpus_usage = usage.get('vcpus', 0)
        # Virt-driver overhead (per instance) is charged on top of the
        # flavor-derived values.
        overhead = self.driver.estimate_instance_overhead(usage)
        mem_usage += overhead['memory_mb']
        disk_usage += overhead.get('disk_gb', 0)
        vcpus_usage += overhead.get('vcpus', 0)
        cn = self.compute_nodes[nodename]
        cn.memory_mb_used += sign * mem_usage
        cn.local_gb_used += sign * disk_usage
        cn.local_gb_used += sign * usage.get('ephemeral_gb', 0)
        # swap is tracked in MB; convert to GB before adding to disk usage.
        cn.local_gb_used += sign * usage.get('swap', 0) / 1024
        cn.vcpus_used += sign * vcpus_usage
        # Recompute the derived free counters from the new used values.
        cn.free_ram_mb = cn.memory_mb - cn.memory_mb_used
        cn.free_disk_gb = cn.local_gb - cn.local_gb_used
        stats = self.stats[nodename]
        cn.running_vms = stats.num_instances
        # Calculate the NUMA usage; freeing (sign == -1) releases the
        # instance's NUMA cells instead of consuming them.
        free = sign == -1
        updated_numa_topology = hardware.get_host_numa_usage_from_instance(
            cn, usage, free)
        cn.numa_topology = updated_numa_topology
def _get_migration_context_resource(self, resource, instance,
prefix='new_'):
migration_context = instance.migration_context
resource = prefix + resource
if migration_context and resource in migration_context:
return getattr(migration_context, resource)
return None
    def _update_usage_from_migration(self, context, instance, migration,
                                     nodename):
        """Update usage for a single migration touching this node.

        Decides whether this host is the source, the destination, or both
        (same-host resize) and charges either the old or the new flavor
        accordingly; tracked migrations are recorded in
        self.tracked_migrations.
        """
        if not _is_trackable_migration(migration):
            return
        uuid = migration.instance_uuid
        LOG.info("Updating resource usage from migration %s", migration.uuid,
                 instance_uuid=uuid)
        incoming = (migration.dest_compute == self.host and
                    migration.dest_node == nodename)
        outbound = (migration.source_compute == self.host and
                    migration.source_node == nodename)
        same_node = (incoming and outbound)
        tracked = uuid in self.tracked_instances
        itype = None
        numa_topology = None
        sign = 0
        if same_node:
            # Same-host resize: the instance usage itself is already
            # tracked, so only the flavor it is *not* currently using is
            # charged here.
            if (instance['instance_type_id'] ==
                    migration.old_instance_type_id):
                # Instance still reports the old flavor: charge the new one.
                itype = self._get_instance_type(instance, 'new_', migration)
                numa_topology = self._get_migration_context_resource(
                    'numa_topology', instance)
                # Allocate pci device(s) for the instance.
                sign = 1
            else:
                # Instance was already resized to the new flavor: charge the
                # old one so both sides of the resize are accounted for.
                itype = self._get_instance_type(instance, 'old_', migration)
                numa_topology = self._get_migration_context_resource(
                    'numa_topology', instance, prefix='old_')
        elif incoming and not tracked:
            # Instance is migrating to this host but is not yet tracked
            # here: charge the new flavor.
            itype = self._get_instance_type(instance, 'new_', migration)
            numa_topology = self._get_migration_context_resource(
                'numa_topology', instance)
            sign = 1
            LOG.debug('Starting to track incoming migration %s with flavor %s',
                      migration.uuid, itype.flavorid, instance=instance)
        elif outbound and not tracked:
            # Instance is migrating away but is not tracked here: keep the
            # old flavor charged until the migration completes.
            itype = self._get_instance_type(instance, 'old_', migration)
            numa_topology = self._get_migration_context_resource(
                'numa_topology', instance, prefix='old_')
            LOG.debug('Starting to track outgoing migration %s with flavor %s',
                      migration.uuid, itype.flavorid, instance=instance)
        if itype:
            cn = self.compute_nodes[nodename]
            usage = self._get_usage_dict(
                itype, instance, numa_topology=numa_topology)
            if self.pci_tracker and sign:
                self.pci_tracker.update_pci_for_instance(
                    context, instance, sign=sign)
            self._update_usage(usage, nodename)
            if self.pci_tracker:
                obj = self.pci_tracker.stats.to_device_pools_obj()
                cn.pci_device_pools = obj
            else:
                obj = objects.PciDevicePoolList()
                cn.pci_device_pools = obj
            self.tracked_migrations[uuid] = migration
    def _update_usage_from_migrations(self, context, migrations, nodename):
        """Apply usage from the given set of in-progress migrations.

        Filters out migrations whose instance is gone or not resizing,
        keeps only the most recently updated migration per instance, and
        applies each survivor via _update_usage_from_migration.
        """
        filtered = {}
        instances = {}
        self.tracked_migrations.clear()
        for migration in migrations:
            uuid = migration.instance_uuid
            try:
                # Lazy-load the migration's instance; skip migrations whose
                # instance no longer exists.
                if uuid not in instances:
                    instances[uuid] = migration.instance
            except exception.InstanceNotFound as e:
                LOG.debug('Migration instance not found: %s', e)
                continue
            if not _instance_in_resize_state(instances[uuid]):
                LOG.warning("Instance not resizing, skipping migration.",
                            instance_uuid=uuid)
                continue
            # filter to most recently updated migration for each instance:
            other_migration = filtered.get(uuid, None)
            # NOTE(claudiub): In Python 3, you cannot compare NoneTypes.
            if other_migration:
                om = other_migration
                other_time = om.updated_at or om.created_at
                migration_time = migration.updated_at or migration.created_at
                if migration_time > other_time:
                    filtered[uuid] = migration
            else:
                filtered[uuid] = migration
        for migration in filtered.values():
            instance = instances[migration.instance_uuid]
            # Skip migration (and mark it as error) if it doesn't match the
            # migration_id stashed on the instance's migration context.
            if (instance.migration_context is not None and
                    instance.migration_context.migration_id != migration.id):
                LOG.info("Current instance migration %(im)s doesn't match "
                         "migration %(m)s, marking migration as error. "
                         "This can occur if a previous migration for this "
                         "instance did not complete.",
                         {'im': instance.migration_context.migration_id,
                          'm': migration.id})
                migration.status = "error"
                migration.save()
                continue
            try:
                self._update_usage_from_migration(context, instance, migration,
                                                  nodename)
            except exception.FlavorNotFound:
                LOG.warning("Flavor could not be found, skipping migration.",
                            instance_uuid=instance.uuid)
                continue
    def _update_usage_from_instance(self, context, instance, nodename,
            is_removed=False):
        """Apply (or remove) a single instance's resource usage on nodename.

        New instances are added to tracked_instances and consume resources
        (sign = +1); removed/deleted instances are dropped from tracking and
        their usage is freed (sign = -1). Instances already tracked and not
        removed only refresh stats/PCI pools.
        """
        uuid = instance['uuid']
        is_new_instance = uuid not in self.tracked_instances
        # NOTE(sfinucan): Both brand new instances as well as instances that
        # are being unshelved will have is_new_instance == True
        is_removed_instance = not is_new_instance and (is_removed or
            instance['vm_state'] in vm_states.ALLOW_RESOURCE_REMOVAL)
        if is_new_instance:
            self.tracked_instances.add(uuid)
            sign = 1
        if is_removed_instance:
            self.tracked_instances.remove(uuid)
            sign = -1
        cn = self.compute_nodes[nodename]
        stats = self.stats[nodename]
        stats.update_stats_for_instance(instance, is_removed_instance)
        cn.stats = stats
        # if it's a new or deleted instance:
        if is_new_instance or is_removed_instance:
            if self.pci_tracker:
                self.pci_tracker.update_pci_for_instance(context,
                                                         instance,
                                                         sign=sign)
            self._update_usage(self._get_usage_dict(instance, instance),
                               nodename, sign=sign)
        # Drop the cached boot-from-volume flag once the instance is gone.
        if is_removed_instance and uuid in self.is_bfv:
            del self.is_bfv[uuid]
        cn.current_workload = stats.calculate_workload()
        if self.pci_tracker:
            obj = self.pci_tracker.stats.to_device_pools_obj()
            cn.pci_device_pools = obj
        else:
            cn.pci_device_pools = objects.PciDevicePoolList()
    def _update_usage_from_instances(self, context, instances, nodename):
        """Recompute node usage from scratch for the given instances.

        Resets the node's used counters to the configured host reservations,
        then adds every instance that is not in a resource-removal state.

        :returns: dict mapping instance uuid -> instance for all instances
            passed in (including removed-state ones)
        """
        self.tracked_instances.clear()
        cn = self.compute_nodes[nodename]
        # Start from the host-reserved amounts (disk reservation is
        # configured in MB, converted to GB here).
        cn.local_gb_used = CONF.reserved_host_disk_mb / 1024
        cn.memory_mb_used = CONF.reserved_host_memory_mb
        cn.vcpus_used = CONF.reserved_host_cpus
        cn.free_ram_mb = (cn.memory_mb - cn.memory_mb_used)
        cn.free_disk_gb = (cn.local_gb - cn.local_gb_used)
        cn.current_workload = 0
        cn.running_vms = 0
        instance_by_uuid = {}
        for instance in instances:
            if instance.vm_state not in vm_states.ALLOW_RESOURCE_REMOVAL:
                self._update_usage_from_instance(context, instance, nodename)
            instance_by_uuid[instance.uuid] = instance
        return instance_by_uuid
    def _remove_deleted_instances_allocations(self, context, cn,
                                              migrations, instance_by_uuid):
        """Remove stale placement allocations held against this node.

        Walks all allocations recorded against this compute node's resource
        provider, deletes the ones whose consumer is a deleted instance,
        and logs (without acting on) the ambiguous cases such as moved or
        not-yet-started instances.
        """
        migration_uuids = [migration.uuid for migration in migrations
                           if 'uuid' in migration]
        # all the corner cases in move, local delete, unshelve and rebuild
        # operations for when allocations should be deleted when things didn't
        # happen according to the normal flow of events.
        try:
            pai = self.reportclient.get_allocations_for_resource_provider(
                context, cn.uuid)
        except (exception.ResourceProviderAllocationRetrievalFailed,
                ks_exc.ClientException) as e:
            LOG.error("Skipping removal of allocations for deleted instances: "
                      "%s", e)
            return
        allocations = pai.allocations
        if not allocations:
            # Nothing is consuming this provider, so nothing to clean up.
            return
        # Use an elevated read_deleted context so deleted instances can be
        # loaded to confirm their allocations really are stale.
        read_deleted_context = context.elevated(read_deleted='yes')
        for consumer_uuid, alloc in allocations.items():
            if consumer_uuid in self.tracked_instances:
                LOG.debug("Instance %s actively managed on this compute host "
                          "and has allocations in placement: %s.",
                          consumer_uuid, alloc)
                continue
            if consumer_uuid in migration_uuids:
                LOG.debug("Migration %s is active on this compute host "
                          "and has allocations in placement: %s.",
                          consumer_uuid, alloc)
                continue
            instance_uuid = consumer_uuid
            instance = instance_by_uuid.get(instance_uuid)
            if not instance:
                try:
                    instance = objects.Instance.get_by_uuid(
                        read_deleted_context, consumer_uuid,
                        expected_attrs=[])
                except exception.InstanceNotFound:
                    # scheduler _just_ created an allocation for it and we're
                    # racing with it here, so leave the allocation alone.
                    LOG.info("Instance %(uuid)s has allocations against this "
                             "compute host but is not found in the database.",
                             {'uuid': instance_uuid},
                             exc_info=False)
                    continue
            if instance.deleted:
                LOG.debug("Instance %s has been deleted (perhaps locally). "
                          "Deleting allocations that remained for this "
                          "instance against this compute host: %s.",
                          instance_uuid, alloc)
                self.reportclient.delete_allocation_for_instance(context,
                                                                 instance_uuid)
                continue
            if not instance.host:
                LOG.debug("Instance %s has been scheduled to this compute "
                          "host, the scheduler has made an allocation "
                          "against this compute node but the instance has "
                          "yet to start. Skipping heal of allocation: %s.",
                          instance_uuid, alloc)
                continue
            if (instance.host == cn.host and
                    instance.node == cn.hypervisor_hostname):
                LOG.warning("Instance %s is not being actively managed by "
                            "this compute host but has allocations "
                            "referencing this compute host: %s. Skipping "
                            "heal of allocation because we do not know "
                            "what to do.", instance_uuid, alloc)
                continue
            if instance.host != cn.host:
                # for resize to confirm/revert), however if the destination
                # host is an Ocata compute host, it will delete the allocation
                # that contains this source compute host information anyway and
                # recreate an allocation that only refers to itself. So we
                # don't need to do anything in that case. Just log the
                # change the allocation.
                LOG.warning("Instance %s has been moved to another host "
                            "%s(%s). There are allocations remaining against "
                            "the source host that might need to be removed: "
                            "%s.",
                            instance_uuid, instance.host, instance.node, alloc)
    def delete_allocation_for_evacuated_instance(self, context, instance, node,
                                                 node_type='source'):
        """Remove the evacuated instance's allocation against this node.

        Uses the placement report client to strip this compute node's
        provider tree from the instance allocation; failures are logged but
        not raised.
        """
        # Clean up the instance allocation from this node in placement
        cn_uuid = self.compute_nodes[node].uuid
        if not self.reportclient.remove_provider_tree_from_instance_allocation(
                context, instance.uuid, cn_uuid):
            LOG.error("Failed to clean allocation of evacuated "
                      "instance on the %s node %s",
                      node_type, cn_uuid, instance=instance)
def _find_orphaned_instances(self):
uuids1 = frozenset(self.tracked_instances)
uuids2 = frozenset(self.tracked_migrations.keys())
uuids = uuids1 | uuids2
usage = self.driver.get_per_instance_usage()
vuuids = frozenset(usage.keys())
orphan_uuids = vuuids - uuids
orphans = [usage[uuid] for uuid in orphan_uuids]
return orphans
    def _update_usage_from_orphans(self, orphans, nodename):
        """Include orphaned instances' memory usage in the node totals.

        Only memory is charged for orphans; the usage record from
        _find_orphaned_instances carries no disk/vcpu information we trust.
        """
        for orphan in orphans:
            memory_mb = orphan['memory_mb']
            LOG.warning("Detected running orphan instance: %(uuid)s "
                        "(consuming %(memory_mb)s MB memory)",
                        {'uuid': orphan['uuid'], 'memory_mb': memory_mb})
            # just record memory usage for the orphan
            usage = {'memory_mb': memory_mb}
            self._update_usage(usage, nodename)
    def delete_allocation_for_shelve_offloaded_instance(self, context,
                                                        instance):
        """Drop the shelve-offloaded instance's allocation in placement."""
        self.reportclient.delete_allocation_for_instance(context,
                                                         instance.uuid)
def _verify_resources(self, resources):
resource_keys = ["vcpus", "memory_mb", "local_gb", "cpu_info",
"vcpus_used", "memory_mb_used", "local_gb_used",
"numa_topology"]
missing_keys = [k for k in resource_keys if k not in resources]
if missing_keys:
reason = _("Missing keys: %s") % missing_keys
raise exception.InvalidInput(reason=reason)
def _get_instance_type(self, instance, prefix, migration):
stashed_flavors = migration.migration_type in ('resize',)
if stashed_flavors:
return getattr(instance, '%sflavor' % prefix)
else:
# NOTE(ndipanov): Certain migration types (all but resize)
# do not change flavors so there is no need to stash
# them. In that case - just get the instance flavor.
return instance.flavor
    def _get_usage_dict(self, object_or_dict, instance, **updates):
        """Build the usage dict that the _update* methods expect.

        :param object_or_dict: an Instance, a Flavor, or an already-built
            usage dict
        :param instance: the Instance the usage belongs to; used to decide
            whether root_gb should be zeroed (volume-backed instances)
        :param updates: overrides applied last; only 'numa_topology' is
            honored, other keys are ignored
        :returns: a plain dict of usage values
        """
        def _is_bfv():
            # Check to see if we have the is_bfv value cached.
            if instance.uuid in self.is_bfv:
                is_bfv = self.is_bfv[instance.uuid]
            else:
                is_bfv = compute_utils.is_volume_backed_instance(
                    instance._context, instance)
                self.is_bfv[instance.uuid] = is_bfv
            return is_bfv
        usage = {}
        if isinstance(object_or_dict, objects.Instance):
            is_bfv = _is_bfv()
            # Volume-backed instances consume no local root disk.
            usage = {'memory_mb': object_or_dict.flavor.memory_mb,
                     'swap': object_or_dict.flavor.swap,
                     'vcpus': object_or_dict.flavor.vcpus,
                     'root_gb': (0 if is_bfv else
                                 object_or_dict.flavor.root_gb),
                     'ephemeral_gb': object_or_dict.flavor.ephemeral_gb,
                     'numa_topology': object_or_dict.numa_topology}
        elif isinstance(object_or_dict, objects.Flavor):
            usage = obj_base.obj_to_primitive(object_or_dict)
            if _is_bfv():
                usage['root_gb'] = 0
        else:
            usage.update(object_or_dict)
        for key in ('numa_topology',):
            if key in updates:
                usage[key] = updates[key]
        return usage
    def build_failed(self, nodename):
        """Record a failed build in the given node's stats."""
        self.stats[nodename].build_failed()
    def build_succeeded(self, nodename):
        """Record a successful build in the given node's stats."""
        self.stats[nodename].build_succeeded()
    @utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
    def claim_pci_devices(self, context, pci_requests):
        """Claim PCI devices for the given requests and persist the PCI
        tracker state; serialized under the compute-resource semaphore.

        :returns: the result of PciDevTracker.claim_instance
        """
        result = self.pci_tracker.claim_instance(
            context, pci_requests, None)
        self.pci_tracker.save(context)
        return result
    @utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
    def allocate_pci_devices_for_instance(self, context, instance):
        """Allocate the instance's claimed PCI devices and persist the PCI
        tracker state; serialized under the compute-resource semaphore.
        """
        self.pci_tracker.allocate_instance(instance)
        self.pci_tracker.save(context)
    @utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
    def free_pci_device_allocations_for_instance(self, context, instance):
        """Free the instance's PCI device allocations and persist the PCI
        tracker state; serialized under the compute-resource semaphore.
        """
        self.pci_tracker.free_instance_allocations(context, instance)
        self.pci_tracker.save(context)
    @utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
    def free_pci_device_claims_for_instance(self, context, instance):
        """Free the instance's PCI device claims and persist the PCI
        tracker state; serialized under the compute-resource semaphore.
        """
        self.pci_tracker.free_instance_claims(context, instance)
        self.pci_tracker.save(context)
| true | true |
f7258d9ad8ec1ffdd9a1d4a476dabdc315bbf560 | 5,448 | py | Python | synthesizer/models/custom_decoder.py | Khizar-Ali/Lip2Wav | 07f056b3468ca660823830680bf25bdd42034f9e | [
"MIT"
] | 541 | 2020-05-14T05:56:31.000Z | 2022-03-30T03:34:55.000Z | synthesizer/models/custom_decoder.py | Khizar-Ali/Lip2Wav | 07f056b3468ca660823830680bf25bdd42034f9e | [
"MIT"
] | 36 | 2020-05-14T06:00:31.000Z | 2022-03-10T06:13:44.000Z | synthesizer/models/custom_decoder.py | Khizar-Ali/Lip2Wav | 07f056b3468ca660823830680bf25bdd42034f9e | [
"MIT"
] | 123 | 2020-05-19T02:43:47.000Z | 2022-03-26T11:28:13.000Z | from __future__ import absolute_import, division, print_function
import collections
import tensorflow as tf
from synthesizer.models.helpers import TacoTestHelper, TacoTrainingHelper
from tensorflow.contrib.seq2seq.python.ops import decoder
from tensorflow.contrib.seq2seq.python.ops import helper as helper_py
from tensorflow.python.framework import ops, tensor_shape
from tensorflow.python.layers import base as layers_base
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.util import nest
class CustomDecoderOutput(
    #collections.namedtuple("CustomDecoderOutput", ("rnn_output", "token_output", "sample_id"))):
    collections.namedtuple("CustomDecoderOutput", ("rnn_output", "sample_id"))):
    """Per-step output of CustomDecoder: the raw RNN output and the sampled
    ids. (The commented-out variant also carried a stop-token prediction.)
    """
    pass
class CustomDecoder(decoder.Decoder):
    """Custom sampling decoder.
    Allows for stop token prediction at inference time
    and returns equivalent loss in training time.
    Note:
    Only use this decoder with Tacotron 2 as it only accepts tacotron custom helpers
    """
    def __init__(self, cell, helper, initial_state, output_layer=None):
        """Initialize CustomDecoder.
        Args:
            cell: An `RNNCell` instance.
            helper: A `Helper` instance.
            initial_state: A (possibly nested tuple of...) tensors and TensorArrays.
                The initial state of the RNNCell.
            output_layer: (Optional) An instance of `tf.layers.Layer`, i.e.,
                `tf.layers.Dense`. Optional layer to apply to the RNN output prior
                to storing the result or sampling.
        Raises:
            TypeError: if `cell`, `helper` or `output_layer` have an incorrect type.
        """
        rnn_cell_impl.assert_like_rnncell(type(cell), cell)
        if not isinstance(helper, helper_py.Helper):
            raise TypeError("helper must be a Helper, received: %s" % type(helper))
        if (output_layer is not None
                and not isinstance(output_layer, layers_base.Layer)):
            raise TypeError(
                "output_layer must be a Layer, received: %s" % type(output_layer))
        self._cell = cell
        self._helper = helper
        self._initial_state = initial_state
        self._output_layer = output_layer
    @property
    def batch_size(self):
        # Batch size is dictated by the helper's inputs.
        return self._helper.batch_size
    def _rnn_output_size(self):
        # Output size of the decoder cell, after the optional projection.
        size = self._cell.output_size
        if self._output_layer is None:
            return size
        else:
            # To use layer"s compute_output_shape, we need to convert the
            # RNNCell"s output_size entries into shapes with an unknown
            # batch size. We then pass this through the layer"s
            # compute_output_shape and read off all but the first (batch)
            # dimensions to get the output size of the rnn with the layer
            # applied to the top.
            output_shape_with_unknown_batch = nest.map_structure(
                lambda s: tensor_shape.TensorShape([None]).concatenate(s),
                size)
            layer_output_shape = self._output_layer._compute_output_shape( # pylint: disable=protected-access
                output_shape_with_unknown_batch)
            return nest.map_structure(lambda s: s[1:], layer_output_shape)
    @property
    def output_size(self):
        # Return the cell output and the id
        #return CustomDecoderOutput(
        #    rnn_output=self._rnn_output_size(),
        #    token_output=self._helper.token_output_size,
        #    sample_id=self._helper.sample_ids_shape)
        return CustomDecoderOutput(
            rnn_output=self._rnn_output_size(),
            sample_id=self._helper.sample_ids_shape)
    @property
    def output_dtype(self):
        # Assume the dtype of the cell is the output_size structure
        # containing the input_state"s first component's dtype.
        # Return that structure and the sample_ids_dtype from the helper.
        dtype = nest.flatten(self._initial_state)[0].dtype
        #return CustomDecoderOutput(
        #    nest.map_structure(lambda _: dtype, self._rnn_output_size()),
        #    tf.float32,
        #    self._helper.sample_ids_dtype)
        return CustomDecoderOutput(
            nest.map_structure(lambda _: dtype, self._rnn_output_size()),
            self._helper.sample_ids_dtype)
    def initialize(self, name=None):
        """Initialize the decoder.
        Args:
            name: Name scope for any created operations.
        Returns:
            `(finished, first_inputs, initial_state)`.
        """
        # The helper supplies (finished, first_inputs); the decoder appends
        # its own initial RNN state.
        return self._helper.initialize() + (self._initial_state,)
    def step(self, time, inputs, state, name=None):
        """Perform a custom decoding step.
        Enables for dynamic <stop_token> prediction
        Args:
            time: scalar `int32` tensor.
            inputs: A (structure of) input tensors.
            state: A (structure of) state tensors and TensorArrays.
            name: Name scope for any created operations.
        Returns:
            `(outputs, next_state, next_inputs, finished)`.
        """
        with ops.name_scope(name, "CustomDecoderStep", (time, inputs, state)):
            #Call outputprojection wrapper cell
            #(cell_outputs, stop_token), cell_state = self._cell(inputs, state)
            (cell_outputs), cell_state = self._cell(inputs, state)
            #apply output_layer (if existant)
            if self._output_layer is not None:
                cell_outputs = self._output_layer(cell_outputs)
            # The helper decides what to sample from the cell outputs ...
            sample_ids = self._helper.sample(
                time=time, outputs=cell_outputs, state=cell_state)
            #(finished, next_inputs, next_state) = self._helper.next_inputs(
            #    time=time,
            #    outputs=cell_outputs,
            #    state=cell_state,
            #    sample_ids=sample_ids,
            #    stop_token_prediction=stop_token)
            # ... and what to feed into the next step / when to finish.
            (finished, next_inputs, next_state) = self._helper.next_inputs(
                time=time,
                outputs=cell_outputs,
                state=cell_state,
                sample_ids=sample_ids)
            #outputs = CustomDecoderOutput(cell_outputs, stop_token, sample_ids)
            outputs = CustomDecoderOutput(cell_outputs, sample_ids)
            return (outputs, next_state, next_inputs, finished)
| 36.810811 | 101 | 0.752019 | from __future__ import absolute_import, division, print_function
import collections
import tensorflow as tf
from synthesizer.models.helpers import TacoTestHelper, TacoTrainingHelper
from tensorflow.contrib.seq2seq.python.ops import decoder
from tensorflow.contrib.seq2seq.python.ops import helper as helper_py
from tensorflow.python.framework import ops, tensor_shape
from tensorflow.python.layers import base as layers_base
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.util import nest
class CustomDecoderOutput(
collections.namedtuple("CustomDecoderOutput", ("rnn_output", "sample_id"))):
pass
class CustomDecoder(decoder.Decoder):
def __init__(self, cell, helper, initial_state, output_layer=None):
rnn_cell_impl.assert_like_rnncell(type(cell), cell)
if not isinstance(helper, helper_py.Helper):
raise TypeError("helper must be a Helper, received: %s" % type(helper))
if (output_layer is not None
and not isinstance(output_layer, layers_base.Layer)):
raise TypeError(
"output_layer must be a Layer, received: %s" % type(output_layer))
self._cell = cell
self._helper = helper
self._initial_state = initial_state
self._output_layer = output_layer
@property
def batch_size(self):
return self._helper.batch_size
def _rnn_output_size(self):
size = self._cell.output_size
if self._output_layer is None:
return size
else:
# RNNCell"s output_size entries into shapes with an unknown
# compute_output_shape and read off all but the first (batch)
# dimensions to get the output size of the rnn with the layer
# applied to the top.
output_shape_with_unknown_batch = nest.map_structure(
lambda s: tensor_shape.TensorShape([None]).concatenate(s),
size)
layer_output_shape = self._output_layer._compute_output_shape( # pylint: disable=protected-access
output_shape_with_unknown_batch)
return nest.map_structure(lambda s: s[1:], layer_output_shape)
@property
def output_size(self):
# Return the cell output and the id
#return CustomDecoderOutput(
# rnn_output=self._rnn_output_size(),
# token_output=self._helper.token_output_size,
# sample_id=self._helper.sample_ids_shape)
return CustomDecoderOutput(
rnn_output=self._rnn_output_size(),
sample_id=self._helper.sample_ids_shape)
@property
def output_dtype(self):
# Assume the dtype of the cell is the output_size structure
# containing the input_state"s first component's dtype.
# Return that structure and the sample_ids_dtype from the helper.
dtype = nest.flatten(self._initial_state)[0].dtype
#return CustomDecoderOutput(
# nest.map_structure(lambda _: dtype, self._rnn_output_size()),
# tf.float32,
# self._helper.sample_ids_dtype)
return CustomDecoderOutput(
nest.map_structure(lambda _: dtype, self._rnn_output_size()),
self._helper.sample_ids_dtype)
def initialize(self, name=None):
return self._helper.initialize() + (self._initial_state,)
def step(self, time, inputs, state, name=None):
with ops.name_scope(name, "CustomDecoderStep", (time, inputs, state)):
#Call outputprojection wrapper cell
#(cell_outputs, stop_token), cell_state = self._cell(inputs, state)
(cell_outputs), cell_state = self._cell(inputs, state)
#apply output_layer (if existant)
if self._output_layer is not None:
cell_outputs = self._output_layer(cell_outputs)
sample_ids = self._helper.sample(
time=time, outputs=cell_outputs, state=cell_state)
#(finished, next_inputs, next_state) = self._helper.next_inputs(
# time=time,
# outputs=cell_outputs,
# state=cell_state,
# sample_ids=sample_ids,
# stop_token_prediction=stop_token)
(finished, next_inputs, next_state) = self._helper.next_inputs(
time=time,
outputs=cell_outputs,
state=cell_state,
sample_ids=sample_ids)
#outputs = CustomDecoderOutput(cell_outputs, stop_token, sample_ids)
outputs = CustomDecoderOutput(cell_outputs, sample_ids)
return (outputs, next_state, next_inputs, finished)
| true | true |
f7258e0a7c9afac5477c470a223587a4b47a9d4e | 2,566 | py | Python | run_mndo.py | andersx/fitmndod | 45030f820545cf030dbed0acaf196a5ee20da6f8 | [
"MIT"
] | null | null | null | run_mndo.py | andersx/fitmndod | 45030f820545cf030dbed0acaf196a5ee20da6f8 | [
"MIT"
] | null | null | null | run_mndo.py | andersx/fitmndod | 45030f820545cf030dbed0acaf196a5ee20da6f8 | [
"MIT"
] | null | null | null | #!/usr/bin/env python2
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# For more information, please refer to <http://unlicense.org>
import numpy as np
from scipy.optimize import minimize
from numpy.linalg import norm
import os
from copy import deepcopy
import threading
import subprocess
from matplotlib import pyplot
import time
import seaborn as sns
import pandas as pd
def run_mndo99_nodisk():
    """Run the four MNDO99 master inputs concurrently and return the
    concatenated stdout of all four runs, in master1..master4 order.

    Bug fixed: the original declared ``global output1``..``output4`` inside
    the worker closures, so the workers wrote *module-level* names while the
    function returned its own still-empty locals -- it always returned [].
    Collecting results into a shared, index-addressed list fixes that and
    removes the copy-pasted task functions.
    """
    input_files = ["master1.inp", "master2.inp", "master3.inp", "master4.inp"]
    results = [""] * len(input_files)

    def worker(index, input_file):
        # check_output returns the process stdout; writing into the shared
        # list (captured by closure) makes it visible to the caller.
        results[index] = subprocess.check_output(["./run_mndo99", input_file])

    threads = [threading.Thread(target=worker, args=(i, name))
               for i, name in enumerate(input_files)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    # Same ordering as the original output1 + output2 + output3 + output4.
    return "".join(results)
if __name__ == "__main__":
lol = run_mndo99_nodisk()
print lol
| 27.591398 | 73 | 0.696025 |
import numpy as np
from scipy.optimize import minimize
from numpy.linalg import norm
import os
from copy import deepcopy
import threading
import subprocess
from matplotlib import pyplot
import time
import seaborn as sns
import pandas as pd
def run_mndo99_nodisk():
output1 = []
output2 = []
output3 = []
output4 = []
def task1():
global output1
cmd = ["./run_mndo99", "master1.inp"]
output1 = subprocess.check_output(cmd)
def task2():
global output2
cmd = ["./run_mndo99", "master2.inp"]
output2 = subprocess.check_output(cmd)
def task3():
global output3
cmd = ["./run_mndo99", "master3.inp"]
output3 = subprocess.check_output(cmd)
def task4():
global output4
cmd = ["./run_mndo99", "master4.inp"]
output4 = subprocess.check_output(cmd)
t1 = threading.Thread(target=task1)
t2 = threading.Thread(target=task2)
t3 = threading.Thread(target=task3)
t4 = threading.Thread(target=task4)
t1.start()
t2.start()
t3.start()
t4.start()
t1.join()
t2.join()
t3.join()
t4.join()
return output1 + output2 + output3 + output4
if __name__ == "__main__":
lol = run_mndo99_nodisk()
print lol
| false | true |
f7258fa1fd9ffacbfd6a1a0cc0f2cd988adbdb32 | 3,381 | py | Python | Server_Code/database.py | PUT-PTM/2019_SmartAttendance | cab58f3f355c07d3dfd4c73c8adb4c7bbf6d676c | [
"MIT"
] | 1 | 2019-03-13T16:00:32.000Z | 2019-03-13T16:00:32.000Z | Server_Code/database.py | PUT-PTM/2019_SmartAttendance | cab58f3f355c07d3dfd4c73c8adb4c7bbf6d676c | [
"MIT"
] | null | null | null | Server_Code/database.py | PUT-PTM/2019_SmartAttendance | cab58f3f355c07d3dfd4c73c8adb4c7bbf6d676c | [
"MIT"
] | 1 | 2021-07-10T08:27:21.000Z | 2021-07-10T08:27:21.000Z | from datetime import datetime
import json
from pathlib import Path
import pymssql
config_json: dict = json.loads(Path('config.json').read_text())
# Connecting to database
def connect():
global config_json
# Connect to Microsoft SQL server
conn = pymssql.connect(
server=config_json['server'],
user=config_json['user'],
password=config_json['password'],
database=config_json['database']
)
return conn
def student_exists(sid: int) -> bool:
    """Return True when a StudentInfo row with the given SID exists.

    The SID is bound as a query parameter instead of being concatenated
    into the SQL text, closing the SQL-injection hole for non-integer
    caller input; the connection is always closed via try/finally.
    """
    conn = connect()
    try:
        cursor = conn.cursor()
        cursor.execute('select COUNT(1) from StudentInfo where SID=%s;',
                       (sid,))
        result = cursor.fetchone()
    finally:
        conn.close()
    return result[0] == 1
def student_info_get() -> dict:
    """Return all StudentInfo rows ordered by SID.

    :returns: ``{'elements': [{'SID': int, 'fName': str, 'lName': str}, ...]}``

    The payload is built directly as Python objects instead of splicing
    values into JSON text: names containing quotes would otherwise have
    produced invalid JSON and crashed json.loads.
    """
    conn = connect()
    try:
        cursor = conn.cursor()
        cursor.execute('select * from StudentInfo order by SID;')
        data: dict = {'elements': []}
        row = cursor.fetchone()
        while row:
            data['elements'].append({
                'SID': row[0],
                'fName': row[1],
                'lName': row[2],
            })
            row = cursor.fetchone()
    finally:
        conn.close()
    return data
def student_info_insert(info: dict) -> None:
    """Insert one student record; ``info`` must carry 'SID', 'fName', 'lName'.

    Values are bound as query parameters to prevent SQL injection through
    the name fields (the original hand-quoted them into the statement,
    which also broke on names containing an apostrophe).
    """
    conn = connect()
    try:
        cursor = conn.cursor()
        cursor.execute('insert into StudentInfo values (%s, %s, %s);',
                       (info['SID'], info['fName'], info['lName']))
        conn.commit()
    finally:
        conn.close()
    print('Finished sql req!')
def student_info_delete(sid: int) -> None:
    """Delete the StudentInfo row with the given SID (no-op when absent).

    The SID is bound as a query parameter instead of being concatenated
    into the SQL text.
    """
    conn = connect()
    try:
        cursor = conn.cursor()
        cursor.execute('delete from StudentInfo where SID=%s;', (sid,))
        conn.commit()
    finally:
        conn.close()
def presence_get() -> dict:
    """Return all Presence rows ordered by Date.

    :returns: ``{'elements': [{'SID', 'Date', 'CID', 'Room'}, ...]}`` where
        Date is formatted ``%Y-%m-%d %H:%M:%S`` and CID is stringified
        (kept as a string for backward compatibility with the previous
        hand-built JSON payload).
    """
    conn = connect()
    try:
        cursor = conn.cursor()
        cursor.execute('select * from Presence order by Date;')
        data = {'elements': []}
        row = cursor.fetchone()
        while row:
            data['elements'].append({
                'SID': row[0],
                'Date': row[1].strftime('%Y-%m-%d %H:%M:%S'),
                'CID': str(row[2]),
                'Room': row[3],
            })
            row = cursor.fetchone()
    finally:
        conn.close()
    return data
def presence_insert(info: dict):
    """Insert one presence record: SID plus the current timestamp, and the
    optional CID and Room values when present in ``info``.

    Values are bound as query parameters instead of being spliced into the
    SQL text, which removes the SQL-injection hole and the crash on Room
    values containing an apostrophe (or a missing Room key). The insert
    stays positional, matching the original statement shape.
    """
    values = [info.get('SID'),
              datetime.now().strftime('%Y-%m-%d %H:%M:%S')]
    cid = info.get('CID')
    if cid is not None:
        values.append(cid)
    room = info.get('Room')
    if room:  # the original only appended a non-empty Room
        values.append(room)
    sql_req = ('insert into Presence values (' +
               ','.join(['%s'] * len(values)) + ');')
    conn = connect()
    try:
        cursor = conn.cursor()
        cursor.execute(sql_req, tuple(values))
        conn.commit()
    finally:
        conn.close()
    print('Finished sql req!')
| 25.613636 | 91 | 0.540964 | from datetime import datetime
import json
from pathlib import Path
import pymssql
config_json: dict = json.loads(Path('config.json').read_text())
def connect():
global config_json
conn = pymssql.connect(
server=config_json['server'],
user=config_json['user'],
password=config_json['password'],
database=config_json['database']
)
return conn
def student_exists(sid: int) -> bool:
conn = connect()
cursor = conn.cursor()
cursor.execute('select COUNT(1) from StudentInfo where SID=' + str(sid) + ';')
result = cursor.fetchone()
conn.close()
return str(result[0]) == '1'
def student_info_get() -> dict:
conn = connect()
cursor = conn.cursor()
cursor.execute('select * from StudentInfo order by SID;')
data: dict = json.loads('{"elements":[]}')
row = cursor.fetchone()
while row:
data['elements'].append(
json.loads(
'{"SID":' + str(row[0]) + ',' +
'"fName": "' + row[1] + '",' +
'"lName": "' + row[2] + '"}'
)
)
row = cursor.fetchone()
conn.close()
return data
def student_info_insert(info: dict) -> None:
sid = str(info['SID'])
f_name = '\'' + info['fName'] + '\''
l_name = '\'' + info['lName'] + '\''
sql_req = 'insert into StudentInfo values (' + sid + ',' + f_name + ',' + l_name + ');'
conn = connect()
cursor = conn.cursor()
cursor.execute(sql_req)
conn.commit()
conn.close()
print('Finished sql req!')
def student_info_delete(sid: int) -> None:
conn = connect()
cursor = conn.cursor()
cursor.execute('delete from StudentInfo where SID=' + str(sid) + ';')
conn.commit()
conn.close()
def presence_get() -> dict:
conn = connect()
cursor = conn.cursor()
cursor.execute('select * from Presence order by Date;')
data = json.loads('{"elements":[]}')
row = cursor.fetchone()
while row:
data['elements'].append(
json.loads(
'{"SID":' + str(row[0]) + ',' +
'"Date": "' + row[1].strftime('%Y-%m-%d %H:%M:%S') + '",' +
'"CID": "' + str(row[2]) + '",' +
'"Room": "' + row[3] + '"}'
)
)
row = cursor.fetchone()
conn.close()
return data
def presence_insert(info: dict):
sid = str(info.get('SID'))
date = '\'' + datetime.now().strftime('%Y-%m-%d %H:%M:%S') + '\''
cid = info.get('CID')
room = '\'' + info.get('Room') + '\''
sql_req = 'insert into Presence values (' + sid + ',' + date
if cid is not None:
sql_req += ',' + str(cid)
if len(room) > 2:
sql_req += ',' + room
sql_req += ');'
conn = connect()
cursor = conn.cursor()
cursor.execute(sql_req)
conn.commit()
conn.close()
print('Finished sql req!')
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.