hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
df2d50b13d6118ebb3209f5087fbc8dc10515a8f | 10,949 | py | Python | ucsmsdk/mometa/fabric/FabricFcMonDestEp.py | Curlyfingers/ucsmsdk | 982ff2d8faa12ffb88e1f8cba98cf5749f05c93d | [
"Apache-2.0"
] | null | null | null | ucsmsdk/mometa/fabric/FabricFcMonDestEp.py | Curlyfingers/ucsmsdk | 982ff2d8faa12ffb88e1f8cba98cf5749f05c93d | [
"Apache-2.0"
] | null | null | null | ucsmsdk/mometa/fabric/FabricFcMonDestEp.py | Curlyfingers/ucsmsdk | 982ff2d8faa12ffb88e1f8cba98cf5749f05c93d | [
"Apache-2.0"
] | null | null | null | """This module contains the general information for FabricFcMonDestEp ManagedObject."""
from ...ucsmo import ManagedObject
from ...ucscoremeta import MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class FabricFcMonDestEpConsts:
    """Allowed string values for ``FabricFcMonDestEp`` properties.

    Auto-generated constants; each group below mirrors the value list of
    the corresponding managed-object property in
    ``FabricFcMonDestEp.prop_meta``.
    """
    # adminSpeed values
    ADMIN_SPEED_16GBPS = "16gbps"
    ADMIN_SPEED_1GBPS = "1gbps"
    ADMIN_SPEED_2GBPS = "2gbps"
    ADMIN_SPEED_32GBPS = "32gbps"
    ADMIN_SPEED_4GBPS = "4gbps"
    ADMIN_SPEED_8GBPS = "8gbps"
    ADMIN_SPEED_AUTO = "auto"
    ADMIN_SPEED_INDETERMINATE = "indeterminate"
    # adminState values
    ADMIN_STATE_DISABLED = "disabled"
    ADMIN_STATE_ENABLED = "enabled"
    # autoNegotiate values
    AUTO_NEGOTIATE_FALSE = "false"
    AUTO_NEGOTIATE_NO = "no"
    AUTO_NEGOTIATE_TRUE = "true"
    AUTO_NEGOTIATE_YES = "yes"
    # chassisId sentinel (numeric ids are validated by range "0-255")
    CHASSIS_ID_N_A = "N/A"
    # ifRole values
    IF_ROLE_DIAG = "diag"
    IF_ROLE_FCOE_NAS_STORAGE = "fcoe-nas-storage"
    IF_ROLE_FCOE_STORAGE = "fcoe-storage"
    IF_ROLE_FCOE_UPLINK = "fcoe-uplink"
    IF_ROLE_MGMT = "mgmt"
    IF_ROLE_MONITOR = "monitor"
    IF_ROLE_NAS_STORAGE = "nas-storage"
    IF_ROLE_NETWORK = "network"
    IF_ROLE_NETWORK_FCOE_UPLINK = "network-fcoe-uplink"
    IF_ROLE_SERVER = "server"
    IF_ROLE_SERVICE = "service"
    IF_ROLE_STORAGE = "storage"
    IF_ROLE_UNKNOWN = "unknown"
    # ifType values
    IF_TYPE_AGGREGATION = "aggregation"
    IF_TYPE_PHYSICAL = "physical"
    IF_TYPE_UNKNOWN = "unknown"
    IF_TYPE_VIRTUAL = "virtual"
    # licState values
    LIC_STATE_LICENSE_EXPIRED = "license-expired"
    LIC_STATE_LICENSE_GRACEPERIOD = "license-graceperiod"
    LIC_STATE_LICENSE_INSUFFICIENT = "license-insufficient"
    LIC_STATE_LICENSE_OK = "license-ok"
    LIC_STATE_NOT_APPLICABLE = "not-applicable"
    LIC_STATE_UNKNOWN = "unknown"
    # operState values
    OPER_STATE_DOWN = "down"
    OPER_STATE_ERROR_MISCONFIGURED = "error-misconfigured"
    OPER_STATE_ERROR_UNSUPPORTED_MINI_SERVER_PORT = "error-unsupported-mini-server-port"
    OPER_STATE_FAILED = "failed"
    OPER_STATE_UNKNOWN = "unknown"
    OPER_STATE_UP = "up"
    # peerChassisId sentinel
    PEER_CHASSIS_ID_N_A = "N/A"
    # switchId values (fabric interconnect A/B)
    SWITCH_ID_A = "A"
    SWITCH_ID_B = "B"
    SWITCH_ID_NONE = "NONE"
class FabricFcMonDestEp(ManagedObject):
    """This is FabricFcMonDestEp class."""

    # Shared constants instance exposing the allowed property values.
    consts = FabricFcMonDestEpConsts()
    # Properties that form this MO's relative name (rn).
    naming_props = set([u'slotId', u'portId'])

    # Managed-object metadata: XML class id, rn format, minimum UCSM
    # version, access privileges, parent/child classes and verbs.
    mo_meta = MoMeta("FabricFcMonDestEp", "fabricFcMonDestEp", "dest-slot-[slot_id]-port-[port_id]", VersionMeta.Version141i, "InputOutput", 0x1fff, [], ["admin", "ext-san-config", "ext-san-policy"], [u'fabricFcMon'], [u'faultInst'], ["Add", "Get", "Remove", "Set"])

    # Per-property metadata: (python name, xml name, type, min version,
    # access, dirty-mask bit, min/max length, validation regex,
    # allowed value list, allowed range list).
    prop_meta = {
        "admin_speed": MoPropertyMeta("admin_speed", "adminSpeed", "string", VersionMeta.Version202m, MoPropertyMeta.READ_WRITE, 0x2, None, None, None, ["16gbps", "1gbps", "2gbps", "32gbps", "4gbps", "8gbps", "auto", "indeterminate"], []),
        "admin_state": MoPropertyMeta("admin_state", "adminState", "string", VersionMeta.Version141i, MoPropertyMeta.READ_WRITE, 0x4, None, None, None, ["disabled", "enabled"], []),
        "aggr_port_id": MoPropertyMeta("aggr_port_id", "aggrPortId", "uint", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
        "auto_negotiate": MoPropertyMeta("auto_negotiate", "autoNegotiate", "string", VersionMeta.Version311e, MoPropertyMeta.READ_WRITE, 0x8, None, None, None, ["false", "no", "true", "yes"], []),
        "chassis_id": MoPropertyMeta("chassis_id", "chassisId", "string", VersionMeta.Version141i, MoPropertyMeta.READ_ONLY, None, None, None, None, ["N/A"], ["0-255"]),
        "child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version141i, MoPropertyMeta.INTERNAL, 0x10, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
        "dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version141i, MoPropertyMeta.READ_ONLY, 0x20, 0, 256, None, [], []),
        "ep_dn": MoPropertyMeta("ep_dn", "epDn", "string", VersionMeta.Version141i, MoPropertyMeta.READ_WRITE, 0x40, 0, 256, None, [], []),
        "flt_aggr": MoPropertyMeta("flt_aggr", "fltAggr", "ulong", VersionMeta.Version141i, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
        "if_role": MoPropertyMeta("if_role", "ifRole", "string", VersionMeta.Version141i, MoPropertyMeta.READ_ONLY, None, None, None, None, ["diag", "fcoe-nas-storage", "fcoe-storage", "fcoe-uplink", "mgmt", "monitor", "nas-storage", "network", "network-fcoe-uplink", "server", "service", "storage", "unknown"], []),
        "if_type": MoPropertyMeta("if_type", "ifType", "string", VersionMeta.Version141i, MoPropertyMeta.READ_ONLY, None, None, None, None, ["aggregation", "physical", "unknown", "virtual"], []),
        "lic_gp": MoPropertyMeta("lic_gp", "licGP", "ulong", VersionMeta.Version141i, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
        "lic_state": MoPropertyMeta("lic_state", "licState", "string", VersionMeta.Version141i, MoPropertyMeta.READ_ONLY, None, None, None, None, ["license-expired", "license-graceperiod", "license-insufficient", "license-ok", "not-applicable", "unknown"], []),
        "locale": MoPropertyMeta("locale", "locale", "string", VersionMeta.Version141i, MoPropertyMeta.READ_ONLY, None, None, None, r"""((defaultValue|unknown|server|chassis|internal|external),){0,5}(defaultValue|unknown|server|chassis|internal|external){0,1}""", [], []),
        "name": MoPropertyMeta("name", "name", "string", VersionMeta.Version141i, MoPropertyMeta.READ_WRITE, 0x80, None, None, r"""[\-\.:_a-zA-Z0-9]{0,16}""", [], []),
        "oper_state": MoPropertyMeta("oper_state", "operState", "string", VersionMeta.Version201m, MoPropertyMeta.READ_ONLY, None, None, None, None, ["down", "error-misconfigured", "error-unsupported-mini-server-port", "failed", "unknown", "up"], []),
        "oper_state_reason": MoPropertyMeta("oper_state_reason", "operStateReason", "string", VersionMeta.Version201m, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
        "peer_aggr_port_id": MoPropertyMeta("peer_aggr_port_id", "peerAggrPortId", "uint", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
        "peer_chassis_id": MoPropertyMeta("peer_chassis_id", "peerChassisId", "string", VersionMeta.Version141i, MoPropertyMeta.READ_ONLY, None, None, None, None, ["N/A"], ["0-255"]),
        "peer_dn": MoPropertyMeta("peer_dn", "peerDn", "string", VersionMeta.Version141i, MoPropertyMeta.READ_ONLY, None, 0, 256, None, [], []),
        "peer_port_id": MoPropertyMeta("peer_port_id", "peerPortId", "uint", VersionMeta.Version141i, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
        "peer_slot_id": MoPropertyMeta("peer_slot_id", "peerSlotId", "uint", VersionMeta.Version141i, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
        "port_id": MoPropertyMeta("port_id", "portId", "uint", VersionMeta.Version141i, MoPropertyMeta.NAMING, 0x100, None, None, None, [], ["1-54"]),
        "rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version141i, MoPropertyMeta.READ_ONLY, 0x200, 0, 256, None, [], []),
        "sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
        "slot_id": MoPropertyMeta("slot_id", "slotId", "uint", VersionMeta.Version141i, MoPropertyMeta.NAMING, 0x400, None, None, None, [], ["1-5"]),
        "status": MoPropertyMeta("status", "status", "string", VersionMeta.Version141i, MoPropertyMeta.READ_WRITE, 0x800, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
        "switch_id": MoPropertyMeta("switch_id", "switchId", "string", VersionMeta.Version141i, MoPropertyMeta.READ_ONLY, None, None, None, None, ["A", "B", "NONE"], []),
        "transport": MoPropertyMeta("transport", "transport", "string", VersionMeta.Version141i, MoPropertyMeta.READ_ONLY, None, None, None, r"""((defaultValue|unknown|ether|dce|fc),){0,4}(defaultValue|unknown|ether|dce|fc){0,1}""", [], []),
        "type": MoPropertyMeta("type", "type", "string", VersionMeta.Version141i, MoPropertyMeta.READ_ONLY, None, None, None, r"""((defaultValue|unknown|lan|san|ipc),){0,4}(defaultValue|unknown|lan|san|ipc){0,1}""", [], []),
        "usr_lbl": MoPropertyMeta("usr_lbl", "usrLbl", "string", VersionMeta.Version321d, MoPropertyMeta.READ_WRITE, 0x1000, None, None, r"""[ !#$%&\(\)\*\+,\-\./:;\?@\[\]_\{\|\}~a-zA-Z0-9]{0,32}""", [], []),
        "warnings": MoPropertyMeta("warnings", "warnings", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, None, None, r"""((defaultValue|none|fc-zoning-enabled|configuration-error),){0,3}(defaultValue|none|fc-zoning-enabled|configuration-error){0,1}""", [], []),
    }

    # Mapping from XML attribute names to python attribute names, used
    # when deserializing responses from the UCSM XML API.
    prop_map = {
        "adminSpeed": "admin_speed",
        "adminState": "admin_state",
        "aggrPortId": "aggr_port_id",
        "autoNegotiate": "auto_negotiate",
        "chassisId": "chassis_id",
        "childAction": "child_action",
        "dn": "dn",
        "epDn": "ep_dn",
        "fltAggr": "flt_aggr",
        "ifRole": "if_role",
        "ifType": "if_type",
        "licGP": "lic_gp",
        "licState": "lic_state",
        "locale": "locale",
        "name": "name",
        "operState": "oper_state",
        "operStateReason": "oper_state_reason",
        "peerAggrPortId": "peer_aggr_port_id",
        "peerChassisId": "peer_chassis_id",
        "peerDn": "peer_dn",
        "peerPortId": "peer_port_id",
        "peerSlotId": "peer_slot_id",
        "portId": "port_id",
        "rn": "rn",
        "sacl": "sacl",
        "slotId": "slot_id",
        "status": "status",
        "switchId": "switch_id",
        "transport": "transport",
        "type": "type",
        "usrLbl": "usr_lbl",
        "warnings": "warnings",
    }

    def __init__(self, parent_mo_or_dn, slot_id, port_id, **kwargs):
        """Create a FabricFcMonDestEp under *parent_mo_or_dn*.

        ``slot_id`` and ``port_id`` are the naming properties; all other
        properties start as ``None`` and may be supplied via ``kwargs``.
        """
        self._dirty_mask = 0
        self.slot_id = slot_id
        self.port_id = port_id
        self.admin_speed = None
        self.admin_state = None
        self.aggr_port_id = None
        self.auto_negotiate = None
        self.chassis_id = None
        self.child_action = None
        self.ep_dn = None
        self.flt_aggr = None
        self.if_role = None
        self.if_type = None
        self.lic_gp = None
        self.lic_state = None
        self.locale = None
        self.name = None
        self.oper_state = None
        self.oper_state_reason = None
        self.peer_aggr_port_id = None
        self.peer_chassis_id = None
        self.peer_dn = None
        self.peer_port_id = None
        self.peer_slot_id = None
        self.sacl = None
        self.status = None
        self.switch_id = None
        self.transport = None
        self.type = None
        self.usr_lbl = None
        self.warnings = None

        ManagedObject.__init__(self, "FabricFcMonDestEp", parent_mo_or_dn, **kwargs)
| 64.02924 | 317 | 0.658782 |
68fe6fe8560fa7c6e8c7575b43e549d047270ae5 | 984 | py | Python | migrations/versions/f033e26ef177_initial_migration.py | Berthedusabeyezu/Picth | b9c9b3039aa76a40058594f5ce755b8cb56005ff | [
"MIT"
] | null | null | null | migrations/versions/f033e26ef177_initial_migration.py | Berthedusabeyezu/Picth | b9c9b3039aa76a40058594f5ce755b8cb56005ff | [
"MIT"
] | null | null | null | migrations/versions/f033e26ef177_initial_migration.py | Berthedusabeyezu/Picth | b9c9b3039aa76a40058594f5ce755b8cb56005ff | [
"MIT"
] | null | null | null | """Initial Migration
Revision ID: f033e26ef177
Revises: 435148246c3d
Create Date: 2019-03-05 08:48:13.110813
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'f033e26ef177'  # this migration's id
down_revision = '435148246c3d'  # parent revision in the migration chain
branch_labels = None
depends_on = None
def upgrade():
    """Apply this revision: create the ``comments`` table.

    Each comment belongs to one user (``users.id``) and one pitch
    (``pitches.id``).  Originally auto-generated by Alembic.
    """
    op.create_table(
        'comments',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('comment', sa.String(length=255), nullable=True),
        sa.Column('user_id', sa.Integer(), nullable=True),
        sa.Column('post_id', sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(['post_id'], ['pitches.id'], ),
        sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
        sa.PrimaryKeyConstraint('id'),
    )
def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Reverse of upgrade(): drop the comments table.
    op.drop_table('comments')
    # ### end Alembic commands ###
| 26.594595 | 65 | 0.674797 |
0d9add65f11590c371222fa054e05a0d2d59022f | 68 | py | Python | example_snippets/multimenus_snippets/Snippets/SciPy/Physical and mathematical constants/CODATA physical constants/K/kelvin-atomic mass unit relationship.py | kuanpern/jupyterlab-snippets-multimenus | 477f51cfdbad7409eab45abe53cf774cd70f380c | [
"BSD-3-Clause"
] | null | null | null | example_snippets/multimenus_snippets/Snippets/SciPy/Physical and mathematical constants/CODATA physical constants/K/kelvin-atomic mass unit relationship.py | kuanpern/jupyterlab-snippets-multimenus | 477f51cfdbad7409eab45abe53cf774cd70f380c | [
"BSD-3-Clause"
] | null | null | null | example_snippets/multimenus_snippets/Snippets/SciPy/Physical and mathematical constants/CODATA physical constants/K/kelvin-atomic mass unit relationship.py | kuanpern/jupyterlab-snippets-multimenus | 477f51cfdbad7409eab45abe53cf774cd70f380c | [
"BSD-3-Clause"
] | 1 | 2021-02-04T04:51:48.000Z | 2021-02-04T04:51:48.000Z | constants.physical_constants["kelvin-atomic mass unit relationship"] | 68 | 68 | 0.867647 |
14ee62057d355e6571588805ea5c36b388aabd2f | 6,889 | py | Python | manim_engine/once_useful_constructs/combinatorics.py | nmamano/sanim | b4b8603bfb86678f7448364135292cde7b02d719 | [
"MIT"
] | 1 | 2021-07-05T08:59:57.000Z | 2021-07-05T08:59:57.000Z | manim_engine/once_useful_constructs/combinatorics.py | nmamano/sanim | b4b8603bfb86678f7448364135292cde7b02d719 | [
"MIT"
] | null | null | null | manim_engine/once_useful_constructs/combinatorics.py | nmamano/sanim | b4b8603bfb86678f7448364135292cde7b02d719 | [
"MIT"
] | null | null | null | from manim_engine.constants import *
from manim_engine.mobject.svg.tex_mobject import TexMobject
from manim_engine.mobject.types.vectorized_mobject import VMobject, VGroup
from manim_engine.mobject.numbers import Integer
from manim_engine.scene.scene import Scene
from manim_engine.utils.simple_functions import choose
DEFAULT_COUNT_NUM_OFFSET = (FRAME_X_RADIUS - 1, FRAME_Y_RADIUS - 1, 0)
DEFAULT_COUNT_RUN_TIME = 5.0
class CountingScene(Scene):
    """Scene mixin that animates counting mobjects or regions one by one.

    NOTE(review): ``it`` (presumably ``itertools``) and the animation names
    (``ShowCreation``) are not imported in this module; they appear to come
    from the ``constants`` star import -- confirm before reuse.
    """
    def count(self, items, item_type = "mobject", *args, **kwargs):
        # Dispatch on item_type; extra args are forwarded unchanged.
        if item_type == "mobject":
            self.count_mobjects(items, *args, **kwargs)
        elif item_type == "region":
            self.count_regions(items, *args, **kwargs)
        else:
            raise Exception("Unknown item_type, should be mobject or region")
        return self

    def count_mobjects(
        self, mobjects, mode = "highlight",
        color = "red",
        display_numbers = True,
        num_offset = DEFAULT_COUNT_NUM_OFFSET,
        run_time = DEFAULT_COUNT_RUN_TIME):
        """
        Note, leaves final number mobject as "number" attribute

        mode can be "highlight", "show_creation" or "show", otherwise
        a warning is given and nothing is animating during the count
        """
        # NOTE(review): despite the docstring, an unknown mode *raises*
        # Warning (an exception) rather than emitting a warning -- confirm
        # which behavior is intended.
        if len(mobjects) > 50: #TODO
            raise Exception("I don't know if you should be counting \
                too many mobjects...")
        if len(mobjects) == 0:
            raise Exception("Counting mobject list of length 0")
        if mode not in ["highlight", "show_creation", "show"]:
            raise Warning("Unknown mode")
        # Split the total run time evenly across items.
        frame_time = run_time / len(mobjects)
        if mode == "highlight":
            self.add(*mobjects)
        for mob, num in zip(mobjects, it.count(1)):
            if display_numbers:
                # Show the running count at num_offset.
                num_mob = TexMobject(str(num))
                num_mob.center().shift(num_offset)
                self.add(num_mob)
            if mode == "highlight":
                # Temporarily recolor the current mobject.
                original_color = mob.color
                mob.set_color(color)
                self.wait(frame_time)
                mob.set_color(original_color)
            if mode == "show_creation":
                self.play(ShowCreation(mob, run_time = frame_time))
            if mode == "show":
                self.add(mob)
                self.wait(frame_time)
            if display_numbers:
                self.remove(num_mob)
        if display_numbers:
            # Leave the final count on screen and expose it to callers.
            self.add(num_mob)
            self.number = num_mob
        return self

    def count_regions(self, regions,
                      mode = "one_at_a_time",
                      num_offset = DEFAULT_COUNT_NUM_OFFSET,
                      run_time = DEFAULT_COUNT_RUN_TIME,
                      **unused_kwargsn):
        """Count regions by coloring them; leaves final count as ``number``."""
        if mode not in ["one_at_a_time", "show_all"]:
            raise Warning("Unknown mode")
        frame_time = run_time / (len(regions))
        for region, count in zip(regions, it.count(1)):
            num_mob = TexMobject(str(count))
            num_mob.center().shift(num_offset)
            self.add(num_mob)
            self.set_color_region(region)
            self.wait(frame_time)
            if mode == "one_at_a_time":
                # Clear the previous region before showing the next one.
                self.reset_background()
            self.remove(num_mob)
        self.add(num_mob)
        self.number = num_mob
        return self
def combinationMobject(n, k):
    """Return an ``Integer`` mobject displaying the binomial coefficient C(n, k)."""
    value = choose(n, k)
    return Integer(value)
class GeneralizedPascalsTriangle(VMobject):
    """A Pascal's-triangle-shaped grid of mobjects.

    Cell (n, k) is produced by ``submob_class(n, k)`` (by default an
    ``Integer`` showing ``choose(n, k)``) and scaled to occupy at most
    ``portion_to_fill`` of its cell.
    """
    CONFIG = {
        "nrows" : 7,
        "height" : FRAME_HEIGHT - 1,
        "width" : 1.5*FRAME_X_RADIUS,
        "portion_to_fill" : 0.7,
        "submob_class" : combinationMobject,
    }
    def generate_points(self):
        """Create and position one submobject for every (n, k) cell."""
        self.cell_height = float(self.height) / self.nrows
        self.cell_width = float(self.width) / self.nrows
        self.bottom_left = (self.cell_width * self.nrows / 2.0)*LEFT + \
                           (self.cell_height * self.nrows / 2.0)*DOWN
        self.coords_to_mobs = {}
        self.coords = [
            (n, k)
            for n in range(self.nrows)
            for k in range(n+1)
        ]
        for n, k in self.coords:
            center = self.coords_to_center(n, k)
            num_mob = self.submob_class(n, k)
            # Shrink (never enlarge) so the mob fills at most
            # portion_to_fill of the cell in each dimension.
            scale_factor = min(
                1,
                self.portion_to_fill * self.cell_height / num_mob.get_height(),
                self.portion_to_fill * self.cell_width / num_mob.get_width(),
            )
            num_mob.center().scale(scale_factor).shift(center)
            if n not in self.coords_to_mobs:
                self.coords_to_mobs[n] = {}
            self.coords_to_mobs[n][k] = num_mob
        self.add(*[
            self.coords_to_mobs[n][k]
            for n, k in self.coords
        ])
        return self

    def coords_to_center(self, n, k):
        """Return the scene-space center point of cell (n, k)."""
        x_offset = self.cell_width * (k+self.nrows/2.0 - n/2.0)
        y_offset = self.cell_height * (self.nrows - n)
        return self.bottom_left + x_offset*RIGHT + y_offset*UP

    def generate_n_choose_k_mobs(self):
        """Build "n choose k" TeX labels positioned over each cell."""
        self.coords_to_n_choose_k = {}
        for n, k in self.coords:
            nck_mob = TexMobject(r"{%d \choose %d}"%(n, k))
            scale_factor = min(
                1,
                self.portion_to_fill * self.cell_height / nck_mob.get_height(),
                self.portion_to_fill * self.cell_width / nck_mob.get_width(),
            )
            center = self.coords_to_mobs[n][k].get_center()
            nck_mob.center().scale(scale_factor).shift(center)
            if n not in self.coords_to_n_choose_k:
                self.coords_to_n_choose_k[n] = {}
            self.coords_to_n_choose_k[n][k] = nck_mob
        return self

    def fill_with_n_choose_k(self):
        """Replace the current submobjects with the "n choose k" labels."""
        if not hasattr(self, "coords_to_n_choose_k"):
            self.generate_n_choose_k_mobs()
        self.submobjects = []
        self.add(*[
            self.coords_to_n_choose_k[n][k]
            for n, k in self.coords
        ])
        return self

    def generate_sea_of_zeros(self):
        """Surround the triangle with "0" mobjects outside each row."""
        zero = TexMobject("0")
        self.sea_of_zeros = []
        for n in range(self.nrows):
            # // keeps the count an int: under Python 3 the original
            # true division produced a float and range() raised TypeError.
            for a in range((self.nrows - n) // 2 + 1):
                # One zero on each side of row n, a cells away.
                for k in (n + a + 1, -a - 1):
                    self.coords.append((n, k))
                    mob = zero.copy()
                    mob.shift(self.coords_to_center(n, k))
                    self.coords_to_mobs[n][k] = mob
                    self.add(mob)
        return self

    def get_lowest_row(self):
        """Return the bottom row's submobjects as a VGroup."""
        n = self.nrows - 1
        lowest_row = VGroup(*[
            self.coords_to_mobs[n][k]
            for k in range(n+1)
        ])
        return lowest_row
class PascalsTriangle(GeneralizedPascalsTriangle):
    """Standard Pascal's triangle: each cell shows ``choose(n, k)``."""
    CONFIG = {
        "submob_class" : combinationMobject,
    }
| 35.328205 | 79 | 0.561765 |
951dbbf634f5dfcca99c8adbef42067cd82d7d19 | 4,479 | py | Python | doc/source/conf.py | openstack/congress | 85243abf63dfc7c086e28e9bdb3fb0b7c9d2ad94 | [
"Apache-2.0"
] | 50 | 2015-04-21T14:12:01.000Z | 2020-06-01T06:23:13.000Z | doc/source/conf.py | openstack-archive/congress | 85243abf63dfc7c086e28e9bdb3fb0b7c9d2ad94 | [
"Apache-2.0"
] | 4 | 2015-09-25T04:42:38.000Z | 2019-10-12T19:37:46.000Z | doc/source/conf.py | openstack-archive/congress | 85243abf63dfc7c086e28e9bdb3fb0b7c9d2ad94 | [
"Apache-2.0"
] | 25 | 2015-05-22T04:02:33.000Z | 2020-01-14T12:15:12.000Z | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys

# Make the project root (and doc dirs) importable so autodoc/apidoc can
# find the congress package.
sys.path.insert(0, os.path.abspath('../..'))
sys.path.insert(0, os.path.abspath('../'))
sys.path.insert(0, os.path.abspath('./'))

# -- General configuration ----------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.

extensions = [
    'sphinx.ext.todo',
    'sphinxcontrib.apidoc',
    'oslo_config.sphinxext',
    'oslo_config.sphinxconfiggen',
]

# Don't use default openstack theme, for readthedocs
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd:
    extensions.append('openstackdocstheme')

# openstackdocstheme options
repository_name = 'openstack/congress'
bug_project = 'congress'
bug_tag = ''

# autodoc generation is a bit aggressive and a nuisance when doing heavy
# text edit cycles.
# execute "export SPHINX_DEBUG=1" in your terminal to disable

# sphinxcontrib.apidoc options
apidoc_module_dir = '../../congress'
apidoc_output_dir = 'api'
# Paths (relative to apidoc_module_dir) that apidoc must not document.
apidoc_excluded_paths = [
    'datalog/Python2/*',
    'datalog/Python3/*',
    'db/migration/alembic_migrations/*',
    'server/*',
    'tests/*',
    '/dse2/disabled_test_control_bus.py']
apidoc_separate_modules = True

# The suffix of source filenames.
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'congress'
copyright = u'2013, OpenStack Foundation'

# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of glob-style patterns that should be excluded when looking for
# source files. They are matched against the source file names relative to the
# source directory, using slashes as directory separators on all platforms.
exclude_patterns = ['api/congress.db.migration.alembic_migrations.*',
                    'api/congress.server.*']

# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['congress.']

# Modules autodoc cannot import in the doc-build environment; mock them.
autodoc_mock_imports = ['congress.datalog.Python2', 'congress.datalog.Python3',
                        'cloudfoundryclient', 'congress.dse',
                        'monascaclient']

# -- Options for HTML output --------------------------------------------------

# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme_path = ["."]
html_theme = 'openstackdocs'
html_static_path = ['_static']

# Output file base name for HTML help builder.
htmlhelp_basename = '%sdoc' % project

html_last_updated_fmt = '%Y-%m-%d %H:%M'

# Disable usage of xindy https://bugzilla.redhat.com/show_bug.cgi?id=1643664
latex_use_xindy = False

latex_domain_indices = False

latex_elements = {
    'makeindex': '',
    'printindex': '',
    'preamble': r'\setcounter{tocdepth}{3}',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
# NOTE: Specify toctree_only=True for a better document structure of
# the generated PDF file.
latex_documents = [
    ('index',
     'doc-%s.tex' % project,
     u'Congress Documentation',
     u'OpenStack Foundation', 'manual', True),
]

# Example configuration for intersphinx: refer to the Python standard library.
#intersphinx_mapping = {'http://docs.python.org/': None}

# -- Options for oslo_config.sphinxconfiggen ---------------------------------

# (input config-generator conf, output sample-config basename) pairs.
config_generator_config_file = [
    ('../../etc/congress-config-generator.conf',
     '_static/congress'),
    ('../../etc/congress-agent-config-generator.conf',
     '_static/congress-agent')
]
# Render todo/todolist directives from sphinx.ext.todo in the built docs.
# (This was previously written as an INI-style "[extensions]" section left
# over from a setup.cfg; in Python that line was just a no-op list
# expression, so only the assignment below is meaningful.)
todo_include_todos = True
| 31.321678 | 79 | 0.697924 |
b15d753b3a2691f8cedbbacd99255253111f951b | 677 | py | Python | shiny_sheep/shiny_sheep/users/models.py | Vthechamp22/summer-code-jam-2021 | 0a8bf1f22f6c73300891fd779da36efd8e1304c1 | [
"MIT"
] | 40 | 2020-08-02T07:38:22.000Z | 2021-07-26T01:46:50.000Z | shiny_sheep/shiny_sheep/users/models.py | Vthechamp22/summer-code-jam-2021 | 0a8bf1f22f6c73300891fd779da36efd8e1304c1 | [
"MIT"
] | 134 | 2020-07-31T12:15:45.000Z | 2020-12-13T04:42:19.000Z | shiny_sheep/shiny_sheep/users/models.py | Vthechamp22/summer-code-jam-2021 | 0a8bf1f22f6c73300891fd779da36efd8e1304c1 | [
"MIT"
] | 101 | 2020-07-31T12:00:47.000Z | 2021-11-01T09:06:58.000Z | from django.contrib.auth.models import AbstractUser
from django.db.models import CharField
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
class User(AbstractUser):
    """Default user for Shiny Sheep.

    Extends Django's ``AbstractUser``, replacing the first/last-name pair
    with a single free-form ``name`` field.
    """

    #: First and last name do not cover name patterns around the globe
    name = CharField(_("Name of User"), blank=True, max_length=255)

    # No extra fields prompted for by createsuperuser; the USERNAME_FIELD
    # and password are always required implicitly and must not be listed.
    REQUIRED_FIELDS = []  # username is added automatically here?

    def get_absolute_url(self):
        """Get url for user's detail view.

        Returns:
            str: URL for user detail.

        """
        return reverse("users:detail", kwargs={"username": self.username})
| 29.434783 | 74 | 0.686854 |
8b0d3a6614e92dd722263a293b8ba3f460a0bfe3 | 35,348 | py | Python | scipy/integrate/_ode.py | seberg/scipy | d8081cdd40ed8cbebd5905c0ad6c323c57d5da6e | [
"BSD-3-Clause"
] | 1 | 2018-10-04T15:34:14.000Z | 2018-10-04T15:34:14.000Z | scipy/integrate/_ode.py | seberg/scipy | d8081cdd40ed8cbebd5905c0ad6c323c57d5da6e | [
"BSD-3-Clause"
] | null | null | null | scipy/integrate/_ode.py | seberg/scipy | d8081cdd40ed8cbebd5905c0ad6c323c57d5da6e | [
"BSD-3-Clause"
] | null | null | null | # Authors: Pearu Peterson, Pauli Virtanen, John Travers
"""
First-order ODE integrators.
User-friendly interface to various numerical integrators for solving a
system of first order ODEs with prescribed initial conditions::
d y(t)[i]
--------- = f(t,y(t))[i],
d t
y(t=0)[i] = y0[i],
where::
i = 0, ..., len(y0) - 1
class ode
---------
A generic interface class to numeric integrators. It has the following
methods::
integrator = ode(f,jac=None)
integrator = integrator.set_integrator(name,**params)
integrator = integrator.set_initial_value(y0,t0=0.0)
integrator = integrator.set_f_params(*args)
integrator = integrator.set_jac_params(*args)
y1 = integrator.integrate(t1,step=0,relax=0)
flag = integrator.successful()
class complex_ode
-----------------
This class has the same generic interface as ode, except it can handle complex
f, y and Jacobians by transparently translating them into the equivalent
real valued system. It supports the real valued solvers (i.e not zvode) and is
an alternative to ode with the zvode solver, sometimes performing better.
"""
# XXX: Integrators must have:
# ===========================
# cvode - C version of vode and vodpk with many improvements.
# Get it from http://www.netlib.org/ode/cvode.tar.gz
# To wrap cvode to Python, one must write extension module by
# hand. Its interface is too much 'advanced C' that using f2py
# would be too complicated (or impossible).
#
# How to define a new integrator:
# ===============================
#
# class myodeint(IntegratorBase):
#
# runner = <odeint function> or None
#
# def __init__(self,...): # required
# <initialize>
#
# def reset(self,n,has_jac): # optional
# # n - the size of the problem (number of equations)
# # has_jac - whether user has supplied its own routine for Jacobian
# <allocate memory,initialize further>
#
# def run(self,f,jac,y0,t0,t1,f_params,jac_params): # required
# # this method is called to integrate from t=t0 to t=t1
# # with initial condition y0. f and jac are user-supplied functions
# # that define the problem. f_params,jac_params are additional
# # arguments
# # to these functions.
# <calculate y1>
#         if <calculation was unsuccessful>:
# self.success = 0
# return t1,y1
#
# # In addition, one can define step() and run_relax() methods (they
# # take the same arguments as run()) if the integrator can support
# # these features (see IntegratorBase doc strings).
#
# if myodeint.runner:
# IntegratorBase.integrator_classes.append(myodeint)
# Public API: only the two front-end classes are exported; the per-solver
# integrator classes below are selected via set_integrator().
__all__ = ['ode', 'complex_ode']
__version__ = "$Id$"
__docformat__ = "restructuredtext en"
import re
import warnings
from numpy import asarray, array, zeros, int32, isscalar, real, imag
import vode as _vode
import _dop
import lsoda as _lsoda
#------------------------------------------------------------------------------
# User interface
#------------------------------------------------------------------------------
class ode(object):
    """
    A generic interface class to numeric integrators.
    Solve an equation system :math:`y'(t) = f(t,y)` with (optional) ``jac = df/dy``.
    Parameters
    ----------
    f : callable ``f(t, y, *f_args)``
        Rhs of the equation. t is a scalar, ``y.shape == (n,)``.
        ``f_args`` is set by calling ``set_f_params(*args)``.
    jac : callable ``jac(t, y, *jac_args)``
        Jacobian of the rhs, ``jac[i,j] = d f[i] / d y[j]``.
        ``jac_args`` is set by calling ``set_f_params(*args)``.
    Attributes
    ----------
    t : float
        Current time.
    y : ndarray
        Current variable values.
    See also
    --------
    odeint : an integrator with a simpler interface based on lsoda from ODEPACK
    quad : for finding the area under a curve
    Notes
    -----
    Available integrators are listed below. They can be selected using
    the `set_integrator` method.
    "vode"
        Real-valued Variable-coefficient Ordinary Differential Equation
        solver, with fixed-leading-coefficient implementation. It provides
        implicit Adams method (for non-stiff problems) and a method based on
        backward differentiation formulas (BDF) (for stiff problems).
        Source: http://www.netlib.org/ode/vode.f
        .. warning::
           This integrator is not re-entrant. You cannot have two `ode`
           instances using the "vode" integrator at the same time.
        This integrator accepts the following parameters in `set_integrator`
        method of the `ode` class:
        - atol : float or sequence
          absolute tolerance for solution
        - rtol : float or sequence
          relative tolerance for solution
        - lband : None or int
        - rband : None or int
          Jacobian band width, jac[i,j] != 0 for i-lband <= j <= i+rband.
          Setting these requires your jac routine to return the jacobian
          in packed format, jac_packed[i-j+lband, j] = jac[i,j].
        - method: 'adams' or 'bdf'
          Which solver to use, Adams (non-stiff) or BDF (stiff)
        - with_jacobian : bool
          Whether to use the jacobian
        - nsteps : int
          Maximum number of (internally defined) steps allowed during one
          call to the solver.
        - first_step : float
        - min_step : float
        - max_step : float
          Limits for the step sizes used by the integrator.
        - order : int
          Maximum order used by the integrator,
          order <= 12 for Adams, <= 5 for BDF.
    "zvode"
        Complex-valued Variable-coefficient Ordinary Differential Equation
        solver, with fixed-leading-coefficient implementation.  It provides
        implicit Adams method (for non-stiff problems) and a method based on
        backward differentiation formulas (BDF) (for stiff problems).
        Source: http://www.netlib.org/ode/zvode.f
        .. warning::
           This integrator is not re-entrant. You cannot have two `ode`
           instances using the "zvode" integrator at the same time.
        This integrator accepts the same parameters in `set_integrator`
        as the "vode" solver.
        .. note::
            When using ZVODE for a stiff system, it should only be used for
            the case in which the function f is analytic, that is, when each f(i)
            is an analytic function of each y(j). Analyticity means that the
            partial derivative df(i)/dy(j) is a unique complex number, and this
            fact is critical in the way ZVODE solves the dense or banded linear
            systems that arise in the stiff case. For a complex stiff ODE system
            in which f is not analytic, ZVODE is likely to have convergence
            failures, and for this problem one should instead use DVODE on the
            equivalent real system (in the real and imaginary parts of y).
    "lsoda"
        Real-valued Variable-coefficient Ordinary Differential Equation
        solver, with fixed-leading-coefficient implementation. It provides
        automatic method switching between implicit Adams method (for non-stiff
        problems) and a method based on backward differentiation formulas (BDF)
        (for stiff problems).
        Source: http://www.netlib.org/odepack
        .. warning::
           This integrator is not re-entrant. You cannot have two `ode`
           instances using the "lsoda" integrator at the same time.
        This integrator accepts the following parameters in `set_integrator`
        method of the `ode` class:
        - atol : float or sequence
          absolute tolerance for solution
        - rtol : float or sequence
          relative tolerance for solution
        - lband : None or int
        - rband : None or int
          Jacobian band width, jac[i,j] != 0 for i-lband <= j <= i+rband.
          Setting these requires your jac routine to return the jacobian
          in packed format, jac_packed[i-j+lband, j] = jac[i,j].
        - with_jacobian : bool
          Whether to use the jacobian
        - nsteps : int
          Maximum number of (internally defined) steps allowed during one
          call to the solver.
        - first_step : float
        - min_step : float
        - max_step : float
          Limits for the step sizes used by the integrator.
        - max_order_ns : int
          Maximum order used in the nonstiff case (default 12).
        - max_order_s : int
          Maximum order used in the stiff case (default 5).
        - max_hnil : int
          Maximum number of messages reporting too small step size (t + h = t)
          (default 0)
        - ixpr : int
          Whether to generate extra printing at method switches (default False).
    "dopri5"
        This is an explicit runge-kutta method of order (4)5 due to Dormand &
        Prince (with stepsize control and dense output).
        Authors:
            E. Hairer and G. Wanner
            Universite de Geneve, Dept. de Mathematiques
            CH-1211 Geneve 24, Switzerland
            e-mail:  ernst.hairer@math.unige.ch, gerhard.wanner@math.unige.ch
        This code is described in [HNW93]_.
        This integrator accepts the following parameters in set_integrator()
        method of the ode class:
        - atol : float or sequence
          absolute tolerance for solution
        - rtol : float or sequence
          relative tolerance for solution
        - nsteps : int
          Maximum number of (internally defined) steps allowed during one
          call to the solver.
        - first_step : float
        - max_step : float
        - safety : float
          Safety factor on new step selection (default 0.9)
        - ifactor : float
        - dfactor : float
          Maximum factor to increase/decrease step size by in one step
        - beta : float
          Beta parameter for stabilised step size control.
    "dop853"
        This is an explicit runge-kutta method of order 8(5,3) due to Dormand
        & Prince (with stepsize control and dense output).
        Options and references the same as "dopri5".
    Examples
    --------
    A problem to integrate and the corresponding jacobian:
    >>> from scipy.integrate import ode
    >>>
    >>> y0, t0 = [1.0j, 2.0], 0
    >>>
    >>> def f(t, y, arg1):
    >>>     return [1j*arg1*y[0] + y[1], -arg1*y[1]**2]
    >>> def jac(t, y, arg1):
    >>>     return [[1j*arg1, 1], [0, -arg1*2*y[1]]]
    The integration:
    >>> r = ode(f, jac).set_integrator('zvode', method='bdf', with_jacobian=True)
    >>> r.set_initial_value(y0, t0).set_f_params(2.0).set_jac_params(2.0)
    >>> t1 = 10
    >>> dt = 1
    >>> while r.successful() and r.t < t1:
    >>>     r.integrate(r.t+dt)
    >>>     print r.t, r.y
    References
    ----------
    .. [HNW93] E. Hairer, S.P. Norsett and G. Wanner, Solving Ordinary
        Differential Equations i. Nonstiff Problems. 2nd edition.
        Springer Series in Computational Mathematics,
        Springer-Verlag (1993)
    """
    def __init__(self, f, jac=None):
        # NOTE(review): `stiff` is set but not referenced anywhere in this
        # class -- looks like legacy bookkeeping; confirm before removing.
        self.stiff = 0
        self.f = f
        self.jac = jac
        # Extra positional arguments forwarded to f / jac on every call;
        # populated via set_f_params() / set_jac_params().
        self.f_params = ()
        self.jac_params = ()
        # Current solution vector; empty until set_initial_value() is called.
        self._y = []
    @property
    def y(self):
        # Read-only access to the current solution vector.
        return self._y
    def set_initial_value(self, y, t=0.0):
        """Set initial conditions y(t) = y."""
        # Scalar initial conditions are promoted to a length-1 vector.
        if isscalar(y):
            y = [y]
        n_prev = len(self._y)
        if not n_prev:
            self.set_integrator('')  # find first available integrator
        # Cast to the integrator's scalar type (float for real solvers,
        # complex for zvode).
        self._y = asarray(y, self._integrator.scalar)
        self.t = t
        # Re-allocate the integrator's workspaces for the (possibly new)
        # problem size.
        self._integrator.reset(len(self._y), self.jac is not None)
        return self
    def set_integrator(self, name, **integrator_params):
        """
        Set integrator by name.
        Parameters
        ----------
        name : str
            Name of the integrator.
        integrator_params :
            Additional parameters for the integrator.
        """
        integrator = find_integrator(name)
        if integrator is None:
            # FIXME: this really should be raise an exception. Will that break
            # any code?
            warnings.warn('No integrator name match with %r or is not '
                          'available.' % name)
        else:
            self._integrator = integrator(**integrator_params)
            # If no initial value has been set yet, install a harmless
            # default so that reset() below has a problem size to work with.
            if not len(self._y):
                self.t = 0.0
                self._y = array([0.0], self._integrator.scalar)
            self._integrator.reset(len(self._y), self.jac is not None)
        return self
    def integrate(self, t, step=0, relax=0):
        """Find y=y(t), set y as an initial condition, and return y."""
        # Choose the back-end entry point: single-step and "run until at
        # least t" modes are only used if the integrator advertises them.
        if step and self._integrator.supports_step:
            mth = self._integrator.step
        elif relax and self._integrator.supports_run_relax:
            mth = self._integrator.run_relax
        else:
            mth = self._integrator.run
        # A dummy lambda stands in for the Jacobian when none was supplied;
        # back-ends only call it when their parameters say a Jacobian exists.
        self._y, self.t = mth(self.f, self.jac or (lambda: None),
                              self._y, self.t, t,
                              self.f_params, self.jac_params)
        return self._y
    def successful(self):
        """Check if integration was successful."""
        try:
            self._integrator
        except AttributeError:
            # No integrator chosen yet -- pick the first available one.
            self.set_integrator('')
        return self._integrator.success == 1
    def set_f_params(self, *args):
        """Set extra parameters for user-supplied function f."""
        self.f_params = args
        return self
    def set_jac_params(self, *args):
        """Set extra parameters for user-supplied function jac."""
        self.jac_params = args
        return self
class complex_ode(ode):
    """
    A wrapper of ode for complex systems.
    This functions similarly as `ode`, but re-maps a complex-valued
    equation system to a real-valued one before using the integrators.
    The complex vector of length n is stored interleaved in a real vector
    of length 2n: even indices hold the real parts, odd indices the
    imaginary parts.
    Parameters
    ----------
    f : callable ``f(t, y, *f_args)``
        Rhs of the equation. t is a scalar, ``y.shape == (n,)``.
        ``f_args`` is set by calling ``set_f_params(*args)``.
    jac : callable ``jac(t, y, *jac_args)``
        Jacobian of the rhs, ``jac[i,j] = d f[i] / d y[j]``.
        ``jac_args`` is set by calling ``set_f_params(*args)``.
    Attributes
    ----------
    t : float
        Current time.
    y : ndarray
        Current variable values.
    Examples
    --------
    For usage examples, see `ode`.
    """

    def __init__(self, f, jac=None):
        self.cf = f
        self.cjac = jac
        # Hand the real-valued wrappers to the base class; the wrapped
        # Jacobian is only installed when the user actually supplied one.
        if jac is not None:
            ode.__init__(self, self._wrap, self._wrap_jac)
        else:
            ode.__init__(self, self._wrap, None)

    def _wrap(self, t, y, *f_args):
        # Rebuild the complex state from the interleaved real vector,
        # evaluate the user's rhs, and split the result back into the
        # pre-allocated real work array.
        f = self.cf(*((t, y[::2] + 1j * y[1::2]) + f_args))
        self.tmp[::2] = real(f)
        self.tmp[1::2] = imag(f)
        return self.tmp

    def _wrap_jac(self, t, y, *jac_args):
        # The complex Jacobian J maps to the real 2x2 block form
        # [[Re J, -Im J], [Im J, Re J]], written block-interleaved here.
        jac = self.cjac(*((t, y[::2] + 1j * y[1::2]) + jac_args))
        self.jac_tmp[1::2, 1::2] = self.jac_tmp[::2, ::2] = real(jac)
        self.jac_tmp[1::2, ::2] = imag(jac)
        self.jac_tmp[::2, 1::2] = -self.jac_tmp[1::2, ::2]
        return self.jac_tmp

    @property
    def y(self):
        # Reassemble the complex solution from the interleaved real storage.
        return self._y[::2] + 1j * self._y[1::2]

    def set_integrator(self, name, **integrator_params):
        """
        Set integrator by name.
        Parameters
        ----------
        name : str
            Name of the integrator
        integrator_params :
            Additional parameters for the integrator.
        Raises
        ------
        ValueError
            If the "zvode" integrator is requested; zvode already handles
            complex systems natively and must be used through `ode`.
        """
        # The previous message said "not zode", referring to a class that
        # does not exist; point users at complex_ode instead.
        if name == 'zvode':
            raise ValueError("zvode must be used with ode, not complex_ode")
        return ode.set_integrator(self, name, **integrator_params)

    def set_initial_value(self, y, t=0.0):
        """Set initial conditions y(t) = y."""
        y = asarray(y)
        # Allocate the interleaved real representation (and, if a Jacobian
        # was supplied, its doubled-size real work matrix).
        self.tmp = zeros(y.size * 2, 'float')
        self.tmp[::2] = real(y)
        self.tmp[1::2] = imag(y)
        if self.cjac is not None:
            self.jac_tmp = zeros((y.size * 2, y.size * 2), 'float')
        return ode.set_initial_value(self, self.tmp, t)

    def integrate(self, t, step=0, relax=0):
        """Find y=y(t), set y as an initial condition, and return y."""
        y = ode.integrate(self, t, step, relax)
        return y[::2] + 1j * y[1::2]
#------------------------------------------------------------------------------
# ODE integrators
#------------------------------------------------------------------------------
def find_integrator(name):
    """Return the first registered integrator class whose name matches
    the (case-insensitive) regular expression *name*, or None.
    An empty *name* therefore matches the first available integrator.
    """
    pattern = re.compile(name, re.I)
    candidates = (cl for cl in IntegratorBase.integrator_classes
                  if pattern.match(cl.__name__))
    return next(candidates, None)
class IntegratorConcurrencyError(RuntimeError):
    """
    Failure due to concurrent usage of an integrator that can be used
    only for a single problem at a time.
    """

    def __init__(self, name):
        # Interpolate the offending integrator's name so the user knows
        # which back-end was shared between two problems.
        template = ("Integrator `%s` can be used to solve only a single problem "
                    "at a time. If you want to integrate multiple problems, "
                    "consider using a different integrator "
                    "(see `ode.set_integrator`)")
        RuntimeError.__init__(self, template % name)
class IntegratorBase(object):
    # Base class for all integrator back-ends.  Subclasses bind `runner` to
    # the low-level routine that does the actual work; a None runner means
    # the corresponding extension is unavailable on this platform.
    runner = None  # runner is None => integrator is not available
    success = None  # success==1 if integrator was called successfully
    supports_run_relax = None
    supports_step = None
    # Shared registry of available integrator classes; find_integrator()
    # searches this list.
    integrator_classes = []
    # Scalar type of the solution vector (complex for zvode).
    scalar = float
    def acquire_new_handle(self):
        # Some of the integrators have internal state (ancient
        # Fortran...), and so only one instance can use them at a time.
        # We keep track of this, and fail when concurrent usage is tried.
        self.__class__.active_global_handle += 1
        self.handle = self.__class__.active_global_handle
    def check_handle(self):
        # `is not` works here because `self.handle` stores the very int
        # object that was read from the class attribute.
        if self.handle is not self.__class__.active_global_handle:
            raise IntegratorConcurrencyError(self.__class__.__name__)
    def reset(self, n, has_jac):
        """Prepare integrator for call: allocate memory, set flags, etc.
        n - number of equations.
        has_jac - if user has supplied function for evaluating Jacobian.
        """
    def run(self, f, jac, y0, t0, t1, f_params, jac_params):
        """Integrate from t=t0 to t=t1 using y0 as an initial condition.
        Return 2-tuple (y1,t1) where y1 is the result and t=t1
        defines the stoppage coordinate of the result.
        """
        raise NotImplementedError('all integrators must define '
                                  'run(f, jac, t0, t1, y0, f_params, jac_params)')
    def step(self, f, jac, y0, t0, t1, f_params, jac_params):
        """Make one integration step and return (y1,t1)."""
        raise NotImplementedError('%s does not support step() method' %
                                  self.__class__.__name__)
    def run_relax(self, f, jac, y0, t0, t1, f_params, jac_params):
        """Integrate from t=t0 to t>=t1 and return (y1,t)."""
        raise NotImplementedError('%s does not support run_relax() method' %
                                  self.__class__.__name__)
    #XXX: __str__ method for getting visual state of the integrator
class vode(IntegratorBase):
    # Wrapper around the Fortran DVODE solver; `runner` is None when the
    # _vode extension failed to import, in which case this class is not
    # registered (see the `if vode.runner is not None` check below).
    runner = getattr(_vode, 'dvode', None)
    # Human-readable meanings of DVODE's negative ISTATE return codes.
    messages = {-1: 'Excess work done on this call. (Perhaps wrong MF.)',
                -2: 'Excess accuracy requested. (Tolerances too small.)',
                -3: 'Illegal input detected. (See printed message.)',
                -4: 'Repeated error test failures. (Check all input.)',
                -5: 'Repeated convergence failures. (Perhaps bad'
                ' Jacobian supplied or wrong choice of MF or tolerances.)',
                -6: 'Error weight became zero during problem. (Solution'
                ' component i vanished, and ATOL or ATOL(i) = 0.)'
                }
    supports_run_relax = 1
    supports_step = 1
    active_global_handle = 0
    def __init__(self,
                 method='adams',
                 with_jacobian=0,
                 rtol=1e-6, atol=1e-12,
                 lband=None, uband=None,
                 order=12,
                 nsteps=500,
                 max_step=0.0,  # corresponds to infinite
                 min_step=0.0,
                 first_step=0.0,  # determined by solver
                 ):
        # NOTE(review): the pattern/string arguments of re.match look
        # swapped (the user-supplied `method` is used as the regex).  It
        # works for the exact names 'adams'/'bdf' -- confirm intent before
        # changing.
        if re.match(method, r'adams', re.I):
            self.meth = 1
        elif re.match(method, r'bdf', re.I):
            self.meth = 2
        else:
            raise ValueError('Unknown integration method %s' % method)
        self.with_jacobian = with_jacobian
        self.rtol = rtol
        self.atol = atol
        # DVODE naming: mu = upper bandwidth, ml = lower bandwidth.
        self.mu = uband
        self.ml = lband
        self.order = order
        self.nsteps = nsteps
        self.max_step = max_step
        self.min_step = min_step
        self.first_step = first_step
        self.success = 1
        self.initialized = False
    def reset(self, n, has_jac):
        # Calculate parameters for Fortran subroutine dvode.
        # MITER selects the iteration method: 0 = functional, 1/2 = dense
        # Jacobian (user-supplied/internal), 3 = diagonal, 4/5 = banded
        # Jacobian (user-supplied/internal).
        if has_jac:
            if self.mu is None and self.ml is None:
                miter = 1
            else:
                if self.mu is None:
                    self.mu = 0
                if self.ml is None:
                    self.ml = 0
                miter = 4
        else:
            if self.mu is None and self.ml is None:
                if self.with_jacobian:
                    miter = 2
                else:
                    miter = 0
            else:
                if self.mu is None:
                    self.mu = 0
                if self.ml is None:
                    self.ml = 0
                if self.ml == self.mu == 0:
                    miter = 3
                else:
                    miter = 5
        # MF = 10*METH + MITER encodes the full method choice for DVODE.
        mf = 10 * self.meth + miter
        # Real workspace length LRW, per the DVODE documentation table
        # (depends on the method/iteration combination and the bandwidths).
        if mf == 10:
            lrw = 20 + 16 * n
        elif mf in [11, 12]:
            lrw = 22 + 16 * n + 2 * n * n
        elif mf == 13:
            lrw = 22 + 17 * n
        elif mf in [14, 15]:
            lrw = 22 + 18 * n + (3 * self.ml + 2 * self.mu) * n
        elif mf == 20:
            lrw = 20 + 9 * n
        elif mf in [21, 22]:
            lrw = 22 + 9 * n + 2 * n * n
        elif mf == 23:
            lrw = 22 + 10 * n
        elif mf in [24, 25]:
            lrw = 22 + 11 * n + (3 * self.ml + 2 * self.mu) * n
        else:
            raise ValueError('Unexpected mf=%s' % mf)
        # Integer workspace length LIW.
        if miter in [0, 3]:
            liw = 30
        else:
            liw = 30 + n
        rwork = zeros((lrw,), float)
        # Optional inputs live at fixed offsets of the work arrays
        # (Fortran indices 5..7, zero-based 4..6 here).
        rwork[4] = self.first_step
        rwork[5] = self.max_step
        rwork[6] = self.min_step
        self.rwork = rwork
        iwork = zeros((liw,), int32)
        if self.ml is not None:
            iwork[0] = self.ml
        if self.mu is not None:
            iwork[1] = self.mu
        iwork[4] = self.order
        iwork[5] = self.nsteps
        iwork[6] = 2  # mxhnil
        self.iwork = iwork
        # Positional arguments inserted between (f, t0, y0, t1) and the
        # user parameter tuples when calling the Fortran runner; index 2 is
        # ITASK and index 3 is ISTATE.
        self.call_args = [self.rtol, self.atol, 1, 1,
                          self.rwork, self.iwork, mf]
        self.success = 1
        self.initialized = False
    def run(self, *args):
        # Guard the non-re-entrant Fortran state (see IntegratorBase).
        if self.initialized:
            self.check_handle()
        else:
            self.initialized = True
            self.acquire_new_handle()
        y1, t, istate = self.runner(*(args[:5] + tuple(self.call_args) +
                                      args[5:]))
        if istate < 0:
            warnings.warn('vode: ' +
                          self.messages.get(istate,
                                            'Unexpected istate=%s' % istate))
            self.success = 0
        else:
            self.call_args[3] = 2  # upgrade istate from 1 to 2
        return y1, t
    def step(self, *args):
        # Temporarily switch ITASK to 2 (take one internal step).
        itask = self.call_args[2]
        self.call_args[2] = 2
        r = self.run(*args)
        self.call_args[2] = itask
        return r
    def run_relax(self, *args):
        # Temporarily switch ITASK to 3 (stop at first mesh point >= t1).
        itask = self.call_args[2]
        self.call_args[2] = 3
        r = self.run(*args)
        self.call_args[2] = itask
        return r
# Register the integrator only when the Fortran extension provides the
# dvode runner (i.e. the build succeeded on this platform).
if vode.runner is not None:
    IntegratorBase.integrator_classes.append(vode)
class zvode(vode):
    # Complex-valued variant of vode; inherits __init__/step/run_relax and
    # overrides workspace setup and run() for the ZVODE runner.
    runner = getattr(_vode, 'zvode', None)
    supports_run_relax = 1
    supports_step = 1
    scalar = complex
    active_global_handle = 0
    def reset(self, n, has_jac):
        # Calculate parameters for the Fortran subroutine ZVODE.
        # MITER selection is identical to vode.reset() (see there).
        if has_jac:
            if self.mu is None and self.ml is None:
                miter = 1
            else:
                if self.mu is None:
                    self.mu = 0
                if self.ml is None:
                    self.ml = 0
                miter = 4
        else:
            if self.mu is None and self.ml is None:
                if self.with_jacobian:
                    miter = 2
                else:
                    miter = 0
            else:
                if self.mu is None:
                    self.mu = 0
                if self.ml is None:
                    self.ml = 0
                if self.ml == self.mu == 0:
                    miter = 3
                else:
                    miter = 5
        mf = 10 * self.meth + miter
        # Complex workspace length LZW per the ZVODE documentation.  The
        # negative-mf rows (Jacobian saved between calls disabled) are kept
        # for completeness; with meth in {1, 2} and miter in 0..5 only the
        # positive values 10..15 / 20..25 can actually occur here.
        if mf in (10,):
            lzw = 15 * n
        elif mf in (11, 12):
            lzw = 15 * n + 2 * n ** 2
        elif mf in (-11, -12):
            lzw = 15 * n + n ** 2
        elif mf in (13,):
            lzw = 16 * n
        elif mf in (14, 15):
            lzw = 17 * n + (3 * self.ml + 2 * self.mu) * n
        elif mf in (-14, -15):
            lzw = 16 * n + (2 * self.ml + self.mu) * n
        elif mf in (20,):
            lzw = 8 * n
        elif mf in (21, 22):
            lzw = 8 * n + 2 * n ** 2
        elif mf in (-21, -22):
            lzw = 8 * n + n ** 2
        elif mf in (23,):
            lzw = 9 * n
        elif mf in (24, 25):
            lzw = 10 * n + (3 * self.ml + 2 * self.mu) * n
        elif mf in (-24, -25):
            lzw = 9 * n + (2 * self.ml + self.mu) * n
        # Real and integer workspace lengths.
        lrw = 20 + n
        if miter in (0, 3):
            liw = 30
        else:
            liw = 30 + n
        zwork = zeros((lzw,), complex)
        self.zwork = zwork
        rwork = zeros((lrw,), float)
        # Optional inputs at the documented fixed offsets.
        rwork[4] = self.first_step
        rwork[5] = self.max_step
        rwork[6] = self.min_step
        self.rwork = rwork
        iwork = zeros((liw,), int32)
        if self.ml is not None:
            iwork[0] = self.ml
        if self.mu is not None:
            iwork[1] = self.mu
        iwork[4] = self.order
        iwork[5] = self.nsteps
        iwork[6] = 2  # mxhnil
        self.iwork = iwork
        # Same layout as vode.call_args but with the extra complex
        # workspace inserted; index 3 is ISTATE.
        self.call_args = [self.rtol, self.atol, 1, 1,
                          self.zwork, self.rwork, self.iwork, mf]
        self.success = 1
        self.initialized = False
    def run(self, *args):
        # Guard the non-re-entrant Fortran state (see IntegratorBase).
        if self.initialized:
            self.check_handle()
        else:
            self.initialized = True
            self.acquire_new_handle()
        y1, t, istate = self.runner(*(args[:5] + tuple(self.call_args) +
                                      args[5:]))
        if istate < 0:
            warnings.warn('zvode: ' +
                          self.messages.get(istate, 'Unexpected istate=%s' % istate))
            self.success = 0
        else:
            self.call_args[3] = 2  # upgrade istate from 1 to 2
        return y1, t
# Register zvode only when the Fortran runner is available.
if zvode.runner is not None:
    IntegratorBase.integrator_classes.append(zvode)
class dopri5(IntegratorBase):
    # Explicit Dormand-Prince RK(4)5 method; no Jacobian is ever used.
    runner = getattr(_dop, 'dopri5', None)
    name = 'dopri5'
    # Meanings of the IDID status codes returned by the Fortran routine.
    messages = {1: 'computation successful',
                2: 'comput. successful (interrupted by solout)',
                -1: 'input is not consistent',
                -2: 'larger nmax is needed',
                -3: 'step size becomes too small',
                -4: 'problem is probably stiff (interrupted)',
                }
    def __init__(self,
                 rtol=1e-6, atol=1e-12,
                 nsteps=500,
                 max_step=0.0,
                 first_step=0.0,  # determined by solver
                 safety=0.9,
                 ifactor=10.0,
                 dfactor=0.2,
                 beta=0.0,
                 method=None
                 ):
        # `method` is accepted but unused; it keeps the constructor
        # signature compatible with the other integrators.
        self.rtol = rtol
        self.atol = atol
        self.nsteps = nsteps
        self.max_step = max_step
        self.first_step = first_step
        self.safety = safety
        self.ifactor = ifactor
        self.dfactor = dfactor
        self.beta = beta
        self.success = 1
    def reset(self, n, has_jac):
        # WORK array: tuning parameters live at fixed offsets
        # (Fortran indices 2..7); the rest is solver scratch space.
        work = zeros((8 * n + 21,), float)
        work[1] = self.safety
        work[2] = self.dfactor
        work[3] = self.ifactor
        work[4] = self.beta
        work[5] = self.max_step
        work[6] = self.first_step
        self.work = work
        iwork = zeros((21,), int32)
        iwork[0] = self.nsteps
        self.iwork = iwork
        self.call_args = [self.rtol, self.atol, self._solout,
                          self.work, self.iwork]
        self.success = 1
    def run(self, f, jac, y0, t0, t1, f_params, jac_params):
        # jac/jac_params are ignored: this explicit Runge-Kutta runner does
        # not take a Jacobian.
        x, y, iwork, idid = self.runner(*((f, t0, y0, t1) +
                                          tuple(self.call_args) + (f_params,)))
        if idid < 0:
            warnings.warn(self.name + ': ' +
                          self.messages.get(idid, 'Unexpected idid=%s' % idid))
            self.success = 0
        return y, x
    def _solout(self, *args):
        # dummy solout function
        pass
# Register dopri5 only when the Fortran runner is available.
if dopri5.runner is not None:
    IntegratorBase.integrator_classes.append(dopri5)
class dop853(dopri5):
    # Explicit Dormand-Prince RK 8(5,3) method; reuses dopri5.run() but
    # needs its own defaults (ifactor/dfactor) and a larger work array.
    runner = getattr(_dop, 'dop853', None)
    name = 'dop853'
    def __init__(self,
                 rtol=1e-6, atol=1e-12,
                 nsteps=500,
                 max_step=0.0,
                 first_step=0.0,  # determined by solver
                 safety=0.9,
                 ifactor=6.0,
                 dfactor=0.3,
                 beta=0.0,
                 method=None
                 ):
        # `method` is accepted but unused (interface compatibility).
        self.rtol = rtol
        self.atol = atol
        self.nsteps = nsteps
        self.max_step = max_step
        self.first_step = first_step
        self.safety = safety
        self.ifactor = ifactor
        self.dfactor = dfactor
        self.beta = beta
        self.success = 1
    def reset(self, n, has_jac):
        # Same layout as dopri5.reset(), but DOP853 needs 11*n scratch
        # instead of 8*n.
        work = zeros((11 * n + 21,), float)
        work[1] = self.safety
        work[2] = self.dfactor
        work[3] = self.ifactor
        work[4] = self.beta
        work[5] = self.max_step
        work[6] = self.first_step
        self.work = work
        iwork = zeros((21,), int32)
        iwork[0] = self.nsteps
        self.iwork = iwork
        self.call_args = [self.rtol, self.atol, self._solout,
                          self.work, self.iwork]
        self.success = 1
# Register dop853 only when the Fortran runner is available.
if dop853.runner is not None:
    IntegratorBase.integrator_classes.append(dop853)
class lsoda(IntegratorBase):
    # Wrapper around ODEPACK's LSODA (automatic stiff/non-stiff switching).
    runner = getattr(_lsoda, 'lsoda', None)
    active_global_handle = 0
    # Meanings of LSODA's ISTATE return codes.
    messages = {
        2: "Integration successful.",
        -1: "Excess work done on this call (perhaps wrong Dfun type).",
        -2: "Excess accuracy requested (tolerances too small).",
        -3: "Illegal input detected (internal error).",
        -4: "Repeated error test failures (internal error).",
        -5: "Repeated convergence failures (perhaps bad Jacobian or tolerances).",
        -6: "Error weight became zero during problem.",
        -7: "Internal workspace insufficient to finish (internal error)."
    }
    def __init__(self,
                 with_jacobian=0,
                 rtol=1e-6, atol=1e-12,
                 lband=None, uband=None,
                 nsteps=500,
                 max_step=0.0,  # corresponds to infinite
                 min_step=0.0,
                 first_step=0.0,  # determined by solver
                 ixpr=0,
                 max_hnil=0,
                 max_order_ns=12,
                 max_order_s=5,
                 method=None
                 ):
        # `method` is accepted but unused: LSODA switches between Adams and
        # BDF automatically.
        self.with_jacobian = with_jacobian
        self.rtol = rtol
        self.atol = atol
        # LSODA naming: mu = upper bandwidth, ml = lower bandwidth.
        self.mu = uband
        self.ml = lband
        self.max_order_ns = max_order_ns
        self.max_order_s = max_order_s
        self.nsteps = nsteps
        self.max_step = max_step
        self.min_step = min_step
        self.first_step = first_step
        self.ixpr = ixpr
        self.max_hnil = max_hnil
        self.success = 1
        self.initialized = False
    def reset(self, n, has_jac):
        # Calculate parameters for the Fortran subroutine LSODA.
        # JT selects the Jacobian type: 1/2 dense (user/internal),
        # 4/5 banded (user/internal).
        if has_jac:
            if self.mu is None and self.ml is None:
                jt = 1
            else:
                if self.mu is None:
                    self.mu = 0
                if self.ml is None:
                    self.ml = 0
                jt = 4
        else:
            if self.mu is None and self.ml is None:
                jt = 2
            else:
                if self.mu is None:
                    self.mu = 0
                if self.ml is None:
                    self.ml = 0
                jt = 5
        # Real workspace must accommodate both the non-stiff (lrn) and
        # stiff (lrs) phases, since LSODA may switch at runtime.
        lrn = 20 + (self.max_order_ns + 4) * n
        if jt in [1, 2]:
            lrs = 22 + (self.max_order_s + 4) * n + n * n
        elif jt in [4, 5]:
            lrs = 22 + (self.max_order_s + 5 + 2 * self.ml + self.mu) * n
        else:
            raise ValueError('Unexpected jt=%s' % jt)
        lrw = max(lrn, lrs)
        liw = 20 + n
        rwork = zeros((lrw,), float)
        # Optional inputs at the documented fixed offsets.
        rwork[4] = self.first_step
        rwork[5] = self.max_step
        rwork[6] = self.min_step
        self.rwork = rwork
        iwork = zeros((liw,), int32)
        if self.ml is not None:
            iwork[0] = self.ml
        if self.mu is not None:
            iwork[1] = self.mu
        iwork[4] = self.ixpr
        iwork[5] = self.nsteps
        iwork[6] = self.max_hnil
        iwork[7] = self.max_order_ns
        iwork[8] = self.max_order_s
        self.iwork = iwork
        # Index 2 is ITASK, index 3 is ISTATE, last element is JT.
        self.call_args = [self.rtol, self.atol, 1, 1,
                          self.rwork, self.iwork, jt]
        self.success = 1
        self.initialized = False
    def run(self, f,jac,y0,t0,t1,f_params,jac_params):
        # Guard the non-re-entrant Fortran state (see IntegratorBase).
        if self.initialized:
            self.check_handle()
        else:
            self.initialized = True
            self.acquire_new_handle()
        # Unlike vode, the lsoda runner takes jac and jt interleaved with
        # the other call arguments, hence the explicit re-packing here.
        args = [f, y0, t0, t1] + self.call_args[:-1] + \
               [jac, self.call_args[-1], f_params, 0, jac_params]
        y1, t, istate = self.runner(*args)
        if istate < 0:
            warnings.warn('lsoda: ' +
                          self.messages.get(istate,
                                            'Unexpected istate=%s' % istate))
            self.success = 0
        else:
            self.call_args[3] = 2  # upgrade istate from 1 to 2
        return y1, t
    def step(self, *args):
        # Temporarily switch ITASK to 2 (take one internal step).
        itask = self.call_args[2]
        self.call_args[2] = 2
        r = self.run(*args)
        self.call_args[2] = itask
        return r
    def run_relax(self, *args):
        # Temporarily switch ITASK to 3 (stop at first mesh point >= t1).
        itask = self.call_args[2]
        self.call_args[2] = 3
        r = self.run(*args)
        self.call_args[2] = itask
        return r
# Register lsoda only when the Fortran runner is available.  Use an explicit
# `is not None` test for consistency with the other integrator registrations
# above; `runner` is either a callable or None, so behaviour is unchanged.
if lsoda.runner is not None:
    IntegratorBase.integrator_classes.append(lsoda)
| 32.88186 | 84 | 0.538361 |
500eee08da6b630ecbe38700899309b202448910 | 1,679 | py | Python | marmee/abstract_marmee.py | francbartoli/marmee | f971adc84636629b9afbe79c917b74cac81fabf2 | [
"MIT"
] | 2 | 2018-05-02T08:11:44.000Z | 2020-08-09T23:30:04.000Z | marmee/abstract_marmee.py | francbartoli/marmee | f971adc84636629b9afbe79c917b74cac81fabf2 | [
"MIT"
] | 215 | 2018-03-06T03:28:23.000Z | 2021-02-08T20:44:08.000Z | marmee/abstract_marmee.py | francbartoli/marmee | f971adc84636629b9afbe79c917b74cac81fabf2 | [
"MIT"
] | null | null | null | # Copyright (c) 2018 Francesco Bartoli
#
# This software is released under the MIT License.
# https://opensource.org/licenses/MIT
# -*- coding: utf-8 -*-
from abc import ABCMeta, abstractmethod, abstractproperty
"""Marmee module."""
class AbstractMarmee(object):
"""Give docstring for AbstractMarmee."""
__metaclass__ = ABCMeta
@abstractproperty
def name(self):
"""Give the name."""
raise NotImplementedError # abstract
@abstractmethod
def get_name(self):
"""Set the method that should give the name."""
raise NotImplementedError # abstract
@abstractmethod
def is_marmee(self):
"""Give if it is the implementation."""
raise NotImplementedError # abstract
@abstractproperty
def inputs(self):
"""Give the inputs of a calculation at certain point."""
raise NotImplementedError # abstract
@abstractmethod
def set_inputs(self):
"""Set the method that should place the inputs."""
raise NotImplementedError # abstract
@abstractproperty
def outputs(self):
"""Give the outputs of a calculation at certain point."""
raise NotImplementedError # abstract
@abstractmethod
def get_outputs(self):
"""Set the method that should give the outputs."""
raise NotImplementedError # abstract
@abstractproperty
def filters(self):
"""Give the filters of a calculation at certain point."""
raise NotImplementedError # abstract
@abstractmethod
def set_filters(self):
"""Set the method that should place the filters."""
raise NotImplementedError # abstract
| 27.080645 | 65 | 0.660512 |
d1ce970636df1655b0a937afcf76e8229e66f622 | 849 | py | Python | dic_data/pdd_pagedata/all/file_gen_sep_head.py | cabbagerice/word-chain | 5d2978df61a7136f502b1b601213e97454103e1e | [
"BSD-2-Clause"
] | null | null | null | dic_data/pdd_pagedata/all/file_gen_sep_head.py | cabbagerice/word-chain | 5d2978df61a7136f502b1b601213e97454103e1e | [
"BSD-2-Clause"
] | 6 | 2019-07-28T20:34:58.000Z | 2021-05-10T06:53:05.000Z | dic_data/pdd_pagedata/all/file_gen_sep_head.py | cabbagerice/word-chain | 5d2978df61a7136f502b1b601213e97454103e1e | [
"BSD-2-Clause"
] | null | null | null | import re
def fi_le(head,source):
pattern = re.compile(r'@%s'%head)
print(pattern)
data,tmp,flag=[],[],False
for l in source:
if re.match(pattern,l):
tmp.append(f"@{l[1:]}")
flag=True
elif (not re.match(r"^\n",l)) and flag:
tmp.append(l)
elif re.match(r"^\n",l) and flag:
tmp.append("\n")
data.append(tmp)
tmp,flag=[],False
'''File書き込み'''
with open(f'./{head}.txt',mode="w") as f:
for s in data:
for ss in s:
f.write(ss)
######
#main#
######
source=open("all_dic.txt")
fi_le("ゔ",source)
exit()
for i in [chr(i) for i in range(12353, 12436)]:
source=open("all_dic.txt")
if not i in ["ぁ","ぃ","ぅ","ぇ","ぉ","ゃ","ゅ","ょ","っ","ゎ"]:
fi_le(i,source)
| 26.53125 | 58 | 0.464075 |
aba3ddeb69eba08c0e71cbb8238d63d0c26a1af3 | 1,558 | py | Python | src/pages/migrations/0003_filmparameter.py | chairco/dj-realtime-visualization | 49cdee124bf7414fde5245f28329f65adb359599 | [
"0BSD"
] | null | null | null | src/pages/migrations/0003_filmparameter.py | chairco/dj-realtime-visualization | 49cdee124bf7414fde5245f28329f65adb359599 | [
"0BSD"
] | 7 | 2018-09-28T03:31:51.000Z | 2022-03-11T23:29:00.000Z | src/pages/migrations/0003_filmparameter.py | chairco/dj-realtime-visualization | 49cdee124bf7414fde5245f28329f65adb359599 | [
"0BSD"
] | null | null | null | # Generated by Django 2.0.7 on 2018-07-06 06:09
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('pages', '0002_auto_20180705_0951'),
]
operations = [
migrations.CreateModel(
name='FilmParameter',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('gap0', models.FloatField(blank=True, null=True, verbose_name='左邊|粉色')),
('gap1', models.FloatField(blank=True, null=True, verbose_name='粉色|橘色')),
('gap2', models.FloatField(blank=True, null=True, verbose_name='橘色|黃色')),
('gap3', models.FloatField(blank=True, null=True, verbose_name='黃色|綠色')),
('gap4', models.FloatField(blank=True, null=True, verbose_name='綠色|藍色')),
('gap5', models.FloatField(blank=True, null=True, verbose_name='藍色|右邊')),
('pink', models.FloatField(blank=True, null=True, verbose_name='粉色')),
('orange', models.FloatField(blank=True, null=True, verbose_name='橘色')),
('yellow', models.FloatField(blank=True, null=True, verbose_name='黃色')),
('green', models.FloatField(blank=True, null=True, verbose_name='綠色')),
('blue', models.FloatField(blank=True, null=True, verbose_name='藍色')),
('create_time', models.DateTimeField(default=django.utils.timezone.now)),
],
),
]
| 47.212121 | 114 | 0.598845 |
2e9e950cfe4f48b81020e5e4d44c9444ae501136 | 6,554 | py | Python | neo/bin/import_blocks.py | WhisperQFun/neo-python | 1790581bfb9c91e92814fe6624997f90c08f989f | [
"MIT"
] | 1 | 2019-06-11T06:03:48.000Z | 2019-06-11T06:03:48.000Z | neo/bin/import_blocks.py | WhisperQFun/neo-python | 1790581bfb9c91e92814fe6624997f90c08f989f | [
"MIT"
] | null | null | null | neo/bin/import_blocks.py | WhisperQFun/neo-python | 1790581bfb9c91e92814fe6624997f90c08f989f | [
"MIT"
] | 1 | 2019-06-16T04:29:49.000Z | 2019-06-16T04:29:49.000Z | #!/usr/bin/env python3
from neo.Core.Blockchain import Blockchain
from neo.Core.Block import Block
from neo.IO.MemoryStream import MemoryStream
from neo.Implementations.Blockchains.LevelDB.LevelDBBlockchain import LevelDBBlockchain
from neo.Implementations.Blockchains.LevelDB.DBPrefix import DBPrefix
from neo.Settings import settings
from neocore.IO.BinaryReader import BinaryReader
from neocore.IO.BinaryWriter import BinaryWriter
from neo.IO.MemoryStream import StreamManager, MemoryStream
import argparse
import os
import shutil
from tqdm import trange
from prompt_toolkit import prompt
from neo.Implementations.Notifications.LevelDB.NotificationDB import NotificationDB
def main():
    """Import serialized blocks from a bootstrap file into the local chain DB.

    Reads the block-count header and each length-prefixed block from the file
    given via ``--input``, persists the blocks (optionally appending to an
    existing database), then rebuilds the stored header hash list.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-m", "--mainnet", action="store_true", default=False,
                        help="use MainNet instead of the default TestNet")
    parser.add_argument("-c", "--config", action="store", help="Use a specific config file")
    # Where to store stuff
    parser.add_argument("--datadir", action="store",
                        help="Absolute path to use for database directories")
    parser.add_argument("-i", "--input", help="Where the input file lives")
    parser.add_argument("-t", "--totalblocks", help="Total blocks to import", type=int)
    parser.add_argument("-l", "--logevents", help="Log Smart Contract Events", default=False, action="store_true")
    parser.add_argument("-n", "--notifications", help="Persist Notifications to database", default=False, action="store_true")
    parser.add_argument("-a", "--append", action="store_true", default=False, help="Append to current Block database")
    args = parser.parse_args()
    if args.mainnet and args.config:
        print("Cannot use both --config and --mainnet parameters, please use only one.")
        exit(1)
    # Setting the datadir must come before setting the network, else the wrong path is checked at net setup.
    if args.datadir:
        settings.set_data_dir(args.datadir)
    # Setup depending on command line arguments. By default, the testnet settings are already loaded.
    if args.config:
        settings.setup(args.config)
    elif args.mainnet:
        settings.setup_mainnet()
    if args.logevents:
        settings.log_smart_contract_events = True
    if not args.input:
        raise Exception("Please specify an input path")
    file_path = args.input
    append = False
    store_notifications = False
    start_block = 0
    if args.append:
        append = True
    if args.notifications:
        store_notifications = True
    header_hash_list = []
    with open(file_path, 'rb') as file_input:
        # The file starts with a little-endian uint32 block count; some files
        # carry a leading zero word before the real count, hence the re-read.
        total_blocks_available = int.from_bytes(file_input.read(4), 'little')
        if total_blocks_available == 0:
            total_blocks_available = int.from_bytes(file_input.read(4), 'little')
        total_blocks = total_blocks_available
        if args.totalblocks and args.totalblocks < total_blocks and args.totalblocks > 0:
            total_blocks = args.totalblocks
        target_dir = os.path.join(settings.DATA_DIR_PATH, settings.LEVELDB_PATH)
        notif_target_dir = os.path.join(settings.DATA_DIR_PATH, settings.NOTIFICATION_DB_PATH)
        if append:
            blockchain = LevelDBBlockchain(settings.chain_leveldb_path, skip_header_check=True)
            Blockchain.RegisterBlockchain(blockchain)
            start_block = Blockchain.Default().Height
            print("Starting import at %s " % start_block)
        else:
            # Destructive import: require interactive confirmation before
            # wiping the existing chain and notification databases.
            print("Will import %s of %s blocks to %s" % (total_blocks, total_blocks_available, target_dir))
            print("This will overwrite any data currently in %s and %s.\nType 'confirm' to continue" % (target_dir, notif_target_dir))
            confirm = prompt("[confirm]> ", is_password=False)
            if not confirm == 'confirm':
                print("Cancelled operation")
                return False
            try:
                if os.path.exists(target_dir):
                    shutil.rmtree(target_dir)
                if os.path.exists(notif_target_dir):
                    shutil.rmtree(notif_target_dir)
            except Exception as e:
                print("Could not remove existing data %s " % e)
                return False
            # Instantiate the blockchain and subscribe to notifications
            blockchain = LevelDBBlockchain(settings.chain_leveldb_path)
            Blockchain.RegisterBlockchain(blockchain)
        chain = Blockchain.Default()
        if store_notifications:
            NotificationDB.instance().start()
        # One reusable stream/reader/Block instance for the whole import loop.
        stream = MemoryStream()
        reader = BinaryReader(stream)
        block = Block()
        length_ba = bytearray(4)
        for index in trange(total_blocks, desc='Importing Blocks', unit=' Block'):
            # set stream data
            file_input.readinto(length_ba)
            block_len = int.from_bytes(length_ba, 'little')
            reader.stream.write(file_input.read(block_len))
            reader.stream.seek(0)
            # get block
            block.DeserializeForImport(reader)
            header_hash_list.append(block.Hash.ToBytes())
            # add
            if block.Index > start_block:
                chain.AddBlockDirectly(block, do_persist_complete=store_notifications)
            # reset blockheader
            block._header = None
            # NOTE(review): outside a class body ``__hash`` is not name-mangled,
            # so this may not actually clear Block's internal cached hash —
            # verify against the Block implementation.
            block.__hash = None
            # reset stream
            reader.stream.Cleanup()
    print("Wrote blocks. Now writing headers")
    chain = Blockchain.Default()
    # reset header hash list
    chain._db.delete(DBPrefix.IX_HeaderHashList)
    total = len(header_hash_list)
    chain._header_index = header_hash_list
    print("storing header hash list...")
    # Persist header hashes in the same 2000-entry pages the node uses.
    while total - 2000 >= chain._stored_header_count:
        ms = StreamManager.GetStream()
        w = BinaryWriter(ms)
        headers_to_write = chain._header_index[chain._stored_header_count:chain._stored_header_count + 2000]
        w.Write2000256List(headers_to_write)
        out = ms.ToArray()
        StreamManager.ReleaseStream(ms)
        with chain._db.write_batch() as wb:
            wb.put(DBPrefix.IX_HeaderHashList + chain._stored_header_count.to_bytes(4, 'little'), out)
        chain._stored_header_count += 2000
    last_index = len(header_hash_list)
    chain._db.put(DBPrefix.SYS_CurrentHeader, header_hash_list[-1] + last_index.to_bytes(4, 'little'))
    print("Imported %s blocks to %s " % (total_blocks, target_dir))
# Script entry point.
if __name__ == "__main__":
    main()
| 35.619565 | 134 | 0.668904 |
0db5a4b1cf91865bdf2e7e2c16bf0079606cea37 | 2,114 | py | Python | trucoGemSite/trucoGemSite/settings.py | emmanuel-santos/GEM | ee04c652a623550581f95579c80bc4f9d084c3cd | [
"MIT"
] | 1 | 2015-10-28T14:11:49.000Z | 2015-10-28T14:11:49.000Z | trucoGemSite/trucoGemSite/settings.py | emmanuel-santos/GEM | ee04c652a623550581f95579c80bc4f9d084c3cd | [
"MIT"
] | null | null | null | trucoGemSite/trucoGemSite/settings.py | emmanuel-santos/GEM | ee04c652a623550581f95579c80bc4f9d084c3cd | [
"MIT"
] | null | null | null | """
Django settings for trucoGemSite project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load it
# from the environment before any production deployment.
SECRET_KEY = 'f&7hqk&$ie74w2e7xjho#in6ak#ai=0w_yyh(ndwu2x66y*gtl'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Post-login destination and the URL anonymous users are redirected to.
LOGIN_REDIRECT_URL = '/'
LOGIN_URL = '/'
# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'truco',
)
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'trucoGemSite.urls'
WSGI_APPLICATION = 'trucoGemSite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'es-ar'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
| 24.298851 | 71 | 0.736518 |
5c7bef4373980eea1507a2a16d9068a020148737 | 5,244 | py | Python | Edge-Detection/canny_edge.py | HarshShah03325/Image_Processing | f15b7d8d7e2b055de11b1eb94925555852c5c901 | [
"MIT"
] | 6 | 2021-09-11T02:53:49.000Z | 2021-12-14T18:07:29.000Z | Edge-Detection/canny_edge.py | HarshShah03325/Image_Processing | f15b7d8d7e2b055de11b1eb94925555852c5c901 | [
"MIT"
] | null | null | null | Edge-Detection/canny_edge.py | HarshShah03325/Image_Processing | f15b7d8d7e2b055de11b1eb94925555852c5c901 | [
"MIT"
] | null | null | null | from PIL import Image
from math import sqrt
import numpy as np
def gaussian_blur(input_image):
    """Apply a fixed 5x5 Gaussian kernel to an H x W x C image array.

    Parameters
    ----------
    input_image : numpy.ndarray
        Array of shape (height, width, channels); each channel is filtered
        independently.

    Returns
    -------
    numpy.ndarray
        Blurred image of the same shape. Pixels closer than 2 px to the
        border (where the kernel does not fit) are left at 0.

    Fix: the original allocated the output with ``np.empty`` and never wrote
    the border pixels, so they contained whatever uninitialized memory numpy
    handed back; ``np.zeros`` makes the border deterministic.
    """
    # 5x5 binomial approximation of a Gaussian; the entries sum to exactly 1.
    kernel = [[1 / 256, 4 / 256, 6 / 256, 4 / 256, 1 / 256],
              [4 / 256, 16 / 256, 24 / 256, 16 / 256, 4 / 256],
              [6 / 256, 24 / 256, 36 / 256, 24 / 256, 6 / 256],
              [4 / 256, 16 / 256, 24 / 256, 16 / 256, 4 / 256],
              [1 / 256, 4 / 256, 6 / 256, 4 / 256, 1 / 256]]
    ht = input_image.shape[0]
    wid = input_image.shape[1]
    channel = input_image.shape[2]
    # Middle of the kernel: only positions where the full kernel fits are filtered.
    offset = len(kernel) // 2
    output_image = np.zeros((ht, wid, channel))
    # Direct (unvectorized) convolution over the interior of the image.
    for x in range(offset, ht - offset):
        for y in range(offset, wid - offset):
            acc = [0] * channel
            for a in range(len(kernel)):
                for b in range(len(kernel)):
                    value = input_image[x + a - offset][y + b - offset]
                    for c in range(channel):
                        acc[c] += value[c] * kernel[a][b]
            output_image[x][y] = acc
    return output_image
def sobel_edge(input_image):
    """Compute gradient magnitude and direction with 3x3 Sobel kernels.

    ``input_image`` is an (H, W, C) array; the grey level of a pixel is the
    sum of its channel values divided by 3. Returns ``(magnitude, direction)``
    as two (H, W) float arrays; the one-pixel border stays 0.
    """
    rows = input_image.shape[0]
    cols = input_image.shape[1]
    # Grey-level intensity, one Python list per row.
    intensity = [[sum(input_image[r, c]) / 3 for c in range(cols)]
                 for r in range(rows)]
    # Horizontal / vertical Sobel kernels.
    kernelx = [[-1, 0, 1],
               [-2, 0, 2],
               [-1, 0, 1]]
    kernely = [[1, 2, 1],
               [0, 0, 0],
               [-1, -2, -1]]
    gx = np.zeros((rows, cols))
    gy = np.zeros((rows, cols))
    # Convolve both kernels over the interior in a single pass.
    for r in range(1, rows - 1):
        for c in range(1, cols - 1):
            for a in range(3):
                for b in range(3):
                    pixel = intensity[r + a - 1][c + b - 1]
                    gx[r][c] += pixel * kernelx[a][b]
                    gy[r][c] += pixel * kernely[a][b]
    magnitude = np.hypot(gx, gy)
    direction = np.arctan2(gy, gx)
    return (magnitude, direction)
def non_max_suppression(img, D):
    """Thin edges: keep a pixel only if it is a local maximum along its
    gradient direction.

    ``img`` is the gradient magnitude, ``D`` the gradient angle in radians.
    Returns an int32 array (magnitudes are truncated on assignment); the
    one-pixel border is always 0.
    """
    rows, cols = img.shape
    out = np.zeros((rows, cols), dtype=np.int32)
    # Angles folded into [0, 180] degrees; D itself is left untouched.
    direction = D * 180. / np.pi
    direction[direction < 0] += 180
    for r in range(1, rows - 1):
        for c in range(1, cols - 1):
            ang = direction[r, c]
            # Neighbours along the gradient; 255 is the "no match" default
            # kept from the original implementation. (Interior indices can
            # never go out of range, so no exception guard is needed.)
            neigh_a = neigh_b = 255
            if (0 <= ang < 22.5) or (157.5 <= ang <= 180):
                neigh_a, neigh_b = img[r, c + 1], img[r, c - 1]
            elif 22.5 <= ang < 67.5:
                neigh_a, neigh_b = img[r + 1, c - 1], img[r - 1, c + 1]
            elif 67.5 <= ang < 112.5:
                neigh_a, neigh_b = img[r + 1, c], img[r - 1, c]
            elif 112.5 <= ang < 157.5:
                neigh_a, neigh_b = img[r - 1, c - 1], img[r + 1, c + 1]
            here = img[r, c]
            out[r, c] = here if (here >= neigh_a and here >= neigh_b) else 0
    return out
def threshold(img, lowThresholdRatio=0.05, highThresholdRatio=0.09):
    """Double-threshold classification for Canny edge tracking.

    The high cut is ``highThresholdRatio`` of the image maximum; the low cut
    is ``lowThresholdRatio`` of the *high* cut (a ratio of a ratio, kept from
    the original). Returns ``(res, weak, strong)`` where ``res`` is int32 with
    strong pixels at 255, weak pixels at 25 and everything else 0. A pixel
    exactly at the high cut ends up weak (the weak mask is applied last).
    """
    high_cut = img.max() * highThresholdRatio
    low_cut = high_cut * lowThresholdRatio
    weak = np.int32(25)
    strong = np.int32(255)
    res = np.zeros(img.shape, dtype=np.int32)
    res[img >= high_cut] = strong
    res[(img >= low_cut) & (img <= high_cut)] = weak
    return (res, weak, strong)
def hysteresis(img, weak, strong=255):
    """Promote weak pixels touching a strong pixel; zero the rest.

    Mutates ``img`` in place, scanning the interior top-to-bottom and
    left-to-right, so a promotion can cascade to later pixels in the same
    pass (same single-pass semantics as the original). Border pixels are
    never examined. Returns the same array object.
    """
    rows, cols = img.shape
    neighbours = ((-1, -1), (-1, 0), (-1, 1),
                  (0, -1), (0, 1),
                  (1, -1), (1, 0), (1, 1))
    for r in range(1, rows - 1):
        for c in range(1, cols - 1):
            if img[r, c] != weak:
                continue
            # Interior indices cannot go out of range, so no exception guard.
            if any(img[r + dr, c + dc] == strong for dr, dc in neighbours):
                img[r, c] = strong
            else:
                img[r, c] = 0
    return img
# Script body: run the full Canny pipeline on a sample image and save the
# result. Stages: blur -> Sobel gradients -> edge thinning -> double
# threshold -> hysteresis edge tracking.
input_image = Image.open("edge-detection.png")
input_pixels = np.array(input_image)
sobel_output=sobel_edge(gaussian_blur(input_pixels))
non_max=non_max_suppression(sobel_output[0],sobel_output[1])
thresh=threshold(non_max)
canny_output=hysteresis(thresh[0],thresh[1],thresh[2])
canny=Image.fromarray(np.uint8(canny_output))
canny.save('canny_output.png')
| 35.432432 | 120 | 0.487414 |
384769a81083c687eeee744b2add7edef6f89fab | 847 | py | Python | setup.py | owlint/CookiesNoticeRemover | fe922637cc585f818b41e453847df366fcada2d0 | [
"Apache-2.0"
] | null | null | null | setup.py | owlint/CookiesNoticeRemover | fe922637cc585f818b41e453847df366fcada2d0 | [
"Apache-2.0"
] | null | null | null | setup.py | owlint/CookiesNoticeRemover | fe922637cc585f818b41e453847df366fcada2d0 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import find_packages, setup
import removecookiesnotice
# Read the long description up front with an explicit encoding. The original
# passed ``open("README.md").read()`` inline, which leaked the file handle
# and depended on the platform's default encoding.
with open("README.md", encoding="utf-8") as readme:
    long_description = readme.read()

setup(
    name="removecookiesnotice",
    version=removecookiesnotice.__version__,
    packages=find_packages(),
    author="Laurent Evrard",
    author_email="laurent@owlint.fr",
    description="Tool to remove cookies notice in html pages",
    long_description=long_description,
    install_requires=[],
    url="https://github.com/owlint/CookiesNoticeRemover",
    classifiers=[
        "Programming Language :: Python",
        "Development Status :: 1 - Planning",
        "License :: OSI Approved",
        "Natural Language :: French",
        "Operating System :: OS Independent",
        "Programming Language :: Python :: 3.10",
        "Topic :: Communications",
    ],
    license="Apache License 2.0",
)
| 29.206897 | 62 | 0.656434 |
d8522f8ef1555498e3a9afbec82d931801f6e51d | 720 | py | Python | babelsubs/generators/dfxp.py | bendk/babelsubs | d2b503338781307b09e4ee40758d1c6c18976d31 | [
"BSD-3-Clause"
] | 1 | 2016-12-26T21:09:33.000Z | 2016-12-26T21:09:33.000Z | babelsubs/generators/dfxp.py | bendk/babelsubs | d2b503338781307b09e4ee40758d1c6c18976d31 | [
"BSD-3-Clause"
] | null | null | null | babelsubs/generators/dfxp.py | bendk/babelsubs | d2b503338781307b09e4ee40758d1c6c18976d31 | [
"BSD-3-Clause"
] | null | null | null | from babelsubs.generators.base import register, BaseGenerator
class DFXPGenerator(BaseGenerator):
    """
    Since the internal storage is already in dfxp, the generator is just
    a small shim to keep the public interface between all generators
    regular.
    """
    # Extensions this generator is registered for (used by registry lookup).
    file_type = ['dfxp', 'xml' ]
    def __init__(self, subtitle_set, line_delimiter=u'\n', language=None):
        # Delegate straight to BaseGenerator; no DFXP-specific state is kept.
        super(DFXPGenerator, self).__init__(subtitle_set, line_delimiter,
                                            language)
    def __unicode__(self):
        # The subtitle set already stores XML, so serialization is direct.
        return self.subtitle_set.to_xml()
    @classmethod
    def generate(cls, subtitle_set, language=None):
        # One-shot convenience API (Python 2: ``unicode`` calls __unicode__).
        return unicode(cls(subtitle_set=subtitle_set, language=language))
# Make this generator discoverable through the generators registry.
register(DFXPGenerator)
| 28.8 | 74 | 0.706944 |
51df736c314aac79d0f2d101d3bbfbe399decdca | 7,010 | py | Python | tests/test_transactions.py | ixc/dps-pxpy | 27659b2a62022921fa9833b57916e7c985e63871 | [
"MIT"
] | 1 | 2017-10-05T03:58:26.000Z | 2017-10-05T03:58:26.000Z | tests/test_transactions.py | ixc/dps-pxpy | 27659b2a62022921fa9833b57916e7c985e63871 | [
"MIT"
] | 2 | 2017-04-20T02:40:17.000Z | 2018-08-29T06:00:28.000Z | tests/test_transactions.py | ixc/dps-pxpy | 27659b2a62022921fa9833b57916e7c985e63871 | [
"MIT"
] | 1 | 2018-08-29T05:37:34.000Z | 2018-08-29T05:37:34.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import unittest
import decimal
from dps import transactions as txn
class MockTransaction(txn.BaseTransaction):
    # Fixture transaction type shared by the test cases in this module:
    # two required fields plus one optional boolean with a default.
    amount = txn.AmountField(required=True)
    currency = txn.StringField(max_length=4, choices=txn.CURRENCY_CHOICES, required=True)
    enable_avs_data = txn.BooleanField(default=False)
class MockSubTransaction(MockTransaction):
    # Subclass fixture: Meta.required promotes an otherwise-optional
    # inherited field to required.
    class Meta:
        required = ["enable_avs_data"]
class FieldsTest(unittest.TestCase):
    """Descriptor-level behaviour of the individual field types."""
    def test_choices(self):
        class MockObject(object):
            field = txn.StringField(choices=['choice1', 'choice2'])
        o = MockObject()
        o.field = 'choice1'
        o.field = 'choice2'
        with self.assertRaises(ValueError):
            o.field = 'invalid choice'
        # None is always accepted, even with a choices restriction.
        o.field = None
    def test_default(self):
        class MockObject(object):
            field = txn.StringField(default='initial')
        o = MockObject()
        self.assertEqual(o.field, 'initial')
        o.field = 'newvalue'
        self.assertEqual(o.field, 'newvalue')
        # Deleting the attribute falls back to the declared default.
        del o.field
        self.assertEqual(o.field, 'initial')
    def test_instance_owner_isolation(self):
        # Field values must be stored per instance, not on the descriptor.
        class MockObject(object):
            field = txn.IntegerField()
        a = MockObject()
        b = MockObject()
        a.field = 1
        self.assertEqual(a.field, 1)
        self.assertIsNone(b.field)
        b.field = 2
        self.assertEqual(a.field, 1)
        self.assertEqual(b.field, 2)
    def test_string_field(self):
        class MockObject(object):
            field = txn.StringField(max_length=5)
            exp = txn.StringField(pattern=r'(0[1-9]|1[0-2])\d{2}')
        o = MockObject()
        o.field = 'short'
        self.assertEqual(o.field, 'short')
        with self.assertRaises(ValueError):
            o.field = 1
        with self.assertRaises(ValueError):
            o.field = 'toolong'
        # Pattern here is an MMYY expiry-style string.
        o.exp = '0122'
        self.assertEqual(o.exp, '0122')
        with self.assertRaises(ValueError):
            o.exp = '1322'
        with self.assertRaises(ValueError):
            o.exp = '0022'
    def test_boolean_field(self):
        class MockObject(object):
            field = txn.BooleanField()
        o = MockObject()
        o.field = True
        self.assertEqual(o.field, True)
        # String form is '1'/'0', the wire representation.
        self.assertEqual(str(o.field), '1')
        o.field = False
        self.assertEqual(o.field, False)
        self.assertEqual(str(o.field), '0')
        o.field = None
        self.assertIsNone(o.field, None)
        with self.assertRaises(ValueError):
            o.field = 1
        with self.assertRaises(ValueError):
            o.field = 'nonbool'
    def test_integer_field(self):
        class MockObject(object):
            field = txn.IntegerField()
        o = MockObject()
        o.field = 12345
        self.assertEqual(o.field, 12345)
        o.field = None
        self.assertIsNone(o.field, None)
        with self.assertRaises(ValueError):
            o.field = 'nonint'
    def test_amount_field(self):
        import decimal
        class MockObject(object):
            field = txn.AmountField(decimal_context=decimal.Context(rounding=decimal.ROUND_DOWN))
        o = MockObject()
        # Decimal, float and string inputs all normalize to two places.
        o.field = decimal.Decimal('1.105')
        self.assertEqual(o.field, decimal.Decimal('1.10'))
        self.assertEqual(str(o.field), '1.10')
        o.field = 1.105
        self.assertEqual(o.field, decimal.Decimal('1.10'))
        self.assertEqual(str(o.field), '1.10')
        o.field = '1.105'
        self.assertEqual(o.field, decimal.Decimal('1.10'))
        self.assertEqual(str(o.field), '1.10')
        o.field = '1'
        self.assertEqual(o.field, decimal.Decimal('1.00'))
        self.assertEqual(str(o.field), '1.00')
        o.field = None
        self.assertIsNone(o.field, None)
        with self.assertRaises(ValueError):
            o.field = 'invalid'
class TransactionTest(unittest.TestCase):
    """Construction, validation and iteration of BaseTransaction subclasses."""
    def test_init(self):
        txn = MockTransaction(amount='10.123', currency='NZD')
        self.assertEqual(txn.amount, decimal.Decimal('10.12'))
        self.assertEqual(txn.currency, 'NZD')
        self.assertEqual(txn.enable_avs_data, 0)
    def test_invalid_values(self):
        with self.assertRaises(ValueError):
            MockTransaction(amount='10.123', currency='NZD', enable_avs_data='invalid')
    def test_invalid_fields(self):
        # Unknown keyword arguments are rejected at construction time.
        with self.assertRaises(ValueError):
            MockTransaction(invalid="invalid")
    def test_validate(self):
        # Missing required 'currency' must fail validation.
        with self.assertRaises(ValueError):
            txn = MockTransaction(amount='10.123')
            txn.validate()
    def test_is_valid(self):
        txn = MockTransaction(amount='10.123')
        self.assertFalse(txn.is_valid())
        txn = MockTransaction(amount='10.123', currency='NZD')
        self.assertTrue(txn.is_valid())
    def test_iterable(self):
        # Transactions iterate as (field, value) pairs, so dict() works.
        txn = MockTransaction(amount='10.123', currency='NZD')
        self.assertDictEqual(dict(txn), {'amount': decimal.Decimal('10.12'), 'currency': 'NZD', 'enable_avs_data': False})
    def test_transactions_subclasses_inherit_fields(self):
        txn = MockSubTransaction(amount='10.123', currency='NZD', enable_avs_data=True)
        self.assertEqual(txn.amount, decimal.Decimal('10.12'))
        self.assertEqual(txn.currency, 'NZD')
        self.assertEqual(txn.enable_avs_data, True)
    def test_validate_with_meta_required(self):
        # Meta.required makes enable_avs_data mandatory on the subclass.
        with self.assertRaises(ValueError):
            txn = MockSubTransaction(amount='10.123', currency='NZD', enable_avs_data=None)
            txn.validate()
class DecoratorsTest(unittest.TestCase):
    """The accept_txn decorator: transaction-object and kwargs call styles."""
    def setUp(self):
        # Minimal client whose method is wrapped to accept MockTransaction.
        class Client(object):
            @txn.accept_txn(MockTransaction)
            def test(self, **kwargs):
                return kwargs
        self.client = Client()
    def test_call_with_transaction(self):
        self.assertDictEqual({"amount": decimal.Decimal("10.01"), "currency": "NZD", "enable_avs_data": False},
                             self.client.test(MockTransaction(amount=decimal.Decimal("10.01"), currency="NZD")))
    def test_call_with_invalid_transaction(self):
        # A transaction of the wrong type is rejected.
        class InvalidTransaction(txn.BaseTransaction):
            pass
        with self.assertRaises(ValueError):
            self.client.test(InvalidTransaction())
    def test_call_with_kwargs_matching_transaction(self):
        self.assertDictEqual({"amount": decimal.Decimal("10.01"), "currency": "NZD", "enable_avs_data": False},
                             self.client.test(amount=decimal.Decimal("10.01"), currency="NZD", enable_avs_data=False))
    def test_call_with_kwargs_not_matching_transaction(self):
        # kwargs missing a required field fail validation.
        with self.assertRaises(ValueError):
            self.client.test(amount=decimal.Decimal("10.01"))
    def test_call_with_invalid_args(self):
        with self.assertRaises(ValueError):
            self.client.test()
# Allow running this test module directly with the stdlib runner.
if __name__ == "__main__":
    unittest.main()
| 29.087137 | 122 | 0.619829 |
05d87a983cee72b648895a2541485a669b4b87c7 | 2,426 | py | Python | tests/test_constants.py | eli88fine/nuclease-off-target | 2fc1fba942c0f50e51b87c2a596d1666c29a2489 | [
"MIT"
] | null | null | null | tests/test_constants.py | eli88fine/nuclease-off-target | 2fc1fba942c0f50e51b87c2a596d1666c29a2489 | [
"MIT"
] | 32 | 2020-10-07T13:05:40.000Z | 2022-03-31T21:04:59.000Z | tests/test_constants.py | eli88fine/nuclease-off-target | 2fc1fba942c0f50e51b87c2a596d1666c29a2489 | [
"MIT"
] | 1 | 2022-01-16T04:29:08.000Z | 2022-01-16T04:29:08.000Z | # -*- coding: utf-8 -*-
from nuclease_off_target import ALIGNMENT_GAP_CHARACTER
from nuclease_off_target import CAS_VARIETIES
from nuclease_off_target import SECONDS_BETWEEN_UCSC_REQUESTS
from nuclease_off_target import SEPARATION_BETWEEN_GUIDE_AND_PAM
from nuclease_off_target import VERTICAL_ALIGNMENT_DNA_BULGE_CHARACTER
from nuclease_off_target import VERTICAL_ALIGNMENT_MATCH_CHARACTER
from nuclease_off_target import VERTICAL_ALIGNMENT_MISMATCH_CHARACTER
from nuclease_off_target import VERTICAL_ALIGNMENT_RNA_BULGE_CHARACTER
def test_ucsc():
    # Pin the request-spacing constant used between UCSC requests.
    assert SECONDS_BETWEEN_UCSC_REQUESTS == 3
def test_alignment_display():
    # Pin the single-character constants used for alignment display.
    assert VERTICAL_ALIGNMENT_MATCH_CHARACTER == " "
    assert VERTICAL_ALIGNMENT_MISMATCH_CHARACTER == "X"
    assert VERTICAL_ALIGNMENT_DNA_BULGE_CHARACTER == "+"
    assert VERTICAL_ALIGNMENT_RNA_BULGE_CHARACTER == "-"
    assert ALIGNMENT_GAP_CHARACTER == "-"
    assert SEPARATION_BETWEEN_GUIDE_AND_PAM == " "
def test_cas_varieties():
    """Pin the per-variety Cas parameters.

    In the expected data, "Sa" and "Sp" share the cut site and the
    position-indexed mismatch penalties; only the PAM differs. The shared
    table is written once instead of being duplicated verbatim — dict
    equality is unaffected.
    """
    shared_mismatch_penalties = {
        0: 6,
        1: 5,
        2: 4,
        3: 3,
        21: 0.1,
        20: 0.1,
        19: 0.12,
        18: 0.13,
        17: 0.15,
        16: 0.17,
        15: 0.19,
        14: 0.21,
        13: 0.23,
        12: 0.27,
        11: 0.35,
        10: 0.5,
        9: 0.7,
        8: 0.8,
        7: 1.1,
        6: 1.3,
        5: 1.9,
        4: 2.3,
    }
    assert CAS_VARIETIES == {
        "Sa": {
            "PAM": "NNGRRT",
            "cut_site_relative_to_pam": -3,
            "mismatch-penalties-starting-from-PAM": shared_mismatch_penalties,
        },
        "Sp": {
            "PAM": "NGG",
            "cut_site_relative_to_pam": -3,
            "mismatch-penalties-starting-from-PAM": shared_mismatch_penalties,
        },
    }
| 28.880952 | 70 | 0.468673 |
a8e5f97b3878c750a84c33078d8612b678f2f5ba | 1,716 | py | Python | globe/util/_user.py | T620/globe | 5033a9750387d169b757538764bdf4fd229b81ae | [
"MIT"
] | null | null | null | globe/util/_user.py | T620/globe | 5033a9750387d169b757538764bdf4fd229b81ae | [
"MIT"
] | 14 | 2018-04-06T16:19:38.000Z | 2018-04-09T18:59:08.000Z | globe/util/_user.py | T620/globe | 5033a9750387d169b757538764bdf4fd229b81ae | [
"MIT"
] | null | null | null | #used to handle user authentication, registration, and user's folder
from globe import app
from globe.models import User
import uuid
def get_id(username):
    # Look up a user's primary key by (title-cased) username.
    # NOTE(review): Python 2 ``unicode.title`` — fails if ``username`` is a
    # byte string, and ``user`` is None when no row matches; verify callers
    # only pass known usernames.
    user = User.query.filter_by(username=unicode.title(username)).first()
    return user.id
def password_hash_matches(userid, password):
    """Return True when the stored password for ``userid`` equals ``password``."""
    stored = User.query.filter_by(id=userid).first()
    return stored.password == password
def register(newUser):
    """Create and persist a new User row from signup data.

    ``newUser`` is a mapping with at least 'forename', 'surname', 'email',
    'password' and 'city' keys.
    """
    from globe import app, db
    import os, tinys3, string
    from globe.util import id_gen, mail
    userID = id_gen.user_id(5, string.digits)
    username = id_gen.username(newUser['forename'], newUser['surname'])
    # Fresh tokens for password reset and e-mail confirmation flows.
    passwordToken = uuid.uuid4().hex
    confirmToken = uuid.uuid4().hex
    url = "/static/user_uploads/profiles/placeholder/placeholder.jpg"
    #url = '/static/user_uploads/50123/profile/placeholder.jpg'
    # Placeholder avatar served from the configured S3 bucket.
    stockImage = "http://" + os.environ['S3_ENDPOINT'] + "/" + os.environ['S3_BUCKET_NAME'] + url
    # Debug output left from development (Python 2 print statement).
    print stockImage
    # NOTE(review): 'verified' is stored as the *string* "False" here, while
    # authorise() later assigns the boolean True — confirm the column type.
    newAccount = User(
        id=userID,
        email=newUser['email'],
        username=username,
        password=newUser['password'],
        confirmationToken=confirmToken,
        passwordToken=passwordToken,
        forename=unicode.title(newUser['forename']),
        surname=unicode.title(newUser['surname']),
        city=newUser['city'],
        biography="None",
        verified="False",
        photo=stockImage
    )
    db.session.add(newAccount)
    db.session.commit()
def authorise(token, username):
user = User.query.filter_by(username=username).first_or_404()
if token == user.confirmationToken:
#verify the new account
print "[INFO]: tokens match. Tokens: %s" % token + ", " + user.confirmationToken
user.verified=True
db.session.add(user)
db.session.commit()
return True
else:
return False
| 23.506849 | 94 | 0.734848 |
d0a8b53e7dea9b6e09e03c0f5e0e98c3836a1867 | 914 | py | Python | Arrays/longest_increasing_subsequence.py | omk42/a | d996b4407460017321ac63ccc5ed92505d1d459b | [
"Apache-2.0"
] | null | null | null | Arrays/longest_increasing_subsequence.py | omk42/a | d996b4407460017321ac63ccc5ed92505d1d459b | [
"Apache-2.0"
] | null | null | null | Arrays/longest_increasing_subsequence.py | omk42/a | d996b4407460017321ac63ccc5ed92505d1d459b | [
"Apache-2.0"
] | null | null | null | #Longest increasing subsequence
# The Longest Increasing Subsequence (LIS) problem is to find the length of the longest subsequence of a given sequence such that all elements of the subsequence are sorted in increasing order. For example, the length of LIS for {10, 22, 9, 33, 21, 50, 41, 60, 80} is 6 and LIS is {10, 22, 33, 50, 60, 80}.
# https://www.geeksforgeeks.org/longest-increasing-subsequence-dp-3/
import math
def lis(list, start, memo):
    """Return the length of the longest strictly increasing subsequence of
    ``list`` whose elements are all greater than ``start``.

    ``memo`` is a caller-supplied cache dict; pass a fresh dict per sequence.

    Fix: the original memoized by element *value* alone, so with duplicate
    values it reused a chain cached for an earlier, longer suffix and could
    over-count (e.g. [3, 4, 5, 2, 3] returned 4; the true LIS length is 3).
    Keying by (remaining length, lower bound) is sound because every
    recursive call operates on a suffix of the top-level sequence.
    (The parameter name ``list`` shadows the builtin; kept for interface
    compatibility.)
    """
    if len(list) == 0:
        return 0
    key = (len(list), start)
    if key in memo:
        return memo[key]
    # Best chain that skips the head element entirely.
    best = lis(list[1:], start, memo)
    if list[0] > start:
        # Best chain that takes the head as the new lower bound.
        best = max(best, 1 + lis(list[1:], list[0], memo))
    memo[key] = best
    return best
# Demo run: the LIS of the sample sequence (10,22,33,50,60,80) has length 6.
if __name__ == "__main__":
    memo = {}
    print (lis([100,122,10,22,9,33,21,50,41,60,80], -math.inf, memo))
    print (memo)
1fdd11c4b63bcb2f5f1f3301ab1699a49735b5cc | 223 | py | Python | torrentsearch/File.py | romanpitak/torrentsearch | ea4723ce40ea0a23fc4969038c01086adc237e8b | [
"MIT"
] | null | null | null | torrentsearch/File.py | romanpitak/torrentsearch | ea4723ce40ea0a23fc4969038c01086adc237e8b | [
"MIT"
] | 1 | 2015-09-01T08:30:06.000Z | 2015-09-01T08:30:06.000Z | torrentsearch/File.py | romanpitak/torrentsearch | ea4723ce40ea0a23fc4969038c01086adc237e8b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from .utils import *
class File:
    """A named entry with a size, rendered human-readably via ``human_sized``."""
    def __init__(self):
        # Fresh entries start unnamed and empty.
        self.name = ''
        self.size = 0.0
    def __str__(self):
        pretty_size = human_sized(self.size)
        return '{} ({})'.format(self.name, pretty_size)
| 15.928571 | 66 | 0.538117 |
685b8e599be2dba96abc7143b76882067052613f | 593 | py | Python | 168.py | wilbertgeng/LintCode_exercise | e7a343b746e98ca3b4bc7b36655af7291f3150db | [
"MIT"
] | null | null | null | 168.py | wilbertgeng/LintCode_exercise | e7a343b746e98ca3b4bc7b36655af7291f3150db | [
"MIT"
] | null | null | null | 168.py | wilbertgeng/LintCode_exercise | e7a343b746e98ca3b4bc7b36655af7291f3150db | [
"MIT"
] | null | null | null | """168 · Burst Balloons"""
class Solution:
    """
    @param nums: A list of integer
    @return: An integer, maximum coins
    """
    def maxCoins(self, nums):
        """Maximum coins from bursting all balloons (interval DP, O(n^3)).

        ``dp[i][j]`` is the best total for bursting everything strictly
        between boundaries i and j after padding the input with 1-valued
        sentinel balloons; the answer is ``dp[0][n - 1]``.

        Fix: removed a leftover debug ``print`` of the padded list.
        """
        if not nums:
            return 0
        nums = [1, *nums, 1]
        n = len(nums)
        dp = [[0] * n for _ in range(n)]
        # Widen the interval (i, j); k is the last balloon burst inside it.
        for i in range(n - 3, -1, -1):
            for j in range(i + 2, n):
                for k in range(i + 1, j):
                    dp[i][j] = max(dp[i][j], dp[i][k] + dp[k][j] + nums[i] * nums[k] * nums[j])
        return dp[0][n - 1]
| 26.954545 | 95 | 0.441821 |
921dc9486f53c5aff9db96e8023644dbfc0dc0c6 | 12,674 | py | Python | sdk/synapse/azure-mgmt-synapse/azure/mgmt/synapse/operations/_workspace_managed_identity_sql_control_settings_operations.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 2,728 | 2015-01-09T10:19:32.000Z | 2022-03-31T14:50:33.000Z | sdk/synapse/azure-mgmt-synapse/azure/mgmt/synapse/operations/_workspace_managed_identity_sql_control_settings_operations.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 17,773 | 2015-01-05T15:57:17.000Z | 2022-03-31T23:50:25.000Z | sdk/synapse/azure-mgmt-synapse/azure/mgmt/synapse/operations/_workspace_managed_identity_sql_control_settings_operations.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 1,916 | 2015-01-19T05:05:41.000Z | 2022-03-31T19:36:44.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class WorkspaceManagedIdentitySqlControlSettingsOperations(object):
"""WorkspaceManagedIdentitySqlControlSettingsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.synapse.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer):
        # Keep references to the shared pipeline client, serializers and
        # service configuration supplied by the generated service client.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    def get(
        self,
        resource_group_name,  # type: str
        workspace_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.ManagedIdentitySqlControlSettingsModel"
        """Get Managed Identity Sql Control Settings.

        Get Managed Identity Sql Control Settings.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
        :type workspace_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ManagedIdentitySqlControlSettingsModel, or the result of cls(response)
        :rtype: ~azure.mgmt.synapse.models.ManagedIdentitySqlControlSettingsModel
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        # Auto-generated operation: build URL/query/headers, run the HTTP
        # pipeline, map error status codes, then deserialize the body.
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ManagedIdentitySqlControlSettingsModel"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-06-01"
        accept = "application/json"
        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
            'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize('ManagedIdentitySqlControlSettingsModel', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/managedIdentitySqlControlSettings/default'}  # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
workspace_name, # type: str
managed_identity_sql_control_settings, # type: "_models.ManagedIdentitySqlControlSettingsModel"
**kwargs # type: Any
):
# type: (...) -> Optional["_models.ManagedIdentitySqlControlSettingsModel"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ManagedIdentitySqlControlSettingsModel"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(managed_identity_sql_control_settings, 'ManagedIdentitySqlControlSettingsModel')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ManagedIdentitySqlControlSettingsModel', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/managedIdentitySqlControlSettings/default'} # type: ignore
    def begin_create_or_update(
        self,
        resource_group_name,  # type: str
        workspace_name,  # type: str
        managed_identity_sql_control_settings,  # type: "_models.ManagedIdentitySqlControlSettingsModel"
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller["_models.ManagedIdentitySqlControlSettingsModel"]
        """Create or update Managed Identity Sql Control Settings.
        Create or update Managed Identity Sql Control Settings.
        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
        :type workspace_name: str
        :param managed_identity_sql_control_settings: Managed Identity Sql Control Settings.
        :type managed_identity_sql_control_settings: ~azure.mgmt.synapse.models.ManagedIdentitySqlControlSettingsModel
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
        Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either ManagedIdentitySqlControlSettingsModel or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.synapse.models.ManagedIdentitySqlControlSettingsModel]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ManagedIdentitySqlControlSettingsModel"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # No saved poller state: issue the initial PUT request. The `cls`
            # lambda passes the raw pipeline response through so the poller
            # can inspect the LRO headers itself.
            raw_result = self._create_or_update_initial(
                resource_group_name=resource_group_name,
                workspace_name=workspace_name,
                managed_identity_sql_control_settings=managed_identity_sql_control_settings,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # These kwargs were consumed by the initial request and must not be
        # forwarded to the polling method below.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the terminal response of the long-running operation.
            deserialized = self._deserialize('ManagedIdentitySqlControlSettingsModel', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
            'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
        }
        # `polling` may be True (default ARM polling), False (no polling), or a
        # user-supplied PollingMethod instance.
        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously started operation from its continuation token.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/managedIdentitySqlControlSettings/default'}  # type: ignore
| 52.589212 | 231 | 0.696939 |
9d519e154166be0615fd1de3bb18a236dfeb67c4 | 2,905 | py | Python | METRIC/extract_wx_data.py | NASA-DEVELOP/METRIC | 88071a4368b821bdf2d1ce3fb349bb93a028bd51 | [
"NASA-1.3"
] | 42 | 2015-12-28T21:05:58.000Z | 2021-07-02T11:48:02.000Z | METRIC/extract_wx_data.py | Surf0618/METRIC | 88071a4368b821bdf2d1ce3fb349bb93a028bd51 | [
"NASA-1.3"
] | 10 | 2016-02-19T17:38:25.000Z | 2020-10-03T08:24:56.000Z | METRIC/extract_wx_data.py | Surf0618/METRIC | 88071a4368b821bdf2d1ce3fb349bb93a028bd51 | [
"NASA-1.3"
] | 34 | 2015-12-28T23:48:20.000Z | 2021-12-29T03:00:59.000Z | __author__ = "jwely"
import time_series
import textio
from datetime import datetime
def extract_wx_data(time_obj, wx_path):
    """
    Extract hourly NOAA surface weather variables for a Landsat acquisition time.

    This function was written to reinstate wx file parsing for the agreed upon NOAA
    weather data format for any study area within the USA. This is THE function
    that should be used for reading weather data, the others will not be supported.
    It expects data in the format as retrieved from this URL:
    [http://gis.ncdc.noaa.gov/map/viewer/#app=cdo&cfg=cdo&theme=hourly&layers=1&node=gi]
    Please see the readme for more detailed instructions on data download.

    Inputs:
        time_obj    A datetime object representing the image data aquisition datetime
        wx_path     filepath to the weather data. (hourly data, DS3505 format)

    Returns:
        list of [temp_C_min, temp_C_max, temp_C_mid, P_air, wind_speed, dewp_C]
        with temperatures in Celsius, P_air in millibars and wind_speed in m/s.

    Raises:
        Exception if the weather file has no entries for the acquisition date.
    """
    # format weather (daily and hourly) as a time series object
    wx = time_series.time_series("wx_data")
    tdo = textio.read_DS3505(wx_path, has_headers=True)
    wx.from_tdo(tdo)

    # the DS3505 format stores timestamps in the "YR--MODAHRMN" column
    time_label = "YR--MODAHRMN"
    time_format = "%Y%m%d%H%M"
    start_time = "200001010000"
    wx.define_time(time_label, time_format, start_time)

    # bin the data into days and pull out the one we want.
    wx.make_subsets("%j", cust_center_time=time_obj)
    day_name = time_obj.strftime("%Y-%m-%d")
    wx.interrogate()

    # if it can't find a subset in wx with the input date's name, the wx data
    # covers the wrong time period. Narrowed from a bare `except:` so that
    # KeyboardInterrupt/SystemExit are not swallowed.
    try:
        wx_day = wx[day_name]
    except Exception:
        raise Exception("wx data has no entries for date of landsat acquisition ({0})".format(time_obj))

    # get min/max temperatures and convert to Celcius (statistical operations clean up NoData)
    print("Centered statistics around {0}".format(wx_day.center_time))
    Tstats = wx_day.column_stats("TEMP")
    temp_C_min = (Tstats["TEMP_min_v"] - 32) * (5.0 / 9)  # F --> C
    temp_C_max = (Tstats["TEMP_max_v"] - 32) * (5.0 / 9)  # F --> C

    # get instantaneous variables at input @param time_obj by interpolating between nearest values
    temp_C_mid = (wx_day.interp_col(time_obj, "TEMP") - 32) * (5.0 / 9)  # F --> C
    P_air = wx_day.interp_col(time_obj, "STP")                           # in millibars
    wind_speed = wx_day.interp_col(time_obj, "SPD") * 0.51444            # knots --> meters / second
    dewp_C = (wx_day.interp_col(time_obj, "DEWP") - 32) * (5.0 / 9)      # F --> C

    # this format is for legacy support, just an array of values, not a dict.
    print("Temperature is {0}C".format(temp_C_mid))
    return [temp_C_min, temp_C_max, temp_C_mid, P_air, wind_speed, dewp_C]
# testing
if __name__ == "__main__":
    # ad-hoc manual test against a local DS3505 hourly weather export
    wx_filepath = r"E:\DEVELOP\Team_Projects\2015_Spring_METRIC\code_current_dev\input_weather\2013_July_CravenCountyAirport.txt"
    # renamed from `time` to avoid shadowing the stdlib module name
    acquisition_time = datetime(2013, 7, 17, 11, 43, 24)
    wx = extract_wx_data(acquisition_time, wx_filepath)
    # function-style print matches the rest of this module and is valid on
    # both Python 2 and 3 (the original `print wx` statement is Py2-only)
    print(wx)
| 38.223684 | 129 | 0.680207 |
911207b4312a1d21ddf92fea22f914cbcf6b911c | 680 | py | Python | cms/urls.py | 360youlun/django-cms | bc1240fd46de4c04f3b5402be99a81728a4a324c | [
"BSD-3-Clause"
] | 1 | 2019-04-15T10:28:46.000Z | 2019-04-15T10:28:46.000Z | cms/urls.py | vstoykov/django-cms | 344a62654c5785d8bc6d668e85132e1a79978aa3 | [
"BSD-3-Clause"
] | null | null | null | cms/urls.py | vstoykov/django-cms | 344a62654c5785d8bc6d668e85132e1a79978aa3 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from django.conf import settings
from django.conf.urls import url
from cms.apphook_pool import apphook_pool
from cms.appresolver import get_app_patterns
from cms.views import details
# Slug pattern for CMS page lookups; honour Django's APPEND_SLASH setting by
# requiring a trailing slash only when it is enabled.
_slug_prefix = r'^(?P<slug>[0-9A-Za-z-_.//]+)'
regex = _slug_prefix + ('/$' if settings.APPEND_SLASH else '$')

# If there are some application urls, use the special resolver so we keep
# standard `reverse()` support; otherwise start from an empty pattern list.
urlpatterns = get_app_patterns() if apphook_pool.get_apphooks() else []

urlpatterns.extend([
    url(regex, details, name='pages-details-by-slug'),
    url(r'^$', details, {'slug': ''}, name='pages-root'),
])
| 26.153846 | 63 | 0.676471 |
be9989d062f2112916398c46155a5b9ae8b1a54b | 38,914 | py | Python | tests/loops/test_loops.py | adamviola/pytorch-lightning | 9826de21625b162dfef43f30864b5a6d9efab003 | [
"Apache-2.0"
] | 8 | 2020-01-15T06:58:15.000Z | 2021-11-05T11:36:36.000Z | tests/loops/test_loops.py | adamviola/pytorch-lightning | 9826de21625b162dfef43f30864b5a6d9efab003 | [
"Apache-2.0"
] | 3 | 2020-02-16T07:58:26.000Z | 2020-03-30T09:32:55.000Z | tests/loops/test_loops.py | adamviola/pytorch-lightning | 9826de21625b162dfef43f30864b5a6d9efab003 | [
"Apache-2.0"
] | 3 | 2020-02-15T21:35:52.000Z | 2021-02-06T09:13:49.000Z | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from copy import deepcopy
from dataclasses import dataclass
from typing import Any, Dict, Iterator
from unittest import mock
from unittest.mock import ANY
import pytest
import torch
from torch.utils.data.dataloader import _MultiProcessingDataLoaderIter, DataLoader
from pytorch_lightning import LightningModule, Trainer
from pytorch_lightning.callbacks import Callback, ModelCheckpoint
from pytorch_lightning.loops import EvaluationLoop, Loop, TrainingBatchLoop, TrainingEpochLoop
from pytorch_lightning.trainer.progress import BaseProgress
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from tests.helpers import BoringModel, RandomDataset
from tests.helpers.runif import RunIf
class NestedLoop(Loop):
    """Minimal `Loop` with two child slots, used to test Trainer-reference propagation."""

    def __init__(self):
        super().__init__()
        # children are attached later through `connect`
        self.child_loop0 = None
        self.child_loop1 = None

    @property
    def done(self) -> bool:
        # never finishes on its own; the tests drive it manually
        return False

    def connect(self, child0, child1):
        # attach both child loops at once
        self.child_loop0 = child0
        self.child_loop1 = child1

    def reset(self) -> None:
        # no state to reset
        pass

    def advance(self, *args, **kwargs):
        # no-op body; these tests never run the loop
        pass
@pytest.mark.parametrize("loop_name", ["fit_loop", "validate_loop", "test_loop", "predict_loop"])
def test_connect_loops_direct(loop_name):
    """A loop assigned to any Trainer loop attribute gains a reference to that Trainer."""
    nested = NestedLoop()
    # before attachment, accessing `.trainer` must fail loudly
    with pytest.raises(RuntimeError, match="The loop is not attached to a Trainer"):
        nested.trainer
    trainer = Trainer()
    # equivalent to `trainer.<loop_name> = nested`
    setattr(trainer, loop_name, nested)
    assert nested.trainer is trainer
def test_connect_loops_recursive():
    """Attaching a parent loop to a Trainer propagates the reference to its children."""
    parent = NestedLoop()
    children = [NestedLoop(), NestedLoop()]
    parent.connect(*children)
    # neither the parent nor a child is attached yet
    for unattached in (parent, parent.child_loop0):
        with pytest.raises(RuntimeError, match="The loop is not attached to a Trainer"):
            unattached.trainer
    trainer = Trainer()
    trainer.fit_loop = parent
    assert children[0].trainer is trainer
    assert children[1].trainer is trainer
def test_connect_subloops(tmpdir):
    """Connecting an individual subloop via `trainer.x.y.connect()` wires it in."""
    trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)
    replacement = TrainingBatchLoop()
    parent_epoch_loop = trainer.fit_loop.epoch_loop
    parent_epoch_loop.connect(batch_loop=replacement)
    assert parent_epoch_loop.batch_loop is replacement
    # the new subloop only receives the trainer reference once fitting starts
    with pytest.raises(RuntimeError, match="The loop is not attached to a Trainer"):
        replacement.trainer
    trainer.fit(BoringModel())
    assert replacement.trainer is trainer
def test_replace_loops():
    """`replace` swaps in a new loop class/instance while carrying over old state."""
    # `replace` must reject a subclass whose `__init__` takes extra required
    # arguments, since it cannot re-instantiate it automatically.
    class TestLoop(TrainingEpochLoop):
        def __init__(self, foo):
            super().__init__()
    trainer = Trainer(min_steps=123, max_steps=321)
    with pytest.raises(
        MisconfigurationException, match=r"FitLoop.replace\(TestLoop\)`.*`__init__`.*`TrainingEpochLoop`"
    ):
        trainer.fit_loop.replace(epoch_loop=TestLoop)
    # redefine TestLoop with a default constructor so replacement can succeed
    class TestLoop(TrainingEpochLoop):
        ...
    # test passing a loop where previous state should be connected
    old_loop = trainer.fit_loop.epoch_loop
    trainer.fit_loop.replace(epoch_loop=TestLoop)
    new_loop = trainer.fit_loop.epoch_loop
    assert isinstance(new_loop, TestLoop)
    assert trainer.fit_loop.epoch_loop is new_loop
    # the replacement inherits the old loop's settings, children, and trainer
    assert new_loop.min_steps == 123
    assert new_loop.max_steps == 321
    assert new_loop.batch_loop is old_loop.batch_loop
    assert new_loop.val_loop is old_loop.val_loop
    assert new_loop.trainer is trainer
    class MyBatchLoop(TrainingBatchLoop):
        ...
    class MyEvalLoop(EvaluationLoop):
        ...
    # test passing more than one where one is an instance and the other a class
    trainer.fit_loop.epoch_loop.replace(batch_loop=MyBatchLoop, val_loop=MyEvalLoop())
    new_batch_loop = trainer.fit_loop.epoch_loop.batch_loop
    new_val_loop = trainer.fit_loop.epoch_loop.val_loop
    assert isinstance(new_batch_loop, MyBatchLoop)
    assert isinstance(new_val_loop, MyEvalLoop)
class CustomException(Exception):
    """Raised by the test loops/models below to simulate a mid-run failure."""
def test_loop_restore():
    """A loop reloaded from its `state_dict` resumes where it raised and still
    produces the full sequence of outputs."""
    class Simple(Loop):
        def __init__(self, dataset: Iterator):
            super().__init__()
            self.iteration_count = 0
            self.dataset = dataset
        @property
        def skip(self) -> bool:
            return False
        @property
        def done(self) -> bool:
            return self.iteration_count > len(self.dataset)
        def reset(self) -> None:
            self.iter_dataset = iter(self.dataset)
            if self.restarting:
                # fast-forward the iterator to where the previous run stopped
                for _ in range(self.iteration_count):
                    next(self.iter_dataset)
                self.iteration_count += 1
            else:
                self.outputs = []
        def advance(self) -> None:
            value = next(self.iter_dataset)
            if self.iteration_count == 5:
                # simulate a mid-run failure
                raise CustomException
            self.outputs.append(value)
        def on_advance_end(self) -> None:
            self.iteration_count += 1
        def state_dict(self) -> Dict:
            return {"iteration_count": self.iteration_count, "outputs": self.outputs}
        def load_state_dict(self, state_dict: Dict) -> None:
            self.iteration_count = state_dict["iteration_count"]
            self.outputs = state_dict["outputs"]
    trainer = Trainer()
    data = range(10)
    loop = Simple(data)
    loop.trainer = trainer
    try:
        loop.run()
        state_dict = {}
    except CustomException:
        # capture the loop state at the point of failure
        state_dict = loop.state_dict()
    # a fresh loop restored from that state should finish the remaining work
    loop = Simple(data)
    loop.trainer = trainer
    loop.load_state_dict(state_dict)
    loop.restarting = True
    loop.run()
    assert not loop.restarting
    assert loop.outputs == list(range(10))
@mock.patch.dict(os.environ, {"PL_FAULT_TOLERANT_TRAINING": "1"})
def test_loop_hierarchy():
    """Nested loops serialize/restore their own state plus their children's,
    using dotted keys in the combined `state_dict`."""
    @dataclass
    class SimpleProgress(BaseProgress):
        increment: int = 0
    class Simple(Loop):
        def __init__(self, a):
            super().__init__()
            self.a = a
            self.progress = SimpleProgress()
        def advance(self, *args: Any, **kwargs: Any) -> None:
            # run the child loop, if one has been attached
            loop = getattr(self, "loop_child", None)
            if not loop:
                return
            loop.run()
        def on_advance_end(self):
            self.progress.increment += 1
        @property
        def done(self) -> bool:
            return self.progress.increment > 0
        def reset(self) -> None:
            ...
        def on_save_checkpoint(self) -> Dict:
            return {"a": self.a}
        def on_load_checkpoint(self, state_dict: Dict) -> None:
            self.a = state_dict["a"]
    loop_parent = Simple(1)
    loop_child = Simple(2)
    loop_parent.loop_child = loop_child
    # check the trainer reference is propagated
    loop_parent.trainer = Trainer()
    assert loop_child.trainer is loop_parent.trainer
    state_dict = loop_parent.state_dict()
    # the child's state appears under dotted `loop_child.*` keys
    assert state_dict == {
        "state_dict": {"a": 1},
        "progress": {"increment": 0},
        "loop_child.state_dict": {"a": 2},
        "loop_child.progress": {"increment": 0},
    }
    state_dict["loop_child.state_dict"]["a"] = 3
    # check restarting after `load_state_dict`
    loop_parent.load_state_dict(state_dict)
    assert loop_parent.restarting
    loop_parent.run()
    # check the new state after `run`
    state_dict = loop_parent.state_dict()
    assert state_dict == {
        "state_dict": {"a": 1},
        "progress": {"increment": 1},
        "loop_child.state_dict": {"a": 3},
        "loop_child.progress": {"increment": 1},
    }
    # a deep copy must round-trip to the identical state
    loop_parent_copy = deepcopy(loop_parent)
    assert loop_parent_copy.state_dict() == loop_parent.state_dict()
    assert loop_parent_copy.on_save_checkpoint() == state_dict["state_dict"]
    assert loop_parent_copy.loop_child.on_save_checkpoint() == state_dict["loop_child.state_dict"]
    loop_parent = Simple(1)
    loop_child = Simple(2)
    loop_parent.loop_child = loop_child
    loop_parent.load_state_dict(state_dict)
    assert loop_parent.progress.increment == 1
    assert loop_parent.loop_child.progress.increment == 1
    # with the child detached, only the parent's own state is serialized
    del loop_parent.loop_child
    state_dict = loop_parent.state_dict()
    assert state_dict == {"state_dict": {"a": 1}, "progress": {"increment": 1}}
@mock.patch.dict(os.environ, {"PL_FAULT_TOLERANT_TRAINING": "1"})
@pytest.mark.parametrize("stop_epoch", (1, 2))
@pytest.mark.parametrize("stop_batch", (1, 2))
@pytest.mark.parametrize("n_dataloaders,stop_dataloader", [(2, 0), (2, 1), (3, 2)])
def test_loop_restart_progress_multiple_dataloaders(tmpdir, n_dataloaders, stop_dataloader, stop_epoch, stop_batch):
    """Validation progress counters are checkpointed and restored correctly when a
    failure happens partway through one of several val dataloaders."""
    n_batches = 5
    n_epochs = 3
    class ValidationModel(BoringModel):
        def __init__(self):
            super().__init__()
        def validation_step(self, batch, batch_idx, dataloader_idx):
            # raise exactly at the parametrized epoch/batch/dataloader position
            if self.current_epoch == stop_epoch and batch_idx == stop_batch and dataloader_idx == stop_dataloader:
                raise CustomException
            return super().validation_step(batch, batch_idx)
        def val_dataloader(self):
            return [super(ValidationModel, self).val_dataloader() for _ in range(n_dataloaders)]
    model = ValidationModel()
    model.validation_epoch_end = None
    trainer = Trainer(
        default_root_dir=tmpdir,
        max_epochs=n_epochs,
        limit_train_batches=1,
        limit_val_batches=n_batches,
        num_sanity_val_steps=0,
    )
    # simulate a failure
    with pytest.raises(CustomException):
        trainer.fit(model)
    # fault-tolerant training writes this auto-save checkpoint on failure
    ckpt_path = str(tmpdir / ".pl_auto_save.ckpt")
    checkpoint = torch.load(ckpt_path)["loops"]["fit_loop"]
    total_dataloader = stop_epoch * n_dataloaders + stop_dataloader
    expected = {
        "total": {"ready": total_dataloader + 1, "completed": total_dataloader},
        "current": {"ready": stop_dataloader + 1, "completed": stop_dataloader},
    }
    assert checkpoint["epoch_loop.val_loop.dataloader_progress"] == expected
    trainer.fit_loop.load_state_dict(checkpoint)
    # `nbe_`: non-breaking epoch, as in, no exception will be raised. `be_`: breaking epoch
    nbe_total_val_batch = stop_epoch * n_dataloaders * n_batches
    be_total_val_batch = stop_dataloader * n_batches + stop_batch
    total_val_batch = nbe_total_val_batch + be_total_val_batch
    expected = {
        "total": {
            "ready": total_val_batch + 1,
            "started": total_val_batch + 1,
            "processed": total_val_batch,
            "completed": total_val_batch,
        },
        "current": {
            "ready": stop_batch + 1,
            "started": stop_batch + 1,
            "processed": stop_batch,
            "completed": stop_batch,
        },
        "is_last_batch": False,
    }
    assert trainer.fit_loop.epoch_loop.val_loop.epoch_loop.batch_progress.state_dict() == expected
@mock.patch.dict(os.environ, {"PL_FAULT_TOLERANT_TRAINING": "1"})
@pytest.mark.parametrize("accumulate_grad_batches", (1, 2, 3))
@pytest.mark.parametrize("n_optimizers", (1, 3, 5))
@pytest.mark.parametrize("stop_epoch", (1, 2))
@pytest.mark.parametrize("stop_batch", (1, 2))
@pytest.mark.parametrize("stop_optimizer", (1, 2))
def test_loop_state_on_exception(accumulate_grad_batches, stop_epoch, stop_batch, stop_optimizer, n_optimizers, tmpdir):
    """On a mid-training failure, the fault-tolerant checkpoint must contain exactly
    the progress counters accumulated up to the failing epoch/batch/optimizer."""
    # keep the failing optimizer index in range for the 1-optimizer case
    stop_optimizer = stop_optimizer if stop_optimizer < n_optimizers else 0
    n_epochs = 3
    n_batches = 3
    class TestModel(BoringModel):
        def __init__(self):
            super().__init__()
            if n_optimizers > 1:
                self.configure_optimizers = self.configure_optimizers_multiple
        def training_step(self, batch, batch_idx, optimizer_idx=0):
            # raise exactly at the parametrized epoch/batch/optimizer position
            if self.trainer.current_epoch == stop_epoch and batch_idx == stop_batch and optimizer_idx == stop_optimizer:
                raise CustomException
            return super().training_step(batch, batch_idx)
        def configure_optimizers_multiple(self):
            optimizers = [torch.optim.Adam(self.layer.parameters(), lr=0.1) for _ in range(n_optimizers)]
            lr_scheduler_0 = torch.optim.lr_scheduler.StepLR(optimizers[0], step_size=1)
            lr_scheduler_1 = torch.optim.lr_scheduler.StepLR(optimizers[1], step_size=1)
            # no scheduler for optimizer_2
            lr_schedulers = [lr_scheduler_0, {"scheduler": lr_scheduler_1, "interval": "step"}]
            return optimizers, lr_schedulers
    model = TestModel()
    model.training_epoch_end = None
    trainer = Trainer(
        default_root_dir=tmpdir,
        max_epochs=n_epochs,
        limit_train_batches=n_batches,
        limit_val_batches=0,
        accumulate_grad_batches=accumulate_grad_batches,
        enable_progress_bar=False,
        logger=False,
        enable_checkpointing=False,
    )
    # simulate a failure
    with pytest.raises(CustomException):
        trainer.fit(model)
    # fault-tolerant training writes this auto-save checkpoint on failure
    ckpt_path = str(tmpdir / ".pl_auto_save.ckpt")
    assert os.path.exists(ckpt_path)
    checkpoint = torch.load(ckpt_path)
    optim_progress = trainer.fit_loop.epoch_loop.batch_loop.optimizer_loop.optim_progress
    sch_progress = trainer.fit_loop.epoch_loop.scheduler_progress
    # `nbe_`: non-breaking epoch, as in, no exception will be raised. `be_`: breaking epoch
    nbe_batches_completed = stop_epoch * n_batches
    be_batches_completed = stop_batch
    be_batches_ready = stop_batch + 1
    # lightning applies leftover accumulated gradients when the epoch ends
    has_leftover_accumulation_batches = n_batches % accumulate_grad_batches != 0
    # number of batches that will call `optimizer.step()` during non-breaking and breaking epochs
    nbe_stepping_batches = nbe_batches_completed // accumulate_grad_batches
    be_stepping_batches = be_batches_completed // accumulate_grad_batches
    nbe_total_opt_steps = (nbe_stepping_batches + has_leftover_accumulation_batches) * n_optimizers
    does_last_be_batch_step = be_batches_ready % accumulate_grad_batches == 0 or has_leftover_accumulation_batches
    be_total_opt_steps = be_stepping_batches * n_optimizers + does_last_be_batch_step * stop_optimizer
    assert optim_progress.optimizer_steps == nbe_total_opt_steps + be_total_opt_steps
    assert optim_progress.optimizer.step.current.completed == be_total_opt_steps
    has_opt_stepped_in_be = stop_batch + 1 >= accumulate_grad_batches
    nbe_total_zero_grad = (nbe_stepping_batches + has_leftover_accumulation_batches) * n_optimizers
    does_last_be_batch_zero_grad = be_batches_completed % accumulate_grad_batches == 0
    # `max` because the first batch always zero-grads
    be_total_zero_grad = max(1, be_stepping_batches) * n_optimizers + stop_optimizer * does_last_be_batch_zero_grad
    assert optim_progress.optimizer.zero_grad.total.completed == nbe_total_zero_grad + be_total_zero_grad
    assert optim_progress.optimizer.zero_grad.current.completed == be_total_zero_grad
    nbe_sch_steps = stop_epoch
    be_sch_steps = 0 # the current epoch did not complete
    if n_optimizers > 1:
        # assumes that the scheduler config is unchanged
        # `* 1` because there is only one step-level scheduler
        nbe_sch_steps = stop_epoch + nbe_stepping_batches + has_leftover_accumulation_batches * 1
        # `0 +` for the epoch-level scheduler
        be_sch_steps = 0 + be_stepping_batches
    assert sch_progress.total.completed == nbe_sch_steps + be_sch_steps
    assert sch_progress.current.completed == be_sch_steps
    # full expected checkpoint structure for the fit loop's progress tracking
    expected = {
        "state_dict": ANY,
        "epoch_progress": {
            "total": {
                "ready": stop_epoch + 1,
                "started": stop_epoch + 1,
                "processed": stop_epoch,
                "completed": stop_epoch,
            },
            "current": {
                "ready": stop_epoch + 1,
                "started": stop_epoch + 1,
                "processed": stop_epoch,
                "completed": stop_epoch,
            },
        },
        "epoch_loop.state_dict": ANY,
        "epoch_loop.batch_progress": {
            "total": {
                "ready": nbe_batches_completed + be_batches_completed + 1,
                "started": nbe_batches_completed + be_batches_completed + 1,
                "processed": nbe_batches_completed + be_batches_completed,
                "completed": nbe_batches_completed + be_batches_completed,
            },
            "current": {
                "ready": stop_batch + 1,
                "started": stop_batch + 1,
                "processed": stop_batch,
                "completed": stop_batch,
            },
            "is_last_batch": False,
        },
        "epoch_loop.scheduler_progress": {
            "total": {"ready": nbe_sch_steps + be_sch_steps, "completed": nbe_sch_steps + be_sch_steps},
            "current": {"ready": be_sch_steps, "completed": be_sch_steps},
        },
        "epoch_loop.batch_loop.state_dict": ANY,
        "epoch_loop.batch_loop.manual_loop.state_dict": ANY,
        "epoch_loop.batch_loop.optimizer_loop.state_dict": {},
        "epoch_loop.batch_loop.optimizer_loop.optim_progress": {
            "optimizer_position": stop_optimizer,
            "optimizer": {
                "step": {
                    "total": {
                        "ready": nbe_total_opt_steps + be_total_opt_steps + has_opt_stepped_in_be,
                        "completed": nbe_total_opt_steps + be_total_opt_steps,
                    },
                    "current": {"ready": be_total_opt_steps + has_opt_stepped_in_be, "completed": be_total_opt_steps},
                },
                "zero_grad": {
                    "total": {
                        "ready": nbe_total_zero_grad + be_total_zero_grad,
                        "started": nbe_total_zero_grad + be_total_zero_grad,
                        "completed": nbe_total_zero_grad + be_total_zero_grad,
                    },
                    "current": {
                        "ready": be_total_zero_grad,
                        "started": be_total_zero_grad,
                        "completed": be_total_zero_grad,
                    },
                },
            },
        },
        "epoch_loop.val_loop.state_dict": ANY,
        "epoch_loop.val_loop.dataloader_progress": ANY,
        "epoch_loop.val_loop.epoch_loop.state_dict": ANY,
        "epoch_loop.val_loop.epoch_loop.batch_progress": ANY,
        "epoch_loop.val_loop._results": ANY,
        "epoch_loop._results": ANY,
    }
    assert checkpoint["loops"]["fit_loop"] == expected
    trainer.fit_loop.load_state_dict(checkpoint["loops"]["fit_loop"])
    state_dict = trainer.fit_loop.state_dict()
    # need to remove these elements for comparison; comparing with `fit_loop.state_dict()` would require the
    # fit loop to have an iterator, which is only available during training
    state_dict["epoch_loop.state_dict"]["dataloader_state_dict"] = ANY
    checkpoint["loops"]["fit_loop"]["epoch_loop.state_dict"]["dataloader_state_dict"] = ANY
    assert state_dict == checkpoint["loops"]["fit_loop"]
    trainer.fit_loop.load_state_dict(checkpoint["loops"]["fit_loop"])
    # test resetting manually, we expect all `ready` counters to be reset to `completed`
    trainer.fit_loop.reset()
    trainer.fit_loop.epoch_loop.reset()
    trainer.fit_loop.epoch_loop.batch_loop.reset()
    trainer.fit_loop.epoch_loop.batch_loop.optimizer_loop.reset()
    trainer.fit_loop.epoch_loop.val_loop.reset()
    trainer.fit_loop.epoch_loop.val_loop.epoch_loop.reset()
    epoch_progress = trainer.fit_loop.epoch_progress
    assert epoch_progress.current.ready == stop_epoch
    assert epoch_progress.current.completed == stop_epoch
    batch_progress = trainer.fit_loop.epoch_loop.batch_progress
    assert batch_progress.current.ready == be_batches_completed
    assert batch_progress.current.completed == be_batches_completed
    optim_progress = trainer.fit_loop.epoch_loop.batch_loop.optimizer_loop.optim_progress
    assert optim_progress.optimizer.step.current.ready == be_total_opt_steps
    assert optim_progress.optimizer.step.current.completed == be_total_opt_steps
    assert optim_progress.optimizer.zero_grad.current.ready == be_total_zero_grad
    assert optim_progress.optimizer.zero_grad.current.completed == be_total_zero_grad
    state_dict = trainer.fit_loop.state_dict()
    assert state_dict != checkpoint["loops"]["fit_loop"]
    assert state_dict["epoch_progress"]["total"]["started"] == stop_epoch + 1
    assert state_dict["epoch_progress"]["current"]["started"] == stop_epoch
@mock.patch.dict(os.environ, {"PL_FAULT_TOLERANT_TRAINING": "1"})
@pytest.mark.parametrize("n_optimizers", (1, 3, 5))
def test_loop_state_on_complete_run(n_optimizers, tmpdir):
    """After an uninterrupted fit, the saved checkpoint holds the final progress
    counters for epochs, batches, schedulers and every optimizer."""
    n_epochs = 3
    n_batches = 3
    accumulate_grad_batches = 1
    class TestModel(BoringModel):
        def __init__(self):
            super().__init__()
            if n_optimizers > 1:
                self.configure_optimizers = self.configure_optimizers_multiple
        def training_step(self, batch, batch_idx, optimizer_idx=0):
            return super().training_step(batch, batch_idx)
        def configure_optimizers_multiple(self):
            optimizers = [torch.optim.Adam(self.layer.parameters(), lr=0.1) for _ in range(n_optimizers)]
            lr_scheduler_0 = torch.optim.lr_scheduler.StepLR(optimizers[0], step_size=1)
            lr_scheduler_1 = torch.optim.lr_scheduler.StepLR(optimizers[1], step_size=1)
            # no scheduler for optimizer_2
            lr_schedulers = [lr_scheduler_0, {"scheduler": lr_scheduler_1, "interval": "step"}]
            return optimizers, lr_schedulers
        def train_dataloader(self):
            # override to test the `is_last_batch` value
            return DataLoader(RandomDataset(32, n_batches))
    model = TestModel()
    model.training_epoch_end = None
    trainer = Trainer(
        default_root_dir=tmpdir,
        max_epochs=n_epochs,
        limit_val_batches=0,
        accumulate_grad_batches=accumulate_grad_batches,
        enable_progress_bar=False,
        logger=False,
    )
    trainer.fit(model)
    assert trainer.num_training_batches == n_batches
    ckpt_path = trainer.checkpoint_callback.best_model_path
    assert os.path.exists(ckpt_path)
    checkpoint = torch.load(ckpt_path)
    # scheduler step counts: one epoch-level scheduler always; with multiple
    # optimizers there is additionally one step-level scheduler
    n_sch_steps_total = n_epochs
    n_sch_steps_current = 1
    if n_optimizers > 1:
        n_sch_steps_total = n_epochs + n_epochs * n_batches
        n_sch_steps_current = n_batches + 1
    expected = {
        "state_dict": ANY,
        "epoch_progress": {
            "total": {
                "ready": n_epochs,
                "started": n_epochs,
                "processed": n_epochs,
                # TODO: the following "-1" offset will be fixed by
                # https://github.com/PyTorchLightning/pytorch-lightning/pull/8578
                "completed": n_epochs - 1,
            },
            "current": {
                "ready": n_epochs,
                "started": n_epochs,
                "processed": n_epochs,
                # TODO: the following "-1" offset will be fixed by
                # https://github.com/PyTorchLightning/pytorch-lightning/pull/8578
                "completed": n_epochs - 1,
            },
        },
        "epoch_loop.state_dict": ANY,
        "epoch_loop.batch_progress": {
            "total": {
                "ready": n_epochs * n_batches,
                "started": n_epochs * n_batches,
                "processed": n_epochs * n_batches,
                "completed": n_epochs * n_batches,
            },
            "current": {
                "ready": n_batches,
                "started": n_batches,
                "processed": n_batches,
                "completed": n_batches,
            },
            "is_last_batch": True,
        },
        "epoch_loop.scheduler_progress": {
            "total": {"ready": n_sch_steps_total, "completed": n_sch_steps_total},
            "current": {"ready": n_sch_steps_current, "completed": n_sch_steps_current},
        },
        "epoch_loop.batch_loop.state_dict": ANY,
        "epoch_loop.batch_loop.manual_loop.state_dict": ANY,
        "epoch_loop.batch_loop.optimizer_loop.state_dict": {},
        "epoch_loop.batch_loop.optimizer_loop.optim_progress": {
            "optimizer_position": n_optimizers,
            "optimizer": {
                "step": {
                    "total": {
                        "ready": n_epochs * n_batches * n_optimizers,
                        "completed": n_epochs * n_batches * n_optimizers,
                    },
                    "current": {
                        "ready": n_batches * n_optimizers,
                        "completed": n_batches * n_optimizers,
                    },
                },
                "zero_grad": {
                    "total": {
                        "ready": n_epochs * n_batches * n_optimizers,
                        "started": n_epochs * n_batches * n_optimizers,
                        "completed": n_epochs * n_batches * n_optimizers,
                    },
                    "current": {
                        "ready": n_batches * n_optimizers,
                        "started": n_batches * n_optimizers,
                        "completed": n_batches * n_optimizers,
                    },
                },
            },
        },
        "epoch_loop.val_loop.state_dict": ANY,
        "epoch_loop.val_loop.dataloader_progress": ANY,
        "epoch_loop.val_loop.epoch_loop.state_dict": ANY,
        "epoch_loop.val_loop.epoch_loop.batch_progress": ANY,
        "epoch_loop.val_loop._results": ANY,
        "epoch_loop._results": ANY,
    }
    assert checkpoint["loops"]["fit_loop"] == expected
@mock.patch.dict(os.environ, {"PL_FAULT_TOLERANT_TRAINING": "1"})
def test_fit_loop_reset(tmpdir):
    """Test that the reset logic in fit- and epoch loop is aware of whether the loop is restarting from a completed
    loop or from a mid-epoch checkpoint.

    The test first runs a short fit to produce both a mid-epoch checkpoint
    (step=1) and an end-of-epoch checkpoint (step=3), then loads each one into
    the live loops and checks how ``reset()`` treats the progress counters.
    """
    # generate checkpoints at end of epoch and mid-epoch
    model = BoringModel()
    checkpoint_callback = ModelCheckpoint(
        dirpath=tmpdir,
        every_n_train_steps=2,
        save_top_k=-1,
    )
    trainer = Trainer(
        default_root_dir=tmpdir,
        limit_train_batches=4,
        num_sanity_val_steps=0,
        max_epochs=2,
        callbacks=[checkpoint_callback],
        logger=False,
        enable_model_summary=False,
    )
    trainer.fit(model)
    # reset state loaded from a checkpoint from mid-epoch
    mid_epoch_ckpt = torch.load(str(tmpdir / "epoch=0-step=1.ckpt"))
    fit_loop = trainer.fit_loop
    epoch_loop = fit_loop.epoch_loop
    optimizer_loop = epoch_loop.batch_loop.optimizer_loop
    # the loops just finished a normal fit, so none of them is restarting yet
    assert not fit_loop.restarting
    assert not epoch_loop.restarting
    assert not optimizer_loop.restarting
    # we load exactly what was saved - no reset yet
    fit_loop.load_state_dict(mid_epoch_ckpt["loops"]["fit_loop"])
    # resetting from a mid-of-epoch checkpoint SHOULD NOT reset the current counters to 0
    fit_loop.reset()
    epoch_loop.reset()
    optimizer_loop.reset()
    assert fit_loop.restarting
    assert fit_loop.epoch_progress.total.ready == 1
    assert fit_loop.epoch_progress.total.completed == 0 # the checkpoint was saved mid epoch
    assert fit_loop.epoch_progress.current.ready == 0
    assert fit_loop.epoch_progress.current.completed == 0
    assert epoch_loop.restarting
    assert epoch_loop.batch_progress.total.ready == 2
    assert epoch_loop.batch_progress.total.processed == 2
    assert epoch_loop.batch_progress.total.completed == 1 # the checkpoint was saved on train_batch_end
    assert epoch_loop.batch_progress.current.ready == 1 # currents get set to the completed value
    assert epoch_loop.batch_progress.current.processed == 1
    assert epoch_loop.batch_progress.current.completed == 1
    assert optimizer_loop.restarting
    assert optimizer_loop.optim_progress.optimizer_position == 1
    # reset state loaded from a checkpoint from the end of an epoch
    end_of_epoch_ckpt = torch.load(str(tmpdir / "epoch=0-step=3.ckpt"))
    fit_loop = trainer.fit_loop
    epoch_loop = fit_loop.epoch_loop
    # clear the restarting flags so the second load/reset cycle starts clean
    fit_loop.restarting = False
    epoch_loop.restarting = False
    optimizer_loop.restarting = False
    # we load exactly what was saved - no reset yet
    fit_loop.load_state_dict(end_of_epoch_ckpt["loops"]["fit_loop"])
    # resetting from a end-of-epoch checkpoint SHOULD reset the current counters to 0
    fit_loop.reset()
    epoch_loop.reset()
    optimizer_loop.reset()
    assert fit_loop.restarting
    assert fit_loop.epoch_progress.total.ready == 1
    assert fit_loop.epoch_progress.total.completed == 0 # the checkpoint saves before the epoch completes
    assert fit_loop.epoch_progress.current.ready == 0
    assert fit_loop.epoch_progress.current.completed == 0
    assert epoch_loop.restarting
    assert epoch_loop.batch_progress.total.ready == 4
    assert epoch_loop.batch_progress.total.processed == 4
    assert epoch_loop.batch_progress.total.completed == 3 # the checkpoint was saved on train_batch_end
    assert epoch_loop.batch_progress.current.ready == 3 # currents get set to the completed value
    assert epoch_loop.batch_progress.current.processed == 3
    assert epoch_loop.batch_progress.current.completed == 3
    assert optimizer_loop.optim_progress.optimizer_position == 1
@mock.patch.dict(os.environ, {"PL_FAULT_TOLERANT_TRAINING": "1"})
@pytest.mark.parametrize(
    ["train_datasets", "val_datasets"],
    [([RandomDataset], [RandomDataset]), ([RandomDataset], [RandomDataset, RandomDataset])],
)
@pytest.mark.parametrize("val_check_interval", [0.5, 1.0])
def test_fit_can_fail_during_validation(train_datasets, val_datasets, val_check_interval, tmpdir):
    """Fault-tolerant training: a run that raises mid-validation must produce
    the ``.pl_auto_save.ckpt`` auto-checkpoint with partial progress counters,
    and resuming from it must reach the same final loop state as a run that
    never failed (modulo the documented off-by-one totals for the failed batch).
    """
    size, n_batches = 2, 4
    # fail on the second batch (index 1) of the last validation dataloader
    stop_batch = 1
    n_val_dataloaders = len(val_datasets)
    stop_dataloader = n_val_dataloaders - 1
    class TestModel(LightningModule):
        def __init__(self, should_fail):
            super().__init__()
            self.layer = torch.nn.Linear(size, 2)
            # when True, validation_step raises at (stop_dataloader, stop_batch)
            self.should_fail = should_fail
        def step(self, batch):
            return sum(self.layer(b).sum() for b in batch)
        def training_step(self, batch, batch_idx):
            return self.step(batch)
        def validation_step(self, batch, batch_idx, dataloader_idx=0):
            if self.should_fail and dataloader_idx == stop_dataloader and batch_idx == stop_batch:
                raise CustomException
            return self.step(batch)
        def configure_optimizers(self):
            return torch.optim.SGD(self.layer.parameters(), lr=0.1)
        def train_dataloader(self):
            return [DataLoader(cls(size, n_batches)) for cls in train_datasets]
        def val_dataloader(self):
            return [DataLoader(cls(size, n_batches)) for cls in val_datasets]
    # 1) reference run that does not fail: record the expected final loop state
    model = TestModel(False)
    trainer = Trainer(
        default_root_dir=tmpdir,
        max_epochs=1,
        val_check_interval=val_check_interval,
        num_sanity_val_steps=0,
        enable_progress_bar=False,
    )
    trainer.fit(model)
    ckpt_path = os.path.join(tmpdir, ".pl_auto_save.ckpt")
    assert not os.path.exists(ckpt_path), "Shouldn't have failed"
    state_dict = trainer.fit_loop.state_dict()
    expected_global_step = trainer.global_step
    assert state_dict["epoch_loop.batch_progress"] == {
        "total": {"ready": n_batches, "started": n_batches, "processed": n_batches, "completed": n_batches},
        "current": {"ready": n_batches, "started": n_batches, "processed": n_batches, "completed": n_batches},
        "is_last_batch": True,
    }
    # number of validation runs within the single training epoch
    val_per_epoch = int(1 // val_check_interval)
    assert state_dict["epoch_loop.val_loop.dataloader_progress"] == {
        "total": {"ready": n_val_dataloaders * val_per_epoch, "completed": n_val_dataloaders * val_per_epoch},
        "current": {"ready": n_val_dataloaders, "completed": n_val_dataloaders},
    }
    assert state_dict["epoch_loop.val_loop.epoch_loop.batch_progress"] == {
        "total": {
            "ready": n_val_dataloaders * val_per_epoch * n_batches,
            "started": n_val_dataloaders * val_per_epoch * n_batches,
            "processed": n_val_dataloaders * val_per_epoch * n_batches,
            "completed": n_val_dataloaders * val_per_epoch * n_batches,
        },
        "current": {"ready": n_batches, "completed": n_batches, "started": n_batches, "processed": n_batches},
        "is_last_batch": True,
    }
    # 2) failing run: the exception during validation triggers the auto-save
    model = TestModel(True)
    trainer = Trainer(
        default_root_dir=tmpdir,
        max_epochs=1,
        val_check_interval=val_check_interval,
        num_sanity_val_steps=0,
        enable_progress_bar=False,
    )
    with pytest.raises(CustomException):
        # will stop during validation
        trainer.fit(model)
    assert os.path.exists(ckpt_path)
    checkpoint = torch.load(ckpt_path)["loops"]["fit_loop"]
    # train batches completed before the first validation run kicked in
    per_val_train_batches = int(n_batches * val_check_interval)
    assert checkpoint["epoch_loop.batch_progress"] == {
        "total": {
            "ready": per_val_train_batches,
            "started": per_val_train_batches,
            "processed": per_val_train_batches,
            "completed": per_val_train_batches,
        },
        "current": {
            "ready": per_val_train_batches,
            "started": per_val_train_batches,
            "processed": per_val_train_batches,
            "completed": per_val_train_batches,
        },
        "is_last_batch": val_check_interval == 1,
    }
    val_batch_progress = "epoch_loop.val_loop.epoch_loop.batch_progress"
    # "nb_" prefix: number of batches from the dataloaders fully consumed
    # before the one where the failure happened ("non-breaking" dataloaders)
    nb_total_val_batch = stop_dataloader * n_batches
    # the failing batch was ready/started (+1) but never processed/completed
    assert checkpoint[val_batch_progress] == {
        "total": {
            "ready": nb_total_val_batch + stop_batch + 1,
            "started": nb_total_val_batch + stop_batch + 1,
            "processed": nb_total_val_batch + stop_batch,
            "completed": nb_total_val_batch + stop_batch,
        },
        "current": {
            "ready": stop_batch + 1,
            "started": stop_batch + 1,
            "processed": stop_batch,
            "completed": stop_batch,
        },
        "is_last_batch": False,
    }
    # 3) resume from the auto-saved checkpoint with a non-failing model
    model = TestModel(False)
    trainer = Trainer(
        default_root_dir=tmpdir,
        max_epochs=1,
        val_check_interval=val_check_interval,
        num_sanity_val_steps=0,
        enable_progress_bar=False,
    )
    trainer.fit(model, ckpt_path=ckpt_path)
    # TODO: -1 because there's a bug where global step is off by one on reload
    assert trainer.global_step - 1 == expected_global_step
    state_dict_after_restart = trainer.fit_loop.state_dict()
    # should get the same values as in the run that did not fail
    # totals are increased by 1 (the failed batch which never completed)
    expected = state_dict.copy()
    # TODO: `is_last_batch` is not correct on reload, the next line should not be necessary
    expected["epoch_loop.batch_progress"]["is_last_batch"] = val_check_interval == 1.0
    assert state_dict_after_restart["epoch_loop.batch_progress"] == expected["epoch_loop.batch_progress"]
    val_dl_progress = "epoch_loop.val_loop.dataloader_progress"
    expected[val_dl_progress]["total"]["ready"] += 1
    assert state_dict_after_restart[val_dl_progress] == expected[val_dl_progress]
    expected[val_batch_progress]["total"]["ready"] += 1
    expected[val_batch_progress]["total"]["started"] += 1
    assert state_dict_after_restart[val_batch_progress] == expected[val_batch_progress]
@RunIf(min_torch="1.8.0")
@pytest.mark.parametrize("should_fail", [False, True])
@pytest.mark.parametrize("persistent_workers", [pytest.param(False, marks=RunIf(slow=True)), True])
def test_workers_are_shutdown(tmpdir, should_fail, persistent_workers):
    """Check that DataLoader worker processes are shut down the expected
    number of times, both for a successful fit and for one interrupted by an
    exception at the end of the second epoch.
    """
    # `num_workers == 1` uses `_MultiProcessingDataLoaderIter`
    # `persistent_workers` makes sure `self._iterator` gets set on the `DataLoader` instance
    class _TestMultiProcessingDataLoaderIter(_MultiProcessingDataLoaderIter):
        def __init__(self, *args, dataloader, **kwargs):
            super().__init__(*args, **kwargs)
            self.dataloader = dataloader
        def _shutdown_workers(self):
            # count every shutdown so the assertions below can verify it
            self.dataloader.count_shutdown_workers += 1
            super()._shutdown_workers()
    class TestDataLoader(DataLoader):
        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)
            self.count_shutdown_workers = 0
        def _get_iterator(self):
            if self.num_workers == 0:
                return super()._get_iterator()
            else:
                self.check_worker_number_rationality()
                return _TestMultiProcessingDataLoaderIter(self, dataloader=self)
    train_dataloader = TestDataLoader(RandomDataset(32, 64), num_workers=1, persistent_workers=persistent_workers)
    val_dataloader = TestDataLoader(RandomDataset(32, 64), num_workers=1, persistent_workers=persistent_workers)
    class TestCallback(Callback):
        def on_train_epoch_end(self, trainer, *_):
            if trainer.current_epoch == 1:
                raise CustomException
    max_epochs = 3
    model = BoringModel()
    trainer = Trainer(
        default_root_dir=tmpdir,
        limit_train_batches=2,
        limit_val_batches=2,
        max_epochs=max_epochs,
        callbacks=TestCallback() if should_fail else None,
    )
    if should_fail:
        with pytest.raises(CustomException):
            trainer.fit(model, train_dataloader, val_dataloader)
    else:
        trainer.fit(model, train_dataloader, val_dataloader)
    # BUGFIX: the original `assert x == 2 if cond else (...)` parsed as
    # `assert ((x == 2) if cond else <non-zero int>)` because a conditional
    # expression binds looser than `==`, so the `else` branch always passed.
    # Compute the expected count first, then compare.
    expected_train_shutdowns = 2 if should_fail else (2 if persistent_workers else max_epochs)
    assert train_dataloader.count_shutdown_workers == expected_train_shutdowns
    # on sanity checking end, the workers are being deleted too.
    expected_val_shutdowns = 2 if persistent_workers else (3 if should_fail else max_epochs + 1)
    assert val_dataloader.count_shutdown_workers == expected_val_shutdowns
    assert train_dataloader._iterator is None
    assert val_dataloader._iterator is None
| 38.490603 | 120 | 0.66223 |
9d2bfa21faea8198d7434edbb6c1febfca82ef78 | 1,547 | py | Python | TerminalCodeVersion.py | PranavEranki/Stocks | 9521f195a00b3216ed40283834890a9527138ca1 | [
"MIT"
] | null | null | null | TerminalCodeVersion.py | PranavEranki/Stocks | 9521f195a00b3216ed40283834890a9527138ca1 | [
"MIT"
] | null | null | null | TerminalCodeVersion.py | PranavEranki/Stocks | 9521f195a00b3216ed40283834890a9527138ca1 | [
"MIT"
] | null | null | null | import quandl
import numpy as np
from sklearn import preprocessing
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Lasso
import warnings
import plotHelper
import printHelper
import randomRetrieval
warnings.filterwarnings("ignore")
def getData(name):
    """Download the full WIKI end-of-day price history for ticker *name*."""
    dataset_code = "WIKI/{}".format(name)
    return quandl.get(dataset_code)
def preprocess(data, forecast):
    """Build scaled feature/target arrays for price prediction.

    Keeps only the 'Adj. Close' column, creates a 'Prediction' column that is
    the close price shifted `forecast` rows into the future, standardises the
    features, and splits off the last `forecast` rows (whose targets are NaN
    after the shift) as the rows to predict on.

    Returns ``(X_forecast, X_train, y_train)`` where ``X_forecast`` holds the
    final `forecast` feature rows and ``y_train`` is shaped ``(n, 1)``.
    """
    data = data[['Adj. Close']]
    # target = close price `forecast` rows ahead; the last `forecast` rows
    # become NaN and are excluded from training below
    data['Prediction'] = data[['Adj. Close']].shift(-forecast)
    X = np.array(data.drop(['Prediction'], 1))
    # zero-mean / unit-variance scaling across the whole series
    X = preprocessing.scale(X)
    X_forecast = X[-forecast:] # set X_forecast equal to last forecast days
    X = X[:-forecast]
    y = np.array(data['Prediction'])
    y = y[:-forecast]
    X_train = X
    # reshape to a column vector as expected by the regressor's fit()
    y_train = y.reshape((y.shape[0],1))
    return X_forecast,X_train,y_train
def predict(name,forecast):
    """Fetch data for *name*, fit a Lasso regressor, and plot history,
    forecast, and a combined view via :mod:`plotHelper`."""
    data = getData(name)
    X_forecast,X_train,y_train = preprocess(data,forecast)
    #regressor = LinearRegression()
    regressor = Lasso()
    regressor.fit(X_train,y_train)
    forecast_prediction = regressor.predict(X_forecast)
    # column vector of shape (forecast, 1) to match y_train for plotting
    forecast_prediction = forecast_prediction.reshape((forecast,1))
    plotHelper.oldPlot(y_train,name)
    plotHelper.newPlot(forecast_prediction,name)
    # interactive: ask how much history to show in the combined plot
    howfar = plotHelper.gatherHowFar()
    plotHelper.allPlot(y_train,forecast_prediction,howfar,name)
def main():
    """Entry point: configure the Quandl API key, prompt for a ticker and
    forecast horizon, then run the prediction pipeline."""
    randomRetrieval.getKey()
    name,forecast = randomRetrieval.getNameAndForecast()
    printHelper.printWorking()
    predict(name,forecast)
if __name__ == "__main__":
    main()
| 23.089552 | 75 | 0.707822 |
171a74982a389b78288ff2696dcdf4847b14ee79 | 1,199 | py | Python | server/engines/autodet_engine/model_manager_bus.py | CanboYe/BusEdge | 2e53e1d1d82559fc3e9f0029b2f0faf4e356b210 | [
"MIT",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | 2 | 2021-08-17T14:14:28.000Z | 2022-02-02T02:09:33.000Z | server/engines/autodet_engine/model_manager_bus.py | cmusatyalab/gabriel-BusEdge | 528a6ee337882c6e709375ecd7ec7e201083c825 | [
"MIT",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | server/engines/autodet_engine/model_manager_bus.py | cmusatyalab/gabriel-BusEdge | 528a6ee337882c6e709375ecd7ec7e201083c825 | [
"MIT",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | 1 | 2021-09-01T16:18:29.000Z | 2021-09-01T16:18:29.000Z | # SPDX-FileCopyrightText: 2021 Carnegie Mellon University
#
# SPDX-License-Identifier: Apache-2.0
import argparse
import glob
import os
import time
from detector_fbnet import AutoDetector
def main():
    """Watch the manual-annotation directory and retrain the detector's SVC
    whenever the annotation file for the next model version shows up."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "-t",
        "--target-name",
        default="trash_can",
        help="Set target name for the Auto-Detectron pipeline",
    )
    options = arg_parser.parse_args()
    target = options.target_name
    detector = AutoDetector(target)
    task_root = os.path.join("./autoDet_tasks/", target)
    anno_dir = os.path.join(task_root, "manual_anno")
    for directory in (task_root, anno_dir):
        os.makedirs(directory, exist_ok=True)
    # Poll once per second, forever: when annotations for version N+1 appear,
    # train the next SVC from them.
    while True:
        expected_name = "annotations_{}.json".format(detector.model_version + 1)
        for candidate in glob.glob(os.path.join(anno_dir, "*.json")):
            if os.path.basename(candidate) == expected_name:
                print("Now reading anno from {}".format(candidate))
                detector.train_svc(expected_name)
        time.sleep(1)
if __name__ == "__main__":
    main()
| 26.644444 | 75 | 0.662219 |
f7fc514a52d7fd959e3b80d9c3b97a0d2c8a549c | 3,204 | py | Python | cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py | yanheven/cinder | 89797971f30d547acbf715fea099c52d90966d1f | [
"Apache-2.0"
] | null | null | null | cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py | yanheven/cinder | 89797971f30d547acbf715fea099c52d90966d1f | [
"Apache-2.0"
] | null | null | null | cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py | yanheven/cinder | 89797971f30d547acbf715fea099c52d90966d1f | [
"Apache-2.0"
] | null | null | null | # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from oslo_config import cfg
from oslo_log import log as logging
from sqlalchemy import MetaData, Table
from cinder.i18n import _LE, _LI
# Get default values via config. The defaults will either
# come from the default values set in the quota option
# configuration or via cinder.conf if the user has configured
# default values for quotas there.
CONF = cfg.CONF
CONF.import_opt('quota_volumes', 'cinder.quota')
CONF.import_opt('quota_snapshots', 'cinder.quota')
CONF.import_opt('quota_gigabytes', 'cinder.quota')
LOG = logging.getLogger(__name__)
CLASS_NAME = 'default'
CREATED_AT = datetime.datetime.now() # noqa
def upgrade(migrate_engine):
    """Add default quota class data into DB.

    Inserts one row per resource (volumes, snapshots, gigabytes) into the
    ``quota_classes`` table under the 'default' class name, using the limits
    configured in cinder.conf (or the option defaults). Idempotent: if any
    'default' rows already exist, the insertion is skipped entirely so that
    operator-provided values are never overwritten.
    """
    meta = MetaData()
    meta.bind = migrate_engine
    quota_classes = Table('quota_classes', meta, autoload=True)
    # count existing 'default' rows (scalar result of a COUNT query)
    rows = quota_classes.count().\
        where(quota_classes.c.class_name == 'default').execute().scalar()
    # Do not add entries if there are already 'default' entries. We don't
    # want to write over something the user added.
    if rows:
        LOG.info(_LI("Found existing 'default' entries in the quota_classes "
                     "table. Skipping insertion of default values."))
        return
    try:
        # Set default volumes
        qci = quota_classes.insert()
        qci.execute({'created_at': CREATED_AT,
                     'class_name': CLASS_NAME,
                     'resource': 'volumes',
                     'hard_limit': CONF.quota_volumes,
                     'deleted': False, })
        # Set default snapshots
        qci.execute({'created_at': CREATED_AT,
                     'class_name': CLASS_NAME,
                     'resource': 'snapshots',
                     'hard_limit': CONF.quota_snapshots,
                     'deleted': False, })
        # Set default gigabytes
        qci.execute({'created_at': CREATED_AT,
                     'class_name': CLASS_NAME,
                     'resource': 'gigabytes',
                     'hard_limit': CONF.quota_gigabytes,
                     'deleted': False, })
        LOG.info(_LI("Added default quota class data into the DB."))
    except Exception:
        LOG.error(_LE("Default quota class data not inserted into the DB."))
        raise
def downgrade(migrate_engine):
    """Intentionally leave the 'default' quota class rows in place.

    There is no way to know whether the 'default' entries existed before this
    migration ran or were inserted by it, so deleting them on downgrade could
    destroy operator-provided data. Doing nothing is the safe choice.
    """
    # Deliberate no-op; see docstring.
    return None
| 36.409091 | 78 | 0.645443 |
100682e1e569dc035da05b799cd1f72d4632de7b | 22,339 | py | Python | lisa/energy_meter.py | bea-arm/lisa | 95b95b302bef76093e8eaad91f291ec2c4e085ec | [
"Apache-2.0"
] | null | null | null | lisa/energy_meter.py | bea-arm/lisa | 95b95b302bef76093e8eaad91f291ec2c4e085ec | [
"Apache-2.0"
] | null | null | null | lisa/energy_meter.py | bea-arm/lisa | 95b95b302bef76093e8eaad91f291ec2c4e085ec | [
"Apache-2.0"
] | null | null | null | # SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2015, ARM Limited and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
import json
import os
import os.path
import psutil
import time
import logging
import inspect
import abc
from collections import namedtuple
from collections.abc import Mapping
from subprocess import Popen, PIPE, STDOUT
import subprocess
from time import sleep
import numpy as np
import pandas as pd
import devlib
from lisa.utils import Loggable, get_subclasses, ArtifactPath, HideExekallID
from lisa.datautils import series_integrate
from lisa.conf import (
SimpleMultiSrcConf, KeyDesc, TopLevelKeyDesc, Configurable,
)
from lisa.generic import TypedList
from lisa.target import Target
# Default energy measurements for each board
EnergyReport = namedtuple('EnergyReport',
['channels', 'report_file', 'data_frame'])
class EnergyMeter(Loggable, Configurable):
    """
    Abstract Base Class of energy meters.

    Subclasses provide a class-level ``name`` string, a ``CONF_CLASS``
    configuration type, and implementations of :meth:`sample`, :meth:`reset`
    and :meth:`report`.
    """
    def __init__(self, target, res_dir=None):
        self._target = target
        # fall back to a per-meter result directory derived from the target
        res_dir = res_dir if res_dir else target.get_res_dir(
            name='EnergyMeter-{}'.format(self.name),
            symlink=False,
        )
        self._res_dir = res_dir
    @classmethod
    def from_conf(cls, target, conf, res_dir=None):
        """
        Build an instance of :class:`EnergyMeter` from a
        configuration object.

        :param target: Target to use
        :type target: lisa.target.Target

        :param conf: Configuration object to use

        :param res_dir: Result directory to use
        :type res_dir: str or None
        """
        # Select the right subclass according to the type of the configuration
        # object we are given
        for subcls in get_subclasses(cls) | {cls}:
            try:
                conf_cls = subcls.CONF_CLASS
            except AttributeError:
                continue
            if isinstance(conf, conf_cls):
                cls = subcls
                break
        # NOTE(review): this passes the formatted message as the *argument* of
        # get_logger() instead of logging it (e.g. get_logger().info(...)) —
        # looks like a bug; confirm against Loggable.get_logger's signature.
        cls.get_logger('{} energy meter configuration:\n{}'.format(cls.name, conf))
        kwargs = cls.conf_to_init_kwargs(conf)
        kwargs.update(
            target=target,
            res_dir=res_dir,
        )
        cls.check_init_param(**kwargs)
        return cls(**kwargs)
    # NOTE(review): declared without `self`; concrete subclasses satisfy this
    # with a plain class attribute string (e.g. name = 'hwmon').
    @abc.abstractmethod
    def name():
        pass
    @abc.abstractmethod
    def sample(self):
        """
        Get a sample from the energy meter
        """
        pass
    @abc.abstractmethod
    def reset(self):
        """
        Reset the energy meter
        """
        pass
    @abc.abstractmethod
    def report(self):
        """
        Get total energy consumption since last :meth:`reset`
        """
        pass
class HWMonConf(SimpleMultiSrcConf, HideExekallID):
    """
    Configuration class for :class:`HWMon`.

    {generated_help}
    """
    # Declarative schema: a single 'channel-map' key mapping channel labels to
    # hwmon site names, under the top-level 'hwmon-conf' key.
    STRUCTURE = TopLevelKeyDesc('hwmon-conf', 'HWMon Energy Meter configuration', (
        # TODO: find a better help and maybe a better type
        KeyDesc('channel-map', 'Channels to use', [Mapping]),
    ))
class HWMon(EnergyMeter):
    """
    HWMon energy meter

    Reads cumulative energy counters exposed through the target's hwmon
    devlib module. :meth:`sample` accumulates deltas per site, :meth:`reset`
    zeroes the accumulators, and :meth:`report` dumps totals to JSON.

    {configurable_params}
    """

    CONF_CLASS = HWMonConf
    name = 'hwmon'

    def __init__(self, target, channel_map, res_dir=None):
        super().__init__(target, res_dir)
        logger = self.get_logger()
        # Energy readings, keyed by site name:
        # {'last': last raw counter, 'delta': last step, 'total': accumulated}
        self.readings = {}
        if not self._target.is_module_available('hwmon'):
            raise RuntimeError('HWMON devlib module not enabled')
        # Initialize HWMON instrument
        logger.info('Scanning for HWMON channels, may take some time...')
        self._hwmon = devlib.HwmonInstrument(self._target)
        # Decide which channels we'll collect data from.
        # If the caller provided a channel_map, require that all the named
        # channels exist.
        # Otherwise, try using the big.LITTLE core names as channel names.
        # If they don't match, just collect all available channels.
        available_sites = [c.site for c in self._hwmon.get_channels('energy')]
        self._channels = channel_map
        if self._channels:
            # If the user provides a channel_map then require it to be correct.
            if not all(s in available_sites for s in list(self._channels.values())):
                raise RuntimeError(
                    "Found sites {} but channel_map contains {}".format(
                        sorted(available_sites), sorted(self._channels.values())))
        elif self._target.big_core:
            bl_sites = [self._target.big_core.upper(),
                        self._target.little_core.upper()]
            if all(s in available_sites for s in bl_sites):
                logger.info('Using default big.LITTLE hwmon channels')
                self._channels = dict(zip(['big', 'LITTLE'], bl_sites))
        if not self._channels:
            logger.info('Using all hwmon energy channels')
            self._channels = {site: site for site in available_sites}
        # Configure channels for energy measurements
        channels = sorted(self._channels.values())
        logger.debug('Enabling channels: {}'.format(channels))
        self._hwmon.reset(kinds=['energy'], sites=channels)
        # Logging enabled channels
        logger.info('Channels selected for energy sampling: {}'.format(
            ', '.join(channel.label for channel in self._hwmon.active_channels)
        ))

    def sample(self):
        """Take one measurement and update per-site last/delta/total counters.

        :returns: the internal ``readings`` dict (mutated in place).
        """
        logger = self.get_logger()
        samples = self._hwmon.take_measurement()
        for s in samples:
            site = s.channel.site
            value = s.value

            # first sample for this site: seed 'last' and leave delta/total at 0
            if site not in self.readings:
                self.readings[site] = {
                    'last': value,
                    'delta': 0,
                    'total': 0
                }
                continue

            self.readings[site]['delta'] = value - self.readings[site]['last']
            self.readings[site]['last'] = value
            self.readings[site]['total'] += self.readings[site]['delta']

        logger.debug('SAMPLE: {}'.format(self.readings))
        return self.readings

    def reset(self):
        """Take a sample to refresh 'last', then zero delta/total counters."""
        self.sample()
        for site in self.readings:
            self.readings[site]['delta'] = 0
            self.readings[site]['total'] = 0
        self.get_logger().debug('RESET: {}'.format(self.readings))

    def report(self, out_dir, out_file='energy.json'):
        """Sample, map sites back to channel labels, and dump totals to JSON.

        :param out_dir: directory where the JSON report is written
        :param out_file: report file name
        :returns: :class:`EnergyReport` (no samples dataframe for hwmon)
        """
        # Retrive energy consumption data
        nrg = self.sample()
        # Reformat data for output generation
        clusters_nrg = {}
        for channel, site in self._channels.items():
            if site not in nrg:
                raise RuntimeError('hwmon channel "{}" not available. '
                                   'Selected channels: {}'.format(
                                       channel, list(nrg.keys())))
            nrg_total = nrg[site]['total']
            self.get_logger().debug('Energy [{:>16}]: {:.6f}'.format(site, nrg_total))
            clusters_nrg[channel] = nrg_total

        # Dump data as JSON file
        nrg_file = os.path.join(out_dir, out_file)
        with open(nrg_file, 'w') as ofile:
            json.dump(clusters_nrg, ofile, sort_keys=True, indent=4)

        return EnergyReport(clusters_nrg, nrg_file, None)
class _DevlibContinuousEnergyMeter(EnergyMeter):
    """Common functionality for devlib Instruments in CONTINUOUS mode"""

    def reset(self):
        # start continuous sampling; stopped again in report()
        self._instrument.start()

    def report(self, out_dir, out_energy='energy.json', out_samples='samples.csv'):
        """Stop sampling, build a timestamped dataframe of the samples,
        integrate power channels into energy, and dump the result to JSON.

        :returns: :class:`EnergyReport` with per-channel energy, the JSON
            report path, and the full samples dataframe.
        """
        self._instrument.stop()
        df = self._read_csv(out_dir, out_samples)
        df = self._build_timeline(df)
        if df.empty:
            raise RuntimeError('No energy data collected')
        channels_nrg = self._compute_energy(df)
        # Dump data as JSON file
        nrg_file = os.path.join(out_dir, out_energy)
        with open(nrg_file, 'w') as ofile:
            json.dump(channels_nrg, ofile, sort_keys=True, indent=4)
        return EnergyReport(channels_nrg, nrg_file, df)

    def _read_csv(self, out_dir, out_samples):
        """Fetch the instrument's CSV and load it with a ('SITE', 'measure')
        two-level column index, validating the headers against the
        instrument's channel labels."""
        csv_path = os.path.join(out_dir, out_samples)
        csv_data = self._instrument.get_data(csv_path)
        with open(csv_path) as f:
            # Each column in the CSV will be headed with 'SITE_measure'
            # (e.g. 'BAT_power'). Convert that to a list of ('SITE', 'measure')
            # tuples, then pass that as the `names` parameter to read_csv to get
            # a nested column index. None of devlib's standard measurement types
            # have '_' in the name so this use of rsplit should be fine.
            exp_headers = [c.label for c in csv_data.channels]
            headers = f.readline().strip().split(',')
            if set(headers) != set(exp_headers):
                raise ValueError(
                    'Unexpected headers in CSV from devlib instrument. '
                    'Expected {}, found {}'.format(sorted(headers),
                                                   sorted(exp_headers)))
            columns = [tuple(h.rsplit('_', 1)) for h in headers]
            # Passing `names` means read_csv doesn't expect to find headers in
            # the CSV (i.e. expects every line to hold data). This works because
            # we have already consumed the first line of `f`.
            df = pd.read_csv(f, names=columns)
        return df

    def _build_timeline(self, df):
        """Replace the integer index with elapsed seconds derived from the
        instrument's sample rate."""
        sample_period = 1. / self._instrument.sample_rate_hz
        df.index = np.linspace(0, sample_period * len(df), num=len(df))
        return df

    def _compute_energy(self, df):
        """Integrate each site's 'power' column over time (trapezoidal rule)
        to get energy per site."""
        channels_nrg = {}
        for site, measure in df:
            if measure == 'power':
                channels_nrg[site] = series_integrate(df[site]['power'], method='trapz')
        return channels_nrg
class AEPConf(SimpleMultiSrcConf, HideExekallID):
    """
    Configuration class for :class:`AEP`.

    {generated_help}
    """
    # Schema for the Arm Energy Probe: channel mapping, shunt resistor values,
    # channel labels and the TTY device node.
    STRUCTURE = TopLevelKeyDesc('aep-conf', 'AEP Energy Meter configuration', (
        KeyDesc('channel-map', 'Channels to use', [Mapping]),
        KeyDesc('resistor-values', 'Resistor values', [TypedList[float]]),
        KeyDesc('labels', 'List of labels', [TypedList[str]]),
        KeyDesc('device-entry', 'TTY device', [TypedList[str]]),
    ))
class AEP(_DevlibContinuousEnergyMeter):
    """
    Arm Energy Probe energy meter

    Continuous sampling via devlib's ``EnergyProbeInstrument``; reset/report
    behaviour is inherited from :class:`_DevlibContinuousEnergyMeter`.

    {configurable_params}
    """

    name = 'aep'
    CONF_CLASS = AEPConf

    def __init__(self, target, resistor_values, labels=None, device_entry='/dev/ttyACM0', res_dir=None):
        super().__init__(target, res_dir)
        logger = self.get_logger()

        # Create the probe instrument with the per-channel shunt resistor
        # values and optional labels
        self._instrument = devlib.EnergyProbeInstrument(
            self._target,
            resistor_values=resistor_values,
            labels=labels,
            device_entry=device_entry,
        )

        # Configure channels for energy measurements
        logger.debug('Enabling channels')
        self._instrument.reset()

        # Logging enabled channels
        logger.info('Channels selected for energy sampling: {}'.format(
            self._instrument.active_channels
        ))
        logger.debug('Results dir: {}'.format(self._res_dir))
class MonsoonConf(SimpleMultiSrcConf, HideExekallID):
    """
    Configuration class for :class:`Monsoon`.

    {generated_help}
    """
    # Schema: channel mapping plus the paths needed to drive the Monsoon
    # command-line tool.
    STRUCTURE = TopLevelKeyDesc('monsoon-conf', 'Monsoon Energy Meter configuration', (
        KeyDesc('channel-map', 'Channels to use', [Mapping]),
        KeyDesc('monsoon-bin', 'monsoon binary path', [str]),
        KeyDesc('tty-device', 'TTY device to use', [str]),
    ))
class Monsoon(_DevlibContinuousEnergyMeter):
    """
    Monsoon Solutions energy meter

    Continuous sampling via devlib's ``MonsoonInstrument``; reset/report
    behaviour is inherited from :class:`_DevlibContinuousEnergyMeter`.

    {configurable_params}
    """
    name = 'monsoon'
    CONF_CLASS = MonsoonConf

    def __init__(self, target, monsoon_bin=None, tty_device=None, res_dir=None):
        super().__init__(target, res_dir)
        self._instrument = devlib.MonsoonInstrument(self._target,
            monsoon_bin=monsoon_bin, tty_device=tty_device)
        # put the instrument in a known state before sampling starts
        self._instrument.reset()
_acme_install_instructions = '''
If you need to measure energy using an ACME EnergyProbe,
please do follow installation instructions available here:
https://github.com/ARM-software/lisa/wiki/Energy-Meters-Requirements#iiocapture---baylibre-acme-cape
Othwerwise, please select a different energy meter in your
configuration file.
'''
class ACMEConf(SimpleMultiSrcConf, HideExekallID):
    """
    Configuration class for :class:`ACME`.

    {generated_help}
    """
    # Schema: channel mapping, the ACME board's network address, and the
    # path to the iio-capture client binary.
    STRUCTURE = TopLevelKeyDesc('acme-conf', 'ACME Energy Meter configuration', (
        KeyDesc('channel-map', 'Channels to use', [Mapping]),
        KeyDesc('host', 'Hostname or IP address of the ACME board', [str]),
        KeyDesc('iio-capture-bin', 'path to iio-capture binary', [str]),
    ))
class ACME(EnergyMeter):
"""
BayLibre's ACME board based EnergyMeter
{configurable_params}
"""
name = 'acme'
CONF_CLASS = ACMEConf
REPORT_DELAY_S = 2.0
"""
iio-capture returns an empty string if killed right after its invocation,
so we have to enforce a delay between reset() and report()
"""
def __init__(self, target,
channel_map={'CH0': 0},
host='baylibre-acme.local', iio_capture_bin='iio-capture',
res_dir=None):
super().__init__(target, res_dir)
logger = self.get_logger()
self._iiocapturebin = iio_capture_bin
self._hostname = host
# Make a copy to be sure to never modify the default value
self._channels = dict(channel_map)
self._iio = {}
logger.info('ACME configuration:')
logger.info(' binary: {}'.format(self._iiocapturebin))
logger.info(' device: {}'.format(self._hostname))
logger.info(' channels: {}'.format(', '.join(
self._str(channel) for channel in self._channels
)))
# Check if iio-capture binary is available
try:
p = subprocess.call([self._iiocapturebin, '-h'], stdout=PIPE, stderr=STDOUT)
except FileNotFoundError as e:
logger.error('iio-capture binary {} not available'.format(
self._iiocapturebin
))
logger.warning(_acme_install_instructions)
raise FileNotFoundError('Missing iio-capture binary') from e
def sample(self):
raise NotImplementedError('Not available for ACME')
def _iio_device(self, channel):
return 'iio:device{}'.format(self._channels[channel])
def _str(self, channel):
return '{} ({})'.format(channel, self._iio_device(channel))
def reset(self):
"""
Reset energy meter and start sampling from channels specified in the
target configuration.
"""
logger = self.get_logger()
# Terminate already running iio-capture instance (if any)
wait_for_termination = 0
for proc in psutil.process_iter():
if self._iiocapturebin not in proc.cmdline():
continue
for channel in self._channels:
if self._iio_device(channel) in proc.cmdline():
logger.debug('Killing previous iio-capture for {}'.format(
self._iio_device(channel)))
logger.debug(proc.cmdline())
proc.kill()
wait_for_termination = 2
# Wait for previous instances to be killed
sleep(wait_for_termination)
# Start iio-capture for all channels required
for channel in self._channels:
ch_id = self._channels[channel]
# Setup CSV file to collect samples for this channel
csv_file = ArtifactPath.join(self._res_dir, 'samples_{}.csv'.format(channel))
# Start a dedicated iio-capture instance for this channel
self._iio[ch_id] = Popen(['stdbuf', '-i0', '-o0', '-e0',
self._iiocapturebin, '-n',
self._hostname, '-o',
'-c', '-f',
str(csv_file),
self._iio_device(channel)],
stdout=PIPE, stderr=STDOUT,
universal_newlines=True)
# Wait some time before to check if there is any output
sleep(1)
# Check that all required channels have been started
for channel in self._channels:
ch_id = self._channels[channel]
self._iio[ch_id].poll()
if self._iio[ch_id].returncode:
logger.error('Failed to run {} for {}'.format(
self._iiocapturebin, self._str(channel)
))
logger.warning('Make sure there are no iio-capture processes connected to {} and device {}'.format(self._hostname, self._str(channel)))
out, _ = self._iio[ch_id].communicate()
logger.error('Output: {}'.format(out.strip()))
self._iio[ch_id] = None
raise RuntimeError('iio-capture connection error')
logger.debug('Started {} on {}...'.format(
self._iiocapturebin, self._str(channel)))
self.reset_time = time.monotonic()
def report(self, out_dir, out_energy='energy.json'):
    """
    Stop iio-capture and collect sampled data.

    :param out_dir: Output directory where to store results
    :type out_dir: str

    :param out_energy: File name where to save energy data
    :type out_energy: str
    """
    # Make sure at least REPORT_DELAY_S of samples have been collected
    # since the last reset()
    delta = time.monotonic() - self.reset_time
    if delta < self.REPORT_DELAY_S:
        sleep(self.REPORT_DELAY_S - delta)
    logger = self.get_logger()
    channels_nrg = {}
    channels_stats = {}
    for channel, ch_id in self._channels.items():
        # Channel never started, or already errored out in reset()
        if self._iio[ch_id] is None:
            continue
        self._iio[ch_id].poll()
        if self._iio[ch_id].returncode:
            # returncode not None means that iio-capture has terminated
            # already, so there must have been an error
            out, _ = self._iio[ch_id].communicate()
            logger.error('{} terminated for {}: {}'.format(
                self._iiocapturebin, self._str(channel), out
            ))
            self._iio[ch_id] = None
            continue
        # kill process and get return
        self._iio[ch_id].terminate()
        out, _ = self._iio[ch_id].communicate()
        self._iio[ch_id].wait()
        self._iio[ch_id] = None
        # iio-capture return "energy=value", add a simple format check
        if '=' not in out:
            logger.error('Bad output format for {}: {}'.format(
                self._str(channel), out
            ))
            continue
        else:
            logger.debug('{}: {}'.format(self._str(channel), out))
        # Build energy counter object from "key=value" pairs
        nrg = {}
        for kv_pair in out.split():
            key, val = kv_pair.partition('=')[::2]
            nrg[key] = float(val)
        channels_stats[channel] = nrg
        logger.debug(self._str(channel))
        logger.debug(nrg)
        # Move the per-channel samples CSV next to the other results
        src = os.path.join(self._res_dir, 'samples_{}.csv'.format(channel))
        shutil.move(src, out_dir)
        # Add channel's energy to return results
        channels_nrg['{}'.format(channel)] = nrg['energy']
    # Dump energy data
    nrg_file = os.path.join(out_dir, out_energy)
    with open(nrg_file, 'w') as ofile:
        json.dump(channels_nrg, ofile, sort_keys=True, indent=4)
    # Dump energy stats to "<out_energy basename>_stats<ext>"
    nrg_stats_file = os.path.splitext(out_energy)[0] + \
        '_stats' + os.path.splitext(out_energy)[1]
    nrg_stats_file = os.path.join(out_dir, nrg_stats_file)
    with open(nrg_stats_file, 'w') as ofile:
        json.dump(channels_stats, ofile, sort_keys=True, indent=4)
    return EnergyReport(channels_nrg, nrg_file, None)
class Gem5EnergyMeterConf(SimpleMultiSrcConf, HideExekallID):
    """
    Configuration class for :class:`Gem5EnergyMeter`.

    {generated_help}
    """
    # Single 'channel-map' key: a mapping whose values are used as gem5
    # power sites by Gem5EnergyMeter.__init__
    STRUCTURE = TopLevelKeyDesc('gem5-energy-meter-conf', 'Gem5 Energy Meter configuration', (
        KeyDesc('channel-map', 'Channels to use', [Mapping]),
    ))
class Gem5EnergyMeter(_DevlibContinuousEnergyMeter):
    """Energy meter backed by devlib's gem5 power instrument."""
    name = 'gem5'
    CONF_CLASS = Gem5EnergyMeterConf

    def __init__(self, target, channel_map, res_dir=None):
        super().__init__(target, res_dir)
        # Each value of the channel map is a gem5 power "site" to sample
        power_sites = list(channel_map.values())
        self._instrument = devlib.Gem5PowerInstrument(self._target, power_sites)

    def reset(self):
        # Drop any previous samples and start a new measurement window
        self._instrument.reset()
        self._instrument.start()

    def _build_timeline(self, df):
        # Power measurements on gem5 are performed not only periodically but also
        # spuriously on OPP changes. Let's use the time channel provided by the
        # gem5 power instrument to build the timeline accordingly.
        # NOTE(review): assumes df contains at least one (site, 'time')
        # column, otherwise meas_dur below is unbound -- confirm against the
        # Gem5PowerInstrument output format.
        for site, measure in df:
            if measure == 'time':
                meas_dur = df[site]['time']
                break
        timeline = np.zeros(len(meas_dur))
        # The time channel gives the elapsed time since previous measurement
        for i in range(1, len(meas_dur)):
            timeline[i] = meas_dur[i] + timeline[i - 1]
        df.index = timeline
        return df
# vim :set tabstop=4 shiftwidth=4 expandtab textwidth=80
| 34.687888 | 151 | 0.60137 |
5a8b77cf5e793424d2c73d417ea5473e5b61876a | 3,594 | py | Python | StockAnalysisSystem/plugin/Collector/index_data_tushare_pro.py | SleepySoft/StockAnalysisSystem | 75f95738831614f7946f85d09118e447f7ac6dc7 | [
"Apache-2.0"
] | 138 | 2018-01-03T03:32:49.000Z | 2022-03-12T02:57:46.000Z | StockAnalysisSystem/plugin/Collector/index_data_tushare_pro.py | SleepySoft/StockAnalysisSystem | 75f95738831614f7946f85d09118e447f7ac6dc7 | [
"Apache-2.0"
] | 9 | 2018-01-01T03:16:24.000Z | 2021-05-27T09:57:24.000Z | StockAnalysisSystem/plugin/Collector/index_data_tushare_pro.py | SleepySoft/StockAnalysisSystem | 75f95738831614f7946f85d09118e447f7ac6dc7 | [
"Apache-2.0"
] | 50 | 2019-08-05T01:02:30.000Z | 2022-03-07T00:52:14.000Z | import pandas as pd
import tushare as ts
from StockAnalysisSystem.core.config import TS_TOKEN
from StockAnalysisSystem.core.Utility.common import *
from StockAnalysisSystem.core.Utility.time_utility import *
from StockAnalysisSystem.core.Utility.CollectorUtility import *
# ------------------------------------------------------- Fields -------------------------------------------------------
# Tables served by this collector plugin. Keys are data URIs; values map
# tushare column names to their human-readable (Chinese) labels.
FIELDS = {
    'TradeData.Index.Daily': {
        'ts_code': 'TS指数代码',
        'trade_date': '交易日',
        'close': '收盘点位',
        'open': '开盘点位',
        'high': '最高点位',
        'low': '最低点位',
        'pre_close': '昨日收盘点',
        'change': '涨跌点',
        'pct_chg': '涨跌幅',  # (%)
        'vol': '成交量',  # (lots)
        'amount': '成交额',  # (thousand CNY)
    },
}
# -------------------------------------------------------- Prob --------------------------------------------------------
def plugin_prob() -> dict:
    """Return the descriptor (name, version, tags) of this collector plugin."""
    descriptor = {
        'plugin_name': 'index_data_tushare_pro',
        'plugin_version': '0.0.0.1',
        'tags': ['tusharepro'],
    }
    return descriptor
def plugin_adapt(uri: str) -> bool:
    """Tell the collector framework whether this plugin can serve *uri*."""
    return uri in FIELDS
def plugin_capacities() -> list:
    """List every data URI this plugin is able to serve."""
    return list(FIELDS)
# ----------------------------------------------------------------------------------------------------------------------
# index_daily: https://tushare.pro/document/2?doc_id=95
def __fetch_index_data_daily(**kwargs) -> pd.DataFrame:
    """
    Fetch daily index quotes from tushare pro (index_daily API).

    Data is pulled in multi-year slices (to stay under the per-request row
    limit), concatenated, then the ts_code / trade_date columns are
    normalized. Honors the test/dump flags used by the collector framework.
    """
    uri = kwargs.get('uri')
    # In test mode this returns canned data and the network path is skipped
    result = check_execute_test_flag(**kwargs)
    if result is None:
        period = kwargs.get('trade_date')
        ts_code = pickup_ts_code(kwargs)
        since, until = normalize_time_serial(period, default_since(), today())
        pro = ts.pro_api(TS_TOKEN)
        time_iter = DateTimeIterator(since, until)
        result = None
        if is_slice_update(ts_code, since, until):
            # Slice update is not handled here: leave result empty
            result = None
        else:
            while not time_iter.end():
                # 8000 items per one time
                sub_since, sub_until = time_iter.iter_years(25)
                ts_since = sub_since.strftime('%Y%m%d')
                ts_until = sub_until.strftime('%Y%m%d')
                clock = Clock()
                # Throttle to respect tushare's API rate limits
                ts_delay('index_daily')
                sub_result = pro.index_daily(ts_code=ts_code, start_date=ts_since, end_date=ts_until)
                print('%s: [%s] - Network finished, time spending: %sms' % (uri, ts_code, clock.elapsed_ms()))
                result = pd.concat([result, sub_result], ignore_index=True)
        check_execute_dump_flag(result, **kwargs)
    if result is not None:
        convert_ts_code_field(result)
        convert_ts_date_field(result, 'trade_date')
        # result['trade_date'] = pd.to_datetime(result['trade_date'])
        # result['stock_identity'] = result['ts_code'].apply(ts_code_to_stock_identity)
    return result
# ----------------------------------------------------------------------------------------------------------------------
def query(**kwargs) -> pd.DataFrame or None:
    """Plugin entry point: route supported URIs to the daily index fetcher."""
    uri = kwargs.get('uri')
    if uri not in FIELDS:
        return None
    return __fetch_index_data_daily(**kwargs)
def validate(**kwargs) -> bool:
    # Validation hook required by the collector framework; this plugin has
    # nothing extra to check, so it always succeeds.
    nop(kwargs)
    return True
def fields() -> dict:
    # Expose the field definitions declared at the top of this module
    return FIELDS
| 31.80531 | 120 | 0.46995 |
d837d738a0b2aced11dfee63ccc0883b35ddfcad | 5,428 | py | Python | tests/models/test_legacy.py | kyleheyne/mopidy | 3b1b0dd2e9052cadb7bd1a29e724498f9fba7bca | [
"Apache-2.0"
] | 2 | 2019-02-13T15:16:55.000Z | 2019-02-18T08:47:29.000Z | tests/models/test_legacy.py | kyleheyne/mopidy | 3b1b0dd2e9052cadb7bd1a29e724498f9fba7bca | [
"Apache-2.0"
] | 40 | 2019-02-13T09:33:00.000Z | 2019-02-19T13:21:12.000Z | tests/models/test_legacy.py | kyleheyne/mopidy | 3b1b0dd2e9052cadb7bd1a29e724498f9fba7bca | [
"Apache-2.0"
] | 1 | 2020-03-10T05:11:49.000Z | 2020-03-10T05:11:49.000Z | from __future__ import absolute_import, unicode_literals
import unittest
from mopidy.models import ImmutableObject
class Model(ImmutableObject):
    """Fixture model: two plain fields plus a frozenset-valued field."""
    uri = None
    name = None
    models = frozenset()

    def __init__(self, *args, **kwargs):
        # 'models' is popped before the immutable base class sees the kwargs,
        # and stored directly in __dict__ to bypass immutability enforcement.
        self.__dict__['models'] = frozenset(kwargs.pop('models', None) or [])
        # NOTE(review): passing ``self`` explicitly as a positional argument
        # looks redundant -- confirm against ImmutableObject.__init__'s
        # signature.
        super(Model, self).__init__(self, *args, **kwargs)
class SubModel(ImmutableObject):
    """Minimal nested fixture model, used as the element type of Model.models."""
    uri = None
    name = None
class GenericCopyTest(unittest.TestCase):
    """Tests for copying models via ``replace()``."""

    def compare(self, orig, other):
        # A copy must be equal in value but be a distinct object
        self.assertEqual(orig, other)
        self.assertNotEqual(id(orig), id(other))

    def test_copying_model(self):
        model = Model()
        self.compare(model, model.replace())

    def test_copying_model_with_basic_values(self):
        # Only the replaced field changes; others are carried over
        model = Model(name='foo', uri='bar')
        other = model.replace(name='baz')
        self.assertEqual('baz', other.name)
        self.assertEqual('bar', other.uri)

    def test_copying_model_with_missing_values(self):
        model = Model(uri='bar')
        other = model.replace(name='baz')
        self.assertEqual('baz', other.name)
        self.assertEqual('bar', other.uri)

    def test_copying_model_with_private_internal_value(self):
        # The frozenset-backed 'models' field is replaceable too
        model = Model(models=[SubModel(name=123)])
        other = model.replace(models=[SubModel(name=345)])
        self.assertIn(SubModel(name=345), other.models)

    def test_copying_model_with_invalid_key(self):
        with self.assertRaises(TypeError):
            Model().replace(invalid_key=True)

    def test_copying_model_to_remove(self):
        # Replacing a field with None resets it to the class default
        model = Model(name='foo').replace(name=None)
        self.assertEqual(model, Model())
class ModelTest(unittest.TestCase):
    """Field access, immutability, repr, serialization, equality and hashing."""

    def test_uri(self):
        uri = 'an_uri'
        model = Model(uri=uri)
        self.assertEqual(model.uri, uri)
        # Fields are read-only after construction
        with self.assertRaises(AttributeError):
            model.uri = None

    def test_name(self):
        name = 'a name'
        model = Model(name=name)
        self.assertEqual(model.name, name)
        with self.assertRaises(AttributeError):
            model.name = None

    def test_submodels(self):
        models = [SubModel(name=123), SubModel(name=456)]
        model = Model(models=models)
        self.assertEqual(set(model.models), set(models))
        with self.assertRaises(AttributeError):
            model.models = None

    def test_models_none(self):
        # Passing None is normalized to an empty frozenset
        self.assertEqual(set(), Model(models=None).models)

    def test_invalid_kwarg(self):
        with self.assertRaises(TypeError):
            Model(foo='baz')

    def test_repr_without_models(self):
        self.assertEqual(
            "Model(name=u'name', uri=u'uri')",
            repr(Model(uri='uri', name='name')))

    def test_repr_with_models(self):
        self.assertEqual(
            "Model(models=[SubModel(name=123)], name=u'name', uri=u'uri')",
            repr(Model(uri='uri', name='name', models=[SubModel(name=123)])))

    def test_serialize_without_models(self):
        self.assertDictEqual(
            {'__model__': 'Model', 'uri': 'uri', 'name': 'name'},
            Model(uri='uri', name='name').serialize())

    def test_serialize_with_models(self):
        # Nested models are serialized recursively
        submodel = SubModel(name=123)
        self.assertDictEqual(
            {'__model__': 'Model', 'uri': 'uri', 'name': 'name',
             'models': [submodel.serialize()]},
            Model(uri='uri', name='name', models=[submodel]).serialize())

    def test_eq_uri(self):
        model1 = Model(uri='uri1')
        model2 = Model(uri='uri1')
        self.assertEqual(model1, model2)
        self.assertEqual(hash(model1), hash(model2))

    def test_eq_name(self):
        model1 = Model(name='name1')
        model2 = Model(name='name1')
        self.assertEqual(model1, model2)
        self.assertEqual(hash(model1), hash(model2))

    def test_eq_models(self):
        models = [SubModel()]
        model1 = Model(models=models)
        model2 = Model(models=models)
        self.assertEqual(model1, model2)
        self.assertEqual(hash(model1), hash(model2))

    def test_eq_models_order(self):
        # 'models' is a set, so ordering must not affect equality/hash
        submodel1 = SubModel(name='name1')
        submodel2 = SubModel(name='name2')
        model1 = Model(models=[submodel1, submodel2])
        model2 = Model(models=[submodel2, submodel1])
        self.assertEqual(model1, model2)
        self.assertEqual(hash(model1), hash(model2))

    def test_eq_none(self):
        self.assertNotEqual(Model(), None)

    def test_eq_other(self):
        self.assertNotEqual(Model(), 'other')

    def test_ne_uri(self):
        model1 = Model(uri='uri1')
        model2 = Model(uri='uri2')
        self.assertNotEqual(model1, model2)
        self.assertNotEqual(hash(model1), hash(model2))

    def test_ne_name(self):
        model1 = Model(name='name1')
        model2 = Model(name='name2')
        self.assertNotEqual(model1, model2)
        self.assertNotEqual(hash(model1), hash(model2))

    def test_ne_models(self):
        model1 = Model(models=[SubModel(name='name1')])
        model2 = Model(models=[SubModel(name='name2')])
        self.assertNotEqual(model1, model2)
        self.assertNotEqual(hash(model1), hash(model2))

    def test_ignores_values_with_default_value_none(self):
        # Explicitly passing the default (None) is the same as omitting it
        model1 = Model(name='name1')
        model2 = Model(name='name1', uri=None)
        self.assertEqual(model1, model2)
        self.assertEqual(hash(model1), hash(model2))
| 32.89697 | 77 | 0.629698 |
f8c7b4f8b150365f75b15a702d4197ead8a6514d | 1,203 | py | Python | jacdac/light_bulb/client.py | microsoft/jacdac-python | 712ad5559e29065f5eccb5dbfe029c039132df5a | [
"MIT"
] | 1 | 2022-02-15T21:30:36.000Z | 2022-02-15T21:30:36.000Z | jacdac/light_bulb/client.py | microsoft/jacdac-python | 712ad5559e29065f5eccb5dbfe029c039132df5a | [
"MIT"
] | null | null | null | jacdac/light_bulb/client.py | microsoft/jacdac-python | 712ad5559e29065f5eccb5dbfe029c039132df5a | [
"MIT"
] | 1 | 2022-02-08T19:32:45.000Z | 2022-02-08T19:32:45.000Z | # Autogenerated file. Do not edit.
from jacdac.bus import Bus, Client
from .constants import *
from typing import Optional
class LightBulbClient(Client):
    """
    A light bulb controller.
    Implements a client for the `Light bulb <https://microsoft.github.io/jacdac-docs/services/lightbulb>`_ service.
    """

    def __init__(self, bus: Bus, role: str) -> None:
        super().__init__(bus, JD_SERVICE_CLASS_LIGHT_BULB, JD_LIGHT_BULB_PACK_FORMATS, role)

    @property
    def brightness(self) -> Optional[float]:
        """
        Indicates the brightness of the light bulb. Zero means completely off and 0xffff means completely on.
        For non-dimmable lights, the value should be clamp to 0xffff for any non-zero value., _: /
        """
        # Scale factor 100: the register value is exposed as a percentage
        # (the setter divides by 100 on the way back)
        return self.register(JD_LIGHT_BULB_REG_BRIGHTNESS).float_value(100)

    @brightness.setter
    def brightness(self, value: float) -> None:
        # Convert the percentage back to the register's fractional unit
        self.register(JD_LIGHT_BULB_REG_BRIGHTNESS).set_values(value / 100)

    @property
    def dimmable(self) -> Optional[bool]:
        """
        (Optional) Indicates if the light supports dimming.,
        """
        return self.register(JD_LIGHT_BULB_REG_DIMMABLE).bool_value()
| 30.846154 | 115 | 0.684123 |
f6504bcad6d9ae92a3dc424b60aef25053b8a442 | 18,682 | py | Python | python/ccxt/bit2c.py | sky1008/ccxt | dc094a17dddb2f5fd703901f0b0a08d3fa2618f4 | [
"MIT"
] | 1 | 2022-03-20T07:15:44.000Z | 2022-03-20T07:15:44.000Z | python/ccxt/bit2c.py | sky1008/ccxt | dc094a17dddb2f5fd703901f0b0a08d3fa2618f4 | [
"MIT"
] | null | null | null | python/ccxt/bit2c.py | sky1008/ccxt | dc094a17dddb2f5fd703901f0b0a08d3fa2618f4 | [
"MIT"
] | 2 | 2022-03-08T20:43:26.000Z | 2022-03-14T19:28:27.000Z | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
import hashlib
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import InvalidNonce
class bit2c(Exchange):
    """
    ccxt adapter for the Bit2C exchange (Israel).

    Fixes relative to the previous revision:
    - ``parse_trade``: market lookup used ``marketId in self.markets_by_id[marketId]``
      which raises ``KeyError`` for unknown markets; membership is now tested
      against ``self.markets_by_id`` itself.
    - ``fetch_my_trades``: ``request['take']`` was unconditionally re-assigned,
      sending ``take=None`` to the API when no limit was given; it is now only
      set when a limit is provided.
    """

    def describe(self):
        return self.deep_extend(super(bit2c, self).describe(), {
            'id': 'bit2c',
            'name': 'Bit2C',
            'countries': ['IL'],  # Israel
            'rateLimit': 3000,
            'has': {
                'CORS': None,
                'spot': True,
                'margin': False,
                'swap': False,
                'future': False,
                'option': False,
                'addMargin': False,
                'cancelOrder': True,
                'createOrder': True,
                'createReduceOnlyOrder': False,
                'fetchBalance': True,
                'fetchBorrowRate': False,
                'fetchBorrowRateHistories': False,
                'fetchBorrowRateHistory': False,
                'fetchBorrowRates': False,
                'fetchBorrowRatesPerSymbol': False,
                'fetchFundingHistory': False,
                'fetchFundingRate': False,
                'fetchFundingRateHistory': False,
                'fetchFundingRates': False,
                'fetchIndexOHLCV': False,
                'fetchIsolatedPositions': False,
                'fetchLeverage': False,
                'fetchLeverageTiers': False,
                'fetchMarkOHLCV': False,
                'fetchMyTrades': True,
                'fetchOpenOrders': True,
                'fetchOrderBook': True,
                'fetchPosition': False,
                'fetchPositions': False,
                'fetchPositionsRisk': False,
                'fetchPremiumIndexOHLCV': False,
                'fetchTicker': True,
                'fetchTrades': True,
                'reduceMargin': False,
                'setLeverage': False,
                'setMarginMode': False,
                'setPositionMode': False,
            },
            'urls': {
                'logo': 'https://user-images.githubusercontent.com/1294454/27766119-3593220e-5ece-11e7-8b3a-5a041f6bcc3f.jpg',
                'api': 'https://bit2c.co.il',
                'www': 'https://www.bit2c.co.il',
                'referral': 'https://bit2c.co.il/Aff/63bfed10-e359-420c-ab5a-ad368dab0baf',
                'doc': [
                    'https://www.bit2c.co.il/home/api',
                    'https://github.com/OferE/bit2c',
                ],
            },
            'api': {
                'public': {
                    'get': [
                        'Exchanges/{pair}/Ticker',
                        'Exchanges/{pair}/orderbook',
                        'Exchanges/{pair}/trades',
                        'Exchanges/{pair}/lasttrades',
                    ],
                },
                'private': {
                    'post': [
                        'Merchant/CreateCheckout',
                        'Order/AddCoinFundsRequest',
                        'Order/AddFund',
                        'Order/AddOrder',
                        'Order/AddOrderMarketPriceBuy',
                        'Order/AddOrderMarketPriceSell',
                        'Order/CancelOrder',
                        'Order/AddCoinFundsRequest',
                        'Order/AddStopOrder',
                        'Payment/GetMyId',
                        'Payment/Send',
                        'Payment/Pay',
                    ],
                    'get': [
                        'Account/Balance',
                        'Account/Balance/v2',
                        'Order/MyOrders',
                        'Order/GetById',
                        'Order/AccountHistory',
                        'Order/OrderHistory',
                    ],
                },
            },
            'markets': {
                'BTC/NIS': {'id': 'BtcNis', 'symbol': 'BTC/NIS', 'base': 'BTC', 'quote': 'NIS', 'baseId': 'Btc', 'quoteId': 'Nis', 'type': 'spot', 'spot': True},
                'ETH/NIS': {'id': 'EthNis', 'symbol': 'ETH/NIS', 'base': 'ETH', 'quote': 'NIS', 'baseId': 'Eth', 'quoteId': 'Nis', 'type': 'spot', 'spot': True},
                'BCH/NIS': {'id': 'BchabcNis', 'symbol': 'BCH/NIS', 'base': 'BCH', 'quote': 'NIS', 'baseId': 'Bchabc', 'quoteId': 'Nis', 'type': 'spot', 'spot': True},
                'LTC/NIS': {'id': 'LtcNis', 'symbol': 'LTC/NIS', 'base': 'LTC', 'quote': 'NIS', 'baseId': 'Ltc', 'quoteId': 'Nis', 'type': 'spot', 'spot': True},
                'ETC/NIS': {'id': 'EtcNis', 'symbol': 'ETC/NIS', 'base': 'ETC', 'quote': 'NIS', 'baseId': 'Etc', 'quoteId': 'Nis', 'type': 'spot', 'spot': True},
                'BTG/NIS': {'id': 'BtgNis', 'symbol': 'BTG/NIS', 'base': 'BTG', 'quote': 'NIS', 'baseId': 'Btg', 'quoteId': 'Nis', 'type': 'spot', 'spot': True},
                'BSV/NIS': {'id': 'BchsvNis', 'symbol': 'BSV/NIS', 'base': 'BSV', 'quote': 'NIS', 'baseId': 'Bchsv', 'quoteId': 'Nis', 'type': 'spot', 'spot': True},
                'GRIN/NIS': {'id': 'GrinNis', 'symbol': 'GRIN/NIS', 'base': 'GRIN', 'quote': 'NIS', 'baseId': 'Grin', 'quoteId': 'Nis', 'type': 'spot', 'spot': True},
            },
            'fees': {
                'trading': {
                    'maker': self.parse_number('0.005'),
                    'taker': self.parse_number('0.005'),
                },
            },
            'options': {
                'fetchTradesMethod': 'public_get_exchanges_pair_trades',
            },
            'exceptions': {
                'exact': {
                    'Please provide valid APIkey': AuthenticationError,  # {"error" : "Please provide valid APIkey"}
                },
                'broad': {
                    # {"error": "Please provide valid nonce in Request Nonce(1598218490) is not bigger than last nonce(1598218490)."}
                    # {"error": "Please provide valid nonce in Request UInt64.TryParse failed for nonce :"}
                    'Please provide valid nonce': InvalidNonce,
                    'please approve new terms of use on site': PermissionDenied,  # {"error" : "please approve new terms of use on site."}
                },
            },
        })

    def parse_balance(self, response):
        """Convert a raw Account/Balance/v2 payload into a unified balance."""
        result = {
            'info': response,
            'timestamp': None,
            'datetime': None,
        }
        codes = list(self.currencies.keys())
        for i in range(0, len(codes)):
            code = codes[i]
            account = self.account()
            currency = self.currency(code)
            uppercase = currency['id'].upper()
            # Balances are keyed by upper-cased currency id, with the free
            # part under an 'AVAILABLE_' prefix
            if uppercase in response:
                account['free'] = self.safe_string(response, 'AVAILABLE_' + uppercase)
                account['total'] = self.safe_string(response, uppercase)
                result[code] = account
        return self.safe_balance(result)

    def fetch_balance(self, params={}):
        """Fetch account balances for all currencies supported by Bit2C."""
        self.load_markets()
        response = self.privateGetAccountBalanceV2(params)
        #
        #     {
        #         "AVAILABLE_NIS": 0.0,
        #         "NIS": 0.0,
        #         "LOCKED_NIS": 0.0,
        #         "AVAILABLE_BTC": 0.0,
        #         "BTC": 0.0,
        #         "LOCKED_BTC": 0.0,
        #         "AVAILABLE_ETH": 0.0,
        #         "ETH": 0.0,
        #         "LOCKED_ETH": 0.0,
        #         "AVAILABLE_BCHSV": 0.0,
        #         "BCHSV": 0.0,
        #         "LOCKED_BCHSV": 0.0,
        #         "AVAILABLE_BCHABC": 0.0,
        #         "BCHABC": 0.0,
        #         "LOCKED_BCHABC": 0.0,
        #         "AVAILABLE_LTC": 0.0,
        #         "LTC": 0.0,
        #         "LOCKED_LTC": 0.0,
        #         "AVAILABLE_ETC": 0.0,
        #         "ETC": 0.0,
        #         "LOCKED_ETC": 0.0,
        #         "AVAILABLE_BTG": 0.0,
        #         "BTG": 0.0,
        #         "LOCKED_BTG": 0.0,
        #         "AVAILABLE_GRIN": 0.0,
        #         "GRIN": 0.0,
        #         "LOCKED_GRIN": 0.0,
        #         "Fees": {
        #             "BtcNis": {"FeeMaker": 1.0, "FeeTaker": 1.0},
        #             "EthNis": {"FeeMaker": 1.0, "FeeTaker": 1.0},
        #             "BchabcNis": {"FeeMaker": 1.0, "FeeTaker": 1.0},
        #             "LtcNis": {"FeeMaker": 1.0, "FeeTaker": 1.0},
        #             "EtcNis": {"FeeMaker": 1.0, "FeeTaker": 1.0},
        #             "BtgNis": {"FeeMaker": 1.0, "FeeTaker": 1.0},
        #             "LtcBtc": {"FeeMaker": 1.0, "FeeTaker": 1.0},
        #             "BchsvNis": {"FeeMaker": 1.0, "FeeTaker": 1.0},
        #             "GrinNis": {"FeeMaker": 1.0, "FeeTaker": 1.0}
        #         }
        #     }
        #
        return self.parse_balance(response)

    def fetch_order_book(self, symbol, limit=None, params={}):
        """Fetch the order book for a symbol."""
        self.load_markets()
        request = {
            'pair': self.market_id(symbol),
        }
        orderbook = self.publicGetExchangesPairOrderbook(self.extend(request, params))
        return self.parse_order_book(orderbook, symbol)

    def parse_ticker(self, ticker, market=None):
        """Convert a raw ticker payload into the unified ticker structure."""
        symbol = self.safe_symbol(None, market)
        timestamp = self.milliseconds()
        averagePrice = self.safe_string(ticker, 'av')
        baseVolume = self.safe_string(ticker, 'a')
        last = self.safe_string(ticker, 'll')
        return self.safe_ticker({
            'symbol': symbol,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'high': None,
            'low': None,
            'bid': self.safe_string(ticker, 'h'),
            'bidVolume': None,
            'ask': self.safe_string(ticker, 'l'),
            'askVolume': None,
            'vwap': None,
            'open': None,
            'close': last,
            'last': last,
            'previousClose': None,
            'change': None,
            'percentage': None,
            'average': averagePrice,
            'baseVolume': baseVolume,
            'quoteVolume': None,
            'info': ticker,
        }, market, False)

    def fetch_ticker(self, symbol, params={}):
        """Fetch the latest ticker for a symbol."""
        self.load_markets()
        market = self.market(symbol)
        request = {
            'pair': market['id'],
        }
        response = self.publicGetExchangesPairTicker(self.extend(request, params))
        return self.parse_ticker(response, market)

    def fetch_trades(self, symbol, since=None, limit=None, params={}):
        """Fetch public trades for a symbol."""
        self.load_markets()
        market = self.market(symbol)
        # Endpoint is configurable via options['fetchTradesMethod']
        method = self.options['fetchTradesMethod']
        request = {
            'pair': market['id'],
        }
        if since is not None:
            request['date'] = int(since)
        if limit is not None:
            request['limit'] = limit  # max 100000
        response = getattr(self, method)(self.extend(request, params))
        # The API may answer with a plain error string instead of JSON
        if isinstance(response, str):
            raise ExchangeError(response)
        return self.parse_trades(response, market, since, limit)

    def create_order(self, symbol, type, side, amount, price=None, params={}):
        """Place a limit or market order."""
        self.load_markets()
        method = 'privatePostOrderAddOrder'
        request = {
            'Amount': amount,
            'Pair': self.market_id(symbol),
        }
        if type == 'market':
            # Market orders use dedicated buy/sell endpoints
            method += 'MarketPrice' + self.capitalize(side)
        else:
            request['Price'] = price
            request['Total'] = amount * price
            request['IsBid'] = (side == 'buy')
        response = getattr(self, method)(self.extend(request, params))
        return {
            'info': response,
            'id': response['NewOrder']['id'],
        }

    def cancel_order(self, id, symbol=None, params={}):
        """Cancel an open order by id."""
        request = {
            'id': id,
        }
        return self.privatePostOrderCancelOrder(self.extend(request, params))

    def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
        """Fetch open orders; Bit2C requires a symbol."""
        if symbol is None:
            raise ArgumentsRequired(self.id + ' fetchOpenOrders() requires a symbol argument')
        self.load_markets()
        market = self.market(symbol)
        request = {
            'pair': market['id'],
        }
        response = self.privateGetOrderMyOrders(self.extend(request, params))
        # Response groups orders per market, split into 'ask' and 'bid' sides
        orders = self.safe_value(response, market['id'], {})
        asks = self.safe_value(orders, 'ask', [])
        bids = self.safe_value(orders, 'bid', [])
        return self.parse_orders(self.array_concat(asks, bids), market, since, limit)

    def parse_order(self, order, market=None):
        """Convert a raw order payload into the unified order structure."""
        timestamp = self.safe_integer(order, 'created')
        price = self.safe_string(order, 'price')
        amount = self.safe_string(order, 'amount')
        market = self.safe_market(None, market)
        # 0 = buy, 1 = sell
        side = self.safe_value(order, 'type')
        if side == 0:
            side = 'buy'
        elif side == 1:
            side = 'sell'
        id = self.safe_string(order, 'id')
        status = self.safe_string(order, 'status')
        return self.safe_order({
            'id': id,
            'clientOrderId': None,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'lastTradeTimestamp': None,
            'status': status,
            'symbol': market['symbol'],
            'type': None,
            'timeInForce': None,
            'postOnly': None,
            'side': side,
            'price': price,
            'stopPrice': None,
            'amount': amount,
            'filled': None,
            'remaining': None,
            'cost': None,
            'trades': None,
            'fee': None,
            'info': order,
            'average': None,
        }, market)

    def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
        """Fetch the account's trade history, optionally filtered by symbol/time."""
        self.load_markets()
        market = None
        request = {}
        # Fix: 'take' was previously also assigned unconditionally, sending
        # take=None to the API when no limit was requested
        if limit is not None:
            request['take'] = limit
        if since is not None:
            request['toTime'] = self.yyyymmdd(self.milliseconds(), '.')
            request['fromTime'] = self.yyyymmdd(since, '.')
        if symbol is not None:
            market = self.market(symbol)
            request['pair'] = market['id']
        response = self.privateGetOrderOrderHistory(self.extend(request, params))
        return self.parse_trades(response, market, since, limit)

    def parse_trade(self, trade, market=None):
        """
        Convert a raw trade into the unified trade structure.

        Private trades carry a 'reference' field ('pair|orderId|tradeId');
        public trades do not, and use a different field layout.
        """
        timestamp = None
        id = None
        price = None
        amount = None
        orderId = None
        fee = None
        side = None
        reference = self.safe_string(trade, 'reference')
        if reference is not None:
            timestamp = self.safe_timestamp(trade, 'ticks')
            price = self.safe_string(trade, 'price')
            amount = self.safe_string(trade, 'firstAmount')
            reference_parts = reference.split('|')  # reference contains 'pair|orderId|tradeId'
            if market is None:
                marketId = self.safe_string(trade, 'pair')
                # Fix: membership must be tested against markets_by_id itself,
                # not markets_by_id[marketId](KeyError for unknown markets)
                if marketId in self.markets_by_id:
                    market = self.markets_by_id[marketId]
                elif reference_parts[0] in self.markets_by_id:
                    market = self.markets_by_id[reference_parts[0]]
            orderId = reference_parts[1]
            id = reference_parts[2]
            # 0 = buy, 1 = sell
            side = self.safe_integer(trade, 'action')
            if side == 0:
                side = 'buy'
            elif side == 1:
                side = 'sell'
            feeCost = self.safe_string(trade, 'feeAmount')
            if feeCost is not None:
                fee = {
                    'cost': feeCost,
                    'currency': 'NIS',
                }
        else:
            timestamp = self.safe_timestamp(trade, 'date')
            id = self.safe_string(trade, 'tid')
            price = self.safe_string(trade, 'price')
            amount = self.safe_string(trade, 'amount')
            side = self.safe_value(trade, 'isBid')
            if side is not None:
                if side:
                    side = 'buy'
                else:
                    side = 'sell'
        market = self.safe_market(None, market)
        return self.safe_trade({
            'info': trade,
            'id': id,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'symbol': market['symbol'],
            'order': orderId,
            'type': None,
            'side': side,
            'takerOrMaker': None,
            'price': price,
            'amount': amount,
            'cost': None,
            'fee': fee,
        }, market)

    def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """Build a signed (private) or plain (public) HTTP request."""
        url = self.urls['api'] + '/' + self.implode_params(path, params)
        if api == 'public':
            url += '.json'
        else:
            self.check_required_credentials()
            nonce = self.nonce()
            query = self.extend({
                'nonce': nonce,
            }, params)
            auth = self.urlencode(query)
            if method == 'GET':
                if query:
                    url += '?' + auth
            else:
                body = auth
            # HMAC-SHA512 over the urlencoded query, base64-encoded
            signature = self.hmac(self.encode(auth), self.encode(self.secret), hashlib.sha512, 'base64')
            headers = {
                'Content-Type': 'application/x-www-form-urlencoded',
                'key': self.apiKey,
                'sign': signature,
            }
        return {'url': url, 'method': method, 'body': body, 'headers': headers}

    def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
        """Map API error payloads to ccxt exception types."""
        if response is None:
            return  # fallback to default error handler
        #
        #     {"error" : "please approve new terms of use on site."}
        #     {"error": "Please provide valid nonce in Request Nonce(1598218490) is not bigger than last nonce(1598218490)."}
        #
        error = self.safe_string(response, 'error')
        if error is not None:
            feedback = self.id + ' ' + body
            self.throw_exactly_matched_exception(self.exceptions['exact'], error, feedback)
            self.throw_broadly_matched_exception(self.exceptions['broad'], error, feedback)
            raise ExchangeError(feedback)  # unknown message
| 41.059341 | 167 | 0.488063 |
06da4846ccd52eaa3b96486cecb589fbdd58c436 | 2,666 | py | Python | daps/test/test_datasets.py | escorciav/deep-action-proposals | c14f512febc1abd0ec40bd3188a83e4ee3913535 | [
"MIT"
] | 28 | 2017-03-19T12:02:22.000Z | 2021-07-08T13:49:41.000Z | daps/test/test_datasets.py | escorciav/deep-action-proposals | c14f512febc1abd0ec40bd3188a83e4ee3913535 | [
"MIT"
] | 2 | 2018-05-07T07:43:15.000Z | 2018-12-14T16:06:48.000Z | daps/test/test_datasets.py | escorciav/deep-action-proposals | c14f512febc1abd0ec40bd3188a83e4ee3913535 | [
"MIT"
] | 7 | 2017-03-19T11:51:21.000Z | 2020-01-07T11:17:48.000Z | import unittest
import nose.tools as nt
import pandas as pd
from daps.datasets import Dataset, DatasetBase, ActivityNet, Thumos14
def test_Dataset():
    # The Dataset facade must wrap a DatasetBase subclass for every
    # supported dataset name and expose the two main query methods.
    for i in ['thumos14', 'activitynet']:
        ds = Dataset(i)
        nt.assert_is_instance(ds.wrapped_dataset, DatasetBase)
        # Assert main methods
        nt.assert_true(hasattr(ds.wrapped_dataset, 'segments_info'))
        nt.assert_true(hasattr(ds.wrapped_dataset, 'video_info'))
def test_DatasetBase():
    """The abstract base class must raise NotImplementedError on its query methods."""
    ds = DatasetBase()
    # Bug fix: nose.tools.raises is a decorator factory, not an assertion
    # call -- the previous nt.raises(...) calls returned an unused decorator
    # and asserted nothing. assert_raises actually invokes the callable and
    # checks that the expected exception is raised.
    nt.assert_raises(NotImplementedError, ds.segments_info)
    nt.assert_raises(NotImplementedError, ds.video_info)
class test_ActivityNet(unittest.TestCase):
    """Sanity checks on the ActivityNet dataset wrapper (requires local data)."""

    def setUp(self):
        # A bogus dirname must be rejected before building the real wrapper
        self.assertRaises(IOError, ActivityNet, 'nonexistent')
        self.anet = ActivityNet()

    def test_video_info(self):
        # Expected video counts per subset for this ActivityNet release
        df_train = self.anet.video_info('train')
        df_val = self.anet.video_info('val')
        df_test = self.anet.video_info('test')
        self.assertEqual(4819, df_train.shape[0])
        self.assertEqual(2383, df_val.shape[0])
        self.assertEqual(2480, df_test.shape[0])

    def test_segments_info(self):
        # Expected annotated-segment counts (test subset has no annotations)
        df_train = self.anet.segments_info('train')
        df_val = self.anet.segments_info('val')
        self.assertEqual(7151, df_train.shape[0])
        self.assertEqual(3582, df_val.shape[0])

    def test_index_from_action_name(self):
        action = 'Long jump'
        index = 80
        self.assertEqual(index, self.anet.index_from_action_name(action))
class test_Thumos14(unittest.TestCase):
    """Sanity checks on the Thumos14 dataset wrapper (requires local data)."""

    def setUp(self):
        self.assertRaises(IOError, Thumos14, 'nonexistent')
        self.thumos = Thumos14()

    def test_annotation_files(self):
        # Dummy test to verify correct number of files. An exhaustive test is
        # welcome
        self.assertEqual(21, len(self.thumos.annotation_files()))
        self.assertEqual(21, len(self.thumos.annotation_files('test')))

    def test_index_from_filename(self):
        # 'Ambiguous' annotations map to the sentinel index -1
        actions = ['SoccerPenalty_val.txt', 'b/Ambiguous_val.txt']
        idx = [16, -1]
        for i, v in enumerate(actions):
            self.assertEqual(idx[i], self.thumos.index_from_filename(v))

    def test_segments_info(self):
        # Both subsets must yield a DataFrame with the declared segment fields
        for i in ['val', 'test']:
            result = self.thumos.segments_info(i)
            self.assertTrue(isinstance(result, pd.DataFrame))
            self.assertEqual(len(self.thumos.fields_segment), result.shape[1])

    def test_video_info(self):
        # Both subsets must yield a DataFrame with the declared video fields
        for i in ['val', 'test']:
            result = self.thumos.video_info(i)
            self.assertTrue(isinstance(result, pd.DataFrame))
            self.assertEqual(len(self.thumos.fields_video), result.shape[1])
| 34.623377 | 78 | 0.666917 |
c406ea88672b70b4c306a76d8a96bb253354b8de | 6,296 | py | Python | ZotCollectionNotes.py | dkiesow/Zotero-Workflows | e4e727a59a8025691cd4ed74a2d19cbc34aa351c | [
"MIT"
] | 2 | 2021-01-27T19:17:09.000Z | 2022-02-26T22:27:40.000Z | ZotCollectionNotes.py | dkiesow/Zotero-Workflows | e4e727a59a8025691cd4ed74a2d19cbc34aa351c | [
"MIT"
] | 3 | 2020-12-13T15:13:28.000Z | 2020-12-31T02:23:24.000Z | ZotCollectionNotes.py | dkiesow/Zotero-Workflows | e4e727a59a8025691cd4ed74a2d19cbc34aa351c | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import config as cfg
from pyzotero import zotero
import datetime
import io
import sys
import re
# Export the "Extracted Annotations" notes of one Zotero collection (named on
# the command line) into a single RTF file.
userID = cfg.zotCollectionNotes["userID"]
secretKey = cfg.zotCollectionNotes["secretKey"]
filePath = cfg.zotCollectionNotes["filePath"]
# collectionQuery = cfg.zotCollectionNotes["collectionQuery"]
default = "None"
searchQuery = ""
collectionsListKeys = {}
noteItems = []
notes = []
notesHold = []
collectionParentID = []
parentID = {}
# Comment out the next line to test using the searchterm in config.py
collectionQuery = sys.argv[1]
# NOTE(review): pyzotero's Zotero() takes preserve_json_order as a keyword
# argument; here the literal string 'preserve_json_order = true' is passed
# positionally and merely happens to be truthy -- confirm intent.
zot = zotero.Zotero(userID, 'user', secretKey, 'preserve_json_order = true')
# we now have a Zotero object, zot, and access to all its methods
# create a list of collection keys
collectionsInfo = zot.collections()
i = 0
for i in range(len(collectionsInfo)):
    collectionsListKeys[(collectionsInfo[i]['data']['key'])] = dict(
        {'Name': collectionsInfo[i]['data']['name'], 'Parent': collectionsInfo[i]['data']['parentCollection'],
         'Key': collectionsInfo[i]['data']['key']})
'''
CollectionsListKeys dict then looks something like
u'55GCTGSE': {'Name': u'Innovation Theory', 'Parent': False}
u'789HEDID': {'Name': u'Memory', 'Parent': u'XCNYW8JH'}
'''
# Resolve the collection name given on the command line to its Zotero key.
for Name, Key in collectionsListKeys.items():
    if Key['Name'] == collectionQuery:
        searchQuery = Key['Key']
searchResult = zot.everything(zot.collection_items(searchQuery))
# Drop attachment items; only notes (and their parent documents) matter here.
indices = [i for i, n in enumerate(searchResult) if n['data']['itemType'] == 'attachment']
searchResult[:] = [j for i, j in enumerate(searchResult)
                   if i not in indices]
i = 0
for i in range(len(searchResult)):
    childHold = searchResult[i]
    if "note" in childHold['data']['itemType']:
        noteItems.append(childHold['data'])
# remove notes with 'the following values have no...'
indices = [i for i, n in enumerate(noteItems) if n['note'].startswith('The following values')]
noteItems[:] = [j for i, j in enumerate(noteItems)
                if i not in indices]
# build the body of the file
i = 0
for i in range(len(noteItems)):
    notesHold = (noteItems[i])
    notesRaw = notesHold['note']
    # Only notes produced by the "Extracted Annotations" workflow are kept.
    if notesRaw.startswith('<p><strong>Extracted Annotations') or notesRaw.startswith('<p><b>Extracted Annotations'):
        parentID = notesHold['parentItem']
        '''ID of Parent Document'''
        parentDoc = zot.item(parentID)
        '''Full data for Parent Document'''
        # First run of 4+ digits in the date field is taken as the year.
        match = re.search(r"(?<!\d)\d{4,20}(?!\d)", parentDoc['data']['date'])
        parentDate = match.group(0) if match else None
        '''Get publication date for Parent Document'''
        collectionID = parentDoc['data']['collections'][0]
        '''Get collectionID for Parent Document'''
        collectionParentID = collectionsListKeys[collectionID]['Parent']
        '''Get ID for Parent Collection'''
        # NOTE(review): str(False) == 'False' is truthy, so this condition is
        # always False and the branch below is dead code; presumably
        # `if collectionParentID:` (i.e. a parent exists) was intended --
        # confirm before changing, as it alters the breadcrumb output.
        if not str(collectionParentID):
            '''Minor error catching/branch if the collection is the parent'''
            parentCollectionName = collectionsListKeys[collectionParentID]['Name'] \
                if 'Name' in collectionsListKeys[collectionParentID] else default
            breadCrumb = parentCollectionName + "/" + collectionsListKeys[collectionID]['Name'] \
                if 'Name' in collectionsListKeys[collectionID] else default
            collectionTitle = parentCollectionName
        else:
            breadCrumb = collectionsListKeys[collectionID]['Name'] \
                if 'Name' in collectionsListKeys[collectionID] else default
            collectionName = breadCrumb
            collectionTitle = collectionName
        parentTitle = parentDoc['data']['title'] if 'title' in parentDoc['data'] else default
        parentCreators = parentDoc['meta']['creatorSummary'] if 'creatorSummary' in parentDoc['meta'] else default
        # The w/x/y assignments below are throwaway placeholders for the
        # "value present" branches; only the else-branches have an effect.
        if parentTitle:
            w = 1
        else:
            parentTitle = "No Title"
        if parentCreators:
            x = 1
        else:
            parentCreators = "No Author"
        if parentDate:
            y = 1
        else:
            parentDate = "N.d."
        if not notesRaw:
            package = "\\i " + "No Notes"
            notes.append(package)
        else:
            # One RTF entry: italic breadcrumb, bold title with year, author
            # summary, then the raw annotation HTML (converted to RTF below).
            package = "\\i " + str(
                breadCrumb) + "\\i0 \\line " + "\\fs28 \\b " + parentTitle + " (" + parentDate + ") " \
                      + " \\b0 \\fs22 \\line " + parentCreators + " \\line \\fs24 " + notesRaw
            notes.append(package)
        '''Some concatenation and appending to the overall file'''
output = "\\par".join(notes)
# various translations for RTF and quick replacement for characters that don't encode correctly
output = output.replace("(<a href=", "{\\field{\\*\\fldinst { HYPERLINK")
output = output.replace("\">", "}}{\\fldrslt {")
output = output.replace("</a>)", "}}}")
output = output.replace("<p>", "\\line")
output = output.replace("</p>", "\\line")
output = output.replace("<br>", "\\line")
output = output.replace("<strong>", "\\b ")
output = output.replace("</strong>", " \\b0")
output = output.replace("<b>", "\\b ")
output = output.replace("</b>", " \\b0")
output = output.replace("<i>", "\\i ")
output = output.replace("</i>", " \\i0")
output = output.replace("\u02d8", "˘")
output = output.replace("\u02C7", "˘")
output = output.replace("\x8e", "Ž")
output = output.replace("\u2212", "−")
output = output.replace("\u2715", "✕")
output = output.replace("\u03b5", "ε")
output = output.replace("\u0301", "́")
output = output.replace("\u2192", "→")
output = output.replace("\u25cf", "●")
output = output.replace("\u2015", "―")
timestamp = datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d')
# Minimal RTF header: cp1252 codepage plus basic font and color tables.
rtf = "{\\rtf1\\ansi\\ansicpg1252\\deff0\\deftab720{\\fonttbl{\\f0\\fswiss MS Sans Serif;}{\\f1\\froman\\fcharset2 " \
      "Symbol;}{\\f2\\fmodern\\fprq1 Courier New;}{\\f3\\froman Times New Roman;}}{\\colortbl\\red0\\green0\\" \
      "blue0;\\red0\\green0\\blue255;\\red255\\green0\\blue0;}\\deflang1033\\horzdoc{\\*\\fchars }{\\*\\lchars}"
f = io.open(filePath + collectionQuery + '_Zotero_notes_' + timestamp + '.rtf',
            'w+', encoding="cp1252", )
f.write(rtf)
f.write(output + "\\par")
f.write("}")
f.close()
| 41.421053 | 118 | 0.623888 |
27bab3c4651428c5a7f4a9944c00d5054c794783 | 2,849 | py | Python | warp/tests/test_all.py | addy1997/warp | 1c231e3eda88a39ce8142b9727e918d2a3e4a4b1 | [
"MIT",
"Unlicense",
"Apache-2.0",
"0BSD"
] | null | null | null | warp/tests/test_all.py | addy1997/warp | 1c231e3eda88a39ce8142b9727e918d2a3e4a4b1 | [
"MIT",
"Unlicense",
"Apache-2.0",
"0BSD"
] | null | null | null | warp/tests/test_all.py | addy1997/warp | 1c231e3eda88a39ce8142b9727e918d2a3e4a4b1 | [
"MIT",
"Unlicense",
"Apache-2.0",
"0BSD"
] | null | null | null | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
from unittest import runner
import warp as wp
import warp.tests.test_codegen
import warp.tests.test_mesh_query_aabb
import warp.tests.test_mesh_query_point
import warp.tests.test_conditional
import warp.tests.test_operators
import warp.tests.test_rounding
import warp.tests.test_hash_grid
import warp.tests.test_ctypes
import warp.tests.test_rand
import warp.tests.test_noise
import warp.tests.test_tape
import warp.tests.test_compile_consts
import warp.tests.test_volume
def run():
    """Build a suite from all warp test modules, run it, and return an exit code.

    Returns:
        True (i.e. a nonzero exit status) when any test failed, False when
        the whole suite passed -- suitable for passing to ``sys.exit``.
    """
    # Each test module exposes register(parent) which returns a TestCase
    # subclass; keeping the modules in one list makes adding a module a
    # one-line change instead of another copy-pasted addTest(...) call.
    test_modules = [
        warp.tests.test_codegen,
        warp.tests.test_mesh_query_aabb,
        warp.tests.test_mesh_query_point,
        warp.tests.test_conditional,
        warp.tests.test_operators,
        warp.tests.test_rounding,
        warp.tests.test_hash_grid,
        warp.tests.test_ctypes,
        warp.tests.test_rand,
        warp.tests.test_noise,
        warp.tests.test_tape,
        warp.tests.test_compile_consts,
        warp.tests.test_volume,
    ]
    tests = unittest.TestSuite()
    for module in test_modules:
        tests.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(
            module.register(unittest.TestCase)))
    # load all modules
    wp.force_load()
    runner = unittest.TextTestRunner(verbosity=2, failfast=False)
    # Invert wasSuccessful() so the value can serve as a process exit status.
    ret = not runner.run(tests).wasSuccessful()
    return ret
if __name__ == '__main__':
    # Propagate the suite result (0 = success) as the process exit status.
    import sys
    sys.exit(run())
| 47.483333 | 129 | 0.824149 |
e5c258bae2f15b7fe07a9663153e35be60bb1814 | 6,622 | py | Python | setup.py | bsolomon1124/pycld3 | af6187d4020eafbe3f8517fa144072a8aa1d9bbc | [
"Apache-2.0"
] | 101 | 2019-10-06T09:52:56.000Z | 2022-03-28T16:37:21.000Z | setup.py | bsolomon1124/pycld3 | af6187d4020eafbe3f8517fa144072a8aa1d9bbc | [
"Apache-2.0"
] | 22 | 2019-10-10T13:54:04.000Z | 2022-02-09T11:17:19.000Z | setup.py | bsolomon1124/pycld3 | af6187d4020eafbe3f8517fa144072a8aa1d9bbc | [
"Apache-2.0"
] | 10 | 2019-10-10T10:09:30.000Z | 2021-02-26T07:27:45.000Z | #!/usr/bin/env python
import os
import platform
import shutil
import subprocess
from distutils.command.build import build
from os import makedirs, path
from setuptools import Extension, setup
use_cython_val = os.environ.get("USE_CYTHON")
USE_CYTHON = use_cython_val is not None and use_cython_val not in ("0", "false", "False")
if USE_CYTHON:
try:
from Cython.Build import cythonize
except ImportError as e:
# ModuleNotFoundError is Python 3.6+
# If user explicitly specifies USE_CYTHON=1, they need Cython installed prior to build
raise RuntimeError("Specified USE_CYTHON=1 but could not find Cython installed") from e
HERE = path.abspath(path.dirname(__file__))
# List of source filenames, relative to the distribution root
# (where the setup script lives)
SOURCES = [
"src/base.cc",
"src/cld_3/protos/feature_extractor.pb.cc",
"src/cld_3/protos/sentence.pb.cc",
"src/cld_3/protos/task_spec.pb.cc",
"src/embedding_feature_extractor.cc",
"src/embedding_network.cc",
"src/feature_extractor.cc",
"src/feature_types.cc",
"src/fml_parser.cc",
"src/lang_id_nn_params.cc",
"src/language_identifier_features.cc",
"src/nnet_language_identifier.cc",
"src/registry.cc",
"src/relevant_script_feature.cc",
"src/script_span/fixunicodevalue.cc",
"src/script_span/generated_entities.cc",
"src/script_span/generated_ulscript.cc",
"src/script_span/getonescriptspan.cc",
"src/script_span/offsetmap.cc",
"src/script_span/text_processing.cc",
"src/script_span/utf8statetable.cc",
"src/sentence_features.cc",
"src/task_context.cc",
"src/task_context_params.cc",
"src/unicodetext.cc",
"src/utils.cc",
"src/workspace.cc",
]
# Avoid forcing user to have Cython; let them compile the intermediate
# CPP source file instead
# https://cython.readthedocs.io/en/latest/src/userguide/source_files_and_compilation.html#distributing-cython-modules
#
# Note that both of these (most notably the Cython-generate .cpp file) must be included
# in the sdist at distribution generation time! The conditional logic here applies to the installing
# user and what they choose to build the extension from. Both files must be available in the sdist.
if USE_CYTHON:
SOURCES.insert(0, "cld3/pycld3.pyx")
else:
SOURCES.insert(0, "cld3/pycld3.cpp")
# List of directories to search for C/C++ header files
INCLUDES = [
"/usr/local/include/",
path.join(HERE, "src/"),
path.join(HERE, "src/cld_3/protos/"),
]
# List of library names (not filenames or paths) to link against
LIBRARIES = ["protobuf"]
# https://docs.python.org/3/distutils/setupscript.html#describing-extension-modules
kwargs = dict(
sources=SOURCES,
include_dirs=INCLUDES,
libraries=LIBRARIES,
language="c++",
)
plat = platform.system()
if plat == "Darwin":
kwargs["extra_compile_args"] = ["-std=c++11", "-stdlib=libc++"]
kwargs["extra_link_args"] = ["-stdlib=libc++"]
elif plat != "Windows":
kwargs["extra_compile_args"] = ["-std=c++11"]
ext = [
Extension("cld3._cld3", **kwargs)
] # Name of the extension by which it can be imported
# .proto files define protocol buffer message formats
# https://developers.google.com/protocol-buffers/docs/cpptutorial
PROTOS = ["sentence.proto", "feature_extractor.proto", "task_spec.proto"]
class BuildProtobuf(build):
    """Compile protocol buffers via `protoc` compiler.

    Subclasses distutils' ``build`` so that the ``.pb.cc``/``.pb.h`` sources
    are generated from the ``.proto`` definitions before the normal build
    steps run.
    """
    def run(self):
        # Raise & exit early if `protoc` compiler not available
        if shutil.which("protoc") is None:
            raise RuntimeError(
                "The Protobuf compiler, `protoc`, which is required for"
                " building this package, could not be found.\n"
                "See https://github.com/protocolbuffers/protobuf for"
                " information on installing Protobuf."
            )
        # Create protobufs dir if it does not exist
        protobuf_dir = path.join(HERE, "src/cld_3/protos/")
        if not path.exists(protobuf_dir):
            print("Creating dirs at \033[1m{}\033[0;0m".format(protobuf_dir))
            makedirs(protobuf_dir)
        # Run command via subprocess, using protoc compiler on .proto
        # files
        #
        # $ cd src && protoc --cpp_out cld_3/protos \
        # > sentence.proto feature_extractor.proto task_spec.proto
        command = ["protoc"]
        command.extend(PROTOS)
        command.append(
            "--cpp_out={}".format(path.join(HERE, "src/cld_3/protos/"))
        )
        print("Running \033[1m{}\033[0;0m".format(" ".join(command)))
        subprocess.run(command, check=True, cwd=path.join(HERE, "src/"))
        # Hand off to the standard distutils build now that sources exist.
        build.run(self)
CLASSIFIERS = [
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: C++",
"Development Status :: 3 - Alpha",
"Topic :: Text Processing :: Linguistic",
"Intended Audience :: Developers",
]
def find_version(filepath):
    """Extract the package version from a Python source file.

    Scans *filepath* line by line for an assignment of the form
    ``__version__ = "1.2.3"`` and returns the version string with
    surrounding quotes stripped. Returns on the first non-empty match
    instead of scanning the whole file (the original kept reading and
    silently took the last occurrence).

    Args:
        filepath: path to the file to scan (typically a package __init__.py).

    Returns:
        The version string, e.g. ``"1.2.3"``.

    Raises:
        RuntimeError: if no ``__version__`` assignment is found.
    """
    with open(filepath) as f:
        for line in f:
            if line.startswith("__version__"):
                version = line.partition("=")[-1].strip().strip("'\"")
                if version:
                    return version
    raise RuntimeError("Could not find version in __init__.py")
if __name__ == "__main__":
    # https://docs.python.org/3/distutils/setupscript.html#additional-meta-data
    # Cythonize the .pyx extension only when USE_CYTHON was requested;
    # otherwise the pre-generated .cpp file listed in SOURCES is compiled.
    if USE_CYTHON:
        extensions = cythonize(ext)
    else:
        extensions = ext
    setup(
        name="pycld3",
        version=find_version("cld3/__init__.py"),
        cmdclass={"build": BuildProtobuf},
        author="Brad Solomon",
        author_email="bsolomon@protonmail.com",
        description="CLD3 Python bindings",
        long_description=open(
            path.join(HERE, "README.md"), encoding="utf-8"
        ).read(),
        long_description_content_type="text/markdown",
        license="Apache 2.0",
        keywords=["cld3", "cffi", "language", "langdetect", "cld", "nlp"],
        url="https://github.com/bsolomon1124/pycld3",
        ext_modules=extensions,
        python_requires=">2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*",
        classifiers=CLASSIFIERS,
        zip_safe=False,
        packages=["cld3"],
    )
| 34.310881 | 117 | 0.657203 |
e76346b8c6ea721c292e173ba322d5807875e457 | 1,849 | py | Python | azure-mgmt-resource/azure/mgmt/resource/policy/v2017_06_01_preview/models/error_response_py3.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 1 | 2021-09-07T18:36:04.000Z | 2021-09-07T18:36:04.000Z | azure-mgmt-resource/azure/mgmt/resource/policy/v2017_06_01_preview/models/error_response_py3.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 2 | 2019-10-02T23:37:38.000Z | 2020-10-02T01:17:31.000Z | azure-mgmt-resource/azure/mgmt/resource/policy/v2017_06_01_preview/models/error_response_py3.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
from msrest.exceptions import HttpOperationError
class ErrorResponse(Model):
    """Error response indicates ARM is not able to process the incoming request.
    The reason is provided in the error message.
    :param http_status: Http status code.
    :type http_status: str
    :param error_code: Error code.
    :type error_code: str
    :param error_message: Error message indicating why the operation failed.
    :type error_message: str
    """
    # msrest serialization map: attribute name -> wire key and wire type.
    # NOTE: this file is generated by AutoRest (see file header); manual
    # edits will be lost on regeneration.
    _attribute_map = {
        'http_status': {'key': 'httpStatus', 'type': 'str'},
        'error_code': {'key': 'errorCode', 'type': 'str'},
        'error_message': {'key': 'errorMessage', 'type': 'str'},
    }
    def __init__(self, *, http_status: str=None, error_code: str=None, error_message: str=None, **kwargs) -> None:
        super(ErrorResponse, self).__init__(**kwargs)
        self.http_status = http_status
        self.error_code = error_code
        self.error_message = error_message
class ErrorResponseException(HttpOperationError):
    """Server responded with exception of type: 'ErrorResponse'.
    :param deserialize: A deserializer
    :param response: Server response to be deserialized.
    """
    def __init__(self, deserialize, response, *args):
        # The literal 'ErrorResponse' tells msrest which model class to use
        # when deserializing the error body.
        super(ErrorResponseException, self).__init__(deserialize, response, 'ErrorResponse', *args)
5fd419241e542812b31ae858b734d687e794fcfb | 1,222 | py | Python | resources/lib/screensaver.py | drinfernoo/screensaver.arctic.mirage | 759d4750fad65a3279d51264e9963214c4a35a92 | [
"Apache-2.0"
] | 5 | 2020-03-20T10:43:34.000Z | 2022-02-21T18:21:41.000Z | resources/lib/screensaver.py | drinfernoo/screensaver.arctic.mirage | 759d4750fad65a3279d51264e9963214c4a35a92 | [
"Apache-2.0"
] | 2 | 2020-03-18T02:40:35.000Z | 2020-03-23T03:39:04.000Z | resources/lib/screensaver.py | drinfernoo/screensaver.arctic.mirage | 759d4750fad65a3279d51264e9963214c4a35a92 | [
"Apache-2.0"
] | 2 | 2020-05-06T21:46:04.000Z | 2021-09-01T21:53:29.000Z | # -*- coding: utf-8 -*-
import xbmc
import xbmcgui
from resources.lib import utils
class Screensaver(xbmcgui.WindowXMLDialog):
    """Kodi screensaver dialog that exposes a configured media path as a
    window property for the skin XML to consume."""
    class ExitMonitor(xbmc.Monitor):
        """Listens for Kodi screensaver events and fires an exit callback
        when the screensaver is deactivated."""
        def __init__(self, exit_callback):
            # Callback invoked on deactivation (i.e. user activity).
            self.exit_callback = exit_callback
        def onScreensaverActivated(self):
            utils.log('Screensaver Activated')
        def onScreensaverDeactivated(self):
            utils.log('Screensaver Deactivated')
            try:
                self.exit_callback()
            except AttributeError:
                # The owning window may not be fully constructed (or already
                # torn down) when the event fires.
                utils.log('Callback method not yet available.')
    def __init__(self, *args, **kwargs):
        # NOTE(review): xbmcgui.WindowXMLDialog.__init__ is not chained here;
        # presumably Kodi initializes the base via __new__ -- confirm.
        self.exit_monitor = self.ExitMonitor(self.exit)
        self.path = utils.get_setting_string('screensaver.arctic.mirage.path')
        utils.log(self.path)
    def onInit(self):
        # Control 1298 is looked up but the reference (self.character) is not
        # used elsewhere in this class.
        self.character = self.getControl(1298)
        if self.path and self.exit_monitor:
            self.setProperty('screensaver.arctic.mirage.path', self.path)
    def exit(self):
        # Drop the monitor first so further deactivation events cannot
        # re-enter this method, then close the dialog.
        if self.exit_monitor:
            del self.exit_monitor
        self.close()
d75380c7f93cf504e36d7b8aad4924bb1e6a7e9d | 1,692 | py | Python | icarus/test/test_util.py | oascigil/icarus_edge_comp | b7bb9f9b8d0f27b4b01469dcba9cfc0c4949d64b | [
"MIT"
] | 5 | 2021-03-20T09:22:55.000Z | 2021-12-20T17:01:33.000Z | icarus/test/test_util.py | oascigil/icarus_edge_comp | b7bb9f9b8d0f27b4b01469dcba9cfc0c4949d64b | [
"MIT"
] | 1 | 2021-12-13T07:40:46.000Z | 2021-12-20T16:59:08.000Z | icarus/test/test_util.py | oascigil/icarus_edge_comp | b7bb9f9b8d0f27b4b01469dcba9cfc0c4949d64b | [
"MIT"
] | 1 | 2021-11-25T05:42:20.000Z | 2021-11-25T05:42:20.000Z | import unittest
import networkx as nx
import fnss
import icarus.util as util
class TestUtil(unittest.TestCase):
    """Tests for icarus.util: timestr formatting, multicast trees and
    apportionment of integer quantities."""
    @classmethod
    def setUpClass(cls):
        pass
    @classmethod
    def tearDownClass(cls):
        pass
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def test_timestr(self):
        # The boolean flag controls whether trailing smaller units are
        # rendered (True) or truncated (False), per the expectations below.
        self.assertEqual("1m 30s", util.timestr(90, True))
        self.assertEqual("1m", util.timestr(90, False))
        self.assertEqual("2m", util.timestr(120, True))
        self.assertEqual("21s", util.timestr(21, True))
        self.assertEqual("0m", util.timestr(21, False))
        self.assertEqual("1h", util.timestr(3600, True))
        self.assertEqual("1h", util.timestr(3600, False))
        self.assertEqual("1h 0m 4s", util.timestr(3604, True))
        self.assertEqual("1h", util.timestr(3604, False))
        self.assertEqual("1h 2m 4s", util.timestr(3724, True))
        self.assertEqual("1h 2m", util.timestr(3724, False))
        self.assertEqual("2d 1h 3m 9s", util.timestr(49 * 3600 + 189, True))
        self.assertEqual("0s", util.timestr(0, True))
        self.assertEqual("0m", util.timestr(0, False))
    def test_multicast_tree(self):
        topo = fnss.Topology()
        topo.add_path([2, 1, 3, 4])
        # NOTE(review): in networkx >= 2.0 all_pairs_shortest_path returns a
        # generator rather than a dict -- confirm util.multicast_tree accepts
        # whichever form the pinned networkx version produces.
        sp = nx.all_pairs_shortest_path(topo)
        tree = util.multicast_tree(sp, 1, [2, 3])
        self.assertSetEqual(set(tree), set([(1, 2), (1, 3)]))
    def test_apportionment(self):
        # Each fraction list sums to 1; the result must sum to the total.
        self.assertEqual(util.apportionment(10, [0.53, 0.47]), [5, 5])
        self.assertEqual(util.apportionment(100, [0.4, 0.21, 0.39]), [40, 21, 39])
        self.assertEqual(util.apportionment(99, [0.2, 0.7, 0.1]), [20, 69, 10])
| 32.538462 | 82 | 0.615839 |
1318a4d98850f4d4bba27f17b882c8756b9ede97 | 1,702 | py | Python | text/urls.py | qianwenrock/block | 81d09dd4c419cb5487d6d7a9bbcbe214d3428b9c | [
"Apache-2.0"
] | null | null | null | text/urls.py | qianwenrock/block | 81d09dd4c419cb5487d6d7a9bbcbe214d3428b9c | [
"Apache-2.0"
] | null | null | null | text/urls.py | qianwenrock/block | 81d09dd4c419cb5487d6d7a9bbcbe214d3428b9c | [
"Apache-2.0"
] | null | null | null | """text URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf import settings
from django.conf.urls import url
from django.conf.urls.static import static
from django.contrib import admin
from post import views as post_views
from user import views as user_views
urlpatterns = [
    # Post URLs
    # Default route (post list)
    url(r'^$', post_views.post_list),
    # Post list
    url(r'^post/list/', post_views.post_list),
    # Create a post
    url(r'^post/create/', post_views.create_post),
    # Edit a post
    url(r'^post/edit/', post_views.edit_post),
    # Read a post
    url(r'^post/read/', post_views.read_post),
    # Search posts
    url(r'^post/search/', post_views.search_post),
    # Post ranking (top 10)
    url(r'^post/top10/', post_views.top10),
    # Comments
    url(r'^post/comment/',post_views.comment),
    # User URLs
    # Registration and session management
    url(r'^user/register/', user_views.register),
    url(r'^user/login/', user_views.login),
    url(r'^user/logout/', user_views.logout),
    url(r'^user/info/', user_views.user_info),
    # Third-party (Weibo) login callback
    url(r'^weibo/callback/', user_views.wb_callback)
]
# Serve user-uploaded media files during development.
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 30.945455 | 79 | 0.681551 |
271e9761bbe6d1b8b98d0bc695520c9b3a8eef03 | 4,579 | py | Python | pico_vna_adapt/Lib/site-packages/daytime.py | Henriksen-Lab/Simple_DAQ | 3bd313df4c80340eb367081400cb6d75ba132623 | [
"MIT"
] | null | null | null | pico_vna_adapt/Lib/site-packages/daytime.py | Henriksen-Lab/Simple_DAQ | 3bd313df4c80340eb367081400cb6d75ba132623 | [
"MIT"
] | null | null | null | pico_vna_adapt/Lib/site-packages/daytime.py | Henriksen-Lab/Simple_DAQ | 3bd313df4c80340eb367081400cb6d75ba132623 | [
"MIT"
] | null | null | null | import time
import datetime
class Daytime(datetime.time):
    """
    Compare, add or subtract daytimes.

    This class extends datetime.time and makes it more handy with respect
    to comparison, addition and subtraction.

    You can compare, add and subtract a daytime-object with other
    daytime-objects or with an integer/float as an amount of seconds. You
    can also compare a daytime with a datetime.time-object, and add or
    subtract a datetime.timedelta. Arithmetic wraps around midnight, so
    ``Daytime(0, 0, 10) - 20 == Daytime(23, 59, 50)``.

    Attributes:
        as_timedelta: daytime as datetime.timedelta-instance
        as_seconds: daytime in seconds as a float
    """

    # Seconds in one day; arithmetic results are reduced modulo this value.
    _SECONDS_PER_DAY = 24 * 60 * 60

    @classmethod
    def fromtime(cls, time):
        """
        Build a daytime from a datetime.time- or datetime.datetime-object.

        Args:
            time: datetime.time- or datetime.datetime-object

        Returns a daytime.
        """
        return cls(
            hour=time.hour,
            minute=time.minute,
            second=time.second,
            microsecond=time.microsecond
        )

    @classmethod
    def strptime(cls, string, format):
        """
        Build a daytime from a string and a format.

        Args:
            string: string parsed according to the specified format
            format: See the library reference manual for formatting codes.

        Returns a daytime.
        """
        return cls.fromtime(datetime.datetime.strptime(string, format))

    @classmethod
    def fromtimestamp(cls, timestamp):
        """
        Build a local daytime from timestamp.

        Args:
            timestamp: a POSIX timestamp, such as is returned by time.time()

        Returns a daytime.
        """
        return cls.fromtime(datetime.datetime.fromtimestamp(timestamp))

    @classmethod
    def utcfromtimestamp(cls, timestamp):
        """
        Build a utc-daytime from timestamp.

        Args:
            timestamp: a POSIX timestamp, such as is returned by time.time()

        Returns a daytime.
        """
        # datetime.datetime.utcfromtimestamp() is deprecated since Python
        # 3.12; an aware UTC datetime yields the same wall-clock fields.
        return cls.fromtime(
            datetime.datetime.fromtimestamp(timestamp, datetime.timezone.utc))

    @classmethod
    def daytime(cls):
        """
        Returns the actual daytime (i.e. the current local time of day).
        """
        return cls.fromtime(datetime.datetime.today())

    @property
    def as_timedelta(self):
        """
        Daytime as timedelta.
        """
        return datetime.timedelta(
            hours=self.hour,
            minutes=self.minute,
            seconds=self.second,
            microseconds=self.microsecond
        )

    @property
    def as_seconds(self):
        """
        Absolute amount of seconds of the daytime (a float).
        """
        return self.as_timedelta.total_seconds()

    def _other_as_seconds(self, other):
        """Coerce a supported operand to seconds; raise TypeError otherwise.

        Daytime must be checked before datetime.time because it is a
        subclass of it.
        """
        if isinstance(other, (int, float)):
            return other
        if isinstance(other, Daytime):
            return other.as_seconds
        if isinstance(other, datetime.time):
            return Daytime.fromtime(other).as_seconds
        if isinstance(other, datetime.timedelta):
            return other.total_seconds()
        raise TypeError("unsupported operator for Daytime and {0}".format(
            other.__class__.__name__))

    def __add__(self, other, sign=1):
        # The modulo wraps the result around midnight and also keeps the
        # timestamp non-negative, which matters on platforms (e.g. Windows)
        # where fromtimestamp() rejects negative values.
        seconds = self.as_seconds + sign * self._other_as_seconds(other)
        return Daytime.utcfromtimestamp(seconds % Daytime._SECONDS_PER_DAY)

    def __sub__(self, other):
        return self.__add__(other, -1)

    def __gt__(self, other):
        if isinstance(other, (int, float)):
            return self.as_seconds > other
        return super(Daytime, self).__gt__(other)

    def __ge__(self, other):
        if isinstance(other, (int, float)):
            return self.as_seconds >= other
        return super(Daytime, self).__ge__(other)

    def __lt__(self, other):
        if isinstance(other, (int, float)):
            return self.as_seconds < other
        return super(Daytime, self).__lt__(other)

    def __le__(self, other):
        if isinstance(other, (int, float)):
            return self.as_seconds <= other
        return super(Daytime, self).__le__(other)

    def __eq__(self, other):
        if isinstance(other, (int, float)):
            return self.as_seconds == other
        return super(Daytime, self).__eq__(other)

    def __ne__(self, other):
        if isinstance(other, (int, float)):
            return self.as_seconds != other
        return super(Daytime, self).__ne__(other)

    # Defining __eq__ without __hash__ made the class unhashable in Python 3
    # (datetime.time is hashable); reuse the parent's hash so Daytime objects
    # keep working as dict keys / set members. Caveat: a Daytime can compare
    # equal to a plain number while hashing differently.
    __hash__ = datetime.time.__hash__
| 30.324503 | 82 | 0.613016 |
c025435785000b6ee271a54399a7b124b30aa3c8 | 628 | py | Python | src/PYnative/exercise/Basic Exercise/Q9.py | c-w-m/learning_python | 8f06aa41faf9195d978a7d21cbb329280b0d3200 | [
"CNRI-Python"
] | null | null | null | src/PYnative/exercise/Basic Exercise/Q9.py | c-w-m/learning_python | 8f06aa41faf9195d978a7d21cbb329280b0d3200 | [
"CNRI-Python"
] | null | null | null | src/PYnative/exercise/Basic Exercise/Q9.py | c-w-m/learning_python | 8f06aa41faf9195d978a7d21cbb329280b0d3200 | [
"CNRI-Python"
] | null | null | null | # Reverse a given number and return true if it is the same as the original number
# My Solution
def reverse(num):
    """Return True if ``num`` equals the integer formed by reversing its digits.

    Raises ValueError for negative input (the reversed string e.g. ``"121-"``
    is not a valid integer), matching the original behavior.
    """
    # Use a fresh name instead of shadowing the builtin ``reversed``.
    flipped = int(str(num)[::-1])
    return num == flipped
# NOTE(review): this label prints unconditionally even when reverse() is
# False; the actual boolean result is printed on the next line.
print("Original and reverse number are equal")
print(reverse(121))
# Given Solution
def reverseCheck(number):
    """Return True if ``number`` equals its arithmetic digit reversal.

    Works purely arithmetically (no string conversion). For negative input
    the loop never runs, so the reversal stays 0 and the function returns
    False, matching the original behavior.
    """
    originalNum = number
    reverseNum = 0
    while number > 0:
        # Peel off the last digit and append it to the growing reversal.
        number, reminder = divmod(number, 10)
        reverseNum = (reverseNum * 10) + reminder
    # The redundant if/else returning True/False collapses to one comparison.
    return originalNum == reverseNum
# NOTE(review): the label text has typos ("orignal", "revers") and prints
# unconditionally; left unchanged because it is runtime output, not a comment.
print("orignal and revers number is equal")
print(reverseCheck(121))
| 21.655172 | 81 | 0.652866 |
eaeeab89606da65bc0421869f13f4a23a1a1fefe | 18,164 | py | Python | bin/design_naively.py | broadinstitute/adapt | ae0346dbd8c77fa9bcefbec5358cc750b146474d | [
"MIT"
] | 12 | 2020-11-25T21:19:27.000Z | 2022-03-07T10:55:52.000Z | bin/design_naively.py | broadinstitute/adapt | ae0346dbd8c77fa9bcefbec5358cc750b146474d | [
"MIT"
] | 17 | 2020-11-30T15:49:13.000Z | 2022-03-17T01:12:45.000Z | bin/design_naively.py | broadinstitute/adapt | ae0346dbd8c77fa9bcefbec5358cc750b146474d | [
"MIT"
] | 3 | 2021-04-28T13:48:07.000Z | 2022-01-10T16:51:34.000Z | #!/usr/bin/env python3
"""Design guides naively for diagnostics.
This can be used as a baseline for the output of design.py.
"""
import argparse
import logging
import heapq
from adapt import alignment
from adapt.utils import log
from adapt.utils import seq_io
__author__ = 'Hayden Metsky <hmetsky@broadinstitute.org>, Priya P. Pillai <ppillai@broadinstitute.org>'
logger = logging.getLogger(__name__)
def construct_guide_naively_at_each_pos(aln, args, ref_seq=None):
    """Naively construct a guide sequence at each position of an alignment.

    This constructs a guide sequence at each position. It does so in two
    ways: 'consensus' (the consensus sequence at the position) and
    'mode' (the most common sequence at the position). Additionally, when
    ref_seq is given and args.diversity is set, a guide taken verbatim
    from the reference sequence is produced under the key args.diversity,
    paired with a diversity metric (currently only 'entropy').

    Args:
        aln: alignment.Alignment object
        args: arguments to program
        ref_seq: reference sequence to base diversity guides on

    Returns:
        list x where x[i] is a dict, giving a guide sequence at position
        i of the alignment. x[i][method] is a tuple (guide, frac),
        where method is 'consensus' or 'mode', and guide gives a guide
        sequence (a string) constructed using the method and frac is
        the fraction of sequences in the alignment to which the guide
        binds
    """
    start_positions = range(aln.seq_length - args.guide_length + 1)
    guides = [None for _ in start_positions]
    if ref_seq is not None:
        ref_seq_aln = alignment.Alignment.from_list_of_seqs([ref_seq])
    for i in start_positions:
        # Extract the portion of the alignment that starts at i
        pos_start, pos_end = i, i + args.guide_length
        aln_for_guide = aln.extract_range(pos_start, pos_end)
        # When constructing guides, ignore any sequences in the alignment
        # that have a gap in this region.
        seqs_with_gap = set(aln_for_guide.seqs_with_gap())
        seqs_to_consider = set(range(aln.num_sequences)) - seqs_with_gap
        # Only look at sequences with valid flanking regions
        seqs_to_consider = aln.seqs_with_required_flanking(i, args.guide_length,
            args.required_flanking_seqs, seqs_to_consider=seqs_to_consider)
        ref_seqs_to_consider = []
        if ref_seq is not None:
            ref_seqs_to_consider = ref_seq_aln.seqs_with_required_flanking(
                i, args.guide_length, args.required_flanking_seqs)
        consensus_guide = None
        mode_guides = None
        diversity_guide = None
        frac_with_gap = float(len(seqs_with_gap)) / aln.num_sequences
        # Only design guides when the fraction of sequences with a gap here
        # is below args.skip_gaps; otherwise the position is skipped and the
        # 'None' placeholders below are reported instead.
        if frac_with_gap < args.skip_gaps:
            # Construct guides
            # Only design guides if there exist sequences to consider
            if len(seqs_to_consider) > 0:
                if args.consensus:
                    consensus_guide = aln_for_guide.determine_consensus_sequence(
                        seqs_to_consider=seqs_to_consider)
                if args.mode:
                    mode_guides = aln_for_guide.determine_most_common_sequences(
                        seqs_to_consider=seqs_to_consider,
                        skip_ambiguity=True, n=args.mode_n)
            if len(ref_seqs_to_consider) > 0:
                if args.diversity:
                    # The diversity guide is simply the reference sequence
                    # over this window.
                    diversity_guide = ref_seq[pos_start:pos_end]
        # Determine the fraction of the sequences that each guide binds to
        if consensus_guide is not None:
            consensus_guide_bound = aln.sequences_bound_by_guide(
                consensus_guide, i, args.guide_mismatches,
                args.allow_gu_pairs, required_flanking_seqs=args.required_flanking_seqs)
            consensus_guide_frac = float(len(consensus_guide_bound)) / aln.num_sequences
        else:
            # Placeholder string 'None' (not the None object) so downstream
            # output formatting can print it directly.
            consensus_guide = 'None'
            consensus_guide_frac = 0
        if mode_guides is not None:
            mode_guides_bound = []
            for mode_guide in mode_guides:
                mode_guides_bound.append(aln.sequences_bound_by_guide(
                    mode_guide, i, args.guide_mismatches,
                    args.allow_gu_pairs, required_flanking_seqs=args.required_flanking_seqs))
            all_mode_guides_bound = set().union(*mode_guides_bound)
            # total_mode_guides_frac is the fraction of sequences bound by at
            # least one of the guides
            total_mode_guides_frac = (float(len(all_mode_guides_bound)) /
                aln.num_sequences)
            # mode_guides_frac is a list of the fraction of sequences bound by
            # each guides
            mode_guides_frac = [(float(len(mode_guide_bound)) /
                aln.num_sequences)
                for mode_guide_bound in mode_guides_bound]
        else:
            mode_guides = 'None'
            total_mode_guides_frac = 0
            mode_guides_frac = [0]
        if diversity_guide is not None:
            diversity_guide_bound = aln.sequences_bound_by_guide(
                diversity_guide, i, args.guide_mismatches,
                args.allow_gu_pairs, required_flanking_seqs=args.required_flanking_seqs)
            diversity_guide_frac = float(len(diversity_guide_bound)) / aln.num_sequences
            if args.diversity == 'entropy':
                # Average per-position entropy over the guide window.
                all_entropy = aln_for_guide.position_entropy()
                diversity_metric = sum(all_entropy)/args.guide_length
            else:
                raise ValueError("Invalid diversity method '%s'; use one of ['entropy']" %args.diversity)
        else:
            diversity_guide = 'None'
            diversity_guide_frac = 0
            # inf marks "no guide here" so minimization never selects it.
            diversity_metric = float('inf')
        d = {}
        if args.consensus:
            d['consensus'] = (consensus_guide, consensus_guide_frac)
        if args.mode:
            d['mode'] = ((mode_guides, mode_guides_frac), total_mode_guides_frac)
        if args.diversity:
            d[args.diversity] = ((diversity_guide, diversity_guide_frac), diversity_metric)
        guides[i] = d
    return guides
def find_guide_in_each_window(guides, aln_length, args, obj_type='max'):
    """Determine the best guides for each window of an alignment.

    For each window, this selects the args.best_n guides within it
    (given one guide per position) that have the best metric score.
    To break ties, this selects the first guide (in terms of
    position) among ones with a tied metric.

    Guides kept for window i-1 are reused for window i whenever they still
    fall inside the new window, so each window only needs to score the
    positions it has not already seen (amortized sliding-window search).

    Args:
        guides: list such that guides[i] is a tuple (guide, metric)
            giving a guide sequence (guide) at position i of
            an alignment and a metric (metric) that can be used to
            compare guide quality
        aln_length: length of alignment
        args: arguments to program; must provide window_size,
            guide_length, and best_n
        obj_type: if 'max', consider the largest value the best; else
            if 'min', consider the smallest value the best. Must be
            either 'min' or 'max'

    Returns:
        list x where x[i] gives a list of the args.best_n tuples
        (guide, metric) representing guides in the window that starts at
        position i
    """
    window_start_positions = range(aln_length - args.window_size + 1)
    guide_in_window = [[] for _ in window_start_positions]
    for i in window_start_positions:
        window_start, window_end = i, i + args.window_size
        # Last position at which a full-length guide still fits in the window
        last_guide_pos = window_end - args.guide_length
        logger.info("Searching for a guide within window [%d, %d)" %
            (window_start, window_end))
        # Heap entries are (metric, -position, guide); the negated position
        # breaks metric ties in favor of the earliest position. Track which
        # (negated) positions are already on the heap.
        positions = set()
        # Check if any of the guides are no longer within the window
        if i > 0:
            for guide in guide_in_window[i-1]:
                # guide[1] holds the NEGATED position, so negate it again
                # before comparing against the window start. (Comparing
                # guide[1] directly is always False for positive window
                # starts, which silently disabled this carry-over step and
                # forced every window to rescan from scratch.)
                if -guide[1] >= window_start:
                    guide_in_window[i].append(guide)
                    positions.add(guide[1])
            heapq.heapify(guide_in_window[i])
        if len(guide_in_window[i]) < args.best_n:
            # One of the previous args.best_n best guides is no longer in the
            # window; find a new one
            for j in range(window_start, last_guide_pos + 1):
                # Skip if guide is already in the heap
                if -j in positions:
                    continue
                guide, metric = guides[j]
                # Reverse order for minimizing
                if obj_type == 'min':
                    metric = -metric
                # If there aren't args.best_n guides on the heap yet, add the
                # guide to the heap
                if len(guide_in_window[i]) < args.best_n:
                    # Use negative position to break ties
                    heapq.heappush(guide_in_window[i], (metric, -j, guide))
                # If the new guide has a better metric than the worst guide on
                # the heap, remove the worst guide and add the new one
                elif metric > guide_in_window[i][0][0]:
                    # Use negative position to break ties
                    heapq.heappushpop(guide_in_window[i], (metric, -j, guide))
        else:
            # All args.best_n guides are still within the window, but now
            # check if the new guide at the very end of the window does better
            guide, metric = guides[last_guide_pos]
            # Reverse order for minimizing
            if obj_type == 'min':
                metric = -metric
            if metric > guide_in_window[i][0][0]:
                heapq.heappushpop(guide_in_window[i],
                    (metric, -last_guide_pos, guide))
    # Undo reverse order for minimizing and sort best-first
    fix = 1 if obj_type == 'max' else -1
    guide_in_window = [[(guide, fix*metric) for metric, _, guide \
                        in sorted(guide_in_window_i, reverse=True)] \
                       for guide_in_window_i in guide_in_window]
    return guide_in_window
def main(args):
    """Design guides for every window of the input alignment and write a TSV.

    Reads the aligned FASTA given by args.in_fasta, constructs one candidate
    guide per alignment position (per enabled method: consensus, mode, and
    optionally a diversity method), selects the best args.best_n guides in
    each sliding window, and writes one TSV row per (window, rank) to
    args.out_tsv.
    """
    # Allow G-U base pairing, unless it is explicitly disallowed
    args.allow_gu_pairs = not args.do_not_allow_gu_pairing
    # Run the consensus method, unless it is explicitly disallowed
    args.consensus = not args.no_consensus
    # Run the mode method, unless it is explicitly disallowed
    args.mode = not args.no_mode
    # Check if there is a reference sequence if there is a diversity method
    if args.diversity:
        if not args.ref_seq:
            raise Exception('Must include a reference sequence label to run any diversity method')
    # Pack the 5'/3' flanking requirements into the tuple shape the
    # guide-construction code expects.
    args.required_flanking_seqs = (args.require_flanking5, args.require_flanking3)
    # Read the input alignment
    seqs = seq_io.read_fasta(args.in_fasta)
    aln = alignment.Alignment.from_list_of_seqs(list(seqs.values()))
    try:
        # ref_seq stays None when no reference label was given
        ref_seq = seqs[args.ref_seq] if args.ref_seq else None
    except KeyError:
        raise Exception('Reference sequence %s does not match any label of the sequences in the given FASTA' \
            %args.ref_seq)
    # Construct a guide at each position of the alignment
    logger.info("Constructing guides naively at each position of alignment")
    guides = construct_guide_naively_at_each_pos(aln, args, ref_seq=ref_seq)
    # Find the best guide in each window (for the
    # consensus, mode, and diversity approaches)
    if args.consensus:
        logger.info("Searching for consensus guides")
        consensus_guides_in_window = find_guide_in_each_window(
            [guides[i]['consensus'] for i in range(len(guides))],
            aln.seq_length, args)
    if args.mode:
        logger.info("Searching for mode guides")
        mode_guides_in_window = find_guide_in_each_window(
            [guides[i]['mode'] for i in range(len(guides))],
            aln.seq_length, args)
    if args.diversity:
        logger.info("Searching for %s guides" %args.diversity)
        # Lower diversity is better, so minimize the metric here
        diversity_guides_in_window = find_guide_in_each_window(
            [guides[i][args.diversity] for i in range(len(guides))],
            aln.seq_length, args, obj_type='min')
    # Write the guides to a TSV file
    with open(args.out_tsv, 'w') as outf:
        header = ['window-start', 'window-end', 'rank']
        if args.consensus:
            header.extend(['target-sequence-by-consensus', 'frac-bound-by-consensus'])
        if args.mode:
            header.extend(['target-sequence-by-mode', 'frac-bound-by-mode', 'total-frac-bound-by-mode'])
        if args.diversity:
            header.extend(['target-sequence-by-%s' %args.diversity, args.diversity, 'frac-bound-by-%s' %args.diversity])
        outf.write('\t'.join(header) + '\n')
        # One row per window position and per rank (1-based) within it
        for i in range(aln.seq_length - args.window_size + 1):
            for j in range(args.best_n):
                line = [i, i + args.window_size, j+1]
                if args.consensus:
                    # (guide sequence, fraction of sequences bound)
                    line.extend(consensus_guides_in_window[i][j])
                if args.mode:
                    # [0] is ((guides, per-guide fracs)); [1] is total frac
                    line.extend(mode_guides_in_window[i][j][0])
                    line.append(mode_guides_in_window[i][j][1])
                if args.diversity:
                    # Unpack ((guide, frac-bound), diversity-metric) into
                    # column order: guide, metric, frac-bound
                    diversity_line = (diversity_guides_in_window[i][j][0][0],
                        diversity_guides_in_window[i][j][1],
                        diversity_guides_in_window[i][j][0][1])
                    line.extend(diversity_line)
                outf.write('\t'.join([str(x) for x in line]) + '\n')
# Command-line interface: parse arguments, configure logging, run main().
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Input alignment and output file
    parser.add_argument('in_fasta',
        help=("Path to input FASTA (aligned)"))
    parser.add_argument('out_tsv',
        help=("Path to TSV file to which to write the output"))
    # Window size
    parser.add_argument('-w', '--window-size', type=int, default=200,
        help=("Output guide(s) within each window (sliding along "
              "the alignment) of this length"))
    # Parameters on guide length and mismatches
    parser.add_argument('-gl', '--guide-length', type=int, default=28,
        help="Length of guide to construct")
    parser.add_argument('-gm', '--guide-mismatches', type=int, default=0,
        help=("Allow for this number of mismatches when "
              "determining whether a guide covers a sequence"))
    # Best n guides per window
    parser.add_argument('--best-n', type=int, default=1,
        help=("Find the best BEST_N guides in each window"))
    # G-U pairing options
    parser.add_argument('--do-not-allow-gu-pairing', action='store_true',
        help=("When determining whether a guide binds to a region of "
              "target sequence, do not count G-U (wobble) base pairs as "
              "matching. Default is to tolerate G-U pairing: namely, "
              "A in an output guide sequence matches G in the "
              "target and C in an output guide sequence matches T "
              "in the target (since the synthesized guide is the reverse "
              "complement of the output guide sequence)"))
    # Options to skip
    parser.add_argument('--skip-gaps', type=float, default=0.5,
        help=("If this fraction or more of sequences at a position contain "
              "a gap character, do not design a guide there"))
    # Reference sequence
    parser.add_argument('--ref-seq', type=str, default=None,
        help=("The label used in the FASTA file of the reference sequence "
              "to design guides based on sequence diversity; required "
              "for diversity method"))
    # Guide sequence methods
    parser.add_argument('--no-consensus', action='store_true',
        help=("If set, do not use the consensus method to determine guides; "
              "otherwise, will use the consensus method"))
    parser.add_argument('--no-mode', action='store_true',
        help=("If set, do not use the mode method to determine guides; "
              "otherwise, will use the mode method"))
    parser.add_argument('--diversity', type=str, default=None, choices=["entropy"],
        help=("A string of which diversity method to use to determine guides "
              "('entropy'); None (default) to not use a diversity method. "
              "'entropy' will calculate the average per position entropy of "
              "each potential guide, then return the guides at the positions "
              "with the lowest entropy; nucleotides are determined by the "
              "reference sequence"))
    parser.add_argument('--mode-n', type=int, default=1,
        help=("Use the MODE_N most common sequences as the 'mode' "
              "sequences; note that this is no longer the 'mode' of the "
              "data. Defaults to 1."))
    # Requiring flanking sequence (PFS)
    parser.add_argument('--require-flanking5',
        help=("Require the given sequence on the 5' protospacer flanking "
              "site (PFS) of each designed guide; this tolerates ambiguity "
              "in the sequence (e.g., 'H' requires 'A', 'C', or 'T', or, "
              "equivalently, avoids guides flanked by 'G'). Note that "
              "this is the 5' end in the target sequence (not the spacer "
              "sequence)."))
    parser.add_argument('--require-flanking3',
        help=("Require the given sequence on the 3' protospacer flanking "
              "site (PFS) of each designed guide; this tolerates ambiguity "
              "in the sequence (e.g., 'H' requires 'A', 'C', or 'T', or, "
              "equivalently, avoids guides flanked by 'G'). Note that "
              "this is the 3' end in the target sequence (not the spacer "
              "sequence)."))
    # Log levels (--debug and --verbose share the same dest; --debug wins
    # by setting the lower level)
    parser.add_argument("--debug",
        dest="log_level",
        action="store_const",
        const=logging.DEBUG,
        default=logging.WARNING,
        help=("Debug output"))
    parser.add_argument("--verbose",
        dest="log_level",
        action="store_const",
        const=logging.INFO,
        help=("Verbose output"))
    args = parser.parse_args()
    log.configure_logging(args.log_level)
    main(args)
| 45.98481 | 120 | 0.619247 |
48162f3cc66c64d98e213b60b81d604e4c2f7b8e | 9,302 | py | Python | tests/test_targetapplication.py | andymckay/amo-validator | d13e3644eb657e56666ee40d91a9c67382cfa725 | [
"BSD-3-Clause"
] | null | null | null | tests/test_targetapplication.py | andymckay/amo-validator | d13e3644eb657e56666ee40d91a9c67382cfa725 | [
"BSD-3-Clause"
] | null | null | null | tests/test_targetapplication.py | andymckay/amo-validator | d13e3644eb657e56666ee40d91a9c67382cfa725 | [
"BSD-3-Clause"
] | null | null | null | import json
import validator.testcases.targetapplication as targetapp
from validator.constants import *
from validator.errorbundler import ErrorBundle
from validator.rdf import RDFParser
from helper import _do_test
# Load the canonical list of approved applications/versions once at import
# time. Using `with` closes the JSON file handle deterministically; the
# original `json.load(open(...))` left it to the garbage collector.
with open("validator/app_versions.json") as app_versions_file:
    targetapp.APPROVED_APPLICATIONS = json.load(app_versions_file)
def _do_test_raw(rdf, listed=True, overrides=None):
    """Run the targeted-application tests against a raw install.rdf string.

    Builds an ErrorBundle, attaches the parsed RDF to it as the install.rdf
    resource, runs the tests, and returns the bundle for inspection.
    """
    bundle = ErrorBundle(listed=listed)
    bundle.overrides = overrides
    parsed_rdf = RDFParser(rdf.strip())
    bundle.save_resource("has_install_rdf", True)
    bundle.save_resource("install_rdf", parsed_rdf)
    targetapp.test_targetedapplications(bundle)
    print(bundle.print_summary())
    return bundle
def test_valid_targetapps():
    """A passing install.rdf yields the expected supported apps/versions."""
    output = _do_test("tests/resources/targetapplication/pass.xpi",
                      targetapp.test_targetedapplications,
                      False,
                      True)
    supported_apps = output.get_resource("supports")
    print(supported_apps)
    assert "firefox" in supported_apps
    assert "mozilla" in supported_apps
    assert len(supported_apps) == 2
    versions = output.supported_versions
    print(versions)
    assert versions['{ec8030f7-c20a-464f-9b0e-13a3a9e97384}'] == \
        ['3.6', '3.6.4', '3.6.*']
def test_bad_min_max():
    """Both version bounds of a targetApplication must be valid versions."""
    for xpi_path in ("tests/resources/targetapplication/bad_min.xpi",
                     "tests/resources/targetapplication/bad_max.xpi"):
        _do_test(xpi_path,
                 targetapp.test_targetedapplications,
                 True,
                 True)
def test_bad_order():
    """The min version must not come after the max version."""
    xpi_path = "tests/resources/targetapplication/bad_order.xpi"
    _do_test(xpi_path,
             targetapp.test_targetedapplications,
             True,
             True)
def test_dup_targets():
    """Duplicate targetApplication elements must be rejected."""
    xpi_path = "tests/resources/targetapplication/dup_targapp.xpi"
    _do_test(xpi_path,
             targetapp.test_targetedapplications,
             True,
             True)
def test_missing_installrdfs_are_handled():
    """
    When no install.rdf is present, supported_versions must end up as an
    empty dict (not None), so later version-dependent tests have a valid
    state to work from.
    """
    bundle = ErrorBundle()
    # None is already the default; set it explicitly for clarity of purpose.
    bundle.supported_versions = None
    assert targetapp.test_targetedapplications(bundle, None) is None
    # An empty mapping is required or other tests downstream will fail.
    assert bundle.supported_versions == {}
def test_supported_versions_not_overwritten():
    """
    A supported_versions mapping that is already populated (e.g. while
    testing subpackages such as JARs) must survive the targeted-application
    tests untouched. A None value means the target list has not been parsed
    yet; replacing an existing mapping would wrongly discard version data
    gathered for the outer package.
    """
    bundle = ErrorBundle()
    original_mapping = bundle.supported_versions = {"foo": ["bar"]}
    assert targetapp.test_targetedapplications(bundle, None) is None
    # The exact same object must still be attached to the bundle.
    assert bundle.supported_versions is original_mapping
def test_is_ff4():
    """
    A package built for Firefox 4 sets the "ff4" resource. This neither
    passes nor fails validation by itself; higher tiers rely on it.
    """
    output = _do_test("tests/resources/targetapplication/ff4.xpi",
                      targetapp.test_targetedapplications,
                      False,
                      True)
    assert output.get_resource("ff4")
    supported_apps = output.get_resource("supports")
    assert supported_apps
    assert "firefox" in supported_apps
def test_no_supported_mozilla_apps():
    """
    Listed add-ons must target at least one supported Mozilla application;
    unlisted add-ons are exempt from this requirement.
    """
    # Targets Firefox only: fine.
    firefox_only = """
<?xml version="1.0"?>
<RDF xmlns="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:em="http://www.mozilla.org/2004/em-rdf#">
<Description about="urn:mozilla:install-manifest">
<em:targetApplication>
<Description> <!-- Firefox -->
<em:id>{ec8030f7-c20a-464f-9b0e-13a3a9e97384}</em:id>
<em:minVersion>1.5</em:minVersion>
<em:maxVersion>3.0.*</em:maxVersion>
</Description>
</em:targetApplication>
</Description>
</RDF>
"""
    assert not _do_test_raw(firefox_only).failed()
    # A non-Mozilla target alongside Firefox: still fine.
    mixed_targets = """
<?xml version="1.0"?>
<RDF xmlns="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:em="http://www.mozilla.org/2004/em-rdf#">
<Description about="urn:mozilla:install-manifest">
<em:targetApplication>
<Description> <!-- Something else -->
<em:id>Blah blah blah</em:id>
<em:minVersion>1.2.3</em:minVersion>
<em:maxVersion>4.5.6</em:maxVersion>
</Description>
</em:targetApplication>
<em:targetApplication>
<Description> <!-- Firefox -->
<em:id>{ec8030f7-c20a-464f-9b0e-13a3a9e97384}</em:id>
<em:minVersion>1.5</em:minVersion>
<em:maxVersion>3.0.*</em:maxVersion>
</Description>
</em:targetApplication>
</Description>
</RDF>
"""
    assert not _do_test_raw(mixed_targets).failed()
    # No Mozilla target at all: fails for listed add-ons only.
    failure_case = """
<?xml version="1.0"?>
<RDF xmlns="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:em="http://www.mozilla.org/2004/em-rdf#">
<Description about="urn:mozilla:install-manifest">
<em:targetApplication>
<Description> <!-- Something else -->
<em:id>Blah blah blah</em:id>
<em:minVersion>1.2.3</em:minVersion>
<em:maxVersion>4.5.6</em:maxVersion>
</Description>
</em:targetApplication>
<em:targetApplication>
<Description> <!-- More junk -->
<em:id>More junk</em:id>
<em:minVersion>9.8.7</em:minVersion>
<em:maxVersion>6.5.4</em:maxVersion>
</Description>
</em:targetApplication>
</Description>
</RDF>
"""
    assert _do_test_raw(failure_case).failed()
    assert not _do_test_raw(failure_case, listed=False).failed()
def test_overrides():
    """validate() overrides can force min/max version checks to pass/fail."""
    guid = "{ec8030f7-c20a-464f-9b0e-13a3a9e97384}"
    # A failing manifest can be forced to pass via overrides.
    passing_overrides = {
        "targetapp_minVersion": {guid: "1.5"},
        "targetapp_maxVersion": {guid: "3.6"},
    }
    assert not _do_test_raw("""
<?xml version="1.0"?>
<RDF xmlns="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:em="http://www.mozilla.org/2004/em-rdf#">
<Description about="urn:mozilla:install-manifest">
<em:targetApplication>
<Description> <!-- Firefox -->
<em:id>{ec8030f7-c20a-464f-9b0e-13a3a9e97384}</em:id>
<em:minVersion>ABCDEFG</em:minVersion>
<em:maxVersion>-1.2.3.4</em:maxVersion>
</Description>
</em:targetApplication>
</Description>
</RDF>
""", overrides=passing_overrides).failed()
    # And a passing manifest can be forced to fail.
    failing_overrides = {
        "targetapp_minVersion": {guid: "foo"},
        "targetapp_maxVersion": {guid: "bar"},
    }
    assert _do_test_raw("""
<?xml version="1.0"?>
<RDF xmlns="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:em="http://www.mozilla.org/2004/em-rdf#">
<Description about="urn:mozilla:install-manifest">
<em:targetApplication>
<Description> <!-- Firefox -->
<em:id>{ec8030f7-c20a-464f-9b0e-13a3a9e97384}</em:id>
<em:minVersion>1.5</em:minVersion>
<em:maxVersion>3.6</em:maxVersion>
</Description>
</em:targetApplication>
</Description>
</RDF>
""", overrides=failing_overrides).failed()
| 36.622047 | 79 | 0.612664 |
571a864fd235725550c463dedb715c9b3f3b7f05 | 3,209 | py | Python | pop_movies.py | aberdean/movie-trailer-website | bda1523f57af2b051b9686c5479e229a33039b21 | [
"MIT"
] | null | null | null | pop_movies.py | aberdean/movie-trailer-website | bda1523f57af2b051b9686c5479e229a33039b21 | [
"MIT"
] | null | null | null | pop_movies.py | aberdean/movie-trailer-website | bda1523f57af2b051b9686c5479e229a33039b21 | [
"MIT"
] | null | null | null | import requests
import json
from fresh_tomatoes import open_movies_page
# PLEASE, INSERT YOUR API KEY BELOW
# (a TMDb API key; with an empty key the API requests will fail)
API_KEY = ""

# The base URLs for the TMDb API, the poster image, and the YouTube trailer.
BASE_URL = "https://api.themoviedb.org/3/movie/"
BASE_POSTER_URL = "https://image.tmdb.org/t/p/w500"
BASE_TRAILER_URL = "https://www.youtube.com/watch?v="
class Movie:
    """A single movie entry for the trailer website.

    Holds the three pieces of information the generated page needs:
    the title, the poster image URL, and the YouTube trailer URL.
    """

    def __init__(self, title, poster, trailer):
        """Store the movie's display data.

        Arguments:
            title {string} -- the title of the movie
            poster {string} -- the URL of the poster image
            trailer {string} -- the URL of the YouTube trailer
        """
        self.title = title
        self.poster_image_url = poster
        self.trailer_youtube_url = trailer
def fetch_trailer(movie_id):
    """Given a movie id, retrieves the URL for the movie's YouTube trailer.

    Arguments:
        movie_id {string} -- the movie id

    Returns:
        {string} -- the URL for the movie's YouTube trailer, or None if the
        request fails or the movie has no YouTube trailer
    """
    url = "%s%s/videos?api_key=%s&language=en-US" % (BASE_URL,
                                                     movie_id, API_KEY)
    try:
        response = requests.request("GET", url)
        trailer_list = json.loads(response.content)["results"]
    # If the trailer cannot be fetched, return None. The HTTP call raises
    # RequestException on network failure; parsing raises ValueError (bad
    # JSON) or KeyError (no "results" key). The original code only caught
    # RequestException around the parsing line, which never raises it, so
    # parse errors escaped and network errors were not handled at all.
    except (requests.exceptions.RequestException, ValueError, KeyError):
        return None
    # We want a YouTube trailer, so from the list of videos, we pick the
    # first one that is a trailer and is hosted on YouTube.
    for trailer in trailer_list:
        if trailer["site"] == "YouTube" and trailer["type"] == "Trailer":
            return "%s%s" % (BASE_TRAILER_URL, trailer["key"])
    # No YouTube trailer found (explicit for clarity).
    return None
def main():
    """Fetches a list of popular movies from The Movie Database (TMDb).

    For each movie, extracts its title, the URL of its poster image, and
    the URL of its YouTube trailer. Stores all the movies in a list and
    calls fresh_tomatoes.py to generate a webpage from the list of movies.
    """
    movies = []
    url = "%spopular?page=1&language=en-US&api_key=%s" % (BASE_URL, API_KEY)
    try:
        response = requests.request("GET", url)
        movie_list = json.loads(response.content)["results"]
    # If movies cannot be fetched, print the error and return, since there is
    # nothing to show on the webpage. The request raises RequestException on
    # network failure; parsing raises ValueError (bad JSON) or KeyError (no
    # "results" key). The original handler only caught RequestException and
    # only around the parsing line, so real failures slipped through.
    except (requests.exceptions.RequestException, ValueError, KeyError) as e:
        print("Unable to fetch movies: %s" % (e))
        return
    for movie in movie_list:
        title = movie["title"]
        poster_url = "%s%s" % (BASE_POSTER_URL, movie["poster_path"])
        movie_id = movie["id"]
        trailer_url = fetch_trailer(movie_id)
        # Since we want to be able to play a trailer for each movie, if a
        # movie doesn't have a trailer, we don't add it to the list.
        if trailer_url is not None:
            new_movie = Movie(title, poster_url, trailer_url)
            movies.append(new_movie)
    # Call the fresh_tomatoes.py file to generate a webpage from the
    # list of movies.
    open_movies_page(movies)
# Build the webpage only when executed as a script (not on import).
if __name__ == "__main__":
    main()
| 34.880435 | 77 | 0.643191 |
4f75084d7a618a1e516e472892a6169de0d10946 | 8,639 | py | Python | build/scripts/gen_configin.py | HelloAllen8893/AliOS-Things | f3a2904860c3bf87b64d634e6900d980931fac88 | [
"Apache-2.0"
] | 2 | 2018-08-20T08:33:55.000Z | 2018-11-28T03:19:22.000Z | build/scripts/gen_configin.py | HelloAllen8893/AliOS-Things | f3a2904860c3bf87b64d634e6900d980931fac88 | [
"Apache-2.0"
] | null | null | null | build/scripts/gen_configin.py | HelloAllen8893/AliOS-Things | f3a2904860c3bf87b64d634e6900d980931fac88 | [
"Apache-2.0"
] | 1 | 2021-06-20T06:43:12.000Z | 2021-06-20T06:43:12.000Z | #!/usr/bin/env python
import os, sys, re
import shutil
from aos_parse_components import get_comp_name
# Global definitions
# Directory holding the Config.in templates copied into the source tree.
templatedir = "build/scripts/kconfig_template"
# Destination paths of the generated/updated Config.in files.
top_config_in = "build/Config.in"
board_config_in = "board/Config.in"
example_config_in = "app/example/Config.in"
profile_config_in = "app/profile/Config.in"
# Markers written (as "# ..." comments) around auto-generated sections.
autogen_start = "--- Generated Automatically ---"
autogen_end = "--- End ---"
def update_config_in(config_file, config_list):
    """Rewrite a copied Config.in, dropping `source` lines whose target
    Config.in is not among the installed components (config_list)."""
    source_re = re.compile(r'\s*source (.*)')
    kept_lines = []
    with open(config_file, "r") as fh:
        for line in fh.readlines():
            m = source_re.match(line)
            if m:
                target = m.group(1)
                # linkkit templates refer to their sources via $SRCPATH.
                if "linkkit/" in config_file and "$SRCPATH" in target:
                    target = re.sub(r"\$SRCPATH/", "middleware/linkkit/", target)
                # Drop the reference if the (unquoted) path is not installed.
                if re.sub(r'"', "", target) not in config_list:
                    continue
            kept_lines.append(line)
    # Rewrite the file only if something survived the filtering.
    if kept_lines:
        with open(config_file, "w") as fh:
            fh.writelines(kept_lines)
def get_opt_config(config_file, keyword, sdir):
    """Resolve a `select <keyword>...` option in config_file to the
    Config.in under sdir whose first line defines that option.

    Returns a (option_name, option_config_path) tuple; either element is
    the empty string when not found.
    """
    select_re = re.compile(r"\s+select\s+(%s.*?)(\s+if.*)?$" % keyword)
    opt_name = ""
    with open(config_file, "r") as fh:
        # Keep scanning so the last matching `select` wins.
        for line in fh.readlines():
            m = select_re.match(line)
            if m:
                opt_name = m.group(1)
    opt_config = ""
    if opt_name:
        define_re = re.compile(r"^(menuconfig|config)\s+(%s)$" % opt_name)
        for root, dirs, files in os.walk(sdir):
            # Only subdirectories that actually contain a Config.in count.
            if 'Config.in' not in files or root == sdir:
                continue
            with open("%s/Config.in" % root, "r") as fh:
                first_line = fh.readline().strip()
            if define_re.match(first_line):
                opt_config = "%s/Config.in" % root
                break
    return (opt_name, opt_config)
def create_board_config_in(config_file, config_list):
    """ Create board/Config.in """
    # Generates a Kconfig "choice" menu with one entry per installed board,
    # and for the selected board sets AOS_BUILD_BOARD and pulls in the
    # board's MCU (and that MCU's architecture) Config.in files.
    with open (config_file, "w") as cf:
        cf.write("# %s\n" % autogen_start)
        cf.write("menu \"BSP Configuration\"\n")
        cf.write("choice\n")
        cf.write(" prompt \"Select board\"\n")
        cf.write("\n")
        for config in config_list:
            # Pull the board's AOS_BOARD_* symbol from its Config.in.
            patten = re.compile(r"^(menuconfig|config)\s+(AOS_BOARD_.*)$")
            config_name = ""
            with open(config, "r") as f:
                for line in f.readlines():
                    match = patten.match(line.strip())
                    if match:
                        config_name = match.group(2)
                        break
                else:
                    # for/else: runs only when no AOS_BOARD_* symbol was found.
                    print("Error: boardname empty!")
                    return 1
            # Derive the board directory name (dots instead of slashes).
            patten = re.compile(r".*board/(.*)/Config.in")
            match = patten.match(config)
            if match:
                boarddir = match.group(1).replace("/", ".")
            else:
                print("Error: can't get board directory")
            cf.write("source \"%s\"\n" % config)
            cf.write("if %s\n" % config_name)
            cf.write(" config AOS_BUILD_BOARD\n")
            cf.write(" default \"%s\"\n" % boarddir)
            cf.write("\n")
            # Chain in the MCU config this board selects, and in turn the
            # architecture config that MCU selects.
            (mcu_name, mcu_config) = get_opt_config(config, "AOS_MCU_", "platform/mcu")
            if mcu_config:
                cf.write(" source \"%s\"\n" % mcu_config)
                (arch_name, arch_config) = get_opt_config(mcu_config, "AOS_ARCH_", "platform/arch")
                if arch_config:
                    cf.write(" source \"%s\"\n" % arch_config)
            cf.write("endif\n")
            cf.write("\n")
        cf.write("endchoice\n")
        cf.write("\n")
        cf.write("endmenu\n")
        cf.write("# %s\n" % autogen_end)
def create_app_config_in(config_file, config_list):
    """ Create app/*/Config.in files """
    # Generates a Kconfig "choice" menu of the installed example or profile
    # apps (chosen by whether config_file lives under example/ or profile/),
    # and sets AOS_BUILD_APP for the selected one.
    with open (config_file, "w") as cf:
        cf.write("# %s\n" % autogen_start)
        if "example/" in config_file:
            cf.write("config AOS_APP_EXAMPLE\n")
            cf.write(" bool \"Builtin Examples\"\n")
            cf.write("\n")
            cf.write("if AOS_APP_EXAMPLE\n")
            cf.write("choice\n")
            cf.write(" prompt \"Select example\"\n")
        if "profile/" in config_file:
            cf.write("config AOS_APP_PROFILE\n")
            cf.write(" bool \"Builtin Profiles\"\n")
            cf.write("\n")
            cf.write("if AOS_APP_PROFILE\n")
            cf.write("choice\n")
            cf.write(" prompt \"Select profile\"\n")
        cf.write("\n")
        for config in config_list:
            # The app's component name comes from its aos.mk, which sits
            # next to its Config.in.
            mkfile = re.sub(r"Config.in", r"aos.mk", config)
            appname = get_comp_name(mkfile)
            if not appname:
                print("Error: Can't get app name from %s" % mkfile)
                return 1
            # Derive the app directory name (dots instead of slashes).
            patten = re.compile(r"app/(example|profile)/(.*)/Config.in")
            match = patten.match(config)
            if match:
                appdir = match.group(2).replace("/", ".")
            else:
                print("Error: can't get app directory")
            cf.write("source \"%s\"\n" % config)
            cf.write("if AOS_APP_%s\n" % appname.upper())
            cf.write(" config AOS_BUILD_APP\n")
            cf.write(" default \"%s\"\n" % appdir)
            cf.write("endif\n")
            cf.write("\n")
        cf.write("endchoice\n")
        cf.write("endif\n")
        cf.write("# %s\n" % autogen_end)
def update_top_config_in(top_config_in):
    """Extra update for build/Config.in: strip every line that mentions a
    NULL/Null placeholder component."""
    null_re = re.compile(r".*(NULL|Null).*")
    with open(top_config_in, "r") as fh:
        kept_lines = [ln for ln in fh.readlines() if not null_re.match(ln)]
    with open(top_config_in, "w") as fh:
        fh.writelines(kept_lines)
def main():
    """Collect all Config.in files in the tree, instantiate the Kconfig
    templates, filter out references to missing components, and generate
    the board and app menus plus the top-level cleanup."""
    # Must run from the source root (identified by build/Makefile).
    if not os.path.isfile("build/Makefile"):
        print("Error: %s must be run in Sources Root dir!\n" % sys.argv[0])
        return 1
    config_list = []
    source_root = "./"
    # Collect every Config.in already present in the tree (paths are made
    # relative to the source root).
    for root, dirs, files in os.walk(source_root):
        if 'Config.in' in files:
            config_file = "%s/Config.in" % root.replace(source_root, "")
            config_list += [config_file]
    # Instantiate templates: a template named "a.b.Config.in" is copied to
    # "a/b/Config.in" when that directory exists.
    templates = os.listdir(templatedir)
    for template in templates:
        destdir = re.sub(r"\.Config\.in", "", template)
        destdir = re.sub(r"\.", "/", destdir)
        sourcefile = os.path.join(templatedir, template)
        destfile = os.path.join(destdir, "Config.in")
        if os.path.isdir(destdir):
            if "linkkit/Config.in" in destfile:
                # Only install the linkkit template when the component's
                # aos.mk actually exists.
                if not os.path.isfile(re.sub(r"Config.in", "aos.mk", destfile)):
                    continue
            shutil.copyfile(sourcefile, destfile)
            config_list += [destfile]
    # Update config files according to installed comps
    for config_file in config_list:
        update_config_in(config_file, config_list)
    # Create board/Config.in
    board_config_list = []
    for config_file in config_list:
        if config_file.startswith("board/") and config_file != "board/Config.in":
            board_config_list += [config_file]
    if board_config_list:
        create_board_config_in(board_config_in, board_config_list)
    # Create app/example/Config.in
    example_config_list = []
    for config_file in config_list:
        if config_file.startswith("app/example") and config_file != "app/example/Config.in":
            example_config_list += [config_file]
    if example_config_list:
        create_app_config_in(example_config_in, example_config_list)
    # Create app/profile/Config.in
    profile_config_list = []
    for config_file in config_list:
        if config_file.startswith("app/profile") and config_file != "app/profile/Config.in":
            profile_config_list += [config_file]
    if profile_config_list:
        create_app_config_in(profile_config_in, profile_config_list)
    # Extra update for build/Config.in
    update_top_config_in(top_config_in)
# Regenerate the Config.in files only when executed as a script.
if __name__ == "__main__":
    main()
| 34.418327 | 99 | 0.550179 |
d0c3fdf097b5c1d74004c2a73eda727ffc5a05b3 | 4,897 | py | Python | dstapi.py | alemartinello/dstapi | 100f847e9d93a05eb4c648699b044e1c2bc9f6cb | [
"MIT"
] | 13 | 2021-12-30T23:35:52.000Z | 2022-03-25T12:39:36.000Z | dstapi.py | alemartinello/dstapi | 100f847e9d93a05eb4c648699b044e1c2bc9f6cb | [
"MIT"
] | null | null | null | dstapi.py | alemartinello/dstapi | 100f847e9d93a05eb4c648699b044e1c2bc9f6cb | [
"MIT"
] | 1 | 2022-01-06T09:18:11.000Z | 2022-01-06T09:18:11.000Z | """
Helper class to facilitate working with Statistics Denmark's API. See the
official API documentation here
https://www.dst.dk/da/Statistik/brug-statistikken/muligheder-i-statistikbanken/api
Author: Alessandro Tang-Andersen Martinello
"""
import requests
import warnings
import pandas as pd
from io import StringIO
class DstApi:
    """Helper around Statistics Denmark's (DST) StatBank HTTP API for a
    single published table. Table metadata is fetched lazily and cached on
    the instance (self.tableinfo)."""

    def __init__(self, tablename) -> None:
        """Bind this helper to the DST table named `tablename`.

        The table name is lower-cased, as the API expects. No network
        request is made until metadata or data is first needed.
        """
        self.apiip = "https://api.statbank.dk/v1"
        self.tablename = str(tablename).lower()
        # Cached /tableinfo response; None means "not fetched yet".
        self.tableinfo = None

    def tablesummary(self, verbose=True, language='da'):
        """
        Returns a summary of a published DST table containing the description of
        the table and of the variables according to which the values are
        reported.

        If `verbose`, also prints the table id, description, and last-update
        timestamp. `language` selects the metadata language (e.g. 'da'/'en').
        """
        # Get table info from API (cached after the first call)
        if self.tableinfo is None:
            self.tableinfo = self._get_tableinfo(language=language)
        # Make report
        if verbose:
            print(f"Table {self.tableinfo['id']}: {self.tableinfo['description']}")
            print(f"Last update: {self.tableinfo['updated']}")
        table = self._wrap_tableinfo_variables(self.tableinfo)
        return table

    def variable_levels(self, varname, language='da'):
        """
        Returns a DataFrame with the possible values of `varname` in the table.

        Note: variable names are case sensitive; on an unknown variable this
        prints an error message and returns the IndexError instance (it does
        not raise).
        """
        # Get table info from API (cached after the first call)
        if self.tableinfo is None:
            self.tableinfo = self._get_tableinfo(language=language)
        try:
            return pd.DataFrame(
                [i for i in self.tableinfo["variables"] if i["id"] == varname][0][
                    "values"
                ]
            )
        except IndexError as err:
            print(
                """
Error: The table does not seem to contain the requested variable.
Check the spelling (variable names are case sensitive
)"""
            )
            return err

    def get_data(self, params=None, language='da', as_DataFrame=True, override_warning=False):
        """
        Downloads table data according to API call specified in `params`. If
        `params` is None (default), parameters resulting in the download of the
        entire data table will be automatically generated, raising a warning.
        The function returns a Pandas DataFramse by default. Specify
        `as_DataFrame=False` to obtain the original `requests.Response` object.

        When `params` is None and `override_warning` is False, the user is
        prompted interactively before the (potentially massive) download.
        """
        if params is None:
            if override_warning is False:
                warnings.warn((
                    "API call parameters are not specified. Parameters resulting "
                    "in the download of the entire table will be automatically generated. "
                    "This can result in massive data downloads."
                ), stacklevel=2)
                answer = input("Continue (Y/Yes)?")
            else:
                answer = "yes"
            if answer.lower() in ["y", "yes"]:
                params = self._define_base_params(language=language)
            else:
                print("Execution aborted")
                return
        r = requests.post(self.apiip + "/data", json=params)
        if as_DataFrame:
            # BULK responses use ';' separators and ',' as decimal mark.
            return pd.read_csv(StringIO(r.text), sep=';', decimal=',')
        else:
            return r

    def _get_tableinfo(self, language='da'):
        """Fetch /tableinfo for this table, cache it on the instance, and
        return the parsed JSON dict."""
        tableinfo = self.tableinfo = requests.get(
            self.apiip + "/tableinfo",
            params={"id": self.tablename, "format": "JSON", 'lang': language}
        ).json()
        return tableinfo

    def _define_base_params(self, language='da'):
        """
        Return a parameter dictionary resulting in the download of an entire
        data table. Use with caution.
        """
        ts = self.tablesummary(verbose=False)
        # '*' selects every value of every variable.
        variables = [{'code': var, 'values': ['*']} for var in ts['variable name']]
        params = {
            'table': self.tablename,
            'format': 'BULK',
            'lang': language,
            'variables': variables
        }
        return params

    @staticmethod
    def _wrap_tableinfo_variables(tiresponse):
        """Flatten a /tableinfo response into a one-row-per-variable
        DataFrame (value count plus first/last value ids and labels)."""
        toplist = []
        for var in tiresponse["variables"]:
            vallist = [var["id"]]
            vallist.append(len(var["values"]))
            vallist.append(var["values"][0]["id"])
            vallist.append(var["values"][0]["text"])
            vallist.append(var["values"][-1]["id"])
            vallist.append(var["values"][-1]["text"])
            vallist.append(var["time"])
            toplist.append(vallist)
        return pd.DataFrame(
            toplist,
            columns=[
                "variable name",
                "# values",
                "First value",
                "First value label",
                "Last value",
                "Last value label",
                "Time variable",
            ],
        )
| 34.978571 | 94 | 0.559935 |
45e51345310104429fdb8a0e5e01e12832e1d036 | 5,158 | py | Python | unifold/model/tf/protein_features.py | nwod-edispu/Uni-Fold | 0ebfaf234807523067759d4c300694bb58cfb991 | [
"Apache-2.0"
] | null | null | null | unifold/model/tf/protein_features.py | nwod-edispu/Uni-Fold | 0ebfaf234807523067759d4c300694bb58cfb991 | [
"Apache-2.0"
] | null | null | null | unifold/model/tf/protein_features.py | nwod-edispu/Uni-Fold | 0ebfaf234807523067759d4c300694bb58cfb991 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Beijing DP Technology Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains descriptions of various protein features."""
import enum
from typing import Dict, Optional, Sequence, Tuple, Union
from unifold.common import residue_constants
import tensorflow.compat.v1 as tf
# Type aliases.
FeaturesMetadata = Dict[str, Tuple[tf.dtypes.DType, Sequence[Union[str, int]]]]
class FeatureType(enum.Enum):
  """Dimensionality category of a protein feature tensor."""
  ZERO_DIM = 0 # Shape [x]
  ONE_DIM = 1 # Shape [num_res, x]
  TWO_DIM = 2 # Shape [num_res, num_res, x]
  MSA = 3 # Shape [msa_length, num_res, x]
# Placeholder values that will be replaced with their true value at runtime.
NUM_RES = "num residues placeholder"
NUM_SEQ = "length msa placeholder"
NUM_TEMPLATES = "num templates placeholder"
# Sizes of the protein features, NUM_RES and NUM_SEQ are allowed as placeholders
# to be replaced with the number of residues and the number of sequences in the
# multiple sequence alignment, respectively.
# Registry mapping feature name -> (tf dtype, symbolic shape); placeholder
# strings in shapes are resolved by `shape()` below.
FEATURES = {
    #### Static features of a protein sequence ####
    "aatype": (tf.float32, [NUM_RES, 21]),
    "between_segment_residues": (tf.int64, [NUM_RES, 1]),
    "deletion_matrix": (tf.float32, [NUM_SEQ, NUM_RES, 1]),
    "domain_name": (tf.string, [1]),
    "msa": (tf.int64, [NUM_SEQ, NUM_RES, 1]),
    "num_alignments": (tf.int64, [NUM_RES, 1]),
    "residue_index": (tf.int64, [NUM_RES, 1]),
    "seq_length": (tf.int64, [NUM_RES, 1]),
    "sequence": (tf.string, [1]),
    "all_atom_positions": (tf.float32,
                           [NUM_RES, residue_constants.atom_type_num, 3]),
    "all_atom_mask": (tf.int64, [NUM_RES, residue_constants.atom_type_num]),
    "resolution": (tf.float32, [1]),
    "template_domain_names": (tf.string, [NUM_TEMPLATES]),
    "template_sum_probs": (tf.float32, [NUM_TEMPLATES, 1]),
    "template_aatype": (tf.float32, [NUM_TEMPLATES, NUM_RES, 22]),
    "template_all_atom_positions": (tf.float32, [
        NUM_TEMPLATES, NUM_RES, residue_constants.atom_type_num, 3
    ]),
    "template_all_atom_masks": (tf.float32, [
        NUM_TEMPLATES, NUM_RES, residue_constants.atom_type_num, 1
    ]),
}
# Derived lookups: feature name -> dtype, and feature name -> symbolic shape.
FEATURE_TYPES = {k: v[0] for k, v in FEATURES.items()}
FEATURE_SIZES = {k: v[1] for k, v in FEATURES.items()}
def register_feature(name: str,
                     type_: tf.dtypes.DType,
                     shape_: Tuple[Union[str, int]]):
  """Add a custom dataset feature's dtype and shape to the module registries."""
  entry = (type_, shape_)
  FEATURES[name] = entry
  FEATURE_TYPES[name] = entry[0]
  FEATURE_SIZES[name] = entry[1]
def shape(feature_name: str,
          num_residues: int,
          msa_length: int,
          num_templates: Optional[int] = None,
          features: Optional[FeaturesMetadata] = None):
  """Resolve the concrete shape for a named feature.

  Placeholder dimensions (NUM_RES, NUM_SEQ and, when supplied,
  NUM_TEMPLATES) are substituted with the given concrete values; fixed
  dimensions are kept as-is. Unlike size-based inference, no placeholder
  value is deduced from a total element count, and tensors are supported.

  Args:
    feature_name: String identifier for the feature. If the feature name
      ends with "_unnormalized", this suffix is stripped off.
    num_residues: Concrete value substituted for NUM_RES.
    msa_length: Concrete value substituted for NUM_SEQ. If the number of
      alignments is unknown / not read, please pass None for msa_length.
    num_templates (optional): Concrete value substituted for NUM_TEMPLATES.
    features: A feature_name to (tf_dtype, shape) lookup; defaults to
      FEATURES.

  Returns:
    List of ints representing the tensor size.

  Raises:
    ValueError: If the shape still contains a placeholder with no concrete
      value after substitution.
  """
  features = features or FEATURES
  suffix = "_unnormalized"
  if feature_name.endswith(suffix):
    feature_name = feature_name[:-len(suffix)]
  _, raw_sizes = features[feature_name]
  concrete = {NUM_RES: num_residues,
              NUM_SEQ: msa_length}
  if num_templates is not None:
    concrete[NUM_TEMPLATES] = num_templates
  resolved = [concrete.get(dimension, dimension) for dimension in raw_sizes]
  for dimension in resolved:
    if isinstance(dimension, str):
      raise ValueError("Could not parse %s (shape: %s) with values: %s" % (
          feature_name, raw_sizes, concrete))
  return resolved
| 39.676923 | 82 | 0.686506 |
3f80457245f047f57a9f3ca8049acfcc65d1b2f8 | 20,343 | py | Python | tflearn/optimizers.py | amongstar/https-github.com-tflearn-tflearn | 8af77b5aebcb8aba0f1b855201aed732906c6de8 | [
"MIT"
] | 2 | 2019-08-04T04:21:57.000Z | 2019-08-05T21:58:52.000Z | tflearn/optimizers.py | amongstar/https-github.com-tflearn-tflearn | 8af77b5aebcb8aba0f1b855201aed732906c6de8 | [
"MIT"
] | null | null | null | tflearn/optimizers.py | amongstar/https-github.com-tflearn-tflearn | 8af77b5aebcb8aba0f1b855201aed732906c6de8 | [
"MIT"
] | 6 | 2020-04-13T15:33:30.000Z | 2020-06-21T19:26:55.000Z | from __future__ import division, print_function, absolute_import
import tensorflow as tf
from .utils import get_from_module
def get(identifier):
    # Resolve an optimizer by name (or pass an instance/class through)
    # via the shared module-lookup helper.
    return get_from_module(identifier, globals(), 'optimizer')
class Optimizer(object):
    """ Base Optimizer class.

    Holds the configuration for a TFLearn optimizer. Construction only
    records the parameters; the TensorFlow `Optimizer` tensor is created
    lazily by `build` / `get_tensor`. This split lets TFLearn estimators
    attach a training-step tensor (needed for learning rate decay) before
    anything is built.

    Arguments:
        learning_rate: `float`. Learning rate.
        use_locking: `bool`. If True use locks for update operation.
        name: `str`. The optimizer name.

    Attributes:
        tensor: `Optimizer`. The optimizer tensor (None until built).
        has_decay: `bool`. True if optimizer has a learning rate decay.
    """

    def __init__(self, learning_rate, use_locking, name):
        self.learning_rate = learning_rate
        self.use_locking = use_locking
        self.name = name
        self.tensor = None
        self.has_decay = False
        self.built = False

    def build(self, step_tensor=None):
        """ Create the optimizer tensor; every `Optimizer` must implement it.

        Arguments:
            step_tensor: `tf.Tensor`. A variable holding the training step.
                Only necessary when the optimizer has a learning rate decay.
        """
        raise NotImplementedError

    def get_tensor(self):
        """ Return the optimizer tensor, building it on first access. """
        if not self.built:
            self.build()
        return self.tensor

    def __call__(self):
        """ Shortcut for `get_tensor`. """
        return self.get_tensor()
class SGD(Optimizer):
    """ Stochastic Gradient Descent.

    Supports exponential learning rate decay: lowering the learning rate as
    training progresses is often recommended. The decayed rate is computed
    as:

    ```python
    decayed_learning_rate = learning_rate *
        decay_rate ^ (global_step / decay_steps)
    ```

    Examples:
        ```python
        # With TFLearn estimators.
        sgd = SGD(learning_rate=0.01, lr_decay=0.96, decay_step=100)
        regression = regression(net, optimizer=sgd)
        # Without TFLearn estimators (returns tf.Optimizer).
        sgd = SGD(learning_rate=0.01).get_tensor()
        ```

    Arguments:
        learning_rate: `float`. Learning rate.
        lr_decay: `float`. The learning rate decay to apply.
        decay_step: `int`. Apply decay every provided steps.
        staircase: `bool`. It `True` decay learning rate at discrete intervals.
        use_locking: `bool`. If True use locks for update operation.
        name: `str`. Optional name prefix for the operations created when
            applying gradients. Defaults to "SGD".
    """

    def __init__(self, learning_rate=0.001, lr_decay=0., decay_step=100,
                 staircase=False, use_locking=False, name="SGD"):
        super(SGD, self).__init__(learning_rate, use_locking, name)
        self.lr_decay = lr_decay
        self.has_decay = lr_decay > 0.
        self.decay_step = decay_step
        self.staircase = staircase

    def build(self, step_tensor=None):
        self.built = True
        if self.has_decay:
            if not step_tensor:
                raise Exception("Learning rate decay but no step_tensor "
                                "provided.")
            # Replace the static rate with an exponentially decayed schedule.
            self.learning_rate = tf.train.exponential_decay(
                self.learning_rate, step_tensor,
                self.decay_step, self.lr_decay,
                staircase=self.staircase)
            tf.add_to_collection(tf.GraphKeys.LR_VARIABLES, self.learning_rate)
        self.tensor = tf.train.GradientDescentOptimizer(
            learning_rate=self.learning_rate,
            use_locking=self.use_locking,
            name=self.name)


# Shortcut
sgd = SGD
class RMSProp(Optimizer):
    """ RMSprop.

    Keeps a moving (discounted) average of the squared gradients and
    divides each gradient by the root of that average.

    Examples:
        ```python
        # With TFLearn estimators.
        rmsprop = RMSProp(learning_rate=0.1, decay=0.999)
        regression = regression(net, optimizer=rmsprop)
        # Without TFLearn estimators (returns tf.Optimizer).
        rmsprop = RMSProp(learning_rate=0.01, decay=0.999).get_tensor()
        # or
        rmsprop = RMSProp(learning_rate=0.01, decay=0.999)()
        ```

    Arguments:
        learning_rate: `float`. Learning rate.
        decay: `float`. Discounting factor for the history/coming gradient.
        momentum: `float`. Momentum.
        epsilon: `float`. Small value to avoid zero denominator.
        use_locking: `bool`. If True use locks for update operation.
        name: `str`. Optional name prefix for the operations created when
            applying gradients. Defaults to "RMSProp".
    """

    def __init__(self, learning_rate=0.001, decay=0.9, momentum=0.0,
                 epsilon=1e-10, use_locking=False, name="RMSProp"):
        super(RMSProp, self).__init__(learning_rate, use_locking, name)
        self.decay = decay
        self.momentum = momentum
        self.epsilon = epsilon

    def build(self, step_tensor=None):
        self.built = True
        self.tensor = tf.train.RMSPropOptimizer(
            learning_rate=self.learning_rate,
            decay=self.decay,
            momentum=self.momentum,
            epsilon=self.epsilon,
            use_locking=self.use_locking,
            name=self.name)


rmsprop = RMSProp
class Adam(Optimizer):
    """ Adam.

    Note that 1e-8 for epsilon may not be a good default everywhere; for
    example, when training an Inception network on ImageNet a current good
    choice is 1.0 or 0.1.

    Examples:
        ```python
        # With TFLearn estimators
        adam = Adam(learning_rate=0.001, beta1=0.99)
        regression = regression(net, optimizer=adam)
        # Without TFLearn estimators (returns tf.Optimizer)
        adam = Adam(learning_rate=0.01).get_tensor()
        ```

    Arguments:
        learning_rate: `float`. Learning rate.
        beta1: `float`. The exponential decay rate for the 1st moment
            estimates.
        beta2: `float`. The exponential decay rate for the 2nd moment
            estimates.
        epsilon: `float`. A small constant for numerical stability.
        use_locking: `bool`. If True use locks for update operation.
        name: `str`. Optional name prefix for the operations created when
            applying gradients. Defaults to "Adam".

    References:
        Adam: A Method for Stochastic Optimization. Diederik Kingma,
        Jimmy Ba. ICLR 2015.

    Links:
        [Paper](http://arxiv.org/pdf/1412.6980v8.pdf)
    """

    def __init__(self, learning_rate=0.001, beta1=0.9, beta2=0.999,
                 epsilon=1e-8, use_locking=False, name="Adam"):
        super(Adam, self).__init__(learning_rate, use_locking, name)
        self.beta1 = beta1
        self.beta2 = beta2
        self.epsilon = epsilon

    def build(self, step_tensor=None):
        self.built = True
        self.tensor = tf.train.AdamOptimizer(
            learning_rate=self.learning_rate,
            beta1=self.beta1,
            beta2=self.beta2,
            epsilon=self.epsilon,
            use_locking=self.use_locking,
            name=self.name)


adam = Adam
class Momentum(Optimizer):
    """ Momentum.

    SGD with momentum; supports exponential learning rate decay, computed
    as:

    ```python
    decayed_learning_rate = learning_rate *
        decay_rate ^ (global_step / decay_steps)
    ```

    Examples:
        ```python
        # With TFLearn estimators
        momentum = Momentum(learning_rate=0.01, lr_decay=0.96, decay_step=100)
        regression = regression(net, optimizer=momentum)
        # Without TFLearn estimators (returns tf.Optimizer)
        mm = Momentum(learning_rate=0.01, lr_decay=0.96).get_tensor()
        ```

    Arguments:
        learning_rate: `float`. Learning rate.
        momentum: `float`. Momentum.
        lr_decay: `float`. The learning rate decay to apply.
        decay_step: `int`. Apply decay every provided steps.
        staircase: `bool`. It `True` decay learning rate at discrete intervals.
        use_locking: `bool`. If True use locks for update operation.
        name: `str`. Optional name prefix for the operations created when
            applying gradients. Defaults to "Momentum".
    """

    def __init__(self, learning_rate=0.001, momentum=0.9, lr_decay=0.,
                 decay_step=100, staircase=False, use_locking=False,
                 name="Momentum"):
        super(Momentum, self).__init__(learning_rate, use_locking, name)
        self.momentum = momentum
        self.lr_decay = lr_decay
        self.has_decay = lr_decay > 0.
        self.decay_step = decay_step
        self.staircase = staircase

    def build(self, step_tensor=None):
        self.built = True
        if self.has_decay:
            if not step_tensor:
                raise Exception("Learning rate decay but no step_tensor "
                                "provided.")
            self.learning_rate = tf.train.exponential_decay(
                self.learning_rate, step_tensor,
                self.decay_step, self.lr_decay,
                staircase=self.staircase)
            tf.add_to_collection(tf.GraphKeys.LR_VARIABLES, self.learning_rate)
        self.tensor = tf.train.MomentumOptimizer(
            learning_rate=self.learning_rate,
            momentum=self.momentum,
            use_locking=self.use_locking,
            name=self.name)


momentum = Momentum
class AdaGrad(Optimizer):
    """ AdaGrad.

    Adapts the learning rate per parameter using accumulated squared
    gradients.

    Examples:
        ```python
        # With TFLearn estimators
        adagrad = AdaGrad(learning_rate=0.01, initial_accumulator_value=0.01)
        regression = regression(net, optimizer=adagrad)
        # Without TFLearn estimators (returns tf.Optimizer)
        adagrad = AdaGrad(learning_rate=0.01).get_tensor()
        ```

    Arguments:
        learning_rate: `float`. Learning rate.
        initial_accumulator_value: `float`. Starting value for the
            accumulators, must be positive.
        use_locking: `bool`. If True use locks for update operation.
        name: `str`. Optional name prefix for the operations created when
            applying gradients. Defaults to "AdaGrad".

    References:
        Adaptive Subgradient Methods for Online Learning and Stochastic
        Optimization. J. Duchi, E. Hazan & Y. Singer. Journal of Machine
        Learning Research 12 (2011) 2121-2159.

    Links:
        [Paper](http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf)
    """

    def __init__(self, learning_rate=0.001, initial_accumulator_value=0.1,
                 use_locking=False, name="AdaGrad"):
        super(AdaGrad, self).__init__(learning_rate, use_locking, name)
        self.initial_accumulator_value = initial_accumulator_value

    def build(self, step_tensor=None):
        self.built = True
        self.tensor = tf.train.AdagradOptimizer(
            self.learning_rate,
            initial_accumulator_value=self.initial_accumulator_value,
            use_locking=self.use_locking,
            name=self.name)


adagrad = AdaGrad
class Ftrl(Optimizer):
    """ Ftrl Proximal.

    The Ftrl-proximal (Follow-the-regularized-leader) algorithm, described
    in the paper linked below, can give a good performance vs. sparsity
    tradeoff. It uses its own global base learning rate and can behave like
    Adagrad with `learning_rate_power=-0.5`, or like gradient descent with
    `learning_rate_power=0.0`.

    Examples:
        ```python
        # With TFLearn estimators.
        ftrl = Ftrl(learning_rate=0.01, learning_rate_power=-0.1)
        regression = regression(net, optimizer=ftrl)
        # Without TFLearn estimators (returns tf.Optimizer).
        ftrl = Ftrl(learning_rate=0.01).get_tensor()
        ```

    Arguments:
        learning_rate: `float`. Learning rate.
        learning_rate_power: `float`. Must be less or equal to zero.
        initial_accumulator_value: `float`. The starting value for
            accumulators. Only positive values are allowed.
        l1_regularization_strength: `float`. Must be less or equal to zero.
        l2_regularization_strength: `float`. Must be less or equal to zero.
        use_locking: `bool`. If True use locks for update operation.
        name: `str`. Optional name prefix for the operations created when
            applying gradients. Defaults to "Ftrl".

    Links:
        [Ad Click Prediction: a View from the Trenches](https://www.eecs.tufts.
        edu/~dsculley/papers/ad-click-prediction.pdf)
    """

    def __init__(self, learning_rate=3.0, learning_rate_power=-0.5,
                 initial_accumulator_value=0.1, l1_regularization_strength=0.0,
                 l2_regularization_strength=0.0, use_locking=False,
                 name="Ftrl"):
        super(Ftrl, self).__init__(learning_rate, use_locking, name)
        self.learning_rate_power = learning_rate_power
        self.initial_accumulator_value = initial_accumulator_value
        self.l1_regularization_strength = l1_regularization_strength
        self.l2_regularization_strength = l2_regularization_strength

    def build(self, step_tensor=None):
        self.built = True
        # The Ftrl optimizer is pinned to the CPU.
        with tf.device('/cpu:0'):
            self.tensor = tf.train.FtrlOptimizer(
                self.learning_rate,
                learning_rate_power=self.learning_rate_power,
                initial_accumulator_value=self.initial_accumulator_value,
                l1_regularization_strength=self.l1_regularization_strength,
                l2_regularization_strength=self.l2_regularization_strength,
                use_locking=self.use_locking,
                name=self.name)


ftrl = Ftrl
class AdaDelta(Optimizer):
    """ AdaDelta.

    Construct a new Adadelta optimizer.

    Arguments:
        learning_rate: A `Tensor` or a floating point value. The learning rate.
        rho: A `Tensor` or a floating point value. The decay rate.
        epsilon: A `Tensor` or a floating point value. A constant epsilon used
            to better conditioning the grad update.
        use_locking: If `True` use locks for update operations.
        name: Optional name prefix for the operations created when applying
            gradients. Defaults to "AdaDelta".

    References:
        ADADELTA: An Adaptive Learning Rate Method, Matthew D. Zeiler, 2012.

    Links:
        [http://arxiv.org/abs/1212.5701](http://arxiv.org/abs/1212.5701)
    """

    def __init__(self, learning_rate=0.001, rho=0.1, epsilon=1e-08,
                 use_locking=False, name="AdaDelta"):
        super(AdaDelta, self).__init__(learning_rate, use_locking, name)
        self.rho = rho
        self.epsilon = epsilon

    def build(self, step_tensor=None):
        self.built = True
        self.tensor = tf.train.AdadeltaOptimizer(
            self.learning_rate,
            rho=self.rho,
            epsilon=self.epsilon,
            use_locking=self.use_locking,
            name=self.name)


adadelta = AdaDelta
class ProximalAdaGrad(Optimizer):
    """ ProximalAdaGrad.

    AdaGrad with proximal (l1/l2) regularization applied to the updates.

    Examples:
        ```python
        # With TFLearn estimators
        proxi_adagrad = ProximalAdaGrad(learning_rate=0.01,
                                        l2_regularization_strength=0.01,
                                        initial_accumulator_value=0.01)
        regression = regression(net, optimizer=proxi_adagrad)
        # Without TFLearn estimators (returns tf.Optimizer)
        adagrad = ProximalAdaGrad(learning_rate=0.01).get_tensor()
        ```

    Arguments:
        learning_rate: `float`. Learning rate.
        initial_accumulator_value: `float`. Starting value for the
            accumulators, must be positive.
        l1_regularization_strength: `float`. Must be greater or equal to zero.
        l2_regularization_strength: `float`. Must be greater or equal to zero.
        use_locking: `bool`. If True use locks for update operation.
        name: `str`. Optional name prefix for the operations created when
            applying gradients. Defaults to "AdaGrad".

    References:
        Efficient Learning using Forward-Backward Splitting. J. Duchi, Yoram
        Singer, 2009.

    Links:
        [Paper](http://papers.nips.cc/paper/3793-efficient-learning-using-forward-backward-splitting.pdf)
    """

    def __init__(self, learning_rate=0.001, initial_accumulator_value=0.1,
                 l1_regularization_strength=0.0,
                 l2_regularization_strength=0.0,
                 use_locking=False, name="AdaGrad"):
        super(ProximalAdaGrad, self).__init__(learning_rate, use_locking,
                                              name)
        self.initial_accumulator_value = initial_accumulator_value
        # New, backward-compatible knobs: with the 0.0 defaults the proximal
        # update is mathematically identical to plain AdaGrad.
        self.l1_regularization_strength = l1_regularization_strength
        self.l2_regularization_strength = l2_regularization_strength

    def build(self, step_tensor=None):
        self.built = True
        # Bug fix: previously this built a plain AdagradOptimizer, silently
        # dropping the proximal regularization this class is named for (the
        # class's own docstring example even passed a strength that was
        # never accepted).
        self.tensor = tf.train.ProximalAdagradOptimizer(
            self.learning_rate,
            initial_accumulator_value=self.initial_accumulator_value,
            l1_regularization_strength=self.l1_regularization_strength,
            l2_regularization_strength=self.l2_regularization_strength,
            use_locking=self.use_locking,
            name=self.name)


proximaladagrad = ProximalAdaGrad
class Nesterov(Optimizer):
    """ Nesterov.

    The main difference between classical momentum and Nesterov: classical
    momentum first corrects the velocity and then takes a big step along
    it, whereas Nesterov momentum first steps in the velocity direction and
    then corrects the velocity from the new location.
    See [Sutskever et. al., 2013](
    http://jmlr.org/proceedings/papers/v28/sutskever13.pdf)

    Examples:
        ```python
        # With TFLearn estimators
        nesterov = Nesterov(learning_rate=0.01, lr_decay=0.96, decay_step=100)
        regression = regression(net, optimizer=nesterov)
        # Without TFLearn estimators (returns tf.Optimizer)
        mm = Nesterov(learning_rate=0.01, lr_decay=0.96).get_tensor()
        ```

    Arguments:
        learning_rate: `float`. Learning rate.
        momentum: `float`. Momentum.
        lr_decay: `float`. The learning rate decay to apply.
        decay_step: `int`. Apply decay every provided steps.
        staircase: `bool`. It `True` decay learning rate at discrete intervals.
        use_locking: `bool`. If True use locks for update operation.
        name: `str`. Optional name prefix for the operations created when
            applying gradients. Defaults to "Nesterov".
    """

    def __init__(self, learning_rate=0.001, momentum=0.9, lr_decay=0.,
                 decay_step=100, staircase=False, use_locking=False,
                 name="Nesterov"):
        super(Nesterov, self).__init__(learning_rate, use_locking, name)
        self.momentum = momentum
        self.lr_decay = lr_decay
        self.has_decay = lr_decay > 0.
        self.decay_step = decay_step
        self.staircase = staircase

    def build(self, step_tensor=None):
        self.built = True
        if self.has_decay:
            if not step_tensor:
                raise Exception("Learning rate decay but no step_tensor "
                                "provided.")
            self.learning_rate = tf.train.exponential_decay(
                self.learning_rate, step_tensor,
                self.decay_step, self.lr_decay,
                staircase=self.staircase)
            tf.add_to_collection(tf.GraphKeys.LR_VARIABLES, self.learning_rate)
        self.tensor = tf.train.MomentumOptimizer(
            learning_rate=self.learning_rate,
            momentum=self.momentum,
            use_nesterov=True,
            use_locking=self.use_locking,
            name=self.name)


nesterov = Nesterov
| 35.752197 | 105 | 0.647004 |
9ee6e98f29e801ad531b8aaad9845325ec4face8 | 4,888 | py | Python | tests/test_decode_attribute_types.py | jnothman/liac-arff | 45fc0a87fe31e165fd912ed9973c5de3c345787b | [
"MIT"
] | 1 | 2021-05-04T18:01:51.000Z | 2021-05-04T18:01:51.000Z | tests/test_decode_attribute_types.py | jnothman/liac-arff | 45fc0a87fe31e165fd912ed9973c5de3c345787b | [
"MIT"
] | null | null | null | tests/test_decode_attribute_types.py | jnothman/liac-arff | 45fc0a87fe31e165fd912ed9973c5de3c345787b | [
"MIT"
] | null | null | null | import unittest
import arff
class TestDecodeAttributeTypes(unittest.TestCase):
    def get_decoder(self):
        decoder = arff.ArffDecoder()
        return decoder

    def _check(self, fixture, expected_name, expected_type):
        '''Decode ``fixture`` and verify the (name, type) pair it yields.'''
        result = self.get_decoder()._decode_attribute(fixture)
        self.assertEqual(len(result), 2)
        self.assertEqual(result[0], expected_name)
        if isinstance(expected_type, list):
            self.assertEqual(len(result[1]), len(expected_type))
            for actual, expected in zip(result[1], expected_type):
                self.assertEqual(actual, expected)
        else:
            self.assertEqual(result[1], expected_type)

    def test_numeric(self):
        '''Numeric attributes.'''
        # Simple case, then case-insensitive spelling.
        self._check(u'@ATTRIBUTE attribute-name NUMERIC',
                    u'attribute-name', u'NUMERIC')
        self._check(u'@ATTRIBUTE attribute-name NuMeriC',
                    u'attribute-name', u'NUMERIC')

    def test_real(self):
        '''Real attributes.'''
        self._check(u'@ATTRIBUTE attribute-name REAL',
                    u'attribute-name', u'REAL')
        self._check(u'@ATTRIBUTE attribute-name ReAl',
                    u'attribute-name', u'REAL')

    def test_integer(self):
        '''Integer attributes.'''
        self._check(u'@ATTRIBUTE attribute-name INTEGER',
                    u'attribute-name', u'INTEGER')
        self._check(u'@ATTRIBUTE attribute-name InteGeR',
                    u'attribute-name', u'INTEGER')

    def test_string(self):
        '''String attributes.'''
        self._check(u'@ATTRIBUTE attribute-name STRING',
                    u'attribute-name', u'STRING')
        self._check(u'@ATTRIBUTE attribute-name stRing',
                    u'attribute-name', u'STRING')

    def test_nominal(self):
        '''Nominal attributes.'''
        self._check(u'@ATTRIBUTE attribute-name {a, b, c}',
                    u'attribute-name', [u'a', u'b', u'c'])
        # Quoted values, embedded spaces and numeric-looking values.
        self._check(u'@ATTRIBUTE attribute-name {"name with spce", 1, lol,2 }',
                    u'attribute-name', [u'name with spce', u'1', u'lol', u'2'])

    def test_invalid_type(self):
        '''Invalid type name or structure.'''
        decoder = self.get_decoder()
        # Unknown type name, then a malformed nominal specification.
        for fixture in (u'@ATTRIBUTE attribute-name NON-EXIST',
                        u'@ATTRIBUTE attribute-name {1, 2] 3'):
            self.assertRaises(
                arff.BadAttributeType,
                decoder._decode_attribute,
                fixture
            )
| 33.251701 | 79 | 0.61027 |
55ab1662a009a8563438fcc14c34039775db935e | 6,729 | py | Python | wpkit/web/utils.py | Peiiii/wpkit | 23a07548be766b559b80e3114ecc24e3f2f65ea5 | [
"MIT"
] | null | null | null | wpkit/web/utils.py | Peiiii/wpkit | 23a07548be766b559b80e3114ecc24e3f2f65ea5 | [
"MIT"
] | null | null | null | wpkit/web/utils.py | Peiiii/wpkit | 23a07548be766b559b80e3114ecc24e3f2f65ea5 | [
"MIT"
] | null | null | null | from wpkit import piu
from wpkit import pkg_info
from wpkit.basic import join_path,IterObject,SecureDirPath,PointDict,Path,DirPath,PowerDirPath,Status,StatusSuccess,StatusError
from wpkit.basic import render_template as render
from flask import request,render_template,redirect,make_response,jsonify
import functools,inspect
from jinja2 import Environment,PackageLoader
env=Environment(loader=PackageLoader('wpkit.data','templates'))
import inspect
def log_func(msg="*** running %s ...."):
    """Decorator factory: print `msg` before each call of the decorated
    function. If `msg` contains "%s" it is formatted with the function's
    name. (Dead commented-out implementation removed.)"""
    def before(func):
        print(msg % (func.__name__) if "%s" in msg else msg)
    return config_run(before=before)
def config_run(before=None, after=None):
    """Decorator factory that runs `before` prior to, and `after` following,
    each call of the decorated function.

    Each hook may be:
      * None -- skipped;
      * a callable -- invoked; if its signature accepts a `func` argument it
        receives the decorated function as keyword `func`;
      * anything else -- printed.
    """
    def _run_hook(hook, func):
        # Shared hook dispatch (the before/after bodies were duplicated
        # copy-paste in the original).
        if not hook:
            return
        if callable(hook):
            if 'func' in inspect.getfullargspec(hook).args:
                hook(func=func)
            else:
                hook()
        else:
            print(hook)

    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            _run_hook(before, func)
            res = func(*args, **kwargs)
            _run_hook(after, func)
            return res
        return wrapper
    return decorator
def rename_func(name):
    """Decorator factory: return a wrapper for the decorated function whose
    `__name__` is `name`."""
    def decorator(func):
        @functools.wraps(func)
        def new_func(*args, **kwargs):
            return func(*args, **kwargs)
        # Bug fix: rename only the wrapper. Previously func.__name__ itself
        # was overwritten before wrapping, mutating the original function as
        # a side effect.
        new_func.__name__ = name
        return new_func
    return decorator
def parse_from(*refers):
    """Decorator factory: fill the decorated function's named parameters
    from dict-like sources (or callables returning them); later sources
    override earlier ones, and explicit keyword arguments override both.
    Parameters absent from every source default to None."""
    def decorator(f):
        wanted = inspect.getfullargspec(f).args

        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            merged = {}
            for source in refers:
                data = source() if callable(source) else dict(source)
                if data:
                    merged.update(data)
            params = {arg: merged.get(arg, None) for arg in wanted}
            params.update(kwargs)
            return f(*args, **params)
        return wrapper
    return decorator
# Accessors for the current flask request's data sources; each returns a
# dict-like object suitable as a `parse_from` source (valid only inside a
# request context).
def get_form():return request.form
def get_json():return request.json
def get_cookies():return request.cookies
# parse_json is a decorator
# Ready-made decorators that inject request data into view-function
# arguments by parameter name.
parse_json_and_form=parse_from(get_json,get_form)
parse_json=parse_from(get_json)
parse_form=parse_from(get_form)
parse_cookies=parse_from(get_cookies)
parse_all=parse_from(get_cookies,get_form,get_json)
def log(*msgs):
    # Print the messages, space-separated, behind a starred "log" banner.
    print("log".center(10, '*') + ":" + ' '.join(str(msg) for msg in msgs))
class UserManager:
    """Minimal user-account helper backed by a `piu` key-value store, with
    flask views for signup/login and jinja-rendered pages.

    NOTE(review): passwords are stored, compared and echoed into cookies in
    plain text -- acceptable only for demos; hash passwords and sign cookies
    before any real use.
    """
    # Status strings reported back to the client in JSON responses.
    __status_succeeded__='succeeded'
    __status_failed__='failed'
    def __init__(self,dbpath='./data/user_db',home_url='/'):
        # dbpath: on-disk location of the piu store; home_url: redirect
        # target after a successful signup/login.
        self.db=piu.Piu(dbpath)
        self.home_url=home_url
    def exists_user(self,email):
        # True when the email has a record in the store.
        if not self.get_user(email):return False
        return True
    def users(self):
        # All registered email addresses (the store's keys).
        return self.db.keys()
    def add_user(self,email,info={}):
        # NOTE(review): mutable default `info` is shared across calls; if piu
        # stores it by reference, users may end up sharing one dict -- verify.
        self.db.add(email,info)
    def get_user(self,email):
        # Returns the stored info dict, or None when unknown.
        return self.db.get(email,None)
    def update_user(self,email,info={}):
        # Create-on-miss, then merge `info` into the stored record.
        if not self.get_user(email):
            self.add_user(email)
        self.db.get(email).update(info)
    def status(self,status,**kwargs):
        # JSON response {"status": ..., **kwargs}. (Parameter `status`
        # shadows the method name inside this scope.)
        return jsonify(dict(status=status,**kwargs))
    # Page renderers; extra kwargs are passed straight to the templates.
    def home_page(self,**kwargs):
        return env.get_template('pan.html').render(signup=True, **kwargs)
    def signup_page(self,**kwargs):
        return env.get_template('sign3.html').render(signup=True, **kwargs)
    def login_page(self,**kwargs):
        return env.get_template('sign3.html').render(login=True,**kwargs)
    def error_page(self,**kwargs):
        return env.get_template('error.html').render(**kwargs)
    def login_required(self,f):
        # View decorator: reads credentials from cookies (via @parse_cookies)
        # and only calls the wrapped view when they match the stored user.
        @functools.wraps(f)
        @parse_cookies
        def wrapper(user_email,user_password,*args,**kwargs):
            # No cookies at all -> ask the user to log in.
            if not (user_email and user_password):
                return self.login_page()
            user=self.get_user(user_email)
            user=PointDict.from_dict(user) if user else user
            # Unknown account -> offer signup.
            if not user:
                return self.signup_page()
            if user and (user.user_email == user_email ) and (user.user_password==user_password):
                return f(*args,**kwargs)
            else:
                # return self.login_page()
                return self.error_page()
        return wrapper
    def signup(self):
        # Flask view body: register the posted credentials and set cookies.
        @parse_form
        def do_signup(user_email,user_password):
            log("sign up:",user_email,user_password)
            # if self.db.get(user_email,None):return self.signup_page(msg='Email has been taken.')
            if self.db.get(user_email,None):
                msg="Email has been taken"
                log(msg)
                return jsonify(StatusError(msg=msg))
            self.add_user(user_email,{'user_email':user_email,'user_password':user_password})
            log(self.db.get(user_email))
            resp=make_response(self.status(status=self.__status_succeeded__,redirect=self.home_url))
            # NOTE(review): plaintext credentials placed in cookies.
            resp.set_cookie('user_email',user_email)
            resp.set_cookie('user_password',user_password)
            return resp
        return do_signup()
    def login(self,redirect_to=None):
        # Returns a deferred view callable; invoking it processes the posted
        # form, checks the account exists and sets credential cookies.
        def decorator():
            @log_func()
            @parse_form
            def do_login(user_email, user_password):
                log("log***:", user_email, user_password)
                if not self.db.get(user_email, None):
                    msg = "Email doesn't exists."
                    print(msg)
                    return self.status(self.__status_failed__, msg=msg)
                # NOTE(review): only account existence is checked here -- the
                # password is not verified before cookies are issued; confirm
                # whether that is intentional.
                resp = make_response(self.home_page()) if not redirect_to else redirect_to
                resp.set_cookie('user_email', user_email)
                resp.set_cookie('user_password', user_password)
                log("resp:",resp)
                return resp
            return do_login()
        return decorator
| 36.372973 | 127 | 0.596671 |
4bf057b3259a2fa79c7ec42bc14bce97a8adff63 | 1,502 | py | Python | examples/pylab_examples/date_demo1.py | nkoep/matplotlib | 6ed04252994443a4cecf95f0da0efedb6d514b38 | [
"MIT",
"BSD-3-Clause"
] | 8 | 2017-04-11T08:55:30.000Z | 2022-03-25T04:31:26.000Z | examples/pylab_examples/date_demo1.py | epgauss/matplotlib | c9898ea9a30c67c579ab27cd61b68e2abae0fb0e | [
"MIT",
"BSD-3-Clause"
] | null | null | null | examples/pylab_examples/date_demo1.py | epgauss/matplotlib | c9898ea9a30c67c579ab27cd61b68e2abae0fb0e | [
"MIT",
"BSD-3-Clause"
] | 14 | 2015-10-05T04:15:46.000Z | 2020-06-11T18:06:02.000Z | #!/usr/bin/env python
"""
Show how to make date plots in matplotlib using date tick locators and
formatters. See major_minor_demo1.py for more information on
controlling major and minor ticks
All matplotlib date plotting is done by converting date instances into
days since the 0001-01-01 UTC. The conversion, tick locating and
formatting is done behind the scenes so this is most transparent to
you. The dates module provides several converter functions date2num
and num2date
This example requires an active internet connection since it uses
yahoo finance to get the data for plotting
"""
import matplotlib.pyplot as plt
from matplotlib.finance import quotes_historical_yahoo_ochl
from matplotlib.dates import YearLocator, MonthLocator, DateFormatter
import datetime
# Plot window: Intel opening prices from 1995 through April 2004.
date1 = datetime.date(1995, 1, 1)
date2 = datetime.date(2004, 4, 12)

# Tick helpers: major ticks on year boundaries, minor ticks on months.
years = YearLocator()  # every year
months = MonthLocator()  # every month
yearsFmt = DateFormatter('%Y')

# Daily quotes fetched from Yahoo Finance (requires network access).
quotes = quotes_historical_yahoo_ochl(
    'INTC', date1, date2)
if len(quotes) == 0:
    raise SystemExit

dates = [q[0] for q in quotes]  # matplotlib date numbers
opens = [q[1] for q in quotes]  # opening prices

fig, ax = plt.subplots()
ax.plot_date(dates, opens, '-')

# format the ticks
ax.xaxis.set_major_locator(years)
ax.xaxis.set_major_formatter(yearsFmt)
ax.xaxis.set_minor_locator(months)
ax.autoscale_view()

# format the coords message box
def price(x): return '$%1.2f'%x
ax.fmt_xdata = DateFormatter('%Y-%m-%d')
ax.fmt_ydata = price
ax.grid(True)
fig.autofmt_xdate()
plt.show()
| 27.814815 | 70 | 0.767643 |
41063e45ec1a2246ffab8aabe339172c2236af90 | 354 | py | Python | demo/settings/settings-prod.py | terrluo/nginx_uwsgi_django | aa86e453316f5f7301f42571c4b9b1cd0b046d4e | [
"MIT"
] | null | null | null | demo/settings/settings-prod.py | terrluo/nginx_uwsgi_django | aa86e453316f5f7301f42571c4b9b1cd0b046d4e | [
"MIT"
] | 6 | 2020-04-23T06:31:27.000Z | 2022-02-10T09:56:09.000Z | demo/settings/settings-prod.py | terrluo/nginx_uwsgi_django | aa86e453316f5f7301f42571c4b9b1cd0b046d4e | [
"MIT"
] | 1 | 2019-08-23T09:15:50.000Z | 2019-08-23T09:15:50.000Z | #!/usr/bin/python
from .settings import *

# Production overrides: debugging off, MySQL credentials read from env vars.
DEBUG = False

# NOTE(review): '*' accepts any Host header — confirm this is intended in prod.
ALLOWED_HOSTS = ['*']

_env = os.environ

# All DB_* variables are required; a missing one raises KeyError at import.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': _env['DB_NAME'],
        'USER': _env['DB_USER'],
        'PASSWORD': _env['DB_PASS'],
        'HOST': _env['DB_HOST'],
        'PORT': _env['DB_PORT'],
    }
}
| 17.7 | 45 | 0.531073 |
b3e5df86351fbc86d90e12e988c3838846674998 | 1,573 | py | Python | ts.py | concreted/tspy | 3cab31833225da7daedc3ef457cd00ea9465f1d5 | [
"MIT"
] | 1 | 2016-11-07T01:03:42.000Z | 2016-11-07T01:03:42.000Z | ts.py | concreted/tspy | 3cab31833225da7daedc3ef457cd00ea9465f1d5 | [
"MIT"
] | null | null | null | ts.py | concreted/tspy | 3cab31833225da7daedc3ef457cd00ea9465f1d5 | [
"MIT"
] | 2 | 2016-06-21T19:25:54.000Z | 2018-09-21T01:10:18.000Z | import sys
from math import sqrt
def distance(a, b):
    """Euclidean distance between two 2-D points given as (x, y) pairs."""
    ax, ay = a
    bx, by = b
    return sqrt((ax - bx) ** 2 + (ay - by) ** 2)
def create_graph(count, points):
    """Build a count x count adjacency matrix of pairwise point distances."""
    return [[distance(points[i], points[j]) for j in range(count)]
            for i in range(count)]
def print_graph(count, graph):
    # Dump the adjacency matrix, then every directed edge i-->j with its
    # weight (Python 2 print statements; weights are shown truncated via %i).
    print "Count: %i" % count
    print graph
    for i in range(count):
        for j in range(count):
            if i != j:
                print "%i-->%i: %i" % (i, j, graph[i][j])
    print
# B: calculate best result for a subgraph
def B(start, X, end, graph):
    """Cheapest cost of a path start -> ... -> end visiting every node in X.

    Naive recursive Held-Karp recurrence: with X empty it is the direct
    edge; otherwise try every x in X as the last stop before end.
    """
    if not X:
        return graph[start][end]
    return min(B(start, X - {x}, x, graph) + graph[x][end] for x in X)
# Main TSP function
def TSP(num_nodes, graph):
    """Cost of the cheapest tour 0 -> ... -> 0 visiting every node once."""
    # C: set of all nodes
    everything = set(range(num_nodes))
    others = everything - {0}
    return min(B(0, others - {t}, t, graph) + graph[t][0] for t in others)
def main():
    # Entry point: read "<count>" then one "x y" point per line from the
    # file named in argv[1], or fall back to a built-in 4-node example
    # given directly as an adjacency matrix.
    if len(sys.argv) > 1:
        points = []
        for line in open(sys.argv[1]):
            entry = line.rstrip('\n').split(' ')
            entry = [float(x) for x in entry]
            points.append(entry)
        # the first row of the file holds the node count
        count = int(points.pop(0)[0])
        graph = create_graph(count, points)
    else:
        count = 4
        graph = [[0,2,1,3], [2,0,4,5], [1,4,0,6], [3,5,6,0]]
    print_graph(count, graph)
    print TSP(count, graph)

# Script entry point (note: no `if __name__ == "__main__"` guard).
main()
| 22.797101 | 104 | 0.51494 |
fb6c5237abde9abad98c6eccc1072b81a415e24a | 3,710 | py | Python | contrib/macdeploy/custom_dsstore.py | microftech65/syscoin | f1009e323a153095e1151c3abe173c8fb801b16c | [
"MIT"
] | null | null | null | contrib/macdeploy/custom_dsstore.py | microftech65/syscoin | f1009e323a153095e1151c3abe173c8fb801b16c | [
"MIT"
] | null | null | null | contrib/macdeploy/custom_dsstore.py | microftech65/syscoin | f1009e323a153095e1151c3abe173c8fb801b16c | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2013-2018 The Syscoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import biplist
from ds_store import DSStore
from mac_alias import Alias
import sys
# CLI args: path of the .DS_Store to write, and the DMG volume name.
output_file = sys.argv[1]
package_name_ns = sys.argv[2]

ds = DSStore.open(output_file, 'w+')
# 'bwsp' = Finder browser window settings for the DMG root folder.
ds['.']['bwsp'] = {
    'ShowStatusBar': False,
    'WindowBounds': '{{300, 280}, {500, 343}}',
    'ContainerShowSidebar': False,
    'SidebarWidth': 0,
    'ShowTabView': False,
    'PreviewPaneVisibility': False,
    'ShowToolbar': False,
    'ShowSidebar': False,
    'ShowPathbar': True
}
icvp = {
'gridOffsetX': 0.0,
'textSize': 12.0,
'viewOptionsVersion': 1,
'backgroundImageAlias': b'\x00\x00\x00\x00\x02\x1e\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xd1\x94\\\xb0H+\x00\x05\x00\x00\x00\x98\x0fbackground.tiff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x99\xd19\xb0\xf8\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\r\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b.background\x00\x00\x10\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x11\x00\x08\x00\x00\xd19\xb0\xf8\x00\x00\x00\x01\x00\x04\x00\x00\x00\x98\x00\x0e\x00 \x00\x0f\x00b\x00a\x00c\x00k\x00g\x00r\x00o\x00u\x00n\x00d\x00.\x00t\x00i\x00f\x00f\x00\x0f\x00\x02\x00\x00\x00\x12\x00\x1c/.background/background.tiff\x00\x14\x01\x06\x00\x00\x00\x00\x01\x06\x00\x02\x00\x00\x0cMacintosh HD\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xce\x97\xab\xc3H+\x00\x00\x01\x88[\x88\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02u\xab\x8d\xd1\x94\\\xb0devrddsk\xff\xff\xff\xff\x00\x00\t \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07syscoin\x00\x00\x10\x00\x08\x00\x00\xce\x97\xab\xc3\x00\x00\x00\x11\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x01\x00\x14\x01\x88[\x88\x00\x16\xa9\t\x00\x08\xfaR\x00\x08\xfaQ\x00\x02d\x8e\x00\x0e\x00\x02\x00\x00\x00\x0f\x00\x1a\x00\x0c\x00M\x00a\x00c\x00i\x00n\x00t\x00o\x00s\x00h\x00 \x00H\x00D\x00\x13\x00\x01/\x00\x00\x15\x00\x02\x00\x14\xff\xff\x00\x00\xff\xff\x00\x00',
'backgroundColorBlue': 1.0,
'iconSize': 96.0,
'backgroundColorGreen': 1.0,
'arrangeBy': 'none',
'showIconPreview': True,
'gridSpacing': 100.0,
'gridOffsetY': 0.0,
'showItemInfo': False,
'labelOnBottom': True,
'backgroundType': 2,
'backgroundColorRed': 1.0
}
# Rewrite the template background-image alias so it points at this build's
# volume name and .temp.dmg path, then store it back into the icvp record.
alias = Alias.from_bytes(icvp['backgroundImageAlias'])
alias.volume.name = package_name_ns
alias.volume.posix_path = '/Volumes/' + package_name_ns
alias.volume.disk_image_alias.target.filename = package_name_ns + '.temp.dmg'
alias.volume.disk_image_alias.target.carbon_path = 'Macintosh HD:Users:\x00syscoinuser:\x00Documents:\x00syscoin:\x00syscoin:\x00' + package_name_ns + '.temp.dmg'
alias.volume.disk_image_alias.target.posix_path = 'Users/syscoinuser/Documents/syscoin/syscoin/' + package_name_ns + '.temp.dmg'
alias.target.carbon_path = package_name_ns + ':.background:\x00background.tiff'
icvp['backgroundImageAlias'] = biplist.Data(alias.to_bytes())
ds['.']['icvp'] = icvp

ds['.']['vSrn'] = ('long', 1)

# Icon positions inside the DMG window.
ds['Applications']['Iloc'] = (370, 156)
ds['Syscoin-Qt.app']['Iloc'] = (128, 156)

ds.flush()
ds.close()
| 61.833333 | 1,817 | 0.724259 |
3977caea49e5441e59605613dab9ae554e8588cd | 438 | py | Python | server/server.py | cntnboys/410Lab3 | f8cbd2fd41cd99ac0e6c1047b5f574df4f3cf437 | [
"Apache-2.0"
] | null | null | null | server/server.py | cntnboys/410Lab3 | f8cbd2fd41cd99ac0e6c1047b5f574df4f3cf437 | [
"Apache-2.0"
] | null | null | null | server/server.py | cntnboys/410Lab3 | f8cbd2fd41cd99ac0e6c1047b5f574df4f3cf437 | [
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
#got this from https://pointlessprogramming.wordpress.com/2011/02/13/python-cgi-tutorial-2/
# Minimal CGI-capable HTTP server (Python 2 stdlib modules); serves CGI
# scripts from ./cgi on port 8888 and enables cgitb tracebacks in the browser.
import BaseHTTPServer
import CGIHTTPServer
import cgitb; cgitb.enable()

server = BaseHTTPServer.HTTPServer
handler = CGIHTTPServer.CGIHTTPRequestHandler
server_address = ("", 8888)
handler.cgi_directories = ["/cgi"]

httpd = server(server_address, handler)
print("Starting server..........")
httpd.serve_forever()
| 19.909091 | 91 | 0.760274 |
ef0a3cff789384f55c7648ecf1a3ab20b011d274 | 3,509 | py | Python | app.py | zujiancai/AccountAggregator | 01827a3382814d2253cef66d0fe63dd685cbf6fc | [
"MIT"
] | null | null | null | app.py | zujiancai/AccountAggregator | 01827a3382814d2253cef66d0fe63dd685cbf6fc | [
"MIT"
] | null | null | null | app.py | zujiancai/AccountAggregator | 01827a3382814d2253cef66d0fe63dd685cbf6fc | [
"MIT"
] | null | null | null | from decimal import Decimal
from flask import Flask, request, render_template
import json
from banking import Banking
from common import ColName, SummarizeName, CategoryName
from loader import range_query
app = Flask(__name__)
def error_response(message: str, error: int):
    """Render the shared error template and return it with the HTTP status."""
    body = render_template('error.html', code=error, desc=message)
    return body, error
@app.template_filter()
def dectojson(input):
    """Jinja filter: JSON-serialize *input*, coercing Decimal values to float."""
    def _coerce(value):
        # json.dumps calls this only for objects it cannot serialize itself.
        if isinstance(value, Decimal):
            return float(value)
        raise TypeError
    return json.dumps(input, default=_coerce)
@app.route('/')
@app.route('/banking')
def banking():
    """Summary page; an optional ?date=<range> narrows the summarized period."""
    date_range = request.args.get('date')
    has_range = bool(date_range)
    data = Banking(date_range) if has_range else Banking()
    label = date_range if has_range else SummarizeName.ALL
    print('[{0}]'.format(label))
    return render_template('banking.html', bkg=data, curr=label)
@app.route('/records', methods=['GET', 'POST'])
def records():
    # Transaction listing. POST carries an explicit query/sort form;
    # GET builds a pandas query string from ?date=/?cat=/?type= parameters.
    query_str = ''
    sort_by = ColName.ID
    sort_asc = True
    show_rows = 20
    if request.method == 'POST':
        query_str = request.form['querystr']
        sort_by = request.form.get('sortby')
        # checkbox list: non-empty list (box checked) means ascending
        sort_asc = bool(request.form.getlist('sortasc'))
        show_rows = int(request.form.get('showrows'))
    else:
        datestr = request.args.get('date')
        category = request.args.get('cat')
        iostr = request.args.get('type')
        if datestr and len(datestr) > 0:
            date_query = range_query(datestr, ColName.DATE)
            # range_query echoes its input back unchanged when it cannot parse it
            if date_query == datestr:
                return error_response('Cannot parse as date or time period: {}.'.format(datestr), 400)
            else:
                query_str = date_query
        if category and len(category) > 0:
            # all category constants declared on CategoryName (skip dunders)
            cats = [getattr(CategoryName, x) for x in dir(CategoryName) if not x.startswith('__')]
            if query_str and len(query_str) > 0:
                query_str += ' and '
            else:
                query_str = ''
            category = category.lower()
            if category in cats:
                query_str += '{0} == "{1}"'.format(ColName.CATEGORY, category)
            else:
                return error_response('Unsupported category name: {}.'.format(category), 400)
        if iostr and len(iostr) > 0:
            if query_str and len(query_str) > 0:
                query_str += ' and '
            else:
                query_str = ''
            iostr = iostr.lower()
            # sign of AMOUNT distinguishes income (>= 0) from expense (< 0)
            if iostr.lower() == 'income':
                query_str += '{0} >= 0'.format(ColName.AMOUNT)
            elif iostr.lower() == 'expense':
                query_str += '{0} < 0'.format(ColName.AMOUNT)
            else:
                return error_response('Unsupported type name: {}. Please use income or expense.'.format(iostr), 400)
        # explicit filters: show everything; otherwise cap the listing at 20 rows
        show_rows = 0 if len(query_str) > 0 else 20
    if query_str and len(query_str) > 0:
        trx = Banking(query_str).transactions
    else:
        trx = Banking().transactions
    trx = trx.sort_values(by=[sort_by], ascending=sort_asc, ignore_index=True)
    result_count = len(trx)
    if show_rows > 0 and result_count > show_rows:
        trx = trx.head(show_rows)
    # double quotes are swapped for single so the query can be echoed in HTML
    return render_template('records.html', cols=list(trx), data=list(trx.values), query=query_str.replace('"', "'"), sortby=sort_by, asc=sort_asc, \
        rows=show_rows, total=result_count, showed=len(trx))
@app.route('/about')
def about():
    # Static "about" page; no data passed to the template.
    return render_template('about.html')
| 36.936842 | 148 | 0.603306 |
30219bb7e48ff7dff026d7ef060024d72be39012 | 12,464 | py | Python | qa/rpc-tests/util.py | TeamGastroCoin/gastrocoin | dd0c6a5add7a5bec064943aca21a734c49cdd8ad | [
"MIT"
] | null | null | null | qa/rpc-tests/util.py | TeamGastroCoin/gastrocoin | dd0c6a5add7a5bec064943aca21a734c49cdd8ad | [
"MIT"
] | 1 | 2021-11-28T11:22:58.000Z | 2021-11-28T11:22:58.000Z | qa/rpc-tests/util.py | TeamGastroCoin/gastrocoin | dd0c6a5add7a5bec064943aca21a734c49cdd8ad | [
"MIT"
] | 3 | 2021-03-25T00:32:22.000Z | 2021-05-18T18:56:15.000Z | # Copyright (c) 2014 The Bitcoin Core developers
# Copyright (c) 2014-2015 The Dash developers
# Copyright (c) 2021-2021 The GastroCoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Helpful routines for regression testing
#
# Add python-bitcoinrpc to module search path:
import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "python-bitcoinrpc"))
from decimal import Decimal, ROUND_DOWN
import json
import random
import shutil
import subprocess
import time
import re
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
def p2p_port(n):
    """P2P listen port for test node *n*; PID-offset so parallel runs don't clash."""
    base = 11000 + os.getpid() % 999
    return base + n
def rpc_port(n):
    """RPC listen port for test node *n*; PID-offset so parallel runs don't clash."""
    base = 12000 + os.getpid() % 999
    return base + n
def check_json_precision():
    """Make sure json library being used does not lose precision converting BTC values"""
    value = Decimal("20000000.00000003")
    round_tripped = int(json.loads(json.dumps(float(value))) * 1.0e8)
    if round_tripped != 2000000000000003:
        raise RuntimeError("JSON encode/decode loses precision")
def sync_blocks(rpc_connections):
    """
    Wait until everybody has the same block count
    """
    while True:
        heights = [conn.getblockcount() for conn in rpc_connections]
        # all connections agree (or the list is empty) -> done
        if len(set(heights)) <= 1:
            break
        time.sleep(1)
def sync_mempools(rpc_connections):
    """
    Wait until everybody has the same transactions in their memory
    pools
    """
    while True:
        reference = set(rpc_connections[0].getrawmempool())
        # count how many of the remaining nodes match node 0's pool
        matches = 1 + sum(
            1 for conn in rpc_connections[1:]
            if set(conn.getrawmempool()) == reference
        )
        if matches == len(rpc_connections):
            break
        time.sleep(1)
# Map of node index -> subprocess.Popen handle for each running gastrocoind.
bitcoind_processes = {}
def initialize_datadir(dirname, n):
    # Create <dirname>/node<n> and write a regtest gastrocoin.conf whose
    # p2p/rpc ports are PID-offset so parallel test runs do not collide.
    datadir = os.path.join(dirname, "node"+str(n))
    if not os.path.isdir(datadir):
        os.makedirs(datadir)
    with open(os.path.join(datadir, "gastrocoin.conf"), 'w') as f:
        f.write("regtest=1\n");
        f.write("rpcuser=rt\n");
        f.write("rpcpassword=rt\n");
        f.write("port="+str(p2p_port(n))+"\n");
        f.write("rpcport="+str(rpc_port(n))+"\n");
    return datadir
def initialize_chain(test_dir):
    """
    Create (or copy from cache) a 200-block-long chain and
    4 wallets.
    gastrocoind and gastrocoin-cli must be in search path.
    """
    # Cache miss: build the chain once under ./cache, then copy per test run.
    if not os.path.isdir(os.path.join("cache", "node0")):
        devnull = open("/dev/null", "w+")
        # Create cache directories, run gastrocoind:
        for i in range(4):
            datadir=initialize_datadir("cache", i)
            args = [ os.getenv("BITCOIND", "gastrocoind"), "-keypool=1", "-datadir="+datadir, "-discover=0" ]
            if i > 0:
                # later nodes connect to node 0 to form one network
                args.append("-connect=127.0.0.1:"+str(p2p_port(0)))
            bitcoind_processes[i] = subprocess.Popen(args)
            # -rpcwait blocks until the daemon's RPC interface answers
            subprocess.check_call([ os.getenv("BITCOINCLI", "gastrocoin-cli"), "-datadir="+datadir,
                                    "-rpcwait", "getblockcount"], stdout=devnull)
        devnull.close()
        rpcs = []
        for i in range(4):
            try:
                url = "http://rt:rt@127.0.0.1:%d"%(rpc_port(i),)
                rpcs.append(AuthServiceProxy(url))
            except:
                sys.stderr.write("Error connecting to "+url+"\n")
                sys.exit(1)
        # Create a 200-block-long chain; each of the 4 nodes
        # gets 25 mature blocks and 25 immature.
        # blocks are created with timestamps 10 minutes apart, starting
        # at 1 Jan 2014
        block_time = 1388534400
        for i in range(2):
            for peer in range(4):
                for j in range(25):
                    set_node_times(rpcs, block_time)
                    rpcs[peer].setgenerate(True, 1)
                    block_time += 10*60
                # Must sync before next peer starts generating blocks
                sync_blocks(rpcs)
        # Shut them down, and clean up cache directories:
        stop_nodes(rpcs)
        wait_bitcoinds()
        for i in range(4):
            os.remove(log_filename("cache", i, "debug.log"))
            os.remove(log_filename("cache", i, "db.log"))
            os.remove(log_filename("cache", i, "peers.dat"))
            os.remove(log_filename("cache", i, "fee_estimates.dat"))
    # Copy the cached chain into this test's directory.
    for i in range(4):
        from_dir = os.path.join("cache", "node"+str(i))
        to_dir = os.path.join(test_dir, "node"+str(i))
        shutil.copytree(from_dir, to_dir)
        initialize_datadir(test_dir, i) # Overwrite port/rpcport in gastrocoin.conf
def initialize_chain_clean(test_dir, num_nodes):
    """
    Create an empty blockchain and num_nodes wallets.
    Useful if a test case wants complete control over initialization.
    """
    # Only the side effect matters here; the returned datadir path was
    # previously bound to an unused local, which has been removed.
    for i in range(num_nodes):
        initialize_datadir(test_dir, i)
def _rpchost_to_args(rpchost):
'''Convert optional IP:port spec to rpcconnect/rpcport args'''
if rpchost is None:
return []
match = re.match('(\[[0-9a-fA-f:]+\]|[^:]+)(?::([0-9]+))?$', rpchost)
if not match:
raise ValueError('Invalid RPC host spec ' + rpchost)
rpcconnect = match.group(1)
rpcport = match.group(2)
if rpcconnect.startswith('['): # remove IPv6 [...] wrapping
rpcconnect = rpcconnect[1:-1]
rv = ['-rpcconnect=' + rpcconnect]
if rpcport:
rv += ['-rpcport=' + rpcport]
return rv
def start_node(i, dirname, extra_args=None, rpchost=None):
    """
    Start a gastrocoind and return RPC connection to it
    """
    datadir = os.path.join(dirname, "node"+str(i))
    args = [ os.getenv("BITCOIND", "gastrocoind"), "-datadir="+datadir, "-keypool=1", "-discover=0", "-rest" ]
    if extra_args is not None: args.extend(extra_args)
    # remember the process handle so stop_node/wait_bitcoinds can reap it
    bitcoind_processes[i] = subprocess.Popen(args)
    devnull = open("/dev/null", "w+")
    # -rpcwait blocks until the daemon's RPC server is ready to answer
    subprocess.check_call([ os.getenv("BITCOINCLI", "gastrocoin-cli"), "-datadir="+datadir] +
                          _rpchost_to_args(rpchost) +
                          ["-rpcwait", "getblockcount"], stdout=devnull)
    devnull.close()
    url = "http://rt:rt@%s:%d" % (rpchost or '127.0.0.1', rpc_port(i))
    proxy = AuthServiceProxy(url)
    proxy.url = url # store URL on proxy for info
    return proxy
def start_nodes(num_nodes, dirname, extra_args=None, rpchost=None):
    """Start num_nodes gastrocoind instances and return their RPC proxies."""
    per_node_args = extra_args if extra_args is not None else [None] * num_nodes
    return [start_node(i, dirname, per_node_args[i], rpchost)
            for i in range(num_nodes)]
def log_filename(dirname, n_node, logname):
    """Path of a regtest log/data file inside node n_node's data directory."""
    node_dir = "node" + str(n_node)
    return os.path.join(dirname, node_dir, "regtest", logname)
def stop_node(node, i):
    # Ask node i to shut down over RPC, then reap and forget its process.
    node.stop()
    bitcoind_processes[i].wait()
    del bitcoind_processes[i]
def stop_nodes(nodes):
    """Stop every node and empty the list in place."""
    for conn in list(nodes):
        conn.stop()
    del nodes[:] # Emptying array closes connections as a side effect
def set_node_times(nodes, t):
    # Apply the same mock timestamp to every node (regtest time control).
    for node in nodes:
        node.setmocktime(t)
def wait_bitcoinds():
    # Wait for all bitcoinds to cleanly exit
    for bitcoind in bitcoind_processes.values():
        bitcoind.wait()
    # all processes reaped; forget their handles
    bitcoind_processes.clear()
def connect_nodes(from_connection, node_num):
    """Connect from_connection to local node node_num and wait for handshake."""
    target = "127.0.0.1:" + str(p2p_port(node_num))
    from_connection.addnode(target, "onetry")
    # poll until version handshake complete to avoid race conditions
    # with transaction relaying
    while any(peer['version'] == 0 for peer in from_connection.getpeerinfo()):
        time.sleep(0.1)
def connect_nodes_bi(nodes, a, b):
    # Connect in both directions so each side sees the other as a peer.
    connect_nodes(nodes[a], b)
    connect_nodes(nodes[b], a)
def find_output(node, txid, amount):
    """
    Return index to output of txid with value amount
    Raises exception if there is none.
    """
    txdata = node.getrawtransaction(txid, 1)
    # enumerate is clearer than indexing via range(len(...))
    for i, vout in enumerate(txdata["vout"]):
        if vout["value"] == amount:
            return i
    raise RuntimeError("find_output txid %s : %s not found"%(txid,str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
    """
    Return a random set of unspent txouts that are enough to pay amount_needed
    """
    assert confirmations_required >= 0
    candidates = from_node.listunspent(confirmations_required)
    random.shuffle(candidates)
    selected = []
    total_in = Decimal("0.00000000")
    while candidates and total_in < amount_needed:
        utxo = candidates.pop()
        total_in += utxo["amount"]
        selected.append({"txid": utxo["txid"], "vout": utxo["vout"], "address": utxo["address"]})
    if total_in < amount_needed:
        raise RuntimeError("Insufficient funds: need %d, have %d"%(amount_needed, total_in))
    return (total_in, selected)
def make_change(from_node, amount_in, amount_out, fee):
    """
    Create change output(s), return them
    """
    spend = amount_out + fee
    change = amount_in - spend
    outputs = {}
    if change > spend * 2:
        # Create an extra change output to break up big inputs
        change_address = from_node.getnewaddress()
        # Split change in two, being careful of rounding:
        outputs[change_address] = Decimal(change / 2).quantize(
            Decimal('0.00000001'), rounding=ROUND_DOWN)
        change = amount_in - spend - outputs[change_address]
    if change > 0:
        outputs[from_node.getnewaddress()] = change
    return outputs
def send_zeropri_transaction(from_node, to_node, amount, fee):
    """
    Create&broadcast a zero-priority transaction.
    Returns (txid, hex-encoded-txdata)
    Ensures transaction is zero-priority by first creating a send-to-self,
    then using it's output
    """
    # Create a send-to-self with confirmed inputs:
    self_address = from_node.getnewaddress()
    (total_in, inputs) = gather_inputs(from_node, amount+fee*2)
    outputs = make_change(from_node, total_in, amount+fee, fee)
    outputs[self_address] = float(amount+fee)
    self_rawtx = from_node.createrawtransaction(inputs, outputs)
    self_signresult = from_node.signrawtransaction(self_rawtx)
    self_txid = from_node.sendrawtransaction(self_signresult["hex"], True)
    # locate the output holding amount+fee so we can spend exactly it
    vout = find_output(from_node, self_txid, amount+fee)
    # Now immediately spend the output to create a 1-input, 1-output
    # zero-priority transaction:
    inputs = [ { "txid" : self_txid, "vout" : vout } ]
    outputs = { to_node.getnewaddress() : float(amount) }
    rawtx = from_node.createrawtransaction(inputs, outputs)
    signresult = from_node.signrawtransaction(rawtx)
    txid = from_node.sendrawtransaction(signresult["hex"], True)
    return (txid, signresult["hex"])
def random_zeropri_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
    """
    Create a random zero-priority transaction.
    Returns (txid, hex-encoded-transaction-data, fee)
    """
    sender = random.choice(nodes)
    receiver = random.choice(nodes)
    fee = min_fee + fee_increment * random.randint(0, fee_variants)
    (txid, txhex) = send_zeropri_transaction(sender, receiver, amount, fee)
    return (txid, txhex, fee)
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
    """
    Create a random transaction.
    Returns (txid, hex-encoded-transaction-data, fee)
    """
    sender = random.choice(nodes)
    receiver = random.choice(nodes)
    fee = min_fee + fee_increment * random.randint(0, fee_variants)
    (total_in, inputs) = gather_inputs(sender, amount + fee)
    outputs = make_change(sender, total_in, amount, fee)
    outputs[receiver.getnewaddress()] = float(amount)
    rawtx = sender.createrawtransaction(inputs, outputs)
    signresult = sender.signrawtransaction(rawtx)
    txid = sender.sendrawtransaction(signresult["hex"], True)
    return (txid, signresult["hex"], fee)
def assert_equal(thing1, thing2):
    # Raise AssertionError (with both values) unless the two compare equal.
    if thing1 != thing2:
        raise AssertionError("%s != %s"%(str(thing1),str(thing2)))
def assert_greater_than(thing1, thing2):
    # Raise AssertionError unless thing1 is strictly greater than thing2.
    if thing1 <= thing2:
        raise AssertionError("%s <= %s"%(str(thing1),str(thing2)))
def assert_raises(exc, fun, *args, **kwds):
    # Assert that fun(*args, **kwds) raises exc; any other exception, or no
    # exception at all, is reported as an AssertionError.
    try:
        fun(*args, **kwds)
    except exc:
        pass
    except Exception as e:
        raise AssertionError("Unexpected exception raised: "+type(e).__name__)
    else:
        raise AssertionError("No exception raised")
| 36.023121 | 110 | 0.64915 |
0b36472812eded86549f77477a5d84d896695146 | 2,076 | py | Python | ccbot/scrapy-cluster-1.2.1/docker/redis-monitor/settings.py | avvinci/cccatalog-api | 523ce68ace7484cfb338ae81e6bcda3e37f63609 | [
"MIT"
] | 2 | 2019-04-22T11:47:57.000Z | 2021-01-04T14:59:08.000Z | ccbot/scrapy-cluster-1.2.1/docker/redis-monitor/settings.py | avvinci/cccatalog-api | 523ce68ace7484cfb338ae81e6bcda3e37f63609 | [
"MIT"
] | 32 | 2021-01-07T23:49:24.000Z | 2022-03-02T15:03:41.000Z | ccbot/scrapy-cluster-1.2.1/docker/redis-monitor/settings.py | avvinci/cccatalog-api | 523ce68ace7484cfb338ae81e6bcda3e37f63609 | [
"MIT"
] | null | null | null | # THIS FILE SHOULD STAY IN SYNC WITH /redis-monitor/settings.py
# This file houses all default settings for the Redis Monitor
# to override please use a custom localsettings.py file
import os
def str2bool(v):
    """Interpret the strings 'true'/'1' (any case) as True; non-strings use bool()."""
    if type(v) is str:
        return v.lower() in ('true', '1')
    return bool(v)
# Redis host configuration
REDIS_HOST = os.getenv('REDIS_HOST', 'redis')
REDIS_PORT = int(os.getenv('REDIS_PORT', 6379))
REDIS_DB = int(os.getenv('REDIS_DB', 0))

# Kafka: comma-separated broker list, e.g. "host1:9092,host2:9092"
KAFKA_HOSTS = [x.strip() for x in os.getenv('KAFKA_HOSTS', 'kafka:9092').split(',')]
KAFKA_TOPIC_PREFIX = os.getenv('KAFKA_TOPIC_PREFIX', 'ccbot')
KAFKA_CONN_TIMEOUT = 5  # seconds
KAFKA_APPID_TOPICS = str2bool(os.getenv('KAFKA_APPID_TOPICS', False))
KAFKA_PRODUCER_BATCH_LINGER_MS = 25  # 25 ms before flush
KAFKA_PRODUCER_BUFFER_BYTES = 4 * 1024 * 1024  # 4MB before blocking

# Zookeeper Settings
ZOOKEEPER_ASSIGN_PATH = '/scrapy-cluster/crawler/'
ZOOKEEPER_ID = 'all'
ZOOKEEPER_HOSTS = os.getenv('ZOOKEEPER_HOSTS', 'zookeeper:2181')

# Plugins to load, mapped to their execution priority (lower runs first).
PLUGIN_DIR = "plugins/"
PLUGINS = {
    'plugins.info_monitor.InfoMonitor': 100,
    'plugins.stop_monitor.StopMonitor': 200,
    'plugins.expire_monitor.ExpireMonitor': 300,
    'plugins.stats_monitor.StatsMonitor': 400,
    'plugins.zookeeper_monitor.ZookeeperMonitor': 500,
}

# logging setup
LOGGER_NAME = 'redis-monitor'
LOG_DIR = os.getenv('LOG_DIR', 'logs')
LOG_FILE = 'redis_monitor.log'
LOG_MAX_BYTES = 10 * 1024 * 1024
LOG_BACKUPS = 5
LOG_STDOUT = str2bool(os.getenv('LOG_STDOUT', True))
LOG_JSON = str2bool(os.getenv('LOG_JSON', False))
LOG_LEVEL = os.getenv('LOG_LEVEL', 'INFO')

# stats setup
STATS_TOTAL = True
STATS_PLUGINS = True
STATS_CYCLE = 5
STATS_DUMP = 60
STATS_DUMP_CRAWL = True
STATS_DUMP_QUEUE = True
# from time variables in scutils.stats_collector class
STATS_TIMES = [
    'SECONDS_15_MINUTE',
    'SECONDS_1_HOUR',
    'SECONDS_6_HOUR',
    'SECONDS_12_HOUR',
    'SECONDS_1_DAY',
    'SECONDS_1_WEEK',
]

# retry failures on actions
RETRY_FAILURES = True
RETRY_FAILURES_MAX = 3
REDIS_LOCK_EXPIRATION = 6  # seconds

# main thread sleep time
SLEEP_TIME = 0.1
HEARTBEAT_TIMEOUT = 120
| 29.657143 | 84 | 0.743256 |
529f07f274dc1da176d09f5c4425cfe1c083ad3f | 907 | py | Python | cx_setup.py | AdvancedPhotonSource/xPlotUtil | 99c2d2f35ffda26a5cc13a55dbb0b81d4ee047d4 | [
"FSFAP"
] | 1 | 2021-08-07T13:44:26.000Z | 2021-08-07T13:44:26.000Z | cx_setup.py | AdvancedPhotonSource/xPlotUtil | 99c2d2f35ffda26a5cc13a55dbb0b81d4ee047d4 | [
"FSFAP"
] | 3 | 2019-02-26T18:04:29.000Z | 2019-04-09T20:14:31.000Z | cx_setup.py | AdvancedPhotonSource/xPlotUtil | 99c2d2f35ffda26a5cc13a55dbb0b81d4ee047d4 | [
"FSFAP"
] | 1 | 2017-03-23T17:40:05.000Z | 2017-03-23T17:40:05.000Z | from cx_Freeze import setup, Executable
import os
import sys
import scipy
import matplotlib

# cx_Freeze bundle: ship the full scipy/matplotlib/os package directories
# plus the window icon alongside the frozen executable.
includefiles_list=[]
scipy_path = os.path.dirname(scipy.__file__)
includefiles_list.append(scipy_path)
matplotlib_path = os.path.dirname(matplotlib.__file__)
includefiles_list.append(matplotlib_path)
os_path = os.path.dirname(os.__file__)
includefiles_list.append(os_path)
includefiles_list.append("graph.ico")

# Win32GUI base hides the console window on Windows; default base elsewhere.
base = 'Win32GUI' if sys.platform == 'win32' else None

options = {"packages": ["os", "idna", "numpy", "spec2nexus", "lmfit", "matplotlib", ],
           "include_files": includefiles_list, "includes": ['os', 'lmfit.models']}

setup(name="xPlotUtil",
      version="0.1",
      options={"build_exe": options},
      description="Allows fitting and plotting of point data from spec file.",
      executables=[Executable("xPlotUtil/PlotWindow.py", icon='graph.ico', base=base, shortcutDir='xPlotUtil')])
0b30ebd7e5145f4c747ebb00da98db4ddd4146cd | 13,833 | py | Python | homeassistant/components/group/media_player.py | andersop91/core | 0e0ef0aa17073609eae7c974cf4c73306b7c414b | [
"Apache-2.0"
] | 4 | 2021-07-11T09:11:00.000Z | 2022-02-27T14:43:50.000Z | homeassistant/components/group/media_player.py | andersop91/core | 0e0ef0aa17073609eae7c974cf4c73306b7c414b | [
"Apache-2.0"
] | 46 | 2021-04-21T08:11:44.000Z | 2022-03-31T06:10:12.000Z | homeassistant/components/group/media_player.py | andersop91/core | 0e0ef0aa17073609eae7c974cf4c73306b7c414b | [
"Apache-2.0"
] | 1 | 2021-05-17T17:22:34.000Z | 2021-05-17T17:22:34.000Z | """This platform allows several media players to be grouped into one media player."""
from __future__ import annotations
from collections.abc import Callable
from typing import Any
import voluptuous as vol
from homeassistant.components.media_player import (
ATTR_MEDIA_CONTENT_ID,
ATTR_MEDIA_CONTENT_TYPE,
ATTR_MEDIA_SEEK_POSITION,
ATTR_MEDIA_SHUFFLE,
ATTR_MEDIA_VOLUME_LEVEL,
ATTR_MEDIA_VOLUME_MUTED,
DOMAIN,
PLATFORM_SCHEMA,
SERVICE_CLEAR_PLAYLIST,
SERVICE_PLAY_MEDIA,
SUPPORT_CLEAR_PLAYLIST,
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_PLAY_MEDIA,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_SEEK,
SUPPORT_SHUFFLE_SET,
SUPPORT_STOP,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET,
SUPPORT_VOLUME_STEP,
MediaPlayerEntity,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_SUPPORTED_FEATURES,
CONF_ENTITIES,
CONF_NAME,
CONF_UNIQUE_ID,
SERVICE_MEDIA_NEXT_TRACK,
SERVICE_MEDIA_PAUSE,
SERVICE_MEDIA_PLAY,
SERVICE_MEDIA_PREVIOUS_TRACK,
SERVICE_MEDIA_SEEK,
SERVICE_MEDIA_STOP,
SERVICE_SHUFFLE_SET,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
SERVICE_VOLUME_MUTE,
SERVICE_VOLUME_SET,
STATE_OFF,
STATE_ON,
STATE_UNAVAILABLE,
STATE_UNKNOWN,
)
from homeassistant.core import HomeAssistant, State, callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.event import async_track_state_change_event
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType, EventType
# Capability keys used in MediaGroup._features to track which member
# entities support each media-player feature group.
KEY_CLEAR_PLAYLIST = "clear_playlist"
KEY_ON_OFF = "on_off"
KEY_PAUSE_PLAY_STOP = "play"
KEY_PLAY_MEDIA = "play_media"
KEY_SHUFFLE = "shuffle"
KEY_SEEK = "seek"
KEY_TRACKS = "tracks"
KEY_VOLUME = "volume"

DEFAULT_NAME = "Media Group"

# YAML schema: a required list of media_player entities plus optional
# name and unique_id.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_ENTITIES): cv.entities_domain(DOMAIN),
        vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
        vol.Optional(CONF_UNIQUE_ID): cv.string,
    }
)
async def async_setup_platform(
    hass: HomeAssistant,
    config: ConfigType,
    async_add_entities: Callable,
    discovery_info: DiscoveryInfoType | None = None,
) -> None:
    """Set up the Media Group platform from a YAML platform configuration."""
    group = MediaGroup(
        config.get(CONF_UNIQUE_ID), config[CONF_NAME], config[CONF_ENTITIES]
    )
    async_add_entities([group])
class MediaGroup(MediaPlayerEntity):
    """Representation of a Media Group.

    The group derives its state and supported features from its members and
    forwards every service call to the subset of members that support the
    corresponding feature.
    """

    def __init__(self, unique_id: str | None, name: str, entities: list[str]) -> None:
        """Initialize a Media Group entity."""
        self._name = name
        self._state: str | None = None
        self._supported_features: int = 0
        self._attr_unique_id = unique_id
        self._entities = entities
        # Maps each capability key to the set of member entity_ids that
        # currently support that capability.
        self._features: dict[str, set[str]] = {
            KEY_CLEAR_PLAYLIST: set(),
            KEY_ON_OFF: set(),
            KEY_PAUSE_PLAY_STOP: set(),
            KEY_PLAY_MEDIA: set(),
            KEY_SHUFFLE: set(),
            KEY_SEEK: set(),
            KEY_TRACKS: set(),
            KEY_VOLUME: set(),
        }

    @callback
    def async_on_state_change(self, event: EventType) -> None:
        """Update supported features and state when a new state is received."""
        self.async_set_context(event.context)
        self.async_update_supported_features(
            event.data.get("entity_id"), event.data.get("new_state")  # type: ignore
        )
        self.async_update_state()

    @callback
    def async_update_supported_features(
        self,
        entity_id: str,
        new_state: State | None,
    ) -> None:
        """Update dictionaries with supported features."""
        if not new_state:
            # Member disappeared (or has no state): drop it from every
            # capability bucket so no calls are forwarded to it.
            for players in self._features.values():
                players.discard(entity_id)
            return

        new_features = new_state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
        if new_features & SUPPORT_CLEAR_PLAYLIST:
            self._features[KEY_CLEAR_PLAYLIST].add(entity_id)
        else:
            self._features[KEY_CLEAR_PLAYLIST].discard(entity_id)

        if new_features & (SUPPORT_NEXT_TRACK | SUPPORT_PREVIOUS_TRACK):
            self._features[KEY_TRACKS].add(entity_id)
        else:
            self._features[KEY_TRACKS].discard(entity_id)

        if new_features & (SUPPORT_PAUSE | SUPPORT_PLAY | SUPPORT_STOP):
            self._features[KEY_PAUSE_PLAY_STOP].add(entity_id)
        else:
            self._features[KEY_PAUSE_PLAY_STOP].discard(entity_id)

        if new_features & SUPPORT_PLAY_MEDIA:
            self._features[KEY_PLAY_MEDIA].add(entity_id)
        else:
            self._features[KEY_PLAY_MEDIA].discard(entity_id)

        if new_features & SUPPORT_SEEK:
            self._features[KEY_SEEK].add(entity_id)
        else:
            self._features[KEY_SEEK].discard(entity_id)

        if new_features & SUPPORT_SHUFFLE_SET:
            self._features[KEY_SHUFFLE].add(entity_id)
        else:
            self._features[KEY_SHUFFLE].discard(entity_id)

        if new_features & (SUPPORT_TURN_ON | SUPPORT_TURN_OFF):
            self._features[KEY_ON_OFF].add(entity_id)
        else:
            self._features[KEY_ON_OFF].discard(entity_id)

        if new_features & (
            SUPPORT_VOLUME_MUTE | SUPPORT_VOLUME_SET | SUPPORT_VOLUME_STEP
        ):
            self._features[KEY_VOLUME].add(entity_id)
        else:
            self._features[KEY_VOLUME].discard(entity_id)

    async def async_added_to_hass(self) -> None:
        """Register listeners."""
        for entity_id in self._entities:
            new_state = self.hass.states.get(entity_id)
            self.async_update_supported_features(entity_id, new_state)
        # Register the unsubscribe callback so the state listener is detached
        # when this entity is removed (previously the handle was discarded,
        # leaking the listener).
        self.async_on_remove(
            async_track_state_change_event(
                self.hass, self._entities, self.async_on_state_change
            )
        )
        self.async_update_state()

    @property
    def name(self) -> str:
        """Return the name of the entity."""
        return self._name

    @property
    def state(self) -> str | None:
        """Return the state of the media group."""
        return self._state

    @property
    def supported_features(self) -> int:
        """Flag supported features."""
        return self._supported_features

    @property
    def should_poll(self) -> bool:
        """No polling needed for a media group."""
        return False

    @property
    def extra_state_attributes(self) -> dict:
        """Return the state attributes for the media group."""
        return {ATTR_ENTITY_ID: self._entities}

    async def async_clear_playlist(self) -> None:
        """Clear players playlist."""
        data = {ATTR_ENTITY_ID: self._features[KEY_CLEAR_PLAYLIST]}
        await self.hass.services.async_call(
            DOMAIN,
            SERVICE_CLEAR_PLAYLIST,
            data,
            context=self._context,
        )

    async def async_media_next_track(self) -> None:
        """Send next track command."""
        data = {ATTR_ENTITY_ID: self._features[KEY_TRACKS]}
        await self.hass.services.async_call(
            DOMAIN,
            SERVICE_MEDIA_NEXT_TRACK,
            data,
            context=self._context,
        )

    async def async_media_pause(self) -> None:
        """Send pause command."""
        data = {ATTR_ENTITY_ID: self._features[KEY_PAUSE_PLAY_STOP]}
        await self.hass.services.async_call(
            DOMAIN,
            SERVICE_MEDIA_PAUSE,
            data,
            context=self._context,
        )

    async def async_media_play(self) -> None:
        """Send play command."""
        data = {ATTR_ENTITY_ID: self._features[KEY_PAUSE_PLAY_STOP]}
        await self.hass.services.async_call(
            DOMAIN,
            SERVICE_MEDIA_PLAY,
            data,
            context=self._context,
        )

    async def async_media_previous_track(self) -> None:
        """Send previous track command."""
        data = {ATTR_ENTITY_ID: self._features[KEY_TRACKS]}
        await self.hass.services.async_call(
            DOMAIN,
            SERVICE_MEDIA_PREVIOUS_TRACK,
            data,
            context=self._context,
        )

    async def async_media_seek(self, position: int) -> None:
        """Send seek command."""
        data = {
            ATTR_ENTITY_ID: self._features[KEY_SEEK],
            ATTR_MEDIA_SEEK_POSITION: position,
        }
        await self.hass.services.async_call(
            DOMAIN,
            SERVICE_MEDIA_SEEK,
            data,
            context=self._context,
        )

    async def async_media_stop(self) -> None:
        """Send stop command."""
        data = {ATTR_ENTITY_ID: self._features[KEY_PAUSE_PLAY_STOP]}
        await self.hass.services.async_call(
            DOMAIN,
            SERVICE_MEDIA_STOP,
            data,
            context=self._context,
        )

    async def async_mute_volume(self, mute: bool) -> None:
        """Mute the volume."""
        data = {
            ATTR_ENTITY_ID: self._features[KEY_VOLUME],
            ATTR_MEDIA_VOLUME_MUTED: mute,
        }
        await self.hass.services.async_call(
            DOMAIN,
            SERVICE_VOLUME_MUTE,
            data,
            context=self._context,
        )

    async def async_play_media(
        self, media_type: str, media_id: str, **kwargs: Any
    ) -> None:
        """Play a piece of media."""
        data = {
            ATTR_ENTITY_ID: self._features[KEY_PLAY_MEDIA],
            ATTR_MEDIA_CONTENT_ID: media_id,
            ATTR_MEDIA_CONTENT_TYPE: media_type,
        }
        await self.hass.services.async_call(
            DOMAIN,
            SERVICE_PLAY_MEDIA,
            data,
            context=self._context,
        )

    async def async_set_shuffle(self, shuffle: bool) -> None:
        """Enable/disable shuffle mode."""
        data = {
            ATTR_ENTITY_ID: self._features[KEY_SHUFFLE],
            ATTR_MEDIA_SHUFFLE: shuffle,
        }
        await self.hass.services.async_call(
            DOMAIN,
            SERVICE_SHUFFLE_SET,
            data,
            context=self._context,
        )

    async def async_turn_on(self) -> None:
        """Forward the turn_on command to all media in the media group."""
        data = {ATTR_ENTITY_ID: self._features[KEY_ON_OFF]}
        await self.hass.services.async_call(
            DOMAIN,
            SERVICE_TURN_ON,
            data,
            context=self._context,
        )

    async def async_set_volume_level(self, volume: float) -> None:
        """Set volume level(s)."""
        data = {
            ATTR_ENTITY_ID: self._features[KEY_VOLUME],
            ATTR_MEDIA_VOLUME_LEVEL: volume,
        }
        await self.hass.services.async_call(
            DOMAIN,
            SERVICE_VOLUME_SET,
            data,
            context=self._context,
        )

    async def async_turn_off(self) -> None:
        """Forward the turn_off command to all media in the media group."""
        data = {ATTR_ENTITY_ID: self._features[KEY_ON_OFF]}
        await self.hass.services.async_call(
            DOMAIN,
            SERVICE_TURN_OFF,
            data,
            context=self._context,
        )

    async def _async_step_member_volume(self, entity: str, delta: float) -> None:
        """Shift one member's volume by delta, clamped to [0, 1].

        Fix versus the original implementation: the volume_set call targets
        only this member. The old code called async_set_volume_level, which
        re-set the volume of *every* member once per member, so the final
        group volume depended on set-iteration order.
        """
        state = self.hass.states.get(entity)
        if state is None:
            # Member currently has no state; nothing to step.
            return
        volume_level = state.attributes.get(ATTR_MEDIA_VOLUME_LEVEL)
        if volume_level is None:
            # Member does not report a volume; the old code raised KeyError here.
            return
        new_level = min(1, max(0, volume_level + delta))
        if new_level != volume_level:
            await self.hass.services.async_call(
                DOMAIN,
                SERVICE_VOLUME_SET,
                {ATTR_ENTITY_ID: entity, ATTR_MEDIA_VOLUME_LEVEL: new_level},
                context=self._context,
            )

    async def async_volume_up(self) -> None:
        """Turn volume up for media player(s)."""
        for entity in self._features[KEY_VOLUME]:
            await self._async_step_member_volume(entity, 0.1)

    async def async_volume_down(self) -> None:
        """Turn volume down for media player(s)."""
        for entity in self._features[KEY_VOLUME]:
            await self._async_step_member_volume(entity, -0.1)

    @callback
    def async_update_state(self) -> None:
        """Query all members and determine the media group state."""
        states = [self.hass.states.get(entity) for entity in self._entities]
        states_values = [state.state for state in states if state is not None]
        off_values = STATE_OFF, STATE_UNAVAILABLE, STATE_UNKNOWN

        if states_values:
            if states_values.count(states_values[0]) == len(states_values):
                # All members agree: mirror their common state.
                self._state = states_values[0]
            elif any(state for state in states_values if state not in off_values):
                self._state = STATE_ON
            else:
                self._state = STATE_OFF
        else:
            self._state = None

        # A feature is advertised as soon as at least one member supports it.
        supported_features = 0
        supported_features |= (
            SUPPORT_CLEAR_PLAYLIST if self._features[KEY_CLEAR_PLAYLIST] else 0
        )
        supported_features |= (
            SUPPORT_NEXT_TRACK | SUPPORT_PREVIOUS_TRACK
            if self._features[KEY_TRACKS]
            else 0
        )
        supported_features |= (
            SUPPORT_PAUSE | SUPPORT_PLAY | SUPPORT_STOP
            if self._features[KEY_PAUSE_PLAY_STOP]
            else 0
        )
        supported_features |= (
            SUPPORT_PLAY_MEDIA if self._features[KEY_PLAY_MEDIA] else 0
        )
        supported_features |= SUPPORT_SEEK if self._features[KEY_SEEK] else 0
        supported_features |= SUPPORT_SHUFFLE_SET if self._features[KEY_SHUFFLE] else 0
        supported_features |= (
            SUPPORT_TURN_ON | SUPPORT_TURN_OFF if self._features[KEY_ON_OFF] else 0
        )
        supported_features |= (
            SUPPORT_VOLUME_MUTE | SUPPORT_VOLUME_SET | SUPPORT_VOLUME_STEP
            if self._features[KEY_VOLUME]
            else 0
        )
        self._supported_features = supported_features

        self.async_write_ha_state()
| 32.779621 | 98 | 0.623437 |
b89b9d2722d7915128bcecbdf827cb9c5a7116ff | 24,987 | py | Python | pycqed/analysis_v2/tomography_V2.py | DiCarloLab-Delft/PycQED_py3 | 0d4457cd5d3d097305c7ecee4dc9541c309dcda9 | [
"MIT"
] | 60 | 2016-08-03T10:00:18.000Z | 2021-11-10T11:46:16.000Z | pycqed/analysis_v2/tomography_V2.py | DiCarloLab-Delft/PycQED_py3 | 0d4457cd5d3d097305c7ecee4dc9541c309dcda9 | [
"MIT"
] | 512 | 2016-08-03T17:10:02.000Z | 2022-03-31T14:03:43.000Z | pycqed/analysis_v2/tomography_V2.py | DiCarloLab-Delft/PycQED_py3 | 0d4457cd5d3d097305c7ecee4dc9541c309dcda9 | [
"MIT"
] | 34 | 2016-10-19T12:00:52.000Z | 2022-03-19T04:43:26.000Z | import time
import numpy as np
import scipy as scipy
try:
import qutip as qt
except ImportError as e:
import logging
logging.warning('Could not import qutip, tomo code will not work')
from pycqed.analysis_v2 import pytomo as csdp_tomo
# Projectors onto the four two-qubit computational basis states,
# |00><00|, |01><01|, |10><10|, |11><11|, ordered by binary counting
# (matching the state ordering convention used by TomoAnalysis).
comp_projectors = [qt.ket2dm(qt.tensor(qt.basis(2,0), qt.basis(2,0))),
                   qt.ket2dm(qt.tensor(qt.basis(2,0), qt.basis(2,1))),
                   qt.ket2dm(qt.tensor(qt.basis(2,1), qt.basis(2,0))),
                   qt.ket2dm(qt.tensor(qt.basis(2,1), qt.basis(2,1)))]
class TomoAnalysis():
    """Performs state tomography based on an overcomplete set of measurements
    and calibration measurements. Uses qutip to calculate resulting basis
    states from applied rotations.

    Uses binary counting as general guideline in ordering states. Calculates
    rotations by using the qutip library.

    BEFORE YOU USE THIS SET THE CORRECT ORDER BY CHANGING
        'rotation_matrices'
        'measurement_basis' + 'measurement_basis_labels'
    to values corresponding to your experiment
    and maybe 'readout_basis'
    """

    # The set of single qubit rotation matrices used in the tomography
    # measurement (will be assumed to be used on all qubits)
    rotation_matrices = [qt.identity(2), qt.sigmax(),
                         qt.rotation(qt.sigmay(), np.pi / 2),
                         qt.rotation(qt.sigmay(), -1 * np.pi / 2),
                         qt.rotation(qt.sigmax(), np.pi / 2),
                         qt.rotation(qt.sigmax(), -np.pi / 2)]
    # MAKE SURE THE LABELS CORRESPOND TO THE ROTATION MATRICES DEFINED ABOVE
    measurement_operator_labels = ['I', 'X', 'y', '-y', 'x', '-x']
    # The set of single qubit basis operators and labels (normalized)
    measurement_basis = [qt.identity(2), qt.sigmaz(), qt.sigmax(), qt.sigmay()]
    measurement_basis_labels = ['I', 'Z', 'X', 'Y']
    # The operators used in the readout basis on each qubit
    readout_basis = [qt.identity(2), qt.sigmaz()]

    def __init__(self, n_qubits=2, check_labels=False):
        """
        keyword arguments:
        n_qubits --- default(2) the amount of qubits present in the experiment
        check_labels --- if True, print the tomographic-rotation and basis
            labels so their ordering can be verified against the experiment
        """
        self.n_qubits = n_qubits
        self.n_states = 2 ** n_qubits
        # Generate the vectors of matrices that correspond to all measurements,
        # readout bases and rotations
        self.basis_vector = self._calculate_matrix_set(
            self.measurement_basis, n_qubits)
        self.readout_vector = self._calculate_matrix_set(
            self.readout_basis, n_qubits)
        self.rotation_vector = self._calculate_matrix_set(
            self.rotation_matrices, n_qubits)
        # generate the basis change matrix from pauli to comp and back
        A = np.zeros((self.n_states**2, self.n_states**2), dtype=complex)
        for i in range(self.n_states**2):
            # each column is one (flattened) Pauli-product basis matrix
            A[:, i] = np.ravel(self.basis_vector[i].full())
        self.basis_pauli_to_comp_trafo_matrix = A / self.n_states
        self.basis_comp_to_pauli_trafo_matrix = np.linalg.inv(A)
        # dims metadata for the qutip objects produced by this class
        self.qt_dims = [[2 for i in range(self.n_qubits)],
                        [2 for i in range(self.n_qubits)]]
        if check_labels is True:
            # prints the order of basis set corresponding to the
            # tomographic rotations
            print(self.get_meas_operator_labels(n_qubits))
            print(self.get_basis_labels(n_qubits))

    def execute_pseudo_inverse_tomo(self, meas_operators, meas_tomo,
                                    use_pauli_basis=False, verbose=False):
        """
        Performs a linear tomography by pseudo-inversion of the system of
        equations defined by the calibrated measurement operator(s).

        meas_operators : a single measurement operator (qt.Qobj) or a list of
            independently calibrated measurement operators.
        meas_tomo : the measured values, stacked in the same order as the
            operators' rotated measurement vectors.
        use_pauli_basis : solve for the Pauli decomposition instead of the
            computational-basis density matrix elements.

        Returns (basis_decomposition, rho).
        """
        # accept a bare operator as well as a list of operators
        meas_operators = [meas_operators] if type(meas_operators) == qt.Qobj else meas_operators
        # for each independent set of measurements (with its own measurement
        # operator) calculate the coefficient matrix, then stack them
        coeff_matrices = []
        for measurement_operator in meas_operators:
            coeff_matrices.append(self.calculate_LI_coefficient_matrix(
                measurement_operator, do_in_pauli=use_pauli_basis))
        coefficient_matrix = np.vstack(coeff_matrices)
        basis_decomposition = np.zeros(4 ** self.n_qubits, dtype=complex)
        if use_pauli_basis:
            basis_decomposition[:] = np.dot(
                np.linalg.pinv(coefficient_matrix[:, :]), meas_tomo)
            rho = self.trans_pauli_to_comp(basis_decomposition)
        else:
            basis_decomposition = np.conj(
                np.linalg.pinv(coefficient_matrix)).dot(meas_tomo)
            rho = qt.Qobj(np.reshape(basis_decomposition,
                                     [self.n_states, self.n_states]),
                          dims=self.qt_dims)
        return (basis_decomposition, rho)

    def execute_mle_T_matrix_tomo(self, measurement_operators, meas_tomo,
                                  weights_tomo=False, show_time=True,
                                  ftol=0.01, xtol=0.001, full_output=0,
                                  max_iter=100, TE_correction_matrix=None):
        """
        Performs a least squares optimization using fmin_powell in order to
        get the closest physically realisable state.

        This is done by constructing a lower triangular matrix T consisting of
        4 ** n_qubits params (the Cholesky parametrization guarantees a
        positive semi-definite, unit-trace rho).

        Keyword arguments:
        measurement_operators : a single operator or list of calibrated
            measurement operators.
        meas_tomo : list of measurements belonging to the rotated versions of
            the measurement operator(s).
        weights_tomo : default(False) optional per-measurement weights; for
            data that came from averaging use
            tomo_var_i = 1 / N_i * np.var(M_i) for rotation i.
        show_time : print timing information for the individual stages.
        TE_correction_matrix : accepted for API compatibility; not used here.
        --- ftol/xtol/full_output/max_iter are forwarded to
            scipy.optimize.fmin_powell; see its documentation.
        """
        # first we calculate the measurement matrices
        tstart = time.time()
        measurement_operators = [measurement_operators] if type(measurement_operators) == qt.Qobj else measurement_operators
        measurement_vectors = []
        for measurement_operator in measurement_operators:
            measurement_vectors.append(
                [m.full() for m in self.get_measurement_vector(measurement_operator)])
        measurement_vector = np.vstack(measurement_vectors)
        # Use equal weights unless explicit weights were supplied.
        # BUGFIX: compare against the False/None sentinels instead of
        # truth-testing, because a numpy array of weights would raise
        # "The truth value of an array ... is ambiguous".
        if weights_tomo is False or weights_tomo is None:
            self.weights = np.ones(len(measurement_vector))
        else:
            self.weights = weights_tomo
        # save in the object for use in the optimization callback
        self.measurements_tomo = meas_tomo
        self.measurement_vector_numpy = measurement_vector
        tlinear = time.time()
        # find the starting rho via the linear tomo
        discard, rho0 = self.execute_pseudo_inverse_tomo(
            measurement_operators, meas_tomo)
        # now fetch the starting t_params from the cholesky decomp of rho
        tcholesky = time.time()
        T0 = np.linalg.cholesky(scipy.linalg.sqrtm((rho0.dag() * rho0).full()))
        t0 = np.zeros(4 ** self.n_qubits, dtype='complex')
        di = np.diag_indices(2 ** self.n_qubits)
        tri = np.tril_indices(2 ** self.n_qubits, -1)
        # pack diagonal first, then interleaved real/imag lower-triangle parts
        t0[0:2 ** self.n_qubits] = T0[di]
        t0[2**self.n_qubits::2] = T0[tri].real
        t0[2**self.n_qubits+1::2] = T0[tri].imag
        topt = time.time()
        # minimize the likelihood function using scipy
        t_optimal = scipy.optimize.fmin_powell(
            self._max_likelihood_optimization_function, t0, maxiter=max_iter,
            full_output=full_output, ftol=ftol, xtol=xtol)
        if show_time is True:
            print(" Time to calc rotation matrices %.2f " % (tlinear - tstart))
            print(" Time to do linear tomo %.2f " % (tcholesky - tlinear))
            print(" Time to build T %.2f " % (topt - tcholesky))
            print(" Time to optimize %.2f" % (time.time() - topt))
        return qt.Qobj(self.build_rho_from_triangular_params(t_optimal),
                       dims=self.qt_dims)

    def execute_SDPA_2qubit_tomo(self, measurement_operators, counts_tomo,
                                 N_total=1, used_bins=(0, 3),
                                 correct_measurement_operators=True,
                                 calc_chi_squared=False,
                                 correct_zero_count_bins=True,
                                 TE_correction_matrix=None):
        """
        Estimates a density matrix given single shot counts of 4 thresholded
        bins using a custom C semidefinite solver from Nathan Langford.

        Each bin should correspond to a projection operator:
        0: 00, 1: 01, 2: 10, 3: 11
        measurement_operators : per-bin calibrated operators (see
            calibrate_bin_operators); only the operators of `used_bins` are
            used in the reconstruction.
        counts_tomo : per-rotation bin counts used for the reconstruction.
        N_total is used for normalization; if counts_tomo is normalized use
        N_total=1.
        correct_measurement_operators / TE_correction_matrix : accepted for
            API compatibility; not referenced in this implementation.

        Returns rho, or (rho, chi_squared) when calc_chi_squared is True.
        """
        # default is an immutable tuple (mutable default args are a footgun);
        # make a list for numpy fancy indexing below
        used_bins = list(used_bins)
        # Bins with zero counts do not satisfy gaussian noise assumptions.
        # If N >> 1, setting them to 1 fixes convergence problems without
        # noticeably distorting the statistics.
        if correct_zero_count_bins:
            counts_tomo = np.array([[int(b) if b > 0 else 1 for b in bin_counts]
                                    for bin_counts in counts_tomo])
        # select the data belonging to the used bins / projection operators
        data = counts_tomo[:, used_bins].T.flatten()
        # total number of counts per tomo run, repeated per used bin
        N = np.array([np.sum(counts_tomo, axis=1) for k in used_bins]).flatten()
        # Weight by the number of data points kept each run. N_total should be
        # (approximately) the average number of total counts of all runs,
        # since the solver estimates this average as a parameter.
        weights = N / float(N_total)
        # observables: rotated versions of every used bin's operator
        measurement_vectors = []
        for k in used_bins:
            measurement_vectors.append(
                [m.full() for m in self.get_measurement_vector(measurement_operators[k])])
        measurement_vector = np.vstack(measurement_vectors)
        # calculate the density matrix using the csdp solver and renormalize
        rho_nathan = csdp_tomo.tomo_state(data, measurement_vector, weights)
        n_estimate = rho_nathan.trace()
        rho = qt.Qobj(rho_nathan / n_estimate, dims=self.qt_dims)
        if calc_chi_squared:
            chi_squared = self._state_tomo_goodness_of_fit(
                rho, data, N, measurement_vector)
            return rho, chi_squared
        else:
            return rho

    def execute_SDPA_MC_2qubit_tomo(self,
                                    measurement_operators,
                                    counts_tomo,
                                    N_total,
                                    used_bins=(0, 2),
                                    n_runs=100,
                                    array_like=False,
                                    correct_measurement_operators=True,
                                    TE_correction_matrix=None):
        """
        Executes the SDPA tomo n_runs times with resampled data in order to
        get a list of rhos from which one can calculate errorbars on derived
        quantities.

        Each run redraws the counts from a multinomial distribution with the
        measured relative frequencies.
        Returns a list of Qobj density matrices, or, if array_like is True, a
        3D numpy array of the raw matrices.
        """
        rhos = []
        for i in range(n_runs):
            # resample every rotation's bin counts around the measured means
            mc = [np.random.multinomial(sum(counts),
                                        (np.array(counts) + 0.0) / sum(counts))
                  for counts in counts_tomo]
            rhos.append(self.execute_SDPA_2qubit_tomo(
                measurement_operators,
                mc,
                N_total,
                used_bins,
                correct_measurement_operators,
                TE_correction_matrix=TE_correction_matrix))
        if array_like:
            return np.array([rho.full() for rho in rhos])
        else:
            return rhos

    def calculate_LI_coefficient_matrix(self, measurement_operator, do_in_pauli=False):
        """
        Calculates the coefficient matrix used when inverting the linear
        system of equations needed to find rho.

        Requires a calibrated measurement operator.
        If do_in_pauli is True the rotated operators are expressed in the
        Pauli basis before being flattened into rows.
        """
        coefficient_matrix = np.zeros(
            (len(self.rotation_matrices) ** self.n_qubits, 4 ** self.n_qubits),
            dtype=complex)
        Ms = self.get_measurement_vector(measurement_operator,
                                         do_in_pauli=do_in_pauli)
        for i in range(len(Ms)):
            coefficient_matrix[i, :] = np.ravel(Ms[i].full())
        return coefficient_matrix

    def get_measurement_vector(self, measurement_operator, do_in_pauli=False):
        """
        Returns the list of rotated measurement operators obtained by
        conjugating the (calibrated) measurement operator with every
        tomographic rotation.
        """
        n_rotations = len(self.rotation_matrices) ** self.n_qubits
        measurement_vector = []
        for i in range(n_rotations):
            R = self.rotation_vector[i]
            if do_in_pauli:
                M = self.trans_comp_to_pauli(R.dag() * measurement_operator * R)
            else:
                M = R.dag() * measurement_operator * R
            measurement_vector.append(M)
        return measurement_vector

    def calibrate_measurement_operator(self, cal_meas, calibration_points=None,
                                       TE_correction_matrix=None,
                                       transform_to_pauli=False):
        """
        Calibrates the measurement operator in any basis. Assumes cal_meas are
        the eigenvalues of the calibration_points.

        calibration_points defaults to the computational basis projectors.
        TE_correction_matrix : accepted for API compatibility; pass corrected
            calibration_points instead (see get_TE_calibration_points).
        """
        if calibration_points is None:
            calibration_points = comp_projectors
        M = sum([cal_meas[i] * calibration_points[i]
                 for i in range(len(cal_meas))])
        return M if not transform_to_pauli else self.trans_comp_to_pauli(M)

    def calibrate_bin_operators(self, calibration_counts, calibration_points=None,
                                normalize=False):
        """
        Calibrates one measurement operator per thresholded bin from the
        calibration counts (rows: calibration states, columns: bins).

        If normalize is True each row is first normalized to probabilities.
        Returns a list with one operator per bin.
        """
        M_bins = []
        if normalize:
            # bin probabilities, normalized per calibration state (row)
            cal_probs = (calibration_counts /
                         np.sum(calibration_counts, axis=1, dtype=float)[:, np.newaxis])
        else:
            cal_probs = np.array(calibration_counts)
        for probs in cal_probs.T:
            # calibrate the measurement operator for each bin in the same way
            # as done with average tomo.
            M_bins.append(self.calibrate_measurement_operator(
                probs, calibration_points))
        return M_bins

    # HELPERS
    def trans_pauli_to_comp(self, rho_pauli):
        """
        Converts a rho in the pauli basis (as matrix, Qobj or flat coefficient
        vector) to the computational basis.
        """
        if (rho_pauli.shape[0] == self.n_states):
            basis_decomposition = np.ravel(rho_pauli.full()) if (type(rho_pauli) == qt.Qobj) else np.ravel(rho_pauli)
        else:
            basis_decomposition = rho_pauli
        return qt.Qobj(np.reshape(
            self.basis_pauli_to_comp_trafo_matrix.dot(basis_decomposition),
            [self.n_states, self.n_states]),
            dims=self.qt_dims)

    def trans_comp_to_pauli(self, rho_comp):
        """
        Converts a rho in the computational basis (as matrix, Qobj or flat
        coefficient vector) to the Pauli basis.
        """
        if (rho_comp.shape[0] == self.n_states):
            basis_decomposition = np.ravel(rho_comp.full()) if (type(rho_comp) == qt.Qobj) else np.ravel(rho_comp)
        else:
            basis_decomposition = rho_comp
        return qt.Qobj(np.reshape(
            self.basis_comp_to_pauli_trafo_matrix.dot(basis_decomposition),
            [self.n_states, self.n_states]),
            dims=self.qt_dims)

    def _calculate_matrix_set(self, starting_set, n_qubits):
        """recursive function that returns len(starting_set) ** n_qubits
        operators tensored with each other based on the amount of qubits.

        So for 2 qubits, assuming your basis set is {I, X, Y, Z}, you get
        II IX IY IZ XI XX XY XZ ...
        """
        if (n_qubits > 1):
            return [qt.tensor(x, y)
                    for x in self._calculate_matrix_set(starting_set, n_qubits - 1)
                    for y in starting_set]
        else:
            return starting_set

    def get_basis_labels(self, n_qubits):
        """
        Returns the basis labels in the same order as the basis vector is
        parsed. Requires self.measurement_basis_labels to match the order of
        the matrices in self.measurement_basis.
        """
        if (n_qubits > 1):
            return [x + y for x in self.get_basis_labels(n_qubits - 1)
                    for y in self.measurement_basis_labels]
        else:
            return self.measurement_basis_labels

    def get_meas_operator_labels(self, n_qubits):
        """
        Returns a vector of the rotations in order based on
        self.measurement_operator_labels.
        """
        if (n_qubits > 1):
            return [x + y for x in self.get_meas_operator_labels(n_qubits - 1)
                    for y in self.measurement_operator_labels]
        else:
            return self.measurement_operator_labels

    ###############################
    # MLE T Matrix functions
    #
    def build_rho_from_triangular_params(self, t_params):
        """Reassemble rho from its Cholesky parametrization.

        t_params packs the diagonal first, then interleaved real/imag parts
        of the strict lower triangle (see execute_mle_T_matrix_tomo).
        """
        T_mat = np.zeros(
            (2 ** self.n_qubits, 2 ** self.n_qubits), dtype="complex")
        di = np.diag_indices(2 ** self.n_qubits)
        T_mat[di] = t_params[0:2**self.n_qubits]
        tri = np.tril_indices(2 ** self.n_qubits, -1)
        T_mat[tri] = t_params[2**self.n_qubits::2]
        T_mat[tri] += 1j * t_params[2**self.n_qubits+1::2]
        # T^dag T is positive semi-definite by construction; normalize trace
        rho = np.dot(np.conj(T_mat.T), T_mat) / \
            np.trace(np.dot(np.conj(T_mat.T), T_mat))
        return rho

    def _max_likelihood_optimization_function(self, t_params):
        """
        Optimization function that is evaluated many times in the maximum
        likelihood method.

        Calculates the weighted squared difference between expected and actual
        measurement values based on the rho built from t_params.
        Requires:
            self.weights : weights per measurement used in calculating the loss
            self.measurement_vector_numpy / self.measurements_tomo
        NOTE(review): L accumulates as complex; for Hermitian operators the
        imaginary part should be numerical noise only — confirm before
        changing the accumulator type.
        """
        rho = self.build_rho_from_triangular_params(t_params)
        L = 0 + 0j
        for i in range(len(self.measurement_vector_numpy)):
            expectation = np.trace(
                np.dot(self.measurement_vector_numpy[i], rho))
            L += ((expectation -
                   self.measurements_tomo[i]) ** 2) * self.weights[i]
        return L

    #############################################################################################
    # CDSP tomo functions for likelihood.
    def _state_tomo_likelihood_function(self, rho, data, normalisations,
                                        observables, fixedweight=False):
        """Weighted squared-residual likelihood of `data` given `rho`.

        fixedweight : weigh residuals by the measured counts instead of the
        predicted counts.
        """
        data_predicted = []
        for ii in range(len(data)):
            data_predicted.append(
                (rho.full().dot(observables[ii])).trace() * normalisations[ii])
        data_predicted = np.array(data_predicted)
        if fixedweight:
            likely_function = np.sum((data - data_predicted)**2 / data)
        else:
            likely_function = np.sum((data - data_predicted)**2 / data_predicted)
        return likely_function

    def _state_tomo_goodness_of_fit(self, rho, data, normalisations, observables,
                                    fixedweight=False, eig_cutoff=1e-6):
        """
        Calculates the goodness of fit. It has a normalisation which is just
        the sum of the counts in the superconducting case since there are no
        missing counts like in the photon case.

        Returns the reduced chi-squared under three different
        degrees-of-freedom conventions ('pure', 'mixed', 'dofs').
        """
        likely_function = self._state_tomo_likelihood_function(
            rho, data, normalisations, observables, fixedweight=fixedweight)
        num_data = len(data)
        # eigenvalues below eig_cutoff are treated as numerically zero
        num_eigs = np.sum(rho.eigenenergies() > eig_cutoff)
        rho_dim = rho.shape[0]
        num_dofs = num_eigs * (2 * rho_dim - num_eigs)
        out = {}
        out['pure'] = likely_function / (num_data - (2 * rho_dim - 1))
        out['mixed'] = likely_function / (num_data - rho_dim**2)
        out['dofs'] = likely_function / (num_data - num_dofs)
        return out
#################################################################
#
# Data Generation (currently for 2 qubits only)
#
##################################################################
def generate_tomo_data(rho, M, R, N, M_bins = None):
    """
    Generates simulated tomography data for state rho under rotation R.

    Returns both the thresholded bin counts (for thresholded tomo) and the
    expectation value (for average tomo); a single multinomial draw of N
    shots underlies both data types.
    """
    # spectral decomposition of the measurement operator
    eigenvalues, eigenvectors = M.eigenstates()
    if M_bins is None:
        M_bins = comp_projectors
    # probability of projecting onto each eigenstate after the tomo rotation
    outcome_probs = [((R.dag() * (qt.ket2dm(vec) * R)) * rho).tr().real
                     for vec in eigenvectors]
    # draw "experimental" shot counts for every measurement outcome
    counts = np.random.multinomial(N, outcome_probs)
    # expectation value from the simulated outcome frequencies
    expectations = sum((counts / float(N)) * eigenvalues)
    # bin counts via the overlap of each eigenstate with the bin projectors
    bin_counts = []
    for bin_op in M_bins:
        overlap = sum(counts[j] * (bin_op * qt.ket2dm(eigenvectors[j])).tr().real
                      for j in range(len(eigenvectors)))
        bin_counts.append(overlap)
    return bin_counts, expectations
def get_TE_calibration_points(e_01, e_10, get_coefficient_matrix=False):
    """
    Mixes the standard computational basis projectors to account for thermal
    excitation fractions e_01 and e_10 on the two qubits.

    get_coefficient_matrix : if True, return a matrix (columns = points) so
        one can correct the normal measurement operators used in tomo;
        otherwise return the four mixed calibration points themselves.
    """
    projectors = comp_projectors
    # single-qubit bit-flip rotations taking |00> to each basis state
    flips = [qt.tensor(qt.qeye(2), qt.qeye(2)),
             qt.tensor(qt.qeye(2), qt.sigmax()),
             qt.tensor(qt.sigmax(), qt.qeye(2)),
             qt.tensor(qt.sigmax(), qt.sigmax())]
    # effect of TE on the 00 state: weight each projector by the probability
    # that the corresponding qubit(s) are (not) thermally excited
    c_00 = ((1 - e_01) * (1 - e_10) * projectors[0]
            + e_01 * (1 - e_10) * projectors[1]
            + e_10 * (1 - e_01) * projectors[2]
            + e_01 * e_10 * projectors[3])
    # the other calibration points follow via the bit-flip rotations
    # (flips[0] is the identity, so the first point is c_00 itself)
    points = [flip * c_00 * flip.dag() for flip in flips]
    if get_coefficient_matrix:
        return np.array([np.diag(point.full()) for point in points]).T
    else:
        return points
| 47.867816 | 159 | 0.636131 |
a78386a09fdcaaf01e20b4773cbfbf987adb5507 | 36,129 | py | Python | daemon/core/gui/graph/graph.py | montag451/core | 3be162b0b0f54b35520b980023abdfad4ff5e489 | [
"BSD-2-Clause"
] | null | null | null | daemon/core/gui/graph/graph.py | montag451/core | 3be162b0b0f54b35520b980023abdfad4ff5e489 | [
"BSD-2-Clause"
] | null | null | null | daemon/core/gui/graph/graph.py | montag451/core | 3be162b0b0f54b35520b980023abdfad4ff5e489 | [
"BSD-2-Clause"
] | null | null | null | import logging
import tkinter as tk
from typing import TYPE_CHECKING, List, Tuple
from PIL import Image, ImageTk
from core.api.grpc import core_pb2
from core.gui.dialogs.shapemod import ShapeDialog
from core.gui.graph import tags
from core.gui.graph.edges import EDGE_WIDTH, CanvasEdge, CanvasWirelessEdge
from core.gui.graph.enums import GraphMode, ScaleOption
from core.gui.graph.node import CanvasNode
from core.gui.graph.shape import Shape
from core.gui.graph.shapeutils import ShapeType, is_draw_shape, is_marker
from core.gui.images import ImageEnum, Images, TypeToImage
from core.gui.nodeutils import EdgeUtils, NodeUtils
if TYPE_CHECKING:
from core.gui.app import Application
from core.gui.coreclient import CoreClient
# Canvas scaling factors and icon size. NOTE(review): their usage is outside
# this chunk — names suggest multiplicative zoom steps and a pixel icon size;
# confirm against the zoom/draw handlers.
ZOOM_IN = 1.1
ZOOM_OUT = 0.9
ICON_SIZE = 48
class CanvasGraph(tk.Canvas):
def __init__(
self, master: "Application", core: "CoreClient", width: int, height: int
):
super().__init__(master, highlightthickness=0, background="#cccccc")
self.app = master
self.core = core
self.mode = GraphMode.SELECT
self.annotation_type = None
self.selection = {}
self.select_box = None
self.selected = None
self.node_draw = None
self.context = None
self.nodes = {}
self.edges = {}
self.shapes = {}
self.wireless_edges = {}
# map wireless/EMANE node to the set of MDRs connected to that node
self.wireless_network = {}
self.drawing_edge = None
self.grid = None
self.shape_drawing = False
self.default_dimensions = (width, height)
self.current_dimensions = self.default_dimensions
self.ratio = 1.0
self.offset = (0, 0)
self.cursor = (0, 0)
self.marker_tool = None
self.to_copy = []
# background related
self.wallpaper_id = None
self.wallpaper = None
self.wallpaper_drawn = None
self.wallpaper_file = ""
self.scale_option = tk.IntVar(value=1)
self.show_grid = tk.BooleanVar(value=True)
self.adjust_to_dim = tk.BooleanVar(value=False)
# throughput related
self.throughput_threshold = 250.0
self.throughput_width = 10
self.throughput_color = "#FF0000"
# bindings
self.setup_bindings()
# draw base canvas
self.draw_canvas()
self.draw_grid()
def draw_canvas(self, dimensions: Tuple[int, int] = None):
if self.grid is not None:
self.delete(self.grid)
if not dimensions:
dimensions = self.default_dimensions
self.current_dimensions = dimensions
self.grid = self.create_rectangle(
0,
0,
*dimensions,
outline="#000000",
fill="#ffffff",
width=1,
tags="rectangle",
)
self.configure(scrollregion=self.bbox(tk.ALL))
def reset_and_redraw(self, session: core_pb2.Session):
"""
Reset the private variables CanvasGraph object, redraw nodes given the new grpc
client.
:param session: session to draw
"""
# hide context
self.hide_context()
# delete any existing drawn items
for tag in tags.COMPONENT_TAGS:
self.delete(tag)
# set the private variables to default value
self.mode = GraphMode.SELECT
self.annotation_type = None
self.node_draw = None
self.selected = None
self.nodes.clear()
self.edges.clear()
self.shapes.clear()
self.wireless_edges.clear()
self.wireless_network.clear()
self.drawing_edge = None
self.draw_session(session)
def setup_bindings(self):
"""
Bind any mouse events or hot keys to the matching action
"""
self.bind("<ButtonPress-1>", self.click_press)
self.bind("<ButtonRelease-1>", self.click_release)
self.bind("<B1-Motion>", self.click_motion)
self.bind("<ButtonRelease-3>", self.click_context)
self.bind("<Delete>", self.press_delete)
self.bind("<Control-1>", self.ctrl_click)
self.bind("<Double-Button-1>", self.double_click)
self.bind("<MouseWheel>", self.zoom)
self.bind("<Button-4>", lambda e: self.zoom(e, ZOOM_IN))
self.bind("<Button-5>", lambda e: self.zoom(e, ZOOM_OUT))
self.bind("<ButtonPress-3>", lambda e: self.scan_mark(e.x, e.y))
self.bind("<B3-Motion>", lambda e: self.scan_dragto(e.x, e.y, gain=1))
def hide_context(self, event=None):
if self.context:
self.context.unpost()
self.context = None
def get_actual_coords(self, x: float, y: float) -> [float, float]:
actual_x = (x - self.offset[0]) / self.ratio
actual_y = (y - self.offset[1]) / self.ratio
return actual_x, actual_y
def get_scaled_coords(self, x: float, y: float) -> [float, float]:
scaled_x = (x * self.ratio) + self.offset[0]
scaled_y = (y * self.ratio) + self.offset[1]
return scaled_x, scaled_y
def inside_canvas(self, x: float, y: float) -> [bool, bool]:
x1, y1, x2, y2 = self.bbox(self.grid)
valid_x = x1 <= x <= x2
valid_y = y1 <= y <= y2
return valid_x and valid_y
def valid_position(self, x1: int, y1: int, x2: int, y2: int) -> [bool, bool]:
valid_topleft = self.inside_canvas(x1, y1)
valid_bottomright = self.inside_canvas(x2, y2)
return valid_topleft and valid_bottomright
def set_throughputs(self, throughputs_event: core_pb2.ThroughputsEvent):
for interface_throughput in throughputs_event.interface_throughputs:
node_id = interface_throughput.node_id
interface_id = interface_throughput.interface_id
throughput = interface_throughput.throughput
interface_to_edge_id = (node_id, interface_id)
token = self.core.interface_to_edge.get(interface_to_edge_id)
if not token:
continue
edge = self.edges.get(token)
if edge:
edge.set_throughput(throughput)
else:
del self.core.interface_to_edge[interface_to_edge_id]
def draw_grid(self):
"""
Create grid.
"""
width, height = self.width_and_height()
width = int(width)
height = int(height)
for i in range(0, width, 27):
self.create_line(i, 0, i, height, dash=(2, 4), tags=tags.GRIDLINE)
for i in range(0, height, 27):
self.create_line(0, i, width, i, dash=(2, 4), tags=tags.GRIDLINE)
self.tag_lower(tags.GRIDLINE)
self.tag_lower(self.grid)
def add_wireless_edge(self, src: CanvasNode, dst: CanvasNode):
"""
add a wireless edge between 2 canvas nodes
"""
token = EdgeUtils.get_token(src.id, dst.id)
x1, y1 = self.coords(src.id)
x2, y2 = self.coords(dst.id)
position = (x1, y1, x2, y2)
edge = CanvasWirelessEdge(token, position, src.id, dst.id, self)
self.wireless_edges[token] = edge
src.wireless_edges.add(edge)
dst.wireless_edges.add(edge)
self.tag_raise(src.id)
self.tag_raise(dst.id)
def delete_wireless_edge(self, src: CanvasNode, dst: CanvasNode):
token = EdgeUtils.get_token(src.id, dst.id)
edge = self.wireless_edges.pop(token)
edge.delete()
src.wireless_edges.remove(edge)
dst.wireless_edges.remove(edge)
def draw_session(self, session: core_pb2.Session):
"""
Draw existing session.
"""
# draw existing nodes
for core_node in session.nodes:
logging.debug("drawing node %s", core_node)
# peer to peer node is not drawn on the GUI
if NodeUtils.is_ignore_node(core_node.type):
continue
image = NodeUtils.node_image(
core_node, self.app.guiconfig, self.app.app_scale
)
# if the gui can't find node's image, default to the "edit-node" image
if not image:
image = Images.get(
ImageEnum.EDITNODE, int(ICON_SIZE * self.app.app_scale)
)
x = core_node.position.x
y = core_node.position.y
node = CanvasNode(self.master, x, y, core_node, image)
self.nodes[node.id] = node
self.core.canvas_nodes[core_node.id] = node
# draw existing links
for link in session.links:
logging.debug("drawing link: %s", link)
canvas_node_one = self.core.canvas_nodes[link.node_one_id]
node_one = canvas_node_one.core_node
canvas_node_two = self.core.canvas_nodes[link.node_two_id]
node_two = canvas_node_two.core_node
token = EdgeUtils.get_token(canvas_node_one.id, canvas_node_two.id)
if link.type == core_pb2.LinkType.WIRELESS:
self.add_wireless_edge(canvas_node_one, canvas_node_two)
else:
if token not in self.edges:
edge = CanvasEdge(
node_one.position.x,
node_one.position.y,
node_two.position.x,
node_two.position.y,
canvas_node_one.id,
self,
)
edge.token = token
edge.dst = canvas_node_two.id
edge.set_link(link)
edge.check_wireless()
canvas_node_one.edges.add(edge)
canvas_node_two.edges.add(edge)
self.edges[edge.token] = edge
self.core.links[edge.token] = edge
if link.HasField("interface_one"):
canvas_node_one.interfaces.append(link.interface_one)
edge.src_interface = link.interface_one
if link.HasField("interface_two"):
canvas_node_two.interfaces.append(link.interface_two)
edge.dst_interface = link.interface_two
elif link.options.unidirectional:
edge = self.edges[token]
edge.asymmetric_link = link
else:
logging.error("duplicate link received: %s", link)
# raise the nodes so they on top of the links
self.tag_raise(tags.NODE)
def stopped_session(self):
# clear wireless edges
for edge in self.wireless_edges.values():
edge.delete()
src_node = self.nodes[edge.src]
src_node.wireless_edges.remove(edge)
dst_node = self.nodes[edge.dst]
dst_node.wireless_edges.remove(edge)
self.wireless_edges.clear()
# clear all middle edge labels
for edge in self.edges.values():
edge.reset()
def canvas_xy(self, event: tk.Event) -> [float, float]:
"""
Convert window coordinate to canvas coordinate
"""
x = self.canvasx(event.x)
y = self.canvasy(event.y)
return x, y
def get_selected(self, event: tk.Event) -> int:
"""
Retrieve the item id that is on the mouse position
"""
x, y = self.canvas_xy(event)
overlapping = self.find_overlapping(x, y, x, y)
selected = None
for _id in overlapping:
if self.drawing_edge and self.drawing_edge.id == _id:
continue
if _id in self.nodes:
selected = _id
break
if _id in self.shapes:
selected = _id
return selected
def click_release(self, event: tk.Event):
"""
Draw a node or finish drawing an edge according to the current graph mode
"""
logging.debug("click release")
x, y = self.canvas_xy(event)
if not self.inside_canvas(x, y):
return
if self.context:
self.hide_context()
else:
if self.mode == GraphMode.ANNOTATION:
self.focus_set()
if self.shape_drawing:
shape = self.shapes[self.selected]
shape.shape_complete(x, y)
self.shape_drawing = False
elif self.mode == GraphMode.SELECT:
self.focus_set()
if self.select_box:
x0, y0, x1, y1 = self.coords(self.select_box.id)
inside = [
x
for x in self.find_enclosed(x0, y0, x1, y1)
if "node" in self.gettags(x) or "shape" in self.gettags(x)
]
for i in inside:
self.select_object(i, True)
self.select_box.disappear()
self.select_box = None
else:
self.focus_set()
self.selected = self.get_selected(event)
logging.debug(
f"click release selected({self.selected}) mode({self.mode})"
)
if self.mode == GraphMode.EDGE:
self.handle_edge_release(event)
elif self.mode == GraphMode.NODE:
self.add_node(x, y)
elif self.mode == GraphMode.PICKNODE:
self.mode = GraphMode.NODE
self.selected = None
def handle_edge_release(self, event: tk.Event):
edge = self.drawing_edge
self.drawing_edge = None
# not drawing edge return
if edge is None:
return
# edge dst must be a node
logging.debug("current selected: %s", self.selected)
dst_node = self.nodes.get(self.selected)
if not dst_node:
edge.delete()
return
# edge dst is same as src, delete edge
if edge.src == self.selected:
edge.delete()
return
# ignore repeated edges
token = EdgeUtils.get_token(edge.src, self.selected)
if token in self.edges:
edge.delete()
return
# set dst node and snap edge to center
edge.complete(self.selected)
self.edges[edge.token] = edge
node_src = self.nodes[edge.src]
node_src.edges.add(edge)
node_dst = self.nodes[edge.dst]
node_dst.edges.add(edge)
self.core.create_link(edge, node_src, node_dst)
def select_object(self, object_id: int, choose_multiple: bool = False):
"""
create a bounding box when a node is selected
"""
if not choose_multiple:
self.clear_selection()
# draw a bounding box if node hasn't been selected yet
if object_id not in self.selection:
x0, y0, x1, y1 = self.bbox(object_id)
selection_id = self.create_rectangle(
(x0 - 6, y0 - 6, x1 + 6, y1 + 6),
activedash=True,
dash="-",
tags=tags.SELECTION,
)
self.selection[object_id] = selection_id
else:
selection_id = self.selection.pop(object_id)
self.delete(selection_id)
def clear_selection(self):
"""
Clear current selection boxes.
"""
for _id in self.selection.values():
self.delete(_id)
self.selection.clear()
def move_selection(self, object_id: int, x_offset: float, y_offset: float):
select_id = self.selection.get(object_id)
if select_id is not None:
self.move(select_id, x_offset, y_offset)
def delete_selection_objects(self) -> List[CanvasNode]:
edges = set()
nodes = []
for object_id in self.selection:
# delete selection box
selection_id = self.selection[object_id]
self.delete(selection_id)
# delete node and related edges
if object_id in self.nodes:
canvas_node = self.nodes.pop(object_id)
canvas_node.delete()
nodes.append(canvas_node)
is_wireless = NodeUtils.is_wireless_node(canvas_node.core_node.type)
# delete related edges
for edge in canvas_node.edges:
if edge in edges:
continue
edges.add(edge)
self.edges.pop(edge.token, None)
edge.delete()
# update node connected to edge being deleted
other_id = edge.src
other_interface = edge.src_interface
if edge.src == object_id:
other_id = edge.dst
other_interface = edge.dst_interface
other_node = self.nodes[other_id]
other_node.edges.remove(edge)
try:
other_node.interfaces.remove(other_interface)
except ValueError:
pass
if is_wireless:
other_node.delete_antenna()
# delete shape
if object_id in self.shapes:
shape = self.shapes.pop(object_id)
shape.delete()
self.selection.clear()
return nodes
def zoom(self, event: tk.Event, factor: float = None):
if not factor:
factor = ZOOM_IN if event.delta > 0 else ZOOM_OUT
event.x, event.y = self.canvasx(event.x), self.canvasy(event.y)
self.scale(tk.ALL, event.x, event.y, factor, factor)
self.configure(scrollregion=self.bbox(tk.ALL))
self.ratio *= float(factor)
self.offset = (
self.offset[0] * factor + event.x * (1 - factor),
self.offset[1] * factor + event.y * (1 - factor),
)
logging.info("ratio: %s", self.ratio)
logging.info("offset: %s", self.offset)
self.app.statusbar.zoom.config(text="%s" % (int(self.ratio * 100)) + "%")
if self.wallpaper:
self.redraw_wallpaper()
def click_press(self, event: tk.Event):
"""
Start drawing an edge if mouse click is on a node
"""
x, y = self.canvas_xy(event)
if not self.inside_canvas(x, y):
return
self.cursor = x, y
selected = self.get_selected(event)
logging.debug("click press(%s): %s", self.cursor, selected)
x_check = self.cursor[0] - self.offset[0]
y_check = self.cursor[1] - self.offset[1]
logging.debug("click press offset(%s, %s)", x_check, y_check)
is_node = selected in self.nodes
if self.mode == GraphMode.EDGE and is_node:
x, y = self.coords(selected)
self.drawing_edge = CanvasEdge(x, y, x, y, selected, self)
if self.mode == GraphMode.ANNOTATION:
if is_marker(self.annotation_type):
r = self.app.toolbar.marker_tool.radius
self.create_oval(
x - r,
y - r,
x + r,
y + r,
fill=self.app.toolbar.marker_tool.color,
outline="",
tags=tags.MARKER,
)
return
if selected is None:
shape = Shape(self.app, self, self.annotation_type, x, y)
self.selected = shape.id
self.shape_drawing = True
self.shapes[shape.id] = shape
if selected is not None:
if selected not in self.selection:
if selected in self.shapes:
shape = self.shapes[selected]
self.select_object(shape.id)
self.selected = selected
elif selected in self.nodes:
node = self.nodes[selected]
self.select_object(node.id)
self.selected = selected
logging.debug(
"selected node(%s), coords: (%s, %s)",
node.core_node.name,
node.core_node.position.x,
node.core_node.position.y,
)
else:
if self.mode == GraphMode.SELECT:
shape = Shape(self.app, self, ShapeType.RECTANGLE, x, y)
self.select_box = shape
self.clear_selection()
def ctrl_click(self, event: tk.Event):
# update cursor location
x, y = self.canvas_xy(event)
if not self.inside_canvas(x, y):
return
self.cursor = x, y
# handle multiple selections
logging.debug("control left click: %s", event)
selected = self.get_selected(event)
if (
selected not in self.selection
and selected in self.shapes
or selected in self.nodes
):
self.select_object(selected, choose_multiple=True)
def click_motion(self, event: tk.Event):
"""
Redraw drawing edge according to the current position of the mouse
"""
x, y = self.canvas_xy(event)
if not self.inside_canvas(x, y):
if self.select_box:
self.select_box.delete()
self.select_box = None
if is_draw_shape(self.annotation_type) and self.shape_drawing:
shape = self.shapes.pop(self.selected)
shape.delete()
self.shape_drawing = False
return
x_offset = x - self.cursor[0]
y_offset = y - self.cursor[1]
self.cursor = x, y
if self.mode == GraphMode.EDGE and self.drawing_edge is not None:
x1, y1, _, _ = self.coords(self.drawing_edge.id)
self.coords(self.drawing_edge.id, x1, y1, x, y)
if self.mode == GraphMode.ANNOTATION:
if is_draw_shape(self.annotation_type) and self.shape_drawing:
shape = self.shapes[self.selected]
shape.shape_motion(x, y)
elif is_marker(self.annotation_type):
r = self.app.toolbar.marker_tool.radius
self.create_oval(
x - r,
y - r,
x + r,
y + r,
fill=self.app.toolbar.marker_tool.color,
outline="",
tags="marker",
)
return
if self.mode == GraphMode.EDGE:
return
# move selected objects
if self.selection:
for selected_id in self.selection:
if selected_id in self.shapes:
shape = self.shapes[selected_id]
shape.motion(x_offset, y_offset)
if selected_id in self.nodes:
node = self.nodes[selected_id]
node.motion(x_offset, y_offset, update=self.core.is_runtime())
else:
if self.select_box and self.mode == GraphMode.SELECT:
self.select_box.shape_motion(x, y)
def click_context(self, event: tk.Event):
if not self.context:
selected = self.get_selected(event)
canvas_node = self.nodes.get(selected)
if canvas_node:
logging.debug("node context: %s", selected)
self.context = canvas_node.create_context()
self.context.bind("<Leave>", self.hide_context)
self.context.post(event.x_root, event.y_root)
# else:
# self.hide_context()
def press_delete(self, event: tk.Event):
"""
delete selected nodes and any data that relates to it
"""
logging.debug("press delete key")
if not self.app.core.is_runtime():
nodes = self.delete_selection_objects()
self.core.delete_graph_nodes(nodes)
else:
logging.info("node deletion is disabled during runtime state")
def double_click(self, event: tk.Event):
selected = self.get_selected(event)
if selected is not None and selected in self.shapes:
shape = self.shapes[selected]
dialog = ShapeDialog(self.app, self.app, shape)
dialog.show()
def add_node(self, x: float, y: float) -> CanvasNode:
if self.selected is None or self.selected in self.shapes:
actual_x, actual_y = self.get_actual_coords(x, y)
core_node = self.core.create_node(
actual_x, actual_y, self.node_draw.node_type, self.node_draw.model
)
try:
self.node_draw.image = Images.get(
self.node_draw.image_enum, int(ICON_SIZE * self.app.app_scale)
)
except AttributeError:
self.node_draw.image = Images.get_custom(
self.node_draw.image_file, int(ICON_SIZE * self.app.app_scale)
)
node = CanvasNode(self.master, x, y, core_node, self.node_draw.image)
self.core.canvas_nodes[core_node.id] = node
self.nodes[node.id] = node
return node
def width_and_height(self):
"""
retrieve canvas width and height in pixels
"""
x0, y0, x1, y1 = self.coords(self.grid)
canvas_w = abs(x0 - x1)
canvas_h = abs(y0 - y1)
return canvas_w, canvas_h
def get_wallpaper_image(self) -> Image.Image:
width = int(self.wallpaper.width * self.ratio)
height = int(self.wallpaper.height * self.ratio)
image = self.wallpaper.resize((width, height), Image.ANTIALIAS)
return image
def draw_wallpaper(
self, image: ImageTk.PhotoImage, x: float = None, y: float = None
):
if x is None and y is None:
x1, y1, x2, y2 = self.bbox(self.grid)
x = (x1 + x2) / 2
y = (y1 + y2) / 2
self.wallpaper_id = self.create_image((x, y), image=image, tags=tags.WALLPAPER)
self.wallpaper_drawn = image
def wallpaper_upper_left(self):
self.delete(self.wallpaper_id)
# create new scaled image, cropped if needed
width, height = self.width_and_height()
image = self.get_wallpaper_image()
cropx = image.width
cropy = image.height
if image.width > width:
cropx = image.width
if image.height > height:
cropy = image.height
cropped = image.crop((0, 0, cropx, cropy))
image = ImageTk.PhotoImage(cropped)
# draw on canvas
x1, y1, _, _ = self.bbox(self.grid)
x = (cropx / 2) + x1
y = (cropy / 2) + y1
self.draw_wallpaper(image, x, y)
def wallpaper_center(self):
"""
place the image at the center of canvas
"""
self.delete(self.wallpaper_id)
# dimension of the cropped image
width, height = self.width_and_height()
image = self.get_wallpaper_image()
cropx = 0
if image.width > width:
cropx = (image.width - width) / 2
cropy = 0
if image.height > height:
cropy = (image.height - height) / 2
x1 = 0 + cropx
y1 = 0 + cropy
x2 = image.width - cropx
y2 = image.height - cropy
cropped = image.crop((x1, y1, x2, y2))
image = ImageTk.PhotoImage(cropped)
self.draw_wallpaper(image)
def wallpaper_scaled(self):
"""
scale image based on canvas dimension
"""
self.delete(self.wallpaper_id)
canvas_w, canvas_h = self.width_and_height()
image = self.wallpaper.resize((int(canvas_w), int(canvas_h)), Image.ANTIALIAS)
image = ImageTk.PhotoImage(image)
self.draw_wallpaper(image)
def resize_to_wallpaper(self):
self.delete(self.wallpaper_id)
image = ImageTk.PhotoImage(self.wallpaper)
self.redraw_canvas((image.width(), image.height()))
self.draw_wallpaper(image)
def redraw_canvas(self, dimensions: Tuple[int, int] = None):
logging.info("redrawing canvas to dimensions: %s", dimensions)
# reset scale and move back to original position
logging.info("resetting scaling: %s %s", self.ratio, self.offset)
factor = 1 / self.ratio
self.scale(tk.ALL, self.offset[0], self.offset[1], factor, factor)
self.move(tk.ALL, -self.offset[0], -self.offset[1])
# reset ratio and offset
self.ratio = 1.0
self.offset = (0, 0)
# redraw canvas rectangle
self.draw_canvas(dimensions)
# redraw gridlines to new canvas size
self.delete(tags.GRIDLINE)
self.draw_grid()
self.update_grid()
def redraw_wallpaper(self):
if self.adjust_to_dim.get():
logging.info("drawing wallpaper to canvas dimensions")
self.resize_to_wallpaper()
else:
option = ScaleOption(self.scale_option.get())
logging.info("drawing canvas using scaling option: %s", option)
if option == ScaleOption.UPPER_LEFT:
self.wallpaper_upper_left()
elif option == ScaleOption.CENTERED:
self.wallpaper_center()
elif option == ScaleOption.SCALED:
self.wallpaper_scaled()
elif option == ScaleOption.TILED:
logging.warning("tiled background not implemented yet")
# raise items above wallpaper
for component in tags.ABOVE_WALLPAPER_TAGS:
self.tag_raise(component)
def update_grid(self):
logging.debug("updating grid show grid: %s", self.show_grid.get())
if self.show_grid.get():
self.itemconfig(tags.GRIDLINE, state=tk.NORMAL)
else:
self.itemconfig(tags.GRIDLINE, state=tk.HIDDEN)
def set_wallpaper(self, filename: str):
logging.debug("setting wallpaper: %s", filename)
if filename:
img = Image.open(filename)
self.wallpaper = img
self.wallpaper_file = filename
self.redraw_wallpaper()
else:
if self.wallpaper_id is not None:
self.delete(self.wallpaper_id)
self.wallpaper = None
self.wallpaper_file = None
def is_selection_mode(self) -> bool:
return self.mode == GraphMode.SELECT
def create_edge(self, source: CanvasNode, dest: CanvasNode):
"""
create an edge between source node and destination node
"""
if (source.id, dest.id) not in self.edges:
pos0 = source.core_node.position
x0 = pos0.x
y0 = pos0.y
edge = CanvasEdge(x0, y0, x0, y0, source.id, self)
edge.complete(dest.id)
self.edges[edge.token] = edge
self.nodes[source.id].edges.add(edge)
self.nodes[dest.id].edges.add(edge)
self.core.create_link(edge, source, dest)
def copy(self):
if self.app.core.is_runtime():
logging.info("copy is disabled during runtime state")
return
if self.selection:
logging.debug("to copy %s nodes", len(self.selection))
self.to_copy = self.selection.keys()
def paste(self):
if self.app.core.is_runtime():
logging.info("paste is disabled during runtime state")
return
# maps original node canvas id to copy node canvas id
copy_map = {}
# the edges that will be copy over
to_copy_edges = []
for canvas_nid in self.to_copy:
core_node = self.nodes[canvas_nid].core_node
actual_x = core_node.position.x + 50
actual_y = core_node.position.y + 50
scaled_x, scaled_y = self.get_scaled_coords(actual_x, actual_y)
copy = self.core.create_node(
actual_x, actual_y, core_node.type, core_node.model
)
node = CanvasNode(
self.master, scaled_x, scaled_y, copy, self.nodes[canvas_nid].image
)
# add new node to modified_service_nodes set if that set contains the to_copy node
if self.app.core.service_been_modified(core_node.id):
self.app.core.modified_service_nodes.add(copy.id)
copy_map[canvas_nid] = node.id
self.core.canvas_nodes[copy.id] = node
self.nodes[node.id] = node
self.core.copy_node_config(core_node.id, copy.id)
edges = self.nodes[canvas_nid].edges
for edge in edges:
if edge.src not in self.to_copy or edge.dst not in self.to_copy:
if canvas_nid == edge.src:
self.create_edge(node, self.nodes[edge.dst])
elif canvas_nid == edge.dst:
self.create_edge(self.nodes[edge.src], node)
else:
to_copy_edges.append(edge)
# copy link and link config
for edge in to_copy_edges:
source_node_copy = self.nodes[copy_map[edge.token[0]]]
dest_node_copy = self.nodes[copy_map[edge.token[1]]]
self.create_edge(source_node_copy, dest_node_copy)
copy_edge = self.edges[
EdgeUtils.get_token(source_node_copy.id, dest_node_copy.id)
]
copy_link = copy_edge.link
options = edge.link.options
copy_link.options.CopyFrom(options)
interface_one = None
if copy_link.HasField("interface_one"):
interface_one = copy_link.interface_one.id
interface_two = None
if copy_link.HasField("interface_two"):
interface_two = copy_link.interface_two.id
if not options.unidirectional:
copy_edge.asymmetric_link = None
else:
asym_interface_one = None
if interface_one:
asym_interface_one = core_pb2.Interface(id=interface_one)
asym_interface_two = None
if interface_two:
asym_interface_two = core_pb2.Interface(id=interface_two)
copy_edge.asymmetric_link = core_pb2.Link(
node_one_id=copy_link.node_two_id,
node_two_id=copy_link.node_one_id,
interface_one=asym_interface_one,
interface_two=asym_interface_two,
options=edge.asymmetric_link.options,
)
self.itemconfig(
copy_edge.id,
width=self.itemcget(edge.id, "width"),
fill=self.itemcget(edge.id, "fill"),
)
def scale_graph(self):
for nid, canvas_node in self.nodes.items():
img = None
if NodeUtils.is_custom(
canvas_node.core_node.type, canvas_node.core_node.model
):
for custom_node in self.app.guiconfig["nodes"]:
if custom_node["name"] == canvas_node.core_node.model:
img = Images.get_custom(
custom_node["image"], int(ICON_SIZE * self.app.app_scale)
)
else:
image_enum = TypeToImage.get(
canvas_node.core_node.type, canvas_node.core_node.model
)
img = Images.get(image_enum, int(ICON_SIZE * self.app.app_scale))
self.itemconfig(nid, image=img)
canvas_node.image = img
canvas_node.scale_text()
canvas_node.scale_antennas()
for edge_id in self.find_withtag(tags.EDGE):
self.itemconfig(edge_id, width=int(EDGE_WIDTH * self.app.app_scale))
| 37.439378 | 94 | 0.560132 |
c9b026954258a443ff6e808b33f9267cf33b01c3 | 1,607 | py | Python | tikz/nps/datas_and_predictions.py | wesselb/wesselb.github.io | d3bc6d5566fa40db3873c3b4797c47fbdb81037c | [
"MIT"
] | 1 | 2022-03-08T02:58:21.000Z | 2022-03-08T02:58:21.000Z | tikz/nps/datas_and_predictions.py | wesselb/wesselb.github.io | d3bc6d5566fa40db3873c3b4797c47fbdb81037c | [
"MIT"
] | null | null | null | tikz/nps/datas_and_predictions.py | wesselb/wesselb.github.io | d3bc6d5566fa40db3873c3b4797c47fbdb81037c | [
"MIT"
] | 1 | 2017-04-02T14:37:38.000Z | 2017-04-02T14:37:38.000Z | from stheno import GP, Matern52
import matplotlib.pyplot as plt
import matplotlib
from wbml.plot import tweak, pdfcrop
import numpy as np
np.random.seed(8)
x = np.linspace(0, 6, 100)
f = GP(Matern52())
prefix = ['', '', '\\tilde']
context_label = ['^{(c)}_1', '^{(c)}_n', '']
target_label = ['^{(t)}_1', '^{(t)}_n', '']
for i in range(3):
n_obs = np.random.randint(3, 5 + 1)
x_obs = np.random.rand(n_obs) * x.max()
y_obs = f(x_obs).sample().flatten()
mean, lower, upper = (f | (x_obs, y_obs))(x).marginals()
n_test = np.random.randint(3, 5 + 1)
x_test = np.random.rand(n_test) * x.max()
y_test = (f | (x_obs, y_obs))(x_test).sample().flatten()
plt.figure(figsize=(2.5, 1.25))
plt.scatter(x_obs, y_obs, style='train', s=15,
label=f'${prefix[i]} D{context_label[i]}$')
plt.ylim(lower.min() - 0.1, upper.max() + 0.1)
plt.xlim(x.min(), x.max())
plt.gca().set_xticklabels([])
plt.gca().set_yticklabels([])
matplotlib.rc('legend', fontsize=8)
tweak(legend=True)
plt.savefig(f'datas_and_predictions/data{i + 1}.pdf')
pdfcrop(f'datas_and_predictions/data{i + 1}.pdf')
plt.plot(x, mean, style='pred')
plt.fill_between(x, lower, upper, style='pred')
plt.savefig(f'datas_and_predictions/pred{i + 1}.pdf')
pdfcrop(f'datas_and_predictions/pred{i + 1}.pdf')
plt.scatter(x_test, y_test, style='test', s=15,
label=f'${prefix[i]} D{target_label[i]}$')
tweak(legend=True)
plt.savefig(f'datas_and_predictions/test{i + 1}.pdf')
pdfcrop(f'datas_and_predictions/test{i + 1}.pdf')
| 28.192982 | 60 | 0.614188 |
7552542137fa8c531b324f417152d1f8d4f90799 | 670 | py | Python | python/AlgoritmosPython/idade.py | jonfisik/Projects | 7847f32c9e333cfca31cc127db175d9b4080ed0f | [
"MIT"
] | 2 | 2020-09-05T22:25:37.000Z | 2021-06-01T21:34:54.000Z | python/AlgoritmosPython/idade.py | jonfisik/Projects | 7847f32c9e333cfca31cc127db175d9b4080ed0f | [
"MIT"
] | null | null | null | python/AlgoritmosPython/idade.py | jonfisik/Projects | 7847f32c9e333cfca31cc127db175d9b4080ed0f | [
"MIT"
] | null | null | null | __author__ = 'JPaschoal'
__version__ = '1.0.1'
__email__ = 'jonfisik@hotmail.com'
__date__ = '04/05/2021'
def traco():
return print('-----'*10)
print('')
print("IDADE, QUANDO?")
traco()
numero = int(input('Digite o número de pessoas: '))
cont = 0
print('')
print('Informe')
while cont < numero:
nome = str(input('Nome: '))
dia = int(input('Dia de nascimento: '))
mes = int(input('Mês de nascimento: '))
ano = int(input('Ano de nascimento: '))
idade = int(input('Idade a ser completada: '))
print('')
print(f'{nome}, você fará {idade} anos nos dia {dia}/{mes}/{ano + idade}.')
cont += 1
print('')
traco()
print('')
# END | 22.333333 | 79 | 0.592537 |
780535150988b607bc87f81e35c85068c4765e50 | 915 | py | Python | var/spack/repos/builtin/packages/hsf-cmaketools/package.py | carlabguillen/spack | 7070bb892f9bdb5cf9e76e0eecd64f6cc5f4695c | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 9 | 2018-04-18T07:51:40.000Z | 2021-09-10T03:56:57.000Z | var/spack/repos/builtin/packages/hsf-cmaketools/package.py | carlabguillen/spack | 7070bb892f9bdb5cf9e76e0eecd64f6cc5f4695c | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 907 | 2018-04-18T11:17:57.000Z | 2022-03-31T13:20:25.000Z | var/spack/repos/builtin/packages/hsf-cmaketools/package.py | carlabguillen/spack | 7070bb892f9bdb5cf9e76e0eecd64f6cc5f4695c | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 29 | 2018-11-05T16:14:23.000Z | 2022-02-03T16:07:09.000Z | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class HsfCmaketools(Package):
    """CMake 'Find' modules for commonly used HEP Packages"""
    # Spack package metadata: where the project lives and where to fetch it.
    homepage = "https://github.com/HSF/cmaketools/"
    url = "https://github.com/HSF/cmaketools/archive/1.8.tar.gz"
    git = "https://github.com/HSF/cmaketools.git"
    maintainers = ['vvolkl']
    # Spack `version` directives: a moving 'master' branch and a pinned,
    # checksummed 1.8 release tarball.
    version('master', branch='master')
    version('1.8', sha256='91af30f5701dadf80a5d7e0d808c224c934f0784a3aff2d3b69aff24f7e1db41')
    # this package only needs to be installed in CMAKE_PREFIX_PATH
    # which is set by spack
    def install(self, spec, prefix):
        # No build step: just copy the CMake find-modules and the config file
        # into the install prefix so CMake can discover them.
        mkdir(prefix.modules)
        install_tree('modules', prefix.modules)
        install("CMakeToolsConfig.cmake", prefix)
| 33.888889 | 93 | 0.706011 |
7107940e1a1939e8db711de1d2e52ab7d1cfac19 | 19,955 | py | Python | examples/nlp/question-answering.py | I-Anirban/keras-io | 9fdf6bec8e849471d484576ba81a74c334d8f6f9 | [
"Apache-2.0"
] | null | null | null | examples/nlp/question-answering.py | I-Anirban/keras-io | 9fdf6bec8e849471d484576ba81a74c334d8f6f9 | [
"Apache-2.0"
] | null | null | null | examples/nlp/question-answering.py | I-Anirban/keras-io | 9fdf6bec8e849471d484576ba81a74c334d8f6f9 | [
"Apache-2.0"
] | null | null | null | """
Title: Question Answering with Hugging Face Transformers
Author: Matthew Carrigan and Merve Noyan
Date created: 13/01/2022
Last modified: 13/01/2022
Description: Question answering implementation using Keras and Hugging Face Transformers.
"""
"""
## Introduction to Question Answering
Question answering is a common NLP task with several variants. In some variants, the task
is multiple-choice:
A list of possible answers are supplied with each question, and the model simply needs to
return a probability distribution over the options. A more challenging variant of
question answering, which is more applicable to real-life tasks, is when the options are
not provided. Instead, the model is given an input document -- called context -- and a
question about the document, and it must extract the span of text in the document that
contains the answer. In this case, the model is not computing a probability distribution
over answers, but two probability distributions over the tokens in the document text,
representing the start and end of the span containing the answer. This variant is called
"extractive question answering".
Extractive question answering is a very challenging NLP task, and the dataset size
required to train such a model from scratch when the questions and answers are natural
language is prohibitively huge. As a result, question answering (like almost all NLP
tasks) benefits enormously from starting from a strong pretrained foundation model -
starting from a strong pretrained language model can reduce the dataset size required to
reach a given accuracy by multiple orders of magnitude, enabling you to reach very strong
performance with surprisingly reasonable datasets.
Starting with a pretrained model adds difficulties, though - where do you get the model
from? How do you ensure that your input data is preprocessed and tokenized the same way
as the original model? How do you modify the model to add an output head that matches
your task of interest?
In this example, we'll show you how to load a model from the Hugging Face
[🤗Transformers](https://github.com/huggingface/transformers) library to tackle this
challenge. We'll also load a benchmark question answering dataset from the
[🤗Datasets](https://github.com/huggingface/datasets) library - this is another open-source
repository containing a wide range of datasets across many modalities, from NLP to vision
and beyond. Note, though, that there is no requirement that these libraries must be used
with each other. If you want to train a model from
[🤗Transformers](https://github.com/huggingface/transformers) on your own data, or you want
to load data from [🤗 Datasets](https://github.com/huggingface/datasets) and train your
own entirely unrelated models with it, that is of course possible (and highly
encouraged!)
"""
"""
## Installing the requirements
"""
"""shell
pip install git+https://github.com/huggingface/transformers.git
pip install datasets
pip install huggingface-hub
"""
"""
## Loading the dataset
"""
"""
We will use the [🤗 Datasets](https://github.com/huggingface/datasets) library to download
the SQUAD question answering dataset using `load_dataset()`.
"""
from datasets import load_dataset
datasets = load_dataset("squad")
"""
The `datasets` object itself is a
`DatasetDict`, which contains one key for the training, validation and test set. We can see
the training, validation and test sets all have a column for the context, the question
and the answers to those questions. To access an actual element, you need to select a
split first, then give an index. We can see the answers are indicated by their start
position in the text and their full text, which is a substring of the context as we
mentioned above. Let's take a look at what a single training example looks like.
"""
print(datasets["train"][0])
"""
## Preprocessing the training data
"""
"""
Before we can feed those texts to our model, we need to preprocess them. This is done by
a 🤗 Transformers `Tokenizer` which will (as the name indicates) tokenize the inputs
(including converting the tokens to their corresponding IDs in the pretrained vocabulary)
and put it in a format the model expects, as well as generate the other inputs that model
requires.
To do all of this, we instantiate our tokenizer with the `AutoTokenizer.from_pretrained`
method, which will ensure:
- We get a tokenizer that corresponds to the model architecture we want to use.
- We download the vocabulary used when pretraining this specific checkpoint.
That vocabulary will be cached, so it's not downloaded again the next time we run the
cell.
The `from_pretrained()` method expects the name of a model. If you're unsure which model to
pick, don't panic! The list of models to choose from can be bewildering, but in general
there is a simple tradeoff: Larger models are slower and consume more memory, but usually
yield slightly better final accuracies after fine-tuning. For this example, we have
chosen the (relatively) lightweight `"distilbert"`, a smaller, distilled version of the
famous BERT language model. If you absolutely must have the highest possible accuracy for
an important task, though, and you have the GPU memory (and free time) to handle it, you
may prefer to use a larger model, such as `"roberta-large"`. Newer and even larger models
than `"roberta"` exist in [🤗 Transformers](https://github.com/huggingface/transformers),
but we leave the task of finding and training them as an exercise to readers who are
either particularly masochistic or have 40GB of VRAM to throw around.
"""
from transformers import AutoTokenizer
model_checkpoint = "distilbert-base-cased"
tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)
"""
Depending on the model you selected, you will see different keys in the dictionary
returned by the cell above. They don't matter much for what we're doing here (just know
they are required by the model we will instantiate later), but you can learn more about
them in [this tutorial](https://huggingface.co/transformers/preprocessing.html) if you're
interested.
One specific issue for the preprocessing in question answering is how to deal with very
long documents. We usually truncate them in other tasks, when they are longer than the
model maximum sentence length, but here, removing part of the the context might result in
losing the answer we are looking for. To deal with this, we will allow one (long) example
in our dataset to give several input features, each of length shorter than the maximum
length of the model (or the one we set as a hyper-parameter). Also, just in case the
answer lies at the point we split a long context, we allow some overlap between the
features we generate controlled by the hyper-parameter `doc_stride`.
If we simply truncate with a fixed size (`max_length`), we will lose information. We want to
avoid truncating the question, and instead only truncate the context to ensure the task
remains solvable. To do that, we'll set `truncation` to `"only_second"`, so that only the
second sequence (the context) in each pair is truncated. To get the list of features
capped by the maximum length, we need to set `return_overflowing_tokens` to True and pass
the `doc_stride` to `stride`. To see which feature of the original context contain the
answer, we can return `"offset_mapping"`.
"""
max_length = 384 # The maximum length of a feature (question and context)
doc_stride = (
128 # The authorized overlap between two part of the context when splitting
)
# it is needed.
"""
In the case of impossible answers (the answer is in another feature given by an example
with a long context), we set the cls index for both the start and end position. We could
also simply discard those examples from the training set if the flag
`allow_impossible_answers` is `False`. Since the preprocessing is already complex enough
as it is, we've kept it simple for this part.
"""
def prepare_train_features(examples):
    """Tokenize a batch of SQuAD examples into model-ready features.

    Long contexts are split into several overlapping features (controlled by
    the module-level `max_length` and `doc_stride`), and each feature is
    labeled with `start_positions` / `end_positions` token indices for the
    answer span, or the CLS index when the answer is not inside the feature.

    Arguments:
        examples: a batch dict from `datasets.map(batched=True)` with keys
            "question", "context" and "answers".

    Returns:
        The tokenizer output dict augmented with "start_positions" and
        "end_positions" lists (one entry per generated feature).
    """
    # Tokenize our examples with truncation and padding, but keep the overflows using a
    # stride. This results in one example possibly giving several features when a context is long,
    # each of those features having a context that overlaps a bit the context of the previous
    # feature.
    examples["question"] = [q.lstrip() for q in examples["question"]]
    examples["context"] = [c.lstrip() for c in examples["context"]]
    tokenized_examples = tokenizer(
        examples["question"],
        examples["context"],
        truncation="only_second",
        max_length=max_length,
        stride=doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding="max_length",
    )
    # Since one example might give us several features if it has a long context, we need a
    # map from a feature to its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")
    # The offset mappings will give us a map from token to character position in the original
    # context. This will help us compute the start_positions and end_positions.
    offset_mapping = tokenized_examples.pop("offset_mapping")
    # Let's label those examples!
    tokenized_examples["start_positions"] = []
    tokenized_examples["end_positions"] = []
    for i, offsets in enumerate(offset_mapping):
        # We will label impossible answers with the index of the CLS token.
        input_ids = tokenized_examples["input_ids"][i]
        cls_index = input_ids.index(tokenizer.cls_token_id)
        # Grab the sequence corresponding to that example (to know what is the context and what
        # is the question). sequence_ids is 0 for question tokens, 1 for context
        # tokens, and None for special tokens.
        sequence_ids = tokenized_examples.sequence_ids(i)
        # One example can give several spans, this is the index of the example containing this
        # span of text.
        sample_index = sample_mapping[i]
        answers = examples["answers"][sample_index]
        # If no answers are given, set the cls_index as answer.
        if len(answers["answer_start"]) == 0:
            tokenized_examples["start_positions"].append(cls_index)
            tokenized_examples["end_positions"].append(cls_index)
        else:
            # Start/end character index of the answer in the text.
            start_char = answers["answer_start"][0]
            end_char = start_char + len(answers["text"][0])
            # Start token index of the current span in the text.
            token_start_index = 0
            while sequence_ids[token_start_index] != 1:
                token_start_index += 1
            # End token index of the current span in the text.
            token_end_index = len(input_ids) - 1
            while sequence_ids[token_end_index] != 1:
                token_end_index -= 1
            # Detect if the answer is out of the span (in which case this feature is labeled with the
            # CLS index).
            if not (
                offsets[token_start_index][0] <= start_char
                and offsets[token_end_index][1] >= end_char
            ):
                tokenized_examples["start_positions"].append(cls_index)
                tokenized_examples["end_positions"].append(cls_index)
            else:
                # Otherwise move the token_start_index and token_end_index to the two ends of the
                # answer.
                # Note: we could go after the last offset if the answer is the last word (edge
                # case).
                while (
                    token_start_index < len(offsets)
                    and offsets[token_start_index][0] <= start_char
                ):
                    token_start_index += 1
                tokenized_examples["start_positions"].append(token_start_index - 1)
                while offsets[token_end_index][1] >= end_char:
                    token_end_index -= 1
                tokenized_examples["end_positions"].append(token_end_index + 1)
    return tokenized_examples
"""
To apply this function on all the sentences (or pairs of sentences) in our dataset, we
just use the `map()` method of our `Dataset` object, which will apply the function on all
of its elements.
We'll use `batched=True` to encode the texts in batches together. This is to leverage the
full benefit of the fast tokenizer we loaded earlier, which will use multi-threading to
treat the texts in a batch concurrently. We also use the `remove_columns` argument to
remove the columns that existed before tokenization was applied - this ensures that the
only features remaining are the ones we actually want to pass to our model.
"""
tokenized_datasets = datasets.map(
prepare_train_features,
batched=True,
remove_columns=datasets["train"].column_names,
num_proc=3,
)
"""
Even better, the results are automatically cached by the 🤗 Datasets library to avoid
spending time on this step the next time you run your notebook. The 🤗 Datasets library is
normally smart enough to detect when the function you pass to map has changed (and thus
requires to not use the cache data). For instance, it will properly detect if you change
the task in the first cell and rerun the notebook. 🤗 Datasets warns you when it uses
cached files, you can pass `load_from_cache_file=False` in the call to `map()` to not use
the cached files and force the preprocessing to be applied again.
Because all our data has been padded or truncated to the same length, and it is not too
large, we can now simply convert it to a dict of numpy arrays, ready for training.
Although we will not use it here, 🤗 Datasets have a `to_tf_dataset()` helper method
designed to assist you when the data cannot be easily converted to arrays, such as when
it has variable sequence lengths, or is too large to fit in memory. This method wraps a
`tf.data.Dataset` around the underlying 🤗 Dataset, streaming samples from the underlying
dataset and batching them on the fly, thus minimizing wasted memory and computation from
unnecessary padding. If your use-case requires it, please see the
[docs](https://huggingface.co/docs/transformers/custom_datasets#finetune-with-tensorflow)
on to_tf_dataset and data collator for an example. If not, feel free to follow this example
and simply convert to dicts!
"""
train_set = tokenized_datasets["train"].with_format("numpy")[
:
] # Load the whole dataset as a dict of numpy arrays
validation_set = tokenized_datasets["validation"].with_format("numpy")[:]
"""
## Fine-tuning the model
"""
"""
That was a lot of work! But now that our data is ready, everything is going to run very
smoothly. First, we download the pretrained model and fine-tune it. Since our task is
question answering, we use the `TFAutoModelForQuestionAnswering` class. Like with the
tokenizer, the `from_pretrained()` method will download and cache the model for us:
"""
from transformers import TFAutoModelForQuestionAnswering
model = TFAutoModelForQuestionAnswering.from_pretrained(model_checkpoint)
"""
The warning is telling us we are throwing away some weights and newly initializing some
others. Don't panic! This is absolutely normal. Recall that models like BERT and
Distilbert are trained on a **language modeling** task, but we're loading the model as
a `TFAutoModelForQuestionAnswering`, which means we want the model to perform a
**question answering** task. This change requires the final output layer or "head" to be
removed and replaced with a new head suited for the new task. The `from_pretrained`
method will handle all of this for us, and the warning is there simply to remind us that
some model surgery has been performed, and that the model will not generate useful
predictions until the newly-initialized layers have been fine-tuned on some data.
Next, we can create an optimizer and specify a loss function. You can usually get
slightly better performance by using learning rate decay and decoupled weight decay, but
for the purposes of this example the standard `Adam` optimizer will work fine. Note,
however, that when fine-tuning a pretrained transformer model you will generally want to
use a low learning rate! We find the best results are obtained with values in the range
1e-5 to 1e-4, and training may completely diverge at the default Adam learning rate of 1e-3.
"""
import tensorflow as tf
from tensorflow import keras
optimizer = keras.optimizers.Adam(learning_rate=5e-5)
"""
And now we just compile and fit the model. As a convenience, all 🤗 Transformers models
come with a default loss which matches their output head, although you're of course free
to use your own. Because the built-in loss is computed internally during the forward
pass, when using it you may find that some Keras metrics misbehave or give unexpected
outputs. This is an area of very active development in 🤗 Transformers, though, so
hopefully we'll have a good solution to that issue soon!
For now, though, let's use the built-in loss without any metrics. To get the built-in
loss, simply leave out the `loss` argument to `compile`.
"""
# NOTE(review): mixed_float16 training is ENABLED here — the original comment
# said "optionally uncomment" but the line below is not commented out. Comment
# it out to train in full float32 precision instead.
keras.mixed_precision.set_global_policy("mixed_float16")
# No `loss` argument: Transformers models compute their task loss internally
# during the forward pass when the label keys are present in the input dict.
model.compile(optimizer=optimizer)
"""
And now we can train our model. Note that we're not passing separate labels - the labels
are keys in the input dict, to make them visible to the model during the forward pass so
it can compute the built-in loss.
"""
model.fit(train_set, validation_data=validation_set, epochs=1)
"""
And we're done! Let's give it a try, using some text from the keras.io frontpage:
"""
context = """Keras is an API designed for human beings, not machines. Keras follows best
practices for reducing cognitive load: it offers consistent & simple APIs, it minimizes
the number of user actions required for common use cases, and it provides clear &
actionable error messages. It also has extensive documentation and developer guides. """
question = "What is Keras?"
# BUG FIX: training tokenized pairs as (question, context) with
# truncation="only_second" (see `prepare_train_features`), so inference must
# use the same order. The original code passed (context, question), silently
# swapping the two segments and degrading the predictions.
inputs = tokenizer([question], [context], return_tensors="np")
outputs = model(inputs)
# The model emits one logit per token for span start and span end; the argmax
# of each vector gives the most likely start/end token of the answer.
start_position = tf.argmax(outputs.start_logits, axis=1)
end_position = tf.argmax(outputs.end_logits, axis=1)
print(int(start_position[0]), int(end_position[0]))
"""
Looks like our model thinks the answer is the span from tokens 1 to 12 (inclusive). No
prizes for guessing which tokens those are!
"""
answer = inputs["input_ids"][0, int(start_position) : int(end_position) + 1]
print(answer)
"""
And now we can use the `tokenizer.decode()` method to turn those token IDs back into text:
"""
print(tokenizer.decode(answer))
"""
And that's it! Remember that this example was designed to be quick to run rather than
state-of-the-art, and the model trained here will certainly make mistakes. If you use a
larger model to base your training on, and you take time to tune the hyperparameters
appropriately, you'll find that you can achieve much better losses (and correspondingly
more accurate answers).
Finally, you can push the model to the HuggingFace Hub. By pushing this model you will
have:
- A nice model card generated for you containing hyperparameters and metrics of the model
training,
- A web API for inference calls,
- A widget in the model page that enables others to test your model.
This model is currently hosted [here](https://huggingface.co/keras-io/transformers-qa)
and we have prepared a separate neat UI for you
[here](https://huggingface.co/spaces/keras-io/keras-qa).
```python
model.push_to_hub("transformers-qa", organization="keras-io")
tokenizer.push_to_hub("transformers-qa", organization="keras-io")
```
If you have non-Transformers based Keras models, you can also push them with
`push_to_hub_keras`. You can use `from_pretrained_keras` to load easily.
```python
from huggingface_hub.keras_mixin import push_to_hub_keras
push_to_hub_keras(
model=model, repo_url="https://huggingface.co/your-username/your-awesome-model"
)
from_pretrained_keras("your-username/your-awesome-model") # load your model
```
"""
| 46.952941 | 101 | 0.754197 |
7213eb686727f06f566e50a1924291247cc309f8 | 10,313 | py | Python | decavision/model_testing/testing.py | Decathlon/decavision | b79d2b5611025a5f8519b9154acf04844ef8fb91 | [
"MIT"
] | 3 | 2020-12-08T19:14:46.000Z | 2021-03-30T15:12:44.000Z | decavision/model_testing/testing.py | Decathlon/decavision | b79d2b5611025a5f8519b9154acf04844ef8fb91 | [
"MIT"
] | 13 | 2021-03-30T15:12:49.000Z | 2022-03-12T00:51:57.000Z | decavision/model_testing/testing.py | Decathlon/decavision | b79d2b5611025a5f8519b9154acf04844ef8fb91 | [
"MIT"
] | 1 | 2021-03-30T15:20:56.000Z | 2021-03-30T15:20:56.000Z | import glob
import math
import os
import random
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sn
from sklearn.metrics import confusion_matrix, classification_report
import tensorflow as tf
from tensorflow.keras.models import load_model
import tensorflow_hub as hub
from decavision.utils import data_utils
from decavision.utils import utils
class ModelTester:
    """
    Class to use a trained image classification model on some images. Using this
    when working with a TPU disables eager execution.
    Arguments:
        model (str): path to trained model
    """
    def __init__(self, model):
        use_tpu, use_gpu = utils.check_PU()
        if use_tpu:
            # necessary because keras generators don't work with TPUs...
            tf.compat.v1.disable_eager_execution()
        try:
            self.model = load_model(model, custom_objects={"KerasLayer": hub.KerasLayer})
            # efficientnets have the scaling included in them so no need to rescale the images when loading
            # NOTE(review): this relies on the model name starting with 'B'
            # (EfficientNet-B0..B7 naming) — confirm this convention holds for
            # all saved models used with this class.
            if self.model.name[0] == 'B':
                self.rescaling = 1
            else:
                self.rescaling = 255
            print('Model loaded correctly')
        except Exception as e:
            # NOTE(review): on load failure self.model is never set, so the
            # next line raises AttributeError — the error is printed but not
            # re-raised.
            print('There was a problem when trying to load your model: {}'.format(e))
        # Spatial (height, width) expected by the model; used to resize images.
        self.input_shape = self.model.input_shape[1:3]
    def _load_dataset(self, path):
        """
        Load dataset into a keras generator. Images must be contained in separate
        folders for each class.
        Arguments:
            path (str): location of the dataset
        Returns:
            generator: images plus information about them (labels, paths, etc)
        """
        # Rescale by 1/255 for most models, 1/1 (no-op) for EfficientNets.
        datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1 / self.rescaling)
        # shuffle=False keeps file order stable so predictions line up with
        # generator.classes and generator.filepaths.
        generator = datagen.flow_from_directory(directory=path,
                                                target_size=self.input_shape,
                                                shuffle=False,
                                                interpolation='bilinear',
                                                color_mode='rgb',
                                                class_mode='sparse',
                                                batch_size=1)
        return generator
    def confusion_matrix(self, path, normalize=None):
        """
        Compute and plot the confusion matrix resulting from predictions on a dataset of images.
        Images must be located in separate folders for each class.
        Arguments:
            path (str): location of the images
            normalize ('true', 'pred', 'all' or None): normalizes confusion matrix over the true (rows), predicted (columns) conditions or
                all the population. If None, confusion matrix will not be normalized.
        """
        generator = self._load_dataset(path)
        cls_true = generator.classes
        labels = list(generator.class_indices.keys())
        # Convert per-class probabilities to predicted class indices.
        cls_pred = self.model.predict(generator)
        cls_pred = np.argmax(cls_pred, axis=1)
        print('Labels loaded')
        # Calculate the confusion matrix.
        cm = confusion_matrix(y_true=cls_true,  # True class for test-set.
                              y_pred=cls_pred,  # Predicted class.
                              normalize=normalize)
        # Print the confusion matrix
        ax = plt.subplot()
        sn.heatmap(cm, annot=True, ax=ax)
        ax.set_xlabel('Predicted')
        ax.set_ylabel('True')
        ax.set_title('Confusion Matrix')
        ax.xaxis.set_ticklabels(labels)
        ax.yaxis.set_ticklabels(labels)
    def _plot_images(self, images, categories, cls_true, cls_pred=None, smooth=True):
        """
        Plot images along with their true and optionally predicted labels.
        Inspired by https://github.com/Hvass-Labs/TensorFlow-Tutorials/blob/master/10_Fine-Tuning.ipynb.
        Arguments:
            images (list[numpy arrays]): list of images to plot as arrays
            categories (List[str]): list of categories that model predicts
            cls_true (np.array[int]): true labels of the images
            cls_pred (np.array[int]): predicted labels of the images
            smooth (bool): smooth out images or not when plotting
        """
        assert len(images) == len(cls_true)
        num_images = len(images)
        # Create figure with sub-plots.
        # Pick a grid: a perfect square when possible, otherwise the most
        # "square" factor pair of num_images (falls back to num_images x 1
        # for primes since i=1 always divides).
        if math.sqrt(num_images).is_integer():
            nrows = ncols = int(math.sqrt(num_images))
        else:
            for i in reversed(range(math.ceil(math.sqrt(num_images)))):
                if not num_images % i:
                    nrows = int(num_images / i)
                    ncols = int(i)
                    break
        fig, axes = plt.subplots(nrows, ncols)
        # Adjust vertical spacing.
        if cls_pred:
            hspace = 0.6
        else:
            hspace = 0.3
        fig.subplots_adjust(hspace=hspace, wspace=0.3)
        # Interpolation type.
        if smooth:
            interpolation = 'spline16'
        else:
            interpolation = 'nearest'
        for i, ax in enumerate(axes.flat):
            # Plot image.
            ax.imshow(images[i], interpolation=interpolation)
            # Name of the true class.
            cls_true_name = categories[cls_true[i]]
            # Show true and predicted classes.
            if cls_pred:
                # Name of the predicted class.
                cls_pred_name = categories[cls_pred[i]]
                xlabel = "True: {0}\nPred: {1}".format(
                    cls_true_name, cls_pred_name)
            else:
                xlabel = "True: {0}".format(cls_true_name)
            # Show the classes as the label on the x-axis.
            ax.set_xlabel(xlabel)
            # Remove ticks from the plot.
            ax.set_xticks([])
            ax.set_yticks([])
        # Ensure the plot is shown correctly with multiple plots
        # in a single Notebook cell.
        plt.tight_layout()
        plt.show()
    def plot_errors(self, path, num_pictures=9):
        """
        Plot images that were not classified correctly by the model. Images to test must
        be located in separate folders for each class, for example a validation dataset.
        Arguments:
            path (str): location of the images
            num_pictures (int): maximum number of errors to show
        """
        generator = self._load_dataset(path)
        cls_true = generator.classes
        image_paths = generator.filepaths
        labels = list(generator.class_indices.keys())
        cls_pred = self.model.predict(generator)
        cls_pred = np.argmax(cls_pred, axis=1)
        print('Labels loaded')
        # get all errors index
        errors = []
        for i in range(len(cls_pred)):
            if cls_pred[i] != cls_true[i]:
                errors.append(i)
        # Load images randomly picked
        num_pictures = min(num_pictures, len(errors))
        random_errors = sorted(random.sample(errors, num_pictures))
        # Plot the images we have loaded and their corresponding classes.
        self._plot_images(images=[data_utils.prepare_image(image_paths[i], self.input_shape)[0] for i in random_errors],
                          categories=labels,
                          cls_true=[cls_true[i] for i in random_errors],
                          cls_pred=[cls_pred[i] for i in random_errors])
    def classify_images(self, image_path, categories, plot=True):
        """
        Classify images located directly in a folder. Plots the images and the first three predictions.
        Arguments:
            image_path (str): location of the images
            categories (list[str]): list of potential categories that the model can return
            plot (bool): plot or not the images, if False, only results are printed
        """
        # NOTE(review): only *.jpg files are picked up — other extensions are
        # silently ignored.
        images = glob.glob(os.path.join(image_path, '*.jpg'))
        for image_path in images:
            # prepare the image
            image_tensor = data_utils.prepare_image(image_path, self.input_shape, self.rescaling)
            # make and decode the prediction
            result = self.model.predict(image_tensor)[0]
            # print image and top predictions
            # Indices of the 3 highest-probability classes, best first.
            top_pred = np.argsort(result)[::-1][:3]
            # Name of the true class.
            cls_pred_name = np.array(categories)[top_pred]
            cls_pred_perc = result[top_pred] * 100
            if plot:
                plt.imshow(image_tensor[0], interpolation='spline16')
                xlabel = 'Prediction :\n'
                for (x, y) in zip(cls_pred_name, cls_pred_perc):
                    xlabel += '{0}, {1:.2f}%\n'.format(x, y)
                plt.xlabel(xlabel)
                plt.xticks([])
                plt.yticks([])
                plt.show()
            else:
                print('\nImage: ', image_path)
                for i in range(len(top_pred)):
                    print('Prediction: {} (probability {}%)'.format(cls_pred_name[i], round(cls_pred_perc[i])))
    def evaluate(self, path):
        """
        Calculate the accuracy of the model on a dataset of images. The images must be
        in separate folders for each class.
        Arguments:
            path (str): location of the dataset
        """
        generator = self._load_dataset(path)
        # results = [loss, accuracy]; index 1 is the accuracy metric.
        results = self.model.evaluate(generator)
        print('Accuracy of', results[1] * 100, '%')
    def generate_classification_report(self, path):
        """
        Computes classification report resulting from predictions on a dataset of images
        and prints the results. Images must be located in separate folders for each class. The report shows average accuracy, precision, recall and f1-scores. Precision, recall and f1-scores are also computed for each class.
        Arguments:
            path (str): location of the images
        """
        generator = self._load_dataset(path)
        cls_true = generator.classes
        labels = list(generator.class_indices.keys())
        cls_pred = self.model.predict(generator)
        cls_pred = np.argmax(cls_pred, axis=1)
        print('Labels loaded')
        # Show classification report
        print(classification_report(cls_true, cls_pred, target_names=labels, digits=4))
| 40.285156 | 224 | 0.591002 |
8cb8e5407ef24ec617dea6652f08e46426f70974 | 780 | py | Python | src/run_integ_test.py | nsri19/opensearch-build | 90ad75b51a08f199981a4a46b7f3e6e49371415a | [
"Apache-2.0"
] | null | null | null | src/run_integ_test.py | nsri19/opensearch-build | 90ad75b51a08f199981a4a46b7f3e6e49371415a | [
"Apache-2.0"
] | 13 | 2021-10-02T00:22:47.000Z | 2022-02-08T17:49:38.000Z | src/run_integ_test.py | peternied/opensearch-build | 4c7c2e5ffc61e8a5ca263d7a2fff2ad3f73fdbe6 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# SPDX-License-Identifier: Apache-2.0
#
# The OpenSearch Contributors require contributions made to
# this file be licensed under the Apache-2.0 license or a
# compatible open source license.
import sys
from manifests.test_manifest import TestManifest
from system import console
from test_workflow.integ_test.integ_test_runners import IntegTestRunners
from test_workflow.test_args import TestArgs
def main():
    """Run the integration tests described by the CLI-provided manifest.

    Exits the process with status 1 when any test run reports failure.
    """
    args = TestArgs()
    console.configure(level=args.logging_level)
    manifest = TestManifest.from_path(args.test_manifest_path)
    runner = IntegTestRunners.from_test_manifest(args, manifest)
    results = runner.run()
    results.log()
    if results.failed():
        sys.exit(1)


if __name__ == "__main__":
    sys.exit(main())
| 23.636364 | 80 | 0.765385 |
d9cd2a800fc70b766d97731393a2c3ca48a9edbb | 5,951 | py | Python | mythril/analysis/callgraph.py | yrashk/mythril | 0cea8f562726da468ab4761ff1ff3746ab0d747a | [
"MIT"
] | 1 | 2018-09-07T10:17:35.000Z | 2018-09-07T10:17:35.000Z | mythril/analysis/callgraph.py | yrashk/mythril | 0cea8f562726da468ab4761ff1ff3746ab0d747a | [
"MIT"
] | null | null | null | mythril/analysis/callgraph.py | yrashk/mythril | 0cea8f562726da468ab4761ff1ff3746ab0d747a | [
"MIT"
] | 1 | 2018-06-14T08:36:03.000Z | 2018-06-14T08:36:03.000Z | import re
from jinja2 import Environment, PackageLoader, select_autoescape
from mythril.laser.ethereum.svm import NodeFlags
import z3
# vis.js network options for the default (dark, hierarchical) call-graph view.
default_opts = {
    'autoResize': True,
    'height': '100%',
    'width': '100%',
    'manipulation': False,
    'layout': {
        'improvedLayout': True,
        'hierarchical': {
            'enabled': True,
            'levelSeparation': 450,
            'nodeSpacing': 200,
            'treeSpacing': 100,
            'blockShifting': True,
            'edgeMinimization': True,
            'parentCentralization': False,
            'direction': 'LR',
            'sortMethod': 'directed'
        }
    },
    'nodes': {
        'color': '#000000',
        'borderWidth': 1,
        'borderWidthSelected': 2,
        'chosen': True,
        'shape': 'box',
        'font': {'align': 'left', 'color': '#FFFFFF'},
    },
    'edges': {
        'font': {
            'color': '#FFFFFF',
            'face': 'arial',
            'background': 'none',
            'strokeWidth': 0,
            'strokeColor': '#ffffff',
            'align': 'horizontal',
            'multi': False,
            'vadjust': 0,
        }
    },
    'physics': {'enabled': False}
}
# Overrides applied on top of default_opts for the black-on-white,
# courier-font "phrack" style (see generate_graph's phrackify flag).
phrack_opts = {
    'nodes': {
        'color': '#000000',
        'borderWidth': 1,
        'borderWidthSelected': 1,
        'shapeProperties': {
            'borderDashes': False,
            'borderRadius': 0,
        },
        'chosen': True,
        'shape': 'box',
        'font': {'face': 'courier new', 'align': 'left', 'color': '#000000'},
    },
    'edges': {
        'font': {
            'color': '#000000',
            'face': 'courier new',
            'background': 'none',
            'strokeWidth': 0,
            'strokeColor': '#ffffff',
            'align': 'horizontal',
            'multi': False,
            'vadjust': 0,
        }
    }
}
# Per-contract node color palette; the four colors are deliberately repeated
# so up to eight contracts can be colored without an index check.
default_colors = [
    {'border': '#26996f', 'background': '#2f7e5b', 'highlight': {'border': '#26996f', 'background': '#28a16f'}},
    {'border': '#9e42b3', 'background': '#842899', 'highlight': {'border': '#9e42b3', 'background': '#933da6'}},
    {'border': '#b82323', 'background': '#991d1d', 'highlight': {'border': '#b82323', 'background': '#a61f1f'}},
    {'border': '#4753bf', 'background': '#3b46a1', 'highlight': {'border': '#4753bf', 'background': '#424db3'}},
    {'border': '#26996f', 'background': '#2f7e5b', 'highlight': {'border': '#26996f', 'background': '#28a16f'}},
    {'border': '#9e42b3', 'background': '#842899', 'highlight': {'border': '#9e42b3', 'background': '#933da6'}},
    {'border': '#b82323', 'background': '#991d1d', 'highlight': {'border': '#b82323', 'background': '#a61f1f'}},
    {'border': '#4753bf', 'background': '#3b46a1', 'highlight': {'border': '#4753bf', 'background': '#424db3'}},
]
# Single black-and-white node color used for every contract in phrack mode.
phrack_color = {'border': '#000000', 'background': '#ffffff',
                'highlight': {'border': '#000000', 'background': '#ffffff'}}
def extract_nodes(statespace, color_map):
    """Convert the state-space CFG nodes into vis.js node dicts.

    Each node's label is the disassembled code of its basic block; labels
    longer than 6 lines are truncated with a "(click to expand +)" marker
    and the full text kept in ``fullLabel`` for interactive expansion.

    :param statespace: object exposing ``nodes`` (mapping of node key -> node)
    :param color_map: mapping of contract name -> vis.js color dict
    :return: list of vis.js-compatible node dictionaries
    """
    nodes = []
    for node_key in statespace.nodes:
        node = statespace.nodes[node_key]
        instructions = [state.get_current_instruction() for state in node.states]
        code_split = []
        for instruction in instructions:
            if instruction['opcode'].startswith("PUSH"):
                # PUSH carries an immediate argument worth showing.
                code_line = "%d %s %s" % (instruction['address'], instruction['opcode'], instruction['argument'])
            elif instruction['opcode'].startswith("JUMPDEST") and NodeFlags.FUNC_ENTRY in node.flags and instruction['address'] == node.start_addr:
                # A JUMPDEST at a function entry point is shown as the
                # function's name instead of the raw opcode.
                code_line = node.function_name
            else:
                code_line = "%d %s" % (instruction['address'], instruction['opcode'])
            # Abbreviate long hex constants to their first 8 digits + "(...)".
            code_line = re.sub("([0-9a-f]{8})[0-9a-f]+", lambda m: m.group(1) + "(...)", code_line)
            code_split.append(code_line)
        truncated_code = '\n'.join(code_split) if (len(code_split) < 7) \
            else '\n'.join(code_split[:6]) + "\n(click to expand +)"
        nodes.append({
            'id': str(node_key),
            'color': color_map[node.get_cfg_dict()['contract_name']],
            'size': 150,
            'fullLabel': '\n'.join(code_split),
            'label': truncated_code,
            'truncLabel': truncated_code,
            'isExpanded': False
        })
    return nodes
def extract_edges(statespace):
    """Convert the state-space CFG edges into vis.js edge dicts.

    Edge labels are the (z3-simplified, single-line) path conditions;
    unconditional edges get an empty label.

    :param statespace: object exposing ``edges`` (each with ``condition``
        and ``as_dict['from']``/``as_dict['to']``)
    :return: list of vis.js-compatible edge dictionaries
    """
    edges = []
    for edge in statespace.edges:
        if edge.condition is None:
            label = ""
        else:
            try:
                label = str(z3.simplify(edge.condition)).replace("\n", "")
            except z3.Z3Exception:
                # Some expressions cannot be simplified; fall back to the
                # unsimplified string form.
                label = str(edge.condition).replace("\n", "")
        # Rewrite multi-digit decimal literals (not preceded by '_') as hex
        # for readability of EVM constants.
        label = re.sub(r'([^_])([\d]{2}\d+)', lambda m: m.group(1) + hex(int(m.group(2))), label)
        edges.append({
            'from': str(edge.as_dict['from']),
            'to': str(edge.as_dict['to']),
            'arrows': 'to',
            'label': label,
            'smooth': {'type': 'cubicBezier'}
        })
    return edges
def generate_graph(statespace, title="Mythril / Ethereum LASER Symbolic VM", physics=False, phrackify=False):
    """Render the LASER state space as an interactive vis.js call graph.

    :param statespace: symbolic-execution state space exposing ``nodes``,
        ``edges`` and ``accounts``
    :param title: HTML page title
    :param physics: enable the vis.js physics engine
    :param phrackify: render in the monochrome "phrack" style
    :return: the rendered HTML document as a string
    """
    import copy  # local import keeps this fix self-contained within the block

    env = Environment(loader=PackageLoader('mythril.analysis'), autoescape=select_autoescape(['html', 'xml']))
    template = env.get_template('callgraph.html')
    # Bug fix: previously ``graph_opts = default_opts`` aliased the
    # module-level dict, so ``.update(phrack_opts)`` and the physics toggle
    # mutated the shared defaults and leaked into every subsequent call.
    graph_opts = copy.deepcopy(default_opts)
    accounts = statespace.accounts
    if phrackify:
        color_map = {accounts[k].contract_name: phrack_color for k in accounts}
        graph_opts.update(phrack_opts)
    else:
        # Assign each contract a palette color by enumeration order.
        color_map = {accounts[k].contract_name: default_colors[i % len(default_colors)] for i, k in enumerate(accounts)}
    graph_opts['physics']['enabled'] = physics
    return template.render(title=title,
                           nodes=extract_nodes(statespace, color_map),
                           edges=extract_edges(statespace),
                           phrackify=phrackify,
                           opts=graph_opts
                           )
| 35.634731 | 147 | 0.532179 |
e1e342ceb82d5dc530d7d7dafbe8813594f0778e | 4,328 | py | Python | fdn/fdnlib/utils.py | hobbymarks/ufdn | 1864775b060df1aec5e9c115930d0ebdff3bab44 | [
"MIT"
] | 1 | 2021-02-16T04:42:10.000Z | 2021-02-16T04:42:10.000Z | fdn/fdnlib/utils.py | hobbymarks/ufdn | 1864775b060df1aec5e9c115930d0ebdff3bab44 | [
"MIT"
] | 3 | 2021-04-28T08:29:14.000Z | 2021-05-08T01:34:50.000Z | fdn/fdnlib/utils.py | hobbymarks/UFn | 02f18be74b94749effdb9a6a82a0d7e7db8aaccf | [
"MIT"
] | null | null | null | import difflib
import hashlib
import os
from pathlib import Path
from typing import Generator, List, Optional, Tuple, Union
# From Third Party
import click
from colorama import Fore
from wcwidth import wcswidth
def is_hidden(f_path: Path) -> bool:
    """Tell whether *f_path* points at a hidden file.

    On Windows the HIDDEN/SYSTEM file-attribute flags decide (truthy int);
    on POSIX systems (Linux, macOS) a leading dot in the basename does.
    """
    if os.name == "nt":
        # win32 extensions are only needed (and available) on Windows,
        # hence the function-local import.
        import win32api
        import win32con

        attribute = win32api.GetFileAttributes(f_path)
        return attribute & (win32con.FILE_ATTRIBUTE_HIDDEN
                            | win32con.FILE_ATTRIBUTE_SYSTEM)
    # POSIX: dotfiles are hidden by convention.
    return os.path.basename(f_path).startswith(".")
def depth_walk(
        top_path: Path,
        top_down: bool = False,
        follow_links: bool = False,
        max_depth: int = 1
) -> Generator[Tuple[Path, List[str], List[str]], None, None]:
    """Walk a directory tree like ``os.walk`` but limited to *max_depth* levels.

    Yields ``(path, dir_names, file_names)`` tuples.  Unreadable or missing
    directories produce a warning via ``click.echo`` and are skipped.

    :param top_path: directory to start from
    :param top_down: yield a directory before (True) or after (False) its children
    :param follow_links: descend into symlinked directories as well
    :param max_depth: how many levels to descend (1 = only *top_path* itself)
    """
    # Coerce max_depth defensively: non-numeric values fall back to 1.
    if str(max_depth).isnumeric():
        max_depth = int(max_depth)
    else:
        max_depth = 1
    try:
        names = os.listdir(top_path)
    except FileNotFoundError:
        click.echo(f"Warning:{top_path} not found.")
        return None
    except PermissionError:
        click.echo(f"Warning:{top_path} no permissions.")
        return None
    dirs, non_dirs = [], []
    for name in names:
        if os.path.isdir(os.path.join(top_path, name)):
            dirs.append(name)
        else:
            non_dirs.append(name)
    if top_down:
        yield top_path, dirs, non_dirs
    if max_depth > 1:
        for name in dirs:
            new_path = Path(os.path.join(top_path, name))
            if follow_links or not os.path.islink(new_path):
                # Recurse with one level less; the ``1 if max_depth == 1``
                # arm is unreachable here (we only recurse when > 1) but is
                # kept as written.
                for x in depth_walk(new_path, top_down, follow_links,
                                    1 if max_depth == 1 else max_depth - 1):
                    yield x
    if not top_down:
        yield top_path, dirs, non_dirs
def rich_style(
        original: str,
        processed: str,
        pretty: bool = False,
        enhanced_display: bool = False
) -> Union[Tuple[None, None], Tuple[str, str]]:
    """Build two ANSI-colored strings visualizing the diff *original* -> *processed*.

    Deleted text is red, inserted text green (spaces shown as the visible
    "▯" glyph); unchanged text is plain or black depending on
    *enhanced_display*.  With *pretty* the two lines are padded with spaces
    so corresponding segments line up column-wise (widths measured with
    ``wcswidth`` to handle wide characters).

    :return: ``(styled_original, styled_processed)``, or ``(None, None)``
        when either argument is not a str.
    """
    if (type(original) is not str) or (type(processed) is not str):
        return None, None

    def _f_d(s: str = "", f_d=None):
        # Wrap *s* in the given Fore color and reset afterwards.
        return f_d + s + Fore.RESET

    def _c_f(s: str = "") -> str:
        # Alignment filler: only emitted when pretty-printing is on.
        if pretty:
            return s
        else:
            return ""

    a = original
    b = processed
    a_list = []
    b_list = []
    c_f = ' '  # padding character used by _c_f
    e = enhanced_display
    for tag, i1, i2, j1, j2 in difflib.SequenceMatcher(None, a,
                                                       b).get_opcodes():
        if tag == "delete":
            a_list.append(_f_d(a[i1:i2].replace(" ", "▯"), Fore.RED))
            b_list.append(_c_f(c_f * wcswidth(a[i1:i2])))
        elif tag == "equal":
            a_list.append(_f_d(a[i1:i2], Fore.BLACK) if e else a[i1:i2])
            b_list.append(_f_d(b[j1:j2], Fore.BLACK) if e else b[j1:j2])
        elif tag == "replace":
            # Pad the narrower side so both segments occupy the same width.
            a_w = wcswidth(a[i1:i2])
            b_w = wcswidth(b[j1:j2])
            if a_w > b_w:
                a_list.append(_f_d(a[i1:i2].replace(" ", "▯"), Fore.RED))
                b_list.append(
                    _c_f(c_f * (a_w - b_w)) +
                    _f_d(b[j1:j2].replace(" ", "▯"), Fore.GREEN))
            elif a_w < b_w:
                a_list.append(
                    _c_f(c_f * (b_w - a_w)) +
                    _f_d(a[i1:i2].replace(" ", "▯"), Fore.RED))
                b_list.append(_f_d(b[j1:j2].replace(" ", "▯"), Fore.GREEN))
            else:
                a_list.append(_f_d(a[i1:i2].replace(" ", "▯"), Fore.RED))
                b_list.append(_f_d(b[j1:j2].replace(" ", "▯"), Fore.GREEN))
        elif tag == "insert":
            a_list.append(_c_f(c_f * wcswidth(b[j1:j2])))
            b_list.append(_f_d(b[j1:j2].replace(" ", "▯"), Fore.GREEN))
    return "".join(a_list), "".join(b_list)
def unify_confirm(x: str = "") -> str:
    """Normalize a confirmation answer to one of "yes"/"no"/"all"/"quit".

    Single-letter shortcuts (y/n/A/q) map to their long forms; anything
    unrecognized — including the empty default — maps to "no".
    """
    answers = {
        "y": "yes",
        "yes": "yes",
        "n": "no",
        "no": "no",
        "A": "all",
        "all": "all",
        "q": "quit",
        "quit": "quit",
    }
    return answers.get(x, "no")
###############################################################################
def sha2_id(s: str) -> str:
    """Return the hex SHA-256 digest of *s* (UTF-8 encoded)."""
    digest = hashlib.sha256()
    digest.update(s.encode("UTF-8"))
    return digest.hexdigest()
| 30.695035 | 79 | 0.51964 |
9cc9aaec05c45ecd11e7f414b7d1f259efa423ea | 6,425 | py | Python | attendance_management_bot/common/utils.py | ja3toa/samplebot_attendance_management_bot | aafbc4baa9d95f6485e709c0fd6af003811d0813 | [
"Apache-2.0"
] | 4 | 2021-05-07T01:16:04.000Z | 2022-03-27T07:15:59.000Z | attendance_management_bot/common/utils.py | ja3toa/samplebot_attendance_management_bot | aafbc4baa9d95f6485e709c0fd6af003811d0813 | [
"Apache-2.0"
] | 4 | 2021-04-09T02:11:39.000Z | 2021-09-12T08:57:12.000Z | attendance_management_bot/common/utils.py | ja3toa/samplebot_attendance_management_bot | aafbc4baa9d95f6485e709c0fd6af003811d0813 | [
"Apache-2.0"
] | 108 | 2020-02-10T01:44:36.000Z | 2022-03-07T03:00:19.000Z | #!/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright 2020-present Works Mobile Corp.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""
HTTP method providing authentication
"""
__all__ = ['auth_post', 'auth_get', 'auth_del', 'auth_put']
import requests
import logging
from tornado.web import HTTPError
from attendance_management_bot.common.token import generate_token
from attendance_management_bot.common.global_data import get_value, set_value
LOGGER = logging.getLogger("attendance_management_bot")
def refresh_token():
    """Generate a fresh API token, cache it in global data and return it."""
    token = generate_token()
    set_value("token", token)
    return token
def get_token():
    """Return the cached API token, or None if none has been generated yet."""
    return get_value("token", None)
def replace_url_bot_no(url):
    """Substitute the configured bot number for the ``_BOT_NO_`` placeholder.

    :param url: URL template containing the literal ``_BOT_NO_`` marker
    :raises HTTPError: 500 when no bot number has been configured
    :return: the URL with the placeholder replaced
    """
    bot_no = get_value("bot_no", None)
    if bot_no is None:
        LOGGER.info("internal error. bot no is None")
        raise HTTPError(500, "internal error. bot no is None")
    return url.replace("_BOT_NO_", bot_no)
def auth_post(url, data=None, headers=None, files=None,
              params=None, json=None, refresh_token_flag=False):
    """
    Encapsulates the post method of adding token to headers.
    Check also: attendance_management_bot/common/token.py
    parameters and return values, refer to:
    reference
    - https://3.python-requests.org/user/advanced/#request-and-response-objects
    Returns None when *headers* is None (there is nowhere to put the token).
    """
    if headers is not None and not refresh_token_flag:
        my_token = get_token()
        if my_token is None:
            # No cached token yet: generate one lazily on first use.
            my_token = refresh_token()
        headers["Authorization"] = "Bearer " + my_token
        response = requests.post(url, data=data, headers=headers,
                                 files=files, params=params, json=json)
        if response.status_code == 401 or response.status_code == 403:
            # Token rejected (expired/invalid): refresh once and retry.
            my_token = refresh_token()
            headers["Authorization"] = "Bearer " + my_token
            response = requests.post(url, data=data, headers=headers,
                                     files=files, params=params, json=json)
        return response
    else:
        # Caller explicitly requested a fresh token: no cached lookup, no retry.
        if refresh_token_flag and headers is not None:
            my_token = refresh_token()
            headers["Authorization"] = "Bearer " + my_token
            return requests.post(url, data=data, headers=headers,
                                 files=files, params=params, json=json)
    return None
def auth_get(url, headers=None, refresh_token_flag=False):
    """
    Encapsulates the get method of adding token to headers.
    Check also: attendance_management_bot/common/token.py
    parameters and return values, refer to:
    reference
    - https://3.python-requests.org/user/advanced/#request-and-response-objects
    Returns None when *headers* is None (there is nowhere to put the token).
    """
    if headers is not None and not refresh_token_flag:
        my_token = get_token()
        if my_token is None:
            # No cached token yet: generate one lazily on first use.
            my_token = refresh_token()
        headers["Authorization"] = "Bearer " + my_token
        response = requests.get(url, headers=headers)
        if response.status_code == 401 or response.status_code == 403:
            # Token rejected (expired/invalid): refresh once and retry.
            my_token = refresh_token()
            headers["Authorization"] = "Bearer " + my_token
            response = requests.get(url, headers=headers)
        return response
    else:
        # Caller explicitly requested a fresh token: no cached lookup, no retry.
        if refresh_token_flag and headers is not None:
            my_token = refresh_token()
            headers["Authorization"] = "Bearer " + my_token
            return requests.get(url, headers=headers)
    return None
def auth_del(url, headers=None, refresh_token_flag=False):
    """
    Encapsulates the delete method of adding token to headers.
    Check also: attendance_management_bot/common/token.py
    parameters and return values, refer to:
    reference
    - https://3.python-requests.org/user/advanced/#request-and-response-objects
    Returns None when *headers* is None (there is nowhere to put the token).
    """
    if headers is not None and not refresh_token_flag:
        my_token = get_token()
        if my_token is None:
            # Bug fix: this branch previously called the undefined
            # ``init_token()``, raising NameError whenever no token was
            # cached yet.  Every sibling wrapper uses ``refresh_token()``.
            my_token = refresh_token()
        headers["Authorization"] = "Bearer " + my_token
        response = requests.delete(url, headers=headers)
        if response.status_code == 401 or response.status_code == 403:
            # Token rejected (expired/invalid): refresh once and retry.
            my_token = refresh_token()
            headers["Authorization"] = "Bearer " + my_token
            response = requests.delete(url, headers=headers)
        return response
    else:
        # Caller explicitly requested a fresh token: no cached lookup, no retry.
        if refresh_token_flag and headers is not None:
            my_token = refresh_token()
            headers["Authorization"] = "Bearer " + my_token
            return requests.delete(url, headers=headers)
    return None
def auth_put(url, data=None, headers=None, files=None,
             params=None, json=None, refresh_token_flag=False):
    """
    Encapsulates the put method of adding token to headers.
    Check also: attendance_management_bot/common/token.py
    parameters and return values, refer to:
    reference
    - https://3.python-requests.org/user/advanced/#request-and-response-objects
    Returns None when *headers* is None (there is nowhere to put the token).
    """
    if headers is not None and not refresh_token_flag:
        my_token = get_token()
        if my_token is None:
            # No cached token yet: generate one lazily on first use.
            my_token = refresh_token()
        headers["Authorization"] = "Bearer " + my_token
        response = requests.put(url, data=data, headers=headers,
                                files=files, params=params, json=json)
        if response.status_code == 401 or response.status_code == 403:
            # Token rejected (expired/invalid): refresh once and retry.
            my_token = refresh_token()
            headers["Authorization"] = "Bearer " + my_token
            response = requests.put(url, data=data, headers=headers,
                                    files=files, params=params, json=json)
        return response
    else:
        # Caller explicitly requested a fresh token: no cached lookup, no retry.
        if refresh_token_flag and headers is not None:
            my_token = refresh_token()
            headers["Authorization"] = "Bearer " + my_token
            return requests.put(url, data=data, headers=headers,
                                files=files, params=params, json=json)
    return None
| 34.358289 | 83 | 0.651673 |
415e46396cea2f65d557776ac7b19710a5e53d81 | 11,726 | py | Python | tests/hardware/dummy_modules.py | zea2/qupulse | 49a930bfe95f3f0c878daab5e52fd0c8aa8a4ff6 | [
"MIT"
] | null | null | null | tests/hardware/dummy_modules.py | zea2/qupulse | 49a930bfe95f3f0c878daab5e52fd0c8aa8a4ff6 | [
"MIT"
] | null | null | null | tests/hardware/dummy_modules.py | zea2/qupulse | 49a930bfe95f3f0c878daab5e52fd0c8aa8a4ff6 | [
"MIT"
] | null | null | null | """Import dummy modules if actual modules not installed. Sets dummy modules in sys so subsequent imports
use the dummies"""
import sys
from typing import Set
import unittest.mock
class dummy_package:
    """Marker base class: subclasses stand in for importable packages/modules
    and are installed into ``sys.modules`` by :func:`import_package`."""
    pass
class dummy_pytabor(dummy_package):
    """Stand-in for the ``pytabor`` package."""

    @staticmethod
    def open_session(*args, **kwargs):
        """Pretend to open a Tabor session; accepts anything, returns None."""
        return None
class dummy_pyvisa(dummy_package):
    """Stand-in for the ``pyvisa`` package hierarchy.

    Provides a ``MessageBasedResource`` test double that records every
    write/query and answers queries from a configurable ``answers`` dict.
    """
    class resources(dummy_package):
        class messagebased(dummy_package):
            class MessageBasedResource:
                def __init__(self, *args, **kwargs):
                    # Call logs inspected by tests.
                    self.logged_writes = []
                    self.logged_asks = []
                    # Mapping of query string (stripped of ' ?') -> canned answer.
                    self.answers = dict()
                    self.default_answer = '0, bla'
                def write(self, *args, **kwargs):
                    self.logged_writes.append((args, kwargs))
                def ask(self, *args, **kwargs):
                    # SCPI-style: split on ';', answer only the '?' queries,
                    # join the answers back with ';'.
                    self.logged_asks.append((args, kwargs))
                    ques = args[0].split(';')
                    ques = [q.strip(' ?') for q in ques if q.strip().endswith('?')]
                    answers = [self.answers[q] if q in self.answers else self.default_answer
                               for q in ques]
                    return ';'.join(answers)
                def query(self, *args, **kwargs):
                    # Same behavior as ask(); pyvisa exposes both names.
                    self.logged_asks.append((args, kwargs))
                    ques = args[0].split(';')
                    ques = [q.strip(' ?') for q in ques if q.strip().endswith('?')]
                    answers = [self.answers[q] if q in self.answers else self.default_answer
                               for q in ques]
                    return ';'.join(answers)
# Mirror pyvisa's re-export of MessageBasedResource at the resources level.
dummy_pyvisa.resources.MessageBasedResource = dummy_pyvisa.resources.messagebased.MessageBasedResource
class dummy_teawg(dummy_package):
    """Stand-in for the ``teawg`` Tabor AWG driver module.

    Reproduces the module-level ``model_properties_dict`` and a ``TEWXAwg``
    driver double that records all commands/downloads instead of talking to
    hardware.
    """
    # WX2184 Properties
    _wx2184_properties = {
        'model_name': 'WX2184', # the model name
        'fw_ver': 0.0, # the firmware version
        'serial_num': '0' * 9, # serial number
        'num_parts': 2, # number of instrument parts
        'chan_per_part': 2, # number of channels per part
        'seg_quantum': 16, # segment-length quantum
        'min_seg_len': 192, # minimal segment length
        'max_arb_mem': 32E6, # maximal arbitrary-memory (points per channel)
        'min_dac_val': 0, # minimal DAC value
        'max_dac_val': 2 ** 14 - 1, # maximal DAC value
        'max_num_segs': 32E+3, # maximal number of segments
        'max_seq_len': 48 * 1024, # maximal sequencer-table length (# rows)
        'min_seq_len': 3, # minimal sequencer-table length (# rows)
        'max_num_seq': 1000, # maximal number of sequencer-table
        'max_aseq_len': 48 * 1024 - 2, # maximal advanced-sequencer table length
        'min_aseq_len': 3, # minimal advanced-sequencer table length
        'min_sclk': 75e6, # minimal sampling-rate (samples/seconds)
        'max_sclk': 2300e6, # maximal sampling-rate (samples/seconds)
        'digital_support': False, # is digital-wave supported?
    }
    # WX1284 Definitions
    _wx1284_properties = {
        'model_name': 'WX1284', # the model name
        'fw_ver': 0.0, # the firmware version
        'serial_num': '0' * 9, # serial number
        'num_parts': 2, # number of instrument parts
        'chan_per_part': 2, # number of channels per part
        'seg_quantum': 16, # segment-length quantum
        'min_seg_len': 192, # minimal segment length
        'max_arb_mem': 32E6, # maximal arbitrary-memory (points per channel)
        'min_dac_val': 0, # minimal DAC value
        'max_dac_val': 2 ** 14 - 1, # maximal DAC value
        'max_num_segs': 32E+3, # maximal number of segments
        'max_seq_len': 48 * 1024, # maximal sequencer-table length (# rows)
        'min_seq_len': 3, # minimal sequencer-table length (# rows)
        'max_num_seq': 1000, # maximal number of sequencer-table
        'max_aseq_len': 48 * 1024 - 2, # maximal advanced-sequencer table length
        'min_aseq_len': 3, # minimal advanced-sequencer table length
        'min_sclk': 75e6, # minimal sampling-rate (samples/seconds)
        'max_sclk': 1250e6, # maximal sampling-rate (samples/seconds)
        'digital_support': False, # is digital-wave supported?
    }
    # WX2182C Definitions
    _wx2182C_properties = {
        'model_name': 'WX2182C', # the model name
        'fw_ver': 0.0, # the firmware version
        'serial_num': '0' * 9, # serial number
        'num_parts': 2, # number of instrument parts
        'chan_per_part': 1, # number of channels per part
        'seg_quantum': 16, # segment-length quantum
        'min_seg_len': 192, # minimal segment length
        'max_arb_mem': 32E6, # maximal arbitrary-memory (points per channel)
        'min_dac_val': 0, # minimal DAC value
        'max_dac_val': 2 ** 14 - 1, # maximal DAC value
        'max_num_segs': 32E+3, # maximal number of segments
        'max_seq_len': 48 * 1024, # maximal sequencer-table length (# rows)
        'min_seq_len': 3, # minimal sequencer-table length (# rows)
        'max_num_seq': 1000, # maximal number of sequencer-table
        'max_aseq_len': 1000, # maximal advanced-sequencer table length
        'min_aseq_len': 3, # minimal advanced-sequencer table length
        'min_sclk': 10e6, # minimal sampling-rate (samples/seconds)
        'max_sclk': 2.3e9, # maximal sampling-rate (samples/seconds)
        'digital_support': False, # is digital-wave supported?
    }
    # WX1282C Definitions
    _wx1282C_properties = {
        'model_name': 'WX1282C', # the model name
        'fw_ver': 0.0, # the firmware version
        'serial_num': '0' * 9, # serial number
        'num_parts': 2, # number of instrument parts
        'chan_per_part': 1, # number of channels per part
        'seg_quantum': 16, # segment-length quantum
        'min_seg_len': 192, # minimal segment length
        'max_arb_mem': 32E6, # maximal arbitrary-memory (points per channel)
        'min_dac_val': 0, # minimal DAC value
        'max_dac_val': 2 ** 14 - 1, # maximal DAC value
        'max_num_segs': 32E+3, # maximal number of segments
        'max_seq_len': 48 * 1024, # maximal sequencer-table length (# rows)
        'min_seq_len': 3, # minimal sequencer-table length (# rows)
        'max_num_seq': 1000, # maximal number of sequencer-table
        'max_aseq_len': 1000, # maximal advanced-sequencer table length
        'min_aseq_len': 3, # minimal advanced-sequencer table length
        'min_sclk': 10e6, # minimal sampling-rate (samples/seconds)
        'max_sclk': 1.25e9, # maximal sampling-rate (samples/seconds)
        'digital_support': False, # is digital-wave supported?
    }
    # dictionary of supported-models' properties
    # NOTE(review): 'WX1284'/'WX1284C' map to the WX2184 table, leaving
    # _wx1284_properties unused — confirm against the real teawg module.
    model_properties_dict = {
        'WX2184': _wx2184_properties,
        'WX2184C': _wx2184_properties,
        'WX1284': _wx2184_properties,
        'WX1284C': _wx2184_properties,
        'WX2182C': _wx2182C_properties,
        'WX1282C': _wx1282C_properties,
    }
    class TEWXAwg:
        """Recording test double for the teawg TEWXAwg driver class."""
        # Class-level log shared by all instances (make_combined_wave is static).
        _make_combined_wave_calls = []
        def __init__(self, *args, paranoia_level=1, model='WX2184C', **kwargs):
            self.logged_commands = []
            self.logged_queries = []
            self._visa_inst = dummy_pyvisa.resources.MessageBasedResource()
            self.paranoia_level = paranoia_level
            self.dev_properties = dummy_teawg.model_properties_dict[model]
            # Per-method call logs inspected by tests.
            self._download_segment_lengths_calls = []
            self._send_binary_data_calls = []
            self._download_adv_seq_table_calls = []
            self._download_sequencer_table_calls = []
        @property
        def is_simulator(self):
            # The dummy always reports a "real" device.
            return False
        @property
        def visa_inst(self):
            return self._visa_inst
        def send_cmd(self, *args, **kwargs):
            self.logged_commands.append((args, kwargs))
        def send_query(self, *args, **kwargs):
            # Delegate to the dummy VISA resource's canned-answer logic.
            return self._visa_inst.ask(*args, **kwargs)
        def download_segment_lengths(self, seg_len_list, pref='dummy_pref', paranoia_level='dummy_paranoia'):
            self._download_segment_lengths_calls.append((seg_len_list, pref, paranoia_level))
        def send_binary_data(self, pref, bin_dat, paranoia_level='dummy_paranoia'):
            self._send_binary_data_calls.append((pref, bin_dat, paranoia_level))
        def download_adv_seq_table(self, advanced_sequencer_table, pref=':ASEQ:DATA', paranoia_level=None):
            self._download_adv_seq_table_calls.append((advanced_sequencer_table, pref, paranoia_level))
        def download_sequencer_table(self, *args, **kwargs):
            self._download_sequencer_table_calls.append((args, kwargs))
        @staticmethod
        def make_combined_wave(wav1, wav2, dest_array, dest_array_offset=0, add_idle_pts=False, quantum=16):
            # Only records the call; no waveform data is actually combined.
            dummy_teawg.TEWXAwg._make_combined_wave_calls.append((wav1, wav2, dest_array, dest_array_offset, add_idle_pts, quantum))
class dummy_atsaverage(dummy_package):
    """Stand-in for the ``atsaverage`` Alazar DAC package hierarchy."""
    class atsaverage(dummy_package):
        pass
    class alazar(dummy_package):
        pass
    class core(dummy_package):
        class AlazarCard:
            """Recording double for an Alazar digitizer card."""
            model = 'DUMMY'
            minimum_record_size = 256
            def __init__(self):
                # Call logs inspected by tests.
                self._startAcquisition_calls = []
                self._applyConfiguration_calls = []
            def startAcquisition(self, x: int):
                self._startAcquisition_calls.append(x)
            def applyConfiguration(self, config):
                self._applyConfiguration_calls.append(config)
    class config(dummy_package):
        class CaptureClockConfig:
            def numeric_sample_rate(self, card):
                # Fixed 100 MS/s sample rate for all dummy cards.
                return 10**8
        class ScanlineConfiguration:
            def __init__(self):
                self._apply_calls = []
            def apply(self, card, print_debug_output):
                self._apply_calls.append((card, print_debug_output))
            # Class-level mock so tests can assert on buffer-size access.
            aimedBufferSize = unittest.mock.PropertyMock(return_value=2**22)
        # Attach a shared clock config instance, as the real package exposes one.
        ScanlineConfiguration.captureClockConfiguration = CaptureClockConfig()
    class operations(dummy_package):
        class OperationDefinition:
            pass
    class masks(dummy_package):
        class Mask:
            pass
        class CrossBufferMask:
            pass
def import_package(name, package=None) -> Set[dummy_package]:
    """Install *package* (a dummy_package subclass) into ``sys.modules`` as *name*.

    Recurses into nested dummy_package classes, registering each as a
    dotted submodule (e.g. ``atsaverage.config``), so subsequent
    ``import name.sub`` statements resolve to the dummies.

    :param name: module name to register under
    :param package: dummy class to register; when None it is looked up by *name*
    :return: the set of dummy classes that were registered
    :raises KeyError: when *name* is not a known dummy package
    """
    if package is None:
        package_dict = dict(atsaverage=dummy_atsaverage,
                            pyvisa=dummy_pyvisa,
                            pytabor=dummy_pytabor,
                            teawg=dummy_teawg)
        if name in package_dict:
            package = package_dict[name]
        else:
            raise KeyError('Unknown package', name)
    imported = set()
    sys.modules[name] = package
    imported.add(package)
    # Register every nested dummy_package class as a dotted submodule.
    for attr in dir(package):
        if isinstance(getattr(package, attr), type) and issubclass(getattr(package, attr), dummy_package):
            imported |= import_package(name + '.' + attr, getattr(package, attr))
    return imported
def replace_missing():
    """Register a dummy for every hardware package that is not installed.

    Tries to import each real package (pytabor, pyvisa, teawg, atsaverage);
    on ImportError the corresponding dummy is installed into ``sys.modules``
    so test imports succeed without the hardware drivers.

    :return: the set of dummy classes that were registered
    """
    failed_imports = set()
    try:
        import pytabor
    except ImportError:
        failed_imports |= import_package('pytabor', dummy_pytabor)
    try:
        import pyvisa
    except ImportError:
        failed_imports |= import_package('pyvisa', dummy_pyvisa)
    try:
        import teawg
    except ImportError:
        failed_imports |= import_package('teawg', dummy_teawg)
    try:
        import atsaverage
        import atsaverage.config
    except ImportError:
        failed_imports |= import_package('atsaverage', dummy_atsaverage)
    return failed_imports
| 43.42963 | 132 | 0.621098 |
8461b416e3ec2b1bc20418606020140b44eeba95 | 2,031 | py | Python | test/azure/Expected/AcceptanceTests/AzureResource/azureresource/models/flattened_product_py3.py | fearthecowboy/autorest.python | a251e361218598b55b0621db2275aafcb7158a5c | [
"MIT"
] | null | null | null | test/azure/Expected/AcceptanceTests/AzureResource/azureresource/models/flattened_product_py3.py | fearthecowboy/autorest.python | a251e361218598b55b0621db2275aafcb7158a5c | [
"MIT"
] | null | null | null | test/azure/Expected/AcceptanceTests/AzureResource/azureresource/models/flattened_product_py3.py | fearthecowboy/autorest.python | a251e361218598b55b0621db2275aafcb7158a5c | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class FlattenedProduct(Resource):
    """FlattenedProduct.
    Variables are only populated by the server, and will be ignored when
    sending a request.
    :ivar id: Resource Id
    :vartype id: str
    :ivar type: Resource Type
    :vartype type: str
    :param tags:
    :type tags: dict[str, str]
    :param location: Resource Location
    :type location: str
    :ivar name: Resource Name
    :vartype name: str
    :param pname:
    :type pname: str
    :param lsize:
    :type lsize: int
    :param provisioning_state:
    :type provisioning_state: str
    """
    # Server-populated, read-only fields.
    _validation = {
        'id': {'readonly': True},
        'type': {'readonly': True},
        'name': {'readonly': True},
    }
    # Wire-format mapping; 'properties.*' keys are flattened onto this model
    # by the msrest (de)serializer.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'location': {'key': 'location', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'pname': {'key': 'properties.pname', 'type': 'str'},
        'lsize': {'key': 'properties.lsize', 'type': 'int'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }
    def __init__(self, *, tags=None, location: str=None, pname: str=None, lsize: int=None, provisioning_state: str=None, **kwargs) -> None:
        super(FlattenedProduct, self).__init__(tags=tags, location=location, **kwargs)
        self.pname = pname
        self.lsize = lsize
        self.provisioning_state = provisioning_state
78c8e8eefe210e192eee3bd1a335e5904788560e | 9,110 | py | Python | fidesctl/src/fidesctl/cli/cli.py | Tannahooks/fides | d4e1b3bdf3490a31ae5a7876b65ce527884d25d8 | [
"Apache-2.0"
] | null | null | null | fidesctl/src/fidesctl/cli/cli.py | Tannahooks/fides | d4e1b3bdf3490a31ae5a7876b65ce527884d25d8 | [
"Apache-2.0"
] | 7 | 2022-01-06T21:44:11.000Z | 2022-02-01T23:43:59.000Z | fidesctl/src/fidesctl/cli/cli.py | Tannahooks/fides | d4e1b3bdf3490a31ae5a7876b65ce527884d25d8 | [
"Apache-2.0"
] | 2 | 2022-01-11T21:14:50.000Z | 2022-01-12T17:43:01.000Z | """Contains all of the CLI commands for Fides."""
import click
import requests
from fidesctl.cli.options import (
dry_flag,
fides_key_argument,
manifests_dir_argument,
resource_type_argument,
yes_flag,
verbose_flag,
)
from fidesctl.cli.utils import (
handle_cli_response,
pretty_echo,
)
from fidesctl.core import (
api as _api,
apply as _apply,
evaluate as _evaluate,
generate_dataset as _generate_dataset,
parse as _parse,
)
from fidesctl.core.utils import echo_green, echo_red
@click.command()
@click.pass_context
@dry_flag
@click.option(
    "--diff",
    is_flag=True,
    help="Print the diff between the server's old and new states in Python DeepDiff format",
)
@manifests_dir_argument
def apply(ctx: click.Context, dry: bool, diff: bool, manifests_dir: str) -> None:
    """
    Validates local manifest files and then sends them to the server to be persisted.
    """
    config = ctx.obj["CONFIG"]
    # Parse/validate locally first so invalid manifests never reach the server.
    taxonomy = _parse.parse(manifests_dir)
    _apply.apply(
        url=config.cli.server_url,
        taxonomy=taxonomy,
        headers=config.user.request_headers,
        dry=dry,
        diff=diff,
    )
@click.command()
@click.pass_context
@resource_type_argument
@fides_key_argument
def delete(ctx: click.Context, resource_type: str, fides_key: str) -> None:
    """
    Delete a resource on the server.
    """
    config = ctx.obj["CONFIG"]
    # Issue the delete, then echo the server's response (or exit non-zero).
    response = _api.delete(
        url=config.cli.server_url,
        resource_type=resource_type,
        resource_id=fides_key,
        headers=config.user.request_headers,
    )
    handle_cli_response(response)
@click.command()
@click.pass_context
@manifests_dir_argument
@click.option(
    "-k",
    "--fides-key",
    default="",
    help="The fides_key of the single policy that you wish to evaluate.",
)
@click.option(
    "-m",
    "--message",
    help="A message that you can supply to describe the context of this evaluation.",
)
@dry_flag
def evaluate(
    ctx: click.Context,
    manifests_dir: str,
    fides_key: str,
    message: str,
    dry: bool,
) -> None:
    """
    Compare your System's Privacy Declarations with your Organization's Policy Rules.
    All local resources are applied to the server before evaluation.
    If your policy evaluation fails, it is expected that you will need to
    either adjust your Privacy Declarations, Datasets, or Policies before trying again.
    """
    config = ctx.obj["CONFIG"]
    if config.cli.local_mode:
        # Local mode has no server to apply to, so evaluation is forced dry.
        dry = True
    else:
        # Push the local manifests first so the server evaluates current state.
        taxonomy = _parse.parse(manifests_dir)
        _apply.apply(
            url=config.cli.server_url,
            taxonomy=taxonomy,
            headers=config.user.request_headers,
            dry=dry,
        )
    _evaluate.evaluate(
        url=config.cli.server_url,
        headers=config.user.request_headers,
        manifests_dir=manifests_dir,
        policy_fides_key=fides_key,
        message=message,
        local=config.cli.local_mode,
        dry=dry,
    )
@click.command()
@click.pass_context
@click.argument("connection_string", type=str)
@click.argument("output_filename", type=str)
def generate_dataset(
    ctx: click.Context, connection_string: str, output_filename: str
) -> None:
    """
    Connect to a database directly via a SQLAlchemy-stlye connection string and
    generate a dataset manifest file that consists of every schema/table/field.
    This is a one-time operation that does not track the state of the database.
    It will need to be run again if the database schema changes.
    """
    # Pure local operation: no server URL or auth headers are needed.
    _generate_dataset.generate_dataset(connection_string, output_filename)
@click.command()
@click.pass_context
@click.argument("source_type", type=click.Choice(["database"]))
@click.argument("connection_string", type=str)
@click.option("-m", "--manifest-dir", type=str, default="")
@click.option("-c", "--coverage-threshold", type=click.IntRange(0, 100), default=100)
def scan(
    ctx: click.Context,
    source_type: str,
    connection_string: str,
    manifest_dir: str,
    coverage_threshold: int,
) -> None:
    """
    Connect to a database directly via a SQLAlchemy-stlye connection string and
    compare the database objects to existing datasets.
    If there are fields within the database that aren't listed and categorized
    within one of the datasets, this counts as lacking coverage.
    Outputs missing fields and has a non-zero exit if coverage is
    under the stated threshold.
    """
    config = ctx.obj["CONFIG"]
    # NOTE(review): source_type is currently unused — "database" is the only
    # choice, and database_coverage is called unconditionally.
    _generate_dataset.database_coverage(
        connection_string=connection_string,
        manifest_dir=manifest_dir,
        coverage_threshold=coverage_threshold,
        url=config.cli.server_url,
        headers=config.user.request_headers,
    )
@click.command()
@click.pass_context
@click.argument("input_filename", type=str)
@click.option(
    "-a",
    "--all-members",
    is_flag=True,
    help="Annotate all dataset members, not just fields",
)
@click.option(
    "-v",
    "--validate",
    is_flag=True,
    default=False,
    help="Strictly validate annotation inputs.",
)
def annotate_dataset(
    ctx: click.Context, input_filename: str, all_members: bool, validate: bool
) -> None:
    """
    Guided flow for annotating datasets. The dataset file will be edited in-place.
    """
    # Imported lazily: the annotation flow needs the optional "webserver"
    # extra; fail with a helpful hint instead of a raw ImportError.
    try:
        from fidesctl.core import annotate_dataset as _annotate_dataset
    except ModuleNotFoundError:
        echo_red('Packages not found, try: pip install "fidesctl[webserver]"')
        raise SystemExit
    _annotate_dataset.annotate_dataset(
        input_filename, annotate_all=all_members, validate=validate
    )
@click.command()
@click.pass_context
@resource_type_argument
@fides_key_argument
def get(ctx: click.Context, resource_type: str, fides_key: str) -> None:
    """
    View a resource from the server as a JSON object.
    """
    config = ctx.obj["CONFIG"]
    # Fetch the resource, then echo the server's response (or exit non-zero).
    response = _api.get(
        url=config.cli.server_url,
        resource_type=resource_type,
        resource_id=fides_key,
        headers=config.user.request_headers,
    )
    handle_cli_response(response)
@click.command()
@click.pass_context
def init_db(ctx: click.Context) -> None:
    """
    Initialize the Fidesctl database.
    """
    config = ctx.obj["CONFIG"]
    response = _api.db_action(config.cli.server_url, "init")
    handle_cli_response(response)
@click.command()
@click.pass_context
@resource_type_argument
def ls(ctx: click.Context, resource_type: str) -> None:  # pylint: disable=invalid-name
    """
    Get a list of all resources of this type from the server and display them as JSON.
    """
    config = ctx.obj["CONFIG"]
    # List all resources of the type, then echo the response (or exit non-zero).
    response = _api.ls(
        url=config.cli.server_url,
        resource_type=resource_type,
        headers=config.user.request_headers,
    )
    handle_cli_response(response)
@click.command()
@click.pass_context
@manifests_dir_argument
@verbose_flag
def parse(ctx: click.Context, manifests_dir: str, verbose: bool = False) -> None:
    """
    Reads the resource files that are stored in MANIFESTS_DIR and its subdirectories to verify
    the validity of all manifest files.
    If the taxonomy is invalid, this command prints the error messages and triggers a non-zero exit code.
    """
    # _parse.parse raises/exits on invalid manifests; success is silent
    # unless --verbose is given.
    taxonomy = _parse.parse(manifests_dir)
    if verbose:
        pretty_echo(taxonomy.dict(), color="green")
@click.command()
@click.pass_context
def ping(ctx: click.Context, config_path: str = "") -> None:
    """
    Sends a request to the Fidesctl API healthcheck endpoint and prints the response.
    """
    config = ctx.obj["CONFIG"]
    healthcheck_url = config.cli.server_url + "/health"
    echo_green(f"Pinging {healthcheck_url}...")
    # An unreachable server is reported as a friendly message, not a traceback.
    try:
        ping_response = _api.ping(healthcheck_url)
        handle_cli_response(ping_response)
    except requests.exceptions.ConnectionError:
        echo_red("Connection failed, webserver is unreachable.")
@click.command()
@click.pass_context
@yes_flag
def reset_db(ctx: click.Context, yes: bool) -> None:
    """
    Wipes all user-created data and resets the database back to its freshly initialized state.
    """
    config = ctx.obj["CONFIG"]
    # --yes skips the interactive confirmation; otherwise warn and prompt.
    confirmed = yes
    if not confirmed:
        echo_red(
            "This will drop all data from the Fides database and reload the default taxonomy!"
        )
        confirmed = input("Are you sure [y/n]? ").lower() == "y"
    if confirmed:
        handle_cli_response(_api.db_action(config.cli.server_url, "reset"))
    else:
        print("Aborting!")
@click.command()
@click.pass_context
def view_config(ctx: click.Context) -> None:
    """
    Prints the current fidesctl configuration values.
    """
    # Dump the already-loaded configuration object rather than re-reading
    # any config files from disk.
    pretty_echo(ctx.obj["CONFIG"].dict(), color="green")
@click.command()
@click.pass_context
def webserver(ctx: click.Context) -> None:
    """
    Starts the fidesctl API server using Uvicorn on port 8080.
    """
    try:
        # Imported lazily: the webserver dependencies are an optional extra.
        from fidesapi.main import start_webserver
    except ModuleNotFoundError:
        echo_red('Packages not found, try: pip install "fidesctl[webserver]"')
        # Exit with a non-zero status so scripts/CI can detect the failure;
        # a bare `raise SystemExit` would exit with status 0 (success).
        raise SystemExit(1)
    start_webserver()
| 27.522659 | 105 | 0.678156 |
44f6f088a7fc4d22b3732126985ea57ff8bf8562 | 1,834 | py | Python | module1/s2_sparql_neloial.py | neloial/tac | b4d92629c293a15016fed0ce80a7dfde4bf68b19 | [
"MIT"
] | null | null | null | module1/s2_sparql_neloial.py | neloial/tac | b4d92629c293a15016fed0ce80a7dfde4bf68b19 | [
"MIT"
] | null | null | null | module1/s2_sparql_neloial.py | neloial/tac | b4d92629c293a15016fed0ce80a7dfde4bf68b19 | [
"MIT"
] | null | null | null | """Query Wikidata for Belgian cities and towns"""
import argparse
from SPARQLWrapper import SPARQLWrapper, JSON
# Command-line options: -f/--filter substring-matches city names,
# -n/--number caps how many result rows are printed (both handled in show()).
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--filter', type=str, help='Filtering on name')
parser.add_argument('-n', '--number', type=int, help='Number of rows to display')
def get_rows():
    """Run the Wikidata SPARQL query and return its result bindings.

    Also prints how many Belgian cities/towns were found. Requires network
    access to the Wikidata query service.
    """
    endpoint = "https://query.wikidata.org/bigdata/namespace/wdq/sparql"
    client = SPARQLWrapper(endpoint)
    query = """
    #Population of cities and towns in Belgium
    SELECT DISTINCT ?city ?cityLabel ?population WHERE {
      SERVICE wikibase:label { bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en". }
      VALUES ?town_or_city {
        wd:Q3957
        wd:Q515
      }
      ?city (wdt:P31/(wdt:P279*)) ?town_or_city;
        wdt:P17 wd:Q31.
      OPTIONAL { ?city wdt:P1082 ?population. }
    }
    ORDER BY ?cityLabel
    """
    client.setQuery(query)
    client.setReturnFormat(JSON)
    bindings = client.query().convert()['results']['bindings']
    print(f"\n{len(bindings)} Belgian cities and towns found\n")
    return bindings
def show(rows, name_filter=None, n=20):
    """Print up to *n* Belgian towns/cities (default 20).

    When *name_filter* is given, only rows whose label contains it
    (case-insensitively) are shown; a missing population prints as '????'.
    """
    if name_filter:
        needle = name_filter.lower()
        rows = [entry for entry in rows
                if needle in entry['cityLabel']['value'].lower()]
    print(f"Displaying the first {n}:\n")
    for entry in rows[:n]:
        try:
            pop = entry['population']['value']
        except KeyError:
            pop = "????"
        print(f"{entry['cityLabel']['value']} ({pop})")
if __name__ == "__main__":
    args = parser.parse_args()
    all_rows = get_rows()
    # argparse already defaults both options to None; `or` also maps an
    # empty/zero value onto the same fallbacks the original used.
    show(all_rows, args.filter or None, args.number or 20)
| 31.62069 | 96 | 0.633588 |
9fff4904b7b636efc637d202816c5ef41995015a | 7,956 | py | Python | pysnmp/HM2-PLATFORM-MMRP-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 11 | 2021-02-02T16:27:16.000Z | 2021-08-31T06:22:49.000Z | pysnmp/HM2-PLATFORM-MMRP-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 75 | 2021-02-24T17:30:31.000Z | 2021-12-08T00:01:18.000Z | pysnmp/HM2-PLATFORM-MMRP-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z | #
# PySNMP MIB module HM2-PLATFORM-MMRP-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/HM2-PLATFORM-MMRP-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 19:19:11 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# NOTE(review): pysmi-generated MIB module. `mibBuilder` is injected by the
# pysnmp MIB loader at load time, so this file is not importable on its own
# and the statement order below must be preserved.

# Pull base ASN.1 types, constraints and SMI macros from already-loaded MIBs.
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, SingleValueConstraint, ConstraintsUnion, ValueSizeConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "SingleValueConstraint", "ConstraintsUnion", "ValueSizeConstraint", "ConstraintsIntersection")
hm2AgentDot1qMrpMxrp, = mibBuilder.importSymbols("HM2-PLATFORM-MRP-MIB", "hm2AgentDot1qMrpMxrp")
EnabledStatus, = mibBuilder.importSymbols("P-BRIDGE-MIB", "EnabledStatus")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
Counter64, Integer32, iso, Counter32, Bits, NotificationType, IpAddress, MibScalar, MibTable, MibTableRow, MibTableColumn, Gauge32, ObjectIdentity, TimeTicks, Unsigned32, MibIdentifier, ModuleIdentity = mibBuilder.importSymbols("SNMPv2-SMI", "Counter64", "Integer32", "iso", "Counter32", "Bits", "NotificationType", "IpAddress", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Gauge32", "ObjectIdentity", "TimeTicks", "Unsigned32", "MibIdentifier", "ModuleIdentity")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")

# Module identity: Hirschmann platform MMRP subtree (1.3.6.1.4.1.248.12.60.2.1).
hm2PlatformMMRP = ModuleIdentity((1, 3, 6, 1, 4, 1, 248, 12, 60, 2, 1))
hm2PlatformMMRP.setRevisions(('2013-04-10 00:00',))
if mibBuilder.loadTexts: hm2PlatformMMRP.setLastUpdated('201304100000Z')
if mibBuilder.loadTexts: hm2PlatformMMRP.setOrganization('Hirschmann Automation and Control GmbH')

# Subtree roots: per-port/bridge MMRP configuration and MMRP statistics.
hm2AgentDot1qMmrp = MibIdentifier((1, 3, 6, 1, 4, 1, 248, 12, 60, 2, 1, 1))
hm2AgentDot1qMrpMmrpStats = MibIdentifier((1, 3, 6, 1, 4, 1, 248, 12, 60, 2, 1, 2))

# Per-port MMRP mode table, indexed by port number (1..65535).
hm2AgentDot1qPortMmrpTable = MibTable((1, 3, 6, 1, 4, 1, 248, 12, 60, 2, 1, 1, 1), )
if mibBuilder.loadTexts: hm2AgentDot1qPortMmrpTable.setStatus('current')
hm2AgentDot1qPortMmrpEntry = MibTableRow((1, 3, 6, 1, 4, 1, 248, 12, 60, 2, 1, 1, 1, 1), ).setIndexNames((0, "HM2-PLATFORM-MMRP-MIB", "hm2AgentDot1qMmrpPort"))
if mibBuilder.loadTexts: hm2AgentDot1qPortMmrpEntry.setStatus('current')
hm2AgentDot1qMmrpPort = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 12, 60, 2, 1, 1, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535)))
if mibBuilder.loadTexts: hm2AgentDot1qMmrpPort.setStatus('current')
hm2AgentDot1qPortMmrpMode = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 12, 60, 2, 1, 1, 1, 1, 2), EnabledStatus().clone('enabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hm2AgentDot1qPortMmrpMode.setStatus('current')

# Bridge-wide switches (both default to disabled).
hm2AgentDot1qBridgeMmrpMode = MibScalar((1, 3, 6, 1, 4, 1, 248, 12, 60, 2, 1, 1, 2), EnabledStatus().clone('disabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hm2AgentDot1qBridgeMmrpMode.setStatus('current')
hm2AgentDot1qBridgeMrpPeriodicStateMachineForMmrp = MibScalar((1, 3, 6, 1, 4, 1, 248, 12, 60, 2, 1, 1, 3), EnabledStatus().clone('disabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hm2AgentDot1qBridgeMrpPeriodicStateMachineForMmrp.setStatus('current')

# Global MMRP packet counters (read-only).
hm2AgentDot1qMrpMmrpPktTx = MibScalar((1, 3, 6, 1, 4, 1, 248, 12, 60, 2, 1, 2, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hm2AgentDot1qMrpMmrpPktTx.setStatus('current')
hm2AgentDot1qMrpMmrpPktRx = MibScalar((1, 3, 6, 1, 4, 1, 248, 12, 60, 2, 1, 2, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hm2AgentDot1qMrpMmrpPktRx.setStatus('current')
hm2AgentDot1qMrpMmrpPktRxBadHeader = MibScalar((1, 3, 6, 1, 4, 1, 248, 12, 60, 2, 1, 2, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hm2AgentDot1qMrpMmrpPktRxBadHeader.setStatus('current')
hm2AgentDot1qMrpMmrpPktRxBadFormat = MibScalar((1, 3, 6, 1, 4, 1, 248, 12, 60, 2, 1, 2, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hm2AgentDot1qMrpMmrpPktRxBadFormat.setStatus('current')
hm2AgentDot1qMrpMmrpPktTxFailure = MibScalar((1, 3, 6, 1, 4, 1, 248, 12, 60, 2, 1, 2, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hm2AgentDot1qMrpMmrpPktTxFailure.setStatus('current')

# Per-interface MMRP statistics table, indexed by interface number.
hm2AgentDot1qMrpMmrpStatsTable = MibTable((1, 3, 6, 1, 4, 1, 248, 12, 60, 2, 1, 2, 6), )
if mibBuilder.loadTexts: hm2AgentDot1qMrpMmrpStatsTable.setStatus('current')
hm2AgentDot1qMrpMmrpStatsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 248, 12, 60, 2, 1, 2, 6, 1), ).setIndexNames((0, "HM2-PLATFORM-MMRP-MIB", "hm2AgentDot1qMrpMmrpIntf"))
if mibBuilder.loadTexts: hm2AgentDot1qMrpMmrpStatsEntry.setStatus('current')
hm2AgentDot1qMrpMmrpIntf = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 12, 60, 2, 1, 2, 6, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535)))
if mibBuilder.loadTexts: hm2AgentDot1qMrpMmrpIntf.setStatus('current')
hm2AgentDot1qMrpMmrpPortPktTx = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 12, 60, 2, 1, 2, 6, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hm2AgentDot1qMrpMmrpPortPktTx.setStatus('current')
hm2AgentDot1qMrpMmrpPortPktRx = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 12, 60, 2, 1, 2, 6, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hm2AgentDot1qMrpMmrpPortPktRx.setStatus('current')
hm2AgentDot1qMrpMmrpPortPktRxBadHeader = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 12, 60, 2, 1, 2, 6, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hm2AgentDot1qMrpMmrpPortPktRxBadHeader.setStatus('current')
hm2AgentDot1qMrpMmrpPortPktRxBadFormat = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 12, 60, 2, 1, 2, 6, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hm2AgentDot1qMrpMmrpPortPktRxBadFormat.setStatus('current')
hm2AgentDot1qMrpMmrpPortPktTxFailure = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 12, 60, 2, 1, 2, 6, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hm2AgentDot1qMrpMmrpPortPktTxFailure.setStatus('current')
hm2AgentDot1qMrpMmrpDynamicAddrCount = MibScalar((1, 3, 6, 1, 4, 1, 248, 12, 60, 2, 1, 2, 248), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hm2AgentDot1qMrpMmrpDynamicAddrCount.setStatus('current')

# Register everything under this module's name for pysnmp symbol resolution.
mibBuilder.exportSymbols("HM2-PLATFORM-MMRP-MIB", PYSNMP_MODULE_ID=hm2PlatformMMRP, hm2AgentDot1qMrpMmrpPktTxFailure=hm2AgentDot1qMrpMmrpPktTxFailure, hm2AgentDot1qMrpMmrpPortPktRx=hm2AgentDot1qMrpMmrpPortPktRx, hm2AgentDot1qMrpMmrpStatsEntry=hm2AgentDot1qMrpMmrpStatsEntry, hm2AgentDot1qBridgeMmrpMode=hm2AgentDot1qBridgeMmrpMode, hm2AgentDot1qMrpMmrpStats=hm2AgentDot1qMrpMmrpStats, hm2AgentDot1qMmrp=hm2AgentDot1qMmrp, hm2AgentDot1qBridgeMrpPeriodicStateMachineForMmrp=hm2AgentDot1qBridgeMrpPeriodicStateMachineForMmrp, hm2AgentDot1qMrpMmrpPortPktTx=hm2AgentDot1qMrpMmrpPortPktTx, hm2AgentDot1qMrpMmrpPktRxBadFormat=hm2AgentDot1qMrpMmrpPktRxBadFormat, hm2AgentDot1qMrpMmrpIntf=hm2AgentDot1qMrpMmrpIntf, hm2AgentDot1qMrpMmrpPortPktRxBadFormat=hm2AgentDot1qMrpMmrpPortPktRxBadFormat, hm2AgentDot1qMrpMmrpPortPktRxBadHeader=hm2AgentDot1qMrpMmrpPortPktRxBadHeader, hm2PlatformMMRP=hm2PlatformMMRP, hm2AgentDot1qMrpMmrpDynamicAddrCount=hm2AgentDot1qMrpMmrpDynamicAddrCount, hm2AgentDot1qMrpMmrpPortPktTxFailure=hm2AgentDot1qMrpMmrpPortPktTxFailure, hm2AgentDot1qMrpMmrpStatsTable=hm2AgentDot1qMrpMmrpStatsTable, hm2AgentDot1qPortMmrpMode=hm2AgentDot1qPortMmrpMode, hm2AgentDot1qMrpMmrpPktRx=hm2AgentDot1qMrpMmrpPktRx, hm2AgentDot1qPortMmrpEntry=hm2AgentDot1qPortMmrpEntry, hm2AgentDot1qMmrpPort=hm2AgentDot1qMmrpPort, hm2AgentDot1qPortMmrpTable=hm2AgentDot1qPortMmrpTable, hm2AgentDot1qMrpMmrpPktTx=hm2AgentDot1qMrpMmrpPktTx, hm2AgentDot1qMrpMmrpPktRxBadHeader=hm2AgentDot1qMrpMmrpPktRxBadHeader)
| 126.285714 | 1,494 | 0.793741 |
341f7eafc13bf36a748d9e9fa75651c5775c3fbd | 368 | py | Python | malcolm/modules/ca/parts/calongpart.py | MattTaylorDLS/pymalcolm | 995a8e4729bd745f8f617969111cc5a34ce1ac14 | [
"Apache-2.0"
] | null | null | null | malcolm/modules/ca/parts/calongpart.py | MattTaylorDLS/pymalcolm | 995a8e4729bd745f8f617969111cc5a34ce1ac14 | [
"Apache-2.0"
] | null | null | null | malcolm/modules/ca/parts/calongpart.py | MattTaylorDLS/pymalcolm | 995a8e4729bd745f8f617969111cc5a34ce1ac14 | [
"Apache-2.0"
] | null | null | null | from malcolm.modules.builtin.vmetas import NumberMeta
from .capart import CAPart
class CALongPart(CAPart):
    """Defines an int32 `Attribute` that talks to a DBR_LONG longout PV"""

    def create_meta(self, description, tags):
        """Create the int32 NumberMeta describing this attribute."""
        meta = NumberMeta("int32", description=description, tags=tags)
        return meta

    def get_datatype(self):
        """Return the Channel Access DBR type used on the wire."""
        return self.catools.DBR_LONG
| 28.307692 | 74 | 0.733696 |
e0ae23b149bca6082c783ed2ec0e8742e7b3e857 | 796 | py | Python | checkov/terraform/checks/resource/azure/RedisCacheMinTLSVersion.py | pmalkki/checkov | b6cdf386dd976fe27c16fed6d550756a678a5d7b | [
"Apache-2.0"
] | 1 | 2022-02-20T21:20:39.000Z | 2022-02-20T21:20:39.000Z | checkov/terraform/checks/resource/azure/RedisCacheMinTLSVersion.py | pmalkki/checkov | b6cdf386dd976fe27c16fed6d550756a678a5d7b | [
"Apache-2.0"
] | null | null | null | checkov/terraform/checks/resource/azure/RedisCacheMinTLSVersion.py | pmalkki/checkov | b6cdf386dd976fe27c16fed6d550756a678a5d7b | [
"Apache-2.0"
] | null | null | null | from checkov.common.models.enums import CheckResult, CheckCategories
from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck
class RedisCacheMinTLSVersion(BaseResourceValueCheck):
def __init__(self):
name = "Ensure Redis Cache is using the latest version of TLS encryption"
id = "CKV_AZURE_148"
supported_resources = ['azurerm_redis_cache']
categories = [CheckCategories.NETWORKING]
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources,
missing_block_result=CheckResult.FAILED)
def get_inspected_key(self):
return "minimum_tls_version"
def get_expected_value(self):
return '1.2'
check = RedisCacheMinTLSVersion()
| 36.181818 | 106 | 0.738693 |
92408d34f2d652b6200b47f1d840e3b47254ce41 | 10,520 | py | Python | Lib/site-packages/qutebrowser/mainwindow/statusbar/command.py | fochoao/cpython | 3dc84b260e5bced65ebc2c45c40c8fa65f9b5aa9 | [
"bzip2-1.0.6",
"0BSD"
] | null | null | null | Lib/site-packages/qutebrowser/mainwindow/statusbar/command.py | fochoao/cpython | 3dc84b260e5bced65ebc2c45c40c8fa65f9b5aa9 | [
"bzip2-1.0.6",
"0BSD"
] | 20 | 2021-05-03T18:02:23.000Z | 2022-03-12T12:01:04.000Z | Lib/site-packages/qutebrowser/mainwindow/statusbar/command.py | fochoao/cpython | 3dc84b260e5bced65ebc2c45c40c8fa65f9b5aa9 | [
"bzip2-1.0.6",
"0BSD"
] | null | null | null | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2021 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <https://www.gnu.org/licenses/>.
"""The commandline in the statusbar."""
from PyQt5.QtCore import pyqtSignal, pyqtSlot, Qt, QSize
from PyQt5.QtGui import QKeyEvent
from PyQt5.QtWidgets import QSizePolicy, QWidget
from qutebrowser.keyinput import modeman, modeparsers
from qutebrowser.api import cmdutils
from qutebrowser.misc import cmdhistory, editor
from qutebrowser.misc import miscwidgets as misc
from qutebrowser.utils import usertypes, log, objreg, message, utils
from qutebrowser.config import config
class Command(misc.MinimalLineEditMixin, misc.CommandLineEdit):

    """The commandline part of the statusbar.

    Attributes:
        _win_id: The window ID this widget is associated with.

    Signals:
        got_cmd: Emitted when a command is triggered by the user.
                 arg: The command string and also potentially the count.
        got_search: Emitted when a search should happen.
        clear_completion_selection: Emitted before the completion widget is
                                    hidden.
        hide_completion: Emitted when the completion widget should be hidden.
        update_completion: Emitted when the completion should be shown/updated.
        show_cmd: Emitted when command input should be shown.
        hide_cmd: Emitted when command input can be hidden.
    """

    # Overloaded signal: emitted with just the command text, or with
    # (text, count) when a count should be applied.
    got_cmd = pyqtSignal([str], [str, int])
    got_search = pyqtSignal(str, bool)  # text, reverse
    clear_completion_selection = pyqtSignal()
    hide_completion = pyqtSignal()
    update_completion = pyqtSignal()
    show_cmd = pyqtSignal()
    hide_cmd = pyqtSignal()

    def __init__(self, *, win_id: int,
                 private: bool,
                 parent: QWidget = None) -> None:
        misc.CommandLineEdit.__init__(self, parent=parent)
        misc.MinimalLineEditMixin.__init__(self)
        self._win_id = win_id
        if not private:
            # Wire this widget's history to the persisted global command
            # history (skipped for private windows so nothing is recorded).
            command_history = objreg.get('command-history')
            self.history.history = command_history.data
            self.history.changed.connect(command_history.changed)
        self.setSizePolicy(QSizePolicy.MinimumExpanding, QSizePolicy.Ignored)
        # Completion follows both the cursor and the text; geometry follows
        # text length (see sizeHint); searches update live while typing.
        self.cursorPositionChanged.connect(self.update_completion)
        self.textChanged.connect(self.update_completion)
        self.textChanged.connect(self.updateGeometry)
        self.textChanged.connect(self._incremental_search)

    def _handle_search(self) -> bool:
        """Check if the currently entered text is a search, and if so, run it.

        Return:
            True if a search was executed, False otherwise.
        """
        if self.prefix() == '/':
            self.got_search.emit(self.text()[1:], False)
            return True
        elif self.prefix() == '?':
            # '?' searches backwards (reverse=True).
            self.got_search.emit(self.text()[1:], True)
            return True
        else:
            return False

    def prefix(self) -> str:
        """Get the currently entered command prefix.

        Returns one of the start characters (':', '/', '?') or '' when the
        commandline is empty or doesn't start with one.
        """
        text = self.text()
        if not text:
            return ''
        elif text[0] in modeparsers.STARTCHARS:
            return text[0]
        else:
            return ''

    def set_cmd_text(self, text: str) -> None:
        """Preset the statusbar to some text.

        Args:
            text: The text to set as string.
        """
        self.setText(text)
        log.modes.debug("Setting command text, focusing {!r}".format(self))
        modeman.enter(self._win_id, usertypes.KeyMode.command, 'cmd focus')
        self.setFocus()
        self.show_cmd.emit()

    @cmdutils.register(instance='status-command', name='set-cmd-text',
                       scope='window', maxsplit=0)
    @cmdutils.argument('count', value=cmdutils.Value.count)
    def set_cmd_text_command(self, text: str,
                             count: int = None,
                             space: bool = False,
                             append: bool = False,
                             run_on_count: bool = False) -> None:
        """Preset the statusbar to some text.

        //

        Wrapper for set_cmd_text to check the arguments and allow multiple
        strings which will get joined.

        Args:
            text: The commandline to set.
            count: The count if given.
            space: If given, a space is added to the end.
            append: If given, the text is appended to the current text.
            run_on_count: If given with a count, the command is run with the
                          given count rather than setting the command text.
        """
        if space:
            text += ' '
        if append:
            if not self.text():
                raise cmdutils.CommandError("No current text!")
            text = self.text() + text

        # The final text must still start with ':', '/' or '?'.
        if not text or text[0] not in modeparsers.STARTCHARS:
            raise cmdutils.CommandError(
                "Invalid command text '{}'.".format(text))
        if run_on_count and count is not None:
            self.got_cmd[str, int].emit(text, count)
        else:
            self.set_cmd_text(text)

    @cmdutils.register(instance='status-command',
                       modes=[usertypes.KeyMode.command], scope='window')
    def command_history_prev(self) -> None:
        """Go back in the commandline history."""
        try:
            if not self.history.is_browsing():
                # First press starts browsing, filtered by the current text.
                item = self.history.start(self.text().strip())
            else:
                item = self.history.previtem()
        except (cmdhistory.HistoryEmptyError,
                cmdhistory.HistoryEndReachedError):
            return
        if item:
            self.set_cmd_text(item)

    @cmdutils.register(instance='status-command',
                       modes=[usertypes.KeyMode.command], scope='window')
    def command_history_next(self) -> None:
        """Go forward in the commandline history."""
        if not self.history.is_browsing():
            # Going "forward" only makes sense after command_history_prev.
            return
        try:
            item = self.history.nextitem()
        except cmdhistory.HistoryEndReachedError:
            return
        if item:
            self.set_cmd_text(item)

    @cmdutils.register(instance='status-command',
                       modes=[usertypes.KeyMode.command], scope='window')
    def command_accept(self, rapid: bool = False) -> None:
        """Execute the command currently in the commandline.

        Args:
            rapid: Run the command without closing or clearing the command bar.
        """
        was_search = self._handle_search()
        text = self.text()
        # Commands deliberately prefixed with ': ' (a space) stay out of the
        # history, mirroring common shell behaviour.
        if not (self.prefix() == ':' and text[1:].startswith(' ')):
            self.history.append(text)
        if not rapid:
            modeman.leave(self._win_id, usertypes.KeyMode.command,
                          'cmd accept')
        if not was_search:
            # Strip the leading ':' before handing the command on.
            self.got_cmd[str].emit(text[1:])

    @cmdutils.register(instance='status-command', scope='window')
    def edit_command(self, run: bool = False) -> None:
        """Open an editor to modify the current command.

        Args:
            run: Run the command if the editor exits successfully.
        """
        ed = editor.ExternalEditor(parent=self)

        def callback(text: str) -> None:
            """Set the commandline to the edited text."""
            if not text or text[0] not in modeparsers.STARTCHARS:
                message.error('command must start with one of {}'
                              .format(modeparsers.STARTCHARS))
                return
            self.set_cmd_text(text)
            if run:
                self.command_accept()

        ed.file_updated.connect(callback)
        ed.edit(self.text())

    @pyqtSlot(usertypes.KeyMode)
    def on_mode_left(self, mode: usertypes.KeyMode) -> None:
        """Clear up when command mode was left.

        - Clear the statusbar text if it's explicitly unfocused.
        - Clear completion selection
        - Hide completion

        Args:
            mode: The mode which was left.
        """
        if mode == usertypes.KeyMode.command:
            self.setText('')
            self.history.stop()
            self.hide_cmd.emit()
            self.clear_completion_selection.emit()
            self.hide_completion.emit()

    def setText(self, text: str) -> None:
        """Extend setText to set prefix and make sure the prompt is ok."""
        if not text:
            pass
        elif text[0] in modeparsers.STARTCHARS:
            super().set_prompt(text[0])
        else:
            # Callers must route free-form text through set_cmd_text_command,
            # which validates the prefix — reaching this is a programming bug.
            raise utils.Unreachable("setText got called with invalid text "
                                    "'{}'!".format(text))
        super().setText(text)

    def keyPressEvent(self, e: QKeyEvent) -> None:
        """Override keyPressEvent to ignore Return key presses.

        If this widget is focused, we are in passthrough key mode, and
        Enter/Shift+Enter/etc. will cause QLineEdit to think it's finished
        without command_accept to be called.
        """
        text = self.text()
        # Backspace on a lone prefix character leaves command mode entirely.
        if text in modeparsers.STARTCHARS and e.key() == Qt.Key_Backspace:
            e.accept()
            modeman.leave(self._win_id, usertypes.KeyMode.command,
                          'prefix deleted')
            return

        if e.key() == Qt.Key_Return:
            e.ignore()
            return
        else:
            super().keyPressEvent(e)

    def sizeHint(self) -> QSize:
        """Dynamically calculate the needed size."""
        height = super().sizeHint().height()
        text = self.text()
        if not text:
            # Use a one-character placeholder so the empty widget keeps a
            # sane minimum width.
            text = 'x'
        width = self.fontMetrics().width(text)
        return QSize(width, height)

    @pyqtSlot()
    def _incremental_search(self) -> None:
        """Run the current search on every keystroke, if enabled."""
        if not config.val.search.incremental:
            return

        self._handle_search()
| 36.527778 | 79 | 0.603612 |
9003999c457416899b2a041f1f30a74d2ef82358 | 2,155 | py | Python | src/signatures/glcm.py | imagexdsearch/imagesearch | 7f4d18906d6ebd9f5d7b4e0db4bc6c7e675fbb1d | [
"BSD-2-Clause"
] | null | null | null | src/signatures/glcm.py | imagexdsearch/imagesearch | 7f4d18906d6ebd9f5d7b4e0db4bc6c7e675fbb1d | [
"BSD-2-Clause"
] | null | null | null | src/signatures/glcm.py | imagexdsearch/imagesearch | 7f4d18906d6ebd9f5d7b4e0db4bc6c7e675fbb1d | [
"BSD-2-Clause"
] | null | null | null | '''
Created on 4 de mar de 2016
Compute the GLCM features with or without mask
@author: romue
'''
import numpy as np
from skimage.feature import greycomatrix,greycoprops
def glcm(imagem, name, label, d, grayLevels_new, grayLevels_old):
    """Isotropic GLCM texture features of *imagem* at pixel distance *d*.

    The image is requantised from 2**grayLevels_old to 2**grayLevels_new
    grey levels when the depths differ, then the four directional
    co-occurrence matrices (0, 45, 90 and 135 degrees) are averaged before
    the six Haralick properties are extracted.

    Returns (features, name, label) so the call slots into dataset
    pipelines unchanged.
    """
    if grayLevels_new != grayLevels_old:
        imagem = categorizar(imagem, grayLevels_new, grayLevels_old)
    levels = 2 ** grayLevels_new
    matrices = [
        greycomatrix(imagem, [d], [angle], levels=levels, normed=True)
        for angle in (0, np.pi / 4, np.pi / 2, 3 * np.pi / 4)
    ]
    matrix = (matrices[0] + matrices[1] + matrices[2] + matrices[3]) / 4  # isotropic glcm
    prop_names = ('contrast', 'dissimilarity', 'homogeneity',
                  'energy', 'correlation', 'ASM')
    props = np.zeros(6)
    for idx, prop_name in enumerate(prop_names):
        props[idx] = greycoprops(matrix, prop_name)
    return props, name, label
#function to change the number of gray scale values
# NOTE(review): earlier fixed-range (0..255) requantiser, kept disabled as a
# no-op string literal; superseded by categorizar(image, new, old) below.
"""
def categorizar(imagem,nbits=8):
    L,C = imagem.shape;
    limites = np.arange(0,256,256/nbits)
    for z in range(0,len(limites)-1):
        aux = ((imagem >= limites[z]) & (imagem < limites[z+1]))
        imagem[aux==True] = z
    aux = (imagem >= limites[nbits-1])
    imagem[aux==True] = nbits-1
    return imagem
"""
def categorizar(image, new, old):
    """Requantise *image* from 2**old to 2**new grey levels.

    Each pixel is linearly rescaled from [0, 2**old - 1] to
    [0, 2**new - 1] and truncated to an integer — the same arithmetic as
    the original per-pixel double loop, but vectorised. The input array is
    not modified.

    Args:
        image: 2-D array of integer grey values.
        new: target bit depth.
        old: source bit depth.

    Returns:
        New 2-D integer array with values in [0, 2**new - 1].
    """
    scaled = image.astype(np.float64) * ((2 ** new) - 1) / ((2 ** old) - 1)
    # astype truncates toward zero, exactly like the original int conversion.
    # The original used np.array(..., dtype=np.int); np.int was removed in
    # NumPy 1.24+, so use the builtin int dtype instead.
    return scaled.astype(int)
def teste():
    # Ad-hoc smoke test: load a machine-specific local ALS image collection,
    # preprocess each image via the external pp2 module and extract GLCM
    # features. Only runnable on the author's machine; invoked from the
    # commented-out call at module bottom.
    from skimage.io import imread_collection
    import pp2
    im = imread_collection('/Users/romuere/Desktop/als/kyager_data_raw/2011Jan28-BrentCarey/*')
    for i in im:
        img = pp2.preprocessing(i, '', 0)[0]
        features = glcm(img, '', 0, 1, 8,8)[0]
        #for f in features:
        #    print(f)
#teste()
| 34.758065 | 96 | 0.634803 |
35d0356f1cdabe73c660d1db99ee4a1b0963ad37 | 2,638 | py | Python | Proj/2048/demo.py | PiscesDream/Ideas | 9ba710e62472f183ae4525f35659cd265c71392e | [
"Apache-2.0"
] | null | null | null | Proj/2048/demo.py | PiscesDream/Ideas | 9ba710e62472f183ae4525f35659cd265c71392e | [
"Apache-2.0"
] | null | null | null | Proj/2048/demo.py | PiscesDream/Ideas | 9ba710e62472f183ae4525f35659cd265c71392e | [
"Apache-2.0"
] | null | null | null | from _2048 import _2048
from numpy.random import choice, randint
from numpy import argmax, all
# NOTE(review): Python 2 print statements — this module is py2-only as-is.
def show(game): #visualize the chessboard
    print game.get_point() #or just game.point
    print game.get_board() #or just game.board
def judge_function(board):
    # Placeholder for a board-evaluation heuristic; currently unimplemented
    # and unused by the strategies below.
    pass
def greedy(board, u, d, l, r):
    """Pick the move with the highest immediate score gain.

    Args:
        board: current board (numpy array).
        u, d, l, r: (next_board, delta_points) predictions for
            up/down/left/right, matching the game's move codes 0..3.

    Returns:
        Index (0-3) of the best-scoring valid move; moves that leave the
        board unchanged (invalid moves) are never chosen while a valid
        move exists. Ties go to the lowest index (argmax semantics).
    """
    candidates = (u, d, l, r)
    # +1 keeps a zero-gain but valid move preferable to an invalid one.
    weights = [delta + 1 for _, delta in candidates]
    for idx, (next_board, _) in enumerate(candidates):
        if (board == next_board).all():
            weights[idx] = 0  # move does nothing -> never prefer it
    # The original also had an unreachable randomized `choice` return after
    # this; it has been removed along with dead commented-out code.
    return argmax(weights)
if __name__ == '__main__':
    game = _2048(length = 4) #create a length * length board
    # Interactive play loop, kept for reference as a no-op string literal
    # (it contains Python 2 `raw_input`).
    '''
    while True:
        show(game)
        order = raw_input()
        if order == 'reset':
            game.reset() #reset the game
        elif order == 'quit':
            break
        else:
            game.move(order) #here we set {'u'(up):0, 'd'(down):1, 'l'(left):2, 'r'(right):3}
            #if the move is valid, it will return the total point until now,
            # otherwise it will return -1 when the move is invalid
            # and -2 when game over
    '''
    # Random-weighted strategy experiment, also disabled (Python 2 code
    # inside a no-op string literal).
    '''
    #===================the greedy strategy===================== #believe it or not, it once reachs 4400+
    game.reset()
    maximum = 0
    while True:
        t = []
        for i in xrange(4):
            t.append( game[i][1] ) #game[ind] return the prediction(board, delta_point) after the movement ind without execute it
        t = map(lambda x: x+4, t) #avoid the situation that sum(t) == 0
        t = map(lambda x: float(x)/sum(t), t)
        if game.move(int(choice(range(4), p = t))) == -2:
            if maximum < game.point:
                maximum = game.point
            game.reset()
        print maximum
    show(game)
    '''
    # Benchmark the greedy policy over 100 games; the previous random-weighted
    # variant is kept commented out for comparison.
    # game.mul_test(test_num = 100, f = lambda x: choice([0, 1, 2, 3], p = [0.4, 0.15, 0.05, 0.4]) )
    game.mul_test(test_num = 100, f = greedy, addition_arg = True)

    #-------------------------------------------------------------------------
    #max round: 0713  |  avr round: 270.79
    #max point: 11836 |  avr point: 3306.44
    #max block: 1024  |  avr block: 261.76
| 32.170732 | 138 | 0.444276 |
70504d513c93d92e0bfc570769212ad5fb6a335c | 4,566 | py | Python | qa/rpc-tests/test_framework/netutil.py | weiqitong/LMCOIN | 6feed8ba8267d79cd2d4a793ada08ad8de7cf57e | [
"MIT"
] | null | null | null | qa/rpc-tests/test_framework/netutil.py | weiqitong/LMCOIN | 6feed8ba8267d79cd2d4a793ada08ad8de7cf57e | [
"MIT"
] | null | null | null | qa/rpc-tests/test_framework/netutil.py | weiqitong/LMCOIN | 6feed8ba8267d79cd2d4a793ada08ad8de7cf57e | [
"MIT"
] | null | null | null | #!/usr/bin/env python2
# Copyright (c) 2014-2015 The Lmcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Linux network utilities
import sys
import socket
import fcntl
import struct
import array
import os
import binascii
# Roughly based on http://voorloopnul.com/blog/a-python-netstat-in-less-than-100-lines-of-code/ by Ricardo Pascal
# TCP connection states as they appear (hex-encoded) in the `st` column of
# /proc/net/tcp — values of the kernel's tcp_states enum.
STATE_ESTABLISHED = '01'
STATE_SYN_SENT = '02'
STATE_SYN_RECV = '03'
STATE_FIN_WAIT1 = '04'
STATE_FIN_WAIT2 = '05'
STATE_TIME_WAIT = '06'
STATE_CLOSE = '07'
STATE_CLOSE_WAIT = '08'
STATE_LAST_ACK = '09'
STATE_LISTEN = '0A'
STATE_CLOSING = '0B'
def get_socket_inodes(pid):
    """Return the socket inode numbers held open by process *pid*.

    Reads /proc/<pid>/fd, so this only works on Linux and only for
    processes the caller is allowed to inspect.
    """
    fd_dir = '/proc/%i/fd' % pid
    # Socket fds read back as symlinks of the form 'socket:[<inode>]'.
    links = (os.readlink(os.path.join(fd_dir, name))
             for name in os.listdir(fd_dir))
    return [int(link[8:-1]) for link in links if link.startswith('socket:')]
def _remove_empty(array):
return [x for x in array if x !='']
def _convert_ip_port(array):
host,port = array.split(':')
# convert host from mangled-per-four-bytes form as used by kernel
host = binascii.unhexlify(host)
host_out = ''
for x in range(0, len(host)/4):
(val,) = struct.unpack('=I', host[x*4:(x+1)*4])
host_out += '%08x' % val
return host_out,int(port,16)
def netstat(typ='tcp'):
    """Parse /proc/net/<typ> and return one record per connection.

    Each record is [slot_id, (local_host, local_port),
    (remote_host, remote_port), state, inode]. Run as superuser to see
    every process's sockets.
    """
    with open('/proc/net/' + typ, 'r') as f:
        lines = f.readlines()[1:]  # drop the header row
    entries = []
    for line in lines:
        fields = _remove_empty(line.split(' '))
        entries.append([
            fields[0],                    # slot id
            _convert_ip_port(fields[1]),  # local address
            _convert_ip_port(fields[2]),  # remote address
            fields[3],                    # connection state (hex)
            int(fields[9]),               # inode, used to match a pid
        ])
    return entries
def get_bind_addrs(pid):
    """Get bind addresses as (host, port) tuples for process *pid*.

    Cross-references the process's socket inodes against the listening
    entries of /proc/net/tcp and /proc/net/tcp6.
    """
    inodes = set(get_socket_inodes(pid))
    return [conn[1]
            for conn in netstat('tcp') + netstat('tcp6')
            if conn[3] == STATE_LISTEN and conn[4] in inodes]
# from: http://code.activestate.com/recipes/439093/
def all_interfaces():
    '''
    Return all interfaces that are up
    '''
    # NOTE(review): Linux-only (SIOCGIFCONF ioctl) and Python-2-only as
    # written: array.array('B', <str>) and names.tostring() both fail on
    # Python 3, where a bytes buffer and .tobytes() would be needed.
    is_64bits = sys.maxsize > 2**32
    # sizeof(struct ifreq): 40 bytes on 64-bit, 32 on 32-bit kernels.
    struct_size = 40 if is_64bits else 32
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    max_possible = 8 # initial value
    while True:
        # NOTE(review): `bytes` here shadows the builtin of the same name.
        bytes = max_possible * struct_size
        names = array.array('B', '\0' * bytes)
        outbytes = struct.unpack('iL', fcntl.ioctl(
            s.fileno(),
            0x8912,  # SIOCGIFCONF
            struct.pack('iL', bytes, names.buffer_info()[0])
        ))[0]
        if outbytes == bytes:
            # Buffer may have been too small to hold every interface; retry
            # with double the size until the kernel returns fewer bytes.
            max_possible *= 2
        else:
            break
    namestr = names.tostring()
    # Each struct ifreq starts with a 16-byte NUL-padded name; the IPv4
    # address lives at offset 20..24 within the record.
    return [(namestr[i:i+16].split('\0', 1)[0],
             socket.inet_ntoa(namestr[i+20:i+24]))
            for i in range(0, outbytes, struct_size)]
def addr_to_hex(addr):
    """Convert string IPv4 or IPv6 address to binary address as returned by
    get_bind_addrs.

    Very naive implementation that certainly doesn't work for all IPv6
    variants.
    """
    if '.' in addr:  # IPv4 dotted quad
        octets = [int(part) for part in addr.split('.')]
    elif ':' in addr:  # IPv6
        halves = [[], []]  # bytes before and after the '::' gap
        gap = 0            # 0 while filling the prefix, 1 once '::' is seen
        groups = addr.split(':')
        last = len(groups) - 1
        for i, group in enumerate(groups):
            if group == '':
                if i == 0 or i == last:  # artefact of a leading/trailing ':'
                    continue
                gap += 1  # the '::' zero-run separator
                assert gap < 2
            else:  # two bytes per 16-bit group
                val = int(group, 16)
                halves[gap] += [val >> 8, val & 0xff]
        pad = 16 - len(halves[0]) - len(halves[1])
        assert (gap == 0 and pad == 0) or (gap == 1 and pad > 0)
        octets = halves[0] + ([0] * pad) + halves[1]
    else:
        raise ValueError('Could not parse address %s' % addr)
    return binascii.hexlify(bytearray(octets))
| 32.614286 | 113 | 0.599869 |
9440b45aff2d1d2bdbe6c7eafe228a9613e1ce57 | 65 | py | Python | tests/asl_Map/workflow_map_state_delivery_test/checkAvailability.py | lhoste-bell/knix | 8d3f0ed3b8a09e62b7d5098b10851b9fa42d7ce1 | [
"Apache-2.0"
] | 167 | 2020-04-20T22:16:29.000Z | 2022-03-15T22:53:43.000Z | tests/asl_Map/workflow_map_state_delivery_test/checkAvailability.py | lhoste-bell/knix | 8d3f0ed3b8a09e62b7d5098b10851b9fa42d7ce1 | [
"Apache-2.0"
] | 98 | 2020-05-07T03:34:44.000Z | 2022-01-04T21:30:49.000Z | tests/asl_Map/workflow_map_state_delivery_test/checkAvailability.py | lhoste-bell/knix | 8d3f0ed3b8a09e62b7d5098b10851b9fa42d7ce1 | [
"Apache-2.0"
] | 20 | 2020-04-29T14:45:29.000Z | 2021-09-26T09:51:04.000Z | #!/usr/bin/python
def handle(event, context):
    """Echo handler: return the incoming event unchanged (context is unused)."""
    payload = event
    return payload
| 10.833333 | 27 | 0.676923 |
11fb6b61af559048fcda7138eda2287d0ecd558d | 3,781 | py | Python | firebase_admin/instance_id.py | kushal12345/firebase-admin-python | 14e5dc4721f9908e132f137c87bf0dc6b8709f63 | [
"Apache-2.0"
] | 4 | 2019-02-17T17:52:55.000Z | 2020-05-06T06:45:56.000Z | firebase_admin/instance_id.py | kushal12345/firebase-admin-python | 14e5dc4721f9908e132f137c87bf0dc6b8709f63 | [
"Apache-2.0"
] | null | null | null | firebase_admin/instance_id.py | kushal12345/firebase-admin-python | 14e5dc4721f9908e132f137c87bf0dc6b8709f63 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Firebase Instance ID module.
This module enables deleting instance IDs associated with Firebase projects.
"""
import requests
import six
from firebase_admin import _http_client
from firebase_admin import _utils
# Base URL of the backend instance ID endpoint used by _InstanceIdService.
_IID_SERVICE_URL = 'https://console.firebase.google.com/v1/'
# Attribute key under which the service instance is registered via _utils.get_app_service.
_IID_ATTRIBUTE = '_iid'
def _get_iid_service(app):
    """Return the _InstanceIdService associated with *app*.

    Delegates to _utils.get_app_service, which presumably lazily creates and
    caches one service per app under the _IID_ATTRIBUTE key — TODO confirm.
    """
    return _utils.get_app_service(app, _IID_ATTRIBUTE, _InstanceIdService)
def delete_instance_id(instance_id, app=None):
    """Deletes the specified instance ID from Firebase.

    This can be used to delete an instance ID and associated user data from a Firebase project,
    pursuant to the General Data Protection Regulation (GDPR).

    Args:
        instance_id: A non-empty instance ID string.
        app: An App instance (optional). When omitted, the default app is used.

    Raises:
        ApiCallError: If an error occurs while invoking the backend instance ID service.
        ValueError: If the specified instance ID or app is invalid.
    """
    _get_iid_service(app).delete_instance_id(instance_id)
class ApiCallError(Exception):
    """Raised when a call to the Firebase instance ID service fails.

    The human-readable message becomes the exception text; the underlying
    low-level error is preserved on ``detail`` for callers that need it.
    """

    def __init__(self, message, error):
        super(ApiCallError, self).__init__(message)
        self.detail = error
class _InstanceIdService(object):
    """Talks to the remote Firebase instance ID backend over HTTP."""

    # Maps HTTP status codes to the human-readable messages surfaced to callers.
    error_codes = {
        400: 'Malformed instance ID argument.',
        401: 'Request not authorized.',
        403: 'Project does not match instance ID or the client does not have '
             'sufficient privileges.',
        404: 'Failed to find the instance ID.',
        409: 'Already deleted.',
        429: 'Request throttled out by the backend server.',
        500: 'Internal server error.',
        503: 'Backend servers are over capacity. Try again later.'
    }

    def __init__(self, app):
        pid = app.project_id
        if not pid:
            raise ValueError(
                'Project ID is required to access Instance ID service. Either set the projectId '
                'option, or use service account credentials. Alternatively, set the '
                'GOOGLE_CLOUD_PROJECT environment variable.')
        self._project_id = pid
        # JSON HTTP client authenticated with the app's credential.
        self._client = _http_client.JsonHttpClient(
            base_url=_IID_SERVICE_URL,
            credential=app.credential.get_credential())

    def delete_instance_id(self, instance_id):
        """Issue the DELETE call for *instance_id*; wrap transport errors in ApiCallError."""
        if not instance_id or not isinstance(instance_id, six.string_types):
            raise ValueError('Instance ID must be a non-empty string.')
        path = 'project/{0}/instanceId/{1}'.format(self._project_id, instance_id)
        try:
            self._client.request('delete', path)
        except requests.exceptions.RequestException as error:
            raise ApiCallError(self._extract_message(instance_id, error), error)

    def _extract_message(self, instance_id, error):
        """Translate a transport error into a user-facing message."""
        if error.response is None:
            return str(error)
        reason = self.error_codes.get(error.response.status_code)
        if reason is None:
            return str(error)
        return 'Instance ID "{0}": {1}'.format(instance_id, reason)
| 36.355769 | 97 | 0.694525 |
8f1a34b3326ee64f975e7b3c3f170c4a407f1ec5 | 18,682 | py | Python | NSFW_photo_classification.py | i2cy/lewdity_CNN | f616a040c90069b0bd36501794abf1f6ab6ff55b | [
"MIT"
] | null | null | null | NSFW_photo_classification.py | i2cy/lewdity_CNN | f616a040c90069b0bd36501794abf1f6ab6ff55b | [
"MIT"
] | null | null | null | NSFW_photo_classification.py | i2cy/lewdity_CNN | f616a040c90069b0bd36501794abf1f6ab6ff55b | [
"MIT"
] | null | null | null | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# Author: i2cy(i2cy@outlook.com)
# Filename: NSFW_classification
# Created on: 2020/8/28
import os, time, psutil
import random
import pathlib
import numpy as np
# Silence TensorFlow C++ log output (warnings and below).
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import tensorflow as tf
# RTX hardware compatibility: let the first GPU grow its memory allocation on demand.
tf.config.experimental.set_memory_growth(
    tf.config.list_physical_devices('GPU')[0], True)
DATASET_ROOT = "datasets/nsfw-image-classification"  # root folder holding train/ and test/ images
TEST_RATE = 0.15            # train/test split ratio (only used when no explicit test set is passed)
BATCH_SIZE = 16             # samples per training batch
EPOCHES = 5                 # epochs for the initial training run
BUFF_RATE = 0.1             # fraction of free RAM allotted to tf.data shuffle/prefetch buffers
LEARNING_RATE = 0.0001      # Adam learning rate
MODEL_FILE = "Models/NSFW_photo_model.h5"  # where the trained model is saved/loaded
NAME = "NSFW_photo_detect"                 # model name
class customNN:
    """Generic image-classification wrapper around a tf.keras model.

    Responsibilities: split/shuffle datasets into tf.data pipelines sized by
    available RAM, build the network (Xception backbone + dense head), train
    with optional TensorBoard/checkpoint callbacks, evaluate and predict.
    """

    def __init__(self, model_name="MLP"):
        self.name = model_name
        self.train_db = None              # tf.data pipeline used for training
        self.test_db = None               # tf.data pipeline used for validation
        self.model = None
        self.train_size = 0               # number of training samples
        self.test_size = 0                # number of validation samples
        self.data_shape = []              # shape of a single input sample
        self.batch_size = 8
        self.train_history = None         # keras History of the last fit() call
        self.tensorboard_enable = False
        self.log_root = "./tensorflow_log"
        self.callbacks = []
        self.callback_file_writer = None
        self.base_model = None            # backbone network (set by init_model)
        self.epoch = 0                    # epochs completed so far (for resuming)
        self.model_file = "{}.h5".format(self.name)
        self.autosave = False
        self.output_counts = 0            # number of output heads detected from labels

    def _get_freeRAM(self):
        """Return the amount of currently free system RAM in bytes."""
        return psutil.virtual_memory().free

    def _init_tensorboard(self):
        """Create a timestamped TensorBoard callback plus a default summary writer."""
        log_dir = os.path.join(self.log_root,
                               time.strftime("%Y%m%d-%H:%M:%S_") + self.name)
        tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir,
                                                              histogram_freq=1)
        self.callbacks.append(tensorboard_callback)
        self.callback_file_writer = tf.summary.create_file_writer(
            os.path.join(log_dir, "train"))
        self.callback_file_writer.set_as_default()

    def load_dataset(self, trainset, testset=None,
                     mapFunc=None, testRate=0.15, batchSize=8,
                     shufflePercentage=0.3, mapFuncTest=None,
                     mapFuncLabel=None, mapFuncLabelTest=None):
        """Build shuffled/batched/prefetched tf.data pipelines.

        trainset/testset are (data, labels) tuples.  When testset is None the
        trainset is split randomly using testRate.  mapFunc/mapFuncTest and
        mapFuncLabel/mapFuncLabelTest are optional per-element preprocessing
        functions for the features and labels respectively.  Shuffle/prefetch
        buffer sizes are derived from shufflePercentage of the free RAM.
        """
        self.batch_size = batchSize
        if testset is None:
            # Randomly split the provided trainset into train/test parts.
            datasets = [ele for ele in trainset]
            train_size = len(datasets[0]) - int(len(datasets[0]) * testRate)
            all_indexs = list(range(len(datasets[0])))
            random.shuffle(all_indexs)
            features = []
            labels = []
            # Multi-head labels: datasets[1] is a list/tuple of per-head label lists.
            if isinstance(datasets[1][0], (list, tuple)) and len(datasets[1][0]) == len(all_indexs):
                for _ in datasets[1]:
                    labels.append([])
                    self.output_counts += 1
                for index in all_indexs[:train_size]:
                    features.append(datasets[0][index])
                    for i, _ in enumerate(datasets[1]):
                        labels[i].append(datasets[1][i][index])
                if isinstance(labels[0], list):
                    labels = tuple(labels)
            else:
                self.output_counts += 1
                for index in all_indexs[:train_size]:
                    features.append(datasets[0][index])
                    labels.append(datasets[1][index])
            trainset = (features, labels)

            features = []
            labels = []
            if isinstance(datasets[1][0], (list, tuple)) and len(datasets[1][0]) == len(all_indexs):
                for _ in datasets[1]:
                    labels.append([])
                for index in all_indexs[train_size:]:
                    features.append(datasets[0][index])
                    for i, _ in enumerate(datasets[1]):
                        labels[i].append(datasets[1][i][index])
                if isinstance(labels[0], list):
                    labels = tuple(labels)
            else:
                for index in all_indexs[train_size:]:
                    features.append(datasets[0][index])
                    labels.append(datasets[1][index])
            testset = (features, labels)

        self.data_shape = tf.constant(trainset[0][0]).shape
        self.train_size = len(trainset[0])
        self.test_size = len(testset[0])
        print("trainset sample number: {}".format(str(self.train_size)))
        print("testset sample number: {}".format(str(self.test_size)))

        if mapFunc is None:
            if mapFuncLabel is None:
                train_db = tf.data.Dataset.zip((tf.data.Dataset.from_tensor_slices(trainset[0]),
                                                tf.data.Dataset.from_tensor_slices(trainset[1])))
                test_db = tf.data.Dataset.zip((tf.data.Dataset.from_tensor_slices(testset[0]),
                                               tf.data.Dataset.from_tensor_slices(testset[1])))
            else:
                if mapFuncLabelTest is None:
                    mapFuncLabelTest = mapFuncLabel
                train_db = tf.data.Dataset.zip((
                    tf.data.Dataset.from_tensor_slices(trainset[0]), tf.data.Dataset.from_tensor_slices(
                        trainset[1]).map(mapFuncLabel, num_parallel_calls=tf.data.experimental.AUTOTUNE)))
                test_db = tf.data.Dataset.zip((
                    tf.data.Dataset.from_tensor_slices(testset[0]), tf.data.Dataset.from_tensor_slices(
                        testset[1]).map(mapFuncLabelTest, num_parallel_calls=tf.data.experimental.AUTOTUNE)))
        else:
            if mapFuncTest is None:
                mapFuncTest = mapFunc
            self.data_shape = mapFunc(trainset[0][0]).shape
            train_db = tf.data.Dataset.from_tensor_slices(trainset[0])
            train_db = train_db.map(mapFunc, num_parallel_calls=tf.data.experimental.AUTOTUNE)
            test_db = tf.data.Dataset.from_tensor_slices(testset[0])
            test_db = test_db.map(mapFuncTest)
            if mapFuncLabel is None:
                train_db = tf.data.Dataset.zip((
                    train_db, tf.data.Dataset.from_tensor_slices(trainset[1])))
                test_db = tf.data.Dataset.zip((
                    test_db, tf.data.Dataset.from_tensor_slices(testset[1])))
            else:
                if mapFuncLabelTest is None:
                    mapFuncLabelTest = mapFuncLabel
                train_db = tf.data.Dataset.zip((
                    train_db, tf.data.Dataset.from_tensor_slices(
                        trainset[1]).map(mapFuncLabel, num_parallel_calls=tf.data.experimental.AUTOTUNE)))
                # BUG FIX: the original zipped train_db with the test labels
                # here, producing a corrupt validation pipeline.
                test_db = tf.data.Dataset.zip((
                    test_db, tf.data.Dataset.from_tensor_slices(
                        testset[1]).map(mapFuncLabelTest, num_parallel_calls=tf.data.experimental.AUTOTUNE)))

        # Derive shuffle/prefetch buffer sizes from the available RAM.
        datasize = 1
        for size in self.data_shape:
            datasize *= size
        freeRAM = int(self._get_freeRAM() * shufflePercentage)
        print("free RAM size: {} MB".format(str(freeRAM // 1048576)))
        shuffle_MaxbuffSize = int((freeRAM * 0.8) // datasize)
        prefetch_buffSize = int((freeRAM * 0.2) // (datasize * self.batch_size))
        print("automatically allocated data buffer size: {} MB".format(str(shuffle_MaxbuffSize * datasize // 1048576)))
        shuffle_buffSize = min(shuffle_MaxbuffSize, self.train_size)
        train_db = train_db.shuffle(shuffle_buffSize).repeat().batch(self.batch_size).prefetch(prefetch_buffSize)
        shuffle_buffSize = min(shuffle_MaxbuffSize, self.test_size)
        test_db = test_db.shuffle(shuffle_buffSize).repeat().batch(self.batch_size).prefetch(prefetch_buffSize)
        self.train_db = train_db
        self.test_db = test_db

    def set_model_file(self, path):
        """Set the default path used by save_model/load_model and checkpoints."""
        self.model_file = path

    def enable_tensorboard(self, log_dir_root="./tensorflow_log"):
        """Enable TensorBoard logging for subsequent training runs."""
        self.log_root = log_dir_root
        self.tensorboard_enable = True

    def enable_checkpointAutosave(self, path=None):
        """Save the model to self.model_file after every epoch."""
        if path is not None:
            self.model_file = path
        checkpoint = tf.keras.callbacks.ModelCheckpoint(filepath=self.model_file)
        self.add_callback(checkpoint)
        self.autosave = True

    def add_callback(self, callback_func):
        """Register an extra keras callback; callbacks persist across train() calls."""
        self.callbacks.append(callback_func)

    def init_model(self):
        """Build the classifier: Xception backbone, global average pooling,
        two 2048-unit ReLU layers and a 3-class softmax head, then compile."""
        inputs = tf.keras.Input(shape=self.data_shape)
        # NOTE(review): the backbone input shape is hard-coded to 256x256x3
        # while `inputs` uses self.data_shape — confirm they always match.
        self.base_model = tf.keras.applications.Xception(input_shape=(256, 256, 3),
                                                         include_top=False,
                                                         weights="imagenet")
        x = self.base_model(inputs)
        x = tf.keras.layers.GlobalAveragePooling2D()(x)  # global average pooling
        x = tf.keras.layers.Dense(2048, activation="relu")(x)
        x = tf.keras.layers.Dense(2048, activation="relu")(x)
        out = tf.keras.layers.Dense(3, activation="softmax",
                                    name="out")(x)
        model = tf.keras.Model(inputs=inputs,
                               outputs=out,
                               name=self.name)
        self.model = model
        self.compile_model()

    def postProc_model(self):
        """Fine-tuning: unfreeze the last 33 backbone layers and recompile."""
        model = self.model
        fine_tune_at = -33
        self.base_model.trainable = True
        for layer in self.base_model.layers[:fine_tune_at]:
            layer.trainable = False
        # BUG FIX: the original compiled with binary_crossentropy, which does
        # not match the 3-class softmax head (integer labels) built in
        # init_model; use the same loss as compile_model.
        model.compile(optimizer="adam",
                      loss="sparse_categorical_crossentropy",
                      metrics=["acc"]
                      )
        self.model = model
        print(model.summary())

    def compile_model(self):
        """Compile with Adam + sparse categorical crossentropy (integer labels)."""
        self.model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=LEARNING_RATE),
                           loss="sparse_categorical_crossentropy",
                           metrics=["acc"]
                           )

    def save_model(self, path=None):
        """Save the model (HDF5) to *path* or the configured model_file."""
        if path is not None:
            self.model_file = path
        self.model.save(self.model_file)

    def load_model(self, path=None):
        """Load a saved model from *path* or the configured model_file and recompile."""
        if path is not None:
            self.model_file = path
        self.model = tf.keras.models.load_model(self.model_file, compile=True)
        self.compile_model()

    def train(self, epochs=100, verbose=1, validation=True):
        """Train for *epochs* additional epochs (resumable); Ctrl-C stops
        gracefully and, when autosave is enabled, reloads the checkpoint."""
        if self.tensorboard_enable and self.epoch == 0:
            self._init_tensorboard()
        try:
            # BUG FIX: keras treats `epochs` as the final epoch index when
            # initial_epoch is set; the original passed `epochs` directly and
            # bumped self.epoch by `epochs`, so any second train() call ran
            # zero epochs.  Train up to self.epoch + epochs instead.
            target_epoch = self.epoch + epochs
            if validation:
                self.train_history = self.model.fit(self.train_db,
                                                    epochs=target_epoch,
                                                    initial_epoch=self.epoch,
                                                    steps_per_epoch=self.train_size // self.batch_size,
                                                    validation_data=self.test_db,
                                                    validation_steps=self.test_size // self.batch_size,
                                                    callbacks=self.callbacks,
                                                    verbose=verbose
                                                    )
            else:
                self.train_history = self.model.fit(self.train_db,
                                                    epochs=target_epoch,
                                                    initial_epoch=self.epoch,
                                                    steps_per_epoch=self.train_size // self.batch_size,
                                                    callbacks=self.callbacks,
                                                    verbose=verbose
                                                    )
            self.epoch = target_epoch
        except KeyboardInterrupt:
            print("\ntraining process stopped manually")
        if self.autosave:
            # Restore the last checkpoint so self.model matches what is on disk.
            self.load_model(self.model_file)

    def evaluate(self):
        """Evaluate on the validation pipeline; return the keras metrics dict."""
        print("evaluating model with test datasets...")
        acc = self.model.evaluate(self.test_db, return_dict=True,
                                  steps=self.test_size // self.batch_size)
        return acc

    def predict(self, data):
        """Run inference; a single sample is auto-expanded to a batch of one."""
        if len(data.shape) != len(self.data_shape) + 1:
            data = tf.expand_dims(data, 0)
        res = self.model.predict(data)
        return res
def read_preprocess_image(img_path):  # tf.data map function for training samples
    """Decode an image file, random-flip, resize to 256x256 and scale to (-1, 1)."""
    raw = tf.io.read_file(img_path)
    if tf.image.is_jpeg(raw):
        decoded = tf.image.decode_jpeg(raw, channels=3)
    else:
        decoded = tf.image.decode_png(raw, channels=3)
    flipped = tf.image.random_flip_left_right(decoded)
    resized = tf.image.resize(flipped, [256, 256])
    scaled = tf.cast(resized, tf.float32)
    # Normalize pixel values into the (-1, 1) range.
    return scaled / 127.5 - 1
def read_preprocess_image_test(img_path):
    """Decode an image for validation: aspect-preserving resize to at least
    300px on the short side, random 256x256 crop, scale to (-1, 1).

    Unlike read_preprocess_image there is no random flip.
    """
    img = tf.io.read_file(img_path)
    if tf.image.is_jpeg(img):
        img = tf.image.decode_jpeg(img, channels=3)
    else:
        img = tf.image.decode_png(img, channels=3)
    # shape[0] is None when the static shape is unknown (graph tracing);
    # fall back to a plain 300x300 resize in that case.
    if img.shape[0] == None:
        img = tf.image.resize(img, [300, 300])
    else:
        if img.shape[0] <= 256 and img.shape[1] <= 256:
            # Too small for a 256 crop: upscale to 300x300.
            img = tf.image.resize(img, [300, 300])
        else:
            # Resize so the shorter side becomes 300 while keeping the aspect ratio.
            if img.shape[0] > img.shape[1]:
                rate = 300 / img.shape[1]
                img = tf.image.resize(img, [int(img.shape[0] * rate), 300])
            else:
                rate = 300 / img.shape[0]
                img = tf.image.resize(img, [300, int(img.shape[1] * rate)])
    img = tf.image.random_crop(img, [256, 256, 3])
    img = tf.cast(img, tf.float32)
    img = img / 127.5 - 1 # normalize pixel values into the (-1, 1) range
    return img
class predictor:
    """Wraps a trained customNN and maps argmax outputs back to label names."""

    def __init__(self, dnn):
        self.dnn = dnn
        self.labels_converter = []   # one {index: name} dict per output head
        self.pre_process_func = self._default_preprocess

    def _default_preprocess(self, data):
        """Identity preprocessing (no-op default)."""
        return data

    def load_labels(self, label_names):
        """label_names: list of label-name lists, e.g. [[labels head 1], [labels head 2]]."""
        for names in label_names:
            self.labels_converter.append(
                {position: title for position, title in enumerate(names)})

    def set_preprocess_func(self, func):
        """Install a custom preprocessing callable applied before prediction."""
        self.pre_process_func = func

    def predict(self, data):
        """Return the decoded label name for every output head."""
        raw = self.dnn.predict(self.pre_process_func(data))
        decoded = []
        for head, table in enumerate(self.labels_converter):
            decoded.append(table.get(tf.argmax(raw[head][0]).numpy()))
        return decoded
def read_preprocess_image_check(img_path):
    """Decode + resize + random-crop an image; used to verify a file is readable."""
    raw = tf.io.read_file(img_path)
    if tf.image.is_jpeg(raw):
        decoded = tf.image.decode_jpeg(raw, channels=3)
    else:
        decoded = tf.image.decode_png(raw, channels=3)
    resized = tf.image.resize(decoded, [300, 300])
    return tf.image.random_crop(resized, [256, 256, 3])
def check_data_safety(paths):
    """Try to decode every file in *paths*; return the list of unreadable ones.

    Progress is printed at most once per second.
    """
    bad = []
    last_report = time.time()
    total = len(paths)
    print("checked {} file, total {}".format(0, total), end="")
    for idx, path in enumerate(paths):
        try:
            read_preprocess_image_check(path)
        except Exception:
            print("\ngot an unsafe file \"{}\"".format(path))
            bad.append(path)
        if time.time() - last_report > 1:
            last_report = time.time()
            print("\rchecked {} file, total {}".format(idx + 1, total), end="")
    print("")
    return bad
def main():
    """Load image paths, build label mappings, construct/load the model,
    optionally train (interactive prompt), save and evaluate."""
    print("initializing...")
    data_root = pathlib.Path(DATASET_ROOT)
    # Collect image paths; the parent directory name of each image is its class.
    train_img_paths = [str(ele) for ele in data_root.glob("train/photo/*/*.*")]
    test_img_paths = [str(ele) for ele in data_root.glob("test/photo/*/*.*")]
    img_paths = []
    img_counts = len(train_img_paths) + len(test_img_paths)
    print("loaded", img_counts, "image paths")
    # Build the class-name -> integer-label mapping (sorted for determinism).
    label_types = list(set(pathlib.Path(path).parent.name for path in test_img_paths))
    label_types = sorted(label_types)
    print("loaded labels:", label_types)
    type_to_label = dict((name, index)
                         for index, name in enumerate(label_types))
    print("labels and indexs:", type_to_label)
    random.shuffle(train_img_paths)
    # Optional pre-scan that removes unreadable files (disabled):
    #print("checking data safety...")
    #res = check_data_safety(train_img_paths+test_img_paths)
    #for i in res:
    #    os.remove(i)
    #    print("removed unsafe file \"{}\"".format(i))
    # Build the integer label lists matching the path lists.
    train_labels = [type_to_label[pathlib.Path(path).parent.name]
                    for path in train_img_paths]
    test_labels = [type_to_label[pathlib.Path(path).parent.name]
                   for path in test_img_paths]
    print("dataset head:")
    print("============================================")
    print("Label\t\t\tIMG_Path")
    # Preview the first 10 (label, path) pairs.
    for i, l in enumerate(train_img_paths):
        if i > 9:
            break
        print(" " + str(train_labels[i]) + "\t" + str(l))
    print("============================================")
    # Initialize the network wrapper and its data pipelines.
    cnn = customNN(NAME)
    cnn.load_dataset((train_img_paths, train_labels),
                     (test_img_paths, test_labels),
                     mapFunc=read_preprocess_image,
                     batchSize=BATCH_SIZE,
                     shufflePercentage=BUFF_RATE,
                     mapFuncTest=read_preprocess_image_test
                     )
    # Load an existing model when available, otherwise build a fresh one.
    if os.path.exists(MODEL_FILE):
        cnn.load_model(MODEL_FILE)
        print("loaded model file from \"{}\"".format(MODEL_FILE))
    else:
        cnn.init_model()
    print(cnn.model.summary())
    cnn.set_model_file(MODEL_FILE)
    #cnn.enable_tensorboard()
    #cnn.enable_checkpointAutosave(MODEL_FILE)
    # Sanity-check that the dataset pipeline has the expected structure.
    print("datasets:\n{}".format(str(cnn.train_db)))
    # Initial training, confirmed interactively.
    choice = input("start training for {} epoch(s)? (Y/n): ".format(str(EPOCHES)))
    trained = False
    if EPOCHES > 0 and choice in ("Y", "y", "yes"):
        cnn.train(epochs=EPOCHES, verbose=1, validation=False)
        trained = True
    # Fine-tuning pass (disabled):
    # cnn.postProc_model()
    # Second training pass (disabled):
    # cnn.train(epochs=100)
    # Persist the trained model.
    if trained:
        cnn.save_model()
        print("model saved to \"{}\"".format(MODEL_FILE))
    # Evaluate on the test pipeline.
    print("evaluating trained model...")
    cnn.evaluate()
    '''
    {'neutral': 0, 'porn': 1, 'sexy': 2}
    '''
if __name__ == "__main__":  # allow importing this module without side effects
    main()
| 36.848126 | 119 | 0.555294 |
e49e6a81cc60492767ccac3a720150ecd2161066 | 251 | py | Python | Examples/put_pdf_request_svg.py | kaferi/aspose-pdf-cloud-python | 48f70742fec1e41644ec0b658db3f174ba845304 | [
"MIT"
] | 7 | 2018-06-11T17:44:44.000Z | 2022-02-08T05:52:48.000Z | Examples/put_pdf_request_svg.py | kaferi/aspose-pdf-cloud-python | 48f70742fec1e41644ec0b658db3f174ba845304 | [
"MIT"
] | 1 | 2021-03-20T22:16:15.000Z | 2021-06-27T15:11:52.000Z | Examples/put_pdf_request_svg.py | kaferi/aspose-pdf-cloud-python | 48f70742fec1e41644ec0b658db3f174ba845304 | [
"MIT"
] | 4 | 2018-04-18T19:41:12.000Z | 2021-06-21T13:12:24.000Z | from configuration import *
# Source PDF to upload and the name of the SVG the conversion should produce.
# NOTE: test_data_path, temp_folder, pdf_api and pprint come from the
# `configuration` star import above.
file_name = '4pages.pdf'
result_file_name = "result.svg"
opts = {
    "file": test_data_path + file_name
}
# Send the PDF in the request body and convert it to SVG; the result path on
# the remote storage is temp_folder/result_file_name.
response = pdf_api.put_pdf_in_request_to_svg(
    temp_folder + '/' + result_file_name, **opts)
pprint(response)
| 19.307692 | 49 | 0.7251 |
b4c38bd186f4c90defccdd94f1266f1e2440141f | 569 | py | Python | main.py | parada3desu/foxy-key-broker | fc95de9e9bfd61c506a9a18aa64c5c9cbeac8a9c | [
"Apache-2.0"
] | null | null | null | main.py | parada3desu/foxy-key-broker | fc95de9e9bfd61c506a9a18aa64c5c9cbeac8a9c | [
"Apache-2.0"
] | null | null | null | main.py | parada3desu/foxy-key-broker | fc95de9e9bfd61c506a9a18aa64c5c9cbeac8a9c | [
"Apache-2.0"
] | null | null | null | import argparse
from src.apps.kms.backend.boot import boot as boot_kms
# Maps a service name (CLI argument) to the callable that boots it.
service_mapping = {
    'kms': boot_kms,
}
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--service',
        type=str,
        nargs='?',
        help='Service to run must be one of ["kms"]',
    )
    # NOTE(review): with nargs='?' and no default, omitting --service leaves
    # the value None, so the mapping lookup below raises KeyError instead of
    # printing a friendly message.
    params = vars(parser.parse_args())
    service_name = params['service']
    service_booter = service_mapping[service_name]
    print(f'Booting {service_name} server')
    service_booter()
    print(f'{service_name} server start success')
| 23.708333 | 54 | 0.652021 |
9eb46364e3d60bd9f815cdf3adff46bc5e4a4383 | 16,220 | py | Python | MetaScreener/external_sw/mgltools/MGLToolsPckgs/FlexTree/EssentialDynamics.py | bio-hpc/metascreener | 6900497629f601c4b6c0c37da26de58ffa221988 | [
"Apache-2.0"
] | 8 | 2021-12-14T21:30:01.000Z | 2022-02-14T11:30:03.000Z | MetaScreener/external_sw/mgltools/MGLToolsPckgs/FlexTree/EssentialDynamics.py | bio-hpc/metascreener | 6900497629f601c4b6c0c37da26de58ffa221988 | [
"Apache-2.0"
] | null | null | null | MetaScreener/external_sw/mgltools/MGLToolsPckgs/FlexTree/EssentialDynamics.py | bio-hpc/metascreener | 6900497629f601c4b6c0c37da26de58ffa221988 | [
"Apache-2.0"
] | null | null | null | ## Automatically adapted for numpy.oldnumeric Jul 23, 2007 by
######################################################################
#
# Date: Jan 2004 Author: Yong Zhao
#
# yongzhao@scripps.edu
#
# The Scripps Research Institute (TSRI)
# Molecular Graphics Lab
# La Jolla, CA 92037, USA
#
# Copyright: Yong Zhao and TSRI
#
#########################################################################
import string, types
from math import sqrt
import numpy.oldnumeric as Numeric
N=Numeric
from MolKit import Read
from MolKit.molecule import MoleculeSet, AtomSet
import glob
class EssentialDynamics:
    """Essential-dynamics (principal component) analysis of a PDB ensemble.

    Computes eigenvectors/eigenvalues of the CA-coordinate covariance matrix
    over a set of conformers (e.g. CONCOORD output), can save/load the result
    to a simple text format, and generates displaced coordinate sets.

    NOTE(review): this is legacy Python 2 code (print statements, file(),
    string.join, numpy.oldnumeric) and depends on MolKit/FlexTree.
    """
    def __init__(self):
        self.vectors=[]            # eigenvectors, one flat 3N vector per mode
        self.amplitudes=[]         # per-mode amplitudes (sqrt of eigenvalues)
        self.pdbFile=None          # path of the reference structure
        self.mol = AtomSet([])     # reference molecule (MolKit)
        self.coords=[]             # flattened reference coordinates (length 3N)
        self.atomsInResidue=None   # cached per-residue atom sets
        self.scale=1.0             # default displacement scale factor
        self.movingAtoms=AtomSet([])
    def load(self, filename):
        """Load a previously saved essential-dynamics file; return True on success."""
        ok = self._parseEssentialDynamicsFile(filename)
        if ok:
            self._flattenCoords()
        return ok
    def compute(self, originalPDB, pdbFiles=None, filter='backbone'):
        """Compute essential dynamics from an ensemble of PDB files.

        originalPDB: path of the starting structure.
        pdbFiles: glob pattern of conformer files (e.g. CONCOORD output),
        such as '../Research/concoord/test/test*.pdb'.
        filter: one of 'CA', 'backbone', 'all', or 'CA+selection string'
        (selections use the 'chain:residue:atom' format, e.g. CA+W:WTR:O).
        NOTE(review): the active __getDataFrom always uses CA coordinates
        regardless of *filter* — confirm against the commented-out variant.
        """
        self.mol=Read(originalPDB)[0]
        data=self.__getDataFrom(files=pdbFiles, filter=filter)
        data=N.array(data)
        from numpy.oldnumeric.linear_algebra import eigenvectors
        from numpy.oldnumeric import mlab as MLab
        covMatrix=MLab.cov(data)
        egvalues, egvectors = eigenvectors(covMatrix)
        self.vectors=egvectors
        self.amplitudes=N.sqrt(egvalues) # sqrt(eigenvalue) is the amplitude
        self.pdbFile=originalPDB
        self.filter=filter
        self._flattenCoords()
        return
    def __getDataFrom(self, files, filter):
        """Collect flattened CA coordinates from every conformer matching *files*."""
        data=[]
        counter=0
        files=glob.glob(files)
        files.sort()
        assert len(files) > 3
        for f in files:
            m=Read(f)
            counter +=1
            coords=m[0].allAtoms.get('CA').coords
            #coords=N.reshape(N.array(coords), (len(coords)*3,))
            coords=N.reshape(N.array(coords), (-1,))
            data.append(coords.tolist())
            del m
            # Stop once we have more conformers than coordinates (enough for PCA).
            if counter > len(coords):
                print "Need %d files, found %d " %(len(coords), len(files) )
                print "more than enough pdb files found.. stop."
                break
        return data
## Earlier, filter-aware variant kept for reference:
##     def __getDataFrom(self, files, filter):
##         data=[]
##         counter=0
##         files=glob.glob(files)
##         assert len(files) > 3
##         for f in files:
##             m=Read(f)
##             counter +=1
##             if filter=='backbone':
##                 coords=m[0].getAtoms().NodesFrget('backbone').coords
##             elif filter=='all':
##                 coords=m[0].getAtoms().coords
##             elif filter=='CA':
##                 coords=m[0].getAtoms().get('CA').coords
##             else: # if filter is 'CA+selection_string'
##                 tmp=filter.split('+')
##                 if len(tmp)==2 and tmp[0]=='CA':
##                     atms=m[0].getAtoms()
##                     coords=atms.get('CA').coords
##                     resSet=atms.parent.uniq()
##                     from MolKit.stringSelector import StringSelector
##                     stringSel = StringSelector()
##                     atoms, msg=stringSel.select(m, m[0].name+":"+tmp[1])
##                     assert len(atoms) ==1
##                     coords.extend(atoms.coords)
##                 else:
##                     name=m[0].name
##                     atms=m[0].allAtoms.NodesFromName(name+filter)
##                     if len(atms):
##                         coords=atms.coords
##                     else:
##                         raise
##                     print "Unknown filter", filter
##                     return
##             coords=N.reshape(N.array(coords), (len(coords)*3,))
##             data.append(coords.tolist())
##             del m
##             if counter > len(coords):
##                 print "Need %d files, found %d " %(len(coords), len(files) )
##                 print "more than enough pdb files found.. stop."
##                 break
##         return data
    def write(self,outputFilename):
        """Write the essential dynamics to a text file.

        Format: line 1 = PDB file name, line 2 = filter, then one
        'eigenvalue, [eigenvector...]' line per mode (imaginary parts dropped).
        """
        file=open(outputFilename, 'w')
        file.write(self.pdbFile+'\n')
        file.write(self.filter+'\n')
        num=len(self.vectors)
        assert num==len(self.amplitudes)
        egvalues=self.amplitudes
        egvectors=self.vectors
        for i in range(num):
            e=egvalues[i]
            if types.ComplexType==type(e):
                e=e.real
            v=egvectors[i].tolist()
            if types.ComplexType==type(v[0]):
                # NOTE(review): this inner loop reuses the name `i`, shadowing
                # the outer mode index; harmless here since `for` reassigns it.
                for i in range(len(v)):
                    v[i]=v[i].real
            file.write('%f, %s'%(e, str(v)))
            file.write('\n')
        file.close()
    def chooseModes(self, modeNum):
        """Keep only the first *modeNum* modes (no-op if fewer are available)."""
        num=len(self.vectors)
        if num==0 or num < modeNum:
            return
        else:
            self.vectors=self.vectors[:modeNum]
            self.amplitudes=self.amplitudes[:modeNum]
    def _parseEssentialDynamicsFile(self, filename):
        """Parse a file produced by write(); return True on success, False otherwise.

        Format: line 1 = PDB file name, line 2 = filter selection string,
        remaining lines = 'eigenvalue, eigenvector'.
        NOTE(review): uses eval() on the file contents — do not load
        untrusted files.  The blanket except hides all parse errors.
        """
        try:
            data=file(filename, 'r').readlines()
            pdbFile=data[0].split()[0]
            filter=data[1].split('\n')[0]
            egValues=[]
            egVectors=[]
            for line in data[2:]:
                tmp=line.split(',')
                egValues.append(eval(tmp[0]))
                vector=string.join(tmp[1:],',')
                egVectors.append(eval(vector))
            self.vectors=N.array(egVectors,'f')
            self.amplitudes=N.array(egValues,'f')
            self.pdbFile=pdbFile
            try:
                self.mol = Read(pdbFile)[0]
                atoms=self.mol.allAtoms
                # Conformation 0 keeps the original coordinates.
                atoms.addConformation(atoms.coords)
            except:
                print "file %s in essential dynamics file not found"%pdbFile
                raise ValueError
            self.filter=filter
            #self.atoms=self.mol.NodesFromName(self.mol.name+filter)
            self.atoms=self.mol.NodesFromName(filter)
            if len(self.atoms)==0:
                self.movingAtoms=self.mol.allAtoms
            else:
                self.movingAtoms=self.atoms.parent.atoms
            assert len(self.movingAtoms)!=0
            assert len(self.vectors)!=0
            return True
        except:
            return False
    def _flattenCoords(self):
        """Cache the molecule's coordinates as one flat (3N,) float array."""
        if not self.mol:
            return
        coords=self.mol.allAtoms.coords
        length=len(coords)
        coords=N.array(coords, 'f')
        self.coords=N.reshape(coords, (length*3, ) )
    def getCoords(self, indexes, weights, scale=None, allAtoms=True):
        """Return coordinates displaced along one mode.

        indexes: mode index; weights: weight in [-1.0, 1.0].
        scale: displacement scale; defaults to self.scale when None.
        allAtoms=True returns coordinates for all atoms; False returns only
        the filtered atoms (backbone when the filter is CA-based), which is
        useful for visualization in Pmv.
        """
        if not scale:
            scale=self.scale
        index=indexes
        weight=weights # fixme.. should pass a list
        #amplitude=self.amplitudes[index] * weight
        amplitude=sqrt(self.amplitudes[index]) * weight
        vector= N.array(self.vectors[index],'f') * amplitude*scale
        if self.filter=='CA':
            if allAtoms:
                newCoords=self._allCoordsByResidue(vector)
                newCoords=N.reshape(newCoords, (len(self.coords)/3, 3) )
                return newCoords
            else:
                newCoords=self._backboneByResidue(vector)
                return newCoords
        tmp=self.filter.split('+')
        # filter of the form "CA+W:WTR301:O"
        if len(tmp)==2 and tmp[0]=='CA':
            ## hack here.. the W:WTR301:O has to be at the end of all CAs
            if allAtoms:
                newCoords=self._allCoordsByResidue(vector)
            else:
                newCoords=self._backboneByResidue(vector)
            newCoords=N.reshape(newCoords, (len(self.coords)/3, 3) )
            return newCoords
        elif self.filter=='backbone':
            print "Not implemented" # fixme.
            return
        elif self.filter=='all':
            # One vector component per atom: add the displacement directly.
            newCoords=[]
            coords=self.coords
            newCoords = coords + vector
            newCoords=N.reshape(newCoords, (len(coords)/3, 3) )
            return newCoords
        elif self.filter.split(':')[-1]=='CA':
            newCoords=self._allCoordsByResidue(vector)
            return newCoords
        elif self.filter.split(':')[-1]=='backbone':
            newCoords=self._allCoordsByResidue(vector)
            return newCoords
        else:
            # NOTE(review): bare `raise` outside an except clause is an error
            # in itself; this path aborts with a TypeError in practice.
            print "Unknown filter:", self.filter
            raise
    def _backboneByResidue(self, vector):
        """Apply one displacement vector per residue (CA); return backbone coords only."""
        if len(self.atoms)==0:
            atoms=self.mol.allAtoms
        else:
            atoms=self.atoms.parent.atoms
        ## conformation 1 is the transformed coords;
        ## the original coords are kept in conformation 0
        currentConf = atoms[0].conformation
        if currentConf!=0:
            atoms.setConformation(0)
        atomsInRes=[]
        if self.filter.split(':')[-1]=='CA' and self.atomsInResidue is None:
            resSet=atoms.parent.uniq()
            if self.atomsInResidue is None:
                for r in resSet:
                    ats = r.atoms.get('backbone') \
                          & atoms # make sure in the molecularFragment
                    atomsInRes.append(ats)
                self.atomsInResidue=atomsInRes
##         else:
##             tmp=self.filter.split('+')
##             self.atomsInResidue=atoms.get('backbone')
##             ## fixme !!!! W:WTR301:O
##             self.atomsInResidue.append([])
        vectorList=N.reshape(vector, (len(vector)/3, 3))
        # Py2 map(None, a, b) pairs the per-residue vectors with atom sets.
        data=map(None, vectorList, self.atomsInResidue)
        origCoords=[]
        for v, ats in data:
            tmp=[]
            for at in ats:
                try:
                    tmp.append( (N.array(at.coords) + v).tolist() )
                except ValueError:
                    print N.array(v,'f').shape, N.array(at.coords).shape
            origCoords.extend(tmp)
        if currentConf!=0:
            atoms.setConformation(currentConf)
        atoms.updateCoords(origCoords, 1)
        return origCoords
    def _allCoordsByResidue(self, vector):
        """Apply one displacement vector per residue (CA) to all of its atoms."""
        if len(self.atoms)==0:
            atoms=self.mol.allAtoms
        else:
            atoms=self.atoms.parent.atoms
        if len(atoms[0]._coords)==1:
            # Make sure a second conformation slot exists for the result.
            atoms.addConformation(atoms.coords)
        currentConf = atoms[0].conformation
        if currentConf!=0:
            atoms.setConformation(0)
        resSet=atoms.parent.uniq()
        origCoords=[]
        if self.atomsInResidue is None:
            atomsInRes=[]
            for r in resSet:
                ats = r.atoms & atoms # make sure in the molecularFragment
                atomsInRes.append(ats)
            self.atomsInResidue=atomsInRes
        vectorList=N.reshape(vector, (len(vector)/3, 3))
        data=map(None, vectorList, self.atomsInResidue)
        for v, ats in data:
            tmp=[]
            for at in ats:
                try:
                    tmp.append( (N.array(at.coords) + v).tolist() )
                except ValueError:
                    print N.array(v,'f').shape, N.array(at.coords).shape
            origCoords.extend(tmp)
        atoms.updateCoords(origCoords, 1)
        return origCoords
    def getFilteredCoords(self, indexes, weights):
        """Return displaced coordinates for the filtered atoms only.

        indexes: mode index; weights: weight in [-1.0, 1.0].
        """
        index=indexes
        weight=weights # fixme.. should pass a list
        #sqrt(eigenValue) = amplitude
        amplitude=sqrt(self.amplitudes[index] ) * weight
        vector= N.array(self.vectors[index],'f') * amplitude
        all=self.mol.getAtoms()
        if self.filter=='CA':
            coords=all.get('CA').coords
        elif self.filter=='backbone':
            coords=all.get('backbone').coords
        elif self.filter=='all':
            coords=all.coords
        coords=N.reshape(coords, (len(vector), ) )
        newCoords = coords + vector
        newCoords=N.reshape(newCoords, (len(vector)/3,3 ) )
        return newCoords
    def writeXML(self, modeNum, xmlFileName, filter='CA'):
        """Write a FlexTree XML whose only motion is the normal-mode motion,
        applied to the root of the PDB file.

        NOTE(review): this method returns immediately after printing a
        redirection message — everything below the `return` is dead code,
        kept for reference.
        """
        ####
        print "Use Pmv -->FlexTreeCommands to do this." # fixme
        return
        lines1=['<?xml version="1.0" ?>',
' <root',
' name="node" ',
' id="0"']
        lines2=' discreteMotionParams="weightList: list float %s, name: str combined_Normal_Modes, vectorNum: int %d, modeNum: int %d, modeVectorFile: str %s, amplitudeList: list float %s'
        lines3=' selectionString="%s"'
        lines4=[' discreteMotion="FTMotion_NormalMode"',
' convolve="FTConvolveApplyMatrix"']
        lines5=' file="%s">'
        lines6= ' </root>'
        from FlexTree.XMLParser import ReadXML
        reader = ReadXML()
        mol=self.mol.name
        all=self.mol.getAtoms()
        vectorPerMode=0
        if filter=='CA':
            vectorPerMode = len(all.get('CA') )
        elif filter=='backbone':
            vectorPerMode = len(all.get('backbone') )
        elif filter=='all':
            vectorPerMode = len(all)
        #amplitudes = Save_NormalModes(self.pdbFile, '%s.Modes'%mol, modeNum)
        # Dump the first modeNum eigenvectors, one xyz triple per line.
        xx=file('kkk.abc', 'w')
        for vec in self.vectors[:modeNum]:
            vec=N.reshape(vec, (vectorPerMode, 3))
            for v in vec:
                xx.write("%s %s %s\n"%(v[0], v[1], v[2]))
        xx.close()
        amplitudes=self.amplitudes.tolist()[:modeNum]
        tmp=amplitudes.__repr__()
        tmp= tmp[1:-1].split(',')
        tmp=''.join(tmp)
        outXML=file(xmlFileName, 'w')
        for line in lines1:
            outXML.write(line + '\n')
        jj = '0 '*modeNum
        outXML.write(lines2%(jj,
                             vectorPerMode, #
                             modeNum,
                             'kkk.abc',
                             tmp)
                     + ' " \n')
        outXML.write(lines3%mol + '\n')
        for line in lines4:
            outXML.write(line + '\n')
        outXML.write(lines5%self.pdbFile + '\n')
        outXML.write(lines6 + '\n')
        outXML.close()
        reader(xmlFileName)
        tree=reader.get()[0]
        root=tree.root
        tree.adoptRandomConformation()
        root.updateCurrentConformation()
        print '------------ done with ' , mol
"""
# testers
from FlexTree.EssentialDynamics import EssentialDynamics
ed=EssentialDynamics()
ed.compute('1crn_top3.pdb', "../Research/concoord/test/test*.pdb", 'CA')
ed.write('fff')
###################
from math import sqrt
for i in range(len(ed.vectors)):
a=max(ed.vectors[i])
b=abs(min(ed.vectors[i]))
x=max(a,b)
print "max offset of %d is %f"%( i, sqrt(ed.amplitudes[i])*x)
"""
| 34.004193 | 189 | 0.523613 |
e1c8af76079fac1128219f9561bd3983c14b7ea8 | 3,572 | py | Python | py-server/src/config/grib.py | Jonnytoshen/wind-layer | 514e9b9c76b6d72faac21543fd5fb1c43e6bd9b5 | [
"BSD-3-Clause"
] | 285 | 2017-12-16T13:29:27.000Z | 2022-03-28T02:59:08.000Z | py-server/src/config/grib.py | Jonnytoshen/wind-layer | 514e9b9c76b6d72faac21543fd5fb1c43e6bd9b5 | [
"BSD-3-Clause"
] | 104 | 2018-01-01T01:40:13.000Z | 2022-03-26T18:20:45.000Z | py-server/src/config/grib.py | Jonnytoshen/wind-layer | 514e9b9c76b6d72faac21543fd5fb1c43e6bd9b5 | [
"BSD-3-Clause"
] | 97 | 2017-12-18T08:05:21.000Z | 2022-03-28T15:49:38.000Z | import os
GFS_DATE = "20200312"  # data date of the GFS run (YYYYMMDD)
GFS_TIME = "18"  # model run (cycle) time: 00, 06, 12 or 18
RES = "0p25"  # grid resolution: 0p25 = 0.25 deg, 0p50 = 0.5 deg, 1p00 = 1 deg
BBOX = "leftlon=0&rightlon=360&toplat=90&bottomlat=-90"  # geographic extent of the request
LEVEL = "lev_10_m_above_ground=on"  # vertical level selection
VARIABLES = "var_UGRD=on&var_VGRD=on&var_TMP=on"  # requested variables (u/v wind, temperature)
GRIB_DES = "pgrb2"  # GRIB file descriptor; default "pgrb2", 0.5-deg files use "pgrb2full"
FORECASTS_TIME = "f000"  # forecast hour offset

# Template of the NOMADS "filter" download endpoint; the ${...} placeholders
# are substituted in get_download_url().
GFS_URL = "https://nomads.ncep.noaa.gov/cgi-bin/" \
          "filter_gfs_${RES}.pl?file=gfs.t${GFS_TIME}z.${GRIB_DES}" \
          ".${RES}.${FORECASTS_TIME}&${LEVEL}&${VARIABLES}&${BBOX}&dir=%2Fgfs" \
          ".${GFS_DATE}%2F${GFS_TIME}"

# Template of the NOMADS directory-listing page for one model run.
GRS_PAGE = "https://nomads.ncep.noaa.gov/cgi-bin/" \
           "filter_gfs_${RES}.pl?dir=%2Fgfs" \
           ".${GFS_DATE}%2F${GFS_TIME}"

# Output locations, resolved relative to the current working directory.
OUTPUT_BASE_DIR = os.getcwd()
OUTPUT_DIR = 'static/data'
OUTPUT_JSON_DIR = 'static/json'
OUTPUT_RASTER_DIR = 'static/raster'

BASE_DIR = os.path.abspath(os.path.join(OUTPUT_BASE_DIR, OUTPUT_DIR))
BASE_JSON_DIR = os.path.abspath(os.path.join(OUTPUT_BASE_DIR, OUTPUT_JSON_DIR))
BASE_RASTER_DIR = os.path.abspath(os.path.join(OUTPUT_BASE_DIR, OUTPUT_RASTER_DIR))
def get_download_url(date, gfs_time, res, forecasts_time, bbox, level, variables):
    """Build the NOMADS GFS filter download URL for one data request.

    Every falsy argument falls back to the corresponding module-level default
    (GFS_DATE, GFS_TIME, RES, ...). Returns a dict with the resolved request
    parameters plus the fully substituted URL under 'GFS_URL'.
    """
    params = {
        'GFS_DATE': date or GFS_DATE,
        'GFS_TIME': gfs_time or GFS_TIME,
        'RES': res or RES,
        'FORECASTS_TIME': forecasts_time or FORECASTS_TIME,
        'BBOX': bbox or BBOX,
        'LEVEL': level or LEVEL,
        'VARIABLES': variables or VARIABLES,
    }
    # 0.5-degree files are published under the "pgrb2full" descriptor.
    params['GRIB_DES'] = 'pgrb2full' if params['RES'] == '0p50' else GRIB_DES

    url = GFS_URL
    for key, value in params.items():
        url = url.replace('${%s}' % key, value)

    return {
        'GFS_DATE': params['GFS_DATE'],
        'GFS_TIME': params['GFS_TIME'],
        'RES': params['RES'],
        'BBOX': params['BBOX'],
        'LEVEL': params['LEVEL'],
        'VARIABLES': params['VARIABLES'],
        'GFS_URL': url,
    }
def get_page_url(date, gfs_time, res):
    """Build the NOMADS GFS filter page URL (directory listing) for one model run.

    Falsy arguments fall back to the module-level defaults.
    """
    substitutions = (
        ('${RES}', res or RES),
        ('${GFS_TIME}', gfs_time or GFS_TIME),
        ('${GFS_DATE}', date or GFS_DATE),
    )
    url = GRS_PAGE
    for placeholder, value in substitutions:
        url = url.replace(placeholder, value)
    return url
def get_file_path(file_path, name):
    """Resolve the on-disk location of a downloaded GRIB data file.

    Args:
        file_path: sub-directory (relative to the data root) for the file.
        name: file name.

    Returns:
        dict with:
            'path'  -- absolute path of the file;
            'exist' -- True if the file already exists and is non-empty.

    Side effect: creates the directory if it does not exist yet.
    """
    # Reuse the module-level BASE_DIR (same expression) instead of recomputing it,
    # consistent with get_json_path/get_raster_path.
    path = os.path.join(BASE_DIR, file_path)
    full_path = os.path.join(path, name)
    # exist_ok avoids the check-then-create race of the old exists()/makedirs()
    # pair (and drops the 'basedir exist' debug print).
    os.makedirs(path, exist_ok=True)
    exists = os.path.exists(full_path) and os.path.getsize(full_path) > 0
    return {
        "exist": exists,
        "path": full_path,
    }
def get_json_path(file_path, name):
    """Resolve the on-disk location of a derived JSON file.

    Args:
        file_path: sub-directory (relative to the JSON root) for the file.
        name: file name.

    Returns:
        dict with:
            'path'  -- absolute path of the file;
            'exist' -- True if the file already exists and is non-empty.

    Side effect: creates the directory if it does not exist yet.
    """
    path = os.path.join(BASE_JSON_DIR, file_path)
    full_path = os.path.join(path, name)
    # exist_ok avoids the check-then-create race of the old exists()/makedirs()
    # pair (and drops the 'basedir exist' debug print).
    os.makedirs(path, exist_ok=True)
    exists = os.path.exists(full_path) and os.path.getsize(full_path) > 0
    return {
        "exist": exists,
        "path": full_path,
    }
def get_raster_path(file_path, name):
    """Resolve the on-disk location of a derived raster file.

    Args:
        file_path: sub-directory (relative to the raster root) for the file.
        name: file name.

    Returns:
        dict with:
            'path'  -- absolute path of the file;
            'exist' -- True if the file already exists and is non-empty.

    Side effect: creates the directory if it does not exist yet.
    """
    path = os.path.join(BASE_RASTER_DIR, file_path)
    full_path = os.path.join(path, name)
    # exist_ok avoids the check-then-create race of the old exists()/makedirs()
    # pair (and drops the 'basedir exist' debug print).
    os.makedirs(path, exist_ok=True)
    exists = os.path.exists(full_path) and os.path.getsize(full_path) > 0
    return {
        "exist": exists,
        "path": full_path,
    }
| 25.697842 | 83 | 0.646976 |
09874e8bd58846c90a44ca0c3ebb07d1e53ed795 | 12,153 | py | Python | src/db/dao/subjectdao.py | yuriel-v/Daedalus | ff7ae4090241fab158a8ef9096b1344f6ab8c98b | [
"MIT"
] | null | null | null | src/db/dao/subjectdao.py | yuriel-v/Daedalus | ff7ae4090241fab158a8ef9096b1344f6ab8c98b | [
"MIT"
] | null | null | null | src/db/dao/subjectdao.py | yuriel-v/Daedalus | ff7ae4090241fab158a8ef9096b1344f6ab8c98b | [
"MIT"
] | null | null | null | from core.utils import print_exc
from db.dao.genericdao import GenericDao
from db.model import Subject
from sqlalchemy.orm import Query
from typing import Union
class SubjectDao(GenericDao):
    """Data Access Object for `Subject` entities, backed by a SQLAlchemy session."""

    def __init__(self, session=None, autoinit=True):
        super().__init__(session=session, autoinit=autoinit)

    def insert(self, code: str, fullname: str, semester: int) -> dict:
        """Registers a new subject in the database.

        ### Params
        - `code: str`: The subject's code. Must be a 3-char, full-uppercase string;
        - `fullname: str`: The subject's full name;
        - `semester: int`: The semester this subject belongs to.
          - Elective subjects belong to semester 0.

        ### Returns
        - A dict in the form `{sbj.code: sbj}` on success.
        - Exceptional cases:
          - `{'err': 1}`: Exception raised, transaction rolled back;
          - `{'err': 2}`: Syntax error in the given arguments;
          - `{'err': 3}`: The given code already exists.

        ### Raises
        - `SyntaxError` when:
          - `code` or `fullname` is not a `str` and cannot be cast to `str`;
          - `semester` is not an `int` and cannot be cast to `int`.
        """
        if not isinstance(code, str):
            try:
                code = str(code)
            except Exception:
                raise SyntaxError("Argument 'code' is not a string nor can it be converted to string")

        if not isinstance(fullname, str):
            try:
                fullname = str(fullname)
            except Exception:
                raise SyntaxError("Argument 'fullname' is not a string nor can it be converted to string")

        if not isinstance(semester, int):
            try:
                semester = int(semester)
            except Exception:
                raise SyntaxError("Argument 'semester' is not an int nor can it be converted to int")

        # code must be exactly 3 chars, name non-empty, semester within [0, 10].
        if any([len(code) != 3, len(fullname) == 0, semester not in range(0, 11)]):
            return {'err': 2}
        else:
            tr = None
            try:
                if self.find(terms=code.upper(), by='code') is not None:
                    return {'err': 3}
                else:
                    tr = self._session.begin_nested()
                    new_subject = Subject(code=code.upper(), fullname=fullname, semester=abs(semester))
                    self._session.add(new_subject)
                    self._gcommit(tr)
                    return {new_subject.code: new_subject}
            except Exception:
                print_exc()
                if tr is not None:
                    tr.rollback()
                return {'err': 1}

    def find(self, terms: Union[int, str], by: str, single_result=True) -> Union[list[Subject], Subject, None]:
        """Searches for subjects matching a filter.

        ### Params
        - `terms: str | int`: The search terms;
        - `by: str`: The search criterion. Must be one of:
          - `'code'`: search by subject code;
          - `'name'`: search by (partial or full) name;
          - `'semester'`: search by semester; `terms` must be an `int` or castable to `int`.
        - `single_result: bool`: If `True`, returns the first result or `None`;
          otherwise returns a list with every result. Optional.

        ### Returns
        - If `single_result` is `True`: a `Subject` instance, or `None` when nothing is found;
        - Otherwise: a list of `Subject` instances (possibly empty).
        - In both cases, returns `None` if an exception is raised.

        ### Raises
        - `SyntaxError` when:
          - `by` is not a string with one of the valid values above;
          - `by` is `'code'`/`'name'` and `terms` is not a `str` nor castable to `str`;
          - `by` is `'semester'` and `terms` is not an `int` nor castable to `int`.
        """
        if not isinstance(by, str) or by.lower() not in ('code', 'name', 'semester'):
            raise SyntaxError("Argument 'by' is not a string with a valid value")
        # Fixed: normalize casing so e.g. 'Code' behaves like 'code'. Previously a
        # mixed-case `by` passed validation but silently fell through every branch
        # into the semester filter. Now consistent with find_many().
        by = by.lower()

        if by in ('code', 'name') and not isinstance(terms, str):
            try:
                terms = str(terms)
            except Exception:
                raise SyntaxError("Argument 'terms' is not a str and cannot be cast to str")

        if by == 'semester' and not isinstance(terms, int):
            try:
                terms = int(terms)
            except Exception:
                raise SyntaxError("Argument 'terms' is not an int and cannot be cast to int")

        try:
            q: Query = self._session.query(Subject)
            if by == 'code':
                q = q.filter(Subject.code == terms.upper())
            elif by == 'name':
                q = q.filter(Subject.fullname.ilike(f'%{terms}%'))
            else:
                q = q.filter(Subject.semester == terms)

            if single_result:
                return q.first()
            else:
                return q.all()
        except Exception:
            print_exc()
            return None

    def find_many(self, terms: list[str], by: str) -> list[Subject]:
        """Searches for subjects within the given list of search terms.

        ### Params
        - `terms: list[str]`: A list of strings with each search term;
        - `by: str`: The search criterion. Must be one of:
          - `'code'`: search by subject code;
          - `'name'`: search by (partial or full) name;
          - `'semester'`: search by semester; each term must be an `int` or castable to `int`.

        ### Returns
        - A list of `Subject` instances matching any of the given terms, or an empty list.

        ### Raises
        - `SyntaxError` when:
          - `by` is not a string with one of the valid values above;
          - `by` is `'code'`/`'name'` and at least one element of `terms` is not a `str` nor castable to `str`;
          - `by` is `'semester'` and at least one element of `terms` is not an `int` nor castable to `int`.
        """
        if not isinstance(by, str) or by.lower() not in ('code', 'name', 'semester'):
            raise SyntaxError("Argument 'by' is not a string with a valid value")
        by = by.lower()

        if by in ('code', 'name'):
            try:
                # Falsy terms (empty strings) are silently dropped.
                if by == 'code':
                    terms = tuple([str(term).upper() for term in terms if bool(term)])
                else:
                    terms = tuple([str(term) for term in terms if bool(term)])
            except Exception:
                raise SyntaxError("At least one argument in 'terms' is not a str and cannot be cast to str")

        if by == 'semester':
            try:
                terms = tuple([int(term) for term in terms])
            except Exception:
                raise SyntaxError("At least one argument in 'terms' is not an int and cannot be cast to int")

        try:
            q: Query = self._session.query(Subject)
            if by == 'code':
                q = q.filter(Subject.code.in_(terms))
            elif by == 'name':
                q = q.filter(Subject.fullname.in_(terms))
            else:
                q = q.filter(Subject.semester.in_(terms))
            return q.all()
        except Exception:
            print_exc()
            return []

    def find_all(self) -> list[Subject]:
        """Returns a (possibly huge) list of every subject registered in the database.

        - WARNING: do NOT try to print this whole list into a chat - besides
          possibly exceeding Discord's 2000-char limit, it can cause a lot of spam!
        - Instead of polluting your chat, consider using the `find()` method.
        """
        # NOTE(history): a DBAPI-level SELECT workaround used to live here to avoid
        # one SELECT per id; the real culprit was a rollback at the end of every
        # function. Plain ORM querying is fine as long as a rollback is issued
        # BEFORE every command call that uses the DB: if there is a bad transaction
        # in progress it gets rolled back, otherwise the rollback is a no-op.
        try:
            return self._session.query(Subject).all()
        except Exception:
            print_exc()
            return []

    def update(self, code: Union[str, Subject], newcode=None, fullname=None, semester=None) -> dict:
        """Updates a subject's information.

        ### Params
        - `code: Subject | str`: The subject (or its code) to be updated;
        - `newcode: str`: The new code;
        - `fullname: str`: The new full name;
        - `semester: int`: The new semester.
          - At least one of the three above must not be `None`; attributes left
            as `None` remain unchanged.

        ### Returns
        - A dict in the form `{0: sbj}` on success.
        - Exceptional cases:
          - `{'err': 1}`: Exception raised, transaction rolled back;
          - `{'err': 2}`: Syntax error in the given arguments:
            - `newcode` is not 3 chars long, or `fullname` is shorter than 3 chars;
            - `semester` is not an `int` within [0, 11[.
          - `{'err': 3}`: No such subject.

        ### Raises
        - `SyntaxError` when `code` is neither a `str` nor a `Subject` instance,
          nor castable to `str`.
        """
        # Fixed: plain isinstance tuple instead of the fragile, non-public
        # `Union[str, Subject].__args__` introspection hack.
        if not isinstance(code, (str, Subject)):
            try:
                code = str(code)
            except Exception:
                raise SyntaxError("Argument 'code' is not an instance of 'str' nor 'Subject', nor can it be cast to 'str'")
        try:
            if any([
                all([newcode is None, fullname is None, semester is None]),
                any([
                    newcode is not None and len(str(newcode)) != 3,
                    fullname is not None and len(fullname) < 3,
                    semester is not None and int(semester) not in range(0, 11)
                ])
            ]):
                return {'err': 2}
        except Exception:
            print_exc()
            return {'err': 2}

        tr = None
        try:
            # Fixed: when a Subject instance is given, look it up by its `code`
            # attribute; the old code searched the code column for the numeric
            # `id`, which could never match a 3-char code.
            cur_sbj = self.find(code.upper() if isinstance(code, str) else code.code, by='code')
            if cur_sbj is None:
                return {'err': 3}
            else:
                tr = self._session.begin_nested()
                if newcode is not None:
                    cur_sbj.code = str(newcode).upper()
                if fullname is not None:
                    cur_sbj.fullname = str(fullname)
                if semester is not None:
                    cur_sbj.semester = int(semester)
                self._gcommit(tr)
                return {0: cur_sbj}
        except Exception:
            print_exc()
            if tr is not None:
                tr.rollback()
            return {'err': 1}
| 44.032609 | 144 | 0.539373 |
e79f94a8b5e1c7c712703872a4050079101f3154 | 29,081 | py | Python | api/api.py | kvokka/tilt.build | b10fe0b6d15d855dd48e6954832a4168d6be8ddc | [
"Apache-2.0"
] | null | null | null | api/api.py | kvokka/tilt.build | b10fe0b6d15d855dd48e6954832a4168d6be8ddc | [
"Apache-2.0"
] | null | null | null | api/api.py | kvokka/tilt.build | b10fe0b6d15d855dd48e6954832a4168d6be8ddc | [
"Apache-2.0"
] | null | null | null | from typing import Dict, Union, List, Callable, Any
class Blob:
    """The result of executing a command on your local system.

    Under the hood, a `Blob` is just a string, but we wrap it this way so Tilt
    knows the difference between a string meant to convey content and a string
    indicating, say, a filepath.

    To wrap a string as a blob, call ``blob(my_str)``"""
class LiveUpdateStep:
    """A step in the process of performing a LiveUpdate on an image's container.

    Instances are created via :func:`fall_back_on`, :func:`sync`, :func:`run`
    or :func:`restart_container`.
    For details, see the `Live Update documentation <live_update_reference.html>`_.
    """
    pass
def fall_back_on(files: Union[str, List[str]]) -> LiveUpdateStep:
    """Specify that any changes to the given files will cause Tilt to *fall back* to a
    full image build (rather than performing a live update).

    ``fall_back_on`` step(s) may only go at the beginning of your list of steps.

    (Files must be a subset of the files that we're already watching for this image;
    that is, if any files fall outside of DockerBuild.context or CustomBuild.deps,
    an error will be raised.)

    Args:
        files: a string or list of strings of files. If relative, evaluated relative
            to the Tiltfile. Tilt compares these to the local paths of edited files
            when determining whether to fall back to a full image build.
    """
    pass
def sync(local_path: str, remote_path: str) -> LiveUpdateStep:
    """Specify that any changes to `localPath` should be synced to `remotePath`.

    May not follow any `run` steps in a `live_update`.

    Args:
        local_path: A path relative to the Tiltfile's directory. Changes to files
            matching this path will be synced to `remote_path`. Can be a file (in
            which case just that file will be synced) or directory (in which case
            any files recursively under that directory will be synced).
        remote_path: container path to which changes will be synced. Must be absolute.
    """
    pass
def run(cmd: str, trigger: Union[List[str], str] = []) -> LiveUpdateStep:
    """Specify that the given `cmd` should be executed when updating an image's container.

    May not precede any `sync` steps in a `live_update`.

    Args:
        cmd: A shell command.
        trigger: If the ``trigger`` argument is specified, the build step is only
            run when there are changes to the given file(s). Paths relative to
            Tiltfile. (Note that in addition to matching the trigger, file changes
            must also match at least one of this Live Update's syncs in order to
            trigger this run. File changes that do not match any syncs will be
            ignored.)
    """
    pass
def restart_container() -> LiveUpdateStep:
    """Specify that a container should be restarted when it is live-updated.

    May only be included in a `live_update` once, and only as the last step.
    """
    pass
def docker_build(ref: str, context: str, build_args: Dict[str, str] = {}, dockerfile: str = "Dockerfile", dockerfile_contents: Union[str, Blob] = "", live_update: List[LiveUpdateStep]=[], match_in_env_vars: bool = False, ignore: Union[str, List[str]] = [], only: Union[str, List[str]] = [], entrypoint: str = "") -> None:
    """Builds a docker image.

    Note that you can't set both the `dockerfile` and `dockerfile_contents` arguments (will throw an error).

    Example: ``docker_build('myregistry/myproj/backend', '/path/to/code')`` is roughly equivalent to the call ``docker build /path/to/code -t myregistry/myproj/backend``.

    Note: If you're using the `ignore` and `only` parameters to do context filtering and you have tricky cases, reach out to us. The implementation is complex and there might be edge cases.

    Note: the `entrypoint` parameter is not supported for Docker Compose resources. If you need it for your use case, let us know.

    Args:
      ref: name for this image (e.g. 'myproj/backend' or 'myregistry/myproj/backend'). If this image will be used in a k8s resource(s), this ref must match the ``spec.container.image`` param for that resource(s).
      context: path to use as the Docker build context.
      build_args: build-time variables that are accessed like regular environment variables in the ``RUN`` instruction of the Dockerfile. See `the Docker Build Arg documentation <https://docs.docker.com/engine/reference/commandline/build/#set-build-time-variables---build-arg>`_.
      dockerfile: path to the Dockerfile to build.
      dockerfile_contents: raw contents of the Dockerfile to use for this build.
      live_update: set of steps for updating a running container (see `Live Update documentation <live_update_reference.html>`_).
      match_in_env_vars: specifies that k8s objects can reference this image in their environment variables, and Tilt will handle those variables the same as it usually handles a k8s container spec's `image`s.
      ignore: set of file patterns that will be ignored. Ignored files will not trigger builds and will not be included in images. Follows the `dockerignore syntax <https://docs.docker.com/engine/reference/builder/#dockerignore-file>`_.
      only: set of file paths that should be considered for the build. All other changes will not trigger a build and will not be included in images. Inverse of ignore parameter. Only accepts real paths, not file globs.
      entrypoint: command to run when this container starts. Takes precedence over the container's ``CMD`` or ``ENTRYPOINT``, and over a `container command specified in k8s YAML <https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/>`_. Will be evaluated in a shell context: e.g. ``entrypoint="foo.sh bar"`` will be executed in the container as ``/bin/sh -c 'foo.sh bar'``.
    """
    pass
class FastBuild:
    """An image that was created with ``fast_build`` (deprecated; see :func:`fast_build`)."""

    def add(self, src: str, dest: str) -> 'FastBuild':
        """Adds the content from ``src`` into the image at path ``dest``.

        Args:
            src: The path to content to be added to the image (absolute, or relative to the location of the Tiltfile).
            dest: The path in the image where the content should be added.
        """
        pass

    def run(self, cmd: str, trigger: Union[List[str], str] = []) -> None:
        """Runs ``cmd`` as a build step in the image.

        Args:
            cmd: A shell command.
            trigger: If the ``trigger`` argument is specified, the build step is only run on changes to the given file(s).
        """
        pass
def hot_reload() -> None:
    """Setting this on a ``FastBuild`` image tells Tilt that this container knows
    how to automatically reload any changes in the container. As a result there
    is no need to restart it.

    This is useful for containers that run something like nodemon or webpack Hot
    Module Replacement to update running processes quickly."""
    pass
def fast_build(img_name: str, dockerfile_path: str, entrypoint: str = "") -> FastBuild:
    """Initiates a docker image build that supports ``add`` s and ``run`` s, and
    that uses a cache for subsequent builds.

    **Note**: this is a deprecated feature. For the fast building of the future,
    check out our `LiveUpdate tutorial </live_update_tutorial.html>`_ and
    `reference documentation </live_update_reference.html>`_.

    Args:
        img_name: name/ref for the image to build.
        dockerfile_path: path to the Dockerfile used as the base of the build.
        entrypoint: optional command to run when the container starts.
    """
    pass
def k8s_yaml(yaml: Union[str, List[str], Blob]) -> None:
    """Call this with a path to a file that contains YAML, or with a ``Blob`` of YAML.

    We will infer what (if any) of the k8s resources defined in your YAML
    correspond to Images defined elsewhere in your ``Tiltfile`` (matching based on
    the DockerImage ref and on pod selectors). Any remaining YAML is YAML that Tilt
    applies to your k8s cluster independently.

    Any YAML files are watched (see ``watch_file``).

    Examples:

    .. code-block:: python

      # path to file
      k8s_yaml('foo.yaml')

      # list of paths
      k8s_yaml(['foo.yaml', 'bar.yaml'])

      # Blob, i.e. `local` output (in this case, script output)
      templated_yaml = local('./template_yaml.sh')
      k8s_yaml(templated_yaml)

    Args:
      yaml: Path(s) to YAML, or YAML as a ``Blob``.
    """
    pass
class TriggerMode:
    """A set of constants that describe how Tilt triggers an update for a resource.

    Possible values are:

    - ``TRIGGER_MODE_AUTO``: the default. When Tilt detects a change to files or config files associated with this resource, it triggers an update.

    - ``TRIGGER_MODE_MANUAL``: user manually triggers update for dirty resources (i.e. resources with pending changes) via a button in the UI. (Note that the initial build always occurs automatically.)

    The default trigger mode for all manifests may be set with the top-level function :meth:`trigger_mode`
    (if not set, defaults to ``TRIGGER_MODE_AUTO``), and per-resource with :meth:`k8s_resource`.

    See also: `Manual Update Control documentation <manual_update_control.html>`_
    """
    def __init__(self):
        pass
def trigger_mode(trigger_mode: TriggerMode) -> None:
    """Sets the default :class:`TriggerMode` for resources in this Tiltfile.
    (Trigger mode may still be adjusted per-resource with :meth:`k8s_resource`.)

    If this function is not invoked, the default trigger mode for all resources is ``TRIGGER_MODE_AUTO``.

    See also: `Manual Update Control documentation <manual_update_control.html>`_

    Args:
      trigger_mode: may be one of ``TRIGGER_MODE_AUTO`` or ``TRIGGER_MODE_MANUAL``
    """
# Sentinel instance whose repr is its own name, so it renders correctly in
# generated function signatures. See: https://stackoverflow.com/a/50193319/4628866
TRIGGER_MODE_AUTO = type('_sentinel', (TriggerMode,),
                         {'__repr__': lambda self: 'TRIGGER_MODE_AUTO'})()
def k8s_resource(workload: str, new_name: str = "",
                 port_forwards: Union[str, int, List[int]] = [],
                 extra_pod_selectors: Union[Dict[str, str], List[Dict[str, str]]] = [],
                 trigger_mode: TriggerMode = TRIGGER_MODE_AUTO) -> None:
    """Configures a kubernetes resource.

    This description applies to `k8s_resource_assembly_version` 2.
    If you are running Tilt version < 0.8.0 and/or do not call `k8s_resource_assembly_version(2)`, see :meth:`k8s_resource_v1_DEPRECATED` instead.

    Args:
      workload: which workload's resource to configure. This is a colon-separated
        string consisting of one or more of (name, kind, namespace, group), e.g.,
        "redis", "redis:deployment", or "redis:deployment:default".
        `k8s_resource` searches all loaded k8s workload objects for an object matching
        all given fields. If there's exactly one, `k8s_resource` configures options for
        that workload. If there's not exactly one, `k8s_resource` raises an error.
        (e.g., "redis" suffices if there's only one object named "redis", but if
        there's both a deployment and a cronjob named "redis", you'd need to specify
        "redis:deployment").
      new_name: if non-empty, will be used as the new name for this resource.
      port_forwards: Local ports to connect to the pod. If a target port is
        specified, that will be used. Otherwise, if the container exposes a port
        with the same number as the local port, that will be used. Otherwise,
        the default container port will be used.
        Example values: 9000 (connect localhost:9000 to the container's port 9000,
        if it is exposed, otherwise connect to the container's default port),
        '9000:8000' (connect localhost:9000 to the container port 8000),
        ['9000:8000', '9001:8001'] (connect localhost:9000 and :9001 to the
        container ports 8000 and 8001, respectively).
        [8000, 8001] (assuming the container exposes both 8000 and 8001, connect
        localhost:8000 and localhost:8001 to the container's ports 8000 and 8001,
        respectively).
      extra_pod_selectors: In addition to relying on Tilt's heuristics to automatically
        find K8S resources associated with this resource, a user may specify extra
        labelsets to force entities to be associated with this resource. An entity
        will be associated with this resource if it has all of the labels in at
        least one of the entries specified (but still also if it meets any of
        Tilt's usual mechanisms).
      trigger_mode: one of ``TRIGGER_MODE_AUTO`` or ``TRIGGER_MODE_MANUAL``. For more info, see the
        `Manual Update Control docs <manual_update_control.html>`_.
    """
    pass
def k8s_resource_assembly_version(version: int) -> None:
    """
    Specifies which version of k8s resource assembly loading to use.

    This function is deprecated and will be removed.
    See `Resource Assembly Migration </resource_assembly_migration.html>`_ for information.

    Changes the behavior of :meth:`k8s_resource`.
    """
def k8s_resource_v1_DEPRECATED(name: str, yaml: Union[str, Blob] = "", image: Union[str, FastBuild] = "",
  port_forwards: Union[str, int, List[int]] = [], extra_pod_selectors: Union[Dict[str, str], List[Dict[str, str]]] = []) -> None:
    """NOTE: This is actually named :meth:`k8s_resource`. This documents
    the behavior of this method after a call to :meth:`k8s_resource_assembly_version` with value `1`.

    This behavior is deprecated and will be removed.
    See `Resource Assembly Migration </resource_assembly_migration.html>`_ for information.

    Creates a kubernetes resource that tilt can deploy using the specified image.

    Args:
      name: What to call this resource in the UI. If ``image`` is not specified ``name`` will be used as the image to group by.
      yaml: Optional YAML. If this arg is not passed, we
        expect to be able to extract it from an existing resource
        (by looking for a k8s container running the specified ``image``).
      image: An optional Image. If the image is not passed,
        we expect to be able to extract it from an existing resource.
      port_forwards: Local ports to connect to the pod. If no
        target port is specified, will use the first container port.
        Example values: 9000 (connect localhost:9000 to the default container port),
        '9000:8000' (connect localhost:9000 to the container port 8000),
        ['9000:8000', '9001:8001'] (connect localhost:9000 and :9001 to the
        container ports 8000 and 8001, respectively).
      extra_pod_selectors: In addition to relying on Tilt's heuristics to automatically
        find K8S resources associated with this resource, a user may specify extra
        labelsets to force entities to be associated with this resource. An entity
        will be associated with this resource if it has all of the labels in at
        least one of the entries specified (but still also if it meets any of
        Tilt's usual mechanisms).
    """
    pass
def filter_yaml(yaml: Union[str, List[str], Blob], labels: dict=None, name: str=None, namespace: str=None, kind: str=None, api_version: str=None):
    """Call this with a path to a file that contains YAML, or with a ``Blob`` of YAML.
    (E.g. it can be called on the output of ``kustomize`` or ``helm``.)

    Captures the YAML entities that meet the filter criteria and returns them as a blob;
    returns the non-matching YAML as the second return value.

    For example, if you have a file of *all* your YAML, but only want to pass a few elements to Tilt: ::

      # extract all YAMLs matching labels "app=foobar"
      foobar_yaml, rest = filter_yaml('all.yaml', labels={'app': 'foobar'})
      k8s_yaml(foobar_yaml)

      # extract YAMLs of kind "deployment" with metadata.name "baz"
      baz_yaml, rest = filter_yaml(rest, name='baz', kind='deployment')
      k8s_yaml(baz_yaml)

    Args:
      yaml: Path(s) to YAML, or YAML as a ``Blob``.
      labels: return only entities matching these labels. (Matching entities
        must satisfy all of the specified label constraints, though they may have additional
        labels as well: see the `Kubernetes docs <https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/>`_
        for more info.)
      name: Case-insensitive regexp specifying the ``metadata.name`` property of entities to match.
      namespace: Case-insensitive regexp specifying the ``metadata.namespace`` property of entities to match.
      kind: Case-insensitive regexp specifying the kind of entities to match (e.g. "Service", "Deployment", etc.).
      api_version: Case-insensitive regexp specifying the apiVersion for `kind`, (e.g., "apps/v1").

    Returns:
      2-element tuple containing

      - **matching** (:class:`~api.Blob`): blob of YAML entities matching given filters
      - **rest** (:class:`~api.Blob`): the rest of the YAML entities
    """
    pass
def include(path: str) -> None:
    """Include another Tiltfile.

    Loads any builds or resources defined in that Tiltfile.

    If you want to define common functions or constants and
    import them into another Tiltfile, see the `load()` function.

    Example ::

      include('./frontend/Tiltfile')
      include('./backend/Tiltfile')
    """
def load(path: str, *args) -> None:
    """Include another Tiltfile.

    Similar to `include(path)`, but binds variables in the global scope.

    Used when you want to define common functions or constants
    to share across Tiltfiles.

    Example ::

      load('./lib/Tiltfile', 'create_namespace')
      create_namespace('frontend')
    """
def local(cmd: str) -> Blob:
    """Runs cmd, waits for it to finish, and returns its stdout as a ``Blob``.

    Args:
        cmd: shell command to execute on the local system.
    """
    pass
def read_file(file_path: str, default: str = None) -> Blob:
    """
    Reads file and returns its contents.

    If the `file_path` does not exist and `default` is not `None`, `default` will be returned.
    In any other case, an error reading `file_path` will be a Tiltfile load error.

    Args:
      file_path: Path to the file locally (absolute, or relative to the location of the Tiltfile).
      default: If not `None` and the file at `file_path` does not exist, this value will be returned."""
    pass
def watch_file(file_path: str) -> None:
    """Watches a file. If the file is changed a re-execution of the Tiltfile is triggered.

    Args:
      file_path: Path to the file locally (absolute, or relative to the location of the Tiltfile)."""
def kustomize(pathToDir: str) -> Blob:
    """Run `kustomize <https://github.com/kubernetes-sigs/kustomize>`_ on a given directory and return the resulting YAML as a Blob.

    Directory is watched (see ``watch_file``).

    Args:
      pathToDir: Path to the directory locally (absolute, or relative to the location of the Tiltfile)."""
    pass
def helm(pathToChartDir: str) -> Blob:
    """Run `helm template <https://docs.helm.sh/helm/#helm-template>`_ on a given directory that contains a chart and return the fully rendered YAML as a Blob.

    Chart directory is watched (see ``watch_file``).

    Args:
      pathToChartDir: Path to the directory locally (absolute, or relative to the location of the Tiltfile)."""
    pass
def fail(msg: str) -> None:
    """Raises an error that cannot be intercepted. Can be used anywhere in a Tiltfile.

    Args:
        msg: error message to report.
    """
    pass
def blob(contents: str) -> Blob:
    """Creates a Blob object that wraps the provided string. Useful for passing strings in to functions that expect a `Blob`, e.g. ``k8s_yaml``.

    Args:
        contents: string content to wrap.
    """
    pass
def listdir(directory: str, recursive: bool = False) -> List[str]:
    """Return the files at the top level of ``directory``.

    If ``recursive`` is ``True``, return all files inside the directory,
    recursively. The directory is watched (see ``watch_file``).
    """
    pass
def k8s_kind(kind: str, api_version: str=None, *, image_json_path: Union[str, List[str]]=[]):
    """Tell Tilt about a k8s kind.

    For CRDs that use images built by Tilt: pass ``image_json_path`` to tell
    Tilt where in the CRD's spec the image is specified.

    For CRDs that do not use images built by Tilt, but have pods you want in a
    Tilt resource: call this without ``image_json_path``, simply to specify
    that this type is a Tilt workload. Then call :meth:`k8s_resource` with
    ``extra_pod_selectors`` to specify which pods Tilt should associate with
    this resource.

    (The ``*`` in the signature means ``image_json_path`` must be passed as a
    keyword, e.g., ``image_json_path="{.spec.image}"``.)

    Example ::

        # Fission has a CRD named "Environment"
        k8s_yaml('deploy/fission.yaml')
        k8s_kind('Environment', image_json_path='{.spec.runtime.image}')

    Args:
        kind: Case-insensitive regexp specifying the value of the `kind` field
            in the k8s object definition (e.g., `"Deployment"`).
        api_version: Case-insensitive regexp specifying the apiVersion for
            `kind` (e.g., "apps/v1").
        image_json_path: Either a string or a list of strings containing json
            path(s) within that kind's definition specifying images deployed
            with k8s objects of that type. This uses the k8s json path
            template syntax, described
            `here <https://kubernetes.io/docs/reference/kubectl/jsonpath/>`_.
    """
    pass
# Starlark values produced by deserializing JSON/YAML: a mapping or a list.
StructuredDataType = Union[
    Dict[str, Any],
    List[Any],
]
def decode_json(json: str) -> StructuredDataType:
    """Deserialize a JSON string to Starlark. Fails if the string can't be parsed as JSON."""
    pass
def read_json(path: str, default: str = None) -> StructuredDataType:
    """Read the file at ``path`` and deserialize its contents as JSON.

    If ``path`` does not exist and ``default`` is not ``None``, ``default``
    is returned. In any other case, an error reading ``path`` is a Tiltfile
    load error.

    Args:
        path: Path to the file locally (absolute, or relative to the location
            of the Tiltfile).
        default: If not ``None`` and the file at ``path`` does not exist,
            this value will be returned.
    """
    pass
def read_yaml(path: str, default: str = None) -> StructuredDataType:
    """Read the file at ``path`` and deserialize its contents as YAML.

    If ``path`` does not exist and ``default`` is not ``None``, ``default``
    is returned. In any other case, an error reading ``path`` is a Tiltfile
    load error.

    Args:
        path: Path to the file locally (absolute, or relative to the location
            of the Tiltfile).
        default: If not ``None`` and the file at ``path`` does not exist,
            this value will be returned.
    """
    pass
def default_registry(registry: str) -> None:
    """Rename any images Tilt builds so they live under the given Docker registry.

    Useful if, e.g., a repo is configured to push to Google Container
    Registry but you want Elastic Container Registry instead, without having
    to edit a bunch of configs. For example,
    ``default_registry("gcr.io/myrepo")`` would cause ``docker.io/alpine`` to
    be rewritten to ``gcr.io/myrepo/docker.io_alpine``.

    Args:
        registry: The registry that all built images should be renamed to
            use. Images are renamed following these rules:
            1. Replace ``/`` and ``@`` with ``_``.
            2. Prepend the value of ``registry`` and a ``/``.
            E.g., with ``default_registry('gcr.io/myorg')``, ``user-service``
            becomes ``gcr.io/myorg/user-service``.
            (Note: this logic is currently crude, on the assumption that
            development image names are ephemeral and unimportant. `Please
            let us know <https://github.com/windmilleng/tilt/issues>`_ if
            they don't suit you!)

    Cf. our `using a personal registry guide
    <https://docs.tilt.dev/personal_registry.html>`_.
    """
    pass
class CustomBuild:
    """An image that was created with :meth:`custom_build`."""

    def add_fast_build(self) -> FastBuild:
        """Return a ``FastBuild`` associated with the image built by ``custom_build``.

        When the container needs to be rebuilt it is built using the
        ``CustomBuild``; otherwise the update is done with the ``FastBuild``
        instructions.
        """
        # Fix: the original stub omitted ``self``, so calling
        # ``some_custom_build.add_fast_build()`` would raise TypeError.
        pass
def custom_build(ref: str, command: str, deps: List[str], tag: str = "", disable_push: bool = False, live_update: List[LiveUpdateStep]=[], match_in_env_vars: bool = False, ignore: Union[str, List[str]] = [], entrypoint: str="") -> CustomBuild:
    """Provide a custom command that will build an image.

    Returns an object which can be used to create a FastBuild.

    The command *must* publish an image with the name & tag ``$EXPECTED_REF``.
    Tilt will raise an error if the command exits successfully but the
    registry does not contain an image with the ref ``$EXPECTED_REF``.

    Example ::

        custom_build(
          'gcr.io/foo',
          'docker build -t $EXPECTED_REF .',
          ['.'],
        )

    Note: the `entrypoint` parameter is not supported for Docker Compose
    resources. If you need it for your use case, let us know.

    Args:
        ref: name for this image (e.g. 'myproj/backend' or
            'myregistry/myproj/backend'). If this image will be used in a k8s
            resource(s), this ref must match the ``spec.container.image``
            param for that resource(s).
        command: a command that, when run in the shell, builds an image and
            puts it in the registry as ``ref``. Must produce an image named
            ``$EXPECTED_REF``.
        deps: a list of files or directories to be added as dependencies to
            this image. Tilt will watch those files and will rebuild the
            image when they change.
        tag: Some tools can't change the image tag at runtime. They need a
            pre-specified tag. Tilt will set ``$EXPECTED_REF = image_name:tag``,
            then re-tag it with its own tag before pushing to your cluster.
            See `the bazel guide <integrating_bazel_with_tilt.html>`_ for an
            example.
        disable_push: whether Tilt should push the image to the registry that
            the Kubernetes cluster has access to. Set this to true if your
            command handles pushing as well.
        live_update: set of steps for updating a running container (see
            `Live Update documentation <live_update_reference.html>`_).
        match_in_env_vars: specifies that k8s objects can reference this
            image in their environment variables, and Tilt will handle those
            variables the same as it usually handles a k8s container spec's
            `image`s.
        ignore: set of file patterns that will be ignored. Ignored files will
            not trigger builds and will not be included in images. Follows
            the `dockerignore syntax
            <https://docs.docker.com/engine/reference/builder/#dockerignore-file>`_.
        entrypoint: command to run when this container starts. Takes
            precedence over the container's ``CMD`` or ``ENTRYPOINT``, and
            over a `container command specified in k8s YAML
            <https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/>`_.
            Will be evaluated in a shell context: e.g. ``entrypoint="foo.sh bar"``
            will be executed in the container as ``/bin/sh -c 'foo.sh bar'``.
    """
    pass
class K8sObjectID:
    """Identifier of a single k8s object, handed to ``workload_to_resource_function`` callbacks.

    Attributes:
        name (str): The object's name (e.g., `"my-service"`)
        kind (str): The object's kind (e.g., `"deployment"`)
        namespace (str): The object's namespace (e.g., `"default"`)
        group (str): The object's group (e.g., `"apps"`)
    """
    pass
def workload_to_resource_function(fn: Callable[[K8sObjectID], str]) -> None:
    """Provide a function used to name `Tilt resources <tiltfile_concepts.html#resources>`_.

    (Only supported with :meth:`k8s_resource_assembly_version` >= 2.)

    Tilt will auto-generate resource names for you. If you do not like the
    names it generates, use this to customize how Tilt generates names.

    Example ::

        # name all tilt resources after the k8s object namespace + name
        def resource_name(id):
            return id.namespace + '-' + id.name
        workload_to_resource_function(resource_name)

    The names it generates must be unique (i.e., two workloads can't map to
    the same resource name).

    Args:
        fn: A function that takes a :class:`K8sObjectID` and returns a `str`.
            Tilt calls this function once per workload to determine that
            workload's resource name.
    """
    pass
def k8s_context() -> str:
    """Return the name of the Kubernetes context Tilt is connecting to.

    Example ::

        if k8s_context() == 'prod':
            fail("failing early to avoid overwriting prod")
    """
    pass
def allow_k8s_contexts(contexts: Union[str, List[str]]) -> None:
    """Specify the k8s context names Tilt is allowed to run against.

    To help reduce the chances you accidentally use Tilt to deploy to your
    production cluster, Tilt will error unless at least one of these is true
    of the active k8s context (i.e., what is returned by
    `kubectl config current-context`):

    1. The k8s API URL is on localhost.
    2. The context name is one of a few known local context names
       (e.g., "minikube").
    3. The context name is explicitly passed to `allow_k8s_contexts` in the
       Tiltfile.

    Args:
        contexts: a string or list of strings, specifying one or more k8s
            context names that Tilt is allowed to run in. This list is in
            addition to the default of all known-local clusters.

    Example ::

        allow_k8s_contexts('my-staging-cluster')
        allow_k8s_contexts(['my-staging-cluster', 'gke_some-project-123456_us-central1-b_windmill'])
    """
    pass
def enable_feature(feature_name: str) -> None:
    """Configure Tilt to enable non-default features (e.g., experimental or deprecated).

    The Tilt features controlled by this are generally in an unfinished
    state and not yet documented.

    As a Tiltfile author, you don't need to worry about this function unless
    something else directs you to it (e.g., an experimental feature doc, or
    a conversation with a Tilt contributor).

    As a Tiltfile reader, you can probably ignore this, or ask the person
    who added it to the Tiltfile what it's doing there.

    Args:
        feature_name: name of the feature to enable
    """
| 48.957912 | 409 | 0.720505 |
7a5ef0d1d60184e8ef2a2b4a6360f59137607317 | 488 | py | Python | tests/conftest.py | odra/kelo | 22930954c6a75ba3e60ec07d258d65d13533b5b0 | [
"MIT"
] | null | null | null | tests/conftest.py | odra/kelo | 22930954c6a75ba3e60ec07d258d65d13533b5b0 | [
"MIT"
] | null | null | null | tests/conftest.py | odra/kelo | 22930954c6a75ba3e60ec07d258d65d13533b5b0 | [
"MIT"
] | null | null | null | import pytest
@pytest.fixture
def hello_world_fn():
    """Fixture: a zero-argument callable returning the literal 'hello world'."""
    def _greet():
        return 'hello world'
    return _greet
@pytest.fixture
def greetings_fn():
    """Fixture: a callable that greets the given name."""
    def _greet(name):
        return 'hello %s' % name
    return _greet
@pytest.fixture
def greetings_default_fn():
    """Fixture: a greeting callable whose name argument defaults to 'nobody'."""
    def _greet(name='nobody'):
        return 'hello %s' % name
    return _greet
@pytest.fixture
def complex_fn():
    """Fixture: a callable mixing positional, defaulted and **kwargs arguments."""
    def _describe(name, age=32, **kwargs):
        country = kwargs.get('country', 'nowhere')
        return '%s is %s years old and lives in %s' % (name, age, country)
    return _describe
| 16.266667 | 95 | 0.655738 |
e135cd6b39b52f0b22ce50b63cd173ae37028efc | 3,445 | py | Python | tensorflow_addons/layers/wrappers_test.py | tzachar/addons | e352207da32e4670a36a295ea477c476118cb0d9 | [
"Apache-2.0"
] | null | null | null | tensorflow_addons/layers/wrappers_test.py | tzachar/addons | e352207da32e4670a36a295ea477c476118cb0d9 | [
"Apache-2.0"
] | null | null | null | tensorflow_addons/layers/wrappers_test.py | tzachar/addons | e352207da32e4670a36a295ea477c476118cb0d9 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow_addons.layers import wrappers
from tensorflow_addons.utils import test_utils
@test_utils.run_all_in_graph_and_eager_modes
class WeightNormalizationTest(tf.test.TestCase):
    """Unit tests for the WeightNormalization layer wrapper."""

    def test_weightnorm(self):
        # Smoke test: wrapping a Conv2D layer builds and runs through
        # Keras' generic layer_test machinery.
        test_utils.layer_test(
            wrappers.WeightNormalization,
            kwargs={
                'layer': tf.keras.layers.Conv2D(5, (2, 2)),
            },
            input_shape=(2, 4, 4, 3))

    def test_weightnorm_no_bias(self):
        # Wrapped layers without a bias term must also be supported.
        test_utils.layer_test(
            wrappers.WeightNormalization,
            kwargs={
                'layer': tf.keras.layers.Dense(5, use_bias=False),
            },
            input_shape=(2, 4))

    def _check_data_init(self, data_init, input_data, expected_output):
        # Helper: an identity-initialized Dense layer, so the wrapper's
        # effect on the output can be predicted exactly.
        layer = tf.keras.layers.Dense(
            input_data.shape[-1],
            activation=None,
            kernel_initializer='identity',
            bias_initializer='zeros')
        test_utils.layer_test(
            wrappers.WeightNormalization,
            kwargs={
                'layer': layer,
                'data_init': data_init,
            },
            input_data=input_data,
            expected_output=expected_output)

    def test_weightnorm_with_data_init_is_false(self):
        # Without data-dependent init, the identity layer passes input through.
        input_data = np.array([[[-4, -4], [4, 4]]], dtype=np.float32)
        self._check_data_init(
            data_init=False, input_data=input_data, expected_output=input_data)

    def test_weightnorm_with_data_init_is_true(self):
        # With data-dependent init the output is rescaled (by 1/4 for this input).
        input_data = np.array([[[-4, -4], [4, 4]]], dtype=np.float32)
        self._check_data_init(
            data_init=True,
            input_data=input_data,
            expected_output=input_data / 4)

    def test_weightnorm_non_layer(self):
        # Wrapping something that is not a keras layer must fail fast.
        # NOTE(review): shape (2, 4, 43) looks like a typo for (2, 4, 3);
        # harmless here since only the type check matters, but confirm.
        images = tf.random.uniform((2, 4, 43))
        with self.assertRaises(AssertionError):
            wrappers.WeightNormalization(images)

    def test_weightnorm_non_kernel_layer(self):
        # Layers without a `kernel` attribute (e.g. pooling) are rejected when called.
        # NOTE(review): assertRaisesRegexp is the deprecated alias of
        # assertRaisesRegex -- consider updating.
        images = tf.random.uniform((2, 2, 2))
        with self.assertRaisesRegexp(ValueError, 'contains a `kernel`'):
            non_kernel_layer = tf.keras.layers.MaxPooling2D(2, 2)
            wn_wrapper = wrappers.WeightNormalization(non_kernel_layer)
            wn_wrapper(images)

    def test_weightnorm_with_time_dist(self):
        # The wrapper must compose with TimeDistributed; successfully
        # building the model (no exception) is the assertion.
        batch_shape = (32, 16, 64, 64, 3)
        inputs = tf.keras.layers.Input(batch_shape=batch_shape)
        a = tf.keras.layers.Conv2D(3, 5)
        b = wrappers.WeightNormalization(a)
        out = tf.keras.layers.TimeDistributed(b)(inputs)
        model = tf.keras.Model(inputs, out)
if __name__ == "__main__":
    # Allow running this test file directly: `python wrappers_test.py`.
    tf.test.main()
| 36.263158 | 79 | 0.643832 |
556b08bb68385cd7490c54f3ae939835426ba2cd | 4,484 | py | Python | config/settings/base.py | siauPatrick/ztpd | cacc4510df07ceaf2422c2f87794a9b8df6c2421 | [
"BSD-3-Clause"
] | null | null | null | config/settings/base.py | siauPatrick/ztpd | cacc4510df07ceaf2422c2f87794a9b8df6c2421 | [
"BSD-3-Clause"
] | null | null | null | config/settings/base.py | siauPatrick/ztpd | cacc4510df07ceaf2422c2f87794a9b8df6c2421 | [
"BSD-3-Clause"
] | 1 | 2018-07-24T10:23:32.000Z | 2018-07-24T10:23:32.000Z | import environ
# Project layout: ROOT_DIR is the repository root (three levels above this
# file); APPS_DIR holds the Django apps.
ROOT_DIR = environ.Path(__file__) - 3
APPS_DIR = ROOT_DIR.path('apps')

# Environment-driven configuration (django-environ); values are read from the
# repo-root `.env` file.
env = environ.Env()
env.read_env(str(ROOT_DIR.path('.env')))

# GENERAL
# ------------------------------------------------------------------------------
# Debug is off unless DJANGO_DEBUG is set in the environment.
DEBUG = env.bool('DJANGO_DEBUG', False)
# Local time zone; datetimes are stored in UTC (USE_TZ below).
TIME_ZONE = 'UTC'
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
USE_I18N = True
USE_L10N = True
USE_TZ = True

# DATABASES
# ------------------------------------------------------------------------------
# 12-factor style DATABASE_URL, with a local sqlite fallback for development.
DATABASES = {
    'default': env.db('DATABASE_URL', default='sqlite:///ztpd.db'),
}
# Wrap every HTTP request in a database transaction.
DATABASES['default']['ATOMIC_REQUESTS'] = True

# URLS
# ------------------------------------------------------------------------------
ROOT_URLCONF = 'config.urls'
WSGI_APPLICATION = 'config.wsgi.application'
# APPS
# ------------------------------------------------------------------------------
# Apps are grouped by origin; INSTALLED_APPS is their concatenation, in order.
DJANGO_APPS = [
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.admin',
]
THIRD_PARTY_APPS = [
    'rest_framework',
]
LOCAL_APPS = [
    'citizens.apps.CitizensConfig'
]
INSTALLED_APPS = [*DJANGO_APPS, *THIRD_PARTY_APPS, *LOCAL_APPS]
# PASSWORDS
# ------------------------------------------------------------------------------
# All validators live in django.contrib.auth.password_validation; build the
# setting from the class names to avoid repeating the module path.
_PASSWORD_VALIDATOR_NAMES = (
    'UserAttributeSimilarityValidator',
    'MinimumLengthValidator',
    'CommonPasswordValidator',
    'NumericPasswordValidator',
)
AUTH_PASSWORD_VALIDATORS = [
    {'NAME': 'django.contrib.auth.password_validation.%s' % _name}
    for _name in _PASSWORD_VALIDATOR_NAMES
]
# MIDDLEWARE
# ------------------------------------------------------------------------------
# Order matters: security first, sessions before auth, messages after auth.
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

# STATIC
# ------------------------------------------------------------------------------
STATIC_ROOT = str(ROOT_DIR('staticfiles'))  # target of `collectstatic`
STATIC_URL = '/static/'
STATICFILES_DIRS = [
    str(APPS_DIR.path('static')),
]

# MEDIA
# ------------------------------------------------------------------------------
MEDIA_ROOT = str(APPS_DIR('media'))  # user-uploaded files
MEDIA_URL = '/media/'
# TEMPLATES
# ------------------------------------------------------------------------------
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            str(APPS_DIR.path('templates')),
        ],
        'OPTIONS': {
            # Mirror DEBUG so template errors surface in development.
            'debug': DEBUG,
            # Explicit loaders: filesystem DIRS first, then app templates.
            'loaders': [
                'django.template.loaders.filesystem.Loader',
                'django.template.loaders.app_directories.Loader',
            ],
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.template.context_processors.i18n',
                'django.template.context_processors.media',
                'django.template.context_processors.static',
                'django.template.context_processors.tz',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
# SMTP by default; override with DJANGO_EMAIL_BACKEND (e.g. console backend in dev).
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.smtp.EmailBackend')

# ADMIN
# ------------------------------------------------------------------------------
ADMIN_URL = r'^admin/'
ADMINS = [
    ('Roman Afanaskin', 'r.afanaskin@yandex.ru'),
]
MANAGERS = ADMINS
# Django REST framework
# ------------------------------------------------------------------------------
# Global DRF defaults: page-number pagination, 10 items per page.
REST_FRAMEWORK = dict(
    DEFAULT_PAGINATION_CLASS='rest_framework.pagination.PageNumberPagination',
    PAGE_SIZE=10,
)
# Your stuff...
# ------------------------------------------------------------------------------
| 31.356643 | 98 | 0.51182 |
f94978e07c2c7aaaa6290fec98c797788189cf57 | 4,214 | py | Python | Python3/aoc2020/solutions/day_19.py | maddenvvs/advent-of-code-2020 | f0ed187698878102598e53f356ad47c26b5a44e1 | [
"MIT"
] | 1 | 2021-01-09T15:29:50.000Z | 2021-01-09T15:29:50.000Z | Python3/aoc2020/solutions/day_19.py | maddenvvs/advent-of-code-2020 | f0ed187698878102598e53f356ad47c26b5a44e1 | [
"MIT"
] | null | null | null | Python3/aoc2020/solutions/day_19.py | maddenvvs/advent-of-code-2020 | f0ed187698878102598e53f356ad47c26b5a44e1 | [
"MIT"
] | null | null | null | from __future__ import annotations
from abc import ABC, abstractmethod
from typing import Iterable, Iterator, List, Tuple
from .solution import Solution
# A rule is identified by the integer preceding ':' in the puzzle input.
RuleName = int
class Rule(ABC):
    """Abstract grammar rule.

    ``try_match`` lazily yields every index just past a successful match of
    this rule in ``message`` starting at ``idx``; yielding nothing means the
    rule does not match at that position.
    """

    @abstractmethod
    def try_match(self, message: str, idx: int) -> Iterator[int]:
        pass
class MatchLetter(Rule):
    """Terminal rule: match one literal character."""

    __slots__ = "letter"

    def __init__(self, letter: str):
        self.letter = letter

    def try_match(self, message: str, idx: int) -> Iterator[int]:
        # Slicing handles an out-of-range idx (empty slice never equals the
        # one-character literal); a match consumes exactly one character.
        if message[idx:idx + 1] == self.letter:
            yield idx + 1
class MatchRuleName(Rule):
    """Indirection rule: defer matching to a named rule in the shared registry."""

    __slots__ = "rules", "name"

    rules: Rules

    def __init__(self, rules: Rules, name: RuleName):
        self.rules = rules
        self.name = name

    def try_match(self, message: str, idx: int) -> Iterator[int]:
        # Late lookup: the referenced rule may be redefined after this
        # object is created (used by the part-2 rule rewrite).
        target = self.rules.graph[self.name]
        yield from target.try_match(message, idx)
class And(Rule):
    """Sequence rule: every sub-rule must match, consuming the message in order."""

    __slots__ = "rules"

    def __init__(self, rules: Iterable[Rule]):
        # Materialize once: try_match iterates self.rules on every call, so a
        # one-shot generator argument would silently match nothing after the
        # first use.
        self.rules = tuple(rules)

    def try_match(self, message: str, idx: int) -> Iterator[int]:
        """Yield every end index reachable by matching all sub-rules in sequence."""
        positions = [idx]
        for rule in self.rules:
            # Advance every candidate position through the current sub-rule.
            positions = [end
                         for start in positions
                         for end in rule.try_match(message, start)]
            if not positions:
                # Dead end: no candidate survived, no need to consult the rest.
                return
        yield from positions
class Or(Rule):
    """Alternation rule: match if any sub-rule matches."""

    __slots__ = "rules"

    def __init__(self, rules: Iterable[Rule]):
        # Materialize once: try_match may run many times, and a one-shot
        # generator argument would be exhausted after the first call.
        self.rules = tuple(rules)

    def try_match(self, message: str, idx: int) -> Iterator[int]:
        """Yield the end indices produced by each alternative, in order."""
        for rule in self.rules:
            yield from rule.try_match(message, idx)
class Rules:
    """Registry of grammar rules keyed by rule number, plus the input parser."""

    __slots__ = "graph"

    def __init__(self):
        # Maps RuleName -> Rule; MatchRuleName objects hold a reference to
        # this registry and resolve names lazily.
        self.graph = {}

    def add_rule(self, name: RuleName, definition: Rule) -> None:
        self.graph[name] = definition

    def matches_rule(self, message: str, rule_name: RuleName) -> bool:
        # A full match is one whose end index equals the message length,
        # i.e. the rule consumed the entire message.
        last_idx_match = list(self.graph[rule_name].try_match(message, 0))
        return len(message) in last_idx_match

    def make_changes_in_rules(self):
        # Part-2 twist of AoC day 19: rules 8 and 11 become self-recursive.
        changes = ("8: 42 | 42 8\n"
                   "11: 42 31 | 42 11 31")
        self.make_rules_change(changes)

    def make_rules_change(self, new_rules: str) -> None:
        # Re-parsing into this registry overwrites prior definitions in place.
        Rules.populate_rules(self, new_rules)

    @classmethod
    def from_str(cls, rules_text: str) -> Rules:
        """Build a registry from the newline-separated rules section of the input."""
        rules = cls()
        cls.populate_rules(rules, rules_text)
        return rules

    @classmethod
    def populate_rules(cls, rules: Rules, rules_text: str) -> None:
        for rule_text in rules_text.splitlines():
            rules.add_rule(*cls.parse_rule(rule_text, rules))

    @classmethod
    def parse_rule(cls, rule_text: str, rules: Rules) -> Tuple[RuleName, Rule]:
        # Input line format: "<number>: <alt> | <alt> | ..."
        name_str, def_str = rule_text.split(": ")
        name = int(name_str, base=10)
        alternatives = def_str.split(" | ")
        if len(alternatives) > 1:
            return name, Or([cls.parse_rule_definition(alt, rules) for alt in alternatives])
        else:
            return name, cls.parse_rule_definition(alternatives[0], rules)

    @classmethod
    def parse_rule_definition(cls, definition_text: str, rules: Rules) -> Rule:
        # An alternative is either a quoted literal ("a") or a sequence of
        # rule references (e.g. "4 1 5").
        parts = definition_text.split()
        if len(parts) > 1:
            return And([MatchRuleName(rules, int(r, base=10)) for r in parts])
        else:
            if parts[0].startswith("\""):
                # parts[0] looks like '"a"'; index 1 is the literal character.
                return MatchLetter(parts[0][1])
            return MatchRuleName(rules, int(parts[0], base=10))
def parse_input_messages(messages_text: str) -> Tuple[Rules, List[str]]:
    """Split puzzle input (rules, blank line, messages) into a registry and message list."""
    rules_section, messages_section = messages_text.split("\n\n")
    return Rules.from_str(rules_section), messages_section.splitlines()
def count_messages_match_rule_0(rules: Rules, messages: Iterable[str]) -> int:
    """Return how many messages fully match rule 0 of the given rule registry."""
    matched = 0
    for message in messages:
        if rules.matches_rule(message, 0):
            matched += 1
    return matched
class Day19(Solution):
    """Advent of Code 2020, day 19: count messages matching grammar rule 0."""

    def first_task(self, messages_text: str) -> str:
        # Part 1: count matches with the rules exactly as given.
        rules, messages = parse_input_messages(messages_text)
        return str(count_messages_match_rule_0(rules, messages))

    def second_task(self, messages_text: str) -> str:
        # Part 2: rules 8 and 11 are rewritten to be recursive before counting.
        rules, messages = parse_input_messages(messages_text)
        rules.make_changes_in_rules()
        return str(count_messages_match_rule_0(rules, messages))
| 28.093333 | 92 | 0.637874 |
3888a5847651fb56006574ea8b2f87679cad5236 | 485 | py | Python | vmraid/tests/test_geo_ip.py | sowrisurya/vmraid | f833e00978019dad87af80b41279c0146c063ed5 | [
"MIT"
] | null | null | null | vmraid/tests/test_geo_ip.py | sowrisurya/vmraid | f833e00978019dad87af80b41279c0146c063ed5 | [
"MIT"
] | null | null | null | vmraid/tests/test_geo_ip.py | sowrisurya/vmraid | f833e00978019dad87af80b41279c0146c063ed5 | [
"MIT"
] | null | null | null | # Copyright (c) 2015, VMRaid Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import unittest
class TestGeoIP(unittest.TestCase):
    """Tests for vmraid.sessions.get_geo_ip_country."""

    def test_geo_ip(self):
        # NOTE(review): this bare `return` disables the whole test so it
        # silently passes -- presumably because the GeoIP database/service is
        # unavailable in CI. Consider self.skipTest(...) so the skip is
        # visible in test reports. TODO confirm why it was disabled.
        return
        from vmraid.sessions import get_geo_ip_country
        self.assertEqual(get_geo_ip_country("223.29.223.255"), "India")
        self.assertEqual(get_geo_ip_country("4.18.32.80"), "United States")
        self.assertEqual(get_geo_ip_country("217.194.147.25"), "United States")
1b2be6fb33e6e356070214c7b0073c0f57863606 | 796 | py | Python | setup.py | sasjonge/jupyter-knowrob | 1df408e86cae7629e57f531a1ca570a705b6ab50 | [
"MIT"
] | null | null | null | setup.py | sasjonge/jupyter-knowrob | 1df408e86cae7629e57f531a1ca570a705b6ab50 | [
"MIT"
] | null | null | null | setup.py | sasjonge/jupyter-knowrob | 1df408e86cae7629e57f531a1ca570a705b6ab50 | [
"MIT"
] | null | null | null | import setuptools
# Long description for PyPI comes straight from the README.
# Fix: specify the encoding explicitly -- the platform default may not be
# UTF-8 and would fail on non-ASCII README content.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

setuptools.setup(
    name="jknowrob",
    version="0.0.1",
    author="Sascha Jongebloed",
    author_email="sasjonge@uni-bremen.de",
    description="A Jupyter Kernel for SWI-Prolog.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    # NOTE(review): "jupyer" looks like a typo for "jupyter" -- confirm the
    # actual repository slug before changing it here.
    url="https://github.com/sasjonge/jupyer-knowrob.git",
    packages=setuptools.find_packages(),
    install_requires=[
        "pyswip",
        "ipykernel"
    ],
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    # Console entry point that launches the kernel.
    entry_points={
        'console_scripts': ['jknowrobkernel=jknowrob.jupyter:main'],
    }
)
| 27.448276 | 68 | 0.646985 |
2ec7907a050daaddea1428e684528c81dc3c6af5 | 6,667 | py | Python | _modules/jenkins_common.py | fuez/salt-formula-jenkins | 89d20172ba5b89bb734c02b99133335172ae406a | [
"Apache-2.0"
] | null | null | null | _modules/jenkins_common.py | fuez/salt-formula-jenkins | 89d20172ba5b89bb734c02b99133335172ae406a | [
"Apache-2.0"
] | null | null | null | _modules/jenkins_common.py | fuez/salt-formula-jenkins | 89d20172ba5b89bb734c02b99133335172ae406a | [
"Apache-2.0"
] | null | null | null | import logging
from salt.exceptions import SaltInvocationError
from string import Template
# Optional third-party dependencies: record their availability so
# __virtual__() can refuse to load this execution module gracefully instead
# of crashing at import time.
try:
    import bcrypt
    HAS_BCRYPT = True
except ImportError:
    HAS_BCRYPT = False
try:
    import requests
    HAS_REQUESTS = True
except ImportError:
    HAS_REQUESTS = False
logger = logging.getLogger(__name__)
def __virtual__():
    '''
    Only load this module when both the bcrypt and requests libraries exist.
    '''
    # Report the first missing dependency, mirroring salt's (False, reason)
    # loader convention; fall through to True when everything is available.
    for available, libname in ((HAS_BCRYPT, 'bcrypt'), (HAS_REQUESTS, 'requests')):
        if not available:
            return (
                False,
                'Can not load module jenkins_common: %s library not found' % libname)
    return True
def call_groovy_script(script, props, username=None,
                       password=None, success_status_codes=[200]):
    """
    Common method for calling the Jenkins groovy script API.

    :param script: groovy script template
    :param props: groovy script properties
    :param username: jenkins username (optional,
        if missing creds from salt will be used)
    :param password: jenkins password (optional,
        if missing creds from salt will be used)
    :param success_status_codes: HTTP status codes treated as success
        (optional); in some cases we want to declare an error call as success
    :returns: HTTP dict {status, code, msg}
    """
    # NOTE: success_status_codes uses a mutable default; it is only read,
    # never mutated, so the shared-default pitfall does not apply here.
    ret = {
        "status": "FAILED",
        "code": 999,  # sentinel: no HTTP response recorded yet
        "msg": ""
    }
    # Salt-configured credentials are the default; explicit args override them.
    jenkins_url, jenkins_user, jenkins_password = get_jenkins_auth()
    if username:
        jenkins_user = username
    if password:
        jenkins_password = password
    if not jenkins_url:
        raise SaltInvocationError('No Jenkins URL found.')
    # token_obj is None when Jenkins has CSRF protection disabled.
    token_obj = get_api_crumb(jenkins_url, jenkins_user, jenkins_password)
    req_data = {"script": render_groovy_script(script, props)}
    headers = {}
    if token_obj:
        headers[token_obj["crumbRequestField"]] = token_obj["crumb"]
    logger.debug("Calling Jenkins script API with URL: %s", jenkins_url)
    req = requests.post('%s/scriptText' % jenkins_url,
                        auth=(jenkins_user, jenkins_password) if jenkins_user else None,
                        data=req_data, headers=headers)
    ret["code"] = req.status_code
    ret["msg"] = req.text
    if req.status_code in success_status_codes:
        ret["status"] = "SUCCESS"
        logger.debug("Jenkins script API call success: %s", ret)
    else:
        logger.error("Jenkins script API call failed. \
Return code %s. Text: %s", req.status_code, req.text)
    return ret
def render_groovy_script(script_template, props):
    """
    Render a groovy script template by substituting ``$name`` placeholders.

    Uses ``string.Template.safe_substitute``, so placeholders missing from
    ``props`` are left intact rather than raising.

    :param script_template: groovy script template
    :param props: mapping of placeholder names to values
    :returns: generated groovy script
    """
    return Template(script_template).safe_substitute(props)
def get_api_crumb(jenkins_url=None, jenkins_user=None, jenkins_password=None):
    """
    Obtain a Jenkins API crumb, if CSRF protection is enabled.

    Jenkins params can be given explicitly; if omitted they are read from salt.

    :param jenkins_url: Jenkins URL (optional)
    :param jenkins_user: Jenkins admin username (optional)
    :param jenkins_password: Jenkins admin password (optional)
    :returns: parsed crumb JSON dict, or None when no crumb is needed or
        Jenkins is unreachable/unauthorized
    """
    if not jenkins_url:
        jenkins_url, jenkins_user, jenkins_password = get_jenkins_auth()
    logger.debug("Obtaining Jenkins API crumb for URL: %s", jenkins_url)
    tokenReq = requests.get("%s/crumbIssuer/api/json" % jenkins_url,
                            auth=(jenkins_user, jenkins_password) if jenkins_user else None)
    if tokenReq.status_code == 200:
        logger.debug("Got Jenkins API crumb: %s", tokenReq.json())
        return tokenReq.json()
    elif tokenReq.status_code in [404, 401, 502, 503]:
        # 404 means CSRF security is disabled, so an api crumb is not necessary,
        # 401 means unauthorized,
        # 50x means jenkins is unavailable - fail in call_groovy_script, but
        # not here, so the calling state can handle the exception.
        logger.debug("Got error %s: %s", str(tokenReq.status_code), tokenReq.reason)
        return None
    else:
        raise Exception("Cannot obtain Jenkins API crumb. Status code: %s. Text: %s" %
                        (tokenReq.status_code, tokenReq.text))
def get_jenkins_auth():
    """
    Resolve Jenkins connection settings from salt.

    Each of url/user/password is looked up in minion config under
    'jenkins.<key>' and 'jenkins:<key>', then in pillar under
    'jenkins.<key>', first non-empty value wins.

    :returns: tuple (jenkins_url, jenkins_user, jenkins_password)
    """
    def _setting(key):
        return (__salt__['config.get']('jenkins.' + key) or
                __salt__['config.get']('jenkins:' + key) or
                __salt__['pillar.get']('jenkins.' + key))

    return (_setting('url'), _setting('user'), _setting('password'))
def encode_password(password):
    """
    Hash a plain-text password with bcrypt (2a prefix), Jenkins' own scheme.

    :param password: plain-text password (``str`` or ``bytes``)
    :returns: bcrypt hash as returned by ``bcrypt.hashpw``, or ``None`` for
        unsupported input types (preserving the original silent behavior)
    """
    if isinstance(password, str):
        # Fix: bcrypt.hashpw requires bytes on Python 3; passing the str
        # directly (as the original code did) raises TypeError there.
        password = password.encode('utf-8')
    if isinstance(password, bytes):
        return bcrypt.hashpw(password, bcrypt.gensalt(prefix=b"2a"))
    return None
def load_template(salt_url, env):
    """
    Return the contents of the file at ``salt_url``, cached locally via
    salt's ``cp.cache_file`` for the given environment.
    """
    cached_path = __salt__['cp.cache_file'](salt_url, env)
    with open(cached_path, 'r') as handle:
        return handle.read()
def api_call(name, template, success_msgs, params, display_name):
    """
    Run a groovy script against Jenkins and translate the outcome into a
    salt state-style return dict.

    :param name: state/resource name (used as the key in ``changes``)
    :param template: groovy script template passed to call_groovy_script
    :param success_msgs: script outputs counted as success; only the first
        entry is reported as a change (others mean "already in desired state")
    :param params: substitution properties for the template
    :param display_name: human-readable entity name used in comments
    :returns: salt state dict with name/changes/result/comment
    """
    test = __opts__['test']  # noqa
    ret = {
        'name': name,
        'changes': {},
        'result': False,
        'comment': '',
    }
    result = False
    if test:
        # Dry run: pretend the primary success outcome happened.
        status = success_msgs[0]
        ret['changes'][name] = status
        ret['comment'] = '%s "%s" %s' % (display_name, name, status.lower())
    else:
        call_result = call_groovy_script(template, params)
        if call_result["code"] == 200 and call_result["msg"].strip() in success_msgs:
            status = call_result["msg"]
            # Only the first success message counts as an actual change; any
            # other success message means nothing needed to be done.
            # NOTE(review): the membership test above strips the message but
            # this comparison does not -- a trailing newline would skip the
            # changes entry. Confirm whether that is intended.
            if status == success_msgs[0]:
                ret['changes'][name] = status
            ret['comment'] = '%s "%s" %s' % (display_name, name, status.lower())
            result = True
        else:
            status = 'FAILED'
            logger.error(
                'Jenkins API call failure: %s', call_result["msg"])
            ret['comment'] = 'Jenkins API call failure: %s' % (call_result[
                "msg"])
    # In test (dry-run) mode result is None ("would change"); otherwise real outcome.
    ret['result'] = None if test else result
    return ret
| 33.671717 | 92 | 0.639268 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.