hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4881f767953993d7e59e17538dbfd7fda1e5c264 | 983 | py | Python | cavirtex/__init__.py | kyonetca/cavirtex-sdk-python | 93f822474df7ddfebe18190f4cdd13faf9a37d84 | [
"MIT"
] | null | null | null | cavirtex/__init__.py | kyonetca/cavirtex-sdk-python | 93f822474df7ddfebe18190f4cdd13faf9a37d84 | [
"MIT"
] | null | null | null | cavirtex/__init__.py | kyonetca/cavirtex-sdk-python | 93f822474df7ddfebe18190f4cdd13faf9a37d84 | [
"MIT"
] | 1 | 2021-01-18T21:01:56.000Z | 2021-01-18T21:01:56.000Z | __author__ = '''
Dawson Reid (dreid93@gmail.com)
'''
# configure logging
import logging.config
logging.config.dictConfig({
    'version': 1,
    # BUG FIX: was misspelled 'disable_exsisting_loggers'. dictConfig ignores
    # unknown keys, so the option silently kept its default (True) and any
    # logger created before this module was imported got disabled.
    'disable_existing_loggers': False,
    'formatters': {
        # Plain, colourless formatter (available for alternative handlers).
        'simple': {
            'format': '[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s'
        },
        # Coloured console formatter backed by the third-party `colorlog`.
        'coloured': {
            '()': 'colorlog.ColoredFormatter',
            'format': "[%(asctime)s] {%(filename)s:%(lineno)d} %(log_color)s%(levelname)s%(reset)s - %(purple)s%(name)s%(reset)s - %(message)s",
            'log_colors' : {
                'DEBUG': 'cyan',
                'INFO': 'green',
                # BUG FIX: colorlog keys this map on record.levelname, which
                # is 'WARNING'; the previous 'WARN' entry never matched, so
                # warnings were rendered without colour.
                'WARNING': 'yellow',
                'ERROR': 'red',
                'CRITICAL': 'bold_red'
            }
        }
    },
    'handlers': {
        'console': {
            'class': 'logging.StreamHandler',
            'level': 0,  # NOTSET: defer level filtering to the root logger
            'formatter': 'coloured',
            'stream': 'ext://sys.stdout'
        }
    },
    'root': {
        'level': 0,  # NOTSET: let everything through to the console handler
        'handlers': ['console']
    }
})
from .public import orderbook, tradebook, ticker
from .private import User
| 23.404762 | 138 | 0.547304 |
7d4dc70410ab4d733745d6ee1f430869e2d31708 | 2,222 | py | Python | athene/api/actions.py | alkurbatov/athene | 867797f7f7888ffab73a041eb17ec1b3753199bc | [
"MIT"
] | 3 | 2018-08-27T10:49:41.000Z | 2019-01-29T14:55:45.000Z | athene/api/actions.py | alkurbatov/athene | 867797f7f7888ffab73a041eb17ec1b3753199bc | [
"MIT"
] | null | null | null | athene/api/actions.py | alkurbatov/athene | 867797f7f7888ffab73a041eb17ec1b3753199bc | [
"MIT"
] | null | null | null | # The MIT License (MIT)
#
# Copyright (c) 2017-2018 Alexander Kurbatov
"""A set of actions definitions."""
import enum
from collections import namedtuple
# String identifiers for every agent action. They double as keys into the
# COSTS table defined later in this module.

# General (race-agnostic) actions
ACTION_ATTACK = 'attack'
ACTION_DO_NOTHING = 'donothing'
ACTION_HARVEST_MINERALS = 'harvestminerals'
ACTION_MOVE_TO_BEACON = 'movetobeacon'
ACTION_SELECT_IDLE_WORKER = 'select_idle_worker'

# Terran-specific actions
ACTION_BUILD_COMMAND_CENTER = 'buildcc'
ACTION_BUILD_REFINERY = 'buildrefinery'
ACTION_BUILD_SUPPLY = 'buildsupply'
ACTION_SELECT_MARINE = 'selectmarine'
ACTION_SELECT_COMMAND_CENTER = 'selectcc'
ACTION_SELECT_SCV = 'selectscv'
ACTION_TRAIN_SCV = 'trainscv'

# Zerg-specific actions
ACTION_BUILD_SPAWNING_POOL = 'buildspawningpool'
ACTION_SPAWN_DRONE = 'spawndrone'
ACTION_SPAWN_OVERLORD = 'spawnoverlord'
ACTION_SPAWN_ZERGLINGS = 'spawnzerglings'
class Cost(namedtuple('Cost', ['minerals', 'vespene', 'food'])):
    """Resource price of an action.

    Immutable triple of (minerals, vespene, food); any field not given
    defaults to 0, e.g. ``Cost(minerals=50, food=1)``.
    """

    # Keep instances dict-free, as is standard for namedtuple subclasses.
    __slots__ = ()

    def __new__(cls, minerals=0, vespene=0, food=0):
        return super(Cost, cls).__new__(cls, minerals, vespene, food)
# Resource price of each production/build action, keyed by action id.
# Actions absent from this table have no known cost and are treated as
# unaffordable by `cannot_afford`.
COSTS = {
    ACTION_BUILD_COMMAND_CENTER: Cost(minerals=400),
    ACTION_BUILD_REFINERY: Cost(minerals=75),
    ACTION_TRAIN_SCV: Cost(minerals=50, food=1),
    ACTION_BUILD_SUPPLY: Cost(minerals=100),
}
@enum.unique
class Stages(enum.IntEnum):
    """Stage of a multi-step task.

    Many tasks are done in two or more steps (stages), e.g.

    #1 - Select unit.
    #2 - Issue order.

    This enumeration contains predefined values for the stages.
    """

    CHOOSE_ACTION = 0  # pick which high-level action to perform next
    SELECT_UNIT = 1    # select the unit(s) that will carry it out
    ISSUE_ORDER = 2    # issue the actual order to the selection
def can(obs, action_id):
    """Return True when *action_id* is currently available to the agent."""
    available = obs.observation.available_actions
    return action_id in available
def cannot(obs, action_id):
    """Return True when *action_id* is NOT currently available."""
    return action_id not in obs.observation.available_actions
def cannot_afford(obs, action_id):
    """Return True when resources are insufficient for *action_id*.

    Actions without an entry in COSTS are treated as unaffordable
    (e.g. to create a building or train a unit).
    """
    cost = COSTS.get(action_id)
    if not cost:
        return True
    player = obs.observation.player
    if player.minerals < cost.minerals:
        return True
    if player.vespene < cost.vespene:
        return True
    return player.food_cap - player.food_used < cost.food
| 26.771084 | 89 | 0.729073 |
b3f7c13a3cdfd887bf058e86836d3c0f3596085e | 11,678 | py | Python | demo/demo_dag.py | plynx-team/demo-nlp | 51d1b3272b92d342035db1dfd26294b368827ce3 | [
"Apache-2.0"
] | null | null | null | demo/demo_dag.py | plynx-team/demo-nlp | 51d1b3272b92d342035db1dfd26294b368827ce3 | [
"Apache-2.0"
] | 1 | 2022-03-08T01:08:51.000Z | 2022-03-08T01:08:51.000Z | demo/demo_dag.py | plynx-team/demo-nlp | 51d1b3272b92d342035db1dfd26294b368827ce3 | [
"Apache-2.0"
] | null | null | null | from plynx.plugins.executors import dag
import plynx.db.node
class DemoDAG(dag.DAG):
    """plynx DAG executor whose default workflow is a word2vec demo.

    The default graph wires two "Words to list" operations (a positive and
    a negative word list) into one "Word2Vec" operation that looks up the
    most similar words — the classic ``king - man + woman`` example.
    """

    def __init__(self, *args, **argv):
        # No demo-specific state; defer entirely to the plynx DAG executor.
        super().__init__(*args, **argv)

    @classmethod
    def get_default_node(cls, is_workflow):
        """Return the base node with its sub-graph pre-filled with the demo.

        Same contract as ``dag.DAG.get_default_node``; additionally populates
        ``node.parameters[0]`` (the list of sub-nodes) with the three demo
        operations and gives the workflow a human-readable title.
        """

        def make_logs():
            # stderr/stdout/worker file outputs, identical on every operation.
            return [
                {
                    "name": log_name,
                    "file_type": "file",
                    "values": [],
                    "is_array": False,
                    "min_count": 1,
                }
                for log_name in ("stderr", "stdout", "worker")
            ]

        def make_param(name, parameter_type, value, publicable=False,
                       mutable_type=False, removable=False, widget=None):
            # One entry of a node's "parameters" list.
            return {
                "name": name,
                "parameter_type": parameter_type,
                "value": value,
                "mutable_type": mutable_type,
                "removable": removable,
                "publicable": publicable,
                "widget": widget,
                "reference": None,
            }

        # Script run by each "Words to list" operation: dump the `words`
        # parameter to the node's JSON output.
        words_cmd = (
            "import json\n\n"
            "with open(outputs['words'], 'w') as f:\n"
            "    json.dump(params['words'], f)"
        )

        def make_words_node(node_id, description, words, y):
            # A "Words to list" operation emitting `words` as JSON.
            return plynx.db.node.Node.from_dict({
                "_id": node_id,
                "title": "Words to list",
                "description": description,
                "kind": "basic-python-node-operation",
                "parent_node_id": None,
                "successor_node_id": None,
                # Both word-list operations are clones of the same template.
                "original_node_id": "5e66eaafe85256834fda470a",
                "inputs": [],
                "outputs": [
                    {
                        "name": "words",
                        "file_type": "json",
                        "values": [],
                        "is_array": False,
                        "min_count": 1,
                    },
                ],
                "parameters": [
                    make_param("_cmd", "code",
                               {"value": words_cmd, "mode": "python"}),
                    make_param("_cacheable", "bool", True),
                    make_param("_timeout", "int", 600, publicable=True),
                    make_param("words", "list_str", list(words),
                               publicable=True, mutable_type=True,
                               removable=True, widget="words"),
                ],
                "logs": make_logs(),
                "node_running_status": "CREATED",
                "node_status": "READY",
                "cache_url": "",
                "x": 30,
                "y": y,
                "author": "5e5dd9c7653752f34e1e0be6",
                "starred": False,
            })

        def make_w2v_input(name, source_node_id):
            # Word2Vec input port fed by one word-list operation's output.
            return {
                "input_references": [
                    {"node_id": source_node_id, "output_id": "words"},
                ],
                "name": name,
                "file_type": "json",
                "values": [],
                "is_array": True,
                "min_count": 0,
            }

        # Script run by the Word2Vec operation: gather the positive/negative
        # word lists, load the pickled model and dump most_similar() results.
        w2v_cmd = (
            "import json\n"
            "import pickle\n"
            "import os\n\n\n"
            "def get_words(filenames):\n"
            "    res = []\n"
            "    for filename in filenames:\n"
            "        with open(filename) as f:\n"
            "            res.extend(json.load(f))\n"
            "    return res\n\n"
            "positive = get_words(inputs['positives'])\n"
            "negative = get_words(inputs['negatives'])\n\n"
            "with open(os.path.join('/models', params['model']), 'rb') as f:\n"
            "    model = pickle.load(f)\n\n"
            "res = model.most_similar(positive=positive, negative=negative)\n\n"
            "with open(outputs['result'], 'w') as f:\n"
            "    json.dump(res, f)"
        )

        word2vec_node = plynx.db.node.Node.from_dict({
            "_id": "5e66fc2df1e5b88d18f5c14f",
            "title": "Word2Vec",
            "description": "Find similar words",
            "kind": "demo-python-node-operation",
            "parent_node_id": None,
            "successor_node_id": None,
            "original_node_id": "5e66fbef2fb8847bfdb495d1",
            "inputs": [
                make_w2v_input("positives", "5e66fca5f1e5b88d18f5c23a"),
                make_w2v_input("negatives", "5e66fcc4f1e5b88d18f5c278"),
            ],
            "outputs": [
                {
                    "name": "result",
                    "file_type": "json",
                    "values": [],
                    "is_array": False,
                    "min_count": 1,
                },
            ],
            "parameters": [
                make_param("_cmd", "code",
                           {"value": w2v_cmd, "mode": "python"}),
                make_param("_cacheable", "bool", True),
                make_param("_timeout", "int", 600, publicable=True),
                make_param(
                    "model", "enum",
                    {
                        "values": ["glove-twitter-200", "glove-twitter-25"],
                        "index": "0",
                    },
                    publicable=True, mutable_type=True,
                    removable=True, widget="model",
                ),
            ],
            "logs": make_logs(),
            "node_running_status": "CREATED",
            "node_status": "READY",
            "cache_url": "",
            "x": 282,
            "y": 142,
            "author": "5e5dd9c7653752f34e1e0be6",
            "starred": False,
        })

        node = super().get_default_node(is_workflow)
        nodes_parameter = node.parameters[0]
        nodes_parameter.value.value = [
            word2vec_node,
            make_words_node("5e66fca5f1e5b88d18f5c23a", "Positive words",
                            ["king", "woman"], y=80),
            make_words_node("5e66fcc4f1e5b88d18f5c278", "Negative words",
                            ["man"], y=225),
        ]
        # BUG FIX: the title used to be assigned twice in a row ('Word2vec',
        # then 'A + B - C example'); the first assignment was dead code.
        node.title = 'A + B - C example'
        return node
| 33.947674 | 550 | 0.340041 |
2d22422f51418a20ef6074f08641e0873e3e01a5 | 135,711 | py | Python | old_projects/waves.py | ishandutta2007/manim | 55674f64a5b4b3bf1ab05243410e81a94eca1e26 | [
"MIT"
] | null | null | null | old_projects/waves.py | ishandutta2007/manim | 55674f64a5b4b3bf1ab05243410e81a94eca1e26 | [
"MIT"
] | null | null | null | old_projects/waves.py | ishandutta2007/manim | 55674f64a5b4b3bf1ab05243410e81a94eca1e26 | [
"MIT"
] | null | null | null | from big_ol_pile_of_manim_imports import *
# Shared colors for the electric (E) and magnetic (M) field components
# used throughout the scenes in this file.
E_COLOR = BLUE
M_COLOR = YELLOW
class OscillatingVector(ContinualAnimation):
    """Continually animates a Vector mobject whose components oscillate.

    Each component i follows Re(A_i * e^{i(2*pi*f*t + phi_i)}), i.e. a
    sinusoid with amplitude A_vect[i] and phase offset phi_vect[i].
    """
    CONFIG = {
        "tail" : ORIGIN,            # anchor point of the vector's start
        "frequency" : 1,            # oscillation frequency f (cycles/sec)
        "A_vect" : [1, 0, 0],       # per-component amplitudes
        "phi_vect" : [0, 0, 0],     # per-component phase offsets
        # When set, this vector's tail tracks the other vector's tip,
        # so the two add tip-to-tail.
        "vector_to_be_added_to" : None,
    }
    def setup(self):
        # Alias so the rest of the class can talk about "the vector".
        self.vector = self.mobject
    def update_mobject(self, dt):
        f = self.frequency
        t = self.internal_time
        angle = 2*np.pi*f*t
        # Real part of the complex phasor gives the oscillating displacement.
        vect = np.array([
            A*np.exp(complex(0, angle + phi))
            for A, phi in zip(self.A_vect, self.phi_vect)
        ]).real
        self.update_tail()
        self.vector.put_start_and_end_on(self.tail, self.tail+vect)
    def update_tail(self):
        # Follow the tip of the vector we are being added to, if any.
        if self.vector_to_be_added_to is not None:
            self.tail = self.vector_to_be_added_to.get_end()
class OscillatingVectorComponents(ContinualAnimationGroup):
    """Shows the x/y component decomposition of an OscillatingVector,
    either tip-to-tail or with dashed projection lines.

    NOTE(review): this reads ``oscillating_vector.A_x`` / ``phi_x`` /
    ``A_y`` / ``phi_y``, but OscillatingVector's CONFIG defines
    ``A_vect`` / ``phi_vect`` instead — this looks like it would raise
    AttributeError unless callers set those attributes manually; confirm
    before use.
    """
    CONFIG = {
        "tip_to_tail" : False,  # add components tip-to-tail vs. projections
    }
    def __init__(self, oscillating_vector, **kwargs):
        digest_config(self, kwargs)
        vx = Vector(UP, color = GREEN).fade()
        vy = Vector(UP, color = RED).fade()
        # NOTE: rebinding `kwargs` here — the constructor kwargs were already
        # consumed by digest_config above.
        kwargs = {
            "frequency" : oscillating_vector.frequency,
            "tail" : oscillating_vector.tail,
        }
        ovx = OscillatingVector(
            vx,
            A_x = oscillating_vector.A_x,
            phi_x = oscillating_vector.phi_x,
            A_y = 0,
            phi_y = 0,
            **kwargs
        )
        ovy = OscillatingVector(
            vy,
            A_x = 0,
            phi_x = 0,
            A_y = oscillating_vector.A_y,
            phi_y = oscillating_vector.phi_y,
            **kwargs
        )
        components = [ovx, ovy]
        self.vectors = VGroup(ovx.vector, ovy.vector)
        if self.tip_to_tail:
            # y-component rides on the tip of the x-component.
            ovy.vector_to_be_added_to = ovx.vector
        else:
            # Dashed lines projecting each component onto the other's tip.
            self.lines = VGroup()
            for ov1, ov2 in (ovx, ovy), (ovy, ovx):
                ov_line = ov1.copy()
                ov_line.mobject = ov_line.vector = DashedLine(
                    UP, DOWN, color = ov1.vector.get_color()
                )
                ov_line.vector_to_be_added_to = ov2.vector
                components.append(ov_line)
                # NOTE(review): `ov_line.line` is never assigned (only
                # .mobject/.vector above) — likely AttributeError; verify.
                self.lines.add(ov_line.line)

        ContinualAnimationGroup.__init__(self, *components, **kwargs)
class EMWave(ContinualAnimationGroup):
    """A propagating electromagnetic wave drawn as a row of oscillating
    E (blue, out-of-plane) and M (yellow, perpendicular) vectors.

    Each of the n_vectors sample points along the propagation axis gets one
    E and one M OscillatingVector, phase-shifted by its position so the
    whole row reads as a traveling wave.
    """
    CONFIG = {
        "wave_number" : 1,
        "frequency" : 0.25,
        "n_vectors" : 40,                 # sample points along the wave
        # NOTE: "propogation" misspelling is used consistently file-wide.
        "propogation_direction" : RIGHT,
        "start_point" : FRAME_X_RADIUS*LEFT + DOWN + OUT,
        "length" : FRAME_WIDTH,
        "amplitude" : 1,
        "rotation" : 0,                   # polarization angle about RIGHT
        "A_vect" : [0, 0, 1],             # E-field polarization direction
        "phi_vect" : [0, 0, 0],
        # When True, the wave "turns on" from the left over time.
        "requires_start_up" : False,
    }
    def __init__(self, **kwargs):
        digest_config(self, kwargs)
        # Build the wave along RIGHT, then (in update_mobject) rotate it into
        # the requested propagation direction via this change-of-basis matrix.
        if not all(self.propogation_direction == RIGHT):
            self.matrix_transform = np.dot(
                z_to_vector(self.propogation_direction),
                np.linalg.inv(z_to_vector(RIGHT)),
            )
        else:
            self.matrix_transform = None

        vector_oscillations = []
        self.E_vects = VGroup()
        self.M_vects = VGroup()

        # Normalize the polarization direction, then scale to the amplitude.
        # (Assumes A_vect is nonzero.)
        self.A_vect = np.array(self.A_vect)/np.linalg.norm(self.A_vect)
        self.A_vect *= self.amplitude

        for alpha in np.linspace(0, 1, self.n_vectors):
            tail = interpolate(ORIGIN, self.length*RIGHT, alpha)
            # Spatial phase -k*x turns the row of oscillators into a wave.
            phase = -alpha*self.length*self.wave_number
            # NOTE: rebinds `kwargs`; constructor kwargs already consumed.
            kwargs = {
                "phi_vect" : np.array(self.phi_vect) + phase,
                "frequency" : self.frequency,
                "tail" : np.array(tail),
            }
            E_ov = OscillatingVector(
                Vector(
                    OUT, color = E_COLOR,
                    normal_vector = UP,
                ),
                A_vect = self.A_vect,
                **kwargs
            )
            # M field is the E field rotated 90 degrees about the axis.
            M_ov = OscillatingVector(
                Vector(
                    UP, color = M_COLOR,
                    normal_vector = OUT,
                ),
                A_vect = rotate_vector(self.A_vect, np.pi/2, RIGHT),
                **kwargs
            )
            vector_oscillations += [E_ov, M_ov]
            self.E_vects.add(E_ov.vector)
            self.M_vects.add(M_ov.vector)
        ContinualAnimationGroup.__init__(self, *vector_oscillations)

    def update_mobject(self, dt):
        if self.requires_start_up:
            # Ramp each oscillator's amplitude with a smoothed front that
            # sweeps left-to-right, so the wave appears to switch on.
            n_wave_lengths = self.length / (2*np.pi*self.wave_number)
            prop_time = n_wave_lengths/self.frequency
            middle_alpha = interpolate(
                0.4, 1.4,
                self.external_time / prop_time
            )
            new_smooth = squish_rate_func(smooth, 0.4, 0.6)

            ovs = self.continual_animations
            for ov, alpha in zip(ovs, np.linspace(0, 1, len(ovs))):
                epsilon = 0.0001
                new_amplitude = np.clip(
                    new_smooth(middle_alpha - alpha), epsilon, 1
                )
                norm = np.linalg.norm(ov.A_vect)
                if norm != 0:
                    ov.A_vect = new_amplitude * np.array(ov.A_vect) / norm

        ContinualAnimationGroup.update_mobject(self, dt)
        # Apply polarization rotation, orientation, and final placement
        # fresh every frame (the oscillators regenerate their geometry).
        self.mobject.rotate(self.rotation, RIGHT)
        if self.matrix_transform:
            self.mobject.apply_matrix(self.matrix_transform)
        self.mobject.shift(self.start_point)
class WavePacket(Animation):
    """Animates a localized wave packet traveling along an EMWave.

    Scales each of the wave's vectors by a Gaussian-windowed sine envelope
    (see E_func) centered on the moving packet position.
    """
    CONFIG = {
        "EMWave_config" : {
            "wave_number" : 0,
            "start_point" : FRAME_X_RADIUS*LEFT,
            "phi_vect" : np.ones(3)*np.pi/4,
        },
        "em_wave" : None,          # reuse an existing EMWave if given
        "run_time" : 4,
        "rate_func" : None,        # linear motion
        "packet_width" : 6,        # spatial extent used for start/end points
        "include_E_vects" : True,
        "include_M_vects" : True,
        "filter_distance" : FRAME_X_RADIUS,
        "get_filtered" : False,    # zero out the packet past filter_distance
        "remover" : True,
        "width" : 2*np.pi,         # wavelength of the packet's carrier
    }
    def __init__(self, **kwargs):
        digest_config(self, kwargs)
        em_wave = self.em_wave
        if em_wave is None:
            em_wave = EMWave(**self.EMWave_config)
            em_wave.update(0)
            self.em_wave = em_wave

        # The set of vectors whose lengths we will modulate each frame.
        self.vects = VGroup()
        if self.include_E_vects:
            self.vects.add(*em_wave.E_vects)
        if self.include_M_vects:
            self.vects.add(*em_wave.M_vects)
        for vect in self.vects:
            vect.save_state()

        # Packet travels from just before the wave start to just past its end.
        u = em_wave.propogation_direction
        self.wave_packet_start, self.wave_packet_end = [
            em_wave.start_point - u*self.packet_width/2,
            em_wave.start_point + u*(em_wave.length + self.packet_width/2)
        ]
        Animation.__init__(self, self.vects, **kwargs)

    def update_mobject(self, alpha):
        packet_center = interpolate(
            self.wave_packet_start,
            self.wave_packet_end,
            alpha
        )
        em_wave = self.em_wave
        for vect in self.vects:
            tail = vect.get_start()
            # Signed distance of this vector from the packet center, along
            # the propagation axis.
            distance_from_packet = np.dot(
                tail - packet_center,
                em_wave.propogation_direction
            )
            A = em_wave.amplitude*self.E_func(distance_from_packet)
            distance_from_start = np.linalg.norm(tail - em_wave.start_point)
            if self.get_filtered and distance_from_start > self.filter_distance:
                A = 0
            # Snap near-zero amplitudes to exactly zero to hide tiny arrows.
            epsilon = 0.05
            if abs(A) < epsilon:
                A = 0
            vect.restore()
            vect.scale(A/vect.get_length(), about_point = tail)

    def E_func(self, x):
        # Sine carrier under a Gaussian envelope (wavelet-like profile).
        x0 = 2*np.pi*x/self.width
        return np.sin(x0)*np.exp(-0.25*x0*x0)
class FilterLabel(TexMobject):
    """Tex label followed by an arrow tilted by `degrees` — used to label a
    polarizing filter's orientation."""
    def __init__(self, tex, degrees, **kwargs):
        TexMobject.__init__(self, tex + " \\uparrow", **kwargs)
        # Rotate only the trailing arrow glyph, clockwise by `degrees`.
        self[-1].rotate(-degrees * np.pi / 180)
class PolarizingFilter(Circle):
    """A circular polarizing filter with an orientation arrow and optional
    Tex label / numeric angle readout."""
    CONFIG = {
        "stroke_color" : DARK_GREY,
        "fill_color" : LIGHT_GREY,
        "fill_opacity" : 0.5,
        "label_tex" : None,            # optional Tex placed inside the disk
        "filter_angle" : 0,            # polarization axis, radians
        "include_arrow_label" : True,  # show the angle in degrees
        "arrow_length" : 0.7,
    }
    def __init__(self, **kwargs):
        Circle.__init__(self, **kwargs)

        if self.label_tex:
            self.label = TexMobject(self.label_tex)
            self.label.next_to(self.get_top(), DOWN, MED_SMALL_BUFF)
            self.add(self.label)

        # Orientation arrow anchored at the top of the disk, rotated
        # clockwise by the filter angle.
        arrow = Arrow(
            ORIGIN, self.arrow_length*UP,
            color = WHITE,
            buff = 0,
        )
        arrow.shift(self.get_top())
        arrow.rotate(-self.filter_angle)
        self.add(arrow)
        self.arrow = arrow
        shade_in_3d(self)

        if self.include_arrow_label:
            # Numeric readout of the filter angle, in degrees.
            arrow_label = TexMobject(
                "%.1f^\\circ"%(self.filter_angle*180/np.pi)
            )
            arrow_label.add_background_rectangle()
            arrow_label.next_to(arrow.get_tip(), UP)

            self.add(arrow_label)
            self.arrow_label = arrow_label
################
class FilterScene(ThreeDScene):
    """Base 3D scene with axes and one or more polarizing filters placed
    along the x-axis at filter_x_coordinates.

    NOTE(review): the CONFIG lists/dicts are class-level mutable defaults
    and `setup` appends to pol_filter_configs in place — shared across
    instances unless subclasses override them.
    """
    CONFIG = {
        "filter_x_coordinates" : [0],
        "pol_filter_configs" : [{}],
        "EMWave_config" : {
            "start_point" : FRAME_X_RADIUS*LEFT + DOWN+OUT
        },
        "axes_config" : {},
        "start_phi" : 0.8*np.pi/2,      # initial camera polar angle
        "start_theta" : -0.6*np.pi,     # initial camera azimuth
        "ambient_rotation_rate" : 0.01, # 0 disables the slow camera spin
    }
    def setup(self):
        self.axes = ThreeDAxes(**self.axes_config)
        self.add(self.axes)
        # Pad the config list so every x-coordinate gets a filter.
        for x in range(len(self.filter_x_coordinates) - len(self.pol_filter_configs)):
            self.pol_filter_configs.append({})
        self.pol_filters = VGroup(*[
            PolarizingFilter(**config)
            for config in self.pol_filter_configs
        ])
        # Orient the filter disks to face down the x-axis.
        self.pol_filters.rotate(np.pi/2, RIGHT)
        self.pol_filters.rotate(-np.pi/2, OUT)
        # Align filters with the wave's y/z start position (x comes next).
        pol_filter_shift = np.array(self.EMWave_config["start_point"])
        pol_filter_shift[0] = 0
        self.pol_filters.shift(pol_filter_shift)
        for x, pf in zip(self.filter_x_coordinates, self.pol_filters):
            pf.shift(x*RIGHT)
        self.add(self.pol_filters)
        self.pol_filter = self.pol_filters[0]

        self.set_camera_position(self.start_phi, self.start_theta)
        if self.ambient_rotation_rate > 0:
            self.begin_ambient_camera_rotation(self.ambient_rotation_rate)

    def get_filter_absorption_animation(self, pol_filter, photon):
        """Flash the filter red just as the photon animation passes it."""
        x = pol_filter.get_center()[0]
        # Fraction of the screen crossing at which the photon hits the filter.
        alpha = (x + FRAME_X_RADIUS) / (FRAME_WIDTH)
        return ApplyMethod(
            pol_filter.set_fill, RED,
            run_time = photon.run_time,
            rate_func = squish_rate_func(there_and_back, alpha - 0.1, alpha + 0.1)
        )
class DirectionOfPolarizationScene(FilterScene):
    """FilterScene plus an EMWave whose polarization angle tracks an
    invisible reference line; optionally simulates the filters acting on
    the wave each frame."""
    CONFIG = {
        "pol_filter_configs" : [{
            "include_arrow_label" : False,
        }],
        "target_theta" : -0.97*np.pi,
        "target_phi" : 0.9*np.pi/2,
        "ambient_rotation_rate" : 0.005,
        "apply_filter" : False,   # when True, filters reorient/attenuate vectors
        "quantum" : False,        # quantum mode: reorient without attenuating
    }
    def setup(self):
        # Invisible line whose angle defines the wave's polarization.
        self.reference_line = Line(ORIGIN, RIGHT)
        self.reference_line.set_stroke(width = 0)
        self.em_wave = EMWave(**self.EMWave_config)
        self.add(self.em_wave)

        FilterScene.setup(self)

    def change_polarization_direction(self, angle, **kwargs):
        """Animate the polarization rotating by `angle` (radians).

        NOTE(review): `added_anims` is read with .get but not popped, so it
        is also forwarded into ApplyMethod via **kwargs — apparently
        harmless with old-manim digest_config, but worth confirming.
        """
        added_anims = kwargs.get("added_anims", [])
        self.play(
            ApplyMethod(
                self.reference_line.rotate, angle,
                **kwargs
            ),
            *added_anims
        )

    def setup_rectangles(self):
        """Two translucent slabs bracketing the first filter, used to
        highlight the wave before/after it."""
        rect1 = Rectangle(
            height = 2*self.em_wave.amplitude,
            width = FRAME_X_RADIUS + 0.25,
            stroke_color = BLUE,
            fill_color = BLUE,
            fill_opacity = 0.2,
        )
        rect1.rotate(np.pi/2, RIGHT)
        pf_copy = self.pol_filter.deepcopy()
        pf_copy.remove(pf_copy.arrow)
        center = pf_copy.get_center()
        rect1.move_to(center, RIGHT)
        rect2 = rect1.copy()
        rect2.move_to(center, LEFT)
        self.rectangles = VGroup(rect1, rect2)

    def continual_update(self, *args, **kwargs):
        # Keep the wave's polarization synced to the reference line, then
        # optionally let the filters act on the freshly updated vectors.
        reference_angle = self.reference_line.get_angle()
        self.em_wave.rotation = reference_angle
        FilterScene.continual_update(self, *args, **kwargs)
        if self.apply_filter:
            self.apply_filters()
        self.update_rectangles()

    def apply_filters(self):
        vect_groups = [self.em_wave.E_vects, self.em_wave.M_vects]
        # Python 2 cmp-style sort: filters ordered left-to-right by x.
        filters = sorted(
            self.pol_filters,
            lambda pf1, pf2 : cmp(
                pf1.get_center()[0],
                pf2.get_center()[0],
            )
        )
        for pol_filter in filters:
            filter_x = pol_filter.arrow.get_center()[0]
            # E vectors align to the filter angle; M vectors stay 90 deg off.
            for vect_group, angle in zip(vect_groups, [0, -np.pi/2]):
                target_angle = pol_filter.filter_angle + angle
                for vect_mob in vect_group:
                    vect = vect_mob.get_vector()
                    vect_angle = angle_of_vector([
                        vect[2], -vect[1]
                    ])
                    # Wrap the angle difference into (-pi/2, pi/2].
                    angle_diff = target_angle - vect_angle
                    angle_diff = (angle_diff+np.pi/2)%np.pi - np.pi/2
                    start, end = vect_mob.get_start_and_end()
                    if start[0] > filter_x:
                        # Past the filter: rotate into the filter's axis and,
                        # classically, attenuate by cos(angle_diff).
                        vect_mob.rotate(angle_diff, RIGHT)
                        if not self.quantum:
                            vect_mob.scale(
                                np.cos(angle_diff),
                                about_point = start,
                            )

    def update_rectangles(self):
        if not hasattr(self, "rectangles") or self.rectangles not in self.mobjects:
            return

        # Left slab rotates with the current polarization direction.
        r1, r2 = self.rectangles

        target_angle = self.reference_line.get_angle()
        anchors = r1.get_anchors()
        vect = anchors[0] - anchors[3]
        curr_angle = angle_of_vector([vect[2], -vect[1]])
        r1.rotate_in_place(target_angle - curr_angle, RIGHT)

        # Right slab's thickness shows the transmitted (cos-projected)
        # amplitude; epsilon guards the divide/stretch at zero.
        epsilon = 0.001
        curr_depth = max(r2.get_depth(), epsilon)
        target_depth = max(
            2*self.em_wave.amplitude*abs(np.cos(target_angle)),
            epsilon
        )
        r2.stretch_in_place(target_depth/curr_depth, 2)
################
class WantToLearnQM(TeacherStudentsScene):
    """Students ask three QM questions in bubbles; the teacher answers
    "First, lots and lots of linear algebra"."""
    def construct(self):
        question1 = TexMobject(
            "\\text{What does }\\qquad \\\\",
            "|\\!\\psi \\rangle", "=",
            "\\frac{1}{\\sqrt{2}}", "|\\!\\uparrow \\rangle", "+",
            "\\frac{1}{\\sqrt{2}}", "|\\!\\downarrow \\rangle \\\\",
            "\\text{mean?}\\qquad\\quad"
        )
        question1.set_color_by_tex_to_color_map({
            "psi" : BLUE,
            "uparrow" : GREEN,
            "downarrow" : RED,
        })
        question2 = TextMobject(
            "Why are complex \\\\ numbers involved?"
        )
        question3 = TextMobject(
            "How do you compute \\\\ quantum probabilities?"
        )
        questions = [question1, question2, question3]
        bubbles = VGroup()

        # Students speak in the order middle, right, left (indices 1, 2, 0).
        for i, question in zip([1, 2, 0], questions):
            self.student_says(
                question,
                content_introduction_kwargs = {"run_time" : 2},
                student_index = i,
                bubble_kwargs = {"fill_opacity" : 1},
                bubble_creation_class = FadeIn,
            )
            # Keep each bubble (with its content) on screen after the
            # student stops "owning" it, so all three accumulate.
            bubble = self.students[i].bubble
            bubble.add(bubble.content)
            bubbles.add(bubble)
            # NOTE(review): bare `self.students` is a no-op expression,
            # presumably a leftover.
            self.students
            self.students[i].bubble = None
        self.wait(2)
        # Python 2: map() returns a list here, used as added_anims.
        self.teacher_says(
            "First, lots and lots \\\\ of linear algebra",
            added_anims = map(FadeOut, bubbles)
        )
        self.wait()
class Goal(PiCreatureScene):
    """States the series' goal: cross out "eye-catching quantum weirdness"
    in favor of "foundational intuitions"."""
    def construct(self):
        randy = self.pi_creature

        goal = TextMobject("Goal: ")
        goal.set_color(YELLOW)
        goal.shift(FRAME_X_RADIUS*LEFT/2 + UP)
        weirdness = TextMobject("Eye-catching quantum weirdness")
        weirdness.next_to(goal, RIGHT)
        cross = Cross(weirdness)
        foundations = TextMobject("Foundational intuitions")
        foundations.next_to(goal, RIGHT)

        # "Goal:" grows out of Randy's hand.
        goal.save_state()
        goal.scale(0.01)
        goal.move_to(randy.get_right())
        self.play(
            goal.restore,
            randy.change, "raise_right_hand"
        )
        self.play(Write(weirdness, run_time = 2))
        self.play(
            ShowCreation(cross),
            randy.change, "sassy"
        )
        self.wait()
        self.play(
            VGroup(weirdness, cross).shift, DOWN,
            Write(foundations, run_time = 2),
            randy.change, "happy"
        )
        self.wait(2)

    ####

    def create_pi_creature(self):
        # Single Randolph in the bottom-left corner.
        return Randolph().to_corner(DOWN+LEFT)
class AskWhatsDifferentInQM(TeacherStudentsScene):
    """A student asks what's different in quantum mechanics; the teacher
    ponders."""
    def construct(self):
        self.student_says(
            "What's different in \\\\ quantum mechanics?"
        )
        self.play(self.teacher.change, "pondering")
        self.wait(3)
class VideoWrapper(Scene):
    """Title card with an empty screen rectangle, used as a frame around
    clips from other videos. Subclasses set `title`."""
    CONFIG = {
        "title" : ""
    }
    def construct(self):
        title = TextMobject(self.title)
        title.to_edge(UP)
        self.add(title)
        rect = ScreenRectangle()
        rect.scale_to_fit_height(6)
        rect.next_to(title, DOWN)
        self.add(rect)
        self.wait()
# Concrete title cards — each just picks a title for VideoWrapper.

class BellsWrapper(VideoWrapper):
    CONFIG = {
        "title" : "Bell's inequalities"
    }

class FromOtherVideoWrapper(VideoWrapper):
    CONFIG = {
        "title" : "See the other video..."
    }

class OriginOfQuantumMechanicsWrapper(VideoWrapper):
    CONFIG = {
        "title" : "The origin of quantum mechanics"
    }
class IntroduceElectricField(PiCreatureScene):
    """Introduces the electric field: title, a 2D vector field, a positive
    test charge feeling a force, then many charges drifting in the field."""
    CONFIG = {
        "vector_field_colors" : [BLUE_B, BLUE_D],
        # Cap on displayed arrow length; lengths are squashed via sigmoid.
        "max_vector_length" : 0.9,
    }
    def construct(self):
        self.write_title()
        self.draw_field()
        self.add_particle()
        self.let_particle_wander()

    def write_title(self):
        """Show "Electromagnetic field", then shrink it to "Electric field"."""
        morty = self.pi_creature
        title = TextMobject(
            "Electro", "magnetic", " field",
            arg_separator = ""
        )
        title.next_to(morty, UP+LEFT)
        electric = TextMobject("Electric")
        electric.next_to(title[-1], LEFT)
        electric.set_color(BLUE)

        title.save_state()
        title.shift(DOWN)
        title.fade(1)

        self.play(
            title.restore,
            morty.change, "raise_right_hand",
        )
        self.play(
            title[0].set_color, BLUE,
            title[1].set_color, YELLOW,
        )
        self.wait()
        # Drop the "magnetic" part, leaving "Electric field".
        self.play(
            ShrinkToCenter(title[1]),
            Transform(title[0], electric)
        )

        title.add_background_rectangle()
        self.title = title

    def draw_field(self):
        morty = self.pi_creature
        vector_field = self.get_vector_field()
        self.play(
            LaggedStart(
                ShowCreation, vector_field,
                run_time = 3
            ),
            self.title.center,
            self.title.scale, 1.5,
            self.title.to_edge, UP,
            morty.change, "happy", ORIGIN,
        )
        self.wait()

        self.vector_field = vector_field

    def add_particle(self):
        """Bring in a positive test charge and show the force arrow on it."""
        morty = self.pi_creature
        point = UP+LEFT + SMALL_BUFF*(UP+RIGHT)
        particle = self.get_particle()
        particle.move_to(point)

        # Force arrow: the local field vector, enlarged and offset slightly.
        vector = self.get_vector(particle.get_center())
        vector.set_color(RED)
        vector.scale(1.5, about_point = point)
        vector.shift(SMALL_BUFF*vector.get_vector())
        force = TextMobject("Force")
        force.next_to(ORIGIN, UP+RIGHT, SMALL_BUFF)
        force.rotate(vector.get_angle())
        force.shift(vector.get_start())

        # Particle emerges from Morty's hand.
        particle.save_state()
        particle.move_to(morty.get_left() + 0.5*UP + 0.2*RIGHT)
        particle.fade(1)

        self.play(
            particle.restore,
            morty.change, "raise_right_hand",
        )
        self.play(morty.change, "thinking", particle)
        self.play(
            ShowCreation(vector),
            Write(force, run_time = 1),
        )
        self.wait(2)

        self.particle = particle
        self.force_vector = VGroup(vector, force)

    def let_particle_wander(self):
        """Spawn copies of the charge at random field points and let
        continual_update integrate their motion."""
        possible_points = [v.get_start() for v in self.vector_field]
        points = random.sample(possible_points, 45)
        points.append(3*UP+3*LEFT)
        particles = VGroup(self.particle, *[
            self.particle.copy().move_to(point)
            for point in points
        ])
        for particle in particles:
            particle.velocity = np.zeros(3)

        self.play(
            FadeOut(self.force_vector),
            LaggedStart(FadeIn, VGroup(*particles[1:]))
        )
        self.moving_particles = particles
        self.add_foreground_mobjects(self.moving_particles, self.pi_creature)
        self.always_continually_update = True
        self.wait(10)

    ###

    def continual_update(self, *args, **kwargs):
        # Simple Euler integration: field vector acts as acceleration.
        Scene.continual_update(self, *args, **kwargs)
        if hasattr(self, "moving_particles"):
            dt = self.frame_duration
            for p in self.moving_particles:
                vect = self.field_function(p.get_center())
                p.velocity += vect*dt
                p.shift(p.velocity*dt)
            self.pi_creature.look_at(self.moving_particles[-1])

    def get_particle(self):
        """A red circle with a '+' — a positive point charge."""
        particle = Circle(radius = 0.2)
        particle.set_stroke(RED, 3)
        particle.set_fill(RED, 0.5)
        plus = TexMobject("+")
        plus.scale(0.7)
        plus.move_to(particle)
        particle.add(plus)
        return particle

    def get_vector_field(self):
        # One arrow per lattice point on an 18x10 grid.
        result = VGroup(*[
            self.get_vector(point)
            for x in np.arange(-9, 9)
            for y in np.arange(-5, 5)
            for point in [x*RIGHT + y*UP]
        ])
        # Python 2 cmp-style sort: color gradient goes short -> long arrows.
        shading_list = list(result)
        shading_list.sort(
            lambda m1, m2 : cmp(m1.get_length(), m2.get_length())
        )
        VGroup(*shading_list).set_color_by_gradient(*self.vector_field_colors)
        result.set_fill(opacity = 0.75)
        result.sort_submobjects(np.linalg.norm)

        return result

    def get_vector(self, point):
        """Arrow for the field at `point`, anchored at that point."""
        return Vector(self.field_function(point)).shift(point)

    def field_function(self, point):
        # Toy field for visuals: (y, sin x), squashed to a display length.
        x, y = point[:2]
        result = y*RIGHT + np.sin(x)*UP
        return self.normalized(result)

    def normalized(self, vector):
        # Rescale so length is max_vector_length * sigmoid(norm/10);
        # `or 1` guards against division by zero for the zero vector.
        norm = np.linalg.norm(vector) or 1
        target_length = self.max_vector_length * sigmoid(0.1*norm)
        return target_length * vector/norm
class IntroduceMagneticField(IntroduceElectricField, ThreeDScene):
    """Introduces the magnetic field in 3D: a different toy vector field,
    a moving charge, and the force F = q v x B."""
    CONFIG = {
        "vector_field_colors" : [YELLOW_C, YELLOW_D]
    }
    def setup(self):
        IntroduceElectricField.setup(self)
        # No pi creature in this 3D scene.
        self.remove(self.pi_creature)

    def construct(self):
        self.set_camera_position(0.1, -np.pi/2)
        self.add_title()
        self.add_vector_field()
        self.introduce_moving_charge()
        self.show_force()
        # self.many_charges()

    def add_title(self):
        title = TextMobject("Magnetic", "field")
        title[0].set_color(YELLOW)
        title.scale(1.5)
        title.to_edge(UP)
        title.add_background_rectangle()

        self.add(title)
        self.title = title

    def add_vector_field(self):
        vector_field = self.get_vector_field()
        self.play(
            LaggedStart(ShowCreation, vector_field, run_time = 3),
            # Keep the title drawn above the field while it is created.
            Animation(self.title)
        )
        self.wait()

    def introduce_moving_charge(self):
        """Slide a charge in from the left with a velocity arrow."""
        point = 3*RIGHT + UP
        particle = self.get_particle()
        particle.move_to(point)

        velocity = Vector(2*RIGHT).shift(particle.get_right())
        velocity.set_color(WHITE)
        velocity_word = TextMobject("Velocity")
        velocity_word.set_color(velocity.get_color())
        velocity_word.add_background_rectangle()
        velocity_word.next_to(velocity, UP, 0, LEFT)

        M_vect = self.get_vector(point)
        M_vect.set_color(YELLOW)
        M_vect.shift(SMALL_BUFF*M_vect.get_vector())

        particle.save_state()
        particle.shift(FRAME_WIDTH*LEFT)

        self.play(
            particle.restore,
            run_time = 2,
            rate_func = None,
        )
        self.add(velocity)
        self.play(Write(velocity_word, run_time = 0.5))
        # self.play(ShowCreation(M_vect))
        self.wait()

        self.particle = particle

    def show_force(self):
        """Draw the Lorentz force F = q v x B on the moving charge."""
        point = self.particle.get_center()
        # v = RIGHT here, so F is proportional to B x v (scaled for display).
        F_vect = Vector(
            3*np.cross(self.field_function(point), RIGHT),
            color = GREEN
        )
        F_vect.shift(point)
        F_word = TextMobject("Force")
        F_word.rotate(np.pi/2, RIGHT)
        F_word.next_to(F_vect, OUT)
        F_word.set_color(F_vect.get_color())
        F_eq = TexMobject(
            "=","q", "\\textbf{v}", "\\times", "\\textbf{B}"
        )
        F_eq.set_color_by_tex_to_color_map({
            "q" : RED,
            "B" : YELLOW,
        })
        F_eq.rotate(np.pi/2, RIGHT)
        F_eq.next_to(F_word, RIGHT)

        self.move_camera(0.8*np.pi/2, -0.55*np.pi)
        self.begin_ambient_camera_rotation()
        self.play(ShowCreation(F_vect))
        self.play(Write(F_word))
        self.wait()
        self.play(Write(F_eq))
        self.wait(8)

    def many_charges(self):
        """(Unused in construct) release several charges into the field."""
        charges = VGroup()
        for y in range(2, 3):
            charge = self.get_particle()
            charge.move_to(3*LEFT + y*UP)
            charge.velocity = (2*RIGHT).astype('float')
            charges.add(charge)
        self.revert_to_original_skipping_status()
        self.add_foreground_mobjects(*charges)
        self.moving_particles = charges
        self.wait(5)

    ###

    def continual_update(self, *args, **kwargs):
        # Euler-integrate the magnetic force q v x B (q and scale folded
        # into the factor 3) — overrides the electric-field version.
        Scene.continual_update(self, *args, **kwargs)
        if hasattr(self, "moving_particles"):
            dt = self.frame_duration
            for p in self.moving_particles:
                M_vect = self.field_function(p.get_center())
                F_vect = 3*np.cross(p.velocity, M_vect)
                p.velocity += F_vect*dt
                p.shift(p.velocity*dt)

    def field_function(self, point):
        # Toy field: two Gaussian "wire" bumps plus a parabolic x-component.
        x, y = point[:2]
        y += 0.5
        gauss = lambda r : np.exp(-0.5*r**2)
        result = (y**2 - 1)*RIGHT + x*(gauss(y+2) - gauss(y-2))*UP
        return self.normalized(result)
class CurlRelationBetweenFields(ThreeDScene):
    """Illustrates the curl relations between E and B: a circulating loop
    in one field induces a vector in the other, marching rightward as the
    induction alternates between the two fields."""
    def construct(self):
        self.add_axes()
        self.loop_in_E()
        self.loop_in_M()
        self.second_loop_in_E()
    def add_axes(self):
        """Add 3D axes whose x axis spans the full frame width."""
        self.add(ThreeDAxes(x_axis_radius = FRAME_X_RADIUS))
    def loop_in_E(self):
        """Show a circular loop of E vectors inducing a B vector (into the
        screen) at its center; stashes the pieces for the next steps."""
        # Each E vector is tangent to the circle of compass directions.
        E_vects = VGroup(*[
            Vector(0.5*rotate_vector(vect, np.pi/2)).shift(vect)
            for vect in compass_directions(8)
        ])
        E_vects.set_color(E_COLOR)
        point = 1.2*RIGHT + 2*UP + OUT
        E_vects.shift(point)
        M_vect = Vector(
            IN,
            normal_vector = DOWN,
            color = M_COLOR
        )
        M_vect.shift(point)
        M_vect.save_state()
        # Start the induced B vector at (near) zero length, then grow it.
        M_vect.scale(0.01, about_point = M_vect.get_start())
        self.play(ShowCreation(E_vects, run_time = 2))
        self.wait()
        self.move_camera(0.8*np.pi/2, -0.45*np.pi)
        self.begin_ambient_camera_rotation()
        self.play(M_vect.restore, run_time = 3, rate_func = None)
        self.wait(3)
        self.E_vects = E_vects
        self.E_circle_center = point
        self.M_vect = M_vect
    def loop_in_M(self):
        """Show a loop of B vectors one step to the right, inducing an E
        vector; the surviving E vector flips and grows into it."""
        M_vects = VGroup(*[
            Vector(
                rotate_vector(vect, np.pi/2),
                normal_vector = IN,
                color = M_COLOR
            ).shift(vect)
            for vect in compass_directions(8, LEFT)[1:]
        ])
        M_vects.rotate(np.pi/2, RIGHT)
        new_point = self.E_circle_center + RIGHT
        M_vects.shift(new_point)
        E_vect = self.E_vects[0]
        self.play(
            ShowCreation(M_vects, run_time = 2),
            *map(FadeOut, self.E_vects[1:])
        )
        self.wait()
        self.play(
            E_vect.rotate, np.pi, RIGHT, [], new_point,
            E_vect.scale_about_point, 3, new_point,
            run_time = 4,
            rate_func = None,
        )
        self.wait()
        self.M_circle_center = new_point
        M_vects.add(self.M_vect)
        self.M_vects = M_vects
        self.E_vect = E_vect
    def second_loop_in_E(self):
        """Repeat the pattern: a new E loop another step to the right,
        into which one remaining B vector rotates."""
        E_vects = VGroup(*[
            Vector(1.5*rotate_vector(vect, np.pi/2)).shift(vect)
            for vect in compass_directions(8, LEFT)[1:]
        ])
        E_vects.set_color(E_COLOR)
        point = self.M_circle_center + RIGHT
        E_vects.shift(point)
        M_vect = self.M_vects[3]
        self.M_vects.remove(M_vect)
        self.play(FadeOut(self.M_vects))
        self.play(ShowCreation(E_vects), Animation(M_vect))
        self.play(
            M_vect.rotate, np.pi, RIGHT, [], point,
            run_time = 5,
            rate_func = None,
        )
        self.wait(3)
class WriteCurlEquations(Scene):
    """Writes the two curl equations of Maxwell (in Gaussian-style units),
    with a footnote that the second ignores current terms."""
    def construct(self):
        eq1 = TexMobject(
            "\\nabla \\times", "\\textbf{E}", "=",
            "-\\frac{1}{c}",
            "\\frac{\\partial \\textbf{B}}{\\partial t}"
        )
        # "=^*" carries the asterisk that the footnote below explains.
        eq2 = TexMobject(
            "\\nabla \\times", "\\textbf{B}", "=^*",
            "\\frac{1}{c}",
            "\\frac{\\partial \\textbf{E}}{\\partial t}"
        )
        eqs = VGroup(eq1, eq2)
        eqs.arrange_submobjects(DOWN, buff = LARGE_BUFF)
        eqs.scale_to_fit_height(FRAME_HEIGHT - 1)
        eqs.to_edge(LEFT)
        for eq in eqs:
            eq.set_color_by_tex_to_color_map({
                "E" : E_COLOR,
                "B" : M_COLOR,
            })
        footnote = TextMobject("*Ignoring currents")
        footnote.next_to(eqs[1], RIGHT)
        footnote.to_edge(RIGHT)
        self.play(Write(eq1, run_time = 2))
        self.wait(3)
        self.play(Write(eq2, run_time = 2))
        self.play(FadeIn(footnote))
        self.wait(3)
class IntroduceEMWave(ThreeDScene):
    """Shows an electromagnetic wave on 3D axes and writes the phrase
    "Electromagnetic radiation" with E/B color coding."""
    CONFIG = {
        "EMWave_config" : {
            "requires_start_up" : True
        }
    }
    def setup(self):
        """Build axes, the continual EM wave, and start camera rotation.
        Runs before construct; subclasses reuse this setup."""
        self.axes = ThreeDAxes()
        self.add(self.axes)
        self.em_wave = EMWave(**self.EMWave_config)
        self.add(self.em_wave)
        self.set_camera_position(0.8*np.pi/2, -0.7*np.pi)
        self.begin_ambient_camera_rotation()
    def construct(self):
        words = TextMobject(
            "Electro", "magnetic", " radiation",
            arg_separator = ""
        )
        words.set_color_by_tex_to_color_map({
            "Electro" : E_COLOR,
            "magnetic" : M_COLOR,
        })
        words.next_to(ORIGIN, LEFT, MED_LARGE_BUFF)
        words.to_edge(UP)
        # Rotate the text into the camera-facing plane of the 3D shot.
        words.rotate(np.pi/2, RIGHT)
        self.wait(7)
        self.play(Write(words, run_time = 2))
        self.wait(20)
#####
class SimpleEMWave(IntroduceEMWave):
    """Just the ambient EM wave from IntroduceEMWave's setup, no text."""
    def construct(self):
        self.wait(30)
class ListRelevantWaveIdeas(TeacherStudentsScene):
    """Teacher lists the wave topics to review, then morphs the title
    "Wave topics" into "Quantum topics"."""
    def construct(self):
        title = TextMobject("Wave","topics")
        title.to_corner(UP + LEFT, LARGE_BUFF)
        title.set_color(BLUE)
        h_line = Line(title.get_left(), title.get_right())
        h_line.next_to(title, DOWN, SMALL_BUFF)
        topics = VGroup(*map(TextMobject, [
            "- Superposition",
            "- Amplitudes",
            "- How phase influences addition",
        ]))
        topics.scale(0.8)
        topics.arrange_submobjects(DOWN, aligned_edge = LEFT)
        topics.next_to(h_line, DOWN, aligned_edge = LEFT)
        quantum = TextMobject("Quantum")
        quantum.set_color(GREEN)
        quantum.move_to(title[0], LEFT)
        wave_point = self.teacher.get_corner(UP+LEFT) + 2*UP
        self.play(
            # Dummy animation to hold the frame while the teacher gestures.
            Animation(VectorizedPoint(wave_point)),
            self.teacher.change, "raise_right_hand"
        )
        self.wait(2)
        self.play(
            Write(title, run_time = 2),
            ShowCreation(h_line)
        )
        self.change_student_modes(
            *["pondering"]*3,
            added_anims = [LaggedStart(
                FadeIn, topics,
                run_time = 3
            )],
            look_at_arg = title
        )
        self.play(
            Animation(title),
            self.teacher.change, "happy"
        )
        # Swap "Wave" out for "Quantum", keeping "topics" in place.
        self.play(
            title[0].next_to, quantum.copy(), UP, MED_SMALL_BUFF, LEFT,
            title[0].fade, 0.5,
            title[1].next_to, quantum.copy(), RIGHT, 2*SMALL_BUFF,
            Write(quantum),
        )
        self.wait(5)
class DirectWaveOutOfScreen(IntroduceEMWave):
    """Rotates the camera so the EM wave propagates straight out of the
    screen, then fades everything but the leading E vector."""
    CONFIG = {
        "EMWave_config" : {
            "requires_start_up" : False,
            "amplitude" : 2,
            "start_point" : FRAME_X_RADIUS*LEFT,
            "A_vect" : [0, 1, 0],
            "start_up_time" : 0,
        }
    }
    def setup(self):
        IntroduceEMWave.setup(self)
        self.remove(self.axes)
        for ov in self.em_wave.continual_animations:
            ov.vector.normal_vector = RIGHT
        self.set_camera_position(0.9*np.pi/2, -0.3*np.pi)
    def construct(self):
        self.move_into_position()
        self.fade_M_vects()
        self.fade_all_but_last_E_vects()
    def move_into_position(self):
        """Swing the camera to look straight down the propagation axis,
        dimming all but the last two oscillating vectors."""
        self.wait(2)
        self.continual_update()
        faded_vectors = VGroup(*[
            ov.vector
            for ov in self.em_wave.continual_animations[:-2]
        ])
        # Two-stage camera move; stop ambient rotation between stages.
        self.move_camera(
            0.99*np.pi/2, -0.01,
            run_time = 2,
            added_anims = [faded_vectors.set_fill, None, 0.5]
        )
        self.stop_ambient_camera_rotation()
        self.move_camera(
            np.pi/2, 0,
            added_anims = [faded_vectors.set_fill, None, 0.05],
            run_time = 2,
        )
        self.faded_vectors = faded_vectors
    def fade_M_vects(self):
        """Hide the magnetic field vectors entirely."""
        self.play(
            self.em_wave.M_vects.set_fill, None, 0
        )
        self.wait(2)
    def fade_all_but_last_E_vects(self):
        """Hide the dimmed E vectors, leaving only the leading ones."""
        self.play(self.faded_vectors.set_fill, None, 0)
        self.wait(4)
class ShowVectorEquation(Scene):
    """Builds up the component equation for an oscillating E vector:
    starting from a horizontally polarized vector, it introduces the
    cosine component, frequency, phase shift, amplitude, ket notation,
    and finally switches to vertically polarized light."""
    CONFIG = {
        # Colors for frequency, phase, and amplitude symbols throughout.
        "f_color" : RED,
        "phi_color" : MAROON_B,
        "A_color" : GREEN,
    }
    def construct(self):
        self.add_vector()
        self.add_plane()
        self.write_horizontally_polarized()
        self.write_components()
        self.show_graph()
        self.add_phi()
        self.add_amplitude()
        self.add_kets()
        self.switch_to_vertically_polarized_light()
    def add_vector(self):
        """Add the oscillating horizontal E vector."""
        self.vector = Vector(2*RIGHT, color = E_COLOR)
        self.oscillating_vector = OscillatingVector(
            self.vector,
            A_vect = [2, 0, 0],
            frequency = 0.25,
        )
        self.add(self.oscillating_vector)
        self.wait(3)
    def add_plane(self):
        """Draw the background coordinate plane (2 units per grid step)."""
        xy_plane = NumberPlane(
            axes_color = LIGHT_GREY,
            color = DARK_GREY,
            secondary_color = DARK_GREY,
            x_unit_size = 2,
            y_unit_size = 2,
        )
        xy_plane.add_coordinates()
        xy_plane.add(xy_plane.get_axis_labels())
        self.play(
            Write(xy_plane),
            Animation(self.vector)
        )
        self.wait(2)
        self.xy_plane = xy_plane
    def write_horizontally_polarized(self):
        """Write the label ``Horizontally polarized`` at the top."""
        words = TextMobject(
            "``", "Horizontally", " polarized", "''",
            arg_separator = ""
        )
        words.next_to(ORIGIN, LEFT)
        words.to_edge(UP)
        words.add_background_rectangle()
        self.play(Write(words, run_time = 3))
        self.wait()
        self.horizontally_polarized_words = words
    def write_components(self):
        """Write E as a column vector [cos(2 pi f_x t + phi_x), 0],
        briefly tracking the vector with a brace and formula."""
        x, y = components = VGroup(
            TexMobject("\\cos(", "2\\pi", "f_x", "t", "+ ", "\\phi_x", ")"),
            TexMobject("0", "")
        )
        components.arrange_submobjects(DOWN)
        lb, rb = brackets = TexMobject("[]")
        brackets.scale_to_fit_height(components.get_height() + SMALL_BUFF)
        lb.next_to(components, LEFT, buff = 0.3)
        rb.next_to(components, RIGHT, buff = 0.3)
        E, equals = E_equals = TexMobject(
            "\\vec{\\textbf{E}}", "="
        )
        E.set_color(E_COLOR)
        E_equals.next_to(brackets, LEFT)
        E_equals.add_background_rectangle()
        brackets.add_background_rectangle()
        group = VGroup(E_equals, brackets, components)
        group.next_to(
            self.horizontally_polarized_words,
            DOWN, MED_LARGE_BUFF, RIGHT
        )
        # Phase-free version used until add_phi introduces phi_x.
        x_without_phi = TexMobject("\\cos(", "2\\pi", "f_x", "t", ")")
        x_without_phi.move_to(x)
        for mob in x, x_without_phi:
            mob.set_color_by_tex_to_color_map({
                "f_x" : self.f_color,
                "phi_x" : self.phi_color,
            })
        def update_brace(brace):
            # Follow the oscillating vector's current width each frame.
            brace.stretch_to_fit_width(
                max(self.vector.get_width(), 0.001)
            )
            brace.next_to(self.vector.get_center(), DOWN, SMALL_BUFF)
            return brace
        moving_brace = ContinualUpdateFromFunc(
            Brace(Line(LEFT, RIGHT), DOWN), update_brace
        )
        moving_x_without_phi = ContinualUpdateFromFunc(
            x_without_phi.copy().add_background_rectangle(),
            lambda m : m.next_to(moving_brace.mobject, DOWN, SMALL_BUFF)
        )
        self.play(Write(E_equals), Write(brackets))
        y.save_state()
        y.move_to(self.horizontally_polarized_words)
        y.set_fill(opacity = 0)
        self.play(y.restore)
        self.wait()
        self.add(moving_brace, moving_x_without_phi)
        self.play(
            FadeIn(moving_brace.mobject),
            FadeIn(x_without_phi),
            FadeIn(moving_x_without_phi.mobject),
            submobject_mode = "lagged_start",
            run_time = 2,
        )
        self.wait(3)
        self.play(
            FadeOut(moving_brace.mobject),
            FadeOut(moving_x_without_phi.mobject),
        )
        self.remove(moving_brace, moving_x_without_phi)
        self.E_equals = E_equals
        self.brackets = brackets
        self.x_without_phi = x_without_phi
        self.components = components
    def show_graph(self):
        """Show a corner graph of cos(2 pi f t), comparing f = 1 against
        the displayed vector's f = 0.25."""
        axes = Axes(
            x_min = -0.5,
            x_max = 5.2,
            y_min = -1.5,
            y_max = 1.5,
        )
        axes.x_axis.add_numbers(*range(1, 6))
        t = TexMobject("t")
        t.next_to(axes.x_axis, UP, SMALL_BUFF, RIGHT)
        cos = self.x_without_phi.copy()
        cos.next_to(axes.y_axis, RIGHT, SMALL_BUFF, UP)
        cos_arg = VGroup(*cos[1:-1])
        fx_equals_1 = TexMobject("f_x", "= 1")
        fx_equals_fourth = TexMobject("f_x", "= 0.25")
        fx_group = VGroup(fx_equals_1, fx_equals_fourth)
        for fx in fx_group:
            fx[0].set_color(self.f_color)
            fx.move_to(axes, UP+RIGHT)
        high_f_graph, low_f_graph = graphs = VGroup(*[
            FunctionGraph(
                lambda x : np.cos(2*np.pi*f*x),
                color = E_COLOR,
                x_min = 0,
                x_max = 4/f,
                num_steps = 20/f,
            )
            for f in (1, 0.25,)
        ])
        group = VGroup(axes, t, cos, high_f_graph, *fx_group)
        rect = SurroundingRectangle(
            group,
            buff = MED_LARGE_BUFF,
            stroke_color = WHITE,
            stroke_width = 3,
            fill_color = BLACK,
            fill_opacity = 0.9
        )
        group.add_to_back(rect)
        group.scale(0.8)
        group.to_corner(UP+RIGHT, buff = -SMALL_BUFF)
        group.remove(*it.chain(fx_group, graphs))
        low_f_graph.scale(0.8)
        low_f_graph.move_to(high_f_graph, LEFT)
        cos_arg_rect = SurroundingRectangle(cos_arg)
        # Temporary fast (f = 1) vector to match the f = 1 graph.
        new_ov = OscillatingVector(
            Vector(RIGHT, color = E_COLOR),
            A_vect = [2, 0, 0],
            frequency = 1,
            start_up_time = 0,
        )
        self.play(FadeIn(group))
        self.play(
            ReplacementTransform(
                self.components[0].get_part_by_tex("f_x").copy(),
                fx_equals_1
            ),
        )
        # Wait until the slow vector completes its cycle so the swap to
        # the fast vector starts in phase.
        self.wait(4 - (self.oscillating_vector.internal_time%4))
        self.remove(self.oscillating_vector)
        self.add(new_ov)
        self.play(ShowCreation(
            high_f_graph, run_time = 4,
            rate_func = None,
        ))
        self.wait()
        self.play(FadeOut(new_ov.vector))
        self.remove(new_ov)
        self.add(self.oscillating_vector)
        self.play(
            ReplacementTransform(*fx_group),
            ReplacementTransform(*graphs),
            FadeOut(new_ov.vector),
            FadeIn(self.vector)
        )
        self.wait(4)
        self.play(ShowCreation(cos_arg_rect))
        self.play(FadeOut(cos_arg_rect))
        self.wait(5)
        self.corner_group = group
        self.fx_equals_fourth = fx_equals_fourth
        self.corner_cos = cos
        self.low_f_graph = low_f_graph
        self.graph_axes = axes
    def add_phi(self):
        """Introduce the phase-shift term phi_x in both the main formula
        and the corner graph's cosine."""
        corner_cos = self.corner_cos
        corner_phi = TexMobject("+", "\\phi_x")
        corner_phi.set_color_by_tex("phi", self.phi_color)
        corner_phi.scale(0.8)
        corner_phi.next_to(corner_cos[-2], RIGHT, SMALL_BUFF)
        x, y = self.components
        x_without_phi = self.x_without_phi
        words = TextMobject("``Phase shift''")
        words.next_to(ORIGIN, UP+LEFT)
        words.set_color(self.phi_color)
        words.add_background_rectangle()
        arrow = Arrow(words.get_top(), x[-2])
        arrow.set_color(WHITE)
        self.play(
            ReplacementTransform(
                VGroup(*x_without_phi[:-1]),
                VGroup(*x[:-3]),
            ),
            ReplacementTransform(x_without_phi[-1], x[-1]),
            Write(VGroup(*x[-3:-1])),
            corner_cos[-1].next_to, corner_phi.copy(), RIGHT, SMALL_BUFF,
            Write(corner_phi),
            FadeOut(self.fx_equals_fourth),
        )
        # Visualize the phase shift as a horizontal slide of the graph.
        self.play(self.low_f_graph.shift, MED_LARGE_BUFF*LEFT)
        self.play(
            Write(words, run_time = 1),
            ShowCreation(arrow)
        )
        self.wait(3)
        self.play(*map(FadeOut, [words, arrow]))
        self.corner_cos.add(corner_phi)
    def add_amplitude(self):
        """Introduce the amplitude A_x, shown as braces on the vector and
        on the graph, then shrink everything by a common factor."""
        x, y = self.components
        corner_cos = self.corner_cos
        graph = self.low_f_graph
        graph_y_axis = self.graph_axes.y_axis
        A = TexMobject("A_x")
        A.set_color(self.A_color)
        A.move_to(x.get_left())
        corner_A = A.copy()
        corner_A.scale(0.8)
        corner_A.move_to(corner_cos, LEFT)
        h_brace = Brace(Line(ORIGIN, 2*RIGHT), UP)
        v_brace = Brace(Line(
            graph_y_axis.number_to_point(0),
            graph_y_axis.number_to_point(1),
        ), LEFT, buff = SMALL_BUFF)
        for brace in h_brace, v_brace:
            brace.A = brace.get_tex("A_x")
            brace.A.set_color(self.A_color)
        v_brace.A.scale(0.5, about_point = v_brace.get_center())
        all_As = VGroup(A, corner_A, h_brace.A, v_brace.A)
        def update_vect(vect):
            # Tie the oscillation amplitude to the brace's current width.
            self.oscillating_vector.A_vect[0] = h_brace.get_width()
            return vect
        self.play(
            GrowFromCenter(h_brace),
            GrowFromCenter(v_brace),
        )
        self.wait(2)
        self.play(
            x.next_to, A, RIGHT, SMALL_BUFF,
            corner_cos.next_to, corner_A, RIGHT, SMALL_BUFF,
            FadeIn(all_As)
        )
        x.add(A)
        corner_cos.add(corner_A)
        self.wait()
        factor = 0.5
        self.play(
            v_brace.stretch_in_place, factor, 1,
            v_brace.move_to, v_brace.copy(), DOWN,
            MaintainPositionRelativeTo(v_brace.A, v_brace),
            h_brace.stretch_in_place, factor, 0,
            h_brace.move_to, h_brace.copy(), LEFT,
            MaintainPositionRelativeTo(h_brace.A, h_brace),
            UpdateFromFunc(self.vector, update_vect),
            graph.stretch_in_place, factor, 1,
        )
        self.wait(4)
        self.h_brace = h_brace
        self.v_brace = v_brace
    def add_kets(self):
        """Rewrite the column vector as a ket sum:
        E = (x component)|right> + (y component)|up>."""
        x, y = self.components
        E_equals = self.E_equals
        for mob in x, y, E_equals:
            mob.add_background_rectangle()
            mob.generate_target()
        right_ket = TexMobject("|\\rightarrow\\rangle")
        up_ket = TexMobject("|\\uparrow\\rangle")
        kets = VGroup(right_ket, up_ket)
        kets.set_color(YELLOW)
        for ket in kets:
            ket.add_background_rectangle()
        plus = TextMobject("+")
        group = VGroup(
            E_equals.target,
            x.target, right_ket, plus,
            y.target, up_ket,
        )
        group.arrange_submobjects(RIGHT)
        E_equals.target.shift(SMALL_BUFF*UP)
        group.scale(0.8)
        group.move_to(self.brackets, DOWN)
        group.to_edge(LEFT, buff = MED_SMALL_BUFF)
        kets_word = TextMobject("``kets''")
        kets_word.next_to(kets, DOWN, buff = 0.8)
        arrows = VGroup(*[
            Arrow(kets_word.get_top(), ket, color = ket.get_color())
            for ket in kets
        ])
        ket_rects = VGroup(*map(SurroundingRectangle, kets))
        ket_rects.set_color(WHITE)
        unit_vectors = VGroup(*[Vector(2*vect) for vect in (RIGHT, UP)])
        unit_vectors.set_fill(YELLOW)
        self.play(
            FadeOut(self.brackets),
            *map(MoveToTarget, [E_equals, x, y])
        )
        self.play(*map(Write, [right_ket, plus, up_ket]), run_time = 1)
        self.play(
            Write(kets_word),
            LaggedStart(ShowCreation, arrows, lag_ratio = 0.7),
            run_time = 2,
        )
        self.wait()
        # Associate each ket symbol with its unit vector on the plane.
        for ket, ket_rect, unit_vect in zip(kets, ket_rects, unit_vectors):
            self.play(ShowCreation(ket_rect))
            self.play(FadeOut(ket_rect))
            self.play(ReplacementTransform(ket[1][1].copy(), unit_vect))
            self.wait()
        self.play(FadeOut(unit_vectors))
        self.play(*map(FadeOut, [kets_word, arrows]))
        self.kets = kets
        self.plus = plus
    def switch_to_vertically_polarized_light(self):
        """Rotate the oscillation 90 degrees and rewrite the expression
        with the roles of the components swapped."""
        x, y = self.components
        x_ket, y_ket = self.kets
        plus = self.plus
        x.target = TexMobject("0", "").add_background_rectangle()
        y.target = TexMobject(
            "A_y", "\\cos(", "2\\pi", "f_y", "t", "+", "\\phi_y", ")"
        )
        y.target.set_color_by_tex_to_color_map({
            "A" : self.A_color,
            "f" : self.f_color,
            "phi" : self.phi_color,
        })
        y.target.add_background_rectangle()
        VGroup(x.target, y.target).scale(0.8)
        for mob in [plus] + list(self.kets):
            mob.generate_target()
        movers = x, x_ket, plus, y, y_ket
        group = VGroup(*[m.target for m in movers])
        group.arrange_submobjects(RIGHT)
        group.move_to(x, LEFT)
        vector_A_vect = np.array(self.oscillating_vector.A_vect)
        def update_vect(vect, alpha):
            # Rotate the amplitude vector from horizontal to vertical.
            self.oscillating_vector.A_vect = rotate_vector(
                vector_A_vect, alpha*np.pi/2
            )
            return vect
        new_h_brace = Brace(Line(ORIGIN, UP), RIGHT)
        words = TextMobject(
            "``", "Vertically", " polarized", "''",
            arg_separator = "",
        )
        words.add_background_rectangle()
        words.move_to(self.horizontally_polarized_words)
        self.play(
            UpdateFromAlphaFunc(self.vector, update_vect),
            Transform(self.h_brace, new_h_brace),
            self.h_brace.A.next_to, new_h_brace, RIGHT, SMALL_BUFF,
            Transform(self.horizontally_polarized_words, words),
            *map(FadeOut, [
                self.corner_group, self.v_brace,
                self.v_brace.A, self.low_f_graph,
            ])
        )
        self.play(*map(MoveToTarget, movers))
        self.wait(5)
class ChangeFromHorizontalToVerticallyPolarized(DirectionOfPolarizationScene):
    """Head-on view of an EM wave whose polarization rotates from
    horizontal to vertical; no filters, B vectors hidden."""
    CONFIG = {
        "filter_x_coordinates" : [],
        "EMWave_config" : {
            "start_point" : FRAME_X_RADIUS*LEFT,
            "A_vect" : [0, 2, 0],
        }
    }
    def setup(self):
        DirectionOfPolarizationScene.setup(self)
        # Re-orient axis labels for the head-on camera angle.
        self.axes.z_axis.rotate(np.pi/2, OUT)
        self.axes.y_axis.rotate(np.pi/2, UP)
        self.remove(self.pol_filter)
        self.em_wave.M_vects.set_fill(opacity = 0)
        for vect in self.em_wave.E_vects:
            vect.normal_vector = RIGHT
            vect.set_fill(opacity = 0.5)
        # Leading vector drawn at full opacity.
        self.em_wave.E_vects[-1].set_fill(opacity = 1)
        self.set_camera_position(0.9*np.pi/2, -0.05*np.pi)
    def construct(self):
        self.wait(3)
        self.change_polarization_direction(np.pi/2)
        self.wait(10)
class SumOfTwoWaves(ChangeFromHorizontalToVerticallyPolarized):
    """Stacks three waves vertically with "+" and "=" between them,
    showing the central wave as the sum of the two side waves."""
    CONFIG = {
        "axes_config" : {
            "y_max" : 1.5,
            "y_min" : -1.5,
            "z_max" : 1.5,
            "z_min" : -1.5,
        },
        "EMWave_config" : {
            "A_vect" : [0, 0, 1],
        },
        "ambient_rotation_rate" : 0,
    }
    def setup(self):
        ChangeFromHorizontalToVerticallyPolarized.setup(self)
        for vect in self.em_wave.E_vects[:-1]:
            vect.set_fill(opacity = 0.3)
        self.side_em_waves = []
        # Copies of the main wave shifted above and below, with their
        # own amplitude directions.
        for shift_vect, A_vect in (5*DOWN, [0, 1, 0]), (5*UP, [0, 1, 1]):
            axes = self.axes.copy()
            em_wave = copy.deepcopy(self.em_wave)
            axes.shift(shift_vect)
            em_wave.mobject.shift(shift_vect)
            em_wave.start_point += shift_vect
            for ov in em_wave.continual_animations:
                ov.A_vect = np.array(A_vect)
            self.add(axes, em_wave)
            self.side_em_waves.append(em_wave)
        self.set_camera_position(0.95*np.pi/2, -0.03*np.pi)
    def construct(self):
        plus, equals = pe = VGroup(*map(TexMobject, "+="))
        pe.scale(2)
        # Rotate symbols to face the head-on camera.
        pe.rotate(np.pi/2, RIGHT)
        pe.rotate(np.pi/2, OUT)
        plus.shift(2.5*DOWN)
        equals.shift(2.5*UP)
        self.add(pe)
        self.wait(16)
class ShowTipToTailSum(ShowVectorEquation):
    """Shows the diagonal E vector as a tip-to-tail sum of a horizontal
    and a vertical oscillating vector, writes it as a weighted ket sum,
    then adds amplitudes and a phase shift (producing an ellipse)."""
    def construct(self):
        # Silently replay the setup steps from ShowVectorEquation.
        self.force_skipping()
        self.add_vector()
        self.add_plane()
        self.add_vertial_vector()
        self.revert_to_original_skipping_status()
        self.add_kets()
        self.show_vector_sum()
        self.write_superposition()
        self.add_amplitudes()
        self.add_phase_shift()
    def add_vertial_vector(self):
        """Add a vertical oscillating vector plus a continually-updated
        diagonal vector equal to the sum of the two components.
        (Method name's spelling is kept: subclasses call it as-is.)"""
        self.h_vector = self.vector
        self.h_oscillating_vector = self.oscillating_vector
        self.h_oscillating_vector.start_up_time = 0
        self.v_oscillating_vector = self.h_oscillating_vector.copy()
        self.v_vector = self.v_oscillating_vector.vector
        self.v_oscillating_vector.A_vect = [0, 2, 0]
        self.v_oscillating_vector.update(0)
        self.d_oscillating_vector = ContinualUpdateFromFunc(
            Vector(UP+RIGHT, color = E_COLOR),
            lambda v : v.put_start_and_end_on(
                ORIGIN,
                self.v_vector.get_end()+ self.h_vector.get_end(),
            )
        )
        self.d_vector = self.d_oscillating_vector.mobject
        self.d_oscillating_vector.update(0)
        self.add(self.v_oscillating_vector)
        self.add_foreground_mobject(self.v_vector)
    def add_kets(self):
        """Label the two component vectors with cos(2 pi f t)|right>
        and cos(2 pi f t)|up>."""
        h_ket, v_ket = kets = VGroup(*[
            TexMobject(
                "\\cos(", "2\\pi", "f", "t", ")",
                "|\\!\\%sarrow\\rangle"%s
            )
            for s in ("right", "up")
        ])
        for ket in kets:
            ket.set_color_by_tex_to_color_map({
                "f" : self.f_color,
                "rangle" : YELLOW,
            })
            ket.add_background_rectangle(opacity = 1)
            ket.scale(0.8)
        h_ket.next_to(2*RIGHT, UP, SMALL_BUFF)
        v_ket.next_to(2*UP, UP, SMALL_BUFF)
        self.add_foreground_mobject(kets)
        self.kets = kets
    def show_vector_sum(self):
        """Animate the tip-to-tail construction: dashed projection lines,
        the diagonal sum vector, and the ket sum expression."""
        h_line = DashedLine(ORIGIN, 2*RIGHT)
        v_line = DashedLine(ORIGIN, 2*UP)
        h_line.update = self.generate_dashed_line_update(
            self.h_vector, self.v_vector
        )
        v_line.update = self.generate_dashed_line_update(
            self.v_vector, self.h_vector
        )
        h_ket, v_ket = self.kets
        for ket in self.kets:
            ket.generate_target()
        plus = TexMobject("+")
        ket_sum = VGroup(h_ket.target, plus, v_ket.target)
        ket_sum.arrange_submobjects(RIGHT)
        ket_sum.next_to(3*RIGHT + 2*UP, UP, SMALL_BUFF)
        self.wait(4)
        # Freeze oscillation while the construction is drawn.
        self.remove(self.h_oscillating_vector, self.v_oscillating_vector)
        self.add(self.h_vector, self.v_vector)
        h_line.update(h_line)
        v_line.update(v_line)
        self.play(*it.chain(
            map(MoveToTarget, self.kets),
            [Write(plus)],
            map(ShowCreation, [h_line, v_line]),
        ))
        blue_black = average_color(BLUE, BLACK)
        self.play(
            GrowFromPoint(self.d_vector, ORIGIN),
            self.h_vector.set_fill, blue_black,
            self.v_vector.set_fill, blue_black,
        )
        self.wait()
        # Resume oscillation, now with continually-updated dashed lines.
        self.add(
            self.h_oscillating_vector,
            self.v_oscillating_vector,
            self.d_oscillating_vector,
            ContinualUpdateFromFunc(h_line, h_line.update),
            ContinualUpdateFromFunc(v_line, v_line.update),
        )
        self.wait(4)
        self.ket_sum = VGroup(h_ket, plus, v_ket)
    def write_superposition(self):
        """Name the expression a superposition, then a (weighted) sum."""
        superposition_words = TextMobject(
            "``Superposition''", "of",
            "$|\\!\\rightarrow\\rangle$", "and",
            "$|\\!\\uparrow\\rangle$",
        )
        superposition_words.scale(0.8)
        superposition_words.set_color_by_tex("rangle", YELLOW)
        superposition_words.add_background_rectangle()
        superposition_words.to_corner(UP+LEFT)
        ket_sum = self.ket_sum
        ket_sum.generate_target()
        ket_sum.target.move_to(superposition_words)
        ket_sum.target.align_to(ket_sum, UP)
        sum_word = TextMobject("", "Sum")
        weighted_sum_word = TextMobject("Weighted", "sum")
        for word in sum_word, weighted_sum_word:
            word.scale(0.8)
            word.set_color(GREEN)
            word.add_background_rectangle()
            word.move_to(superposition_words.get_part_by_tex("Super"))
        self.play(
            Write(superposition_words, run_time = 2),
            MoveToTarget(ket_sum)
        )
        self.wait(2)
        self.play(
            FadeIn(sum_word),
            superposition_words.shift, MED_LARGE_BUFF*DOWN,
            ket_sum.shift, MED_LARGE_BUFF*DOWN,
        )
        self.wait()
        self.play(ReplacementTransform(
            sum_word, weighted_sum_word
        ))
        self.wait(2)
    def add_amplitudes(self):
        """Prefix the kets with numeric amplitudes (2 and 0.5) and scale
        the oscillating component vectors to match."""
        h_ket, plus, r_ket = self.ket_sum
        for mob in self.ket_sum:
            mob.generate_target()
        h_A, v_A = 2, 0.5
        h_A_mob, v_A_mob = A_mobs = VGroup(*[
            TexMobject(str(A)).add_background_rectangle()
            for A in [h_A, v_A]
        ])
        A_mobs.scale(0.8)
        A_mobs.set_color(GREEN)
        h_A_mob.move_to(h_ket, LEFT)
        VGroup(h_ket.target, plus.target).next_to(
            h_A_mob, RIGHT, SMALL_BUFF
        )
        v_A_mob.next_to(plus.target, RIGHT, SMALL_BUFF)
        r_ket.target.next_to(v_A_mob, RIGHT, SMALL_BUFF)
        A_mobs.shift(0.4*SMALL_BUFF*UP)
        h_ov = self.h_oscillating_vector
        v_ov = self.v_oscillating_vector
        self.play(*it.chain(
            map(MoveToTarget, self.ket_sum),
            map(Write, A_mobs),
            [
                UpdateFromAlphaFunc(
                    ov.vector,
                    self.generate_A_update(
                        ov,
                        A*np.array(ov.A_vect),
                        np.array(ov.A_vect)
                    )
                )
                for ov, A in [(h_ov, h_A), (v_ov, v_A)]
            ]
        ))
        self.wait(4)
        self.A_mobs = A_mobs
    def add_phase_shift(self):
        """Add a pi/2 phase to the vertical component, tracing an ellipse,
        then shrink the horizontal amplitude to match."""
        h_ket, plus, v_ket = self.ket_sum
        plus_phi = TexMobject("+", "\\pi/2")
        plus_phi.set_color_by_tex("pi", self.phi_color)
        plus_phi.scale(0.8)
        plus_phi.next_to(v_ket.get_part_by_tex("t"), RIGHT, SMALL_BUFF)
        v_ket.generate_target()
        VGroup(*v_ket.target[1][-2:]).next_to(plus_phi, RIGHT, SMALL_BUFF)
        v_ket.target[0].replace(v_ket.target[1])
        h_ov = self.h_oscillating_vector
        v_ov = self.v_oscillating_vector
        ellipse = Circle()
        ellipse.stretch_to_fit_height(2)
        ellipse.stretch_to_fit_width(8)
        ellipse.set_color(self.phi_color)
        h_A_mob, v_A_mob = self.A_mobs
        new_h_A_mob = v_A_mob.copy()
        new_h_A_mob.move_to(h_A_mob, RIGHT)
        self.add_foreground_mobject(plus_phi)
        self.play(
            MoveToTarget(v_ket),
            Write(plus_phi),
            UpdateFromAlphaFunc(
                v_ov.vector,
                self.generate_phi_update(
                    v_ov,
                    np.array([0, np.pi/2, 0]),
                    np.array(v_ov.phi_vect)
                )
            )
        )
        self.play(FadeIn(ellipse))
        self.wait(5)
        self.play(
            UpdateFromAlphaFunc(
                h_ov.vector,
                self.generate_A_update(
                    h_ov,
                    0.25*np.array(h_ov.A_vect),
                    np.array(h_ov.A_vect),
                )
            ),
            ellipse.stretch, 0.25, 0,
            Transform(h_A_mob, new_h_A_mob)
        )
        self.wait(8)
    #####
    def generate_A_update(self, ov, A_vect, prev_A_vect):
        """Return an alpha-update that interpolates ov.A_vect from
        prev_A_vect to A_vect over the animation."""
        def update(vect, alpha):
            ov.A_vect = interpolate(
                np.array(prev_A_vect),
                A_vect,
                alpha
            )
            return vect
        return update
    def generate_phi_update(self, ov, phi_vect, prev_phi_vect):
        """Return an alpha-update that interpolates ov.phi_vect from
        prev_phi_vect to phi_vect over the animation."""
        def update(vect, alpha):
            ov.phi_vect = interpolate(
                prev_phi_vect, phi_vect, alpha
            )
            return vect
        return update
    def generate_dashed_line_update(self, v1, v2):
        """Return an update keeping a dashed line parallel to v1 and
        attached (tip-to-tail) to v2's endpoint."""
        def update_line(line):
            line.put_start_and_end_on_with_projection(
                *v1.get_start_and_end()
            )
            line.shift(v2.get_end() - line.get_start())
        return update_line
class FromBracketFootnote(Scene):
    """Static footnote tracing the word "ket" back to "bra-ket"."""
    def construct(self):
        label = TextMobject(
            "From, ``Bra", "ket", "''",
            arg_separator = ""
        )
        label.set_color_by_tex("ket", YELLOW)
        label.scale_to_fit_width(FRAME_WIDTH - 1)
        self.add(label)
class Ay(Scene):
    """Displays a single large green A_y symbol."""
    def construct(self):
        amplitude_symbol = TexMobject("A_y")
        amplitude_symbol.set_color(GREEN)
        amplitude_symbol.scale(5)
        self.add(amplitude_symbol)
class CircularlyPolarizedLight(SumOfTwoWaves):
    """SumOfTwoWaves with the y component phase-shifted by pi/2,
    giving circular polarization."""
    CONFIG = {
        "EMWave_config" : {
            "phi_vect" : [0, np.pi/2, 0],
        },
    }
class AlternateBasis(ShowTipToTailSum):
    """Re-expresses the same diagonal oscillation in a rotated
    (diagonal/anti-diagonal) basis, then shows a vertically polarized
    state decomposed in that basis."""
    def construct(self):
        # Silently rebuild the tip-to-tail setup from the parent scene.
        self.force_skipping()
        self.add_vector()
        self.add_plane()
        self.add_vertial_vector()
        self.add_kets()
        self.show_vector_sum()
        self.remove(self.ket_sum, self.kets)
        self.reset_amplitude()
        self.revert_to_original_skipping_status()
        self.add_superposition_text()
        self.rotate_plane()
        self.show_vertically_polarized()
    def reset_amplitude(self):
        """Shrink the horizontal component back to unit amplitude."""
        self.h_oscillating_vector.A_vect = np.array([1, 0, 0])
    def add_superposition_text(self):
        """Prepare the E = (...)|ket1> + (...)|ket2> labels for both the
        horizontal/vertical and diagonal bases; add the first."""
        self.hv_superposition, self.da_superposition = superpositions = [
            TexMobject(
                "\\vec{\\textbf{E}}", "=",
                "(\\dots)",
                "|\\!\\%sarrow\\rangle"%s1,
                "+",
                "(\\dots)",
                "|\\!\\%sarrow\\rangle"%s2,
            )
            for s1, s2 in [("right", "up"), ("ne", "nw")]
        ]
        for superposition in superpositions:
            superposition.set_color_by_tex("rangle", YELLOW)
            superposition.set_color_by_tex("E", E_COLOR)
            superposition.add_background_rectangle(opacity = 1)
            superposition.to_edge(UP)
        self.add(self.hv_superposition)
    def rotate_plane(self):
        """Rotate the grid 45 degrees and project the component vectors
        onto the diagonal (d) and anti-diagonal (a) directions."""
        new_plane = NumberPlane(
            x_unit_size = 2,
            y_unit_size = 2,
            y_radius = FRAME_X_RADIUS,
            secondary_line_ratio = 0,
        )
        new_plane.add_coordinates()
        new_plane.save_state()
        new_plane.fade(1)
        # Unit vectors along the new basis directions.
        d = (RIGHT + UP)/np.sqrt(2)
        a = (LEFT + UP)/np.sqrt(2)
        self.wait(4)
        self.play(
            self.xy_plane.fade, 0.5,
            self.xy_plane.coordinate_labels.fade, 1,
            new_plane.restore,
            new_plane.rotate, np.pi/4,
            UpdateFromAlphaFunc(
                self.h_vector,
                self.generate_A_update(
                    self.h_oscillating_vector,
                    # Projection of the total amplitude onto d.
                    2*d*np.dot(0.5*RIGHT + UP, d),
                    np.array(self.h_oscillating_vector.A_vect)
                )
            ),
            UpdateFromAlphaFunc(
                self.v_vector,
                self.generate_A_update(
                    self.v_oscillating_vector,
                    # Projection of the total amplitude onto a.
                    2*a*np.dot(0.5*RIGHT + UP, a),
                    np.array(self.v_oscillating_vector.A_vect)
                )
            ),
            Transform(self.hv_superposition, self.da_superposition),
            run_time = 2,
        )
        self.wait(4)
    def show_vertically_polarized(self):
        """Morph the two components into equal/opposite diagonal parts
        whose sum oscillates vertically."""
        self.play(
            UpdateFromAlphaFunc(
                self.h_vector,
                self.generate_A_update(
                    self.h_oscillating_vector,
                    np.array([0.7, 0.7, 0]),
                    np.array(self.h_oscillating_vector.A_vect)
                )
            ),
            UpdateFromAlphaFunc(
                self.v_vector,
                self.generate_A_update(
                    self.v_oscillating_vector,
                    np.array([-0.7, 0.7, 0]),
                    np.array(self.v_oscillating_vector.A_vect)
                )
            ),
        )
        self.wait(8)
class WriteBasis(Scene):
    """Animates writing the phrase about choosing a basis."""
    def construct(self):
        title = TextMobject("Choice of ``basis''")
        title.scale_to_fit_width(FRAME_WIDTH-1)
        self.play(Write(title))
        self.wait()
class ShowPolarizingFilter(DirectionOfPolarizationScene):
    """Demonstrates a polarizing filter: it absorbs the horizontal
    component of the wave's energy, which is then expressed as a live
    superposition of basis kets whose coefficients track the current
    polarization angle; finally the filter is tilted to a diagonal basis.

    Fix: diagonal_filter previously ignored its loop variable and rotated
    by a constant np.pi/4 on every iteration (compare the intended
    pattern in write_as_superposition).
    """
    CONFIG = {
        "EMWave_config" : {
            "start_point" : FRAME_X_RADIUS*LEFT,
        },
        "apply_filter" : True,
    }
    def construct(self):
        self.setup_rectangles()
        self.fade_M_vects()
        self.axes.fade(0.5)
        self.initial_rotation()
        self.mention_energy_absorption()
        self.write_as_superposition()
        self.diagonal_filter()
    def setup_rectangles(self):
        """Build the parent's filter rectangles, hiding the last one."""
        DirectionOfPolarizationScene.setup_rectangles(self)
        self.rectangles[-1].fade(1)
    def fade_M_vects(self):
        """Hide the magnetic field vectors entirely."""
        self.em_wave.M_vects.set_fill(opacity = 0)
    def initial_rotation(self):
        """Rotate polarization to vertical and move to a head-on view."""
        self.wait()
        self.play(FadeIn(self.rectangles))
        self.wait()
        self.change_polarization_direction(np.pi/2, run_time = 3)
        self.move_camera(phi = 0.9*np.pi/2, theta = -0.05*np.pi)
    def mention_energy_absorption(self):
        """Overlay horizontal absorption lines on the filter with the
        caption explaining that horizontal energy is absorbed."""
        words = TextMobject("Absorbs horizontal \\\\ energy")
        words.set_color(RED)
        words.next_to(ORIGIN, UP+RIGHT, MED_LARGE_BUFF)
        # Rotate text into the camera-facing plane of the 3D shot.
        words.rotate(np.pi/2, RIGHT)
        words.rotate(np.pi/2, OUT)
        lines = VGroup(*[
            Line(
                np.sin(a)*RIGHT + np.cos(a)*UP,
                np.sin(a)*LEFT + np.cos(a)*UP,
                color = RED,
                stroke_width = 2,
            )
            for a in np.linspace(0, np.pi, 15)
        ])
        lines.rotate(np.pi/2, RIGHT)
        lines.rotate(np.pi/2, OUT)
        self.play(
            Write(words, run_time = 2),
            *map(GrowFromCenter, lines)
        )
        self.wait(6)
        self.play(FadeOut(lines))
        self.play(FadeOut(words))
    def write_as_superposition(self):
        """Write the wave as a live |right>/|up> superposition whose
        coefficients update as the polarization angle changes."""
        superposition, continual_updates = self.get_superposition_tex(0, "right", "up")
        rect = superposition.rect
        self.play(Write(superposition, run_time = 2))
        self.add(*continual_updates)
        for angle in np.pi/4, -np.pi/6:
            self.change_polarization_direction(angle)
            self.wait(3)
        self.move_camera(
            theta = -0.6*np.pi,
            added_anims = [
                # Keep the text facing the camera as it swings around.
                Rotate(superposition, -0.6*np.pi, axis = OUT)
            ]
        )
        rect.set_stroke(YELLOW, 3)
        self.play(ShowCreation(rect))
        arrow = Arrow(
            rect.get_nadir(), 3*RIGHT + 0.5*OUT,
            normal_vector = DOWN
        )
        self.play(ShowCreation(arrow))
        for angle in np.pi/3, -np.pi/3, np.pi/6:
            self.change_polarization_direction(angle)
            self.wait(2)
        self.play(
            FadeOut(superposition),
            FadeOut(arrow),
            *[
                FadeOut(cu.mobject)
                for cu in continual_updates
            ]
        )
        self.move_camera(theta = -0.1*np.pi)
    def diagonal_filter(self):
        """Tilt the filter 45 degrees and rewrite the superposition in
        the diagonal |nw>/|ne> basis."""
        superposition, continual_updates = self.get_superposition_tex(-np.pi/4, "nw", "ne")
        def update_filter_angle(pf, alpha):
            pf.filter_angle = interpolate(0, -np.pi/4, alpha)
        self.play(
            Rotate(self.pol_filter, np.pi/4, axis = LEFT),
            UpdateFromAlphaFunc(self.pol_filter, update_filter_angle),
            Animation(self.em_wave.mobject)
        )
        superposition.rect.set_stroke(YELLOW, 2)
        self.play(Write(superposition, run_time = 2))
        self.add(*continual_updates)
        for angle in np.pi/4, -np.pi/3, -np.pi/6:
            # BUGFIX: rotate by each target angle rather than by a
            # constant np.pi/4 every iteration (the loop variable was
            # previously unused).
            self.change_polarization_direction(angle)
            self.wait(2)
    #######
    def get_superposition_tex(self, angle, s1, s2):
        """Build the superposition expression for the basis pair
        (|s1-arrow>, |s2-arrow>) at the given reference angle.

        Returns (superposition mobject, continual updates) where the
        updates keep the two decimal coefficients equal to |sin| and
        |cos| of the polarization angle relative to ``angle``.
        """
        superposition = TexMobject(
            "0.00", "\\cos(", "2\\pi", "f", "t", ")",
            "|\\! \\%sarrow \\rangle"%s1,
            "+",
            "1.00", "\\cos(", "2\\pi", "f", "t", ")",
            "|\\! \\%sarrow \\rangle"%s2,
        )
        A_x = DecimalNumber(0)
        A_y = DecimalNumber(1)
        A_x.move_to(superposition[0])
        A_y.move_to(superposition[8])
        # Swap the static "0.00"/"1.00" placeholders for live decimals.
        superposition.submobjects[0] = A_x
        superposition.submobjects[8] = A_y
        VGroup(A_x, A_y).set_color(GREEN)
        superposition.set_color_by_tex("f", RED)
        superposition.set_color_by_tex("rangle", YELLOW)
        plus = superposition.get_part_by_tex("+")
        plus.add_to_back(BackgroundRectangle(plus))
        v_part = VGroup(*superposition[8:])
        rect = SurroundingRectangle(v_part)
        rect.fade(1)
        superposition.rect = rect
        superposition.add(rect)
        superposition.shift(3*UP + SMALL_BUFF*LEFT)
        superposition.rotate(np.pi/2, RIGHT)
        superposition.rotate(np.pi/2, OUT)
        def generate_decimal_update(trig_func):
            def update_decimal(decimal):
                # Rebuild the decimal at the current value and orient it
                # to match the camera, then morph the old glyphs into it.
                new_decimal = DecimalNumber(abs(trig_func(
                    self.reference_line.get_angle() - angle
                )))
                new_decimal.rotate(np.pi/2, RIGHT)
                new_decimal.rotate(np.pi/2, OUT)
                new_decimal.rotate(self.camera.get_theta(), OUT)
                new_decimal.scale_to_fit_depth(decimal.get_depth())
                new_decimal.move_to(decimal, UP)
                new_decimal.set_color(decimal.get_color())
                decimal.align_data(new_decimal)
                families = [
                    mob.family_members_with_points()
                    for mob in (decimal, new_decimal)
                ]
                for sm1, sm2 in zip(*families):
                    sm1.interpolate(sm1, sm2, 1)
                return decimal
            return update_decimal
        continual_updates = [
            ContinualUpdateFromFunc(
                A_x, generate_decimal_update(np.sin),
            ),
            ContinualUpdateFromFunc(
                A_y, generate_decimal_update(np.cos),
            ),
        ]
        return superposition, continual_updates
class NamePolarizingFilter(Scene):
    """Animates writing the name "Polarizing filter" across the frame."""
    def construct(self):
        name = TextMobject("Polarizing filter")
        name.scale_to_fit_width(FRAME_WIDTH - 1)
        self.play(Write(name))
        self.wait()
class EnergyOfWavesWavePortion(DirectWaveOutOfScreen):
    """Wave-picture of light's energy: label the wave's amplitude A, split it
    into horizontal/vertical component waves labeled A_x and A_y, write
    A = sqrt(A_x^2 + A_y^2), then rescale everything up and down so the
    braces visibly track the amplitudes.
    """
    CONFIG = {
        "EMWave_config" : {
            "A_vect" : [0, 1, 1],
            "amplitude" : 4,
            "start_point" : FRAME_X_RADIUS*LEFT + 2*DOWN,
        }
    }
    def construct(self):
        self.grow_arrows()
        self.move_into_position()
        self.fade_M_vects()
        self.label_A()
        self.add_components()
        self.scale_up_and_down()
    def grow_arrows(self):
        # Thicken stems and tips of the oscillating E-field vectors so they
        # remain legible once the camera repositions.
        for ov in self.em_wave.continual_animations:
            ov.vector.rectangular_stem_width = 0.1
            ov.vector.tip_length = 0.5
    def label_A(self):
        """Brace the diagonal oscillation and label its amplitude "A"."""
        brace = Brace(Line(ORIGIN, 4*RIGHT))
        brace.rotate(np.pi/4, OUT)  # align with the diagonal A_vect [0, 1, 1]
        brace.A = brace.get_tex("A", buff = MED_SMALL_BUFF)
        brace.A.scale_in_place(2)
        brace.A.set_color(GREEN)
        brace_group = VGroup(brace, brace.A)
        self.position_brace_group(brace_group)
        self.play(Write(brace_group, run_time = 1))
        self.wait(12)
        self.brace = brace
    def add_components(self):
        """Overlay horizontal and vertical component waves, brace each with
        A_x / A_y, and write A = sqrt(A_x^2 + A_y^2)."""
        h_wave = self.em_wave.copy()
        h_wave.A_vect = [0, 1, 0]
        v_wave = self.em_wave.copy()
        v_wave.A_vect = [0, 0, 1]
        # Each component of the length-4 diagonal amplitude is 4/sqrt(2)
        length = 4/np.sqrt(2)
        for wave in h_wave, v_wave:
            for ov in wave.continual_animations:
                ov.A_vect = length*np.array(wave.A_vect)
        h_brace = Brace(Line(ORIGIN, length*RIGHT))
        v_brace = Brace(Line(ORIGIN, length*UP), LEFT)
        for brace, c in (h_brace, "x"), (v_brace, "y"):
            brace.A = brace.get_tex("A_%s"%c, buff = MED_LARGE_BUFF)
            brace.A.scale_in_place(2)
            brace.A.set_color(GREEN)
        brace_group = VGroup(h_brace, h_brace.A, v_brace, v_brace.A)
        self.position_brace_group(brace_group)
        rhs = TexMobject("= \\sqrt{A_x^2 + A_y^2}")
        rhs.scale(2)
        # Indices of the A_x / A_y glyphs within the rendered TeX
        for i in 3, 5, 7, 9:
            rhs[i].set_color(GREEN)
        rhs.rotate(np.pi/2, RIGHT)
        rhs.rotate(np.pi/2, OUT)
        period = 1./self.em_wave.frequency
        self.add(h_wave, v_wave)
        self.play(
            FadeIn(h_wave.mobject),
            FadeIn(v_wave.mobject),
            self.brace.A.move_to, self.brace,
            self.brace.A.shift, SMALL_BUFF*(2*UP+IN),
            ReplacementTransform(self.brace, h_brace),
            Write(h_brace.A)
        )
        self.wait(6)
        self.play(
            ReplacementTransform(h_brace.copy(), v_brace),
            Write(v_brace.A)
        )
        self.wait(6)
        rhs.next_to(self.brace.A, UP, SMALL_BUFF)
        self.play(Write(rhs))
        self.wait(2*period)
        self.h_brace = h_brace
        self.v_brace = v_brace
        self.h_wave = h_wave
        self.v_wave = v_wave
    def scale_up_and_down(self):
        # Repeatedly rescale the wave (and braces) to show A_x, A_y tracking A
        for scale_factor in 1.25, 0.4, 1.5, 0.3, 2:
            self.scale_wave(scale_factor)
            self.wait()
        self.wait(4)
    ######
    def position_brace_group(self, brace_group):
        # Rotate into the plane the rotated camera is facing, then lower
        brace_group.rotate(np.pi/2, RIGHT)
        brace_group.rotate(np.pi/2, OUT)
        brace_group.shift(2*DOWN)
    def scale_wave(self, factor):
        """Animate scaling all wave amplitudes and both braces by ``factor``."""
        def generate_vect_update(ov):
            # Closure interpolating the oscillating vector's amplitude vector
            prev_A = np.array(ov.A_vect)
            new_A = factor*prev_A
            def update(vect, alpha):
                ov.A_vect = interpolate(
                    prev_A, new_A, alpha
                )
                return vect
            return update
        h_brace = self.h_brace
        v_brace = self.v_brace
        h_brace.generate_target()
        h_brace.target.stretch_about_point(
            factor, 1, h_brace.get_bottom()
        )
        v_brace.generate_target()
        v_brace.target.stretch_about_point(
            factor, 2, v_brace.get_nadir()
        )
        self.play(
            MoveToTarget(h_brace),
            MoveToTarget(v_brace),
            *[
                UpdateFromAlphaFunc(ov.vector, generate_vect_update(ov))
                for ov in it.chain(
                    self.em_wave.continual_animations,
                    self.h_wave.continual_animations,
                    self.v_wave.continual_animations,
                )
            ]
        )
class EnergyOfWavesTeacherPortion(TeacherStudentsScene):
    """Teacher presents Energy/Volume = epsilon_0 A^2 and shows it splitting
    into epsilon_0 A_x^2 + epsilon_0 A_y^2; students then picture the two
    equivalent viewpoints (adding components vs. Pythagorean theorem).
    """
    def construct(self):
        self.show_energy_equation()
        self.show_both_ways_of_thinking_about_it()
    def show_energy_equation(self):
        # Fully-faded dot serving only as a shared look-at target
        dot = Dot(self.teacher.get_top() + 2*(UP+LEFT))
        dot.fade(1)
        self.dot = dot
        energy = TexMobject(
            "\\frac{\\text{Energy}}{\\text{Volume}}",
            "=",
            "\\epsilon_0", "A", "^2"
        )
        energy.set_color_by_tex("A", GREEN)
        energy.to_corner(UP+LEFT)
        component_energy = TexMobject(
            "=", "\\epsilon_0", "A_x", "^2",
            "+", "\\epsilon_0", "A_y", "^2",
        )
        # Color the "A" glyph and exponent of each component term green
        for i in 2, 6:
            component_energy[i][0].set_color(GREEN)
            component_energy[i+1].set_color(GREEN)
        component_energy.next_to(energy[1], DOWN, MED_LARGE_BUFF, LEFT)
        self.play(
            Animation(dot),
            self.teacher.change, "raise_right_hand", dot,
        )
        self.change_student_modes(
            *["pondering"]*3,
            look_at_arg = dot
        )
        self.wait(2)
        self.play(Write(energy))
        self.play(self.teacher.change, "happy")
        self.wait(3)
        # Copy the epsilon_0 A^2 part into each component term
        self.play(
            ReplacementTransform(
                VGroup(*energy[-4:]).copy(),
                VGroup(*component_energy[:4])
            ),
            ReplacementTransform(
                VGroup(*energy[-4:]).copy(),
                VGroup(*component_energy[4:])
            )
        )
        self.change_student_modes(*["happy"]*3, look_at_arg = energy)
        self.wait()
    def show_both_ways_of_thinking_about_it(self):
        # Two students each get a thought bubble with one viewpoint
        s1, s2 = self.get_students()[:2]
        b1, b2 = [
            ThoughtBubble(direction = v).scale(0.5)
            for v in (LEFT, RIGHT)
        ]
        b1.pin_to(s1)
        b2.pin_to(s2)
        b1.write("Add \\\\ components")
        b2.write("Pythagorean \\\\ theorem")
        for b, s in (b1, s1), (b2, s2):
            self.play(
                ShowCreation(b),
                Write(b.content, run_time = 2),
                s.change, "thinking"
            )
        self.wait(2)
        self.change_student_modes(
            *["plain"]*3,
            look_at_arg = self.dot,
            added_anims = [
                self.teacher.change, "raise_right_hand", self.dot
            ]
        )
        self.play(self.teacher.look_at, self.dot)
        self.wait(5)
class DescribePhoton(ThreeDScene):
    """Describe a single diagonally-polarized photon as the quantum state
    |psi> = alpha |right> + beta |up>, viewed head-on: show its components,
    an alternate (diagonal) basis, the energy bookkeeping hf = (1/2)hf +
    (1/2)hf, and the puzzle of what a "half photon" component could mean.
    """
    CONFIG = {
        "x_color" : RED,
        "y_color" : GREEN,
    }
    def setup(self):
        """Build axes, camera, a wave-number-0 EM wave and its WavePacket."""
        self.axes = ThreeDAxes()
        self.add(self.axes)
        self.set_camera_position(phi = 0.8*np.pi/2, theta = -np.pi/4)
        em_wave = EMWave(
            start_point = FRAME_X_RADIUS*LEFT,
            A_vect = [0, 1, 1],
            wave_number = 0,
            amplitude = 3,
        )
        for ov in em_wave.continual_animations:
            ov.vector.normal_vector = RIGHT
            ov.vector.set_fill(opacity = 0.7)
        # Hide the magnetic field; only the E-field matters here
        for M_vect in em_wave.M_vects:
            M_vect.set_fill(opacity = 0)
        em_wave.update(0)
        photon = WavePacket(
            em_wave = em_wave,
            run_time = 2,
        )
        self.photon = photon
        self.em_wave = em_wave
    def construct(self):
        self.add_ket_equation()
        self.shoot_a_few_photons()
        self.freeze_photon()
        self.reposition_to_face_photon_head_on()
        self.show_components()
        self.show_amplitude_and_phase()
        self.change_basis()
        self.write_different_meaning()
        self.write_components()
        self.describe_via_energy()
        self.components_not_possible_in_isolation()
        self.ask_what_they_mean()
        self.change_camera()
    def add_ket_equation(self):
        """Place |psi> = alpha|right> + beta|up> at the top, with an
        (initially invisible) rect and "Polarization state" label attached."""
        equation = TexMobject(
            "|\\!\\psi\\rangle",
            "=",
            "\\alpha", "|\\!\\rightarrow \\rangle", "+",
            "\\beta", "|\\!\\uparrow \\rangle",
        )
        equation.to_edge(UP)
        equation.set_color_by_tex("psi", E_COLOR)
        equation.set_color_by_tex("alpha", self.x_color)
        equation.set_color_by_tex("beta", self.y_color)
        rect = SurroundingRectangle(equation.get_part_by_tex("psi"))
        rect.set_color(E_COLOR)
        words = TextMobject("Polarization\\\\", "state")
        words.next_to(rect, DOWN)
        for part in words:
            bg_rect = BackgroundRectangle(part)
            bg_rect.stretch_in_place(2, 1)
            part.add_to_back(bg_rect)
        equation.rect = rect
        equation.words = words
        equation.add_background_rectangle()
        equation.add(rect, words)
        # Keep rect/words invisible until show_components reveals them
        VGroup(rect, words).fade(1)
        equation.rotate(np.pi/2, RIGHT)
        equation.rotate(np.pi/2 + self.camera.get_theta(), OUT)
        self.add(equation)
        self.equation = equation
        # The "alpha|right> + beta|up>" portion, swapped out in change_basis
        self.superposition = VGroup(*equation[1][2:])
    def shoot_a_few_photons(self):
        for x in range(2):
            self.play(self.photon)
    def freeze_photon(self):
        """Play the photon 55% of the way across and leave it on screen."""
        self.play(
            self.photon,
            rate_func = lambda x : 0.55*x,
            run_time = 1
        )
        self.add(self.photon.mobject)
        # Restore defaults for later full-length replays
        self.photon.rate_func = lambda x : x
        self.photon.run_time = 2
    def reposition_to_face_photon_head_on(self):
        """Move camera to look down the propagation axis, adding a plane."""
        plane = NumberPlane(
            color = LIGHT_GREY,
            secondary_color = DARK_GREY,
            x_unit_size = 2,
            y_unit_size = 2,
            y_radius = FRAME_X_RADIUS,
        )
        plane.add_coordinates(x_vals = range(-3, 4), y_vals = [])
        plane.rotate(np.pi/2, RIGHT)
        plane.rotate(np.pi/2, OUT)
        self.play(self.em_wave.M_vects.set_fill, None, 0)
        self.move_camera(
            phi = np.pi/2, theta = 0,
            added_anims = [
                Rotate(self.equation, -self.camera.get_theta())
            ]
        )
        self.play(
            Write(plane, run_time = 1),
            Animation(self.equation)
        )
        self.xy_plane = plane
    def show_components(self):
        """Draw the horizontal/vertical component arrows of the frozen
        photon's E-vector, labeled by copies of the ket terms."""
        h_arrow, v_arrow = [
            Vector(
                1.38*direction,
                color = color,
                normal_vector = RIGHT,
            )
            for color, direction in [(self.x_color, UP), (self.y_color, OUT)]
        ]
        v_arrow.move_to(h_arrow.get_end(), IN)
        h_part = VGroup(*self.equation[1][2:4]).copy()  # alpha|right>
        v_part = VGroup(*self.equation[1][5:7]).copy()  # beta|up>
        self.play(
            self.equation.rect.set_stroke, BLUE, 4,
            self.equation.words.set_fill, WHITE, 1,
        )
        for part, arrow, d in (h_part, h_arrow, IN), (v_part, v_arrow, UP):
            self.play(
                part.next_to, arrow.get_center(), d,
                ShowCreation(arrow)
            )
            # Add a background rectangle while temporarily unrotated
            part.rotate(np.pi/2, DOWN)
            bg_rect = BackgroundRectangle(part)
            bg_rect.stretch_in_place(1.3, 0)
            part.add_to_back(bg_rect)
            part.rotate(np.pi/2, UP)
            self.add(part)
        self.wait()
        self.h_part_tex = h_part
        self.h_arrow = h_arrow
        self.v_part_tex = v_part
        self.v_arrow = v_arrow
    def show_amplitude_and_phase(self):
        """Expand alpha as A_x e^{i(2 pi f t + phi_x)}, highlighting the
        amplitude and phase pieces, then fade it all back out."""
        alpha = self.h_part_tex[1]
        new_alpha = alpha.copy().shift(IN)
        rhs = TexMobject(
            "=", "A_x", "e",
            "^{i", "(2\\pi", "f", "t", "+", "\\phi_x)}"
        )
        A_rect = SurroundingRectangle(rhs.get_part_by_tex("A_x"), buff = 0.5*SMALL_BUFF)
        A_word = TextMobject("Amplitude")
        A_word.add_background_rectangle()
        A_word.next_to(A_rect, DOWN, aligned_edge = LEFT)
        A_group = VGroup(A_rect, A_word)
        A_group.set_color(YELLOW)
        phase_rect = SurroundingRectangle(VGroup(*rhs[4:]), buff = 0.5*SMALL_BUFF)
        phase_word = TextMobject("Phase")
        phase_word.add_background_rectangle()
        phase_word.next_to(phase_rect, UP)
        phase_group = VGroup(phase_word, phase_rect)
        phase_group.set_color(MAROON_B)
        rhs.add_background_rectangle()
        group = VGroup(rhs, A_group, phase_group)
        group.rotate(np.pi/2, RIGHT)
        group.rotate(np.pi/2, OUT)
        group.next_to(new_alpha, UP, SMALL_BUFF)
        self.play(
            ReplacementTransform(alpha.copy(), new_alpha),
            FadeIn(rhs)
        )
        # NOTE(review): A_group is (rect, word) while phase_group is
        # (word, rect), so for A_group the names below come out swapped —
        # the rect is Written and the word ShowCreation'd. Visual effect is
        # similar, so left as-is; confirm before relying on the ordering.
        for word, rect in A_group, phase_group:
            self.play(
                ShowCreation(rect),
                Write(word, run_time = 1)
            )
            self.wait()
        self.play(*map(FadeOut, [new_alpha, group]))
    def change_basis(self):
        """Temporarily tilt the whole picture to express the same state in a
        diagonal basis gamma|NE> + delta|NW>, then restore everything."""
        superposition = self.superposition
        plane = self.xy_plane
        h_arrow = self.h_arrow
        v_arrow = self.v_arrow
        h_part = self.h_part_tex
        v_part = self.v_part_tex
        axes = self.axes
        movers = [
            plane, axes,
            h_arrow, v_arrow,
            h_part, v_part,
            self.equation,
            superposition,
        ]
        for mob in movers:
            mob.save_state()
        superposition.target = TexMobject(
            "\\gamma", "|\\! \\nearrow \\rangle", "+",
            "\\delta", "|\\! \\nwarrow \\rangle",
        )
        superposition.target.set_color_by_tex("gamma", TEAL_D)
        superposition.target.set_color_by_tex("delta", MAROON)
        for part in superposition.target.get_parts_by_tex("rangle"):
            part[1].rotate_in_place(-np.pi/12)
        superposition.target.rotate(np.pi/2, RIGHT)
        superposition.target.rotate(np.pi/2, OUT)
        superposition.target.move_to(superposition)
        for mob in plane, axes:
            mob.generate_target()
            mob.target.rotate(np.pi/6, RIGHT)
        A = 1.9
        # Components of the fixed E-vector along the tilted basis directions
        h_arrow.target = Vector(
            A*np.cos(np.pi/12)*rotate_vector(UP, np.pi/6, RIGHT),
            normal_vector = RIGHT,
            color = TEAL
        )
        v_arrow.target = Vector(
            A*np.sin(np.pi/12)*rotate_vector(OUT, np.pi/6, RIGHT),
            normal_vector = RIGHT,
            color = MAROON
        )
        v_arrow.target.shift(h_arrow.target.get_vector())
        h_part.target = VGroup(*superposition.target[:2]).copy()
        v_part.target = VGroup(*superposition.target[3:]).copy()
        h_part.target.next_to(
            h_arrow.target.get_center(), IN+UP, SMALL_BUFF
        )
        v_part.target.next_to(
            v_arrow.target.get_center(), UP, SMALL_BUFF
        )
        for part in h_part.target, v_part.target:
            part.rotate(np.pi/2, DOWN)
            part.add_to_back(BackgroundRectangle(part))
            part.rotate(np.pi/2, UP)
        self.equation.generate_target()
        self.play(*map(MoveToTarget, movers))
        self.wait(2)
        self.play(*[mob.restore for mob in movers])
        self.wait()
    def write_different_meaning(self):
        """Note that the ket superposition means something different from a
        classical sum, with a confused Mortimer underneath."""
        superposition = self.superposition
        # Unrotate, surround, re-rotate so the rect fits in screen-space
        superposition.rotate(np.pi/2, DOWN)
        rect = SurroundingRectangle(superposition)
        VGroup(superposition, rect).rotate(np.pi/2, UP)
        morty = Mortimer(mode = "confused")
        blinked = morty.copy().blink()
        words = TextMobject("Means something \\\\ different...")
        for mob in morty, blinked, words:
            mob.rotate(np.pi/2, RIGHT)
            mob.rotate(np.pi/2, OUT)
        words.next_to(rect, UP)
        VGroup(morty, blinked).next_to(words, IN)
        self.play(
            ShowCreation(rect),
            Write(words, run_time = 2)
        )
        self.play(FadeIn(morty))
        self.play(Transform(
            morty, blinked,
            rate_func = squish_rate_func(there_and_back)
        ))
        self.wait()
        self.play(*map(FadeOut, [
            morty, words, rect,
            self.equation.rect,
            self.equation.words,
        ]))
    def write_components(self):
        """Brace the diagonal (length 1) and its sqrt(1/2) components."""
        d_brace = Brace(Line(ORIGIN, 2*RIGHT), UP, buff = SMALL_BUFF)
        h_brace = Brace(Line(ORIGIN, (2/np.sqrt(2))*RIGHT), DOWN, buff = SMALL_BUFF)
        v_brace = Brace(Line(ORIGIN, (2/np.sqrt(2))*UP), RIGHT, buff = SMALL_BUFF)
        d_brace.rotate(np.pi/4)
        v_brace.shift((2/np.sqrt(2))*RIGHT)
        braces = VGroup(d_brace, h_brace, v_brace)
        group = VGroup(braces)
        tex = ["1"] + 2*["\\sqrt{1/2}"]
        colors = BLUE, self.x_color, self.y_color
        for brace, tex, color in zip(braces, tex, colors):
            brace.label = brace.get_tex(tex, buff = SMALL_BUFF)
            brace.label.add_background_rectangle()
            brace.label.set_color(color)
            group.add(brace.label)
        group.rotate(np.pi/2, RIGHT)
        group.rotate(np.pi/2, OUT)
        self.play(
            GrowFromCenter(d_brace),
            Write(d_brace.label)
        )
        self.wait()
        self.play(
            FadeOut(self.h_part_tex),
            FadeOut(self.v_part_tex),
            GrowFromCenter(h_brace),
            GrowFromCenter(v_brace),
        )
        self.play(
            Write(h_brace.label),
            Write(v_brace.label),
        )
        self.wait()
        self.d_brace = d_brace
        self.h_brace = h_brace
        self.v_brace = v_brace
    def describe_via_energy(self):
        """Write Energy = (hf)(1)^2 = (hf)(sqrt(1/2))^2 + (hf)(sqrt(1/2))^2,
        pulling each coefficient from the corresponding brace label."""
        energy = TexMobject(
            "&\\text{Energy}",
            "=", "(hf)", "(", "1", ")^2\\\\",
            "&=", "(hf)", "\\left(", "\\sqrt{1/2}", "\\right)^2",
            "+", "(hf)", "\\left(", "\\sqrt{1/2}", "\\right)^2",
        )
        energy.scale(0.8)
        one = energy.get_part_by_tex("1", substring = False)
        one.set_color(BLUE)
        halves = energy.get_parts_by_tex("1/2")
        halves[0].set_color(self.x_color)
        halves[1].set_color(self.y_color)
        # Split the equation into its three "=" chunks for staged writing
        indices = [0, 3, 6, len(energy)]
        parts = VGroup(*[
            VGroup(*energy[i1:i2])
            for i1, i2 in zip(indices, indices[1:])
        ])
        for part in parts:
            bg_rect = BackgroundRectangle(part)
            bg_rect.stretch_in_place(1.5, 1)
            part.add_to_back(bg_rect)
        parts.to_corner(UP+LEFT, buff = MED_SMALL_BUFF)
        parts.shift(DOWN)
        parts.rotate(np.pi/2, RIGHT)
        parts.rotate(np.pi/2, OUT)
        self.play(Write(parts[0]), run_time = 2)
        self.play(Indicate(energy.get_part_by_tex("hf")))
        self.play(
            Transform(
                self.d_brace.label.copy(),
                one.copy(),
                remover = True
            ),
            Write(parts[1], run_time = 1),
        )
        self.wait()
        self.play(
            Transform(
                self.h_brace.label[1].copy(),
                halves[0].copy(),
                remover = True,
                rate_func = squish_rate_func(smooth, 0, 0.75)
            ),
            Transform(
                self.v_brace.label[1].copy(),
                halves[1].copy(),
                remover = True,
                rate_func = squish_rate_func(smooth, 0.25, 1)
            ),
            Write(parts[2]),
            run_time = 2
        )
        self.wait()
        self.energy_equation_parts = parts
    def components_not_possible_in_isolation(self):
        """Box the (1/2)hf term and have a puzzled Randolph react to it."""
        half_hf = VGroup(*self.energy_equation_parts[2][1:6])
        half_hf.rotate(np.pi/2, DOWN)
        rect = SurroundingRectangle(half_hf)
        VGroup(half_hf, rect).rotate(np.pi/2, UP)
        randy = Randolph()
        randy.scale(0.7)
        randy.look(UP)
        randy.rotate(np.pi/2, RIGHT)
        randy.rotate(np.pi/2, OUT)
        randy.next_to(rect, IN)
        self.play(
            ShowCreation(rect),
            FadeIn(randy)
        )
        # Unrotate, change expression, rotate back — change() works in the
        # character's own frame
        self.play(
            randy.rotate, np.pi/2, IN,
            randy.rotate, np.pi/2, LEFT,
            randy.change, "maybe",
            randy.rotate, np.pi/2, RIGHT,
            randy.rotate, np.pi/2, OUT,
        )
        self.wait()
    def ask_what_they_mean(self):
        """Mortimer objects ("?!?") to the isolated horizontal component."""
        morty = Mortimer(mode = "confused")
        morty.scale(0.7)
        morty.to_edge(LEFT)
        bubble = morty.get_bubble()
        bubble.write("?!?")
        bubble.resize_to_content()
        bubble.add(bubble.content)
        bubble.pin_to(morty)
        group = VGroup(morty, bubble)
        group.to_corner(DOWN+RIGHT)
        group.rotate(np.pi/2, RIGHT)
        group.rotate(np.pi/2, OUT)
        component = VGroup(self.h_arrow, self.h_brace, self.h_brace.label)
        self.play(
            FadeIn(morty),
            component.next_to, morty, DOWN, OUT,
            component.shift, MED_LARGE_BUFF*(DOWN + OUT),
        )
        component.rotate(np.pi/2, DOWN)
        cross = Cross(component)
        VGroup(component, cross).rotate(np.pi/2, UP)
        cross.set_color("#ff0000")
        self.play(ShowCreation(cross))
        bubble.remove(bubble.content)
        self.play(
            ShowCreation(bubble),
            Write(bubble.content),
            morty.look_at, component,
        )
        self.wait()
    def change_camera(self):
        """Clear the scene (keeping photon and axes) and replay the photon
        from its frozen position off the screen."""
        everything = VGroup(*self.get_top_level_mobjects())
        everything.remove(self.photon.mobject)
        everything.remove(self.axes)
        self.play(*map(FadeOut, everything))
        self.move_camera(
            phi = 0.8*np.pi/2,
            theta = -0.3*np.pi,
            run_time = 2
        )
        # Resume from the 55% mark where freeze_photon stopped
        self.play(
            self.photon,
            rate_func = lambda x : min(x + 0.55, 1),
            run_time = 2,
        )
        self.photon.rate_func = lambda x : x
        self.play(self.photon)
        self.wait()
class SeeCommentInDescription(Scene):
    """Footnote card pointing viewers to the video description."""
    def construct(self):
        note = TextMobject("""
            \\begin{flushleft}
            $^*$See comment in the \\\\
            description on single-headed \\\\
            vs. double-headed arrows
            \\end{flushleft}
        """)
        note.scale_to_fit_width(FRAME_WIDTH - 1)
        note.to_corner(DOWN+LEFT)
        self.add(note)
class SeeCommentInDescriptionAgain(Scene):
    """Second footnote card referring back to the video description."""
    def construct(self):
        note = TextMobject("$^*$Again, see description")
        note.scale_to_fit_width(FRAME_WIDTH - 1)
        note.to_corner(DOWN+LEFT)
        self.add(note)
class GetExperimental(TeacherStudentsScene):
    """Teacher excitedly proposes getting experimental; students cheer."""
    def construct(self):
        self.teacher_says("Get experimental!", target_mode = "hooray")
        self.change_student_modes("hooray", "hooray", "hooray")
        self.wait(3)
class ShootPhotonThroughFilter(DirectionOfPolarizationScene):
    """Shoot diagonally-polarized photons at a single polarizing filter and
    show the quantum outcome: each photon passes or is absorbed whole, with
    probability 1/2 each, rather than losing half its energy.
    """
    CONFIG = {
        "EMWave_config" : {
            "wave_number" : 0,
            "A_vect" : [0, 1, 1],
            "start_point" : FRAME_X_RADIUS*LEFT,
            "amplitude" : np.sqrt(2),
        },
        "pol_filter_configs" : [{
            "label_tex" : "\\text{Filter}",
            "include_arrow_label" : False,
        }],
        "apply_filter" : True,
        "quantum" : True,
        # Fraction of the path a photon travels before reaching the filter
        "pre_filter_alpha" : 0.35,
        "ambient_rotation_rate" : 0,
    }
    def setup(self):
        DirectionOfPolarizationScene.setup(self)
        self.em_wave.update(0)
        # Only individual photons (WavePackets) are shown, not the full wave
        self.remove(self.em_wave)
    def construct(self):
        self.force_skipping()
        self.add_superposition_tex()
        self.ask_what_would_happen()
        self.expect_half_energy_to_be_absorbed()
        self.probabalistic_passing_and_blocking()
        # self.note_change_in_polarization()
    def add_superposition_tex(self):
        """Write |NE> = (sqrt(1/2))|right> + (sqrt(1/2))|up> at the top."""
        superposition_tex = TexMobject(
            "|\\!\\nearrow\\rangle",
            "=",
            "(\\sqrt{1/2})", "|\\!\\rightarrow \\rangle", "+",
            "(\\sqrt{1/2})", "|\\!\\uparrow \\rangle",
        )
        superposition_tex.scale(0.9)
        superposition_tex[0].set_color(E_COLOR)
        halves = superposition_tex.get_parts_by_tex("1/2")
        for half, color in zip(halves, [RED, GREEN]):
            half.set_color(color)
        # Invisible rectangles around each component term, revealed later
        h_rect = SurroundingRectangle(VGroup(*superposition_tex[2:4]))
        v_rect = SurroundingRectangle(VGroup(*superposition_tex[5:7]))
        VGroup(h_rect, v_rect).fade(1)
        superposition_tex.h_rect = h_rect
        superposition_tex.v_rect = v_rect
        superposition_tex.add(h_rect, v_rect)
        superposition_tex.next_to(ORIGIN, LEFT)
        superposition_tex.to_edge(UP)
        superposition_tex.rotate(np.pi/2, RIGHT)
        self.superposition_tex = superposition_tex
    def ask_what_would_happen(self):
        """Freeze a photon just before the filter and pose the question."""
        photon = self.get_photon(
            rate_func = lambda t : self.pre_filter_alpha*t,
            remover = False,
            run_time = 0.6,
        )
        question = TextMobject("What's going to happen?")
        question.add_background_rectangle()
        question.set_color(YELLOW)
        question.rotate(np.pi/2, RIGHT)
        question.next_to(self.superposition_tex, IN)
        # Add a second (vertical) arrow so the filter axis reads as a cross
        self.pol_filter.add(
            self.pol_filter.arrow.copy().rotate(np.pi/2, OUT)
        )
        self.pol_filter.save_state()
        self.pol_filter.shift(5*OUT)
        self.set_camera_position(theta = -0.9*np.pi)
        self.play(self.pol_filter.restore)
        self.move_camera(
            theta = -0.6*np.pi,
        )
        self.play(
            photon,
            FadeIn(self.superposition_tex)
        )
        self.play(Write(question, run_time = 1))
        self.wait()
        self.play(FadeOut(self.pol_filter.label))
        self.pol_filter.remove(self.pol_filter.label)
        self.add(self.pol_filter)
        self.question = question
        self.frozen_photon = photon
    def expect_half_energy_to_be_absorbed(self):
        """Show the (classical) expectation: the horizontal half of the
        energy gets absorbed. Camera swings to a head-on view and back."""
        words = TextMobject("Absorbs horizontal \\\\ energy")
        words.set_color(RED)
        words.next_to(ORIGIN, UP+RIGHT, MED_LARGE_BUFF)
        words.rotate(np.pi/2, RIGHT)
        words.rotate(np.pi/2, OUT)
        # Horizontal chords across the filter disk, suggesting absorption
        lines = VGroup(*[
            Line(
                np.sin(a)*RIGHT + np.cos(a)*UP,
                np.sin(a)*LEFT + np.cos(a)*UP,
                color = RED,
                stroke_width = 2,
            )
            for a in np.linspace(0, np.pi, 15)
        ])
        lines.rotate(np.pi/2, RIGHT)
        lines.rotate(np.pi/2, OUT)
        self.move_camera(
            phi = np.pi/2, theta = 0,
            added_anims = [
                Rotate(self.superposition_tex, np.pi/2),
            ] + [
                ApplyMethod(
                    v.rotate_in_place,
                    -np.pi/2,
                    method_kwargs = {"axis" : v.get_vector()}
                )
                for v in self.frozen_photon.mobject
            ]
        )
        self.play(
            Write(words, run_time = 2),
            self.superposition_tex.h_rect.set_stroke, RED, 3,
            # list(...) so the list concatenation also works on Python 3,
            # where map returns an iterator
            *list(map(GrowFromCenter, lines)) + [
                Animation(self.pol_filter),
                Animation(self.frozen_photon.mobject)
            ]
        )
        self.wait(2)
        self.move_camera(
            phi = 0.8*np.pi/2, theta = -0.7*np.pi,
            added_anims = [
                FadeOut(words),
                Animation(lines),
                Rotate(self.superposition_tex, -np.pi/2),
            ] + [
                ApplyMethod(
                    v.rotate_in_place,
                    np.pi/2,
                    method_kwargs = {"axis" : v.get_vector()}
                )
                for v in self.frozen_photon.mobject
            ]
        )
        self.play(
            FadeOut(lines),
            FadeOut(self.question),
            self.superposition_tex.h_rect.fade, 1,
            Animation(self.pol_filter)
        )
        self.wait()
        self.absorption_words = words
    def probabalistic_passing_and_blocking(self):
        """Show P(pass) = 1/2, then fire 12 photons in a random pass/block
        order (6 of each)."""
        absorption = self.get_filter_absorption_animation(
            self.pol_filter, self.get_blocked_photon()
        )
        prob = TexMobject("P(", "\\text{pass}", ")", "=", "1/2")
        prob.set_color_by_tex("pass", GREEN)
        prob.rotate(np.pi/2, RIGHT)
        prob.next_to(self.superposition_tex, IN, MED_SMALL_BUFF, RIGHT)
        self.remove(self.frozen_photon.mobject)
        # Resume the frozen photon from where ask_what_would_happen left it
        self.play(
            self.get_photon(),
            rate_func = lambda t : min(t+self.pre_filter_alpha, 1),
        )
        self.play(
            FadeIn(prob),
            self.get_blocked_photon(),
            absorption
        )
        bools = 6*[True] + 6*[False]
        self.revert_to_original_skipping_status()
        random.shuffle(bools)
        for should_pass in bools:
            if should_pass:
                self.play(self.get_photon(), run_time = 1)
            else:
                self.play(
                    self.get_blocked_photon(),
                    Animation(self.axes),
                    absorption,
                    run_time = 1
                )
        self.play(FadeOut(prob))
    def note_change_in_polarization(self):
        """(Unused in construct) Show the state collapsing from |NE> to |up>
        as the photon crosses the filter."""
        words = TextMobject(
            "``Collapses'' \\\\ from", "$|\\!\\nearrow\\rangle$",
            "to", "$|\\!\\uparrow\\rangle$"
        )
        words.set_color_by_tex("nearrow", E_COLOR)
        words.set_color_by_tex("uparrow", GREEN)
        words.next_to(ORIGIN, RIGHT, MED_LARGE_BUFF)
        words.shift(2*UP)
        words.rotate(np.pi/2, RIGHT)
        photon = self.get_photon(run_time = 4)
        # Recolor the post-filter portion of the photon green (|up>)
        for vect in photon.mobject:
            if vect.get_center()[0] > 0:
                vect.saved_state.set_fill(GREEN)
        self.play(FadeIn(words), photon)
        for x in range(3):
            self.play(photon)
    ######
    def get_photon(self, **kwargs):
        """Return a WavePacket photon on a fresh copy of the EM wave."""
        kwargs["run_time"] = kwargs.get("run_time", 1)
        kwargs["include_M_vects"] = False
        return WavePacket(em_wave = self.em_wave.copy(), **kwargs)
    def get_blocked_photon(self, **kwargs):
        """Return a photon that gets absorbed by the filter."""
        kwargs["get_filtered"] = True
        # Bug fix: the original called self.get_photon(self, **kwargs),
        # passing self twice and raising TypeError on every call.
        return self.get_photon(**kwargs)
class PhotonPassesCompletelyOrNotAtAllStub(ExternallyAnimatedScene):
    """Placeholder: this clip is rendered outside this script."""
    pass
class YouCanSeeTheCollapse(TeacherStudentsScene):
    """Teacher points out the collapse is literally visible; mixed student
    reactions follow."""
    def construct(self):
        quote = "You can literally \\\\ \\emph{see} the collapse"
        self.teacher_says(quote, target_mode = "hooray")
        self.change_student_modes("confused", "hooray", "erm")
        self.wait(3)
class ThreeFilters(ShootPhotonThroughFilter):
    """The classic three-polarizer demo: filters at 0, 45 and 90 degrees.
    Light rays (drawn as yellow lines) thin out by 50% at each successive
    filter; removing the middle filter drops transmission from 25% to 0%.
    """
    CONFIG = {
        "filter_x_coordinates" : [-4, 0, 4],
        "pol_filter_configs" : [
            {"filter_angle" : 0},
            {"filter_angle" : np.pi/4},
            {"filter_angle" : np.pi/2},
        ],
        "EMWave_config" : {
            "A_vect" : [0, 0, 1],
            "amplitude" : 1.5,
            "n_vectors" : 60,
        },
        "line_start_length" : 8,
        "line_end_length" : 8,
        "n_lines" : 20,
        "lines_depth" : 1.8,
        "lines_shift_vect" : SMALL_BUFF*OUT,
        # Fixed seed so the same subset of rays is "blocked" every render
        "random_seed" : 6,
    }
    def construct(self):
        self.remove(self.axes)
        self.setup_filters()
        self.setup_lines()
        self.setup_arrows()
        self.fifty_percent_pass_second()
        self.show_changed_to_diagonal()
        self.fifty_percent_to_pass_third()
        self.show_lines_with_middle()
        self.remove_middle_then_put_back()
    def setup_filters(self):
        # Give each filter's angle label a background rectangle; the
        # rotate/unrotate dance positions it while temporarily unrotated
        for pf in self.pol_filters:
            pf.arrow_label.rotate(np.pi/2, OUT)
            pf.arrow_label.next_to(pf.arrow, RIGHT)
            pf.arrow_label.rotate(np.pi/2, LEFT)
            pf.arrow_label.add_background_rectangle()
            pf.arrow_label.rotate(np.pi/2, RIGHT)
            self.add_foreground_mobject(pf.arrow_label)
    def setup_lines(self):
        """Create the ray bundles between consecutive filters, with density
        ratios 1, 1, 0.5, 0.25, plus a direct A-to-C bundle for the
        middle-filter-removed configuration."""
        lines_group = VGroup(*[
            self.get_lines(pf1, pf2, ratio)
            for pf1, pf2, ratio in zip(
                [None] + list(self.pol_filters),
                list(self.pol_filters) + [None],
                [1, 1, 0.5, 0.25]
            )
        ])
        lines = lines_group[0]
        # Double the density of the incoming (pre-filter) bundle
        spacing = lines[1].get_start() - lines[0].get_start()
        lines.add(lines.copy().shift(spacing/2))
        self.lines_group = lines_group
        self.A_to_C_lines = self.get_lines(
            self.pol_filters[0], self.pol_filters[2],
        )
    def setup_arrows(self):
        for E_vect in self.em_wave.E_vects:
            E_vect.normal_vector = IN+DOWN
        self.em_wave.update(0)
    def fifty_percent_pass_second(self):
        """Sweep the ray bundles through the first two filters with a
        curved "50%" arrow over the second."""
        arrow = Arrow(
            ORIGIN, 3*RIGHT,
            use_rectangular_stem = False,
            path_arc = -0.8*np.pi
        )
        label = TexMobject("50\\%")
        label.next_to(arrow, UP)
        group = VGroup(arrow, label)
        group.rotate(np.pi/2, RIGHT)
        group.next_to(self.pol_filters[1], OUT, buff = 0)
        group.set_color(BLUE)
        l1, l2, l3 = self.lines_group[:3]
        pf1, pf2, pf3 = self.pol_filters
        kwargs = {
            "submobject_mode" : "all_at_once",
            "rate_func" : None,
        }
        self.play(ShowCreation(l1, run_time = 1, **kwargs))
        self.play(
            ShowCreation(l2, **kwargs),
            Animation(VGroup(pf1, l1)),
            ShowCreation(arrow),
            run_time = 0.5,
        )
        self.play(
            ShowCreation(l3, **kwargs),
            Animation(VGroup(pf2, l2, pf1, l1)),
            FadeIn(label),
            run_time = 0.5,
        )
        self.wait(2)
        self.play(
            FadeOut(l3),
            Animation(pf2),
            FadeOut(l2),
            Animation(pf1),
            FadeOut(l1)
        )
        self.fifty_percent_arrow_group = group
    def show_changed_to_diagonal(self):
        """Freeze a photon after the 45-degree filter and label its new
        state |NE>, verified by a temporary head-on camera move."""
        photon = self.get_photon(
            run_time = 2,
            rate_func = lambda x : 0.6*x,
            remover = False,
        )
        brace = Brace(Line(1.5*LEFT, 1.5*RIGHT), DOWN)
        label = brace.get_text(
            "Changed to",
            "$|\\!\\nearrow\\rangle$"
        )
        label.set_color_by_tex("rangle", BLUE)
        group = VGroup(brace, label)
        group.rotate(np.pi/2, RIGHT)
        group.shift(2*RIGHT + 0.5*IN)
        self.play(photon)
        self.play(
            GrowFromCenter(brace),
            Write(label, run_time = 1)
        )
        kwargs = {
            "run_time" : 3,
            "rate_func" : there_and_back_with_pause,
        }
        self.move_camera(
            phi = np.pi/2,
            theta = 0,
            added_anims = [
                Animation(VGroup(*self.pol_filters[:2]))
            ] + [
                Rotate(
                    v, np.pi/2,
                    axis = v.get_vector(),
                    in_place = True,
                    **kwargs
                )
                for v in photon.mobject
            ] + [
                Animation(self.pol_filters[2]),
                Rotate(
                    label, np.pi/2,
                    axis = OUT,
                    in_place = True,
                    **kwargs
                ),
            ],
            **kwargs
        )
        self.wait()
        self.photon = photon
        self.brace_group = VGroup(brace, label)
    def fifty_percent_to_pass_third(self):
        """Copy the 50% arrow over the third filter; resume the photon."""
        arrow_group = self.fifty_percent_arrow_group.copy()
        arrow_group.shift(4*RIGHT)
        arrow, label = arrow_group
        # Resume from wherever the frozen photon's rate_func stopped
        a = self.photon.rate_func(1)
        new_photon = self.get_photon(
            rate_func = lambda x : (1-a)*x + a,
            run_time = 1
        )
        self.revert_to_original_skipping_status()
        self.play(
            ShowCreation(arrow),
            Write(label, run_time = 1)
        )
        self.remove(self.photon.mobject)
        self.play(new_photon)
        self.second_fifty_percent_arrow_group = arrow_group
    def show_lines_with_middle(self):
        """Show all four ray bundles (100/100/50/25%) simultaneously."""
        l1, l2, l3, l4 = self.lines_group
        pf1, pf2, pf3 = self.pol_filters
        self.play(
            FadeIn(l4),
            Animation(pf3),
            FadeIn(l3),
            Animation(pf2),
            FadeIn(l2),
            Animation(pf1),
            FadeIn(l1),
            FadeOut(self.brace_group)
        )
        self.wait(2)
    def remove_middle_then_put_back(self):
        """Slide out the middle filter (transmission drops to 0%), then
        put it back (transmission returns, labeled 25%)."""
        l1, l2, l3, l4 = self.lines_group
        pf1, pf2, pf3 = self.pol_filters
        mid_lines = self.A_to_C_lines
        mover = VGroup(
            pf2,
            self.fifty_percent_arrow_group,
            self.second_fifty_percent_arrow_group,
        )
        arrow = Arrow(
            ORIGIN, 7*RIGHT,
            use_rectangular_stem = False,
            path_arc = 0.5*np.pi,
        )
        labels = VGroup(*map(TexMobject, ["0\\%", "25\\%"]))
        labels.scale(1.5)
        labels.next_to(arrow, DOWN)
        group = VGroup(arrow, labels)
        group.rotate(np.pi/2, RIGHT)
        group.shift(2*LEFT + IN)
        group.set_color(GREEN)
        self.remove(l2, l3)
        self.play(
            FadeOut(l4),
            Animation(pf3),
            FadeOut(l3),
            ApplyMethod(
                mover.shift, 3*OUT,
                rate_func = running_start
            ),
            ReplacementTransform(l2.copy(), mid_lines),
            Animation(pf1),
            Animation(l1)
        )
        self.play(
            ShowCreation(arrow),
            Write(labels[0], run_time = 1)
        )
        self.wait(2)
        self.play(
            FadeIn(l4),
            Animation(pf3),
            FadeOut(mid_lines),
            FadeIn(l3),
            mover.shift, 3*IN,
            FadeIn(l2),
            Animation(pf1),
            Animation(l1)
        )
        # Morph the "0%" label into "25%"
        self.play(ReplacementTransform(*labels))
        self.wait(3)
    ####
    def get_photon(self, **kwargs):
        return ShootPhotonThroughFilter.get_photon(self, width = 4, **kwargs)
    def get_lines(self, filter1 = None, filter2 = None, ratio = 1.0):
        """Build a bundle of light rays between two filters (or off-screen
        when a filter is None), with ``ratio`` of the rays kept visible."""
        n = self.n_lines
        start, end = [
            (f.point_from_proportion(0.75) if f is not None else None)
            for f in (filter1, filter2)
        ]
        if start is None:
            start = end + self.line_start_length*LEFT
        if end is None:
            end = start + self.line_end_length*RIGHT
        nudge = (float(self.lines_depth)/self.n_lines)*OUT
        lines = VGroup(*[
            Line(start, end).shift(z*nudge)
            for z in range(n)
        ])
        lines.set_stroke(YELLOW, 2)
        lines.move_to(start, IN+LEFT)
        lines.shift(self.lines_shift_vect)
        # Make (1 - ratio) of the rays invisible, chosen reproducibly
        n_to_block = int((1-ratio)*self.n_lines)
        random.seed(self.random_seed)
        indices_to_block = random.sample(
            range(self.n_lines), n_to_block
        )
        VGroup(*[lines[i] for i in indices_to_block]).set_stroke(width = 0)
        return lines
class PhotonAtSlightAngle(ThreeFilters):
CONFIG = {
"filter_x_coordinates" : [3],
"pol_filter_configs" : [{
"label_tex" : "",
"include_arrow_label" : False,
"radius" : 1.4,
}],
"EMWave_config" : {
"wave_number" : 0,
"A_vect" : [0, np.sin(np.pi/8), np.cos(np.pi/8)],
"start_point" : FRAME_X_RADIUS*LEFT,
"amplitude" : 2,
},
"axes_config" : {
"z_max" : 2.5,
},
"radius" : 1.3,
"lines_depth" : 2.5,
"line_start_length" : 12,
}
    def construct(self):
        # force_skipping fast-forwards setup steps; a later method calls
        # revert_to_original_skipping_status to render from there on
        self.force_skipping()
        self.shoot_photon()
        self.reposition_camera_to_head_on()
        self.write_angle()
        self.write_components()
        self.classical_energy_conception()
        self.reposition_camera_back()
        self.rewrite_15_percent_meaning()
        self.probabalistic_passing()
    def shoot_photon(self):
        """Send a photon halfway across the screen and keep it there."""
        photon = self.get_photon(
            rate_func = lambda x : 0.5*x,
            remover = False,
        )
        self.play(photon)
        self.photon = photon
    def reposition_camera_to_head_on(self):
        """Swing the camera to look straight down the propagation axis,
        rotating each frozen field vector about its own axis to match."""
        self.move_camera(
            phi = np.pi/2, theta = 0,
            added_anims = list(it.chain(*[
                [
                    v.rotate_in_place, np.pi/2, v.get_vector(),
                    v.set_fill, None, 0.7,
                ]
                for v in self.photon.mobject
            ])) + [Animation(self.pol_filter)]
        )
    def write_angle(self):
        """Mark the 22.5-degree tilt of the polarization with an arc."""
        arc = Arc(
            start_angle = np.pi/2, angle = -np.pi/8,
            radius = self.pol_filter.radius,
        )
        label = TexMobject("22.5^\\circ")
        label.next_to(arc.get_center(), UP+RIGHT, SMALL_BUFF)
        group = VGroup(arc, label)
        # Rotate into the plane faced by the head-on camera
        group.rotate(np.pi/2, RIGHT)
        group.rotate(np.pi/2, OUT)
        self.play(
            FadeOut(self.pol_filter),
            ShowCreation(arc),
            Write(label, run_time = 1)
        )
        self.wait()
        self.arc = arc
        self.angle_label = label
    def write_components(self):
        """Decompose the tilted state into sin(22.5)|right> + cos(22.5)|up>,
        with arrows for each component, braces for their magnitudes, and
        the full ket equation at the top of the screen."""
        d_brace = Brace(Line(ORIGIN, self.radius*RIGHT), UP, buff = SMALL_BUFF)
        d_brace.rotate(np.pi/2 - np.pi/8)  # align with the tilted E-vector
        d_brace.label = d_brace.get_tex("1", buff = SMALL_BUFF)
        d_brace.label.add_background_rectangle()
        h_arrow = Vector(
            self.radius*np.sin(np.pi/8)*RIGHT,
            color = RED,
        )
        h_label = TexMobject("\\sin(22.5^\\circ)")
        h_label.scale(0.7)
        h_label.set_color(RED)
        h_label.next_to(h_arrow.get_center(), DOWN, aligned_edge = LEFT)
        v_arrow = Vector(
            self.radius*np.cos(np.pi/8)*UP,
            color = GREEN
        )
        v_arrow.shift(h_arrow.get_vector())
        v_label = TexMobject("\\cos(22.5^\\circ)")
        v_label.scale(0.7)
        v_label.set_color(GREEN)
        v_label.next_to(v_arrow, RIGHT, SMALL_BUFF)
        state = TexMobject(
            "|\\!\\psi\\rangle",
            "=", "\\sin(22.5^\\circ)", "|\\!\\rightarrow\\rangle",
            "+", "\\cos(22.5^\\circ)", "|\\!\\uparrow\\rangle",
        )
        state.set_color_by_tex_to_color_map({
            "psi" : BLUE,
            "rightarrow" : RED,
            "uparrow" : GREEN,
        })
        # state.add_background_rectangle()
        state.to_edge(UP)
        # Numeric values (0.38 and 0.92) braced under the coefficients
        sin_brace = Brace(state.get_part_by_tex("sin"), DOWN, buff = SMALL_BUFF)
        sin_brace.label = sin_brace.get_tex("%.2f"%np.sin(np.pi/8), buff = SMALL_BUFF)
        cos_brace = Brace(state.get_part_by_tex("cos"), DOWN, buff = SMALL_BUFF)
        cos_brace.label = cos_brace.get_tex("%.2f"%np.cos(np.pi/8), buff = SMALL_BUFF)
        group = VGroup(
            d_brace, d_brace.label,
            h_arrow, h_label,
            v_arrow, v_label,
            state,
            sin_brace, sin_brace.label,
            cos_brace, cos_brace.label,
        )
        # Rotate the whole diagram into the head-on camera's plane
        group.rotate(np.pi/2, RIGHT)
        group.rotate(np.pi/2, OUT)
        self.play(
            GrowFromCenter(d_brace),
            Write(d_brace.label)
        )
        self.wait()
        self.play(
            GrowFromPoint(h_arrow, ORIGIN),
            Write(h_label, run_time = 1)
        )
        self.play(
            Write(VGroup(*state[:2])),
            ReplacementTransform(
                h_label.copy(),
                state.get_part_by_tex("sin")
            ),
            ReplacementTransform(
                h_arrow.copy(),
                state.get_part_by_tex("rightarrow")
            ),
            Write(state.get_part_by_tex("+"))
        )
        self.play(
            GrowFromCenter(sin_brace),
            Write(sin_brace.label, run_time = 1)
        )
        self.wait()
        self.play(
            GrowFromPoint(v_arrow, h_arrow.get_end()),
            Write(v_label, run_time = 1)
        )
        self.play(
            ReplacementTransform(
                v_label.copy(),
                state.get_part_by_tex("cos")
            ),
            ReplacementTransform(
                v_arrow.copy(),
                state.get_part_by_tex("uparrow")
            ),
        )
        self.play(
            GrowFromCenter(cos_brace),
            Write(cos_brace.label, run_time = 1)
        )
        self.wait()
        self.d_brace = d_brace
        self.state_equation = state
        self.state_equation.add(
            sin_brace, sin_brace.label,
            cos_brace, cos_brace.label,
        )
        self.sin_brace = sin_brace
        self.cos_brace = cos_brace
        self.h_arrow = h_arrow
        self.h_label = h_label
        self.v_arrow = v_arrow
        self.v_label = v_label
    def classical_energy_conception(self):
        """Animate the (incorrect) classical reading of the amplitudes.

        Randolph ponders a thought bubble claiming sin^2/cos^2 split the
        wave's *energy* between the two directions, then the horizontal
        claim is replaced by "15% of energy absorbed" as the filter fades
        in.  Relies on ``self.sin_brace``/``self.cos_brace``/
        ``self.state_equation`` set up by an earlier step, and stores
        ``self.classically`` for later steps to cross out.
        """
        randy = Randolph(mode = "pondering").flip()
        randy.scale(0.7)
        randy.next_to(ORIGIN, LEFT)
        randy.to_edge(DOWN)
        bubble = ThoughtBubble(direction = RIGHT)
        # Thought-bubble text: 0.38^2 = 0.15 of the energy goes rightward.
        h_content = TexMobject(
            "0.38", "^2", "= 0.15", "\\text{ energy}\\\\",
            "\\text{in the }", "\\rightarrow", "\\text{ direction}"
        )
        # Alternate phrasing swapped in at the end of this step.
        alt_h_content = TexMobject(
            "0.38", "^2", "=& 15\\%", "\\text{ of energy}\\\\",
            "&\\text{absorbed}", "", "",
        )
        h_content.set_color_by_tex("rightarrow", RED)
        alt_h_content.set_color_by_tex("rightarrow", RED)
        alt_h_content.scale(0.8)
        v_content = TexMobject(
            "0.92", "^2", "= 0.85", "\\text{ energy}\\\\",
            "\\text{in the }", "\\uparrow", "\\text{ direction}"
        )
        v_content.set_color_by_tex("uparrow", GREEN)
        bubble.add_content(h_content)
        bubble.resize_to_content()
        v_content.move_to(h_content)
        bubble_group = VGroup(bubble, h_content, v_content)
        bubble_group.scale(0.8)
        bubble_group.next_to(randy, UP+LEFT, SMALL_BUFF)
        classically = TextMobject("Classically...")
        classically.next_to(bubble[-1], UP)
        classically.set_color(YELLOW)
        alt_h_content.next_to(classically, DOWN)
        group = VGroup(randy, bubble_group, classically, alt_h_content)
        # Two rotations move the flat 2D layout into the 3D camera frame.
        group.rotate(np.pi/2, RIGHT)
        group.rotate(np.pi/2, OUT)
        filter_lines = self.get_filter_lines(self.pol_filter)
        self.play(
            FadeIn(randy),
            FadeIn(classically),
            ShowCreation(bubble),
        )
        # Morph the on-screen sin coefficient and right-arrow ket into the
        # bubble's horizontal-energy claim.
        self.play(
            ReplacementTransform(
                self.sin_brace.label.copy(),
                h_content[0]
            ),
            ReplacementTransform(
                self.state_equation.get_part_by_tex("rightarrow").copy(),
                h_content.get_part_by_tex("rightarrow")
            )
        )
        self.play(
            Write(VGroup(*h_content[1:5])),
            Write(h_content.get_part_by_tex("direction")),
            run_time = 2,
        )
        self.wait(2)
        self.play(h_content.shift, 2*IN)
        # Same transformation for the vertical (cos) component.
        self.play(
            ReplacementTransform(
                self.cos_brace.label.copy(),
                v_content[0]
            ),
            ReplacementTransform(
                self.state_equation.get_part_by_tex("uparrow").copy(),
                v_content.get_part_by_tex("uparrow")
            )
        )
        self.play(
            Write(VGroup(*v_content[1:5])),
            Write(v_content.get_part_by_tex("direction")),
            run_time = 2,
        )
        self.wait(2)
        self.play(
            FadeOut(randy),
            FadeOut(bubble),
            FadeOut(v_content),
            Transform(h_content, alt_h_content),
            FadeIn(self.pol_filter),
            Animation(self.arc)
        )
        self.play(ShowCreation(filter_lines, submobject_mode = "all_at_once"))
        self.play(FadeOut(filter_lines))
        self.wait()
        # Saved so a later step can cross this claim out.
        self.classically = VGroup(classically, h_content)
    def reposition_camera_back(self):
        """Swing the 3D camera back to its default-ish viewpoint.

        While moving, fade out all diagram annotations from earlier
        steps and counter-rotate the equations and every photon vector
        so they stay readable from the new angle.
        """
        self.move_camera(
            phi = 0.8*np.pi/2, theta = -0.6*np.pi,
            added_anims = [
                FadeOut(self.h_arrow),
                FadeOut(self.h_label),
                FadeOut(self.v_arrow),
                FadeOut(self.v_label),
                FadeOut(self.d_brace),
                FadeOut(self.d_brace.label),
                FadeOut(self.arc),
                FadeOut(self.angle_label),
                Rotate(self.state_equation, np.pi/2, IN),
                Rotate(self.classically, np.pi/2, IN),
            ] + [
                # Each field vector spins about its own axis so the wave
                # keeps its orientation relative to the camera.
                Rotate(
                    v, np.pi/2,
                    axis = v.get_vector(),
                    in_place = True,
                )
                for v in self.photon.mobject
            ],
            run_time = 1.5
        )
    def rewrite_15_percent_meaning(self):
        """Cross out the classical energy claim; show the quantum one.

        Replaces "15% of energy absorbed" with "15% *chance* of getting
        blocked", then replays the photon so that it finishes its flight
        and is absorbed, flashing the filter red.
        """
        # Rotate into the 2D frame to build the cross, then rotate back.
        self.classically.rotate(np.pi/2, LEFT)
        cross = Cross(self.classically)
        cross.set_color("#ff0000")
        VGroup(self.classically, cross).rotate(np.pi/2, RIGHT)
        new_conception = TextMobject(
            "$0.38^2 = 15\\%$ chance of \\\\ getting blocked"
        )
        new_conception.scale(0.8)
        new_conception.rotate(np.pi/2, RIGHT)
        new_conception.move_to(self.classically, OUT)
        # Continue the photon from wherever its rate_func left it (a).
        a = self.photon.rate_func(1)
        finish_photon = self.get_blocked_photon(
            rate_func = lambda t : a + (1-a)*t
        )
        finish_photon.mobject.set_fill(opacity = 0.7)
        self.play(ShowCreation(cross))
        self.classically.add(cross)
        self.play(
            self.classically.shift, 4*IN,
            FadeIn(new_conception),
        )
        self.remove(self.photon.mobject)
        self.revert_to_original_skipping_status()
        self.play(
            finish_photon,
            # Flash the filter red early in the animation as absorption feedback.
            ApplyMethod(
                self.pol_filter.set_color, RED,
                rate_func = squish_rate_func(there_and_back, 0, 0.3),
                run_time = finish_photon.run_time
            )
        )
    # NOTE(review): "probabalistic" is a misspelling of "probabilistic",
    # but the name is presumably referenced from this scene's construct()
    # sequence, so it must not be renamed here.
    def probabalistic_passing(self):
        """Show continuous beams with ~15% of the intensity absorbed.

        Draws incoming lines at full strength and outgoing lines at 0.85
        strength, annotated with a curved "15% absorbed" arrow.
        """
        # Commented-out alternative: shoot several discrete photons, one
        # of which gets absorbed, instead of showing continuous beams.
        # photons = [
        #     self.get_photon()
        #     for x in range(3)
        # ] + [self.get_blocked_photon()]
        # random.shuffle(photons)
        # for photon in photons:
        #     added_anims = []
        #     if photon.get_filtered:
        #         added_anims.append(
        #             self.get_filter_absorption_animation(
        #                 self.pol_filter, photon,
        #             )
        #         )
        #     self.play(photon, *added_anims)
        # self.wait()
        l1 = self.get_lines(None, self.pol_filter)
        l2 = self.get_lines(self.pol_filter, None, 0.85)
        for line in it.chain(l1, l2):
            if line.get_stroke_width() > 0:
                line.set_stroke(width = 3)
        arrow = Arrow(
            2*LEFT, 2*RIGHT,
            path_arc = 0.8*np.pi,
            use_rectangular_stem = False,
        )
        label = TexMobject("15\\% \\text{ absorbed}")
        label.next_to(arrow, DOWN)
        group = VGroup(arrow, label)
        group.set_color(RED)
        group.rotate(np.pi/2, RIGHT)
        group.shift(3*RIGHT + 1.5*IN)
        kwargs = {
            "rate_func" : None,
            "submobject_mode" : "all_at_once",
        }
        self.play(
            ShowCreation(arrow),
            Write(label, run_time = 1),
            ShowCreation(l1, **kwargs)
        )
        self.play(
            ShowCreation(l2, run_time = 0.5, **kwargs),
            Animation(self.pol_filter),
            Animation(l1)
        )
        self.wait()
###
def get_filter_lines(self, pol_filter):
lines = VGroup(*[
Line(
np.sin(a)*RIGHT + np.cos(a)*UP,
np.sin(a)*LEFT + np.cos(a)*UP,
color = RED,
stroke_width = 2,
)
for a in np.linspace(0, np.pi, 15)
])
lines.scale(pol_filter.radius)
lines.rotate(np.pi/2, RIGHT)
lines.rotate(np.pi/2, OUT)
lines.shift(pol_filter.get_center()[0]*RIGHT)
return lines
def get_blocked_photon(self, **kwargs):
return self.get_photon(
filter_distance = FRAME_X_RADIUS + 3,
get_filtered = True,
**kwargs
)
class CompareWaveEquations(TeacherStudentsScene):
    """Teacher scene contrasting classical vs. quantum readings of |beta|^2."""
    def construct(self):
        self.add_equation()
        self.show_complex_plane()
        self.show_interpretations()

    def add_equation(self):
        """Teacher presents |psi> = alpha|-> > + beta|^ > and labels |psi>."""
        equation = TexMobject(
            "|\\!\\psi\\rangle",
            "=", "\\alpha", "|\\!\\rightarrow\\rangle",
            "+", "\\beta", "|\\!\\uparrow\\rangle",
        )
        equation.set_color_by_tex_to_color_map({
            "psi" : BLUE,
            "rightarrow" : RED,
            "uparrow" : GREEN,
        })
        equation.next_to(ORIGIN, LEFT)
        equation.to_edge(UP)
        psi_rect = SurroundingRectangle(equation.get_part_by_tex("psi"))
        psi_rect.set_color(WHITE)
        state_words = TextMobject("Polarization \\\\ state")
        state_words.set_color(BLUE)
        state_words.scale(0.8)
        state_words.next_to(psi_rect, DOWN)
        # Shrink the equation onto the teacher's hand, then restore it so
        # it appears to be "presented" upward into place.
        equation.save_state()
        equation.scale(0.01)
        equation.fade(1)
        equation.move_to(self.teacher.get_left())
        equation.shift(SMALL_BUFF*UP)
        self.play(
            equation.restore,
            self.teacher.change, "raise_right_hand",
        )
        self.change_student_modes(
            *["pondering"]*3,
            look_at_arg = psi_rect,
            added_anims = [
                ShowCreation(psi_rect),
                Write(state_words, run_time = 1)
            ],
            run_time = 1
        )
        self.play(FadeOut(psi_rect))
        self.equation = equation
        self.state_words = state_words

    def show_complex_plane(self):
        """Plot alpha and beta as points in a complex plane; expand beta
        into polar form A_y e^{i(2 pi f t + phi_y)} with amplitude line
        and phase arc."""
        new_alpha, new_beta = terms = [
            self.equation.get_part_by_tex(tex).copy()
            for tex in ("alpha", "beta")
        ]
        for term in terms:
            term.save_state()
            term.generate_target()
            term.target.scale(0.7)
        plane = ComplexPlane(
            x_radius = 1.5,
            y_radius = 1.5,
        )
        plane.add_coordinates()
        plane.scale(1.3)
        plane.next_to(ORIGIN, RIGHT, MED_LARGE_BUFF)
        plane.to_edge(UP)
        # alpha at -0.5+0.5i (red), beta at 0.5+0.5i (green).
        alpha_dot, beta_dot = [
            Dot(
                plane.coords_to_point(x, 0.5),
                radius = 0.05,
                color = color
            )
            for x, color in [(-0.5, RED), (0.5, GREEN)]
        ]
        new_alpha.target.next_to(alpha_dot, UP+LEFT, 0.5*SMALL_BUFF)
        new_alpha.target.set_color(RED)
        new_beta.target.next_to(beta_dot, UP+RIGHT, 0.5*SMALL_BUFF)
        new_beta.target.set_color(GREEN)
        rhs = TexMobject(
            "=", "A_y", "e", "^{i(",
            "2\\pi", "f", "t", "+", "\\phi_y", ")}"
        )
        rhs.scale(0.7)
        rhs.next_to(new_beta.target, RIGHT, SMALL_BUFF)
        rhs.shift(0.5*SMALL_BUFF*UP)
        rhs.set_color_by_tex_to_color_map({
            "A_y" : GREEN,
            "phi" : MAROON_B,
        })
        A_copy = rhs.get_part_by_tex("A_y").copy()
        phi_copy = rhs.get_part_by_tex("phi_y").copy()
        # Amplitude: line from origin to beta's point.
        A_line = Line(
            plane.coords_to_point(0, 0),
            plane.coords_to_point(0.5, 0.5),
            color = GREEN,
            stroke_width = 2,
        )
        # Phase: 45-degree arc at the origin.
        arc = Arc(angle = np.pi/4, radius = 0.5)
        arc.shift(plane.get_center())
        self.play(
            Write(plane, run_time = 2),
            MoveToTarget(new_alpha),
            MoveToTarget(new_beta),
            DrawBorderThenFill(alpha_dot, run_time = 1),
            DrawBorderThenFill(beta_dot, run_time = 1),
        )
        self.play(
            Write(rhs),
            ShowCreation(A_line),
            ShowCreation(arc)
        )
        self.play(
            phi_copy.next_to, arc, RIGHT, SMALL_BUFF,
            phi_copy.shift, 0.5*SMALL_BUFF*UP
        )
        self.play(
            A_copy.next_to, A_line.get_center(),
            UP, SMALL_BUFF,
            A_copy.shift, 0.5*SMALL_BUFF*(UP+LEFT),
        )
        self.wait()

    def show_interpretations(self):
        """Side-by-side: classical "fraction of energy" vs. quantum
        "probability all energy is measured" readings of |beta|^2."""
        c_words = TexMobject(
            "\\text{Classically: }", "&|\\beta|^2",
            "\\rightarrow",
            "\\text{Component of} \\\\",
            "&\\text{energy in }", "|\\!\\uparrow\\rangle",
            "\\text{ direction}",
        )
        qm_words = TexMobject(
            "\\text{Quantum: }", "&|\\beta|^2",
            "\\rightarrow",
            "\\text{Probability that}", "\\text{ \\emph{all}} \\\\",
            "&\\text{energy is measured in }", "|\\!\\uparrow\\rangle",
            "\\text{ direction}",
        )
        for words in c_words, qm_words:
            words.set_color_by_tex_to_color_map({
                "Classically" : YELLOW,
                "Quantum" : BLUE,
                "{all}" : BLUE,
                "beta" : GREEN,
                "uparrow" : GREEN,
            })
            words.scale(0.7)
        c_words.to_edge(LEFT)
        c_words.shift(2*UP)
        qm_words.next_to(c_words, DOWN, MED_LARGE_BUFF, LEFT)
        self.play(
            FadeOut(self.state_words),
            Write(c_words),
            self.teacher.change, "happy"
        )
        self.change_student_modes(
            *["happy"]*3, look_at_arg = c_words
        )
        self.play(Write(qm_words))
        self.change_student_modes(
            "erm", "confused", "pondering",
            look_at_arg = qm_words
        )
        self.wait()
class CircularPhotons(ShootPhotonThroughFilter):
    """Circularly polarized photons through a polarizing filter.

    The -pi/2 phase offset in ``phi_vect`` makes the y-component lag the
    x-component by a quarter cycle, producing circular polarization.
    """
    CONFIG = {
        "EMWave_config" : {
            "phi_vect" : [0, -np.pi/2, 0],
            "wave_number" : 1,
            "start_point" : 10*LEFT,
            "length" : 20,
            "n_vectors" : 60,
        },
        "apply_filter" : False,
    }

    def construct(self):
        self.set_camera_position(theta = -0.75*np.pi)
        self.setup_filter()
        self.show_phase_difference()
        self.shoot_circular_photons()
        self.show_filter()
        self.show_vertically_polarized_light()

    def setup_filter(self):
        """Replace the filter's text label and arrow with a pair of
        curved arrows indicating circular polarization; keep the filter
        itself off-screen until show_filter()."""
        pf = self.pol_filter
        pf.remove(pf.label)
        pf.remove(pf.arrow)
        self.remove(pf.label, pf.arrow)
        arrows = VGroup(*[
            Arrow(
                v1, v2,
                use_rectangular_stem = False,
                color = WHITE,
                path_arc = np.pi,
            )
            for v1, v2 in [(LEFT, RIGHT), (RIGHT, LEFT)]
        ])
        arrows.scale(0.7)
        arrows.rotate(np.pi/2, RIGHT)
        arrows.rotate(np.pi/2, OUT)
        arrows.move_to(center_of_mass(pf.points))
        pf.label = arrows
        pf.add(arrows)
        self.remove(pf)

    def show_phase_difference(self):
        """Write |circlearrowright> = (1/sqrt2)|-> > + (i/sqrt2)|^ > and
        highlight the i/sqrt2 coefficient as the phase shift."""
        equation = TexMobject(
            "|\\!\\circlearrowright\\rangle",
            "=", "\\frac{1}{\\sqrt{2}}", "|\\!\\rightarrow\\rangle",
            "+", "\\frac{i}{\\sqrt{2}}", "|\\!\\uparrow\\rangle",
        )
        equation.set_color_by_tex_to_color_map({
            "circlearrowright" : BLUE,
            "rightarrow" : RED,
            "uparrow" : GREEN,
        })
        equation.next_to(ORIGIN, LEFT, LARGE_BUFF)
        equation.to_edge(UP)
        rect = SurroundingRectangle(equation.get_part_by_tex("frac{i}"))
        words = TextMobject("Phase shift")
        words.next_to(rect, DOWN)
        words.set_color(YELLOW)
        group = VGroup(equation, rect, words)
        group.rotate(np.pi/2, RIGHT)
        group.rotate(np.pi/4, IN)
        self.play(FadeIn(equation))
        self.play(self.get_circular_photon())
        self.play(
            ShowCreation(rect),
            Write(words, run_time = 1)
        )
        self.circ_equation_group = group

    def shoot_circular_photons(self):
        for x in range(2):
            self.play(self.get_circular_photon())

    def show_filter(self):
        """Fade the filter in from 4 units out, then shoot one photon."""
        pf = self.pol_filter
        pf.save_state()
        pf.shift(4*OUT)
        pf.fade(1)
        self.play(pf.restore)
        self.play(
            self.get_circular_photon(),
            Animation(self.circ_equation_group)
        )
        self.play(FadeOut(self.circ_equation_group))

    def show_vertically_polarized_light(self):
        """Decompose |^ > into the two circular states and shoot a 50/50
        mix of passing and blocked vertically polarized photons.

        The incoming (x < 0) half of the flight shows the vertical
        packet; the outgoing (x > 0) half shows the circular packet, so
        the photon appears to change basis at the filter.
        """
        equation = TexMobject(
            "|\\!\\uparrow \\rangle",
            "=", "\\frac{i}{\\sqrt{2}}", "|\\!\\circlearrowleft \\rangle",
            "+", "\\frac{-i}{\\sqrt{2}}", "|\\!\\circlearrowright \\rangle",
        )
        equation.set_color_by_tex_to_color_map({
            "circlearrowright" : BLUE,
            "frac{-i}" : BLUE,
            "circlearrowleft" : YELLOW,
            "frac{i}" : YELLOW,
            "uparrow" : GREEN,
        })
        equation.next_to(ORIGIN, LEFT, LARGE_BUFF)
        equation.to_edge(UP)
        prob = TexMobject(
            "P(", "\\text{passing}", ")",
            "=", "\\left(", "\\frac{-i}{\\sqrt{2}}", "\\right)^2"
        )
        prob.set_color_by_tex("sqrt{2}", BLUE)
        prob.next_to(equation, DOWN)
        group = VGroup(equation, prob)
        group.rotate(np.pi/2, RIGHT)
        group.rotate(np.pi/4, IN)
        # wave_number = 0 gives a non-oscillating (vertical) packet.
        em_wave = EMWave(
            wave_number = 0,
            amplitude = 2,
            start_point = 10*LEFT,
            length = 20,
        )
        v_photon = WavePacket(
            em_wave = em_wave,
            include_M_vects = False,
            run_time = 2
        )
        c_photon = self.get_circular_photon()
        for v_vect in v_photon.mobject:
            v_vect.saved_state.set_fill(GREEN)
            if v_vect.get_start()[0] > 0:
                v_vect.saved_state.set_fill(opacity = 0)
        for c_vect in c_photon.mobject:
            if c_vect.get_start()[0] < 0:
                c_vect.saved_state.set_fill(opacity = 0)
        blocked_v_photon = copy.deepcopy(v_photon)
        blocked_v_photon.get_filtered = True
        blocked_v_photon.filter_distance = 10
        self.play(Write(equation, run_time = 1))
        self.play(v_photon, c_photon)
        self.play(FadeIn(prob))
        # Exactly 3 pass and 3 are blocked, in shuffled order.
        bools = 3*[True] + 3*[False]
        random.shuffle(bools)
        for should_pass in bools:
            if should_pass:
                self.play(v_photon, c_photon)
            else:
                self.play(
                    blocked_v_photon,
                    self.get_filter_absorption_animation(
                        self.pol_filter, blocked_v_photon
                    )
                )
        self.wait()

    ####
    def get_circular_photon(self, **kwargs):
        """A photon animation with a Gaussian envelope E_func."""
        kwargs["run_time"] = kwargs.get("run_time", 2)
        photon = ShootPhotonThroughFilter.get_photon(self, **kwargs)
        photon.E_func = lambda x : np.exp(-0.25*(2*np.pi*x/photon.width)**2)
        return photon
class ClockwisePhotonInsert(Scene):
    """Static insert showing |-i/sqrt(2)|^2 with blue fraction, white bars."""
    def construct(self):
        expression = TexMobject(
            "\\left| \\frac{-i}{\\sqrt{2}} \\right|^2"
        )
        expression.scale_to_fit_height(FRAME_HEIGHT - 1)
        expression.to_edge(LEFT)
        expression.set_color(BLUE)
        # Recolor the surrounding |...|^2 pieces white, leaving only the
        # fraction itself blue.
        bars = VGroup(*it.chain(expression[:4], expression[-5:]))
        bars.set_color(WHITE)
        self.add(expression)
class OrClickHere(Scene):
    """End-screen prompt: large "Or click here" text with an arrow
    pointing up and to the right."""
    def construct(self):
        prompt = TextMobject("Or click here")
        prompt.scale(3)
        pointer = Vector(
            2*UP + 2*RIGHT,
            rectangular_stem_width = 0.1,
            tip_length = 0.5
        )
        pointer.next_to(prompt, UP)
        pointer.shift(RIGHT)
        self.play(Write(prompt), ShowCreation(pointer))
        self.wait()
class WavesPatreonThanks(PatreonThanks):
    """Patreon credits for this video; behavior comes from PatreonThanks."""
    # Names appear on screen in list order; do not sort.
    CONFIG = {
        "specific_patrons" : [
            "Desmos",
            "CrypticSwarm",
            "Burt Humburg",
            "Charlotte",
            "Juan Batiz-Benet",
            "Ali Yahya",
            "William",
            "Mayank M. Mehrotra",
            "Lukas Biewald",
            "Samantha D. Suplee",
            "James Park",
            "Yana Chernobilsky",
            "Kaustuv DeBiswas",
            "Kathryn Schmiedicke",
            "Yu Jun",
            "dave nicponski",
            "Damion Kistler",
            "Markus Persson",
            "Yoni Nazarathy",
            "Ed Kellett",
            "Joseph John Cox",
            "Dan Rose",
            "Luc Ritchie",
            "Harsev Singh",
            "Mads Elvheim",
            "Erik Sundell",
            "Xueqi Li",
            "David G. Stork",
            "Tianyu Ge",
            "Ted Suzman",
            "Linh Tran",
            "Andrew Busey",
            "Michael McGuffin",
            "John Haley",
            "Ankalagon",
            "Eric Lavault",
            "Boris Veselinovich",
            "Julian Pulgarin",
            "Jeff Linse",
            "Cooper Jones",
            "Ryan Dahl",
            "Mark Govea",
            "Robert Teed",
            "Jason Hise",
            "Meshal Alshammari",
            "Bernd Sing",
            "Nils Schneider",
            "James Thornton",
            "Mustafa Mahdi",
            "Mathew Bramson",
            "Jerry Ling",
            "Vecht",
            "Shimin Kuang",
            "Rish Kundalia",
            "Achille Brighton",
            "Ripta Pasay",
        ],
    }
class Footnote(Scene):
    """Full-screen footnote about ket-notation conventions.

    The LaTeX body is a runtime string; its wording must not be altered.
    """
    def construct(self):
        words = TextMobject("""
            \\begin{flushleft}
            \\Large
            By the way, in the quantum mechanical description
            of polarization, states are written like
            $|\\! \\leftrightarrow \\rangle$ with a double-headed
            arrow, rather than $|\\! \\rightarrow \\rangle$ with
            a single-headed arrow. This conveys how there's no distinction
            between left and right; they each have the same measurable
            state: horizontal. \\\\
            \\quad \\\\
            Because of how I chose to motivate things with classical waves,
            I'll stick with the single-headed $|\\! \\rightarrow \\rangle$
            for this video, but just keep in mind that this differs
            from quantum mechanics conventions.
            \\end{flushleft}
        """)
        words.scale_to_fit_width(FRAME_WIDTH - 2)
        self.add(words)
| 31.626893 | 91 | 0.539138 |
aaad45954c54ff6a1121a896735e34696329f3f2 | 7,631 | py | Python | config/settings/production.py | taumaa/django-blog-back | c4de2d44863d0feaf06d4b1fd0f7d4edc3ce667d | [
"MIT"
] | null | null | null | config/settings/production.py | taumaa/django-blog-back | c4de2d44863d0feaf06d4b1fd0f7d4edc3ce667d | [
"MIT"
] | null | null | null | config/settings/production.py | taumaa/django-blog-back | c4de2d44863d0feaf06d4b1fd0f7d4edc3ce667d | [
"MIT"
] | null | null | null | from .base import * # noqa
from .base import env
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = env("DJANGO_SECRET_KEY")
# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = env.list("DJANGO_ALLOWED_HOSTS", default=["example.com"])
# DATABASES
# ------------------------------------------------------------------------------
DATABASES["default"] = env.db("DATABASE_URL")  # noqa F405
DATABASES["default"]["ATOMIC_REQUESTS"] = True  # noqa F405
# Persistent connections: reuse a DB connection for up to CONN_MAX_AGE seconds.
DATABASES["default"]["CONN_MAX_AGE"] = env.int("CONN_MAX_AGE", default=60)  # noqa F405
# CACHES
# ------------------------------------------------------------------------------
CACHES = {
    "default": {
        "BACKEND": "django_redis.cache.RedisCache",
        "LOCATION": env("REDIS_URL"),
        "OPTIONS": {
            "CLIENT_CLASS": "django_redis.client.DefaultClient",
            # Mimicking memcache behavior: treat Redis outages as cache
            # misses instead of raising.
            # https://github.com/jazzband/django-redis#memcached-exceptions-behavior
            "IGNORE_EXCEPTIONS": True,
        },
    }
}
# SECURITY
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-proxy-ssl-header
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-ssl-redirect
SECURE_SSL_REDIRECT = env.bool("DJANGO_SECURE_SSL_REDIRECT", default=True)
# https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-secure
SESSION_COOKIE_SECURE = True
# https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-secure
CSRF_COOKIE_SECURE = True
# https://docs.djangoproject.com/en/dev/topics/security/#ssl-https
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-seconds
# TODO: set this to 60 seconds first and then to 518400 once you prove the former works
SECURE_HSTS_SECONDS = 60
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-include-subdomains
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool(
    "DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS", default=True
)
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-preload
SECURE_HSTS_PRELOAD = env.bool("DJANGO_SECURE_HSTS_PRELOAD", default=True)
# https://docs.djangoproject.com/en/dev/ref/middleware/#x-content-type-options-nosniff
SECURE_CONTENT_TYPE_NOSNIFF = env.bool(
    "DJANGO_SECURE_CONTENT_TYPE_NOSNIFF", default=True
)
# STORAGES
# ------------------------------------------------------------------------------
# https://django-storages.readthedocs.io/en/latest/#installation
INSTALLED_APPS += ["storages"]  # noqa F405
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_ACCESS_KEY_ID = env("DJANGO_AWS_ACCESS_KEY_ID")
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_SECRET_ACCESS_KEY = env("DJANGO_AWS_SECRET_ACCESS_KEY")
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_STORAGE_BUCKET_NAME = env("DJANGO_AWS_STORAGE_BUCKET_NAME")
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_QUERYSTRING_AUTH = False
# DO NOT change these unless you know what you're doing.
_AWS_EXPIRY = 60 * 60 * 24 * 7  # one week, in seconds
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_S3_OBJECT_PARAMETERS = {
    "CacheControl": f"max-age={_AWS_EXPIRY}, s-maxage={_AWS_EXPIRY}, must-revalidate"
}
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_S3_REGION_NAME = env("DJANGO_AWS_S3_REGION_NAME", default=None)
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#cloudfront
AWS_S3_CUSTOM_DOMAIN = env("DJANGO_AWS_S3_CUSTOM_DOMAIN", default=None)
# Serve assets from CloudFront when a custom domain is set, else from S3 directly.
aws_s3_domain = AWS_S3_CUSTOM_DOMAIN or f"{AWS_STORAGE_BUCKET_NAME}.s3.amazonaws.com"
# STATIC
# ------------------------
STATICFILES_STORAGE = "django_blog.utils.storages.StaticRootS3Boto3Storage"
COLLECTFAST_STRATEGY = "collectfast.strategies.boto3.Boto3Strategy"
STATIC_URL = f"https://{aws_s3_domain}/static/"
# MEDIA
# ------------------------------------------------------------------------------
DEFAULT_FILE_STORAGE = "django_blog.utils.storages.MediaRootS3Boto3Storage"
MEDIA_URL = f"https://{aws_s3_domain}/media/"
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#default-from-email
DEFAULT_FROM_EMAIL = env(
    "DJANGO_DEFAULT_FROM_EMAIL",
    default="django-blog <noreply@example.com>",
)
# https://docs.djangoproject.com/en/dev/ref/settings/#server-email
SERVER_EMAIL = env("DJANGO_SERVER_EMAIL", default=DEFAULT_FROM_EMAIL)
# https://docs.djangoproject.com/en/dev/ref/settings/#email-subject-prefix
EMAIL_SUBJECT_PREFIX = env(
    "DJANGO_EMAIL_SUBJECT_PREFIX",
    default="[django-blog]",
)
# ADMIN
# ------------------------------------------------------------------------------
# Django Admin URL regex. Non-default in production to hide the admin path.
ADMIN_URL = env("DJANGO_ADMIN_URL")
# Anymail
# ------------------------------------------------------------------------------
# https://anymail.readthedocs.io/en/stable/installation/#installing-anymail
INSTALLED_APPS += ["anymail"]  # noqa F405
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
# https://anymail.readthedocs.io/en/stable/installation/#anymail-settings-reference
# https://anymail.readthedocs.io/en/stable/esps/mailgun/
EMAIL_BACKEND = "anymail.backends.mailgun.EmailBackend"
ANYMAIL = {
    "MAILGUN_API_KEY": env("MAILGUN_API_KEY"),
    "MAILGUN_SENDER_DOMAIN": env("MAILGUN_DOMAIN"),
    "MAILGUN_API_URL": env("MAILGUN_API_URL", default="https://api.mailgun.net/v3"),
}
# Collectfast
# ------------------------------------------------------------------------------
# https://github.com/antonagestam/collectfast#installation
# Prepended (not appended) because collectfast must come before
# django.contrib.staticfiles in INSTALLED_APPS, per its install docs.
INSTALLED_APPS = ["collectfast"] + INSTALLED_APPS  # noqa F405
# LOGGING
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#logging
# See https://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
LOGGING = {
    "version": 1,
    "disable_existing_loggers": False,
    "filters": {"require_debug_false": {"()": "django.utils.log.RequireDebugFalse"}},
    "formatters": {
        "verbose": {
            "format": "%(levelname)s %(asctime)s %(module)s "
            "%(process)d %(thread)d %(message)s"
        }
    },
    "handlers": {
        "mail_admins": {
            "level": "ERROR",
            "filters": ["require_debug_false"],
            "class": "django.utils.log.AdminEmailHandler",
        },
        "console": {
            "level": "DEBUG",
            "class": "logging.StreamHandler",
            "formatter": "verbose",
        },
    },
    "root": {"level": "INFO", "handlers": ["console"]},
    "loggers": {
        "django.request": {
            "handlers": ["mail_admins"],
            "level": "ERROR",
            "propagate": True,
        },
        # DisallowedHost also logs to console so probes are visible without email.
        "django.security.DisallowedHost": {
            "level": "ERROR",
            "handlers": ["console", "mail_admins"],
            "propagate": True,
        },
    },
}
# Your stuff...
# ------------------------------------------------------------------------------
| 43.357955 | 87 | 0.625737 |
cd41ceeac7a7f041d36edb030db3361f4f775ba0 | 272 | py | Python | torchvision/prototype/datasets/decoder.py | shuokay/vision | 136a16eaa604044d91485a91e47d9dc1e6a8d2d8 | [
"BSD-3-Clause"
] | null | null | null | torchvision/prototype/datasets/decoder.py | shuokay/vision | 136a16eaa604044d91485a91e47d9dc1e6a8d2d8 | [
"BSD-3-Clause"
] | null | null | null | torchvision/prototype/datasets/decoder.py | shuokay/vision | 136a16eaa604044d91485a91e47d9dc1e6a8d2d8 | [
"BSD-3-Clause"
] | null | null | null | import io
import numpy as np
import PIL.Image
import torch
__all__ = ["pil"]
def pil(file: io.IOBase, mode: str = "RGB") -> torch.Tensor:
    """Decode an image from a binary file-like object into a CHW tensor.

    Args:
        file: Readable binary stream positioned at the start of the image.
        mode: PIL conversion mode (case-insensitive), e.g. "RGB" or "L".

    Returns:
        A ``torch.Tensor`` of shape (channels, height, width); for
        single-channel modes the channel dimension is 1.
    """
    image = PIL.Image.open(file).convert(mode.upper())
    array = np.array(image, copy=True)
    if array.ndim == 2:
        # Single-channel modes (e.g. "L") decode to a 2-D (H, W) array;
        # the original code crashed here because permute((2, 0, 1))
        # requires three dimensions. Add an explicit channel axis.
        array = array[:, :, np.newaxis]
    return torch.from_numpy(array).permute((2, 0, 1))
| 20.923077 | 74 | 0.680147 |
61e7c9df8f430ed0d576bc0ce7d6b2318d241832 | 344 | py | Python | backend/notification/routing.py | RyanSiu1995/Course_PWA_Client | bce0ea9406ceeef1def3f72bc48672b89dfcf13f | [
"MIT"
] | null | null | null | backend/notification/routing.py | RyanSiu1995/Course_PWA_Client | bce0ea9406ceeef1def3f72bc48672b89dfcf13f | [
"MIT"
] | 1 | 2018-05-12T16:37:34.000Z | 2018-05-13T14:43:55.000Z | backend/notification/routing.py | RyanSiu1995/Course_PWA_Client | bce0ea9406ceeef1def3f72bc48672b89dfcf13f | [
"MIT"
] | null | null | null | # mysite/routing.py
from channels.auth import AuthMiddlewareStack
from channels.routing import ProtocolTypeRouter, URLRouter
from . import route
# ASGI entry point: routes each connection by protocol type.
application = ProtocolTypeRouter({
    # (http->django views is added by default)
    # WebSocket connections get Django session/auth populated by
    # AuthMiddlewareStack before URL routing.
    'websocket': AuthMiddlewareStack(
        URLRouter(
            route.websocket_urlpatterns
        )
    ),
})
0e42b1bc04335fc864a08597463f800ec956a9dd | 32,006 | py | Python | sdk/python/pulumi_azure_native/network/v20190201/network_interface.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/network/v20190201/network_interface.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/network/v20190201/network_interface.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['NetworkInterfaceArgs', 'NetworkInterface']
@pulumi.input_type
class NetworkInterfaceArgs:
def __init__(__self__, *,
resource_group_name: pulumi.Input[str],
dns_settings: Optional[pulumi.Input['NetworkInterfaceDnsSettingsArgs']] = None,
enable_accelerated_networking: Optional[pulumi.Input[bool]] = None,
enable_ip_forwarding: Optional[pulumi.Input[bool]] = None,
etag: Optional[pulumi.Input[str]] = None,
id: Optional[pulumi.Input[str]] = None,
ip_configurations: Optional[pulumi.Input[Sequence[pulumi.Input['NetworkInterfaceIPConfigurationArgs']]]] = None,
location: Optional[pulumi.Input[str]] = None,
mac_address: Optional[pulumi.Input[str]] = None,
network_interface_name: Optional[pulumi.Input[str]] = None,
network_security_group: Optional[pulumi.Input['NetworkSecurityGroupArgs']] = None,
primary: Optional[pulumi.Input[bool]] = None,
provisioning_state: Optional[pulumi.Input[str]] = None,
resource_guid: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
tap_configurations: Optional[pulumi.Input[Sequence[pulumi.Input['NetworkInterfaceTapConfigurationArgs']]]] = None):
"""
The set of arguments for constructing a NetworkInterface resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input['NetworkInterfaceDnsSettingsArgs'] dns_settings: The DNS settings in network interface.
:param pulumi.Input[bool] enable_accelerated_networking: If the network interface is accelerated networking enabled.
:param pulumi.Input[bool] enable_ip_forwarding: Indicates whether IP forwarding is enabled on this network interface.
:param pulumi.Input[str] etag: A unique read-only string that changes whenever the resource is updated.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[Sequence[pulumi.Input['NetworkInterfaceIPConfigurationArgs']]] ip_configurations: A list of IPConfigurations of the network interface.
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input[str] mac_address: The MAC address of the network interface.
:param pulumi.Input[str] network_interface_name: The name of the network interface.
:param pulumi.Input['NetworkSecurityGroupArgs'] network_security_group: The reference of the NetworkSecurityGroup resource.
:param pulumi.Input[bool] primary: Gets whether this is a primary network interface on a virtual machine.
:param pulumi.Input[str] provisioning_state: The provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:param pulumi.Input[str] resource_guid: The resource GUID property of the network interface resource.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
:param pulumi.Input[Sequence[pulumi.Input['NetworkInterfaceTapConfigurationArgs']]] tap_configurations: A list of TapConfigurations of the network interface.
"""
pulumi.set(__self__, "resource_group_name", resource_group_name)
if dns_settings is not None:
pulumi.set(__self__, "dns_settings", dns_settings)
if enable_accelerated_networking is not None:
pulumi.set(__self__, "enable_accelerated_networking", enable_accelerated_networking)
if enable_ip_forwarding is not None:
pulumi.set(__self__, "enable_ip_forwarding", enable_ip_forwarding)
if etag is not None:
pulumi.set(__self__, "etag", etag)
if id is not None:
pulumi.set(__self__, "id", id)
if ip_configurations is not None:
pulumi.set(__self__, "ip_configurations", ip_configurations)
if location is not None:
pulumi.set(__self__, "location", location)
if mac_address is not None:
pulumi.set(__self__, "mac_address", mac_address)
if network_interface_name is not None:
pulumi.set(__self__, "network_interface_name", network_interface_name)
if network_security_group is not None:
pulumi.set(__self__, "network_security_group", network_security_group)
if primary is not None:
pulumi.set(__self__, "primary", primary)
if provisioning_state is not None:
pulumi.set(__self__, "provisioning_state", provisioning_state)
if resource_guid is not None:
pulumi.set(__self__, "resource_guid", resource_guid)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if tap_configurations is not None:
pulumi.set(__self__, "tap_configurations", tap_configurations)
    # Auto-generated accessor pairs: each property proxies a key in the
    # underlying pulumi argument store via pulumi.get / pulumi.set.
    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Input[str]:
        """
        The name of the resource group.
        """
        return pulumi.get(self, "resource_group_name")

    @resource_group_name.setter
    def resource_group_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_group_name", value)

    @property
    @pulumi.getter(name="dnsSettings")
    def dns_settings(self) -> Optional[pulumi.Input['NetworkInterfaceDnsSettingsArgs']]:
        """
        The DNS settings in network interface.
        """
        return pulumi.get(self, "dns_settings")

    @dns_settings.setter
    def dns_settings(self, value: Optional[pulumi.Input['NetworkInterfaceDnsSettingsArgs']]):
        pulumi.set(self, "dns_settings", value)

    @property
    @pulumi.getter(name="enableAcceleratedNetworking")
    def enable_accelerated_networking(self) -> Optional[pulumi.Input[bool]]:
        """
        If the network interface is accelerated networking enabled.
        """
        return pulumi.get(self, "enable_accelerated_networking")

    @enable_accelerated_networking.setter
    def enable_accelerated_networking(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enable_accelerated_networking", value)

    @property
    @pulumi.getter(name="enableIPForwarding")
    def enable_ip_forwarding(self) -> Optional[pulumi.Input[bool]]:
        """
        Indicates whether IP forwarding is enabled on this network interface.
        """
        return pulumi.get(self, "enable_ip_forwarding")

    @enable_ip_forwarding.setter
    def enable_ip_forwarding(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enable_ip_forwarding", value)

    @property
    @pulumi.getter
    def etag(self) -> Optional[pulumi.Input[str]]:
        """
        A unique read-only string that changes whenever the resource is updated.
        """
        return pulumi.get(self, "etag")

    @etag.setter
    def etag(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "etag", value)

    @property
    @pulumi.getter
    def id(self) -> Optional[pulumi.Input[str]]:
        """
        Resource ID.
        """
        return pulumi.get(self, "id")

    @id.setter
    def id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "id", value)

    @property
    @pulumi.getter(name="ipConfigurations")
    def ip_configurations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['NetworkInterfaceIPConfigurationArgs']]]]:
        """
        A list of IPConfigurations of the network interface.
        """
        return pulumi.get(self, "ip_configurations")

    @ip_configurations.setter
    def ip_configurations(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['NetworkInterfaceIPConfigurationArgs']]]]):
        pulumi.set(self, "ip_configurations", value)

    @property
    @pulumi.getter
    def location(self) -> Optional[pulumi.Input[str]]:
        """
        Resource location.
        """
        return pulumi.get(self, "location")

    @location.setter
    def location(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "location", value)

    @property
    @pulumi.getter(name="macAddress")
    def mac_address(self) -> Optional[pulumi.Input[str]]:
        """
        The MAC address of the network interface.
        """
        return pulumi.get(self, "mac_address")

    @mac_address.setter
    def mac_address(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "mac_address", value)

    @property
    @pulumi.getter(name="networkInterfaceName")
    def network_interface_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the network interface.
        """
        return pulumi.get(self, "network_interface_name")

    @network_interface_name.setter
    def network_interface_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "network_interface_name", value)

    @property
    @pulumi.getter(name="networkSecurityGroup")
    def network_security_group(self) -> Optional[pulumi.Input['NetworkSecurityGroupArgs']]:
        """
        The reference of the NetworkSecurityGroup resource.
        """
        return pulumi.get(self, "network_security_group")

    @network_security_group.setter
    def network_security_group(self, value: Optional[pulumi.Input['NetworkSecurityGroupArgs']]):
        pulumi.set(self, "network_security_group", value)

    @property
    @pulumi.getter
    def primary(self) -> Optional[pulumi.Input[bool]]:
        """
        Gets whether this is a primary network interface on a virtual machine.
        """
        return pulumi.get(self, "primary")

    @primary.setter
    def primary(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "primary", value)
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[pulumi.Input[str]]:
"""
The provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
"""
return pulumi.get(self, "provisioning_state")
@provisioning_state.setter
def provisioning_state(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "provisioning_state", value)
@property
@pulumi.getter(name="resourceGuid")
def resource_guid(self) -> Optional[pulumi.Input[str]]:
"""
The resource GUID property of the network interface resource.
"""
return pulumi.get(self, "resource_guid")
@resource_guid.setter
def resource_guid(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_guid", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="tapConfigurations")
def tap_configurations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['NetworkInterfaceTapConfigurationArgs']]]]:
"""
A list of TapConfigurations of the network interface.
"""
return pulumi.get(self, "tap_configurations")
@tap_configurations.setter
def tap_configurations(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['NetworkInterfaceTapConfigurationArgs']]]]):
pulumi.set(self, "tap_configurations", value)
class NetworkInterface(pulumi.CustomResource):
    """A network interface in a resource group (azure-native network, API version 2019-02-01)."""

    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 dns_settings: Optional[pulumi.Input[pulumi.InputType['NetworkInterfaceDnsSettingsArgs']]] = None,
                 enable_accelerated_networking: Optional[pulumi.Input[bool]] = None,
                 enable_ip_forwarding: Optional[pulumi.Input[bool]] = None,
                 etag: Optional[pulumi.Input[str]] = None,
                 id: Optional[pulumi.Input[str]] = None,
                 ip_configurations: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['NetworkInterfaceIPConfigurationArgs']]]]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 mac_address: Optional[pulumi.Input[str]] = None,
                 network_interface_name: Optional[pulumi.Input[str]] = None,
                 network_security_group: Optional[pulumi.Input[pulumi.InputType['NetworkSecurityGroupArgs']]] = None,
                 primary: Optional[pulumi.Input[bool]] = None,
                 provisioning_state: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 resource_guid: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 tap_configurations: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['NetworkInterfaceTapConfigurationArgs']]]]] = None,
                 __props__=None):
        """
        A network interface in a resource group.

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[pulumi.InputType['NetworkInterfaceDnsSettingsArgs']] dns_settings: The DNS settings in network interface.
        :param pulumi.Input[bool] enable_accelerated_networking: If the network interface is accelerated networking enabled.
        :param pulumi.Input[bool] enable_ip_forwarding: Indicates whether IP forwarding is enabled on this network interface.
        :param pulumi.Input[str] etag: A unique read-only string that changes whenever the resource is updated.
        :param pulumi.Input[str] id: Resource ID.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['NetworkInterfaceIPConfigurationArgs']]]] ip_configurations: A list of IPConfigurations of the network interface.
        :param pulumi.Input[str] location: Resource location.
        :param pulumi.Input[str] mac_address: The MAC address of the network interface.
        :param pulumi.Input[str] network_interface_name: The name of the network interface.
        :param pulumi.Input[pulumi.InputType['NetworkSecurityGroupArgs']] network_security_group: The reference of the NetworkSecurityGroup resource.
        :param pulumi.Input[bool] primary: Gets whether this is a primary network interface on a virtual machine.
        :param pulumi.Input[str] provisioning_state: The provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
        :param pulumi.Input[str] resource_group_name: The name of the resource group.
        :param pulumi.Input[str] resource_guid: The resource GUID property of the network interface resource.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['NetworkInterfaceTapConfigurationArgs']]]] tap_configurations: A list of TapConfigurations of the network interface.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: NetworkInterfaceArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        A network interface in a resource group.

        :param str resource_name: The name of the resource.
        :param NetworkInterfaceArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the two overloads above: either a single
        # NetworkInterfaceArgs bag, or individual keyword arguments.
        resource_args, opts = _utilities.get_resource_args_opts(NetworkInterfaceArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 dns_settings: Optional[pulumi.Input[pulumi.InputType['NetworkInterfaceDnsSettingsArgs']]] = None,
                 enable_accelerated_networking: Optional[pulumi.Input[bool]] = None,
                 enable_ip_forwarding: Optional[pulumi.Input[bool]] = None,
                 etag: Optional[pulumi.Input[str]] = None,
                 id: Optional[pulumi.Input[str]] = None,
                 ip_configurations: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['NetworkInterfaceIPConfigurationArgs']]]]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 mac_address: Optional[pulumi.Input[str]] = None,
                 network_interface_name: Optional[pulumi.Input[str]] = None,
                 network_security_group: Optional[pulumi.Input[pulumi.InputType['NetworkSecurityGroupArgs']]] = None,
                 primary: Optional[pulumi.Input[bool]] = None,
                 provisioning_state: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 resource_guid: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 tap_configurations: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['NetworkInterfaceTapConfigurationArgs']]]]] = None,
                 __props__=None):
        # Normalise and validate resource options, pinning the SDK version.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        # opts.id is set only when rehydrating an existing resource (see get());
        # a caller-supplied __props__ is valid only in that flow.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = NetworkInterfaceArgs.__new__(NetworkInterfaceArgs)

            __props__.__dict__["dns_settings"] = dns_settings
            __props__.__dict__["enable_accelerated_networking"] = enable_accelerated_networking
            __props__.__dict__["enable_ip_forwarding"] = enable_ip_forwarding
            __props__.__dict__["etag"] = etag
            __props__.__dict__["id"] = id
            __props__.__dict__["ip_configurations"] = ip_configurations
            __props__.__dict__["location"] = location
            __props__.__dict__["mac_address"] = mac_address
            __props__.__dict__["network_interface_name"] = network_interface_name
            __props__.__dict__["network_security_group"] = network_security_group
            __props__.__dict__["primary"] = primary
            __props__.__dict__["provisioning_state"] = provisioning_state
            # resource_group_name is the only required input for this resource.
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__.__dict__["resource_group_name"] = resource_group_name
            __props__.__dict__["resource_guid"] = resource_guid
            __props__.__dict__["tags"] = tags
            __props__.__dict__["tap_configurations"] = tap_configurations
            # Output-only properties: initialised to None here and exposed as
            # pulumi.Output properties on the class below.
            __props__.__dict__["hosted_workloads"] = None
            __props__.__dict__["interface_endpoint"] = None
            __props__.__dict__["name"] = None
            __props__.__dict__["type"] = None
            __props__.__dict__["virtual_machine"] = None
        # Aliases map equivalent type tokens (other API versions and the
        # azure-nextgen provider name) onto this resource type.
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network/v20190201:NetworkInterface"), pulumi.Alias(type_="azure-native:network:NetworkInterface"), pulumi.Alias(type_="azure-nextgen:network:NetworkInterface"), pulumi.Alias(type_="azure-native:network/v20150501preview:NetworkInterface"), pulumi.Alias(type_="azure-nextgen:network/v20150501preview:NetworkInterface"), pulumi.Alias(type_="azure-native:network/v20150615:NetworkInterface"), pulumi.Alias(type_="azure-nextgen:network/v20150615:NetworkInterface"), pulumi.Alias(type_="azure-native:network/v20160330:NetworkInterface"), pulumi.Alias(type_="azure-nextgen:network/v20160330:NetworkInterface"), pulumi.Alias(type_="azure-native:network/v20160601:NetworkInterface"), pulumi.Alias(type_="azure-nextgen:network/v20160601:NetworkInterface"), pulumi.Alias(type_="azure-native:network/v20160901:NetworkInterface"), pulumi.Alias(type_="azure-nextgen:network/v20160901:NetworkInterface"), pulumi.Alias(type_="azure-native:network/v20161201:NetworkInterface"), pulumi.Alias(type_="azure-nextgen:network/v20161201:NetworkInterface"), pulumi.Alias(type_="azure-native:network/v20170301:NetworkInterface"), pulumi.Alias(type_="azure-nextgen:network/v20170301:NetworkInterface"), pulumi.Alias(type_="azure-native:network/v20170601:NetworkInterface"), pulumi.Alias(type_="azure-nextgen:network/v20170601:NetworkInterface"), pulumi.Alias(type_="azure-native:network/v20170801:NetworkInterface"), pulumi.Alias(type_="azure-nextgen:network/v20170801:NetworkInterface"), pulumi.Alias(type_="azure-native:network/v20170901:NetworkInterface"), pulumi.Alias(type_="azure-nextgen:network/v20170901:NetworkInterface"), pulumi.Alias(type_="azure-native:network/v20171001:NetworkInterface"), pulumi.Alias(type_="azure-nextgen:network/v20171001:NetworkInterface"), pulumi.Alias(type_="azure-native:network/v20171101:NetworkInterface"), pulumi.Alias(type_="azure-nextgen:network/v20171101:NetworkInterface"), pulumi.Alias(type_="azure-native:network/v20180101:NetworkInterface"), pulumi.Alias(type_="azure-nextgen:network/v20180101:NetworkInterface"), pulumi.Alias(type_="azure-native:network/v20180201:NetworkInterface"), pulumi.Alias(type_="azure-nextgen:network/v20180201:NetworkInterface"), pulumi.Alias(type_="azure-native:network/v20180401:NetworkInterface"), pulumi.Alias(type_="azure-nextgen:network/v20180401:NetworkInterface"), pulumi.Alias(type_="azure-native:network/v20180601:NetworkInterface"), pulumi.Alias(type_="azure-nextgen:network/v20180601:NetworkInterface"), pulumi.Alias(type_="azure-native:network/v20180701:NetworkInterface"), pulumi.Alias(type_="azure-nextgen:network/v20180701:NetworkInterface"), pulumi.Alias(type_="azure-native:network/v20180801:NetworkInterface"), pulumi.Alias(type_="azure-nextgen:network/v20180801:NetworkInterface"), pulumi.Alias(type_="azure-native:network/v20181001:NetworkInterface"), pulumi.Alias(type_="azure-nextgen:network/v20181001:NetworkInterface"), pulumi.Alias(type_="azure-native:network/v20181101:NetworkInterface"), pulumi.Alias(type_="azure-nextgen:network/v20181101:NetworkInterface"), pulumi.Alias(type_="azure-native:network/v20181201:NetworkInterface"), pulumi.Alias(type_="azure-nextgen:network/v20181201:NetworkInterface"), pulumi.Alias(type_="azure-native:network/v20190401:NetworkInterface"), pulumi.Alias(type_="azure-nextgen:network/v20190401:NetworkInterface"), pulumi.Alias(type_="azure-native:network/v20190601:NetworkInterface"), pulumi.Alias(type_="azure-nextgen:network/v20190601:NetworkInterface"), pulumi.Alias(type_="azure-native:network/v20190701:NetworkInterface"), pulumi.Alias(type_="azure-nextgen:network/v20190701:NetworkInterface"), pulumi.Alias(type_="azure-native:network/v20190801:NetworkInterface"), pulumi.Alias(type_="azure-nextgen:network/v20190801:NetworkInterface"), pulumi.Alias(type_="azure-native:network/v20190901:NetworkInterface"), pulumi.Alias(type_="azure-nextgen:network/v20190901:NetworkInterface"), pulumi.Alias(type_="azure-native:network/v20191101:NetworkInterface"), pulumi.Alias(type_="azure-nextgen:network/v20191101:NetworkInterface"), pulumi.Alias(type_="azure-native:network/v20191201:NetworkInterface"), pulumi.Alias(type_="azure-nextgen:network/v20191201:NetworkInterface"), pulumi.Alias(type_="azure-native:network/v20200301:NetworkInterface"), pulumi.Alias(type_="azure-nextgen:network/v20200301:NetworkInterface"), pulumi.Alias(type_="azure-native:network/v20200401:NetworkInterface"), pulumi.Alias(type_="azure-nextgen:network/v20200401:NetworkInterface"), pulumi.Alias(type_="azure-native:network/v20200501:NetworkInterface"), pulumi.Alias(type_="azure-nextgen:network/v20200501:NetworkInterface"), pulumi.Alias(type_="azure-native:network/v20200601:NetworkInterface"), pulumi.Alias(type_="azure-nextgen:network/v20200601:NetworkInterface"), pulumi.Alias(type_="azure-native:network/v20200701:NetworkInterface"), pulumi.Alias(type_="azure-nextgen:network/v20200701:NetworkInterface"), pulumi.Alias(type_="azure-native:network/v20200801:NetworkInterface"), pulumi.Alias(type_="azure-nextgen:network/v20200801:NetworkInterface"), pulumi.Alias(type_="azure-native:network/v20201101:NetworkInterface"), pulumi.Alias(type_="azure-nextgen:network/v20201101:NetworkInterface")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(NetworkInterface, __self__).__init__(
            'azure-native:network/v20190201:NetworkInterface',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'NetworkInterface':
        """
        Get an existing NetworkInterface resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        # All properties start as None; they are resolved from the existing
        # resource's state by the engine (see the opts.id branch in
        # _internal_init).
        __props__ = NetworkInterfaceArgs.__new__(NetworkInterfaceArgs)

        __props__.__dict__["dns_settings"] = None
        __props__.__dict__["enable_accelerated_networking"] = None
        __props__.__dict__["enable_ip_forwarding"] = None
        __props__.__dict__["etag"] = None
        __props__.__dict__["hosted_workloads"] = None
        __props__.__dict__["interface_endpoint"] = None
        __props__.__dict__["ip_configurations"] = None
        __props__.__dict__["location"] = None
        __props__.__dict__["mac_address"] = None
        __props__.__dict__["name"] = None
        __props__.__dict__["network_security_group"] = None
        __props__.__dict__["primary"] = None
        __props__.__dict__["provisioning_state"] = None
        __props__.__dict__["resource_guid"] = None
        __props__.__dict__["tags"] = None
        __props__.__dict__["tap_configurations"] = None
        __props__.__dict__["type"] = None
        __props__.__dict__["virtual_machine"] = None
        return NetworkInterface(resource_name, opts=opts, __props__=__props__)

    # --- Output properties (resolved values of the deployed resource) ---

    @property
    @pulumi.getter(name="dnsSettings")
    def dns_settings(self) -> pulumi.Output[Optional['outputs.NetworkInterfaceDnsSettingsResponse']]:
        """
        The DNS settings in network interface.
        """
        return pulumi.get(self, "dns_settings")

    @property
    @pulumi.getter(name="enableAcceleratedNetworking")
    def enable_accelerated_networking(self) -> pulumi.Output[Optional[bool]]:
        """
        If the network interface is accelerated networking enabled.
        """
        return pulumi.get(self, "enable_accelerated_networking")

    @property
    @pulumi.getter(name="enableIPForwarding")
    def enable_ip_forwarding(self) -> pulumi.Output[Optional[bool]]:
        """
        Indicates whether IP forwarding is enabled on this network interface.
        """
        return pulumi.get(self, "enable_ip_forwarding")

    @property
    @pulumi.getter
    def etag(self) -> pulumi.Output[Optional[str]]:
        """
        A unique read-only string that changes whenever the resource is updated.
        """
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter(name="hostedWorkloads")
    def hosted_workloads(self) -> pulumi.Output[Sequence[str]]:
        """
        A list of references to linked BareMetal resources
        """
        return pulumi.get(self, "hosted_workloads")

    @property
    @pulumi.getter(name="interfaceEndpoint")
    def interface_endpoint(self) -> pulumi.Output['outputs.InterfaceEndpointResponse']:
        """
        A reference to the interface endpoint to which the network interface is linked.
        """
        return pulumi.get(self, "interface_endpoint")

    @property
    @pulumi.getter(name="ipConfigurations")
    def ip_configurations(self) -> pulumi.Output[Optional[Sequence['outputs.NetworkInterfaceIPConfigurationResponse']]]:
        """
        A list of IPConfigurations of the network interface.
        """
        return pulumi.get(self, "ip_configurations")

    @property
    @pulumi.getter
    def location(self) -> pulumi.Output[Optional[str]]:
        """
        Resource location.
        """
        return pulumi.get(self, "location")

    @property
    @pulumi.getter(name="macAddress")
    def mac_address(self) -> pulumi.Output[Optional[str]]:
        """
        The MAC address of the network interface.
        """
        return pulumi.get(self, "mac_address")

    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        Resource name.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="networkSecurityGroup")
    def network_security_group(self) -> pulumi.Output[Optional['outputs.NetworkSecurityGroupResponse']]:
        """
        The reference of the NetworkSecurityGroup resource.
        """
        return pulumi.get(self, "network_security_group")

    @property
    @pulumi.getter
    def primary(self) -> pulumi.Output[Optional[bool]]:
        """
        Gets whether this is a primary network interface on a virtual machine.
        """
        return pulumi.get(self, "primary")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> pulumi.Output[Optional[str]]:
        """
        The provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
        """
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter(name="resourceGuid")
    def resource_guid(self) -> pulumi.Output[Optional[str]]:
        """
        The resource GUID property of the network interface resource.
        """
        return pulumi.get(self, "resource_guid")

    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """
        Resource tags.
        """
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter(name="tapConfigurations")
    def tap_configurations(self) -> pulumi.Output[Optional[Sequence['outputs.NetworkInterfaceTapConfigurationResponse']]]:
        """
        A list of TapConfigurations of the network interface.
        """
        return pulumi.get(self, "tap_configurations")

    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        Resource type.
        """
        return pulumi.get(self, "type")

    @property
    @pulumi.getter(name="virtualMachine")
    def virtual_machine(self) -> pulumi.Output['outputs.SubResourceResponse']:
        """
        The reference of a virtual machine.
        """
        return pulumi.get(self, "virtual_machine")
| 54.617747 | 5,267 | 0.692714 |
498a202a25f48e5fedb03ad6701be24c3b062e2c | 17,655 | py | Python | test/webdnn_test/graph_test/variable_test.py | steerapi/webdnn | 1df51cc094e5a528cfd3452c264905708eadb491 | [
"MIT"
] | 1 | 2021-04-09T15:55:35.000Z | 2021-04-09T15:55:35.000Z | test/webdnn_test/graph_test/variable_test.py | steerapi/webdnn | 1df51cc094e5a528cfd3452c264905708eadb491 | [
"MIT"
] | null | null | null | test/webdnn_test/graph_test/variable_test.py | steerapi/webdnn | 1df51cc094e5a528cfd3452c264905708eadb491 | [
"MIT"
] | null | null | null | import numpy as np
from nose.tools import raises
from webdnn.graph.axis import Axis
from webdnn.graph.operators.abs import Abs
from webdnn.graph.operators.elementwise_add import ElementwiseAdd
from webdnn.graph.operators.elementwise_div import ElementwiseDiv
from webdnn.graph.operators.elementwise_mul import ElementwiseMul
from webdnn.graph.operators.elementwise_pow import ElementwisePow
from webdnn.graph.operators.greater import Greater
from webdnn.graph.operators.greater_equal import GreaterEqual
from webdnn.graph.operators.reinterpret_axis import ReinterpretAxis
from webdnn.graph.operators.reshape import Reshape
from webdnn.graph.operators.scalar_add import ScalarAdd
from webdnn.graph.operators.scalar_mul import ScalarMul
from webdnn.graph.operators.scalar_pow import ScalarPow
from webdnn.graph.operators.slice import Slice
from webdnn.graph.operators.transpose import Transpose
from webdnn.graph.order import OrderNHWC, OrderHWCN, OrderNC, OrderCHWN, OrderCN, Order, OrderNCHW
from webdnn.graph.variable import Variable
from webdnn.graph.variables.constant_variable import ConstantVariable
def test_construction():
    """A Variable keeps the shape and order it was constructed with."""
    v = Variable([1, 2, 3, 4], OrderNHWC)
    assert v.shape == (1, 2, 3, 4)
    assert v.order == OrderNHWC


def test_size():
    """size is the product of all dimension extents."""
    v = Variable([1, 2, 3, 4], OrderNHWC)
    assert v.size == 24


def test_ndim():
    """ndim equals the number of axes."""
    v = Variable([1, 2, 3, 4], OrderNHWC)
    assert v.ndim == 4


def test_shape_dict():
    """shape_dict maps every axis to its extent."""
    v = Variable([1, 2, 3, 4], OrderNHWC)
    assert len(v.shape_dict) == 4
    for axis, extent in zip((Axis.N, Axis.H, Axis.W, Axis.C), (1, 2, 3, 4)):
        assert v.shape_dict[axis] == extent
# change_order mutates the variable in place: axes are permuted to the new
# order, missing axes are inserted with extent 1, and extent-1 axes that are
# absent from the new order are dropped.

def test_change_order():
    v = Variable([1, 2, 3, 4], OrderNHWC)
    v.change_order(OrderHWCN)
    assert v.order == OrderHWCN
    assert v.shape == (2, 3, 4, 1)


def test_change_order_with_expansion():
    # New axes (H, W) not present in OrderNC are inserted with extent 1.
    v = Variable([3, 4], OrderNC)
    v.change_order(OrderCHWN)
    assert v.order == OrderCHWN
    assert v.shape == (4, 1, 1, 3)


def test_change_order_with_compression():
    # Extent-1 axes (H, W) absent from the target order are removed.
    v = Variable([3, 1, 1, 4], OrderNHWC)
    v.change_order(OrderCN)
    assert v.order == OrderCN
    assert v.shape == (4, 3)


@raises(AssertionError)
def test_change_order_with_invalid_compression():
    # Dropping an axis whose extent is not 1 must be rejected.
    v = Variable([3, 2, 2, 4], OrderNHWC)
    v.change_order(OrderCN)
# unary operators
# Unary +/-/abs on a Variable build graph operators rather than computing
# values: +v and -v create ScalarMul nodes (value 1 / -1); abs(v) creates Abs.

def test_pos():
    v1 = Variable([2, 3, 4, 5], OrderNHWC)
    v2 = +v1
    assert isinstance(v2.output_from, ScalarMul)
    assert v2.output_from.value == 1
    assert v2.output_from.inputs["x0"] == v1


def test_neg():
    v1 = Variable([2, 3, 4, 5], OrderNHWC)
    v2 = -v1
    assert isinstance(v2.output_from, ScalarMul)
    assert v2.output_from.value == -1
    assert v2.output_from.inputs["x0"] == v1


def test_abs():
    v1 = Variable([2, 3, 4, 5], OrderNHWC)
    v2 = abs(v1)
    assert isinstance(v2.output_from, Abs)
    assert v2.output_from.inputs["x0"] == v1
# binary operators

# add
# variable + variable builds ElementwiseAdd; variable + number (either side)
# builds ScalarAdd with the number captured in .value.

def test_add_with_variable():
    v1 = Variable([2, 3, 4, 5], OrderNHWC)
    v2 = Variable([2, 3, 4, 5], OrderNHWC)
    v3 = v1 + v2
    assert isinstance(v3.output_from, ElementwiseAdd)
    assert v3.output_from.inputs["x0"] == v1
    assert v3.output_from.inputs["x1"] == v2


def test_add_with_scalar():
    v1 = Variable([2, 3, 4, 5], OrderNHWC)
    v2 = v1 + 3
    assert isinstance(v2.output_from, ScalarAdd)
    assert v2.output_from.inputs["x0"] == v1
    assert v2.output_from.value == 3


@raises(TypeError)
def test_add_invalid_type():
    Variable([2, 3, 4, 5], OrderNHWC) + "3"


def test_radd_with_scalar():
    v1 = Variable([2, 3, 4, 5], OrderNHWC)
    v2 = 3 + v1
    assert isinstance(v2.output_from, ScalarAdd)
    assert v2.output_from.inputs["x0"] == v1
    assert v2.output_from.value == 3


@raises(TypeError)
def test_radd_invalid_type():
    "3" + Variable([2, 3, 4, 5], OrderNHWC)
# sub
# Subtraction has no dedicated operator: v1 - v2 lowers to
# ElementwiseAdd(v1, ScalarMul(v2, -1)), and scalar forms fold the negation
# into ScalarAdd / ScalarMul values.

def test_sub_with_variable():
    v1 = Variable([2, 3, 4, 5], OrderNHWC)
    v2 = Variable([2, 3, 4, 5], OrderNHWC)
    v3 = v1 - v2
    assert isinstance(v3.output_from, ElementwiseAdd)
    assert v3.output_from.inputs["x0"] == v1

    # The second operand is v2 negated via ScalarMul(-1).
    neg_v2 = v3.output_from.inputs["x1"]
    assert isinstance(neg_v2.output_from, ScalarMul)
    assert neg_v2.output_from.inputs["x0"] == v2
    assert neg_v2.output_from.value == -1


def test_sub_with_scalar():
    v1 = Variable([2, 3, 4, 5], OrderNHWC)
    v2 = v1 - 3
    assert isinstance(v2.output_from, ScalarAdd)
    assert v2.output_from.inputs["x0"] == v1
    assert v2.output_from.value == -3


@raises(TypeError)
def test_sub_invalid_type():
    Variable([2, 3, 4, 5], OrderNHWC) - "3"


def test_rsub_with_scalar():
    # 3 - v lowers to ScalarAdd(ScalarMul(v, -1), 3).
    v1 = Variable([2, 3, 4, 5], OrderNHWC)
    v2 = 3 - v1
    assert isinstance(v2.output_from, ScalarAdd)
    assert v2.output_from.value == 3

    neg_v1 = v2.output_from.inputs["x0"]
    assert isinstance(neg_v1.output_from, ScalarMul)
    assert neg_v1.output_from.inputs["x0"] == v1


@raises(TypeError)
def test_rsub_invalid_type():
    "3" - Variable([2, 3, 4, 5], OrderNHWC)
# mul
# variable * variable builds ElementwiseMul; variable * number (either side)
# builds ScalarMul.

def test_mul_with_variable():
    v1 = Variable([2, 3, 4, 5], OrderNHWC)
    v2 = Variable([2, 3, 4, 5], OrderNHWC)
    v3 = v1 * v2
    assert isinstance(v3.output_from, ElementwiseMul)
    assert v3.output_from.inputs["x0"] == v1
    assert v3.output_from.inputs["x1"] == v2


def test_mul_with_scalar():
    v1 = Variable([2, 3, 4, 5], OrderNHWC)
    v2 = v1 * 3
    assert isinstance(v2.output_from, ScalarMul)
    assert v2.output_from.inputs["x0"] == v1
    assert v2.output_from.value == 3


@raises(TypeError)
def test_mul_invalid_type():
    Variable([2, 3, 4, 5], OrderNHWC) * "3"


def test_rmul_with_scalar():
    v1 = Variable([2, 3, 4, 5], OrderNHWC)
    v2 = 3 * v1
    assert isinstance(v2.output_from, ScalarMul)
    assert v2.output_from.inputs["x0"] == v1
    assert v2.output_from.value == 3


@raises(TypeError)
def test_rmul_invalid_type():
    "3" * Variable([2, 3, 4, 5], OrderNHWC)
# truediv
# variable / variable builds ElementwiseDiv.  Division by a number folds into
# ScalarMul by the reciprocal; number / variable lowers to
# ScalarMul(ScalarPow(v, -1), number).

def test_truediv_with_variable():
    v1 = Variable([2, 3, 4, 5], OrderNHWC)
    v2 = Variable([2, 3, 4, 5], OrderNHWC)
    v3 = v1 / v2
    assert isinstance(v3.output_from, ElementwiseDiv)
    assert v3.output_from.inputs["x0"] == v1
    assert v3.output_from.inputs["x1"] == v2


def test_truediv_with_scalar():
    v1 = Variable([2, 3, 4, 5], OrderNHWC)
    v2 = v1 / 4
    assert isinstance(v2.output_from, ScalarMul)
    assert v2.output_from.inputs["x0"] == v1
    assert v2.output_from.value == 0.25


@raises(TypeError)
def test_truediv_invalid_type():
    Variable([2, 3, 4, 5], OrderNHWC) / "3"


def test_rtruediv_with_scalar():
    v1 = Variable([2, 3, 4, 5], OrderNHWC)
    v2 = 4 / v1
    assert isinstance(v2.output_from, ScalarMul)
    assert v2.output_from.value == 4

    # The operand is v1 ** -1 (elementwise reciprocal).
    inv_v1 = v2.output_from.inputs["x0"]
    assert isinstance(inv_v1.output_from, ScalarPow)
    assert inv_v1.output_from.inputs["x0"] == v1
    assert inv_v1.output_from.value == -1


@raises(TypeError)
def test_rtruediv_invalid_type():
    "3" / Variable([2, 3, 4, 5], OrderNHWC)
# pow
# variable ** variable builds ElementwisePow; variable ** number builds
# ScalarPow.  number ** variable promotes the number to a ConstantVariable
# base of an ElementwisePow.  The three-argument pow(v, e, m) is unsupported.

def test_pow_with_variable():
    v1 = Variable([2, 3, 4, 5], OrderNHWC)
    v2 = Variable([2, 3, 4, 5], OrderNHWC)
    v3 = v1 ** v2
    assert isinstance(v3.output_from, ElementwisePow)
    assert v3.output_from.inputs["x0"] == v1
    assert v3.output_from.inputs["x1"] == v2


def test_pow_with_scalar():
    v1 = Variable([2, 3, 4, 5], OrderNHWC)
    v2 = v1 ** 4
    assert isinstance(v2.output_from, ScalarPow)
    assert v2.output_from.inputs["x0"] == v1
    assert v2.output_from.value == 4


@raises(TypeError)
def test_pow_invalid_type():
    Variable([2, 3, 4, 5], OrderNHWC) ** "3"


def test_rpow_with_scalar():
    v1 = Variable([2, 3, 4, 5], OrderNHWC)
    v2 = 4 ** v1
    assert isinstance(v2.output_from, ElementwisePow)
    assert isinstance(v2.output_from.inputs["x0"], ConstantVariable)
    assert np.all(v2.output_from.inputs["x0"].data == 4)
    assert v2.output_from.inputs["x1"] == v1


@raises(TypeError)
def test_rpow_invalid_type():
    "3" ** Variable([2, 3, 4, 5], OrderNHWC)


@raises(NotImplementedError)
def test_pow_modulo():
    # Modular exponentiation (pow with a third argument) is not supported.
    Variable([2, 3, 4, 5], OrderNHWC).__pow__(2, 3)
# gt
# Comparisons build boolean-valued graph operators; scalar operands are
# promoted to ConstantVariable inputs.

def test_gt_with_variable():
    v1 = Variable([2, 3, 4, 5], OrderNHWC)
    v2 = Variable([2, 3, 4, 5], OrderNHWC)
    v3 = v1 > v2
    assert isinstance(v3.output_from, Greater)
    assert v3.output_from.inputs["x0"] == v1
    assert v3.output_from.inputs["x1"] == v2


def test_gt_variable_and_scalar():
    v1 = Variable([2, 3, 4, 5], OrderNHWC)
    v2 = v1 > 4
    assert isinstance(v2.output_from, Greater)
    assert v2.output_from.inputs["x0"] == v1
    assert isinstance(v2.output_from.inputs["x1"], ConstantVariable)
    assert np.all(v2.output_from.inputs["x1"].data == 4)


def test_gt_scalar_and_variable():
    v1 = Variable([2, 3, 4, 5], OrderNHWC)
    v2 = 4 > v1
    assert isinstance(v2.output_from, Greater)
    assert isinstance(v2.output_from.inputs["x0"], ConstantVariable)
    assert np.all(v2.output_from.inputs["x0"].data == 4)
    assert v2.output_from.inputs["x1"] == v1


@raises(TypeError)
def test_gt_invalid_type():
    Variable([2, 3, 4, 5], OrderNHWC) > "3"
# gte
# Same shape as the gt tests, using GreaterEqual.

def test_gte_with_variable():
    v1 = Variable([2, 3, 4, 5], OrderNHWC)
    v2 = Variable([2, 3, 4, 5], OrderNHWC)
    v3 = v1 >= v2
    assert isinstance(v3.output_from, GreaterEqual)
    assert v3.output_from.inputs["x0"] == v1
    assert v3.output_from.inputs["x1"] == v2


def test_gte_variable_and_scalar():
    v1 = Variable([2, 3, 4, 5], OrderNHWC)
    v2 = v1 >= 4
    assert isinstance(v2.output_from, GreaterEqual)
    assert v2.output_from.inputs["x0"] == v1
    assert isinstance(v2.output_from.inputs["x1"], ConstantVariable)
    assert np.all(v2.output_from.inputs["x1"].data == 4)


def test_gte_scalar_and_variable():
    v1 = Variable([2, 3, 4, 5], OrderNHWC)
    v2 = 4 >= v1
    assert isinstance(v2.output_from, GreaterEqual)
    assert isinstance(v2.output_from.inputs["x0"], ConstantVariable)
    assert np.all(v2.output_from.inputs["x0"].data == 4)
    assert v2.output_from.inputs["x1"] == v1


@raises(TypeError)
def test_gte_invalid_type():
    Variable([2, 3, 4, 5], OrderNHWC) >= "3"
# lt
# There is no dedicated Less operator: a < b lowers to Greater(b, a), i.e.
# the operands arrive swapped.

def test_lt_with_variable():
    v1 = Variable([2, 3, 4, 5], OrderNHWC)
    v2 = Variable([2, 3, 4, 5], OrderNHWC)
    v3 = v1 < v2
    assert isinstance(v3.output_from, Greater)
    assert v3.output_from.inputs["x0"] == v2
    assert v3.output_from.inputs["x1"] == v1


def test_lt_variable_and_scalar():
    v1 = Variable([2, 3, 4, 5], OrderNHWC)
    v2 = v1 < 4
    assert isinstance(v2.output_from, Greater)
    assert isinstance(v2.output_from.inputs["x0"], ConstantVariable)
    assert np.all(v2.output_from.inputs["x0"].data == 4)
    assert v2.output_from.inputs["x1"] == v1


def test_lt_scalar_and_variable():
    v1 = Variable([2, 3, 4, 5], OrderNHWC)
    v2 = 4 < v1
    assert isinstance(v2.output_from, Greater)
    assert v2.output_from.inputs["x0"] == v1
    assert isinstance(v2.output_from.inputs["x1"], ConstantVariable)
    assert np.all(v2.output_from.inputs["x1"].data == 4)


@raises(TypeError)
def test_lt_invalid_type():
    Variable([2, 3, 4, 5], OrderNHWC) < "3"
# lte
def test_lte_with_variable():
v1 = Variable([2, 3, 4, 5], OrderNHWC)
v2 = Variable([2, 3, 4, 5], OrderNHWC)
v3 = v1 <= v2
assert isinstance(v3.output_from, GreaterEqual)
assert v3.output_from.inputs["x0"] == v2
assert v3.output_from.inputs["x1"] == v1
def test_lte_variable_and_scalar():
    """`variable <= scalar` lowers to GreaterEqual with the constant as "x0"."""
    var = Variable([2, 3, 4, 5], OrderNHWC)
    op = (var <= 4).output_from
    assert isinstance(op, GreaterEqual)
    assert isinstance(op.inputs["x0"], ConstantVariable)
    assert np.all(op.inputs["x0"].data == 4)
    assert op.inputs["x1"] == var
def test_lte_scalar_and_variable():
    """`scalar <= variable` lowers to GreaterEqual with the variable as "x0"."""
    var = Variable([2, 3, 4, 5], OrderNHWC)
    op = (4 <= var).output_from
    assert isinstance(op, GreaterEqual)
    assert op.inputs["x0"] == var
    assert isinstance(op.inputs["x1"], ConstantVariable)
    assert np.all(op.inputs["x1"].data == 4)
@raises(TypeError)
def test_lte_invalid_type():
    # Comparing a Variable against a non-numeric operand must raise TypeError.
    Variable([2, 3, 4, 5], OrderNHWC) <= "3"
def test_slice_with_ellipsis():
    """`...` expands to the unmentioned axes; `None` inserts a new size-1 axis
    and integer indices drop their axis, all through a Slice operator."""
    v1 = Variable([2, 3, 4, 5, 6], Order([None, None, None, None, None]))
    v2 = v1[:, 2, ..., None, 2:4]
    assert v2.shape == (2, 4, 5, 1, 2), v2.shape
    assert v2.order.axes[0] == v1.order.axes[0]
    assert v2.order.axes[1] == v1.order.axes[2]
    assert v2.order.axes[2] == v1.order.axes[3]
    assert v2.order.axes[4] == v1.order.axes[4]
    assert isinstance(v2.output_from, Slice)
def test_slice_without_ellipsis():
    """Fully-specified indexing: integer indices drop axes, `None` inserts a
    size-1 axis, and the result is produced by a Slice operator."""
    v1 = Variable([2, 3, 4, 5, 6], Order([None, None, None, None, None]))
    v2 = v1[:, 2, 3, :, None, 2:4]
    assert v2.shape == (2, 5, 1, 2), v2.shape
    assert v2.order.axes[0] == v1.order.axes[0]
    assert v2.order.axes[1] == v1.order.axes[3]
    assert v2.order.axes[3] == v1.order.axes[4]
    assert isinstance(v2.output_from, Slice)
@raises(TypeError)
def test_slice_invalid_type():
    # A non-int/slice/None index element must raise TypeError.
    v1 = Variable([2, 3, 4, 5, 6], Order([None, None, None, None, None]))
    v1[:, 2, 3, :, None, "hoge"]
def test_reshape():
    """reshape() re-materializes the variable through a Reshape operator."""
    src = Variable([2, 3, 4, 5], OrderNHWC)
    dst = src.reshape(shape=[1, 6, 4, 5], order=OrderNCHW)
    for axis, size in ((Axis.N, 1), (Axis.C, 6), (Axis.H, 4), (Axis.W, 5)):
        assert dst.shape_dict[axis] == size
    assert isinstance(dst.output_from, Reshape)
    assert dst.output_from.inputs["x"] == src
def test_expand_dims_with_index():
    """expand_dims(axis, i) inserts a size-1 axis at position i via Reshape."""
    v1 = Variable([2, 3], OrderNC)
    v2 = v1.expand_dims(Axis.H, 1)
    assert v2.order == Order([Axis.N, Axis.H, Axis.C])
    assert v2.shape_dict[Axis.N] == 2
    assert v2.shape_dict[Axis.H] == 1
    assert v2.shape_dict[Axis.C] == 3
    assert isinstance(v2.output_from, Reshape)
    assert v2.output_from.inputs["x"] == v1
def test_expand_dims_without_index():
    """Without an index, expand_dims appends the new size-1 axis at the end."""
    v1 = Variable([2, 3], OrderNC)
    v2 = v1.expand_dims(Axis.H)
    assert v2.order == Order([Axis.N, Axis.C, Axis.H])
    assert v2.shape_dict[Axis.N] == 2
    assert v2.shape_dict[Axis.H] == 1
    assert v2.shape_dict[Axis.C] == 3
    assert isinstance(v2.output_from, Reshape)
    assert v2.output_from.inputs["x"] == v1
def test_squeeze_with_one_axis():
    """squeeze(axis) removes only the named size-1 axis; other size-1 axes stay."""
    v1 = Variable([2, 1, 1, 3], OrderNHWC)
    v2 = v1.squeeze(Axis.H)
    assert v2.order == Order([Axis.N, Axis.W, Axis.C])
    assert v2.shape_dict[Axis.N] == 2
    assert v2.shape_dict[Axis.W] == 1
    assert v2.shape_dict[Axis.C] == 3
    assert isinstance(v2.output_from, Reshape)
    assert v2.output_from.inputs["x"] == v1
def test_squeeze_with_axes():
    """squeeze([axes]) removes every listed size-1 axis via Reshape."""
    v1 = Variable([2, 1, 1, 3], OrderNHWC)
    v2 = v1.squeeze([Axis.H, Axis.W])
    assert v2.order == Order([Axis.N, Axis.C])
    assert v2.shape_dict[Axis.N] == 2
    assert v2.shape_dict[Axis.C] == 3
    assert isinstance(v2.output_from, Reshape)
    assert v2.output_from.inputs["x"] == v1
def test_expand_dims_without_axis():
    # NOTE(review): misnamed — this exercises squeeze() with no argument
    # (drop ALL size-1 axes), not expand_dims; consider renaming to
    # test_squeeze_without_axis.
    v1 = Variable([2, 1, 1, 3], OrderNHWC)
    v2 = v1.squeeze()
    assert v2.order == Order([Axis.N, Axis.C])
    assert v2.shape_dict[Axis.N] == 2
    assert v2.shape_dict[Axis.C] == 3
    assert isinstance(v2.output_from, Reshape)
    assert v2.output_from.inputs["x"] == v1
def test_combine_axes():
    """combine_axes merges the listed axes (here W*H = 4*5... i.e. 3*4 = 12)
    into one existing axis; the Reshape records both in_order and out_order."""
    v1 = Variable([2, 3, 4, 5], OrderNHWC)
    v2 = v1.combine_axes([Axis.W, Axis.H], Axis.H)
    assert v2.order == Order([Axis.N, Axis.H, Axis.C])
    assert v2.shape_dict[Axis.N] == 2
    assert v2.shape_dict[Axis.H] == 12
    assert v2.shape_dict[Axis.C] == 5
    assert isinstance(v2.output_from, Reshape)
    assert v2.output_from.in_order == Order([Axis.N, Axis.W, Axis.H, Axis.C])
    assert v2.output_from.out_order == Order([Axis.N, Axis.H, Axis.C])
    assert v2.output_from.inputs["x"] == v1
def test_combine_axes_create_new_axis():
    """combine_axes can also merge into a freshly created Axis object."""
    new_axis = Axis()
    v1 = Variable([2, 3, 4, 5], OrderNHWC)
    v2 = v1.combine_axes([Axis.W, Axis.H], new_axis)
    assert v2.order == Order([Axis.N, new_axis, Axis.C])
    assert v2.shape_dict[Axis.N] == 2
    assert v2.shape_dict[new_axis] == 12
    assert v2.shape_dict[Axis.C] == 5
    assert isinstance(v2.output_from, Reshape)
    assert v2.output_from.in_order == Order([Axis.N, Axis.W, Axis.H, Axis.C])
    assert v2.output_from.out_order == Order([Axis.N, new_axis, Axis.C])
    assert v2.output_from.inputs["x"] == v1
def test_reshape_like():
    """reshape_like() adopts the target variable's shape/order via Reshape."""
    source = Variable([2, 3, 4, 5], OrderNHWC)
    target = Variable([1, 6, 4, 5], OrderNCHW)
    reshaped = source.reshape_like(target)
    for axis, size in ((Axis.N, 1), (Axis.C, 6), (Axis.H, 4), (Axis.W, 5)):
        assert reshaped.shape_dict[axis] == size
    assert isinstance(reshaped.output_from, Reshape)
    assert reshaped.output_from.inputs["x"] == source
def test_transpose():
    """transpose(order) permutes the data to the new order via a Transpose op."""
    v1 = Variable([2, 3, 4, 5], OrderNHWC)
    v2 = v1.transpose(OrderNCHW)
    assert v2.shape == (2, 5, 3, 4), v2.shape
    assert v2.order == OrderNCHW
    assert isinstance(v2.output_from, Transpose)
    assert v2.output_from.inputs["x0"] == v1
def test_transpose_like():
    """transpose_like() adopts the target variable's axis order via Transpose."""
    v1 = Variable([2, 3, 4, 5], OrderNHWC)
    v2 = Variable([2, 5, 3, 4], OrderNCHW)
    v3 = v1.transpose_like(v2)
    assert v3.shape == (2, 5, 3, 4), v3.shape
    assert v3.order == OrderNCHW
    assert isinstance(v3.output_from, Transpose)
    assert v3.output_from.inputs["x0"] == v1
def test_reinterpret_axes():
    """reinterpret_axes() relabels axes without moving data: shape unchanged."""
    src = Variable([2, 3, 4, 5], OrderNHWC)
    out = src.reinterpret_axes(OrderNCHW)
    assert out.shape == (2, 3, 4, 5), out.shape
    assert out.order == OrderNCHW
    assert isinstance(out.output_from, ReinterpretAxis)
    assert out.output_from.inputs["x"] == src
| 29.230132 | 98 | 0.661739 |
82d6bbb305e43ec4b2ab7ad969458735ccbd26c1 | 7,823 | py | Python | tests/http/test_stream.py | Daanvdk/jackie | c8529ea7a585e1c53288a154e147a55f4c790531 | [
"MIT"
] | null | null | null | tests/http/test_stream.py | Daanvdk/jackie | c8529ea7a585e1c53288a154e147a55f4c790531 | [
"MIT"
] | 2 | 2021-02-22T01:56:44.000Z | 2021-02-22T02:15:22.000Z | tests/http/test_stream.py | daanvdk/jackie | c8529ea7a585e1c53288a154e147a55f4c790531 | [
"MIT"
] | null | null | null | import asyncio
import tempfile
import pytest
from jackie.http.stream import Stream, SendFile
# Very simple stream implementation that allows us to specify the content type
class ContentTypeStream(Stream):
    """Minimal Stream subclass for tests whose content type is injectable."""
    def __init__(self, body=b'', content_type='text/plain; charset=UTF-8'):
        super().__init__(body)
        self._content_type = content_type
    def _get_content_type(self):
        # Hook consumed by Stream to discover the content type.
        return self._content_type
@pytest.mark.asyncio
async def test_async_chunks_to_chunks():
    """An async-generator body is replayed unchanged by Stream.chunks()."""
    async def async_chunks():
        yield b'foo'
        yield b'bar'
    stream = ContentTypeStream(async_chunks())
    chunks = []
    async for chunk in stream.chunks():
        chunks.append(chunk)
    assert chunks == [b'foo', b'bar']
@pytest.mark.asyncio
async def test_chunks_to_chunks():
    """A sync-generator body is replayed unchanged by Stream.chunks()."""
    # Renamed locals: the original shadowed the generator `chunks` with the
    # accumulator list of the same name.
    def sync_chunks():
        yield b'foo'
        yield b'bar'
    stream = ContentTypeStream(sync_chunks())
    received = []
    async for chunk in stream.chunks():
        received.append(chunk)
    assert received == [b'foo', b'bar']
@pytest.mark.asyncio
async def test_bytes_to_chunks():
    """A plain bytes body is emitted as a single chunk."""
    stream = ContentTypeStream(b'foobar')
    received = [chunk async for chunk in stream.chunks()]
    assert received == [b'foobar']
@pytest.mark.asyncio
async def test_async_chunks_to_body():
    """body() concatenates all chunks of an async-generator body."""
    async def async_chunks():
        yield b'foo'
        yield b'bar'
    stream = ContentTypeStream(async_chunks())
    body = await stream.body()
    assert body == b'foobar'
@pytest.mark.asyncio
async def test_chunks_to_body():
    """body() concatenates all chunks of a sync-generator body."""
    def chunks():
        yield b'foo'
        yield b'bar'
    stream = ContentTypeStream(chunks())
    body = await stream.body()
    assert body == b'foobar'
@pytest.mark.asyncio
async def test_bytes_to_body():
    """body() returns a plain bytes payload unchanged."""
    stream = ContentTypeStream(b'foobar')
    assert await stream.body() == b'foobar'
@pytest.mark.asyncio
async def test_async_chunks_to_text():
    """text() decodes the concatenated async-generator body."""
    async def async_chunks():
        yield b'foo'
        yield b'bar'
    stream = ContentTypeStream(async_chunks())
    text = await stream.text()
    assert text == 'foobar'
@pytest.mark.asyncio
async def test_chunks_to_text():
    """text() decodes the concatenated sync-generator body."""
    def chunks():
        yield b'foo'
        yield b'bar'
    stream = ContentTypeStream(chunks())
    text = await stream.text()
    assert text == 'foobar'
@pytest.mark.asyncio
async def test_bytes_to_text():
    """text() decodes a plain bytes payload."""
    stream = ContentTypeStream(b'foobar')
    assert await stream.text() == 'foobar'
@pytest.mark.asyncio
async def test_async_chunks_to_json():
    """json() parses a JSON document split across async-generator chunks."""
    async def async_chunks():
        yield b'{'
        yield b'"foo"'
        yield b': '
        yield b'"bar"'
        yield b'}'
    stream = ContentTypeStream(async_chunks())
    data = await stream.json()
    assert data == {'foo': 'bar'}
@pytest.mark.asyncio
async def test_chunks_to_json():
    """json() parses a JSON document split across sync-generator chunks."""
    def chunks():
        yield b'{'
        yield b'"foo"'
        yield b': '
        yield b'"bar"'
        yield b'}'
    stream = ContentTypeStream(chunks())
    data = await stream.json()
    assert data == {'foo': 'bar'}
@pytest.mark.asyncio
async def test_bytes_to_json():
    """json() parses a plain bytes payload."""
    stream = ContentTypeStream(b'{"foo": "bar"}')
    assert await stream.json() == {'foo': 'bar'}
@pytest.mark.asyncio
async def test_stream_multiple_chunks():
    """Two concurrent iterators over the same stream see identical chunks,
    even when one consumer runs ahead of the other (via ensure_future)."""
    stream = ContentTypeStream([b'foo', b'bar', b'baz'])
    chunks1 = stream.chunks()
    chunks2 = stream.chunks()
    chunk1 = await chunks1.__anext__()
    chunk2 = await chunks2.__anext__()
    assert chunk1 == b'foo'
    assert chunk2 == b'foo'
    # Schedule iterator 1's next step first, then drive iterator 2; both must
    # still observe the same second chunk.
    chunk1 = asyncio.ensure_future(chunks1.__anext__())
    chunk2 = await chunks2.__anext__()
    chunk1 = await chunk1
    assert chunk1 == b'bar'
    assert chunk2 == b'bar'
    chunk1 = await chunks1.__anext__()
    chunk2 = await chunks2.__anext__()
    assert chunk1 == b'baz'
    assert chunk2 == b'baz'
    # Both iterators exhaust independently.
    with pytest.raises(StopAsyncIteration):
        await chunks1.__anext__()
    with pytest.raises(StopAsyncIteration):
        await chunks2.__anext__()
@pytest.mark.asyncio
async def test_parse_form_multipart():
    """form() parses a multipart/form-data body using the boundary from the
    content type."""
    stream = ContentTypeStream(
        body=(
            b'--boundary\n'
            b'Content-Disposition: form-data; name=foo\n'
            b'\n'
            b'123\n'
            b'--boundary\n'
            b'Content-Disposition: form-data; name=bar\n'
            b'\n'
            b'456\n'
            b'--boundary\n'
            b'Content-Disposition: form-data; name=baz\n'
            b'\n'
            b'789\n'
            b'--boundary--\n'
        ),
        content_type='multipart/form-data; boundary=boundary',
    )
    assert await stream.form() == {
        'foo': '123',
        'bar': '456',
        'baz': '789',
    }
@pytest.mark.asyncio
async def test_parse_form_urlencoded():
    """form() parses an application/x-www-form-urlencoded body."""
    stream = ContentTypeStream(
        body=b'foo=123&bar=456&baz=789',
        content_type='application/x-www-form-urlencoded',
    )
    assert await stream.form() == {
        'foo': '123',
        'bar': '456',
        'baz': '789',
    }
@pytest.mark.asyncio
async def test_parse_form_incorrect_content_type():
    """form() rejects content types that are not form encodings."""
    stream = ContentTypeStream(
        body=b'foo=123&bar=456&baz=789',
        content_type='image/png',
    )
    with pytest.raises(ValueError):
        await stream.form()
def test_parse_no_content_type():
    """With no content type: content_type is None, charset falls back to
    UTF-8, and boundary access raises ValueError."""
    stream = ContentTypeStream(content_type=None)
    assert stream.content_type is None
    assert stream.charset == 'UTF-8'
    with pytest.raises(ValueError):
        stream.boundary
def test_parse_content_type_with_charset():
    """The charset parameter is split off the content type; no boundary."""
    stream = ContentTypeStream(content_type='text/plain; charset=foo')
    assert stream.content_type == 'text/plain'
    assert stream.charset == 'foo'
    with pytest.raises(ValueError):
        stream.boundary
def test_parse_content_type_without_charset():
    """Without a charset parameter the charset defaults to UTF-8."""
    stream = ContentTypeStream(content_type='text/plain')
    assert stream.content_type == 'text/plain'
    assert stream.charset == 'UTF-8'
    with pytest.raises(ValueError):
        stream.boundary
def test_parse_content_type_with_boundary():
    """A multipart content type exposes its boundary parameter."""
    stream = ContentTypeStream(
        content_type='multipart/form-data; boundary=foo',
    )
    assert stream.content_type == 'multipart/form-data'
    assert stream.charset == 'UTF-8'
    assert stream.boundary == 'foo'
@pytest.mark.asyncio
async def test_send_file_stream():
    """A SendFile element inside the body is read from disk by the stream."""
    with tempfile.NamedTemporaryFile() as f:
        f.write(b'foobar')
        f.flush()
        stream = ContentTypeStream(
            body=[SendFile(f.name)],
            content_type='text/plain; charset=UTF-8',
        )
        assert await stream.text() == 'foobar'
@pytest.mark.asyncio
async def test_send_file_chunks():
    """SendFile.chunks(chunk_size=n) yields the file in n-byte pieces."""
    with tempfile.NamedTemporaryFile() as f:
        f.write(b'foobar')
        f.flush()
        send_file = SendFile(f.name)
        chunks = [chunk async for chunk in send_file.chunks(chunk_size=3)]
        assert chunks == [b'foo', b'bar']
@pytest.mark.asyncio
async def test_send_file_big_chunk():
    """chunk_size=-1 reads the whole file as one chunk."""
    with tempfile.NamedTemporaryFile() as f:
        f.write(b'foobar')
        f.flush()
        send_file = SendFile(f.name)
        chunks = [chunk async for chunk in send_file.chunks(chunk_size=-1)]
        assert chunks == [b'foobar']
@pytest.mark.asyncio
async def test_send_file_offset():
    """The offset argument skips the leading bytes of the file."""
    with tempfile.NamedTemporaryFile() as f:
        f.write(b'foobar')
        f.flush()
        send_file = SendFile(f.name, offset=3)
        chunks = [chunk async for chunk in send_file.chunks(chunk_size=-1)]
        assert chunks == [b'bar']
@pytest.mark.asyncio
async def test_send_file_size():
    """The size argument caps how many bytes of the file are sent."""
    with tempfile.NamedTemporaryFile() as f:
        f.write(b'foobar')
        f.flush()
        send_file = SendFile(f.name, size=3)
        chunks = [chunk async for chunk in send_file.chunks(chunk_size=-1)]
        assert chunks == [b'foo']
| 25.235484 | 78 | 0.6339 |
0141d133513ddbd544dbb239ad851d1501cf8e08 | 3,697 | py | Python | mmdet3d/models/middle_encoders/pillar_scatter.py | BB88Lee/mmdetection3d | 62aeeadf70ac1229c595e3a4fe09d8a49df808f1 | [
"Apache-2.0"
] | 136 | 2021-06-03T06:37:56.000Z | 2022-03-29T13:29:03.000Z | mmdet3d/models/middle_encoders/pillar_scatter.py | BB88Lee/mmdetection3d | 62aeeadf70ac1229c595e3a4fe09d8a49df808f1 | [
"Apache-2.0"
] | 38 | 2021-06-05T12:41:30.000Z | 2022-03-23T07:31:28.000Z | mmdet3d/models/middle_encoders/pillar_scatter.py | BB88Lee/mmdetection3d | 62aeeadf70ac1229c595e3a4fe09d8a49df808f1 | [
"Apache-2.0"
] | 15 | 2021-04-23T01:13:28.000Z | 2022-03-27T11:15:25.000Z | import torch
from mmcv.runner import auto_fp16
from torch import nn
from ..registry import MIDDLE_ENCODERS
@MIDDLE_ENCODERS.register_module()
class PointPillarsScatter(nn.Module):
    """Point Pillar's Scatter.

    Converts learned features from dense tensor to sparse pseudo image.

    Args:
        in_channels (int): Channels of input features.
        output_shape (list[int]): Required output shape of features,
            indexed as [ny, nx] (rows, then columns) of the pseudo image.
    """

    def __init__(self, in_channels, output_shape):
        super().__init__()
        self.output_shape = output_shape
        # output_shape is [ny, nx]: row count first, column count second.
        self.ny = output_shape[0]
        self.nx = output_shape[1]
        self.in_channels = in_channels
        # Flag consumed by mmcv's auto_fp16 machinery.
        self.fp16_enabled = False

    @auto_fp16(apply_to=('voxel_features', ))
    def forward(self, voxel_features, coors, batch_size=None):
        """Forward function to scatter features.

        Dispatches to the batched implementation when ``batch_size`` is
        given, otherwise treats the input as a single sample.
        """
        # TODO: rewrite the function in a batch manner
        # no need to deal with different batch cases
        if batch_size is not None:
            return self.forward_batch(voxel_features, coors, batch_size)
        else:
            return self.forward_single(voxel_features, coors)

    def forward_single(self, voxel_features, coors):
        """Scatter features of single sample.

        Args:
            voxel_features (torch.Tensor): Voxel features in shape (N, M, C).
            coors (torch.Tensor): Coordinates of each voxel.
                The first column indicates the sample ID.
        """
        # Create the canvas for this sample
        canvas = torch.zeros(
            self.in_channels,
            self.nx * self.ny,
            dtype=voxel_features.dtype,
            device=voxel_features.device)
        # Flatten (row, col) coordinates into linear indices on the canvas.
        indices = coors[:, 1] * self.nx + coors[:, 2]
        indices = indices.long()
        voxels = voxel_features.t()
        # Now scatter the blob back to the canvas.
        canvas[:, indices] = voxels
        # Undo the column stacking to final 4-dim tensor
        canvas = canvas.view(1, self.in_channels, self.ny, self.nx)
        return [canvas]

    def forward_batch(self, voxel_features, coors, batch_size):
        """Scatter features of a whole batch, one canvas per sample.

        Args:
            voxel_features (torch.Tensor): Voxel features in shape (N, M, C).
            coors (torch.Tensor): Coordinates of each voxel in shape (N, 4).
                The first column indicates the sample ID.
            batch_size (int): Number of samples in the current batch.
        """
        # batch_canvas will be the final output.
        batch_canvas = []
        for batch_itt in range(batch_size):
            # Create the canvas for this sample
            canvas = torch.zeros(
                self.in_channels,
                self.nx * self.ny,
                dtype=voxel_features.dtype,
                device=voxel_features.device)
            # Only include non-empty pillars
            batch_mask = coors[:, 0] == batch_itt
            this_coors = coors[batch_mask, :]
            # Columns 2/3 hold (row, col); column 1 differs from the
            # single-sample layout because column 0 is the sample ID here.
            indices = this_coors[:, 2] * self.nx + this_coors[:, 3]
            indices = indices.type(torch.long)
            voxels = voxel_features[batch_mask, :]
            voxels = voxels.t()
            # Now scatter the blob back to the canvas.
            canvas[:, indices] = voxels
            # Append to a list for later stacking.
            batch_canvas.append(canvas)
        # Stack to 3-dim tensor (batch-size, in_channels, nrows*ncols)
        batch_canvas = torch.stack(batch_canvas, 0)
        # Undo the column stacking to final 4-dim tensor
        batch_canvas = batch_canvas.view(batch_size, self.in_channels, self.ny,
                                         self.nx)
        return batch_canvas
| 36.245098 | 79 | 0.606979 |
90b4393ffec358637b87f9b912edd1552c6910de | 724 | py | Python | client/merc/modules/setup/sucheck.py | jduck/drozer | 171b0346405c00808a53e94d25582adcba098cf1 | [
"BSD-Source-Code"
] | 3 | 2015-10-04T01:21:10.000Z | 2019-08-03T19:08:10.000Z | client/merc/modules/setup/sucheck.py | jduck/drozer | 171b0346405c00808a53e94d25582adcba098cf1 | [
"BSD-Source-Code"
] | null | null | null | client/merc/modules/setup/sucheck.py | jduck/drozer | 171b0346405c00808a53e94d25582adcba098cf1 | [
"BSD-Source-Code"
] | null | null | null | import os
from merc.lib.modules import Module
class sucheck(Module):
    """Checks whether a working `su` binary exists on the device by launching
    it through Mercury's persistent shell and echoing the shell's output.

    Description: Test if SU binary works on device
    Credit: Tyrone Erasmus - MWR Labs"""

    def __init__(self, *args, **kwargs):
        Module.__init__(self, *args, **kwargs)
        # Menu path under which this module is registered.
        self.path = ["setup"]

    def execute(self, session, _arg):
        # Launch `su` inside the agent's persistent shell, then read back
        # whatever the shell produced.
        session.executeCommand("shell", "executeMercuryShell", {'args':'su'})
        print "\n--------------<mercury_shell>--------------"
        print session.executeCommand("shell", "readMercuryShell", None).getPaddedErrorOrData()
        print "--------------</mercury_shell>-------------\n"
        print "If this was successful there will be a root shell waiting for you in shell->persistent\n"
| 36.2 | 104 | 0.617403 |
834c60327c97a511205d23326f6a03f55fd710e9 | 6,526 | py | Python | src/test/python/apache/aurora/client/cli/test_restart.py | wickman/incubator-aurora | 9906d217093568ed4c9cfe620862818f15ce4150 | [
"Apache-2.0"
] | null | null | null | src/test/python/apache/aurora/client/cli/test_restart.py | wickman/incubator-aurora | 9906d217093568ed4c9cfe620862818f15ce4150 | [
"Apache-2.0"
] | null | null | null | src/test/python/apache/aurora/client/cli/test_restart.py | wickman/incubator-aurora | 9906d217093568ed4c9cfe620862818f15ce4150 | [
"Apache-2.0"
] | null | null | null | #
# Copyright 2014 Apache Software Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import contextlib
import functools
from apache.aurora.client.api.health_check import InstanceWatcherHealthCheck, Retriable
from apache.aurora.client.cli import EXIT_API_ERROR
from apache.aurora.client.cli.client import AuroraCommandLine
from apache.aurora.client.cli.util import AuroraClientCommandTest
from twitter.common.contextutil import temporary_file
from gen.apache.aurora.ttypes import (
AssignedTask,
JobKey,
PopulateJobResult,
ScheduledTask,
ScheduleStatusResult,
TaskConfig,
)
from mock import Mock, patch
class TestRestartCommand(AuroraClientCommandTest):
  """Client-side tests for `aurora job restart`, driven entirely by mocks of
  the scheduler proxy and the instance-watcher health check."""

  @classmethod
  def setup_mock_scheduler_for_simple_restart(cls, api):
    """Set up all of the API mocks for scheduler calls during a simple restart"""
    sched_proxy = api.scheduler_proxy
    cls.setup_get_tasks_status_calls(sched_proxy)
    cls.setup_populate_job_config(sched_proxy)
    sched_proxy.restartShards.return_value = cls.create_simple_success_response()

  @classmethod
  def setup_populate_job_config(cls, api):
    """Make populateJobConfig return a success response carrying 20 mock task
    configs (one per instance, so 4 batches of 5 in the tests below)."""
    populate = cls.create_simple_success_response()
    populate.result.populateJobResult = Mock(spec=PopulateJobResult)
    api.populateJobConfig.return_value = populate
    configs = [Mock(spec=TaskConfig) for i in range(20)]
    populate.result.populateJobResult.populated = set(configs)
    return populate

  @classmethod
  def setup_health_checks(cls, mock_api):
    """Return a health check mock whose health() always reports alive."""
    mock_health_check = Mock(spec=InstanceWatcherHealthCheck)
    mock_health_check.health.return_value = Retriable.alive()
    return mock_health_check

  def test_restart_simple(self):
    # Test the client-side restart logic in its simplest case: everything succeeds
    (mock_api, mock_scheduler_proxy) = self.create_mock_api()
    mock_health_check = self.setup_health_checks(mock_api)
    self.setup_mock_scheduler_for_simple_restart(mock_api)
    with contextlib.nested(
        patch('apache.aurora.client.api.SchedulerProxy', return_value=mock_scheduler_proxy),
        patch('apache.aurora.client.factory.CLUSTERS', new=self.TEST_CLUSTERS),
        patch('apache.aurora.client.api.instance_watcher.InstanceWatcherHealthCheck',
            return_value=mock_health_check),
        patch('time.time', side_effect=functools.partial(self.fake_time, self)),
        patch('time.sleep', return_value=None)
    ):
      with temporary_file() as fp:
        fp.write(self.get_valid_config())
        fp.flush()
        cmd = AuroraCommandLine()
        cmd.execute(['job', 'restart', '--batch-size=5', 'west/bozo/test/hello', fp.name])
        # Like the update test, the exact number of calls here doesn't matter.
        # what matters is that it must have been called once before batching, plus
        # at least once per batch, and there are 4 batches.
        assert mock_scheduler_proxy.getTasksStatus.call_count >= 4
        # called once per batch
        assert mock_scheduler_proxy.restartShards.call_count == 4
        # parameters for all calls are generated by the same code, so we just check one
        mock_scheduler_proxy.restartShards.assert_called_with(JobKey(environment=self.TEST_ENV,
            role=self.TEST_ROLE, name=self.TEST_JOB), [15, 16, 17, 18, 19], None)

  def test_restart_failed_status(self):
    # getTasksStatus fails up front: no restartShards calls should be issued
    # and the command must exit with an API error.
    (mock_api, mock_scheduler_proxy) = self.create_mock_api()
    mock_health_check = self.setup_health_checks(mock_api)
    self.setup_mock_scheduler_for_simple_restart(mock_api)
    mock_scheduler_proxy.getTasksStatus.return_value = self.create_error_response()
    with contextlib.nested(
        patch('apache.aurora.client.api.SchedulerProxy', return_value=mock_scheduler_proxy),
        patch('apache.aurora.client.factory.CLUSTERS', new=self.TEST_CLUSTERS),
        patch('apache.aurora.client.api.instance_watcher.InstanceWatcherHealthCheck',
            return_value=mock_health_check),
        patch('time.time', side_effect=functools.partial(self.fake_time, self)),
        patch('time.sleep', return_value=None)):
      with temporary_file() as fp:
        fp.write(self.get_valid_config())
        fp.flush()
        cmd = AuroraCommandLine()
        result = cmd.execute(['job', 'restart', '--batch-size=5', 'west/bozo/test/hello', fp.name])
        assert mock_scheduler_proxy.getTasksStatus.call_count == 1
        assert mock_scheduler_proxy.restartShards.call_count == 0
        assert result == EXIT_API_ERROR

  def test_restart_failed_restart(self):
    # restartShards fails on the first batch: only one call should be made
    # and the command must exit with an API error.
    (mock_api, mock_scheduler_proxy) = self.create_mock_api()
    mock_health_check = self.setup_health_checks(mock_api)
    self.setup_mock_scheduler_for_simple_restart(mock_api)
    mock_scheduler_proxy.restartShards.return_value = self.create_error_response()
    with contextlib.nested(
        patch('apache.aurora.client.api.SchedulerProxy', return_value=mock_scheduler_proxy),
        patch('apache.aurora.client.factory.CLUSTERS', new=self.TEST_CLUSTERS),
        patch('apache.aurora.client.api.instance_watcher.InstanceWatcherHealthCheck',
            return_value=mock_health_check),
        patch('time.time', side_effect=functools.partial(self.fake_time, self)),
        patch('time.sleep', return_value=None)):
      with temporary_file() as fp:
        fp.write(self.get_valid_config())
        fp.flush()
        cmd = AuroraCommandLine()
        result = cmd.execute(['job', 'restart', '--batch-size=5', 'west/bozo/test/hello', fp.name])
        assert mock_scheduler_proxy.getTasksStatus.call_count == 1
        assert mock_scheduler_proxy.restartShards.call_count == 1
        mock_scheduler_proxy.restartShards.assert_called_with(JobKey(environment=self.TEST_ENV,
            role=self.TEST_ROLE, name=self.TEST_JOB), [0, 1, 2, 3, 4], None)
        assert result == EXIT_API_ERROR
| 46.614286 | 99 | 0.740116 |
ba8294d80b9085f10a29890d8a0028900821dc72 | 6,324 | py | Python | tests/OpenMaya/test_MFnDependencyNode.py | christophercrouzet/bana | 8087df05ba9844b4d78d3c4699948ca61cf7621d | [
"MIT"
] | 24 | 2017-01-11T15:57:46.000Z | 2020-09-23T06:18:30.000Z | tests/OpenMaya/test_MFnDependencyNode.py | christophercrouzet/bana | 8087df05ba9844b4d78d3c4699948ca61cf7621d | [
"MIT"
] | null | null | null | tests/OpenMaya/test_MFnDependencyNode.py | christophercrouzet/bana | 8087df05ba9844b4d78d3c4699948ca61cf7621d | [
"MIT"
] | 2 | 2017-03-06T23:52:08.000Z | 2020-09-23T06:19:03.000Z | #!/usr/bin/env mayapy
import os
import sys
import unittest
import maya.standalone
from maya import OpenMaya, cmds
_HERE = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, os.path.abspath(os.path.join(_HERE, *((os.pardir,) * 2))))
import bana
import tests._util
bana.initialize()
maya.standalone.initialize()
def _nodeCount():
    """Return the total number of dependency nodes in the current scene."""
    total = 0
    it = OpenMaya.MItDependencyNodes()
    while not it.isDone():
        total += 1
        it.next()
    return total
class MFnDependencyNodeTest(unittest.TestCase):
    """Tests for the bana extensions bnFind/bnGet on MFnDependencyNode."""

    def setUp(self):
        # Build a fresh scene with a known node hierarchy for each test.
        OpenMaya.MFileIO.newFile(True)
        context = tests._util.Context()
        master = tests._util.createTransform(context, name='master')
        # Several transforms whose names exercise the glob patterns below.
        tests._util.createTransform(context, name='node', parent=master)
        tests._util.createTransform(context, name='awesome_node', parent=master)
        tests._util.createTransform(context, name='node_awesome', parent=master)
        tests._util.createTransform(context, name='n0de', parent=master)
        # Two branches, each with a nested duplicate named 'node'.
        root1 = tests._util.createTransform(context, name='root_1', parent=master)
        child1 = tests._util.createTransform(context, name='child_1', parent=root1)
        tests._util.createTransform(context, name='node', parent=child1)
        root2 = tests._util.createTransform(context, name='root_2', parent=master)
        child2 = tests._util.createTransform(context, name='child_2', parent=root2)
        grandchild = tests._util.createTransform(context, name='grandchild', parent=child2)
        tests._util.createTransform(context, name='node', parent=grandchild)
        # A poly cube with two intermediate meshes and one templated mesh.
        cube, cubeShape = tests._util.createPolyCube(context, name='cube', parent=master)
        intermediary1 = tests._util.createDagNode(context, 'mesh', name='intermediary1', parent=cube)
        context.dg.newPlugValueBool(intermediary1.findPlug('intermediateObject'), True)
        context.dg.connect(cubeShape.findPlug('outMesh'), intermediary1.findPlug('inMesh'))
        intermediary2 = tests._util.createDagNode(context, 'mesh', name='intermediary2', parent=cube)
        context.dg.newPlugValueBool(intermediary2.findPlug('intermediateObject'), True)
        context.dg.connect(cubeShape.findPlug('outMesh'), intermediary2.findPlug('inMesh'))
        template = tests._util.createDagNode(context, 'mesh', name='template', parent=cube)
        context.dg.newPlugValueBool(template.findPlug('template'), True)
        context.dg.connect(cubeShape.findPlug('outMesh'), template.findPlug('inMesh'))
        sphere, sphereShape = tests._util.createNurbsSphere(context, name='sphere', parent=master)
        circle, circleShape = tests._util.createNurbsCircle(context, name='circle', parent=master)
        # A namespaced light to exercise 'awesome:*' lookups.
        OpenMaya.MNamespace.addNamespace('awesome')
        light = tests._util.createTransform(context, name='awesome:light', parent=master)
        tests._util.createDagNode(context, 'pointLight', name='awesome:lightShape', parent=light)
        # Flush the queued DAG/DG modifications before using the scene.
        context.dag.doIt()
        context.dg.doIt()
        cmds.projectCurve(circleShape.fullPathName(), sphereShape.fullPathName())

    def test__hash__(self):
        # Two wrappers of the same node must hash identically.
        node1 = OpenMaya.MFnDependencyNode.bnGet(pattern='awesome:light')
        node2 = OpenMaya.MFnDependencyNode.bnGet(pattern='awesome:light')
        self.assertEqual(hash(node1), hash(node2))

    def test__str__(self):
        # str() yields the node's (namespaced) name.
        node = OpenMaya.MFnDependencyNode.bnGet(pattern='awesome:light')
        self.assertEqual(str(node), 'awesome:light')

    def testBnFind(self):
        # Without a pattern, bnFind visits every dependency node in the scene.
        nodes = list(OpenMaya.MFnDependencyNode.bnFind())
        self.assertEqual(len(nodes), _nodeCount())
        self.assertTrue(all(type(node) is OpenMaya.MFnDependencyNode for node in nodes))
        # Trailing-glob pattern.
        nodes = list(OpenMaya.MFnDependencyNode.bnFind(pattern='child_*'))
        self.assertEqual(len(nodes), 2)
        self.assertTrue(all(type(node) is OpenMaya.MFnDependencyNode for node in nodes))
        self.assertEqual(sorted(node.name() for node in nodes), ['child_1', 'child_2'])
        # Exact-name pattern matches every node of that name (3 duplicates).
        nodes = list(OpenMaya.MFnDependencyNode.bnFind(pattern='node'))
        self.assertEqual(len(nodes), 3)
        self.assertTrue(all(type(node) is OpenMaya.MFnDependencyNode for node in nodes))
        self.assertEqual(sorted(node.name() for node in nodes), ['node', 'node', 'node'])
        # Leading glob.
        nodes = list(OpenMaya.MFnDependencyNode.bnFind(pattern='*node'))
        self.assertEqual(len(nodes), 4)
        self.assertTrue(all(type(node) is OpenMaya.MFnDependencyNode for node in nodes))
        self.assertEqual(sorted(node.name() for node in nodes), ['awesome_node', 'node', 'node', 'node'])
        # Trailing glob.
        nodes = list(OpenMaya.MFnDependencyNode.bnFind(pattern='node*'))
        self.assertEqual(len(nodes), 4)
        self.assertTrue(all(type(node) is OpenMaya.MFnDependencyNode for node in nodes))
        self.assertEqual(sorted(node.name() for node in nodes), ['node', 'node', 'node', 'node_awesome'])
        # Mid-name glob.
        nodes = list(OpenMaya.MFnDependencyNode.bnFind(pattern='n*de'))
        self.assertEqual(len(nodes), 4)
        self.assertTrue(all(type(node) is OpenMaya.MFnDependencyNode for node in nodes))
        self.assertEqual(sorted(node.name() for node in nodes), ['n0de', 'node', 'node', 'node'])
        # Built-in (default scene) nodes are also found.
        nodes = list(OpenMaya.MFnDependencyNode.bnFind(pattern='default*Set'))
        self.assertEqual(len(nodes), 2)
        self.assertTrue(all(type(node) is OpenMaya.MFnDependencyNode for node in nodes))
        self.assertEqual(sorted(node.name() for node in nodes), ['defaultLightSet', 'defaultObjectSet'])

    def testBnGet(self):
        # bnGet returns None when the pattern is ambiguous (multiple matches).
        self.assertIsNone(OpenMaya.MFnDependencyNode.bnGet(pattern='node'))
        self.assertIsNone(OpenMaya.MFnDependencyNode.bnGet(pattern='child_*'))
        node = OpenMaya.MFnDependencyNode.bnGet(pattern='awesome:light')
        self.assertIsInstance(node, OpenMaya.MFnDependencyNode)
        self.assertEqual(node.name(), 'awesome:light')
        node = OpenMaya.MFnDependencyNode.bnGet(pattern='n0de')
        self.assertIsInstance(node, OpenMaya.MFnDependencyNode)
        self.assertEqual(node.name(), 'n0de')
        node = OpenMaya.MFnDependencyNode.bnGet(pattern='time1')
        self.assertIsInstance(node, OpenMaya.MFnDependencyNode)
        self.assertEqual(node.name(), 'time1')
# Entry point: run this test module through the project's test runner.
if __name__ == '__main__':
    from tests.run import run
    run('__main__')
| 44.223776 | 105 | 0.699873 |
3bf34cd136dc6acade0caf3e5dc2f8eb0381ef06 | 6,635 | py | Python | native_client_sdk/src/build_tools/build_app.py | zealoussnow/chromium | fd8a8914ca0183f0add65ae55f04e287543c7d4a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 14,668 | 2015-01-01T01:57:10.000Z | 2022-03-31T23:33:32.000Z | native_client_sdk/src/build_tools/build_app.py | zealoussnow/chromium | fd8a8914ca0183f0add65ae55f04e287543c7d4a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 395 | 2020-04-18T08:22:18.000Z | 2021-12-08T13:04:49.000Z | native_client_sdk/src/build_tools/build_app.py | zealoussnow/chromium | fd8a8914ca0183f0add65ae55f04e287543c7d4a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 5,941 | 2015-01-02T11:32:21.000Z | 2022-03-31T16:35:46.000Z | #!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import json
import os
import re
import sys
if sys.version_info < (2, 7, 0):
sys.stderr.write("python 2.7 or later is required run this script\n")
sys.exit(1)
import buildbot_common
import build_projects
import build_version
import easy_template
import parse_dsc
from build_paths import SDK_SRC_DIR, OUT_DIR, SDK_RESOURCE_DIR
sys.path.append(os.path.join(SDK_SRC_DIR, 'tools'))
import getos
import oshelpers
def RemoveBuildCruft(outdir):
  """Delete build byproducts under |outdir| that must not ship in the package.

  Removes dependency/object files (.d/.o), dir.stamp markers, unstripped
  .nexe binaries, and per-example manifest.json files. The per-example
  manifests are deleted because CWS complains about them; the master
  manifest.json is generated after this function runs.
  """
  unstripped_nexe = re.compile(r'_unstripped_.*?\.nexe')
  for dirpath, _, filenames in os.walk(outdir):
    for name in filenames:
      full_path = os.path.join(dirpath, name)
      extension = os.path.splitext(full_path)[1]
      is_cruft = (extension in ('.d', '.o')
                  or name in ('dir.stamp', 'manifest.json')
                  or unstripped_nexe.search(name) is not None)
      if is_cruft:
        buildbot_common.RemoveFile(full_path)
def StripNexes(outdir, platform, pepperdir):
  """Run the architecture-matching strip tool over every .nexe/.so in |outdir|."""
  def _DetectArch(full_path, filename):
    # System glibc libraries live under lib32/lib64 directories and are
    # always x86 (ARM doesn't support glibc yet).
    match = re.search(r'lib(32|64).*\.so', full_path)
    if match:
      return 'x86_' + match.group(1)
    stem, extension = os.path.splitext(filename)
    if extension in ('.nexe', '.so'):
      # Otherwise the architecture is encoded as a filename suffix.
      for candidate in ('x86_64', 'x86_32', 'arm'):
        if stem.endswith(candidate):
          return candidate
    return None

  for dirpath, _, filenames in os.walk(outdir):
    for filename in filenames:
      full_path = os.path.join(dirpath, filename)
      arch = _DetectArch(full_path, filename)
      if arch:
        strip_tool = GetStrip(pepperdir, platform, arch, 'newlib')
        buildbot_common.Run([strip_tool, full_path])
def GetStrip(pepperdir, platform, arch, toolchain):
  """Return the path of the nacl strip binary for |arch| inside |pepperdir|.

  Raises KeyError if |arch| is not one of x86_32/x86_64/arm.
  """
  base_arches = {'x86_32': 'x86', 'x86_64': 'x86', 'arm': 'arm'}
  strip_prefixes = {'x86_32': 'i686', 'x86_64': 'x86_64', 'arm': 'arm'}
  toolchain_name = '%s_%s_%s' % (platform, base_arches[arch], toolchain)
  return os.path.join(pepperdir, 'toolchain', toolchain_name, 'bin',
                      '%s-nacl-strip' % strip_prefixes[arch])
def main(args):
  """Build the Native Client SDK examples Chrome app and zip it.

  Pipeline: parse flags, wipe/recreate the output app dir, select the example
  projects, build them, strip cruft and binaries, generate the aggregated
  manifest.json, and zip the result into examples.zip. Returns 0 on success.
  """
  parser = argparse.ArgumentParser()
  parser.add_argument('-c', '--channel',
      help='Channel to display in the name of the package.')
  # To setup bash completion for this command first install optcomplete
  # and then add this line to your .bashrc:
  #  complete -F _optcomplete build_app.py
  try:
    import optcomplete
    optcomplete.autocomplete(parser)
  except ImportError:
    pass
  options = parser.parse_args(args)
  if options.channel:
    if options.channel not in ('Dev', 'Beta'):
      parser.error('Unknown channel: %s' % options.channel)
  toolchains = ['newlib', 'glibc']
  # Derive the pepper_<ver> SDK directory from the current Chrome version.
  pepper_ver = str(int(build_version.ChromeMajorVersion()))
  pepperdir = os.path.join(OUT_DIR, 'pepper_' + pepper_ver)
  app_dir = os.path.join(OUT_DIR, 'naclsdk_app')
  app_examples_dir = os.path.join(app_dir, 'examples')
  sdk_resources_dir = SDK_RESOURCE_DIR
  platform = getos.GetPlatform()
  # Start from a clean output directory.
  buildbot_common.RemoveDir(app_dir)
  buildbot_common.MakeDir(app_dir)
  # Add some dummy directories so build_projects doesn't complain...
  buildbot_common.MakeDir(os.path.join(app_dir, 'tools'))
  buildbot_common.MakeDir(os.path.join(app_dir, 'toolchain'))
  config = 'Release'
  # Only non-experimental, packageable examples from these destinations are
  # included in the app.
  filters = {}
  filters['DISABLE_PACKAGE'] = False
  filters['EXPERIMENTAL'] = False
  filters['TOOLS'] = toolchains
  filters['DEST'] = ['examples/api', 'examples/getting_started',
                     'examples/demo', 'examples/tutorial']
  tree = parse_dsc.LoadProjectTree(SDK_SRC_DIR, include=filters)
  build_projects.UpdateHelpers(app_dir, clobber=True)
  build_projects.UpdateProjects(app_dir, tree, clobber=False,
                                toolchains=toolchains, configs=[config],
                                first_toolchain=True)
  # Collect permissions from each example, and aggregate them.
  def MergeLists(list1, list2):
    # Order-preserving union of two lists.
    return list1 + [x for x in list2 if x not in list1]
  all_permissions = []
  all_socket_permissions = []
  all_filesystem_permissions = []
  for _, project in parse_dsc.GenerateProjects(tree):
    permissions = project.get('PERMISSIONS', [])
    all_permissions = MergeLists(all_permissions, permissions)
    socket_permissions = project.get('SOCKET_PERMISSIONS', [])
    all_socket_permissions = MergeLists(all_socket_permissions,
                                        socket_permissions)
    filesystem_permissions = project.get('FILESYSTEM_PERMISSIONS', [])
    all_filesystem_permissions = MergeLists(all_filesystem_permissions,
                                            filesystem_permissions)
  # socket/fileSystem permissions are nested objects in the manifest.
  if all_socket_permissions:
    all_permissions.append({'socket': all_socket_permissions})
  if all_filesystem_permissions:
    all_permissions.append({'fileSystem': all_filesystem_permissions})
  pretty_permissions = json.dumps(all_permissions, sort_keys=True, indent=4)
  for filename in ['background.js', 'icon128.png']:
    buildbot_common.CopyFile(os.path.join(sdk_resources_dir, filename),
                             os.path.join(app_examples_dir, filename))
  os.environ['NACL_SDK_ROOT'] = pepperdir
  build_projects.BuildProjects(app_dir, tree, deps=False, clean=False,
                               config=config)
  RemoveBuildCruft(app_dir)
  StripNexes(app_dir, platform, pepperdir)
  # Add manifest.json after RemoveBuildCruft... that function removes the
  # manifest.json files for the individual examples.
  name = 'Native Client SDK'
  if options.channel:
    name += ' (%s)' % options.channel
  template_dict = {
    'name': name,
    'channel': options.channel,
    'description':
        'Native Client SDK examples, showing API use and key concepts.',
    'key': False,  # manifests with "key" are rejected when uploading to CWS.
    'permissions': pretty_permissions,
    'version': build_version.ChromeVersionNoTrunk()
  }
  easy_template.RunTemplateFile(
      os.path.join(sdk_resources_dir, 'manifest.json.template'),
      os.path.join(app_examples_dir, 'manifest.json'),
      template_dict)
  # Zip the examples directory; note this changes the process cwd.
  app_zip = os.path.join(app_dir, 'examples.zip')
  os.chdir(app_examples_dir)
  oshelpers.Zip([app_zip, '-r', '*'])
  return 0
if __name__ == '__main__':
  # Pass command-line arguments (minus the program name) and exit with
  # main()'s return code.
  sys.exit(main(sys.argv[1:]))
| 34.921053 | 79 | 0.674755 |
5f9d1b037a1a3e14fa08e5fbc1e58e076d02efe8 | 1,246 | py | Python | ngallery/urls.py | manti-by/ngallery | e87340707d15f3f35ecf7cf761232dd5f40e1389 | [
"BSD-3-Clause"
] | null | null | null | ngallery/urls.py | manti-by/ngallery | e87340707d15f3f35ecf7cf761232dd5f40e1389 | [
"BSD-3-Clause"
] | null | null | null | ngallery/urls.py | manti-by/ngallery | e87340707d15f3f35ecf7cf761232dd5f40e1389 | [
"BSD-3-Clause"
] | null | null | null | """core URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/4.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf import settings
from django.contrib import admin
from django.urls import path, include
from ngallery.home.views import index_view
# URL routing table: the admin site, the API app, and the home page.
# Order matters: Django matches patterns top to bottom.
urlpatterns = [
    path("admin/", admin.site.urls),
    path("api/", include("ngallery.api.urls")),
    path("", index_view, name="index"),
]
if settings.DEBUG:
    from django.conf.urls.static import static
    from django.contrib.staticfiles.urls import staticfiles_urlpatterns
    # Serve static and media files from development server
    urlpatterns += staticfiles_urlpatterns()
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 34.611111 | 80 | 0.728732 |
40dfbc651da1f1b6d6fb554498701691ec786031 | 918 | py | Python | src/adafruit-circuitpython-bundle-4.x-mpy-20190713/examples/matrixkeypad_simpletest.py | mbaaba/solar_panel | 42059d8c61320494ad1298065dbc50cd9b3bd51e | [
"MIT"
] | 1 | 2020-04-13T16:10:53.000Z | 2020-04-13T16:10:53.000Z | infra/libs-400rc2-20190512/examples/matrixkeypad_simpletest.py | jadudm/feather-isa | b7419e6698c3f64be4d8122656eb8124631ca859 | [
"MIT"
] | null | null | null | infra/libs-400rc2-20190512/examples/matrixkeypad_simpletest.py | jadudm/feather-isa | b7419e6698c3f64be4d8122656eb8124631ca859 | [
"MIT"
] | null | null | null | import time
import digitalio
import board
import adafruit_matrixkeypad
# Membrane 3x4 matrix keypad - https://www.adafruit.com/product/419
cols = [digitalio.DigitalInOut(x) for x in (board.D9, board.D6, board.D5)]
rows = [digitalio.DigitalInOut(x) for x in (board.D13, board.D12, board.D11, board.D10)]
# 3x4 matrix keypad - Rows and columns are mixed up for https://www.adafruit.com/product/3845
# Use the same wiring as in the guide with the following setup lines:
# cols = [digitalio.DigitalInOut(x) for x in (board.D11, board.D13, board.D9)]
# rows = [digitalio.DigitalInOut(x) for x in (board.D12, board.D5, board.D6, board.D10)]
# Logical layout: keys[row][col] is the value reported for that switch.
keys = ((1, 2, 3),
        (4, 5, 6),
        (7, 8, 9),
        ('*', 0, '#'))
keypad = adafruit_matrixkeypad.Matrix_Keypad(rows, cols, keys)
# Poll roughly 10 times per second and print any keys currently held down.
while True:
    keys = keypad.pressed_keys
    if keys:
        print("Pressed: ", keys)
    time.sleep(0.1)
| 34 | 94 | 0.659041 |
cb5c07a42b70347d581414e2b3223efc7c51b8ac | 4,755 | py | Python | visualizations/network_communities/community_detection.py | junhaodong/misc | de367573ebaae2d75cf319b62ed1d41d99508bf8 | [
"MIT"
] | 2 | 2016-03-21T20:28:07.000Z | 2017-09-24T13:53:23.000Z | visualizations/network_communities/community_detection.py | junhaodong/misc | de367573ebaae2d75cf319b62ed1d41d99508bf8 | [
"MIT"
] | null | null | null | visualizations/network_communities/community_detection.py | junhaodong/misc | de367573ebaae2d75cf319b62ed1d41d99508bf8 | [
"MIT"
] | null | null | null | import networkx as nx
from networkx.algorithms import community
import matplotlib.pyplot as plt
import random
import itertools
def girvan_newman(G):
    """Yield successive community partitions of ``G`` (Girvan-Newman).

    Each yielded value is a tuple ``(communities, removed_edges)`` where
    ``communities`` is a tuple of node sets (one set per community) and
    ``removed_edges`` lists every edge removed up to that step.
    """
    if not G.number_of_edges():
        return
    # Work on an undirected copy so the input graph is never mutated;
    # self-loops carry no betweenness and are dropped up front.
    working = G.copy().to_undirected()
    working.remove_edges_from(nx.selfloop_edges(working))
    removed = []
    while working.number_of_edges():
        yield _without_max_betweenness_edges(working, removed)
def _without_max_betweenness_edges(G, edges_removed):
    """Delete max-betweenness edges from ``G`` (in place) until the number of
    connected components grows, then return the new components together with
    a copy of every edge removed so far (``edges_removed`` is also extended).
    """
    initial_count = nx.number_connected_components(G)
    current_count = initial_count
    while current_count <= initial_count:
        # Repeatedly remove the edge carrying the most shortest paths.
        centrality = nx.edge_betweenness_centrality(G)
        busiest_edge = max(centrality, key=centrality.get)
        G.remove_edge(*busiest_edge)
        edges_removed.append(busiest_edge)
        components = tuple(nx.connected_components(G))
        current_count = len(components)
    return components, list(edges_removed)
def build_test_graph(z_in, z_out, num_groups=4, group_size=32):
    """Return a test graph of random equal sized community groups
    Parameters:
    -----------
    z_in -- average number of edges from a node to nodes in the same community
    z_out -- average number of edges from a node to nodes in other communities
    """
    # Create groups and edges within groups
    groups = []
    # Convert the desired average intra-community degree into an
    # Erdos-Renyi edge probability over the group_size - 1 possible partners.
    p_in = z_in / (group_size - 1)
    for i in range(num_groups):
        group = nx.erdos_renyi_graph(group_size, p_in)
        # Tag every node with the id of the community it was generated in.
        nx.set_node_attributes(group, dict([(node, i) for node in group.nodes()]), 'group_id')
        groups.append(group)
    G = nx.disjoint_union_all(groups)
    # Create edges between groups
    # Each node has (num_groups - 1) * group_size possible external partners.
    p_out = z_out / ((num_groups - 1) * group_size)
    edges = itertools.combinations(G.nodes(), 2)
    for i, j in edges:
        # NOTE(review): G.node was removed in networkx 2.4; newer versions
        # spell this G.nodes — confirm the pinned networkx version.
        if G.node[i]['group_id'] != G.node[j]['group_id'] and random.random() < p_out:
            G.add_edge(i, j)
    return G
def calc_test_result(G, partitions, num_groups=4):
    """Return the fraction of nodes whose detected community matches their
    true 'group_id' label, maximized over every assignment of ``num_groups``
    of the partition groups to labels.

    Assumes the nodes in G are labeled (node attribute 'group_id') based on
    their true community.
    """
    def _correct_count(ordering):
        # Nodes whose true label equals the label implied by the position
        # of their group in this ordering.
        total = 0
        for label, group in enumerate(ordering):
            total += sum(1 for node in group
                         if G.node[node]['group_id'] == label)
        return total

    best = max(
        (_correct_count(perm)
         for perm in itertools.permutations(partitions, num_groups)),
        default=0)
    return best / len(G.nodes())
def plot_test_results(initial_z_out, final_z_out, max_z_out=16, step_size=0.5):
    """Plot detection accuracy while sweeping the average out-degree.

    For each z_out in [initial_z_out, final_z_out) (stepping by step_size) a
    test graph is built with intra-community degree max_z_out - z_out, the
    best-modularity Girvan-Newman partition is found, and the percentage of
    correctly classified nodes is recorded and plotted against z_out.
    """
    accuracy_by_z_out = {}
    z_out = initial_z_out
    while z_out < final_z_out:
        graph = build_test_graph(max_z_out - z_out, z_out)
        # Keep the partition with the highest modularity score.
        best_partition, _ = max(
            girvan_newman(graph),
            key=lambda step: community.modularity(graph, step[0]))
        accuracy_by_z_out[z_out] = 100 * calc_test_result(graph, best_partition)
        z_out += step_size
    plt.figure()
    plt.title('Accuracy of Community Detection', fontsize=14)
    plt.xlabel('Avg Out-Degree', fontsize=12)
    plt.ylabel('% of Correct Nodes', fontsize=12)
    plt.autoscale()
    plt.plot(list(accuracy_by_z_out.keys()), list(accuracy_by_z_out.values()),
             'bo--', linewidth=1)
    plt.show()
def plot_communities(gml_file=None):
    """Detect communities with Girvan-Newman and save a colored plot.

    If ``gml_file`` is given, the graph is read from that GraphML file and
    the plot is saved next to it as <name>.png; otherwise a synthetic test
    graph is generated and the plot is saved as output.png.
    """
    if gml_file:
        G = nx.read_graphml(gml_file)
        fileOut = gml_file.split('.')[0] + '.png'
    else:
        G = build_test_graph(15, 1)
        fileOut = 'output.png'
    communityIter = girvan_newman(G)
    # Keep the partition (and its removed edges) with the best modularity.
    partition, removed_edges = max(communityIter, key=lambda c: community.modularity(G, c[0]))
    pos=nx.spring_layout(G, weight=None, iterations=100)
    plt.figure()
    colors = ['violet','orange','cyan','yellow','pink','green','red','gray','blue', 'black', 'indigo']
    # One color per detected community; colors[i] raises IndexError if more
    # than len(colors) communities are found.
    for i in range(len(partition)):
        nx.draw_networkx_nodes(G, pos, nodelist=list(partition[i]), node_color=colors[i], alpha=0.75)
    # Surviving edges are drawn solid; edges the algorithm removed, dashed.
    nx.draw_networkx_edges(G, pos, edgelist=list(set(G.edges() - set(removed_edges))))
    nx.draw_networkx_edges(G, pos, edgelist=removed_edges, edge_color='gray', style='dashed')
    # nx.draw_networkx_labels(G, pos, font_size=8)
    plt.title('Communities Found using Girvan-Newman', fontsize=14)
    plt.axis('off')
    plt.savefig(fileOut)
    # plt.show()
    print('Output saved to: ' + fileOut)
if __name__ == '__main__':
    # Demo: plot a synthetic test graph, then a protein-structure graph.
    plot_communities()
    plot_communities('protein_structure.graphml')
    # plot_test_results(4, 15, step_size=0.5)
| 34.963235 | 102 | 0.671083 |
6b8925da169bd3fa8927e05369633f9ec7098e6d | 16,483 | py | Python | deepmusic/musicdata.py | RichardMcSorley/tensorflow-music | f6d80554c3fd8b1c54d8051aba3776e271886c1b | [
"Apache-2.0"
] | null | null | null | deepmusic/musicdata.py | RichardMcSorley/tensorflow-music | f6d80554c3fd8b1c54d8051aba3776e271886c1b | [
"Apache-2.0"
] | null | null | null | deepmusic/musicdata.py | RichardMcSorley/tensorflow-music | f6d80554c3fd8b1c54d8051aba3776e271886c1b | [
"Apache-2.0"
] | null | null | null | # Copyright 2016 Conchylicultor. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Loads the midi song, build the dataset
"""
from tqdm import tqdm # Progress bar when creating dataset
import pickle # Saving the data
import os # Checking file existence
import numpy as np # Batch data
import json # Load initiators (inputs for generating new songs)
from deepmusic.moduleloader import ModuleLoader
from deepmusic.midiconnector import MidiConnector
from deepmusic.midiconnector import MidiInvalidException
import deepmusic.songstruct as music
class MusicData:
    """Dataset class
    """
    def __init__(self, args):
        """Load all conversations
        Args:
            args: parameters of the model
        """
        # Filename and directories constants
        self.DATA_VERSION = '0.2'  # Assert compatibility between versions
        self.DATA_DIR_MIDI = 'data/midi'  # Originals midi files
        self.DATA_DIR_PLAY = 'data/play'  # Target folder to show the reconstructed files
        self.DATA_DIR_SAMPLES = 'data/samples'  # Training/testing samples after pre-processing
        self.DATA_SAMPLES_RAW = 'raw'  # Unpreprocessed songs container tag
        self.DATA_SAMPLES_EXT = '.pkl'
        self.TEST_INIT_FILE = 'data/test/initiator.json'  # Initial input for the generated songs
        self.FILE_EXT = '.mid'  # Could eventually add support for other format later ?
        # Model parameters
        self.args = args
        # Dataset
        self.songs = []
        self.songs_train = None
        self.songs_test = None
        # TODO: Dynamic loading of the the associated dataset flag (ex: data/samples/pianoroll/...)
        self.batch_builder = ModuleLoader.batch_builders.build_module(args)
        if not self.args.test:  # No need to load the dataset when testing
            self._restore_dataset()
            if self.args.play_dataset:
                print('Play some songs from the formatted data')
                # Generate songs
                for i in range(min(10, len(self.songs))):
                    raw_song = self.batch_builder.reconstruct_song(self.songs[i])
                    MidiConnector.write_song(raw_song, os.path.join(self.DATA_DIR_PLAY, str(i)))
                # TODO: Display some images corresponding to the loaded songs
                raise NotImplementedError('Can\'t play a song for now')
            self._split_dataset()  # Warning: the list order will determine the train/test sets (so important that it don't change from run to run)
            # Plot some stats:
            print('Loaded: {} songs ({} train/{} test)'.format(
                len(self.songs_train) + len(self.songs_test),
                len(self.songs_train),
                len(self.songs_test))
            )  # TODO: Print average, max, min duration
    def _restore_dataset(self):
        """Load/create the conversations data
        Done in two steps:
            * Extract the midi files as a raw song format
            * Transform this raw song as neural networks compatible input
        """
        # Construct the dataset names
        samples_path_generic = os.path.join(
            self.args.root_dir,
            self.DATA_DIR_SAMPLES,
            self.args.dataset_tag + '-{}' + self.DATA_SAMPLES_EXT
        )
        samples_path_raw = samples_path_generic.format(self.DATA_SAMPLES_RAW)
        samples_path_preprocessed = samples_path_generic.format(ModuleLoader.batch_builders.get_chosen_name())
        # TODO: the _restore_samples from the raw songs and precomputed database should have different versions number
        # Restoring precomputed database
        if os.path.exists(samples_path_preprocessed):
            print('Restoring dataset from {}...'.format(samples_path_preprocessed))
            self._restore_samples(samples_path_preprocessed)
        # First time we load the database: creating all files
        else:
            print('Training samples not found. Creating dataset from the songs...')
            # Restoring raw songs
            if os.path.exists(samples_path_raw):
                print('Restoring songs from {}...'.format(samples_path_raw))
                self._restore_samples(samples_path_raw)
            # First time we load the database: creating all files
            else:
                print('Raw songs not found. Extracting from midi files...')
                self._create_raw_songs()
                print('Saving raw songs...')
                self._save_samples(samples_path_raw)
            # At this point, self.songs contain the list of the raw songs. Each
            # song is then preprocessed by the batch builder
            # Generating the data from the raw songs
            print('Pre-processing songs...')
            for i, song in tqdm(enumerate(self.songs), total=len(self.songs)):
                self.songs[i] = self.batch_builder.process_song(song)
            print('Saving dataset...')
            np.random.shuffle(self.songs)  # Important to do that before saving so the train/test set will be fixed each time we reload the dataset
            self._save_samples(samples_path_preprocessed)
    def _restore_samples(self, samples_path):
        """ Load samples from file
        Args:
            samples_path (str): The path where to load the model (all dirs should exist)
        Return:
            List[Song]: The training data
        """
        with open(samples_path, 'rb') as handle:
            data = pickle.load(handle)  # Warning: If adding something here, also modifying saveDataset
            # Check the version
            current_version = data['version']
            if current_version != self.DATA_VERSION:
                raise UserWarning('Present configuration version {0} does not match {1}.'.format(current_version, self.DATA_VERSION))
            # Restore parameters
            self.songs = data['songs']
    def _save_samples(self, samples_path):
        """ Save samples to file
        Args:
            samples_path (str): The path where to save the model (all dirs should exist)
        """
        with open(samples_path, 'wb') as handle:
            data = {  # Warning: If adding something here, also modifying loadDataset
                'version': self.DATA_VERSION,
                'songs': self.songs
            }
            pickle.dump(data, handle, -1)  # Using the highest protocol available
    def _create_raw_songs(self):
        """ Create the database from the midi files
        """
        midi_dir = os.path.join(self.args.root_dir, self.DATA_DIR_MIDI, self.args.dataset_tag)
        midi_files = [os.path.join(midi_dir, f) for f in os.listdir(midi_dir) if f.endswith(self.FILE_EXT)]
        for filename in tqdm(midi_files):
            try:
                new_song = MidiConnector.load_file(filename)
            except MidiInvalidException as e:
                tqdm.write('File ignored ({}): {}'.format(filename, e))
                # NOTE(review): invalid midi files are permanently deleted
                # from disk here — confirm this destructive side effect is
                # intended.
                os.remove(filename)
            else:
                self.songs.append(new_song)
                tqdm.write('Song loaded {}: {} tracks, {} notes, {} ticks/beat'.format(
                    filename,
                    len(new_song.tracks),
                    sum([len(t.notes) for t in new_song.tracks]),
                    new_song.ticks_per_beat
                ))
        if not self.songs:
            raise ValueError('Empty dataset. Check that the folder exist and contains supported midi files.')
    def _convert_song2array(self, song):
        """ Convert a given song to a numpy multi-dimensional array (piano roll)
        The song is temporally normalized, meaning that all ticks and duration will be converted to a specific
        ticks_per_beat independent unit.
        For now, the changes of tempo are ignored. Only 4/4 is supported.
        Warning: The duration is ignored: All note have the same duration (1 unit)
        Args:
            song (Song): The song to convert
        Return:
            Array: the numpy array: a binary matrix of shape [NB_NOTES, song_length]
        """
        # Convert the absolute ticks in standardized unit
        song_length = len(song)
        # NOTE(review): self._get_scale is not defined anywhere in this
        # class — this method would raise AttributeError if called; confirm
        # whether it is dead code or the helper was lost.
        scale = self._get_scale(song)
        # TODO: Not sure why this plot a decimal value (x.66). Investigate...
        # print(song_length/scale)
        # Use sparse array instead ?
        piano_roll = np.zeros([music.NB_NOTES, int(np.ceil(song_length/scale))], dtype=int)
        # Adding all notes
        for track in song.tracks:
            for note in track.notes:
                piano_roll[note.get_relative_note()][note.tick//scale] = 1
        return piano_roll
    def _convert_array2song(self, array):
        """ Create a new song from a numpy array
        A note will be created for each non empty case of the array. The song will contain a single track, and use the
        default beats_per_tick as midi resolution
        For now, the changes of tempo are ignored. Only 4/4 is supported.
        Warning: All note have the same duration, the default value defined in music.Note
        Args:
            np.array: the numpy array (Warning: could be a array of int or float containing the prediction before the sigmoid)
        Return:
            song (Song): The song to convert
        """
        new_song = music.Song()
        main_track = music.Track()
        # NOTE(review): self._get_scale is not defined in this class — see
        # _convert_song2array.
        scale = self._get_scale(new_song)
        for index, x in np.ndenumerate(array):  # Add some notes
            if x > 1e-12:  # Note added (TODO: What should be the condition, =1 ? sigmoid>0.5 ?)
                new_note = music.Note()
                new_note.set_relative_note(index[0])
                new_note.tick = index[1] * scale  # Absolute time in tick from the beginning
                main_track.notes.append(new_note)
        new_song.tracks.append(main_track)
        return new_song
    def _split_dataset(self):
        """ Create the test/train set from the loaded songs
        The dataset has been shuffled when calling this function (Warning: the shuffling
        is done and fixed before saving the dataset the first time so it is important to
        NOT call shuffle a second time)
        """
        split_nb = int(self.args.ratio_dataset * len(self.songs))
        self.songs_train = self.songs[:split_nb]
        self.songs_test = self.songs[split_nb:]
        self.songs = None  # Not needed anymore (free some memory)
    def get_batches(self):
        """ Prepare the batches for the current epoch
        WARNING: The songs are not shuffled in this functions. We leave the choice
        to the batch_builder to manage the shuffling
        Return:
            list[Batch], list[Batch]: The batches for the training and testing set (can be generators)
        """
        return (
            self.batch_builder.get_list(self.songs_train, name='train'),
            self.batch_builder.get_list(self.songs_test, name='test'),
        )
    # def get_batches_test(self, ):  # TODO: Should only return a single batch (loading done in main class)
    #     """ Return the batch which initiate the RNN when generating
    #     The initial batches are loaded from a json file containing the first notes of the song. The note values
    #     are the standard midi ones. Here is an examples of an initiator file:
    #     Args:
    #         TODO
    #     Return:
    #         Batch: The generated batch
    #     """
    #     assert self.args.batch_size == 1
    #     batch = None  # TODO
    #     return batch
    def get_batches_test_old(self):  # TODO: This is the old version. Ideally should use the version above
        """ Return the batches which initiate the RNN when generating
        The initial batches are loaded from a json file containing the first notes of the song. The note values
        are the standard midi ones. Here is an examples of an initiator file:
        ```
        {"initiator":[
            {"name":"Simple_C4",
            "seq":[
                {"notes":[60]}
            ]},
            {"name":"some_chords",
            "seq":[
                {"notes":[60,64]}
                {"notes":[66,68,71]}
                {"notes":[60,64]}
            ]}
        ]}
        ```
        Return:
            List[Batch], List[str]: The generated batches with the associated names
        """
        assert self.args.batch_size == 1
        batches = []
        names = []
        with open(self.TEST_INIT_FILE) as init_file:
            initiators = json.load(init_file)
        for initiator in initiators['initiator']:
            raw_song = music.Song()
            main_track = music.Track()
            current_tick = 0
            for seq in initiator['seq']:  # We add a few notes
                for note_pitch in seq['notes']:
                    new_note = music.Note()
                    new_note.note = note_pitch
                    new_note.tick = current_tick
                    main_track.notes.append(new_note)
                current_tick += 1
            raw_song.tracks.append(main_track)
            raw_song.normalize(inverse=True)
            batch = self.batch_builder.process_batch(raw_song)
            names.append(initiator['name'])
            batches.append(batch)
        return batches, names
    @staticmethod
    def _convert_to_piano_rolls(outputs):
        """ Create songs from the decoder outputs.
        Reshape the list of outputs to list of piano rolls
        Args:
            outputs (List[np.array]): The list of the predictions of the decoder
        Return:
            List[np.array]: the list of the songs (one song by batch) as piano roll
        """
        # Extract the batches and recreate the array for each batch
        piano_rolls = []
        for i in range(outputs[0].shape[0]):  # Iterate over the batches
            piano_roll = None
            for j in range(len(outputs)):  # Iterate over the sample length
                # outputs[j][i, :] has shape [NB_NOTES, 1]
                if piano_roll is None:
                    piano_roll = [outputs[j][i, :]]
                else:
                    piano_roll = np.append(piano_roll, [outputs[j][i, :]], axis=0)
            # NOTE(review): when len(outputs) == 1, piano_roll is still a
            # plain list here and has no .T — confirm outputs always has
            # more than one timestep.
            piano_rolls.append(piano_roll.T)
        return piano_rolls
    def visit_recorder(self, outputs, base_dir, base_name, recorders, chosen_labels=None):
        """ Save the predicted output songs using the given recorder
        Args:
            outputs (List[np.array]): The list of the predictions of the decoder
            base_dir (str): Path were to save the outputs
            base_name (str): filename of the output (without the extension)
            recorders (List[Obj]): Interfaces called to convert the song into a file (ex: midi or png). The recorders
                need to implement the method write_song (the method has to add the file extension) and the
                method get_input_type.
            chosen_labels (list[np.Array[batch_size, int]]): the chosen class at each timestep (useful to reconstruct the generated song)
        """
        if not os.path.exists(base_dir):
            os.makedirs(base_dir)
        for batch_id in range(outputs[0].shape[0]):  # Loop over batch_size
            song = self.batch_builder.reconstruct_batch(outputs, batch_id, chosen_labels)
            for recorder in recorders:
                # 'input' shadows the builtin of the same name (kept as-is).
                if recorder.get_input_type() == 'song':
                    input = song
                elif recorder.get_input_type() == 'array':
                    #input = self._convert_song2array(song)
                    continue  # TODO: For now, pianoroll desactivated
                else:
                    # NOTE(review): this message calls .format() without a
                    # {} placeholder, so the input type is never shown.
                    raise ValueError('Unknown recorder input type.'.format(recorder.get_input_type()))
                base_path = os.path.join(base_dir, base_name + '-' + str(batch_id))
                recorder.write_song(input, base_path)
| 42.481959 | 147 | 0.612146 |
3c1954ce6b9b5604a0ea353118851798eec94df2 | 2,906 | py | Python | scripts/strava_auth_handler.py | pnposch/runpandas | 25388c18b52dfcc168e81922b8ba20ca93adad20 | [
"MIT"
] | 11 | 2020-12-04T20:43:23.000Z | 2022-03-16T19:19:12.000Z | scripts/strava_auth_handler.py | pnposch/runpandas | 25388c18b52dfcc168e81922b8ba20ca93adad20 | [
"MIT"
] | 45 | 2020-06-23T02:50:31.000Z | 2022-02-15T16:56:00.000Z | scripts/strava_auth_handler.py | pnposch/runpandas | 25388c18b52dfcc168e81922b8ba20ca93adad20 | [
"MIT"
] | 4 | 2021-11-11T15:23:04.000Z | 2022-02-02T13:02:12.000Z | """
Simple Script for getting the Access token
Set up a client using stravalib
If prompted enter client ID and client secret from https://www.strava.com/settings/api
Upon authorisation, retrieve the 'code=' section of the url and input when prompted
"""
import json
import os
import webbrowser
import argparse
import time
from stravalib.client import Client
def parse_args(args):
    """Ensure a valid Strava access token exists and print it.

    Loads a previously saved token from args.output_file when present and
    not expired; otherwise runs the OAuth browser flow (the user pastes the
    'code=' value from the redirect URL back in) and saves the new token to
    the same file.

    Note: the token is stored double-encoded (json.dumps produces a string
    which json.dump then encodes again), so reading it back requires
    json.load followed by json.loads, as done below.
    """
    access_token_file_name = args.output_file
    client = Client()
    access_token_does_exist = os.path.isfile(access_token_file_name)
    access_token_doesnt_exist = not (access_token_does_exist)
    access_token_expired = True
    if access_token_does_exist:
        with open(access_token_file_name, "r") as f:
            # Double-decode: the file holds a JSON-encoded JSON string.
            token_response = json.load(f)
            token_response = json.loads(token_response)
            token_expiry_time = token_response["expires_at"]
            current_time = time.time()
            access_token_expired = current_time > token_expiry_time
    if access_token_doesnt_exist or access_token_expired:
        client_id = args.client_id
        client_secret = args.client_secret
        scope = [
            "read",
            "read_all",
            "profile:read_all",
            "activity:read",
            "activity:read_all",
        ]
        authorize_url = client.authorization_url(
            client_id=client_id,
            redirect_uri="http://localhost:5000/authorized",
            scope=scope,
        )
        # Open the authorization url
        print("Opening: " + authorize_url)
        webbrowser.open(authorize_url)
        # Get code
        entered_code = str(input("Please enter code: "))
        # Exchange code for token:
        token_response = client.exchange_code_for_token(
            client_id=client_id, client_secret=client_secret, code=entered_code
        )
        # Save it to file so we can use it until it expires.
        access_token_string = json.dumps(token_response)
        with open(access_token_file_name, "w+") as f:
            json.dump(access_token_string, f)
    # Now we have a token_response dict either from file or from the
    # Strava API
    access_token = token_response["access_token"]
    refresh_token = token_response["refresh_token"]
    print("access_token:", access_token)
    print("refresh_token", refresh_token)
if __name__ == "__main__":
    # Command-line entry point: collect the Strava app credentials and the
    # token output path, then run the token-retrieval flow.
    parser = argparse.ArgumentParser(
        description="Get your access token to fetch data using Strava API"
    )
    parser.add_argument(
        "--client_id", required=True, help="The client id from your Strava App"
    )
    parser.add_argument(
        "--client_secret", required=True, help="The client secret from your Strava App"
    )
    parser.add_argument(
        "--output_file",
        default="access_token.json",
        help="JSON file which will be stored the access token and credentials",
    )
    parse_args(parser.parse_args())
| 33.022727 | 87 | 0.668961 |
4e92fd9fecbfa8cb0fb4475087c970f982ba00dd | 1,331 | py | Python | Unet_Mobile/predict.py | Ice833/Semantic-Segmentation | 23d23f6da3b34884c044a2253d65a1e4097adb2d | [
"MIT"
] | 1 | 2020-12-16T08:29:13.000Z | 2020-12-16T08:29:13.000Z | Unet_Mobile/predict.py | Ice833/Semantic-Segmentation | 23d23f6da3b34884c044a2253d65a1e4097adb2d | [
"MIT"
] | null | null | null | Unet_Mobile/predict.py | Ice833/Semantic-Segmentation | 23d23f6da3b34884c044a2253d65a1e4097adb2d | [
"MIT"
] | null | null | null | from nets.unet import mobilenet_unet
from PIL import Image
import numpy as np
import random
import copy
import os
random.seed(0)  # Fixed seed for reproducibility.
# Per-class RGB palette: index 0 = background (black), index 1 = green.
class_colors = [[0,0,0],[0,255,0]]
NCLASSES = 2
HEIGHT = 416
WIDTH = 416
# Build the mobilenet-backed U-Net and load the pretrained checkpoint.
model = mobilenet_unet(n_classes=NCLASSES,input_height=HEIGHT, input_width=WIDTH)
model.load_weights("logs/ep015-loss0.070-val_loss0.076.h5")
imgs = os.listdir("./img")
# Segment every image in ./img and write a blended overlay to ./img_out.
for jpg in imgs:
    img = Image.open("./img/"+jpg)
    old_img = copy.deepcopy(img)  # Keep the original for the final blend.
    # NOTE(review): 'orininal' is a typo for 'original' (kept as-is).
    orininal_h = np.array(img).shape[0]
    orininal_w = np.array(img).shape[1]
    # Resize to the network input size and scale pixels to [0, 1].
    img = img.resize((WIDTH,HEIGHT))
    img = np.array(img)
    img = img/255
    img = img.reshape(-1,HEIGHT,WIDTH,3)
    pr = model.predict(img)[0]
    # The reshape below implies the model predicts at half the input
    # resolution; take the argmax class per pixel.
    pr = pr.reshape((int(HEIGHT/2), int(WIDTH/2),NCLASSES)).argmax(axis=-1)
    seg_img = np.zeros((int(HEIGHT/2), int(WIDTH/2),3))
    colors = class_colors
    # Paint each channel from the class palette.
    for c in range(NCLASSES):
        seg_img[:,:,0] += ( (pr[:,: ] == c )*( colors[c][0] )).astype('uint8')
        seg_img[:,:,1] += ((pr[:,: ] == c )*( colors[c][1] )).astype('uint8')
        seg_img[:,:,2] += ((pr[:,: ] == c )*( colors[c][2] )).astype('uint8')
    # Upscale the mask back to the original size and overlay at 30% opacity.
    seg_img = Image.fromarray(np.uint8(seg_img)).resize((orininal_w,orininal_h))
    image = Image.blend(old_img,seg_img,0.3)
    image.save("./img_out/"+jpg)
| 27.163265 | 82 | 0.601052 |
633bf85cde584060fc4136c95bc20bb01efc0e11 | 169,888 | py | Python | dask/array/core.py | gmiretti/dask | a8bcd75f165196ba7058b52ac69138aa10042234 | [
"BSD-3-Clause"
] | null | null | null | dask/array/core.py | gmiretti/dask | a8bcd75f165196ba7058b52ac69138aa10042234 | [
"BSD-3-Clause"
] | null | null | null | dask/array/core.py | gmiretti/dask | a8bcd75f165196ba7058b52ac69138aa10042234 | [
"BSD-3-Clause"
] | null | null | null | import math
import operator
import os
import pickle
import re
import sys
import traceback
import uuid
import warnings
from bisect import bisect
from collections.abc import Iterable, Iterator, Mapping
from functools import partial, reduce, wraps
from itertools import product, zip_longest
from numbers import Integral, Number
from operator import add, getitem, mul
from threading import Lock
import numpy as np
from fsspec import get_mapper
from tlz import accumulate, concat, first, frequencies, groupby, partition
from tlz.curried import pluck
from .. import compute, config, core, threaded
from ..base import (
DaskMethodsMixin,
compute_as_if_collection,
dont_optimize,
is_dask_collection,
persist,
tokenize,
)
from ..blockwise import broadcast_dimensions
from ..context import globalmethod
from ..core import quote
from ..delayed import Delayed, delayed
from ..highlevelgraph import HighLevelGraph
from ..sizeof import sizeof
from ..utils import (
IndexCallable,
M,
SerializableLock,
cached_property,
concrete,
derived_from,
factors,
format_bytes,
funcname,
has_keyword,
ignoring,
is_arraylike,
is_dataframe_like,
is_index_like,
is_integer,
is_series_like,
ndeepmap,
ndimlist,
parse_bytes,
typename,
)
from . import chunk
from .chunk_types import is_valid_array_chunk, is_valid_chunk_type
# Keep einsum_lookup and tensordot_lookup here for backwards compatibility
from .dispatch import concatenate_lookup, einsum_lookup, tensordot_lookup # noqa: F401
from .numpy_compat import _Recurser
from .slicing import cached_cumsum, replace_ellipsis, setitem_array, slice_array
config.update_defaults({"array": {"chunk-size": "128MiB", "rechunk-threshold": 4}})
unknown_chunk_message = (
"\n\n"
"A possible solution: "
"https://docs.dask.org/en/latest/array-chunks.html#unknown-chunks\n"
"Summary: to compute chunks sizes, use\n\n"
" x.compute_chunk_sizes() # for Dask Array `x`\n"
" ddf.to_dask_array(lengths=True) # for Dask DataFrame `ddf`"
)
class PerformanceWarning(Warning):
    """Warning emitted when a chunking choice is likely to hurt performance."""
def getter(a, b, asarray=True, lock=None):
    """Extract ``a[b]``, optionally under ``lock`` and coerced to an ndarray.

    ``None`` (``np.newaxis``) entries in a tuple index are stripped out and
    re-applied after the concrete indexing, so backends that cannot handle
    new axes directly still work.
    """
    if isinstance(b, tuple) and any(x is None for x in b):
        # Index with the non-None parts first, then re-insert the new axes.
        concrete_index = tuple(x for x in b if x is not None)
        newaxis_index = tuple(
            None if x is None else slice(None, None)
            for x in b
            if not isinstance(x, Integral)
        )
        return getter(a, concrete_index, asarray=asarray, lock=lock)[newaxis_index]
    if lock:
        lock.acquire()
    try:
        result = a[b]
        # `np.matrix` is array-like, yet we still force a conversion to
        # `np.ndarray` here to preserve Dask's historical `getter` behavior.
        if asarray and (not is_arraylike(result) or isinstance(result, np.matrix)):
            result = np.asarray(result)
    finally:
        if lock:
            lock.release()
    return result
def getter_nofancy(a, b, asarray=True, lock=None):
    """Delegate straight to ``getter``.

    The distinct function name is the point: the optimization passes read
    it as a signal that the backing store does not support fancy indexing.
    """
    return getter(a, b, lock=lock, asarray=asarray)
def getter_inline(a, b, asarray=True, lock=None):
    """A ``getter`` variant that the optimizer is allowed to inline.

    Slicing tasks built with this function may be fused directly into their
    consumers, e.g. the pair::

        b = a + 1   # with a = x[:10]
        c = a * 2

    can be rewritten to ``b = x[:10] + 1`` and ``c = x[:10] * 2``, repeating
    the slice in each consumer. Such inlining is mainly relevant when the
    underlying data lives on disk.
    """
    return getter(a, b, lock=lock, asarray=asarray)
from .optimization import fuse_slice, optimize
# __array_function__ dict for mapping aliases and mismatching names
_HANDLED_FUNCTIONS = {}
def implements(*numpy_functions):
"""Register an __array_function__ implementation for dask.array.Array
Register that a function implements the API of a NumPy function (or several
NumPy functions in case of aliases) which is handled with
``__array_function__``.
Parameters
----------
\\*numpy_functions : callables
One or more NumPy functions that are handled by ``__array_function__``
and will be mapped by `implements` to a `dask.array` function.
"""
def decorator(dask_func):
for numpy_function in numpy_functions:
_HANDLED_FUNCTIONS[numpy_function] = dask_func
return dask_func
return decorator
def _should_delegate(other) -> bool:
"""Check whether Dask should delegate to the other.
This implementation follows NEP-13:
https://numpy.org/neps/nep-0013-ufunc-overrides.html#behavior-in-combination-with-python-s-binary-operations
"""
if hasattr(other, "__array_ufunc__") and other.__array_ufunc__ is None:
return True
elif (
hasattr(other, "__array_ufunc__")
and not is_valid_array_chunk(other)
and type(other).__array_ufunc__ is not Array.__array_ufunc__
):
return True
return False
def check_if_handled_given_other(f):
    """Guard a binary dunder method against types Dask should defer to.

    Returns ``NotImplemented`` when the other operand should handle the
    operation itself, ensuring proper deferral to upcast types without
    assuming unknown types are automatically downcast types.
    """
    @wraps(f)
    def guarded(self, other):
        return NotImplemented if _should_delegate(other) else f(self, other)
    return guarded
def slices_from_chunks(chunks):
    """Translate chunks tuple to a set of slices in product order

    >>> slices_from_chunks(((2, 2), (3, 3, 3)))  # doctest: +NORMALIZE_WHITESPACE
     [(slice(0, 2, None), slice(0, 3, None)),
      (slice(0, 2, None), slice(3, 6, None)),
      (slice(0, 2, None), slice(6, 9, None)),
      (slice(2, 4, None), slice(0, 3, None)),
      (slice(2, 4, None), slice(3, 6, None)),
      (slice(2, 4, None), slice(6, 9, None))]
    """
    per_dim_slices = []
    for bds in chunks:
        # Chunk start offsets along this dimension (cached cumulative sum).
        starts = cached_cumsum(bds, initial_zero=True)
        per_dim_slices.append(
            [slice(start, start + length) for start, length in zip(starts, bds)]
        )
    return list(product(*per_dim_slices))
def getem(
    arr,
    chunks,
    getitem=getter,
    shape=None,
    out_name=None,
    lock=False,
    asarray=True,
    dtype=None,
):
    """Dask getting various chunks from an array-like

    >>> getem('X', chunks=(2, 3), shape=(4, 6))  # doctest: +SKIP
    {('X', 0, 0): (getter, 'X', (slice(0, 2), slice(0, 3))),
     ('X', 1, 0): (getter, 'X', (slice(2, 4), slice(0, 3))),
     ('X', 1, 1): (getter, 'X', (slice(2, 4), slice(3, 6))),
     ('X', 0, 1): (getter, 'X', (slice(0, 2), slice(3, 6)))}

    >>> getem('X', chunks=((2, 2), (3, 3)))  # doctest: +SKIP
    {('X', 0, 0): (getter, 'X', (slice(0, 2), slice(0, 3))),
     ('X', 1, 0): (getter, 'X', (slice(2, 4), slice(0, 3))),
     ('X', 1, 1): (getter, 'X', (slice(2, 4), slice(3, 6))),
     ('X', 0, 1): (getter, 'X', (slice(0, 2), slice(3, 6)))}
    """
    name = out_name or arr
    chunks = normalize_chunks(chunks, shape, dtype=dtype)
    keys = product([name], *(range(len(bds)) for bds in chunks))
    slices = slices_from_chunks(chunks)
    # Only pass ``asarray``/``lock`` through when the getter accepts them
    # and at least one differs from its default.
    wants_extra_args = (
        has_keyword(getitem, "asarray")
        and has_keyword(getitem, "lock")
        and (not asarray or lock)
    )
    if wants_extra_args:
        tasks = [(getitem, arr, index, asarray, lock) for index in slices]
    else:
        # Common case: drop the extra parameters from the task tuples.
        tasks = [(getitem, arr, index) for index in slices]
    return dict(zip(keys, tasks))
def dotmany(A, B, leftfunc=None, rightfunc=None, **kwargs):
    """Dot product of many aligned chunks

    >>> x = np.array([[1, 2], [1, 2]])
    >>> y = np.array([[10, 20], [10, 20]])
    >>> dotmany([x, x, x], [y, y, y])
    array([[ 90, 180],
           [ 90, 180]])

    Optionally pass in functions to apply to the left and right chunks

    >>> dotmany([x, x, x], [y, y, y], rightfunc=np.transpose)
    array([[150, 150],
           [150, 150]])
    """
    # Lazily transform each side before multiplying, when requested.
    if leftfunc:
        A = (leftfunc(a) for a in A)
    if rightfunc:
        B = (rightfunc(b) for b in B)
    return sum(np.dot(a, b, **kwargs) for a, b in zip(A, B))
def _concatenate2(arrays, axes=[]):
"""Recursively concatenate nested lists of arrays along axes
Each entry in axes corresponds to each level of the nested list. The
length of axes should correspond to the level of nesting of arrays.
If axes is an empty list or tuple, return arrays, or arrays[0] if
arrays is a list.
>>> x = np.array([[1, 2], [3, 4]])
>>> _concatenate2([x, x], axes=[0])
array([[1, 2],
[3, 4],
[1, 2],
[3, 4]])
>>> _concatenate2([x, x], axes=[1])
array([[1, 2, 1, 2],
[3, 4, 3, 4]])
>>> _concatenate2([[x, x], [x, x]], axes=[0, 1])
array([[1, 2, 1, 2],
[3, 4, 3, 4],
[1, 2, 1, 2],
[3, 4, 3, 4]])
Supports Iterators
>>> _concatenate2(iter([x, x]), axes=[1])
array([[1, 2, 1, 2],
[3, 4, 3, 4]])
Special Case
>>> _concatenate2([x, x], axes=())
array([[1, 2],
[3, 4]])
"""
if axes == ():
if isinstance(arrays, list):
return arrays[0]
else:
return arrays
if isinstance(arrays, Iterator):
arrays = list(arrays)
if not isinstance(arrays, (list, tuple)):
return arrays
if len(axes) > 1:
arrays = [_concatenate2(a, axes=axes[1:]) for a in arrays]
concatenate = concatenate_lookup.dispatch(
type(max(arrays, key=lambda x: getattr(x, "__array_priority__", 0)))
)
return concatenate(arrays, axis=axes[0])
def apply_infer_dtype(func, args, kwargs, funcname, suggest_dtype="dtype", nout=None):
    """Infer the output dtype of ``func`` by evaluating it on tiny inputs.

    Each dask Array argument is replaced by a size-1 ``np.ones`` array of
    matching ``ndim`` and ``dtype``; other arguments pass through unchanged.

    Parameters
    ----------
    func: Callable
        Function whose output dtype is to be determined.
    args: List of array like
        Arguments to the function, which would usually be used. Only
        attributes ``ndim`` and ``dtype`` are used.
    kwargs: dict
        Additional ``kwargs`` to the ``func``.
    funcname: String
        Name of the calling function, to improve potential error messages.
    suggest_dtype: None/False or String
        If not ``None``, adds a suggestion to the potential error message to
        specify a dtype via the specified kwarg. Defaults to ``'dtype'``.
    nout: None or Int
        ``None`` if function returns single output, integer if many.
        Defaults to ``None``.

    Returns
    -------
    : dtype or List of dtype
        One or many dtypes (depending on ``nout``)

    Raises
    ------
    ValueError
        If evaluating ``func`` on the sample inputs raised.
    """
    sample_args = [
        np.ones((1,) * arg.ndim, dtype=arg.dtype) if isinstance(arg, Array) else arg
        for arg in args
    ]
    msg = None
    try:
        with np.errstate(all="ignore"):
            result = func(*sample_args, **kwargs)
    except Exception as e:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        tb = "".join(traceback.format_tb(exc_traceback))
        suggest = (
            (
                "Please specify the dtype explicitly using the "
                "`{dtype}` kwarg.\n\n".format(dtype=suggest_dtype)
            )
            if suggest_dtype
            else ""
        )
        # Build the message here but raise outside the handler, matching the
        # original control flow (the ValueError carries no chained context).
        msg = (
            "`dtype` inference failed in `{0}`.\n\n"
            "{1}"
            "Original error is below:\n"
            "------------------------\n"
            "{2}\n\n"
            "Traceback:\n"
            "---------\n"
            "{3}"
        ).format(funcname, suggest, repr(e), tb)
    if msg is not None:
        raise ValueError(msg)
    return result.dtype if nout is None else tuple(r.dtype for r in result)
def normalize_arg(x):
    """Normalize user provided arguments to blockwise or map_blocks.

    Two concerns are handled here:

    1. String literals that might collide with blockwise_token (``_0``,
       ``_1``, ...) are quoted by wrapping them in ``delayed``.
    2. Large objects (long lists, or anything over ~1MB per ``sizeof``) are
       placed into the graph on their own via ``dask.delayed``.
    """
    if is_dask_collection(x):
        return x
    if isinstance(x, str) and re.match(r"_\d+", x):
        return delayed(x)
    if isinstance(x, list) and len(x) >= 10:
        return delayed(x)
    if sizeof(x) > 1e6:
        return delayed(x)
    return x
def _pass_extra_kwargs(func, keys, *args, **kwargs):
"""Helper for :func:`dask.array.map_blocks` to pass `block_info` or `block_id`.
For each element of `keys`, a corresponding element of args is changed
to a keyword argument with that key, before all arguments re passed on
to `func`.
"""
kwargs.update(zip(keys, args))
return func(*args[len(keys) :], **kwargs)
def map_blocks(
    func,
    *args,
    name=None,
    token=None,
    dtype=None,
    chunks=None,
    drop_axis=[],
    new_axis=None,
    meta=None,
    **kwargs,
):
    """Map a function across all blocks of a dask array.

    Note that ``map_blocks`` will attempt to automatically determine the output
    array type by calling ``func`` on 0-d versions of the inputs. Please refer to
    the ``meta`` keyword argument below if you expect that the function will not
    succeed when operating on 0-d arrays.

    Parameters
    ----------
    func : callable
        Function to apply to every block in the array.
    args : dask arrays or other objects
    dtype : np.dtype, optional
        The ``dtype`` of the output array. It is recommended to provide this.
        If not provided, will be inferred by applying the function to a small
        set of fake data.
    chunks : tuple, optional
        Chunk shape of resulting blocks if the function does not preserve
        shape. If not provided, the resulting array is assumed to have the same
        block structure as the first input array.
    drop_axis : number or iterable, optional
        Dimensions lost by the function.
    new_axis : number or iterable, optional
        New dimensions created by the function. Note that these are applied
        after ``drop_axis`` (if present).
    token : string, optional
        The key prefix to use for the output array. If not provided, will be
        determined from the function name.
    name : string, optional
        The key name to use for the output array. Note that this fully
        specifies the output key name, and must be unique. If not provided,
        will be determined by a hash of the arguments.
    meta : array-like, optional
        The ``meta`` of the output array, when specified is expected to be an
        array of the same type and dtype of that returned when calling ``.compute()``
        on the array returned by this function. When not provided, ``meta`` will be
        inferred by applying the function to a small set of fake data, usually a
        0-d array. It's important to ensure that ``func`` can successfully complete
        computation without raising exceptions when 0-d is passed to it, providing
        ``meta`` will be required otherwise. If the output type is known beforehand
        (e.g., ``np.ndarray``, ``cupy.ndarray``), an empty array of such type dtype
        can be passed, for example: ``meta=np.array((), dtype=np.int32)``.
    **kwargs :
        Other keyword arguments to pass to function. Values must be constants
        (not dask.arrays)

    See Also
    --------
    dask.array.blockwise : Generalized operation with control over block alignment.

    Examples
    --------
    >>> import dask.array as da
    >>> x = da.arange(6, chunks=3)

    >>> x.map_blocks(lambda x: x * 2).compute()
    array([ 0,  2,  4,  6,  8, 10])

    The ``da.map_blocks`` function can also accept multiple arrays.

    >>> d = da.arange(5, chunks=2)
    >>> e = da.arange(5, chunks=2)

    >>> f = da.map_blocks(lambda a, b: a + b**2, d, e)
    >>> f.compute()
    array([ 0,  2,  6, 12, 20])

    If the function changes shape of the blocks then you must provide chunks
    explicitly.

    >>> y = x.map_blocks(lambda x: x[::2], chunks=((2, 2),))

    You have a bit of freedom in specifying chunks.  If all of the output chunk
    sizes are the same, you can provide just that chunk size as a single tuple.

    >>> a = da.arange(18, chunks=(6,))
    >>> b = a.map_blocks(lambda x: x[:3], chunks=(3,))

    If the function changes the dimension of the blocks you must specify the
    created or destroyed dimensions.

    >>> b = a.map_blocks(lambda x: x[None, :, None], chunks=(1, 6, 1),
    ...                  new_axis=[0, 2])

    If ``chunks`` is specified but ``new_axis`` is not, then it is inferred to
    add the necessary number of axes on the left.

    Map_blocks aligns blocks by block positions without regard to shape. In the
    following example we have two arrays with the same number of blocks but
    with different shape and chunk sizes.

    >>> x = da.arange(1000, chunks=(100,))
    >>> y = da.arange(100, chunks=(10,))

    The relevant attribute to match is numblocks.

    >>> x.numblocks
    (10,)
    >>> y.numblocks
    (10,)

    If these match (up to broadcasting rules) then we can map arbitrary
    functions across blocks

    >>> def func(a, b):
    ...     return np.array([a.max(), b.max()])

    >>> da.map_blocks(func, x, y, chunks=(2,), dtype='i8')
    dask.array<func, shape=(20,), dtype=int64, chunksize=(2,), chunktype=numpy.ndarray>

    >>> _.compute()
    array([ 99,   9, 199,  19, 299,  29, 399,  39, 499,  49, 599,  59, 699,
            69, 799,  79, 899,  89, 999,  99])

    Your block function get information about where it is in the array by
    accepting a special ``block_info`` or ``block_id`` keyword argument.

    >>> def func(block_info=None):
    ...     pass

    This will receive the following information:

    >>> block_info  # doctest: +SKIP
    {0: {'shape': (1000,),
         'num-chunks': (10,),
         'chunk-location': (4,),
         'array-location': [(400, 500)]},
     None: {'shape': (1000,),
            'num-chunks': (10,),
            'chunk-location': (4,),
            'array-location': [(400, 500)],
            'chunk-shape': (100,),
            'dtype': dtype('float64')}}

    For each argument and keyword arguments that are dask arrays (the positions
    of which are the first index), you will receive the shape of the full
    array, the number of chunks of the full array in each dimension, the chunk
    location (for example the fourth chunk over in the first dimension), and
    the array location (for example the slice corresponding to ``40:50``). The
    same information is provided for the output, with the key ``None``, plus
    the shape and dtype that should be returned.

    These features can be combined to synthesize an array from scratch, for
    example:

    >>> def func(block_info=None):
    ...     loc = block_info[None]['array-location'][0]
    ...     return np.arange(loc[0], loc[1])

    >>> da.map_blocks(func, chunks=((4, 4),), dtype=np.float_)
    dask.array<func, shape=(8,), dtype=float64, chunksize=(4,), chunktype=numpy.ndarray>

    >>> _.compute()
    array([0, 1, 2, 3, 4, 5, 6, 7])

    ``block_id`` is similar to ``block_info`` but contains only the ``chunk_location``:

    >>> def func(block_id=None):
    ...     pass

    This will receive the following information:

    >>> block_id  # doctest: +SKIP
    (4, 3)

    You may specify the key name prefix of the resulting task in the graph with
    the optional ``token`` keyword argument.

    >>> x.map_blocks(lambda x: x + 1, name='increment')  # doctest: +SKIP
    dask.array<increment, shape=(100,), dtype=int64, chunksize=(10,), chunktype=numpy.ndarray>

    For functions that may not handle 0-d arrays, it's also possible to specify
    ``meta`` with an empty array matching the type of the expected result. In
    the example below, ``func`` will result in an ``IndexError`` when computing
    ``meta``:

    >>> da.map_blocks(lambda x: x[2], da.random.random(5), meta=np.array(()))
    dask.array<lambda, shape=(5,), dtype=float64, chunksize=(5,), chunktype=numpy.ndarray>

    Similarly, it's possible to specify a non-NumPy array to ``meta``, and provide
    a ``dtype``:

    >>> import cupy  # doctest: +SKIP
    >>> rs = da.random.RandomState(RandomState=cupy.random.RandomState)  # doctest: +SKIP
    >>> dt = np.float32
    >>> da.map_blocks(lambda x: x[2], rs.random(5, dtype=dt), meta=cupy.array((), dtype=dt))  # doctest: +SKIP
    dask.array<lambda, shape=(5,), dtype=float32, chunksize=(5,), chunktype=cupy.ndarray>
    """
    # Fail fast with a usage hint when the first argument is not callable.
    if not callable(func):
        msg = (
            "First argument must be callable function, not %s\n"
            "Usage: da.map_blocks(function, x)\n"
            " or: da.map_blocks(function, x, y, z)"
        )
        raise TypeError(msg % type(func).__name__)
    # ``token=`` is deprecated; treat it as ``name=``.
    if token:
        warnings.warn("The token= keyword to map_blocks has been moved to name=")
        name = token
    name = "%s-%s" % (name or funcname(func), tokenize(func, *args, **kwargs))
    new_axes = {}
    # Normalize scalar drop_axis/new_axis to lists.
    if isinstance(drop_axis, Number):
        drop_axis = [drop_axis]
    if isinstance(new_axis, Number):
        new_axis = [new_axis]  # TODO: handle new_axis
    # Pair each argument with its blockwise index pattern: reversed dimension
    # indices for dask Arrays, None for everything else.
    arrs = [a for a in args if isinstance(a, Array)]
    argpairs = [
        (a, tuple(range(a.ndim))[::-1]) if isinstance(a, Array) else (a, None)
        for a in args
    ]
    # Output indices span the largest input ndim (right-aligned, reversed).
    if arrs:
        out_ind = tuple(range(max(a.ndim for a in arrs)))[::-1]
    else:
        out_ind = ()
    original_kwargs = kwargs
    # When neither dtype nor meta was given, probe the function on tiny
    # inputs to infer both (meta inference failures are tolerated).
    if dtype is None and meta is None:
        try:
            meta = compute_meta(func, dtype, *args, **kwargs)
        except Exception:
            pass
        dtype = apply_infer_dtype(func, args, original_kwargs, "map_blocks")
    # Remove dropped dimensions from the output index pattern.
    if drop_axis:
        out_ind = tuple(x for i, x in enumerate(out_ind) if i not in drop_axis)
    if new_axis is None and chunks is not None and len(out_ind) < len(chunks):
        new_axis = range(len(chunks) - len(out_ind))
    # Insert the new dimensions (after drop_axis) with their chunk sizes.
    if new_axis:
        # new_axis = [x + len(drop_axis) for x in new_axis]
        out_ind = list(out_ind)
        for ax in sorted(new_axis):
            n = len(out_ind) + len(drop_axis)
            out_ind.insert(ax, n)
            if chunks is not None:
                new_axes[n] = chunks[ax]
            else:
                new_axes[n] = 1
        out_ind = tuple(out_ind)
        if max(new_axis) > max(out_ind):
            raise ValueError("New_axis values do not fill in all dimensions")
    # User-provided chunks must match the output dimensionality exactly.
    if chunks is not None:
        if len(chunks) != len(out_ind):
            raise ValueError(
                "Provided chunks have {0} dims, expected {1} "
                "dims.".format(len(chunks), len(out_ind))
            )
        adjust_chunks = dict(zip(out_ind, chunks))
    else:
        adjust_chunks = None
    # First blockwise pass: builds the output array and determines out.chunks.
    out = blockwise(
        func,
        out_ind,
        *concat(argpairs),
        name=name,
        new_axes=new_axes,
        dtype=dtype,
        concatenate=True,
        align_arrays=False,
        adjust_chunks=adjust_chunks,
        meta=meta,
    **kwargs,
    )
    extra_argpairs = []
    extra_names = []
    # If func has block_id as an argument, construct an array of block IDs and
    # prepare to inject it.
    if has_keyword(func, "block_id"):
        block_id_name = "block-id-" + out.name
        block_id_dsk = {
            (block_id_name,) + block_id: block_id
            for block_id in product(*(range(len(c)) for c in out.chunks))
        }
        # One single-element chunk per output block, holding that block's ID.
        block_id_array = Array(
            block_id_dsk,
            block_id_name,
            chunks=tuple((1,) * len(c) for c in out.chunks),
            dtype=np.object_,
        )
        extra_argpairs.append((block_id_array, out_ind))
        extra_names.append("block_id")
    # If func has block_info as an argument, construct an array of block info
    # objects and prepare to inject it.
    if has_keyword(func, "block_info"):
        starts = {}
        num_chunks = {}
        shapes = {}
        # Precompute per-argument chunk boundaries and counts.
        for i, (arg, in_ind) in enumerate(argpairs):
            if in_ind is not None:
                shapes[i] = arg.shape
                if drop_axis:
                    # We concatenate along dropped axes, so we need to treat them
                    # as if there is only a single chunk.
                    starts[i] = [
                        (
                            cached_cumsum(arg.chunks[j], initial_zero=True)
                            if ind in out_ind
                            else [0, arg.shape[j]]
                        )
                        for j, ind in enumerate(in_ind)
                    ]
                    num_chunks[i] = tuple(len(s) - 1 for s in starts[i])
                else:
                    starts[i] = [
                        cached_cumsum(c, initial_zero=True) for c in arg.chunks
                    ]
                    num_chunks[i] = arg.numblocks
        out_starts = [cached_cumsum(c, initial_zero=True) for c in out.chunks]
        block_info_name = "block-info-" + out.name
        block_info_dsk = {}
        # Build one info dict per output block.
        for block_id in product(*(range(len(c)) for c in out.chunks)):
            # Get position of chunk, indexed by axis labels
            location = {out_ind[i]: loc for i, loc in enumerate(block_id)}
            info = {}
            for i, shape in shapes.items():
                # Compute chunk key in the array, taking broadcasting into
                # account. We don't directly know which dimensions are
                # broadcast, but any dimension with only one chunk can be
                # treated as broadcast.
                arr_k = tuple(
                    location.get(ind, 0) if num_chunks[i][j] > 1 else 0
                    for j, ind in enumerate(argpairs[i][1])
                )
                info[i] = {
                    "shape": shape,
                    "num-chunks": num_chunks[i],
                    "array-location": [
                        (starts[i][ij][j], starts[i][ij][j + 1])
                        for ij, j in enumerate(arr_k)
                    ],
                    "chunk-location": arr_k,
                }
            # The output's own entry is stored under the key ``None``.
            info[None] = {
                "shape": out.shape,
                "num-chunks": out.numblocks,
                "array-location": [
                    (out_starts[ij][j], out_starts[ij][j + 1])
                    for ij, j in enumerate(block_id)
                ],
                "chunk-location": block_id,
                "chunk-shape": tuple(
                    out.chunks[ij][j] for ij, j in enumerate(block_id)
                ),
                "dtype": dtype,
            }
            block_info_dsk[(block_info_name,) + block_id] = info
        block_info = Array(
            block_info_dsk,
            block_info_name,
            chunks=tuple((1,) * len(c) for c in out.chunks),
            dtype=np.object_,
        )
        extra_argpairs.append((block_info, out_ind))
        extra_names.append("block_info")
    if extra_argpairs:
        # Rewrite the Blockwise layer. It would be nice to find a way to
        # avoid doing it twice, but it's currently needed to determine
        # out.chunks from the first pass. Since it constructs a Blockwise
        # rather than an expanded graph, it shouldn't be too expensive.
        out = blockwise(
            _pass_extra_kwargs,
            out_ind,
            func,
            None,
            tuple(extra_names),
            None,
            *concat(extra_argpairs),
            *concat(argpairs),
            name=out.name,
            dtype=out.dtype,
            concatenate=True,
            align_arrays=False,
            adjust_chunks=dict(zip(out_ind, out.chunks)),
            meta=meta,
            **kwargs,
        )
    return out
def broadcast_chunks(*chunkss):
    """Construct a chunks tuple that broadcasts many chunks tuples

    >>> a = ((5, 5),)
    >>> b = ((5, 5),)
    >>> broadcast_chunks(a, b)
    ((5, 5),)

    >>> a = ((10, 10, 10), (5, 5),)
    >>> b = ((5, 5),)
    >>> broadcast_chunks(a, b)
    ((10, 10, 10), (5, 5))

    >>> a = ((10, 10, 10), (5, 5),)
    >>> b = ((1,), (5, 5),)
    >>> broadcast_chunks(a, b)
    ((10, 10, 10), (5, 5))

    >>> a = ((10, 10, 10), (5, 5),)
    >>> b = ((3, 3,), (5, 5),)
    >>> broadcast_chunks(a, b)
    Traceback (most recent call last):
        ...
    ValueError: Chunks do not align: [(10, 10, 10), (3, 3)]
    """
    if not chunkss:
        return ()
    if len(chunkss) == 1:
        return chunkss[0]
    ndim = max(map(len, chunkss))
    # Left-pad shorter chunk tuples with broadcastable unit dimensions.
    padded = [((1,),) * (ndim - len(c)) + c for c in chunkss]
    out = []
    for dim in range(ndim):
        candidates = [c[dim] for c in padded]
        # Unit chunks ((1,)) broadcast against anything; drop them unless
        # every candidate is a unit chunk.
        non_unit = [c for c in candidates if c != (1,)]
        if not non_unit:
            non_unit = candidates
        if len(set(non_unit)) != 1:
            raise ValueError("Chunks do not align: %s" % str(non_unit))
        out.append(non_unit[0])
    return tuple(out)
def store(
    sources,
    targets,
    lock=True,
    regions=None,
    compute=True,
    return_stored=False,
    **kwargs,
):
    """Store dask arrays in array-like objects, overwrite data in target

    This stores dask arrays into object that supports numpy-style setitem
    indexing.  It stores values chunk by chunk so that it does not have to
    fill up memory.  For best performance you can align the block size of
    the storage target with the block size of your array.

    If your data fits in memory then you may prefer calling
    ``np.array(myarray)`` instead.

    Parameters
    ----------
    sources: Array or iterable of Arrays
    targets: array-like or Delayed or iterable of array-likes and/or Delayeds
        These should support setitem syntax ``target[10:20] = ...``
    lock: boolean or threading.Lock, optional
        Whether or not to lock the data stores while storing.
        Pass True (lock each file individually), False (don't lock) or a
        particular :class:`threading.Lock` object to be shared among all writes.
    regions: tuple of slices or list of tuples of slices
        Each ``region`` tuple in ``regions`` should be such that
        ``target[region].shape = source.shape``
        for the corresponding source and target in sources and targets,
        respectively. If this is a tuple, the contents will be assumed to be
        slices, so do not provide a tuple of tuples.
    compute: boolean, optional
        If true compute immediately, return :class:`dask.delayed.Delayed` otherwise
    return_stored: boolean, optional
        Optionally return the stored result (default False).

    Examples
    --------
    >>> x = ...  # doctest: +SKIP

    >>> import h5py  # doctest: +SKIP
    >>> f = h5py.File('myfile.hdf5', mode='a')  # doctest: +SKIP
    >>> dset = f.create_dataset('/data', shape=x.shape,
    ...                                  chunks=x.chunks,
    ...                                  dtype='f8')  # doctest: +SKIP

    >>> store(x, dset)  # doctest: +SKIP

    Alternatively store many arrays at the same time

    >>> store([x, y, z], [dset1, dset2, dset3])  # doctest: +SKIP
    """
    # Normalize the single-source call form to the list form.
    if isinstance(sources, Array):
        sources = [sources]
        targets = [targets]
    if any(not isinstance(s, Array) for s in sources):
        raise ValueError("All sources must be dask array objects")
    if len(sources) != len(targets):
        raise ValueError(
            "Different number of sources [%d] and targets [%d]"
            % (len(sources), len(targets))
        )
    # A bare tuple (or None) is a single region, broadcast over all sources.
    if isinstance(regions, tuple) or regions is None:
        regions = [regions]
    if len(sources) > 1 and len(regions) == 1:
        regions *= len(sources)
    if len(sources) != len(regions):
        raise ValueError(
            "Different number of sources [%d] and targets [%d] than regions [%d]"
            % (len(sources), len(targets), len(regions))
        )
    # Optimize all sources together
    sources_dsk = HighLevelGraph.merge(*[e.__dask_graph__() for e in sources])
    sources_dsk = Array.__dask_optimize__(
        sources_dsk, list(core.flatten([e.__dask_keys__() for e in sources]))
    )
    sources2 = [Array(sources_dsk, e.name, e.chunks, meta=e) for e in sources]
    # Optimize all targets together
    targets2 = []
    targets_keys = []
    targets_dsk = []
    for e in targets:
        if isinstance(e, Delayed):
            # Delayed targets contribute their graph; the key stands in for
            # the concrete target until compute time.
            targets2.append(e.key)
            targets_keys.extend(e.__dask_keys__())
            targets_dsk.append(e.__dask_graph__())
        elif is_dask_collection(e):
            raise TypeError("Targets must be either Delayed objects or array-likes")
        else:
            targets2.append(e)
    targets_dsk = HighLevelGraph.merge(*targets_dsk)
    targets_dsk = Delayed.__dask_optimize__(targets_dsk, targets_keys)
    load_stored = return_stored and not compute
    # One unique token per source keeps the store tasks distinct.
    toks = [str(uuid.uuid1()) for _ in range(len(sources))]
    store_dsk = HighLevelGraph.merge(
        *[
            insert_to_ooc(s, t, lock, r, return_stored, load_stored, tok)
            for s, t, r, tok in zip(sources2, targets2, regions, toks)
        ]
    )
    store_keys = list(store_dsk.keys())
    store_dsk = HighLevelGraph.merge(store_dsk, targets_dsk, sources_dsk)
    store_dsk = HighLevelGraph.from_collections(id(store_dsk), dict(store_dsk))
    if return_stored:
        load_store_dsk = store_dsk
        if compute:
            # Persist the store tasks now, then build arrays that read the
            # stored results back out.
            store_dlyds = [Delayed(k, store_dsk) for k in store_keys]
            store_dlyds = persist(*store_dlyds, **kwargs)
            store_dsk_2 = HighLevelGraph.merge(*[e.dask for e in store_dlyds])
            load_store_dsk = retrieve_from_ooc(store_keys, store_dsk, store_dsk_2)
        result = tuple(
            Array(load_store_dsk, "load-store-%s" % t, s.chunks, meta=s)
            for s, t in zip(sources, toks)
        )
        return result
    else:
        # Collapse all store tasks under a single key; compute it now or
        # hand back the Delayed for the caller to trigger.
        name = "store-" + str(uuid.uuid1())
        dsk = HighLevelGraph.merge({name: store_keys}, store_dsk)
        result = Delayed(name, dsk)
        if compute:
            result.compute(**kwargs)
            return None
        else:
            return result
def blockdims_from_blockshape(shape, chunks):
    """
    >>> blockdims_from_blockshape((10, 10), (4, 3))
    ((4, 4, 2), (3, 3, 3, 1))
    >>> blockdims_from_blockshape((10, 0), (4, 0))
    ((4, 4, 2), (0,))
    """
    if chunks is None:
        raise TypeError("Must supply chunks= keyword argument")
    if shape is None:
        raise TypeError("Must supply shape= keyword argument")
    if np.isnan(sum(shape)) or np.isnan(sum(chunks)):
        raise ValueError(
            "Array chunk sizes are unknown. shape: %s, chunks: %s%s"
            % (shape, chunks, unknown_chunk_message)
        )
    if not all(map(is_integer, chunks)):
        raise ValueError("chunks can only contain integers.")
    if not all(map(is_integer, shape)):
        raise ValueError("shape can only contain integers.")
    shape = tuple(map(int, shape))
    chunks = tuple(map(int, chunks))
    out = []
    for dim, blocksize in zip(shape, chunks):
        if not dim:
            # A zero-length dimension gets a single zero-length chunk.
            out.append((0,))
            continue
        nfull, remainder = divmod(dim, blocksize)
        blocks = (blocksize,) * nfull
        if remainder:
            # Trailing partial chunk.
            blocks += (remainder,)
        out.append(blocks)
    return tuple(out)
def finalize(results):
    """Collapse nested chunk results into a single concrete value.

    Descends through singleton nesting levels; as soon as any level holds
    more than one element the whole structure is concatenated, otherwise
    the lone leaf is unpacked.
    """
    if not results:
        return concatenate3(results)
    inner = results
    while isinstance(inner, (tuple, list)):
        if len(inner) > 1:
            return concatenate3(results)
        inner = inner[0]
    return unpack_singleton(results)
CHUNKS_NONE_ERROR_MESSAGE = """
You must specify a chunks= keyword argument.
This specifies the chunksize of your array blocks.
See the following documentation page for details:
https://docs.dask.org/en/latest/array-creation.html#chunks
""".strip()
class Array(DaskMethodsMixin):
"""Parallel Dask Array
A parallel nd-array comprised of many numpy arrays arranged in a grid.
This constructor is for advanced uses only. For normal use see the
:func:`dask.array.from_array` function.
Parameters
----------
dask : dict
Task dependency graph
name : string
Name of array in dask
shape : tuple of ints
Shape of the entire array
chunks: iterable of tuples
block sizes along each dimension
dtype : str or dtype
Typecode or data-type for the new Dask Array
meta : empty ndarray
empty ndarray created with same NumPy backend, ndim and dtype as the
Dask Array being created (overrides dtype)
See Also
--------
dask.array.from_array
"""
__slots__ = "dask", "__name", "_cached_keys", "__chunks", "_meta", "__dict__"
def __new__(cls, dask, name, chunks, dtype=None, meta=None, shape=None):
self = super(Array, cls).__new__(cls)
assert isinstance(dask, Mapping)
if not isinstance(dask, HighLevelGraph):
dask = HighLevelGraph.from_collections(name, dask, dependencies=())
self.dask = dask
self._name = str(name)
meta = meta_from_array(meta, dtype=dtype)
if (
isinstance(chunks, str)
or isinstance(chunks, tuple)
and chunks
and any(isinstance(c, str) for c in chunks)
):
dt = meta.dtype
else:
dt = None
self._chunks = normalize_chunks(chunks, shape, dtype=dt)
if self.chunks is None:
raise ValueError(CHUNKS_NONE_ERROR_MESSAGE)
self._meta = meta_from_array(meta, ndim=self.ndim, dtype=dtype)
for plugin in config.get("array_plugins", ()):
result = plugin(self)
if result is not None:
self = result
try:
layer = self.dask.layers[name]
except (AttributeError, KeyError):
# self is no longer an Array after applying the plugins, OR
# a plugin replaced the HighLevelGraph with a plain dict, OR
# name is not the top layer's name (this can happen after the layer is
# manipulated, to avoid a collision)
pass
else:
if layer.collection_annotations is None:
layer.collection_annotations = {
"type": type(self),
"chunk_type": type(self._meta),
"chunks": self.chunks,
"dtype": dtype,
}
else:
layer.collection_annotations.update(
{
"type": type(self),
"chunk_type": type(self._meta),
"chunks": self.chunks,
"dtype": dtype,
}
)
return self
def __reduce__(self):
return (Array, (self.dask, self.name, self.chunks, self.dtype))
    def __dask_graph__(self):
        # Dask collection protocol: expose the task graph.
        return self.dask

    def __dask_layers__(self):
        # Dask collection protocol: the name(s) of this collection's output
        # layer(s) in the high level graph.
        return (self.name,)
    def __dask_keys__(self):
        """Return the nested list of task keys, one key per chunk.

        The result is cached on the instance; the cache is invalidated when
        ``_name`` is reassigned (see the ``_name`` setter).
        """
        if self._cached_keys is not None:
            return self._cached_keys

        name, chunks, numblocks = self.name, self.chunks, self.numblocks

        def keys(*args):
            # Recursively build a nested list that mirrors the block grid:
            # one level of nesting per array dimension.
            if not chunks:
                # 0-d array: a single key with no block indices.
                return [(name,)]
            ind = len(args)
            if ind + 1 == len(numblocks):
                # Innermost dimension: emit the concrete keys.
                result = [(name,) + args + (i,) for i in range(numblocks[ind])]
            else:
                result = [keys(*(args + (i,))) for i in range(numblocks[ind])]
            return result

        self._cached_keys = result = keys()
        return result
    def __dask_tokenize__(self):
        # The graph name already uniquely identifies this collection.
        return self.name

    # Graph optimization hook; overridable via the "array_optimize" config key.
    __dask_optimize__ = globalmethod(
        optimize, key="array_optimize", falsey=dont_optimize
    )
    # Default scheduler used to compute arrays.
    __dask_scheduler__ = staticmethod(threaded.get)

    def __dask_postcompute__(self):
        # How to assemble computed chunk results into the final value.
        return finalize, ()

    def __dask_postpersist__(self):
        # How to rebuild this collection around a persisted graph.
        return self._rebuild, ()
def _rebuild(self, dsk, *, rename=None):
name = self._name
if rename:
name = rename.get(name, name)
return Array(dsk, name, self.chunks, self.dtype, self._meta)
def _reset_cache(self, key=None):
"""
Reset cached properties.
Parameters
----------
key : str, optional
Remove specified key. The default removes all items.
"""
if key is None:
self.__dict__.clear()
else:
self.__dict__.pop(key, None)
    @cached_property
    def numblocks(self):
        # Number of blocks along each dimension.
        return tuple(map(len, self.chunks))

    @cached_property
    def npartitions(self):
        # Total number of blocks (product over all dimensions).
        return reduce(mul, self.numblocks, 1)
    def compute_chunk_sizes(self):
        """
        Compute the chunk sizes for a Dask array. This is especially useful
        when the chunk sizes are unknown (e.g., when indexing one Dask array
        with another).

        Notes
        -----
        This function modifies the Dask array in-place.

        Examples
        --------
        >>> import dask.array as da
        >>> import numpy as np
        >>> x = da.from_array([-2, -1, 0, 1, 2], chunks=2)
        >>> x.chunks
        ((2, 2, 1),)
        >>> y = x[x <= 0]
        >>> y.chunks
        ((nan, nan, nan),)
        >>> y.compute_chunk_sizes()  # in-place computation
        dask.array<getitem, shape=(3,), dtype=int64, chunksize=(2,), chunktype=numpy.ndarray>
        >>> y.chunks
        ((2, 1, 0),)
        """
        x = self
        # Produce, per block, that block's shape; an extra trailing axis of
        # length ndim holds the per-dimension sizes.
        chunk_shapes = x.map_blocks(
            _get_chunk_shape,
            dtype=int,
            chunks=tuple(len(c) * (1,) for c in x.chunks) + ((x.ndim,),),
            new_axis=x.ndim,
        )

        c = []
        for i in range(x.ndim):
            # Select, for axis i, the sizes of every block along that axis
            # (index 0 on all other axes, component i on the trailing axis).
            s = x.ndim * [0] + [i]
            s[i] = slice(None)
            s = tuple(s)

            c.append(tuple(chunk_shapes[s]))

        # `map_blocks` assigns numpy dtypes
        # cast chunk dimensions back to python int before returning
        x._chunks = tuple(
            [tuple([int(chunk) for chunk in chunks]) for chunks in compute(tuple(c))[0]]
        )
        return x
    @cached_property
    def shape(self):
        # Shape is the sum of chunk sizes along each dimension.
        return tuple(cached_cumsum(c, initial_zero=True)[-1] for c in self.chunks)

    @property
    def chunksize(self):
        # The largest chunk along each dimension.
        return tuple(max(c) for c in self.chunks)

    @property
    def dtype(self):
        # dtype is derived from the meta array.
        return self._meta.dtype
    @property
    def _chunks(self):
        """Non-public chunks property. Allows setting a chunk value."""
        return self.__chunks

    @_chunks.setter
    def _chunks(self, chunks):
        self.__chunks = chunks

        # When the chunks changes the cached properties that was
        # dependent on it needs to be deleted:
        for key in ["numblocks", "npartitions", "shape", "ndim", "size"]:
            self._reset_cache(key)

    @property
    def chunks(self):
        """Chunks property."""
        return self.__chunks

    @chunks.setter
    def chunks(self, chunks):
        # The public chunks attribute is read-only; direct assignment is
        # almost always a user error, so point at the supported alternatives.
        raise TypeError(
            "Can not set chunks directly\n\n"
            "Please use the rechunk method instead:\n"
            f"  x.rechunk({chunks})\n\n"
            "If trying to avoid unknown chunks, use\n"
            "  x.compute_chunk_sizes()"
        )
def __len__(self):
if not self.chunks:
raise TypeError("len() of unsized object")
return sum(self.chunks[0])
    def __array_ufunc__(self, numpy_ufunc, method, *inputs, **kwargs):
        """NumPy ufunc protocol hook: route ufunc calls to dask equivalents."""
        out = kwargs.get("out", ())
        for x in inputs + out:
            # Defer to operand types that dask should not handle itself.
            if _should_delegate(x):
                return NotImplemented

        if method == "__call__":
            if numpy_ufunc is np.matmul:
                from .routines import matmul

                # special case until apply_gufunc handles optional dimensions
                return matmul(*inputs, **kwargs)
            if numpy_ufunc.signature is not None:
                # Generalized ufuncs carry a core-dimension signature.
                from .gufunc import apply_gufunc

                return apply_gufunc(
                    numpy_ufunc, numpy_ufunc.signature, *inputs, **kwargs
                )
            if numpy_ufunc.nout > 1:
                # Multi-output ufuncs need the matching dask wrapper.
                from . import ufunc

                try:
                    da_ufunc = getattr(ufunc, numpy_ufunc.__name__)
                except AttributeError:
                    return NotImplemented
                return da_ufunc(*inputs, **kwargs)
            else:
                # Single-output elementwise ufuncs map directly onto elemwise.
                return elemwise(numpy_ufunc, *inputs, **kwargs)
        elif method == "outer":
            from . import ufunc

            try:
                da_ufunc = getattr(ufunc, numpy_ufunc.__name__)
            except AttributeError:
                return NotImplemented
            return da_ufunc.outer(*inputs, **kwargs)
        else:
            # Other ufunc methods (reduce, accumulate, ...) are not handled.
            return NotImplemented
    def __repr__(self):
        """
        >>> import dask.array as da
        >>> da.ones((10, 10), chunks=(5, 5), dtype='i4')
        dask.array<..., shape=(10, 10), dtype=int32, chunksize=(5, 5), chunktype=numpy.ndarray>
        """
        chunksize = str(self.chunksize)
        # Strip the trailing token hash from the key name for readability.
        name = self.name.rsplit("-", 1)[0]
        return "dask.array<%s, shape=%s, dtype=%s, chunksize=%s, chunktype=%s.%s>" % (
            name,
            self.shape,
            self.dtype,
            chunksize,
            type(self._meta).__module__.split(".")[0],
            type(self._meta).__name__,
        )
    def _repr_html_(self):
        """Rich HTML repr for Jupyter: metadata table plus an SVG chunk grid."""
        table = self._repr_html_table()
        try:
            grid = self.to_svg(size=config.get("array.svg.size", 120))
        except NotImplementedError:
            # to_svg may not support this array; fall back to table only.
            grid = ""

        both = [
            "<table>",
            "<tr>",
            "<td>",
            table,
            "</td>",
            "<td>",
            grid,
            "</td>",
            "</tr>",
            "</table>",
        ]
        return "\n".join(both)
    def _repr_html_table(self):
        """Build the HTML metadata table used by ``_repr_html_``."""
        if "sparse" in typename(type(self._meta)):
            # Byte sizes are not reported for sparse-backed arrays.
            nbytes = None
            cbytes = None
        elif not math.isnan(self.nbytes):
            nbytes = format_bytes(self.nbytes)
            cbytes = format_bytes(np.prod(self.chunksize) * self.dtype.itemsize)
        else:
            # nbytes is NaN when chunk sizes are unknown.
            nbytes = "unknown"
            cbytes = "unknown"
        table = [
            "<table>",
            " <thead>",
            " <tr><td> </td><th> Array </th><th> Chunk </th></tr>",
            " </thead>",
            " <tbody>",
            " <tr><th> Bytes </th><td> %s </td> <td> %s </td></tr>"
            % (nbytes, cbytes)
            if nbytes is not None
            else "",
            " <tr><th> Shape </th><td> %s </td> <td> %s </td></tr>"
            % (str(self.shape), str(self.chunksize)),
            " <tr><th> Count </th><td> %d Tasks </td><td> %d Chunks </td></tr>"
            % (len(self.__dask_graph__()), self.npartitions),
            " <tr><th> Type </th><td> %s </td><td> %s.%s </td></tr>"
            % (
                self.dtype,
                type(self._meta).__module__.split(".")[0],
                type(self._meta).__name__,
            ),
            " </tbody>",
            "</table>",
        ]
        return "\n".join(table)
    @cached_property
    def ndim(self):
        # Number of dimensions.
        return len(self.shape)

    @cached_property
    def size(self):
        """Number of elements in array"""
        return reduce(mul, self.shape, 1)

    @property
    def nbytes(self):
        """Number of bytes in array"""
        return self.size * self.dtype.itemsize

    @property
    def itemsize(self):
        """Length of one array element in bytes"""
        return self.dtype.itemsize
    @property
    def _name(self):
        # Non-public name accessor; assignment also invalidates the key cache.
        return self.__name

    @_name.setter
    def _name(self, val):
        self.__name = val
        # Clear the key cache when the name is reset
        self._cached_keys = None

    @property
    def name(self):
        # Public, read-only name of this array in the task graph.
        return self.__name

    @name.setter
    def name(self, val):
        raise TypeError(
            "Cannot set name directly\n\n"
            "Name is used to relate the array to the task graph.\n"
            "It is uncommon to need to change it, but if you do\n"
            "please set ``._name``"
        )
    __array_priority__ = 11  # higher than numpy.ndarray and numpy.matrix

    def __array__(self, dtype=None, **kwargs):
        """Materialize as a NumPy ndarray (triggers computation)."""
        x = self.compute()
        if dtype and x.dtype != dtype:
            x = x.astype(dtype)
        if not isinstance(x, np.ndarray):
            # Coerce non-NumPy results (e.g. scalars, duck arrays) to ndarray.
            x = np.array(x)
        return x
    def __array_function__(self, func, types, args, kwargs):
        """NEP-18 protocol hook: dispatch NumPy API calls to dask.array."""
        import dask.array as module

        def handle_nonmatching_names(func, args, kwargs):
            # Fallback for NumPy functions with no dask.array counterpart:
            # either an explicitly registered override, or compute + call NumPy.
            if func not in _HANDLED_FUNCTIONS:
                warnings.warn(
                    "The `{}` function is not implemented by Dask array. "
                    "You may want to use the da.map_blocks function "
                    "or something similar to silence this warning. "
                    "Your code may stop working in a future release.".format(
                        func.__module__ + "." + func.__name__
                    ),
                    FutureWarning,
                )
                # Need to convert to array object (e.g. numpy.ndarray or
                # cupy.ndarray) as needed, so we can call the NumPy function
                # again and it gets the chance to dispatch to the right
                # implementation.
                args, kwargs = compute(args, kwargs)
                return func(*args, **kwargs)

            return _HANDLED_FUNCTIONS[func](*args, **kwargs)

        # First, verify that all types are handled by Dask. Otherwise, return NotImplemented.
        if not all(type is Array or is_valid_chunk_type(type) for type in types):
            return NotImplemented

        # Now try to find a matching function name. If that doesn't work, we may
        # be dealing with an alias or a function that's simply not in the Dask API.
        # Handle aliases via the _HANDLED_FUNCTIONS dict mapping, and warn otherwise.
        for submodule in func.__module__.split(".")[1:]:
            try:
                module = getattr(module, submodule)
            except AttributeError:
                return handle_nonmatching_names(func, args, kwargs)

        if not hasattr(module, func.__name__):
            return handle_nonmatching_names(func, args, kwargs)

        da_func = getattr(module, func.__name__)
        if da_func is func:
            # The "match" is the NumPy function itself — not a dask override.
            return handle_nonmatching_names(func, args, kwargs)
        return da_func(*args, **kwargs)
    @property
    def _elemwise(self):
        # Expose the module-level elemwise function for elementwise dispatch.
        return elemwise
    @wraps(store)
    def store(self, target, **kwargs):
        # Single-source convenience wrapper around the module-level ``store``;
        # docstring is inherited via ``wraps``.
        r = store([self], [target], **kwargs)

        if kwargs.get("return_stored", False):
            # store() returned a list of stored results; unwrap ours.
            r = r[0]

        return r
    def to_svg(self, size=500):
        """Convert chunks from Dask Array into an SVG Image

        Parameters
        ----------
        size : int
            Rough size of the image

        Examples
        --------
        >>> x.to_svg(size=500)  # doctest: +SKIP

        Returns
        -------
        text: An svg string depicting the array as a grid of chunks
        """
        from .svg import svg

        return svg(self.chunks, size=size)
    def to_hdf5(self, filename, datapath, **kwargs):
        """Store array in HDF5 file

        >>> x.to_hdf5('myfile.hdf5', '/x')  # doctest: +SKIP

        Optionally provide arguments as though to ``h5py.File.create_dataset``

        >>> x.to_hdf5('myfile.hdf5', '/x', compression='lzf', shuffle=True)  # doctest: +SKIP

        See Also
        --------
        da.store
        h5py.File.create_dataset
        """
        # Delegates to the module-level ``to_hdf5`` (the method name shadows
        # it only inside the class body).
        return to_hdf5(filename, datapath, self, **kwargs)
    def to_dask_dataframe(self, columns=None, index=None, meta=None):
        """Convert dask Array to dask Dataframe

        Parameters
        ----------
        columns: list or string
            list of column names if DataFrame, single string if Series
        index : dask.dataframe.Index, optional
            An optional *dask* Index to use for the output Series or DataFrame.

            The default output index depends on whether the array has any unknown
            chunks. If there are any unknown chunks, the output has ``None``
            for all the divisions (one per chunk). If all the chunks are known,
            a default index with known divisions is created.

            Specifying ``index`` can be useful if you're conforming a Dask Array
            to an existing dask Series or DataFrame, and you would like the
            indices to match.
        meta : object, optional
            An optional `meta` parameter can be passed for dask
            to specify the concrete dataframe type to use for partitions of
            the Dask dataframe. By default, pandas DataFrame is used.

        See Also
        --------
        dask.dataframe.from_dask_array
        """
        from ..dataframe import from_dask_array

        return from_dask_array(self, columns=columns, index=index, meta=meta)
    def __bool__(self):
        """Truth value: only defined for single-element arrays (computes)."""
        if self.size > 1:
            raise ValueError(
                "The truth value of a {0} is ambiguous. "
                "Use a.any() or a.all().".format(self.__class__.__name__)
            )
        else:
            return bool(self.compute())

    __nonzero__ = __bool__  # python 2
    def _scalarfunc(self, cast_type):
        # Shared implementation for the scalar-conversion dunders below:
        # compute the (single) value, then cast it.
        if self.size > 1:
            raise TypeError("Only length-1 arrays can be converted to Python scalars")
        else:
            return cast_type(self.compute())

    def __int__(self):
        return self._scalarfunc(int)

    __long__ = __int__  # python 2

    def __float__(self):
        return self._scalarfunc(float)

    def __complex__(self):
        return self._scalarfunc(complex)

    def __index__(self):
        return self._scalarfunc(operator.index)
    def __setitem__(self, key, value):
        """Item assignment: builds a new graph and rebinds it onto ``self``."""
        if value is np.ma.masked:
            value = np.ma.masked_all(())

        ## Use the "where" method for cases when key is an Array
        if isinstance(key, Array):
            from .routines import where

            if isinstance(value, Array) and value.ndim > 1:
                raise ValueError("boolean index array should have 1 dimension")
            try:
                y = where(key, value, self)
            except ValueError as e:
                raise ValueError(
                    "Boolean index assignment in Dask "
                    "expects equally shaped arrays.\nExample: da1[da2] = da3 "
                    "where da1.shape == (4,), da2.shape == (4,) "
                    "and da3.shape == (4,)."
                ) from e
            # Adopt the result's graph and metadata in place.
            self._meta = y._meta
            self.dask = y.dask
            self._name = y.name
            self._chunks = y.chunks
            return

        # Still here? Then apply the assignment to other type of
        # indices via the `setitem_array` function.
        value = asanyarray(value)

        out = "setitem-" + tokenize(self, key, value)
        dsk = setitem_array(out, self, key, value)

        graph = HighLevelGraph.from_collections(out, dsk, dependencies=[self])
        y = Array(graph, out, chunks=self.chunks, dtype=self.dtype)

        # Adopt the result's graph and metadata in place.
        self._meta = y._meta
        self.dask = y.dask
        self._name = y.name
        self._chunks = y.chunks
    def __getitem__(self, index):
        """Indexing: field access, basic/advanced slicing, dask-array indexers."""
        # Field access, e.g. x['a'] or x[['a', 'b']]
        if isinstance(index, str) or (
            isinstance(index, list) and index and all(isinstance(i, str) for i in index)
        ):
            if isinstance(index, str):
                dt = self.dtype[index]
            else:
                # Build a view dtype selecting only the requested fields.
                dt = np.dtype(
                    {
                        "names": index,
                        "formats": [self.dtype.fields[name][0] for name in index],
                        "offsets": [self.dtype.fields[name][1] for name in index],
                        "itemsize": self.dtype.itemsize,
                    }
                )

            if dt.shape:
                # Sub-array fields add trailing dimensions to the result.
                new_axis = list(range(self.ndim, self.ndim + len(dt.shape)))
                chunks = self.chunks + tuple((i,) for i in dt.shape)
                return self.map_blocks(
                    getitem, index, dtype=dt.base, chunks=chunks, new_axis=new_axis
                )
            else:
                return self.map_blocks(getitem, index, dtype=dt)

        if not isinstance(index, tuple):
            index = (index,)

        from .slicing import (
            normalize_index,
            slice_with_bool_dask_array,
            slice_with_int_dask_array,
        )

        index2 = normalize_index(index, self.shape)
        dependencies = {self.name}
        for i in index2:
            if isinstance(i, Array):
                dependencies.add(i.name)

        # Dask-array indexers are resolved first: integer indexers, then
        # boolean masks; each step may replace both self and the index.
        if any(isinstance(i, Array) and i.dtype.kind in "iu" for i in index2):
            self, index2 = slice_with_int_dask_array(self, index2)
        if any(isinstance(i, Array) and i.dtype == bool for i in index2):
            self, index2 = slice_with_bool_dask_array(self, index2)

        if all(isinstance(i, slice) and i == slice(None) for i in index2):
            # Full slice: nothing to do.
            return self

        out = "getitem-" + tokenize(self, index2)
        dsk, chunks = slice_array(out, self.name, self.chunks, index2, self.itemsize)

        graph = HighLevelGraph.from_collections(out, dsk, dependencies=[self])

        meta = meta_from_array(self._meta, ndim=len(chunks))
        if np.isscalar(meta):
            meta = np.array(meta)

        return Array(graph, out, chunks, meta=meta)
    def _vindex(self, key):
        """Implementation behind the ``vindex`` accessor; validates ``key``
        then delegates to the module-level ``_vindex``."""
        if not isinstance(key, tuple):
            key = (key,)
        if any(k is None for k in key):
            raise IndexError(
                "vindex does not support indexing with None (np.newaxis), "
                "got {}".format(key)
            )
        if all(isinstance(k, slice) for k in key):
            if all(
                k.indices(d) == slice(0, d).indices(d) for k, d in zip(key, self.shape)
            ):
                # All slices cover the entire array: identity.
                return self
            raise IndexError(
                "vindex requires at least one non-slice to vectorize over "
                "when the slices are not over the entire array (i.e, x[:]). "
                "Use normal slicing instead when only using slices. Got: {}".format(key)
            )
        return _vindex(self, *key)
    @property
    def vindex(self):
        """Vectorized indexing with broadcasting.

        This is equivalent to numpy's advanced indexing, using arrays that are
        broadcast against each other. This allows for pointwise indexing:

        >>> import dask.array as da
        >>> x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
        >>> x = da.from_array(x, chunks=2)
        >>> x.vindex[[0, 1, 2], [0, 1, 2]].compute()
        array([1, 5, 9])

        Mixed basic/advanced indexing with slices/arrays is also supported. The
        order of dimensions in the result follows those proposed for
        `ndarray.vindex <https://github.com/numpy/numpy/pull/6256>`_:
        the subspace spanned by arrays is followed by all slices.

        Note: ``vindex`` provides more general functionality than standard
        indexing, but it also has fewer optimizations and can be significantly
        slower.
        """
        # IndexCallable turns ``x.vindex[...]`` into a call to ``_vindex``.
        return IndexCallable(self._vindex)
    def _blocks(self, index):
        """Implementation behind the ``blocks`` accessor: slice by block index."""
        from .slicing import normalize_index

        if not isinstance(index, tuple):
            index = (index,)
        if sum(isinstance(ind, (np.ndarray, list)) for ind in index) > 1:
            raise ValueError("Can only slice with a single list")
        if any(ind is None for ind in index):
            raise ValueError("Slicing with np.newaxis or None is not supported")
        # Normalize against the *block* grid, not the element shape.
        index = normalize_index(index, self.numblocks)
        index = tuple(slice(k, k + 1) if isinstance(k, Number) else k for k in index)

        name = "blocks-" + tokenize(self, index)

        new_keys = np.array(self.__dask_keys__(), dtype=object)[index]

        chunks = tuple(
            tuple(np.array(c)[i].tolist()) for c, i in zip(self.chunks, index)
        )

        keys = product(*(range(len(c)) for c in chunks))

        # Each output block simply aliases the selected input block's key.
        layer = {(name,) + key: tuple(new_keys[key].tolist()) for key in keys}

        graph = HighLevelGraph.from_collections(name, layer, dependencies=[self])
        return Array(graph, name, chunks, meta=self)
    @property
    def blocks(self):
        """Slice an array by blocks

        This allows blockwise slicing of a Dask array.  You can perform normal
        Numpy-style slicing but now rather than slice elements of the array you
        slice along blocks so, for example, ``x.blocks[0, ::2]`` produces a new
        dask array with every other block in the first row of blocks.

        You can index blocks in any way that could index a numpy array of shape
        equal to the number of blocks in each dimension, (available as
        array.numblocks).  The dimension of the output array will be the same
        as the dimension of this array, even if integer indices are passed.
        This does not support slicing with ``np.newaxis`` or multiple lists.

        Examples
        --------
        >>> import dask.array as da
        >>> x = da.arange(10, chunks=2)
        >>> x.blocks[0].compute()
        array([0, 1])
        >>> x.blocks[:3].compute()
        array([0, 1, 2, 3, 4, 5])
        >>> x.blocks[::2].compute()
        array([0, 1, 4, 5, 8, 9])
        >>> x.blocks[[-1, 0]].compute()
        array([8, 9, 0, 1])

        Returns
        -------
        A Dask array
        """
        # IndexCallable turns ``x.blocks[...]`` into a call to ``_blocks``.
        return IndexCallable(self._blocks)
    @property
    def partitions(self):
        """Slice an array by partitions. Alias of dask array .blocks attribute.

        This alias allows you to write agnostic code that works with both
        dask arrays and dask dataframes.

        This allows blockwise slicing of a Dask array.  You can perform normal
        Numpy-style slicing but now rather than slice elements of the array you
        slice along blocks so, for example, ``x.blocks[0, ::2]`` produces a new
        dask array with every other block in the first row of blocks.

        You can index blocks in any way that could index a numpy array of shape
        equal to the number of blocks in each dimension, (available as
        array.numblocks).  The dimension of the output array will be the same
        as the dimension of this array, even if integer indices are passed.
        This does not support slicing with ``np.newaxis`` or multiple lists.

        Examples
        --------
        >>> import dask.array as da
        >>> x = da.arange(10, chunks=2)
        >>> x.partitions[0].compute()
        array([0, 1])
        >>> x.partitions[:3].compute()
        array([0, 1, 2, 3, 4, 5])
        >>> x.partitions[::2].compute()
        array([0, 1, 4, 5, 8, 9])
        >>> x.partitions[[-1, 0]].compute()
        array([8, 9, 0, 1])
        >>> all(x.partitions[:].compute() == x.blocks[:].compute())
        True

        Returns
        -------
        A Dask array
        """
        return self.blocks
    @derived_from(np.ndarray)
    def dot(self, other):
        from .routines import tensordot

        # np.dot semantics: contract the last axis of self with the
        # second-to-last axis of other.
        return tensordot(self, other, axes=((self.ndim - 1,), (other.ndim - 2,)))

    @property
    def A(self):
        # np.matrix compatibility attribute; dask arrays are already arrays.
        return self

    @property
    def T(self):
        # Transposed array, as in numpy.
        return self.transpose()
@derived_from(np.ndarray)
def transpose(self, *axes):
from .routines import transpose
if not axes:
axes = None
elif len(axes) == 1 and isinstance(axes[0], Iterable):
axes = axes[0]
if (axes == tuple(range(self.ndim))) or (axes == tuple(range(-self.ndim, 0))):
# no transpose necessary
return self
else:
return transpose(self, axes=axes)
    @derived_from(np.ndarray)
    def ravel(self):
        from .routines import ravel

        return ravel(self)

    flatten = ravel  # alias matching the ndarray API name
    @derived_from(np.ndarray)
    def choose(self, choices):
        # Delegates to the module-level implementation.
        from .routines import choose

        return choose(self, choices)
    @derived_from(np.ndarray)
    def reshape(self, *shape, merge_chunks=True):
        """
        .. note::

           See :meth:`dask.array.reshape` for an explanation of
           the ``merge_chunks`` keyword.
        """
        from .reshape import reshape

        if len(shape) == 1 and not isinstance(shape[0], Number):
            # Accept reshape((a, b)) as well as reshape(a, b).
            shape = shape[0]
        return reshape(self, shape, merge_chunks=merge_chunks)
    def topk(self, k, axis=-1, split_every=None):
        """The top k elements of an array.

        See :func:`dask.array.topk` for docstring.
        """
        from .reductions import topk

        return topk(self, k, axis=axis, split_every=split_every)

    def argtopk(self, k, axis=-1, split_every=None):
        """The indices of the top k elements of an array.

        See :func:`dask.array.argtopk` for docstring.
        """
        from .reductions import argtopk

        return argtopk(self, k, axis=axis, split_every=split_every)
    def astype(self, dtype, **kwargs):
        """Copy of the array, cast to a specified type.

        Parameters
        ----------
        dtype : str or dtype
            Typecode or data-type to which the array is cast.
        casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
            Controls what kind of data casting may occur. Defaults to 'unsafe'
            for backwards compatibility.

            * 'no' means the data types should not be cast at all.
            * 'equiv' means only byte-order changes are allowed.
            * 'safe' means only casts which can preserve values are allowed.
            * 'same_kind' means only safe casts or casts within a kind,
                like float64 to float32, are allowed.
            * 'unsafe' means any data conversions may be done.
        copy : bool, optional
            By default, astype always returns a newly allocated array. If this
            is set to False and the `dtype` requirement is satisfied, the input
            array is returned instead of a copy.
        """
        # Scalars don't take `casting` or `copy` kwargs - as such we only pass
        # them to `map_blocks` if specified by user (different than defaults).
        extra = set(kwargs) - {"casting", "copy"}
        if extra:
            raise TypeError(
                "astype does not take the following keyword "
                "arguments: {0!s}".format(list(extra))
            )
        casting = kwargs.get("casting", "unsafe")
        dtype = np.dtype(dtype)
        if self.dtype == dtype:
            # Already the requested dtype: no graph changes needed.
            return self
        elif not np.can_cast(self.dtype, dtype, casting=casting):
            raise TypeError(
                "Cannot cast array from {0!r} to {1!r}"
                " according to the rule "
                "{2!r}".format(self.dtype, dtype, casting)
            )
        return self.map_blocks(chunk.astype, dtype=dtype, astype_dtype=dtype, **kwargs)
    # --- Operator overloads ------------------------------------------------
    # Each delegates to ``elemwise`` so the operator is applied blockwise.
    # ``check_if_handled_given_other`` presumably defers (NotImplemented) for
    # operand types dask should not handle — defined elsewhere in this module.

    def __abs__(self):
        return elemwise(operator.abs, self)

    @check_if_handled_given_other
    def __add__(self, other):
        return elemwise(operator.add, self, other)

    @check_if_handled_given_other
    def __radd__(self, other):
        return elemwise(operator.add, other, self)

    @check_if_handled_given_other
    def __and__(self, other):
        return elemwise(operator.and_, self, other)

    @check_if_handled_given_other
    def __rand__(self, other):
        return elemwise(operator.and_, other, self)
@check_if_handled_given_other
def __div__(self, other):
return elemwise(operator.div, self, other)
@check_if_handled_given_other
def __rdiv__(self, other):
return elemwise(operator.div, other, self)
    # Comparison, arithmetic and bitwise overloads, all routed blockwise
    # through ``elemwise``. Note __eq__/__ne__ return lazy boolean arrays
    # rather than bools (hence no __hash__ redefinition here).

    @check_if_handled_given_other
    def __eq__(self, other):
        return elemwise(operator.eq, self, other)

    @check_if_handled_given_other
    def __gt__(self, other):
        return elemwise(operator.gt, self, other)

    @check_if_handled_given_other
    def __ge__(self, other):
        return elemwise(operator.ge, self, other)

    def __invert__(self):
        return elemwise(operator.invert, self)

    @check_if_handled_given_other
    def __lshift__(self, other):
        return elemwise(operator.lshift, self, other)

    @check_if_handled_given_other
    def __rlshift__(self, other):
        return elemwise(operator.lshift, other, self)

    @check_if_handled_given_other
    def __lt__(self, other):
        return elemwise(operator.lt, self, other)

    @check_if_handled_given_other
    def __le__(self, other):
        return elemwise(operator.le, self, other)

    @check_if_handled_given_other
    def __mod__(self, other):
        return elemwise(operator.mod, self, other)

    @check_if_handled_given_other
    def __rmod__(self, other):
        return elemwise(operator.mod, other, self)

    @check_if_handled_given_other
    def __mul__(self, other):
        return elemwise(operator.mul, self, other)

    @check_if_handled_given_other
    def __rmul__(self, other):
        return elemwise(operator.mul, other, self)

    @check_if_handled_given_other
    def __ne__(self, other):
        return elemwise(operator.ne, self, other)

    def __neg__(self):
        return elemwise(operator.neg, self)

    @check_if_handled_given_other
    def __or__(self, other):
        return elemwise(operator.or_, self, other)

    def __pos__(self):
        # Unary plus is the identity for arrays.
        return self

    @check_if_handled_given_other
    def __ror__(self, other):
        return elemwise(operator.or_, other, self)

    @check_if_handled_given_other
    def __pow__(self, other):
        return elemwise(operator.pow, self, other)

    @check_if_handled_given_other
    def __rpow__(self, other):
        return elemwise(operator.pow, other, self)

    @check_if_handled_given_other
    def __rshift__(self, other):
        return elemwise(operator.rshift, self, other)

    @check_if_handled_given_other
    def __rrshift__(self, other):
        return elemwise(operator.rshift, other, self)

    @check_if_handled_given_other
    def __sub__(self, other):
        return elemwise(operator.sub, self, other)

    @check_if_handled_given_other
    def __rsub__(self, other):
        return elemwise(operator.sub, other, self)

    @check_if_handled_given_other
    def __truediv__(self, other):
        return elemwise(operator.truediv, self, other)

    @check_if_handled_given_other
    def __rtruediv__(self, other):
        return elemwise(operator.truediv, other, self)

    @check_if_handled_given_other
    def __floordiv__(self, other):
        return elemwise(operator.floordiv, self, other)

    @check_if_handled_given_other
    def __rfloordiv__(self, other):
        return elemwise(operator.floordiv, other, self)

    @check_if_handled_given_other
    def __xor__(self, other):
        return elemwise(operator.xor, self, other)

    @check_if_handled_given_other
    def __rxor__(self, other):
        return elemwise(operator.xor, other, self)

    @check_if_handled_given_other
    def __matmul__(self, other):
        from .routines import matmul

        return matmul(self, other)

    @check_if_handled_given_other
    def __rmatmul__(self, other):
        from .routines import matmul

        return matmul(other, self)

    @check_if_handled_given_other
    def __divmod__(self, other):
        from .ufunc import divmod

        return divmod(self, other)

    @check_if_handled_given_other
    def __rdivmod__(self, other):
        from .ufunc import divmod

        return divmod(other, self)
    # --- Reductions --------------------------------------------------------
    # Thin wrappers around dask.array.reductions; docstrings are inherited
    # from the numpy equivalents via ``derived_from``. The imports are local
    # to defer loading the reductions module.

    @derived_from(np.ndarray)
    def any(self, axis=None, keepdims=False, split_every=None, out=None):
        from .reductions import any

        return any(self, axis=axis, keepdims=keepdims, split_every=split_every, out=out)

    @derived_from(np.ndarray)
    def all(self, axis=None, keepdims=False, split_every=None, out=None):
        from .reductions import all

        return all(self, axis=axis, keepdims=keepdims, split_every=split_every, out=out)

    @derived_from(np.ndarray)
    def min(self, axis=None, keepdims=False, split_every=None, out=None):
        from .reductions import min

        return min(self, axis=axis, keepdims=keepdims, split_every=split_every, out=out)

    @derived_from(np.ndarray)
    def max(self, axis=None, keepdims=False, split_every=None, out=None):
        from .reductions import max

        return max(self, axis=axis, keepdims=keepdims, split_every=split_every, out=out)

    @derived_from(np.ndarray)
    def argmin(self, axis=None, split_every=None, out=None):
        from .reductions import argmin

        return argmin(self, axis=axis, split_every=split_every, out=out)

    @derived_from(np.ndarray)
    def argmax(self, axis=None, split_every=None, out=None):
        from .reductions import argmax

        return argmax(self, axis=axis, split_every=split_every, out=out)

    @derived_from(np.ndarray)
    def sum(self, axis=None, dtype=None, keepdims=False, split_every=None, out=None):
        from .reductions import sum

        return sum(
            self,
            axis=axis,
            dtype=dtype,
            keepdims=keepdims,
            split_every=split_every,
            out=out,
        )

    @derived_from(np.ndarray)
    def trace(self, offset=0, axis1=0, axis2=1, dtype=None):
        from .reductions import trace

        return trace(self, offset=offset, axis1=axis1, axis2=axis2, dtype=dtype)

    @derived_from(np.ndarray)
    def prod(self, axis=None, dtype=None, keepdims=False, split_every=None, out=None):
        from .reductions import prod

        return prod(
            self,
            axis=axis,
            dtype=dtype,
            keepdims=keepdims,
            split_every=split_every,
            out=out,
        )

    @derived_from(np.ndarray)
    def mean(self, axis=None, dtype=None, keepdims=False, split_every=None, out=None):
        from .reductions import mean

        return mean(
            self,
            axis=axis,
            dtype=dtype,
            keepdims=keepdims,
            split_every=split_every,
            out=out,
        )

    @derived_from(np.ndarray)
    def std(
        self, axis=None, dtype=None, keepdims=False, ddof=0, split_every=None, out=None
    ):
        from .reductions import std

        return std(
            self,
            axis=axis,
            dtype=dtype,
            keepdims=keepdims,
            ddof=ddof,
            split_every=split_every,
            out=out,
        )

    @derived_from(np.ndarray)
    def var(
        self, axis=None, dtype=None, keepdims=False, ddof=0, split_every=None, out=None
    ):
        from .reductions import var

        return var(
            self,
            axis=axis,
            dtype=dtype,
            keepdims=keepdims,
            ddof=ddof,
            split_every=split_every,
            out=out,
        )
    def moment(
        self,
        order,
        axis=None,
        dtype=None,
        keepdims=False,
        ddof=0,
        split_every=None,
        out=None,
    ):
        """Calculate the nth centralized moment.

        Parameters
        ----------
        order : int
            Order of the moment that is returned, must be >= 2.
        axis : int, optional
            Axis along which the central moment is computed. The default is to
            compute the moment of the flattened array.
        dtype : data-type, optional
            Type to use in computing the moment. For arrays of integer type the
            default is float64; for arrays of float types it is the same as the
            array type.
        keepdims : bool, optional
            If this is set to True, the axes which are reduced are left in the
            result as dimensions with size one. With this option, the result
            will broadcast correctly against the original array.
        ddof : int, optional
            "Delta Degrees of Freedom": the divisor used in the calculation is
            N - ddof, where N represents the number of elements. By default
            ddof is zero.

        Returns
        -------
        moment : ndarray

        References
        ----------
        .. [1] Pebay, Philippe (2008), "Formulas for Robust, One-Pass Parallel
            Computation of Covariances and Arbitrary-Order Statistical Moments",
            Technical Report SAND2008-6212, Sandia National Laboratories.

        """
        # Delegates to dask.array.reductions.moment (imported lazily).
        from .reductions import moment

        return moment(
            self,
            order,
            axis=axis,
            dtype=dtype,
            keepdims=keepdims,
            ddof=ddof,
            split_every=split_every,
            out=out,
        )
    @wraps(map_blocks)
    def map_blocks(self, func, *args, **kwargs):
        # Method form of the module-level ``map_blocks`` with ``self`` as the
        # first array argument; docstring inherited via ``wraps``.
        return map_blocks(func, self, *args, **kwargs)
    def map_overlap(self, func, depth, boundary=None, trim=True, **kwargs):
        """Map a function over blocks of the array with some overlap

        We share neighboring zones between blocks of the array, then map a
        function, then trim away the neighboring strips.

        Note that this function will attempt to automatically determine the output
        array type before computing it, please refer to the ``meta`` keyword argument
        in :func:`map_blocks <dask.array.core.map_blocks>` if you expect that the function will not succeed when
        operating on 0-d arrays.

        Parameters
        ----------
        func: function
            The function to apply to each extended block
        depth: int, tuple, or dict
            The number of elements that each block should share with its neighbors
            If a tuple or dict then this can be different per axis
        boundary: str, tuple, dict
            How to handle the boundaries.
            Values include 'reflect', 'periodic', 'nearest', 'none',
            or any constant value like 0 or np.nan
        trim: bool
            Whether or not to trim ``depth`` elements from each block after
            calling the map function.
            Set this to False if your mapping function already does this for you
        **kwargs:
            Other keyword arguments valid in :func:`map_blocks <dask.array.core.map_blocks>`.

        Returns
        -------
        A Dask array

        Examples
        --------
        >>> import dask.array as da
        >>> x = np.array([1, 1, 2, 3, 3, 3, 2, 1, 1])
        >>> x = da.from_array(x, chunks=5)
        >>> def derivative(x):
        ...     return x - np.roll(x, 1)

        >>> y = x.map_overlap(derivative, depth=1, boundary=0)
        >>> y.compute()
        array([ 1,  0,  1,  1,  0,  0, -1, -1,  0])

        >>> import dask.array as da
        >>> x = np.arange(16).reshape((4, 4))
        >>> d = da.from_array(x, chunks=(2, 2))
        >>> d.map_overlap(lambda x: x + x.size, depth=1).compute()
        array([[16, 17, 18, 19],
               [20, 21, 22, 23],
               [24, 25, 26, 27],
               [28, 29, 30, 31]])

        >>> func = lambda x: x + x.size
        >>> depth = {0: 1, 1: 1}
        >>> boundary = {0: 'reflect', 1: 'none'}
        >>> d.map_overlap(func, depth, boundary).compute()  # doctest: +NORMALIZE_WHITESPACE
        array([[12,  13,  14,  15],
               [16,  17,  18,  19],
               [20,  21,  22,  23],
               [24,  25,  26,  27]])

        >>> x = np.arange(16).reshape((4, 4))
        >>> d = da.from_array(x, chunks=(2, 2))
        >>> y = d.map_overlap(lambda x: x + x[2], depth=1, meta=np.array(()))
        >>> y
        dask.array<_trim, shape=(4, 4), dtype=float64, chunksize=(2, 2), chunktype=numpy.ndarray>
        >>> y.compute()
        array([[ 4,  6,  8, 10],
               [ 8, 10, 12, 14],
               [20, 22, 24, 26],
               [24, 26, 28, 30]])

        >>> import cupy  # doctest: +SKIP
        >>> x = cupy.arange(16).reshape((5, 4))  # doctest: +SKIP
        >>> d = da.from_array(x, chunks=(2, 2))  # doctest: +SKIP
        >>> y = d.map_overlap(lambda x: x + x[2], depth=1, meta=cupy.array(()))  # doctest: +SKIP
        >>> y  # doctest: +SKIP
        dask.array<_trim, shape=(4, 4), dtype=float64, chunksize=(2, 2), chunktype=cupy.ndarray>
        >>> y.compute()  # doctest: +SKIP
        array([[ 4,  6,  8, 10],
               [ 8, 10, 12, 14],
               [20, 22, 24, 26],
               [24, 26, 28, 30]])
        """
        from .overlap import map_overlap

        return map_overlap(
            func, self, depth=depth, boundary=boundary, trim=trim, **kwargs
        )
@derived_from(np.ndarray)
def cumsum(self, axis, dtype=None, out=None, *, method="sequential"):
    """Dask added an additional keyword-only argument ``method``.

    method : {'sequential', 'blelloch'}, optional
        Strategy used to compute the cumulative sum. Defaults to 'sequential'.

        * 'sequential' folds the sum of every prior block into the current
          block, one block after another.
        * 'blelloch' is a work-efficient parallel scan: it first sums each
          block independently, then combines the partial sums via a binary
          tree. Depending on workload, scheduler, and hardware it may be
          faster or more memory efficient. More benchmarking is necessary.
    """
    # Delegate to the free function in ``reductions``; ``self`` is the array.
    from .reductions import cumsum as _cumsum

    return _cumsum(self, axis, dtype, out=out, method=method)
@derived_from(np.ndarray)
def cumprod(self, axis, dtype=None, out=None, *, method="sequential"):
    """Dask added an additional keyword-only argument ``method``.

    method : {'sequential', 'blelloch'}, optional
        Strategy used to compute the cumulative product. Defaults to
        'sequential'.

        * 'sequential' folds the product of every prior block into the
          current block, one block after another.
        * 'blelloch' is a work-efficient parallel scan: it first takes the
          product of each block independently, then combines the partial
          products via a binary tree. Depending on workload, scheduler, and
          hardware it may be faster or more memory efficient. More
          benchmarking is necessary.
    """
    # Delegate to the free function in ``reductions``; ``self`` is the array.
    from .reductions import cumprod as _cumprod

    return _cumprod(self, axis, dtype, out=out, method=method)
@derived_from(np.ndarray)
def squeeze(self, axis=None):
    # Implemented as a free function in ``routines``; delegate to it.
    from .routines import squeeze as _squeeze

    return _squeeze(self, axis)
def rechunk(
    self, chunks="auto", threshold=None, block_size_limit=None, balance=False
):
    """Rechunk this array; see :func:`da.rechunk` for the full docstring."""
    from . import rechunk  # avoid circular import

    rechunked = rechunk(self, chunks, threshold, block_size_limit, balance)
    return rechunked
@property
def real(self):
    # Element-wise real part, applied lazily through the ufunc machinery.
    from .ufunc import real as _real

    return _real(self)
@property
def imag(self):
    # Element-wise imaginary part, applied lazily through the ufunc machinery.
    from .ufunc import imag as _imag

    return _imag(self)
def conj(self):
    """Complex conjugate, element-wise (lazy)."""
    from .ufunc import conj as _conj

    return _conj(self)
@derived_from(np.ndarray)
def clip(self, min=None, max=None):
    # Delegate to the lazy ufunc-based implementation.
    from .ufunc import clip as _clip

    return _clip(self, min, max)
def view(self, dtype=None, order="C"):
    """Get a view of the array as a new data type

    Parameters
    ----------
    dtype:
        The dtype by which to view the array.
        The default, None, results in the view having the same data-type
        as the original array.
    order: string
        'C' or 'F' (Fortran) ordering

    This reinterprets the bytes of the array under a new dtype. If that
    dtype does not have the same size as the original array then the shape
    will change.

    Beware that both numpy and dask.array can behave oddly when taking
    shape-changing views of arrays under Fortran ordering. Under some
    versions of NumPy this function will fail when taking shape-changing
    views of Fortran ordered arrays if the first dimension has chunks of
    size one.
    """
    dtype = self.dtype if dtype is None else np.dtype(dtype)

    # Ratio of old itemsize to new itemsize: chunk sizes along the
    # contiguous axis scale by this factor so each block covers the same
    # bytes. ``ensure_int`` rejects chunk sizes that don't divide evenly.
    ratio = self.dtype.itemsize / dtype.itemsize

    if order == "C":
        # C order: the last axis is contiguous, so rescale its chunks.
        rescaled = tuple(ensure_int(c * ratio) for c in self.chunks[-1])
        chunks = self.chunks[:-1] + (rescaled,)
    elif order == "F":
        # Fortran order: the first axis is contiguous.
        rescaled = tuple(ensure_int(c * ratio) for c in self.chunks[0])
        chunks = (rescaled,) + self.chunks[1:]
    else:
        raise ValueError("Order must be one of 'C' or 'F'")

    # Positional ``dtype`` is an argument forwarded to ``chunk.view``;
    # keyword ``dtype=`` declares the output dtype of the new array.
    return self.map_blocks(
        chunk.view, dtype, order=order, dtype=dtype, chunks=chunks
    )
@derived_from(np.ndarray)
def swapaxes(self, axis1, axis2):
    # Delegate to the free function in ``routines``.
    from .routines import swapaxes as _swapaxes

    return _swapaxes(self, axis1, axis2)
@derived_from(np.ndarray)
def round(self, decimals=0):
    # Delegate to the free function in ``routines``.
    from .routines import round as _round

    return _round(self, decimals=decimals)
def copy(self):
    """
    Copy array. This is a no-op for dask.arrays, which are immutable
    """
    if self.npartitions == 1:
        # A single block: actually copy it so the underlying buffer is fresh.
        return self.map_blocks(M.copy)
    # Multiple blocks: sharing the immutable graph is safe, no data copied.
    return Array(self.dask, self.name, self.chunks, meta=self)
def __deepcopy__(self, memo):
    # Dask arrays are immutable, so ``copy`` is sufficient for deepcopy;
    # record the result in ``memo`` per the copy protocol.
    duplicate = self.copy()
    memo[id(self)] = duplicate
    return duplicate
def to_delayed(self, optimize_graph=True):
    """Convert into an array of :class:`dask.delayed.Delayed` objects, one per chunk.

    Parameters
    ----------
    optimize_graph : bool, optional
        If True [default], the graph is optimized before converting into
        :class:`dask.delayed.Delayed` objects.

    See Also
    --------
    dask.array.from_delayed
    """
    keys = self.__dask_keys__()
    graph = self.__dask_graph__()
    if optimize_graph:
        # Optimization returns a flat dict, so re-wrap it as a
        # HighLevelGraph under a fresh layer name.
        graph = self.__dask_optimize__(graph, keys)  # TODO, don't collapse graph
        layer_name = "delayed-" + self.name
        graph = HighLevelGraph.from_collections(layer_name, graph, dependencies=())
    # One Delayed per chunk, arranged in an object ndarray mirroring the blocks.
    delayeds = ndeepmap(self.ndim, lambda k: Delayed(k, graph), keys)
    return np.array(delayeds, dtype=object)
@derived_from(np.ndarray)
def repeat(self, repeats, axis=None):
    # Delegate to the free function in ``creation``.
    from .creation import repeat as _repeat

    return _repeat(self, repeats, axis=axis)
@derived_from(np.ndarray)
def nonzero(self):
    # Delegate to the free function in ``routines``.
    from .routines import nonzero as _nonzero

    return _nonzero(self)
def to_zarr(self, *args, **kwargs):
    """Save array to the zarr storage format

    See https://zarr.readthedocs.io for details about the format.

    See function :func:`dask.array.to_zarr` for parameters.
    """
    # Thin wrapper over the module-level ``to_zarr``.
    return to_zarr(self, *args, **kwargs)
def to_tiledb(self, uri, *args, **kwargs):
    """Save array to the TileDB storage manager

    See function :func:`dask.array.to_tiledb` for argument documentation.

    See https://docs.tiledb.io for details about the format and engine.
    """
    from .tiledb_io import to_tiledb as _to_tiledb

    return _to_tiledb(self, uri, *args, **kwargs)
def ensure_int(f):
    """Coerce ``f`` to an ``int``, raising if information would be lost.

    Raises
    ------
    ValueError
        If ``f`` is not integer-valued (e.g. ``2.5``).
    """
    as_int = int(f)
    if as_int != f:
        raise ValueError("Could not coerce %f to integer" % f)
    return as_int
def normalize_chunks(chunks, shape=None, limit=None, dtype=None, previous_chunks=None):
    """Normalize chunks to tuple of tuples

    This takes in a variety of input types and information and produces a full
    tuple-of-tuples result for chunks, suitable to be passed to Array or
    rechunk or any other operation that creates a Dask array.

    Parameters
    ----------
    chunks: tuple, int, dict, or string
        The chunks to be normalized.  See examples below for more details
    shape: Tuple[int]
        The shape of the array
    limit: int (optional)
        The maximum block size to target in bytes,
        if freedom is given to choose
    dtype: np.dtype
    previous_chunks: Tuple[Tuple[int]] optional
        Chunks from a previous array that we should use for inspiration when
        rechunking auto dimensions.  If not provided but auto-chunking exists
        then auto-dimensions will prefer square-like chunk shapes.

    Examples
    --------
    Specify uniform chunk sizes

    >>> from dask.array.core import normalize_chunks
    >>> normalize_chunks((2, 2), shape=(5, 6))
    ((2, 2, 1), (2, 2, 2))

    Also passes through fully explicit tuple-of-tuples

    >>> normalize_chunks(((2, 2, 1), (2, 2, 2)), shape=(5, 6))
    ((2, 2, 1), (2, 2, 2))

    Cleans up lists to tuples

    >>> normalize_chunks([[2, 2], [3, 3]])
    ((2, 2), (3, 3))

    Expands integer inputs 10 -> (10, 10)

    >>> normalize_chunks(10, shape=(30, 5))
    ((10, 10, 10), (5,))

    Expands dict inputs

    >>> normalize_chunks({0: 2, 1: 3}, shape=(6, 6))
    ((2, 2, 2), (3, 3))

    The values -1 and None get mapped to full size

    >>> normalize_chunks((5, -1), shape=(10, 10))
    ((5, 5), (10,))

    Use the value "auto" to automatically determine chunk sizes along certain
    dimensions.  This uses the ``limit=`` and ``dtype=`` keywords to
    determine how large to make the chunks.  The term "auto" can be used
    anywhere an integer can be used.  See array chunking documentation for more
    information.

    >>> normalize_chunks(("auto",), shape=(20,), limit=5, dtype='uint8')
    ((5, 5, 5, 5),)

    You can also use byte sizes (see :func:`dask.utils.parse_bytes`) in place of
    "auto" to ask for a particular size

    >>> normalize_chunks("1kiB", shape=(2000,), dtype='float32')
    ((250, 250, 250, 250, 250, 250, 250, 250),)

    Respects null dimensions

    >>> normalize_chunks((), shape=(0, 0))
    ((0,), (0,))
    """
    # Coerce a dtype-like (e.g. 'f8') to a real np.dtype up front.
    if dtype and not isinstance(dtype, np.dtype):
        dtype = np.dtype(dtype)
    if chunks is None:
        raise ValueError(CHUNKS_NONE_ERROR_MESSAGE)
    if isinstance(chunks, list):
        chunks = tuple(chunks)
    # A single number or string applies uniformly to every dimension.
    if isinstance(chunks, (Number, str)):
        chunks = (chunks,) * len(shape)
    # Dict form: missing axes fall through as None (full-size, handled below).
    if isinstance(chunks, dict):
        chunks = tuple(chunks.get(i, None) for i in range(len(shape)))
    if isinstance(chunks, np.ndarray):
        chunks = chunks.tolist()
    # Empty chunks for an all-zero shape expand to one zero-length chunk per axis.
    if not chunks and shape and all(s == 0 for s in shape):
        chunks = ((0,),) * len(shape)

    # Treat e.g. chunks=(2, 2, 1) for a 1-d array as one explicit chunking
    # tuple for that single dimension, not one chunk size per dimension.
    if (
        shape
        and len(shape) == 1
        and len(chunks) > 1
        and all(isinstance(c, (Number, str)) for c in chunks)
    ):
        chunks = (chunks,)

    if shape and len(chunks) != len(shape):
        raise ValueError(
            "Chunks and shape must be of the same length/dimension. "
            "Got chunks=%s, shape=%s" % (chunks, shape)
        )
    # -1 / None mean "the whole dimension in one chunk".
    if -1 in chunks or None in chunks:
        chunks = tuple(s if c == -1 or c is None else c for c, s in zip(chunks, shape))

    # If specifying chunk size in bytes, use that value to set the limit.
    # Verify there is only one consistent value of limit or chunk-bytes used.
    for c in chunks:
        if isinstance(c, str) and c != "auto":
            parsed = parse_bytes(c)
            if limit is None:
                limit = parsed
            elif parsed != limit:
                raise ValueError(
                    "Only one consistent value of limit or chunk is allowed."
                    "Used %s != %s" % (parsed, limit)
                )
    # Substitute byte limits with 'auto' now that limit is set.
    chunks = tuple("auto" if isinstance(c, str) and c != "auto" else c for c in chunks)

    # Resolve remaining 'auto' dimensions to concrete sizes.
    if any(c == "auto" for c in chunks):
        chunks = auto_chunks(chunks, shape, limit, dtype, previous_chunks)

    if shape is not None:
        chunks = tuple(c if c not in {None, -1} else s for c, s in zip(chunks, shape))

    # Expand each per-axis blocksize into an explicit tuple of block sizes;
    # already-explicit tuples/lists pass through unchanged.
    if chunks and shape is not None:
        chunks = sum(
            (
                blockdims_from_blockshape((s,), (c,))
                if not isinstance(c, (tuple, list))
                else (c,)
                for s, c in zip(shape, chunks)
            ),
            (),
        )
    for c in chunks:
        if not c:
            raise ValueError(
                "Empty tuples are not allowed in chunks. Express "
                "zero length dimensions with 0(s) in chunks"
            )

    # Final validation: dimensionality matches and chunk sums equal the shape
    # (nan chunk sizes are allowed through — they mean "unknown").
    if shape is not None:
        if len(chunks) != len(shape):
            raise ValueError(
                "Input array has %d dimensions but the supplied "
                "chunks has only %d dimensions" % (len(shape), len(chunks))
            )
        if not all(
            c == s or (math.isnan(c) or math.isnan(s))
            for c, s in zip(map(sum, chunks), shape)
        ):
            raise ValueError(
                "Chunks do not add up to shape. "
                "Got chunks=%s, shape=%s" % (chunks, shape)
            )

    # Normalize every known chunk size to a plain int, preserving nan markers.
    return tuple(tuple(int(x) if not math.isnan(x) else x for x in c) for c in chunks)
def _compute_multiplier(limit: int, dtype, largest_block: int, result):
"""
Utility function for auto_chunk, to fin how much larger or smaller the ideal
chunk size is relative to what we have now.
"""
return (
limit
/ dtype.itemsize
/ largest_block
/ np.prod(list(r if r != 0 else 1 for r in result.values()))
)
def auto_chunks(chunks, shape, limit, dtype, previous_chunks=None):
    """Determine automatic chunks

    This takes in a chunks value that contains ``"auto"`` values in certain
    dimensions and replaces those values with concrete dimension sizes that try
    to get chunks to be of a certain size in bytes, provided by the ``limit=``
    keyword.  If multiple dimensions are marked as ``"auto"`` then they will
    all respond to meet the desired byte limit, trying to respect the aspect
    ratio of their dimensions in ``previous_chunks=``, if given.

    Parameters
    ----------
    chunks: Tuple
        A tuple of either dimensions or tuples of explicit chunk dimensions
        Some entries should be "auto"
    shape: Tuple[int]
    limit: int, str
        The maximum allowable size of a chunk in bytes
    previous_chunks: Tuple[Tuple[int]]

    See also
    --------
    normalize_chunks: for full docstring and parameters
    """
    # Normalize previous_chunks so every axis entry is a tuple of block sizes.
    if previous_chunks is not None:
        previous_chunks = tuple(
            c if isinstance(c, tuple) else (c,) for c in previous_chunks
        )
    chunks = list(chunks)

    # Indices of the dimensions we are free to choose.
    autos = {i for i, c in enumerate(chunks) if c == "auto"}
    if not autos:
        return tuple(chunks)

    if limit is None:
        limit = config.get("array.chunk-size")
    if isinstance(limit, str):
        limit = parse_bytes(limit)

    if dtype is None:
        raise TypeError("DType must be known for auto-chunking")

    if dtype.hasobject:
        raise NotImplementedError(
            "Can not use auto rechunking with object dtype. "
            "We are unable to estimate the size in bytes of object data"
        )

    # Unknown (nan) chunk sizes make byte-size estimation impossible.
    for x in tuple(chunks) + tuple(shape):
        if (
            isinstance(x, Number)
            and np.isnan(x)
            or isinstance(x, tuple)
            and np.isnan(x).any()
        ):
            raise ValueError(
                "Can not perform automatic rechunking with unknown "
                "(nan) chunk sizes.%s" % unknown_chunk_message
            )

    limit = max(1, limit)

    # Byte budget already consumed by the non-auto dimensions (their largest
    # block along each axis).
    largest_block = np.prod(
        [cs if isinstance(cs, Number) else max(cs) for cs in chunks if cs != "auto"]
    )

    if previous_chunks:
        # Base ideal ratio on the median chunk size of the previous chunks
        result = {a: np.median(previous_chunks[a]) for a in autos}

        # Prefer the dominant ("mode") previous chunk size along each axis as
        # the alignment target, falling back to the full dimension size.
        ideal_shape = []
        for i, s in enumerate(shape):
            chunk_frequencies = frequencies(previous_chunks[i])
            mode, count = max(chunk_frequencies.items(), key=lambda kv: kv[1])
            if mode > 1 and count >= len(previous_chunks[i]) / 2:
                ideal_shape.append(mode)
            else:
                ideal_shape.append(s)

        # How much larger or smaller the ideal chunk size is relative to what we have now
        multiplier = _compute_multiplier(limit, dtype, largest_block, result)

        # Fixed-point iteration: grow/shrink the auto dimensions until the
        # proposal stops changing.
        last_multiplier = 0
        last_autos = set()
        while (
            multiplier != last_multiplier or autos != last_autos
        ):  # while things change
            last_multiplier = multiplier  # record previous values
            last_autos = set(autos)  # record previous values

            # Expand or contract each of the dimensions appropriately
            for a in sorted(autos):
                if ideal_shape[a] == 0:
                    result[a] = 0
                    continue
                proposed = result[a] * multiplier ** (1 / len(autos))
                if proposed > shape[a]:  # we've hit the shape boundary
                    autos.remove(a)
                    largest_block *= shape[a]
                    chunks[a] = shape[a]
                    del result[a]
                else:
                    result[a] = round_to(proposed, ideal_shape[a])

            # recompute how much multiplier we have left, repeat
            multiplier = _compute_multiplier(limit, dtype, largest_block, result)

        for k, v in result.items():
            chunks[k] = v
        return tuple(chunks)

    else:
        # No previous chunking to imitate: aim for square-like chunks, each
        # auto axis getting an equal share of the remaining byte budget.
        size = (limit / dtype.itemsize / largest_block) ** (1 / len(autos))
        small = [i for i in autos if shape[i] < size]
        if small:
            # Dimensions smaller than the target become a single full chunk;
            # redistribute the budget over the remaining auto dimensions.
            for i in small:
                chunks[i] = (shape[i],)
            return auto_chunks(chunks, shape, limit, dtype)

        for i in autos:
            chunks[i] = round_to(size, shape[i])

        return tuple(chunks)
def round_to(c, s):
    """Return a chunk dimension that is close to an even multiple or factor

    We want values for c that are nicely aligned with s.

    If c is smaller than s then we want the largest factor of s that is less than the
    desired chunk size, but not less than half, which is too much.  If no such
    factor exists then we just go with the original chunk size and accept an
    uneven chunk at the end.

    If c is larger than s then we want the largest multiple of s that is still
    smaller than c.
    """
    if c > s:
        # Largest multiple of s that does not exceed c.
        return c // s * s
    try:
        return max(f for f in factors(s) if c / 2 <= f <= c)
    except ValueError:  # no matching factors within factor of two
        return max(1, int(c))
def _get_chunk_shape(a):
s = np.asarray(a.shape, dtype=int)
return s[len(s) * (None,) + (slice(None),)]
def from_array(
    x,
    chunks="auto",
    name=None,
    lock=False,
    asarray=None,
    fancy=True,
    getitem=None,
    meta=None,
    inline_array=False,
):
    """Create dask array from something that looks like an array.

    Input must have a ``.shape``, ``.ndim``, ``.dtype`` and support
    numpy-style slicing.

    Parameters
    ----------
    x : array_like
    chunks : int, tuple
        How to chunk the array. Must be one of the following forms:

        - A blocksize like 1000.
        - A blockshape like (1000, 1000).
        - Explicit sizes of all blocks along all dimensions like
          ((1000, 1000, 500), (400, 400)).
        - A size in bytes, like "100 MiB" which will choose a uniform
          block-like shape
        - The word "auto" which acts like the above, but uses a configuration
          value ``array.chunk-size`` for the chunk size

        -1 or None as a blocksize indicate the size of the corresponding
        dimension.
    name : str or bool, optional
        The key name to use for the array. Defaults to a hash of ``x``
        (``name=None`` and ``name=True`` are equivalent). Hashing lets Dask
        recognise that arrays built from the same ``x`` are identical and
        avoid duplicate computation, but can be slow, and copies
        non-contiguous inputs for hashing. Use ``name=False`` to generate a
        random name instead of hashing, which avoids those costs at the
        price of no deduplication.

        .. note::

           Because this ``name`` is used as the key in task graphs, you
           should ensure that it uniquely identifies the data contained
           within. If you'd like a descriptive name that is still unique,
           combine it with :func:`dask.base.tokenize` of the ``array_like``.
    lock : bool or Lock, optional
        If ``x`` doesn't support concurrent reads then provide a lock here,
        or pass in True to have dask.array create one for you.
    asarray : bool, optional
        If True then call np.asarray on chunks to convert them to numpy
        arrays. If False then chunks are passed through unchanged. If None
        (default) then we use True if the ``__array_function__`` method is
        undefined.
    fancy : bool, optional
        If ``x`` doesn't support fancy indexing (e.g. indexing with lists
        or arrays) then set to False. Default is True.
    getitem : callable, optional
        Custom getter used to extract chunks from ``x``; by default chosen
        according to ``fancy``.
    meta : Array-like, optional
        The metadata for the resulting dask array. This is the kind of
        array that will result from slicing the input array. Defaults to
        the input array.
    inline_array : bool, default False
        How to include the array in the task graph. By default
        (``inline_array=False``) the array is included in a task by itself,
        and each chunk refers to that task by its key. With
        ``inline_array=True``, Dask instead inlines the array directly in
        the values of the task graph. ``inline_array=True`` may be the
        right choice when ``x`` is cheap to serialize and deserialize,
        since it is then included in the graph many times. This has no
        effect when ``x`` is a NumPy array, which is always sliced eagerly.

    Examples
    --------
    >>> x = h5py.File('...')['/data/path']  # doctest: +SKIP
    >>> a = da.from_array(x, chunks=(1000, 1000))  # doctest: +SKIP

    If your underlying datastore does not support concurrent reads then
    include the ``lock=True`` keyword argument or ``lock=mylock`` if you
    want multiple arrays to coordinate around the same lock.

    >>> a = da.from_array(x, chunks=(1000, 1000), lock=True)  # doctest: +SKIP

    If your underlying datastore has a ``.chunks`` attribute (as h5py and
    zarr datasets do) then a multiple of that chunk shape will be used if
    you do not provide a chunk shape.

    >>> a = da.from_array(x, chunks='auto')  # doctest: +SKIP
    >>> a = da.from_array(x, chunks='100 MiB')  # doctest: +SKIP
    >>> a = da.from_array(x)  # doctest: +SKIP

    Chunks with exactly-specified, different sizes can be created.

    >>> import numpy as np
    >>> import dask.array as da
    >>> x = np.random.random((100, 6))
    >>> a = da.from_array(x, chunks=((67, 33), (6,)))
    """
    if isinstance(x, Array):
        raise ValueError(
            "Array is already a dask array. Use 'asarray' or " "'rechunk' instead."
        )
    elif is_dask_collection(x):
        warnings.warn(
            "Passing an object to dask.array.from_array which is already a "
            "Dask collection. This can lead to unexpected behavior."
        )

    # Materialize scalars and plain Python sequences so they have
    # shape/dtype and support numpy slicing.
    if isinstance(x, (list, tuple, memoryview) + np.ScalarType):
        x = np.array(x)

    if asarray is None:
        asarray = not hasattr(x, "__array_function__")

    previous_chunks = getattr(x, "chunks", None)

    chunks = normalize_chunks(
        chunks, x.shape, dtype=x.dtype, previous_chunks=previous_chunks
    )

    if name in (None, True):
        token = tokenize(x, chunks)
        original_name = "array-original-" + token
        # BUG FIX: this previously read ``name = name or "array-" + token``,
        # which left ``name`` as the literal ``True`` when ``name=True`` was
        # passed (``True or x`` evaluates to ``True``), instead of the
        # documented hashed name. Always assign the hashed name here.
        name = "array-" + token
    elif name is False:
        original_name = name = "array-" + str(uuid.uuid1())
    else:
        original_name = name

    if lock is True:
        lock = SerializableLock()

    is_ndarray = type(x) is np.ndarray
    is_single_block = all(len(c) == 1 for c in chunks)
    # Always use the getter for h5py etc. Not using isinstance(x, np.ndarray)
    # because np.matrix is a subclass of np.ndarray.
    if is_ndarray and not is_single_block and not lock:
        # eagerly slice numpy arrays to prevent memory blowup
        # GH5367, GH5601
        slices = slices_from_chunks(chunks)
        keys = product([name], *(range(len(bds)) for bds in chunks))
        values = [x[slc] for slc in slices]
        dsk = dict(zip(keys, values))
    elif is_ndarray and is_single_block:
        # No slicing needed
        dsk = {(name,) + (0,) * x.ndim: x}
    else:
        if getitem is None:
            if fancy:
                getitem = getter
            else:
                getitem = getter_nofancy

        if inline_array:
            get_from = x
        else:
            get_from = original_name

        dsk = getem(
            get_from,
            chunks,
            getitem=getitem,
            shape=x.shape,
            out_name=name,
            lock=lock,
            asarray=asarray,
            dtype=x.dtype,
        )
        if not inline_array:
            dsk[original_name] = x

    # Workaround for TileDB, its indexing is 1-based,
    # and doesn't seems to support 0-length slicing
    if x.__class__.__module__.split(".")[0] == "tiledb" and hasattr(x, "_ctx_"):
        return Array(dsk, name, chunks, dtype=x.dtype)

    if meta is None:
        meta = x

    return Array(dsk, name, chunks, meta=meta, dtype=getattr(x, "dtype", None))
def from_zarr(
    url,
    component=None,
    storage_options=None,
    chunks=None,
    name=None,
    inline_array=False,
    **kwargs,
):
    """Load array from the zarr storage format

    See https://zarr.readthedocs.io for details about the format.

    Parameters
    ----------
    url: Zarr Array or str or MutableMapping
        Location of the data. A URL can include a protocol specifier like
        s3:// for remote data. Can also be any MutableMapping instance,
        which should be serializable if used in multiple processes.
    component: str or None
        If the location is a zarr group rather than an array, this is the
        subcomponent that should be loaded, something like ``'foo/bar'``.
    storage_options: dict
        Any additional parameters for the storage backend (ignored for
        local paths)
    chunks: tuple of ints or tuples of ints
        Passed to :func:`dask.array.from_array`, allows setting the chunks
        on initialisation, if the chunking scheme in the on-disc dataset is
        not optimal for the calculations to follow.
    name : str, optional
        An optional keyname for the array. Defaults to hashing the input
    kwargs:
        Passed to :class:`zarr.core.Array`.
    inline_array : bool, default False
        Whether to inline the zarr Array in the values of the task graph.
        See :meth:`dask.array.from_array` for an explanation.

    See Also
    --------
    from_array
    """
    import zarr

    storage_options = storage_options or {}
    if isinstance(url, zarr.Array):
        z = url
    else:
        # A string is a URL/path needing a mapper; anything else is assumed
        # to already be a (mutable) mapping.
        mapper = get_mapper(url, **storage_options) if isinstance(url, str) else url
        z = zarr.Array(mapper, read_only=True, path=component, **kwargs)

    if chunks is None:
        chunks = z.chunks
    if name is None:
        name = "from-zarr-" + tokenize(z, component, storage_options, chunks, **kwargs)
    return from_array(z, chunks, name=name, inline_array=inline_array)
def to_zarr(
    arr,
    url,
    component=None,
    storage_options=None,
    overwrite=False,
    compute=True,
    return_stored=False,
    **kwargs,
):
    """Save array to the zarr storage format

    See https://zarr.readthedocs.io for details about the format.

    Parameters
    ----------
    arr: dask.array
        Data to store
    url: Zarr Array or str or MutableMapping
        Location of the data. A URL can include a protocol specifier like
        s3:// for remote data. Can also be any MutableMapping instance,
        which should be serializable if used in multiple processes.
    component: str or None
        If the location is a zarr group rather than an array, this is the
        subcomponent that should be created/over-written.
    storage_options: dict
        Any additional parameters for the storage backend (ignored for
        local paths)
    overwrite: bool
        If given array already exists, overwrite=False will cause an error,
        where overwrite=True will replace the existing data.
    compute: bool
        See :func:`~dask.array.store` for more details.
    return_stored: bool
        See :func:`~dask.array.store` for more details.
    **kwargs:
        Passed to the :func:`zarr.creation.create` function, e.g.,
        compression options.

    Raises
    ------
    ValueError
        If ``arr`` has unknown chunk sizes, which is not supported by Zarr.

    See Also
    --------
    dask.array.store
    dask.array.Array.compute_chunk_sizes
    """
    import zarr

    # Zarr needs concrete chunk sizes to allocate the on-disk layout.
    if np.isnan(arr.shape).any():
        raise ValueError(
            "Saving a dask array with unknown chunk sizes is not "
            "currently supported by Zarr.%s" % unknown_chunk_message
        )

    if isinstance(url, zarr.Array):
        z = url
        # An in-memory Zarr store cannot be shared across worker processes.
        if isinstance(z.store, (dict, zarr.DictStore)) and "distributed" in config.get(
            "scheduler", ""
        ):
            raise RuntimeError(
                "Cannot store into in memory Zarr Array using "
                "the Distributed Scheduler."
            )
        aligned = arr.rechunk(z.chunks)
        return aligned.store(z, lock=False, compute=compute, return_stored=return_stored)

    if not _check_regular_chunks(arr.chunks):
        raise ValueError(
            "Attempt to save array to zarr with irregular "
            "chunking, please call `arr.rechunk(...)` first."
        )

    storage_options = storage_options or {}
    # A string is a URL/path needing a mapper; otherwise assume it already
    # is a mapper.
    mapper = get_mapper(url, **storage_options) if isinstance(url, str) else url

    # Regular chunking guarantees the first chunk along each axis is the
    # canonical block size.
    regular_chunks = [c[0] for c in arr.chunks]
    z = zarr.create(
        shape=arr.shape,
        chunks=regular_chunks,
        dtype=arr.dtype,
        store=mapper,
        path=component,
        overwrite=overwrite,
        **kwargs,
    )
    return arr.store(z, lock=False, compute=compute, return_stored=return_stored)
def _check_regular_chunks(chunkset):
"""Check if the chunks are regular
"Regular" in this context means that along every axis, the chunks all
have the same size, except the last one, which may be smaller
Parameters
----------
chunkset: tuple of tuples of ints
From the ``.chunks`` attribute of an ``Array``
Returns
-------
True if chunkset passes, else False
Examples
--------
>>> import dask.array as da
>>> arr = da.zeros(10, chunks=(5, ))
>>> _check_regular_chunks(arr.chunks)
True
>>> arr = da.zeros(10, chunks=((3, 3, 3, 1), ))
>>> _check_regular_chunks(arr.chunks)
True
>>> arr = da.zeros(10, chunks=((3, 1, 3, 3), ))
>>> _check_regular_chunks(arr.chunks)
False
"""
for chunks in chunkset:
if len(chunks) == 1:
continue
if len(set(chunks[:-1])) > 1:
return False
if chunks[-1] > chunks[0]:
return False
return True
def from_delayed(value, shape, dtype=None, meta=None, name=None):
    """Create a dask array from a dask delayed value

    This routine is useful for constructing dask arrays in an ad-hoc fashion
    using dask delayed, particularly when combined with stack and concatenate.

    The dask array will consist of a single chunk.

    Examples
    --------
    >>> import dask
    >>> import dask.array as da
    >>> import numpy as np
    >>> value = dask.delayed(np.ones)(5)
    >>> array = da.from_delayed(value, (5,), dtype=float)
    >>> array
    dask.array<from-value, shape=(5,), dtype=float64, chunksize=(5,), chunktype=numpy.ndarray>
    >>> array.compute()
    array([1., 1., 1., 1., 1.])
    """
    from ..delayed import Delayed, delayed

    # Wrap other dask objects that expose a ``key`` (e.g. Futures) in Delayed.
    if not isinstance(value, Delayed) and hasattr(value, "key"):
        value = delayed(value)

    name = name or "from-value-" + tokenize(value, shape, dtype, meta)
    # Single chunk spanning the whole shape, pointing at the delayed's key.
    layer = {(name,) + (0,) * len(shape): value.key}
    chunks = tuple((d,) for d in shape)
    # TODO: value._key may not be the name of the layer in value.dask
    # This should be fixed after we build full expression graphs
    graph = HighLevelGraph.from_collections(name, layer, dependencies=[value])
    return Array(graph, name, chunks, dtype=dtype, meta=meta)
def from_func(func, shape, dtype=None, name=None, args=(), kwargs=None):
    """Create dask array in a single block by calling a function

    Calling the provided function with func(*args, **kwargs) should return a
    NumPy array of the indicated shape and dtype.

    Parameters
    ----------
    func : callable
        Produces the block when called.
    shape : tuple of int
        Shape of the resulting (single-chunk) array.
    dtype : dtype, optional
    name : str, optional
        Key name; defaults to a hash of the inputs.
    args, kwargs :
        Extra positional/keyword arguments bound to ``func``.

    Examples
    --------
    >>> a = from_func(np.arange, (3,), dtype='i8', args=(3,))
    >>> a.compute()
    array([0, 1, 2])

    This works particularly well when coupled with dask.array functions like
    concatenate and stack:

    >>> arrays = [from_func(np.array, (), dtype='i8', args=(n,)) for n in range(5)]
    >>> stack(arrays).compute()
    array([0, 1, 2, 3, 4])
    """
    # ``kwargs=None`` replaces the mutable-default ``kwargs={}`` pitfall;
    # normalize before tokenizing so default-call graph keys are unchanged.
    if kwargs is None:
        kwargs = {}
    name = name or "from_func-" + tokenize(func, shape, dtype, args, kwargs)
    if args or kwargs:
        func = partial(func, *args, **kwargs)
    # One task producing the whole array as a single block.
    dsk = {(name,) + (0,) * len(shape): (func,)}
    chunks = tuple((i,) for i in shape)
    return Array(dsk, name, chunks, dtype)
def common_blockdim(blockdims):
    """Find the common block dimensions from the list of block dimensions

    Currently only implements the simplest possible heuristic: the common
    block-dimension is the only one that does not fully span a dimension.
    This is a conservative choice that allows us to avoid potentially very
    expensive rechunking.

    Assumes that each element of the input block dimensions has all the same
    sum (i.e., that they correspond to dimensions of the same size).

    Examples
    --------
    >>> common_blockdim([(3,), (2, 1)])
    (2, 1)
    >>> common_blockdim([(1, 2), (2, 1)])
    (1, 1, 1)
    >>> common_blockdim([(2, 2), (3, 1)])  # doctest: +SKIP
    Traceback (most recent call last):
        ...
    ValueError: Chunks do not align
    """
    if not any(blockdims):
        return ()
    # Chunkings that actually split the dimension into more than one block.
    non_trivial_dims = set([d for d in blockdims if len(d) > 1])
    if len(non_trivial_dims) == 1:
        # Exactly one real chunking: everyone can adopt it without rechunking.
        return first(non_trivial_dims)
    if len(non_trivial_dims) == 0:
        # Everything is a single block; keep the largest one.
        return max(blockdims, key=first)

    if np.isnan(sum(map(sum, blockdims))):
        raise ValueError(
            "Arrays' chunk sizes (%s) are unknown.\n\n"
            "A possible solution:\n"
            "  x.compute_chunk_sizes()" % blockdims
        )

    if len(set(map(sum, non_trivial_dims))) > 1:
        raise ValueError("Chunks do not add up to same value", blockdims)

    # We have multiple non-trivial chunks on this axis
    # e.g. (5, 2) and (4, 3)

    # We create a single chunk tuple with the same total length
    # that evenly divides both, e.g. (4, 1, 2)

    # To accomplish this we walk down all chunk tuples together, finding the
    # smallest element, adding it to the output, and subtracting it from all
    # other elements and remove the element itself.  We stop once we have
    # burned through all of the chunk tuples.
    # For efficiency's sake we reverse the lists so that we can pop off the end
    rchunks = [list(ntd)[::-1] for ntd in non_trivial_dims]
    total = sum(first(non_trivial_dims))
    i = 0

    out = []
    while i < total:
        # Emit the smallest current leading chunk, then consume it from all.
        m = min(c[-1] for c in rchunks)
        out.append(m)
        for c in rchunks:
            c[-1] -= m
            if c[-1] == 0:
                c.pop()
        i += m

    return tuple(out)
def unify_chunks(*args, **kwargs):
    """
    Unify chunks across a sequence of arrays
    This utility function is used within other common operations like
    :func:`dask.array.core.map_blocks` and :func:`dask.array.core.blockwise`.
    It is not commonly used by end-users directly.
    Parameters
    ----------
    *args: sequence of Array, index pairs
        Sequence like (x, 'ij', y, 'jk', z, 'i')
    warn : bool, optional (keyword-only, default True)
        Emit a PerformanceWarning when unification greatly increases the
        total number of chunks.
    Examples
    --------
    >>> import dask.array as da
    >>> x = da.ones(10, chunks=((5, 2, 3),))
    >>> y = da.ones(10, chunks=((2, 3, 5),))
    >>> chunkss, arrays = unify_chunks(x, 'i', y, 'i')
    >>> chunkss
    {'i': (2, 3, 2, 3)}
    >>> x = da.ones((100, 10), chunks=(20, 5))
    >>> y = da.ones((10, 100), chunks=(4, 50))
    >>> chunkss, arrays = unify_chunks(x, 'ij', y, 'jk', 'constant', None)
    >>> chunkss  # doctest: +SKIP
    {'k': (50, 50), 'i': (20, 20, 20, 20, 20), 'j': (4, 1, 3, 2)}
    >>> unify_chunks(0, None)
    ({}, [0])
    Returns
    -------
    chunkss : dict
        Map like {index: chunks}.
    arrays : list
        List of rechunked arrays.
    See Also
    --------
    common_blockdim
    """
    if not args:
        return {}, []
    # Pair each array with its index string; a ``None`` index marks a
    # constant argument that is passed through untouched.
    arginds = [
        (asanyarray(a) if ind is not None else a, ind) for a, ind in partition(2, args)
    ]  # [x, ij, y, jk]
    warn = kwargs.get("warn", True)
    arrays, inds = zip(*arginds)
    if all(ind is None for ind in inds):
        return {}, list(arrays)
    if all(ind == inds[0] for ind in inds) and all(
        a.chunks == arrays[0].chunks for a in arrays
    ):
        # Fast path: identical index strings and identical chunking mean
        # there is nothing to rechunk.
        return dict(zip(inds[0], arrays[0].chunks)), arrays
    nameinds = []
    blockdim_dict = dict()
    max_parts = 0
    for a, ind in arginds:
        if ind is not None:
            nameinds.append((a.name, ind))
            blockdim_dict[a.name] = a.chunks
            max_parts = max(max_parts, a.npartitions)
        else:
            nameinds.append((a, ind))
    # For every index label, consolidate the chunkings of all arrays that
    # share that label via common_blockdim.
    chunkss = broadcast_dimensions(nameinds, blockdim_dict, consolidate=common_blockdim)
    nparts = np.prod(list(map(len, chunkss.values())))
    if warn and nparts and nparts >= max_parts * 10:
        warnings.warn(
            "Increasing number of chunks by factor of %d" % (nparts / max_parts),
            PerformanceWarning,
            stacklevel=3,
        )
    arrays = []
    for a, i in arginds:
        if i is None:
            arrays.append(a)
        else:
            # Keep length-1 (broadcast) axes as-is; use None for axes whose
            # unified chunking is unknown (NaN sums).
            chunks = tuple(
                chunkss[j]
                if a.shape[n] > 1
                else a.shape[n]
                if not np.isnan(sum(chunkss[j]))
                else None
                for n, j in enumerate(i)
            )
            if chunks != a.chunks and all(a.chunks):
                arrays.append(a.rechunk(chunks))
            else:
                arrays.append(a)
    return chunkss, arrays
def unpack_singleton(x):
    """Descend through nested lists/tuples, returning the innermost first item.
    Stops descending as soon as the current value is not a list/tuple or has
    no first element.
    >>> unpack_singleton([[[[1]]]])
    1
    >>> unpack_singleton(np.array(np.datetime64('2000-01-01')))
    array('2000-01-01', dtype='datetime64[D]')
    """
    current = x
    while isinstance(current, (list, tuple)):
        try:
            current = current[0]
        except (IndexError, TypeError, KeyError):
            break
    return current
def block(arrays, allow_unknown_chunksizes=False):
    """
    Assemble an nd-array from nested lists of blocks.
    Blocks in the innermost lists are concatenated along the last
    dimension (-1), then these are concatenated along the second-last
    dimension (-2), and so on until the outermost list is reached
    Blocks can be of any dimension, but will not be broadcasted using the normal
    rules. Instead, leading axes of size 1 are inserted, to make ``block.ndim``
    the same for all blocks. This is primarily useful for working with scalars,
    and means that code like ``block([v, 1])`` is valid, where
    ``v.ndim == 1``.
    When the nested list is two levels deep, this allows block matrices to be
    constructed from their components.
    Parameters
    ----------
    arrays : nested list of array_like or scalars (but not tuples)
        If passed a single ndarray or scalar (a nested list of depth 0), this
        is returned unmodified (and not copied).
        Elements shapes must match along the appropriate axes (without
        broadcasting), but leading 1s will be prepended to the shape as
        necessary to make the dimensions match.
    allow_unknown_chunksizes: bool
        Allow unknown chunksizes, such as come from converting from dask
        dataframes. Dask.array is unable to verify that chunks line up. If
        data comes from differently aligned sources then this can cause
        unexpected results.
    Returns
    -------
    block_array : ndarray
        The array assembled from the given blocks.
        The dimensionality of the output is equal to the greatest of:
        * the dimensionality of all the inputs
        * the depth to which the input list is nested
    Raises
    ------
    ValueError
        * If list depths are mismatched - for instance, ``[[a, b], c]`` is
          illegal, and should be spelt ``[[a, b], [c]]``
        * If lists are empty - for instance, ``[[a, b], []]``
    See Also
    --------
    concatenate : Join a sequence of arrays together.
    stack : Stack arrays in sequence along a new dimension.
    hstack : Stack arrays in sequence horizontally (column wise).
    vstack : Stack arrays in sequence vertically (row wise).
    dstack : Stack arrays in sequence depth wise (along third dimension).
    vsplit : Split array into a list of multiple sub-arrays vertically.
    Notes
    -----
    When called with only scalars, ``block`` is equivalent to an ndarray
    call. So ``block([[1, 2], [3, 4]])`` is equivalent to
    ``array([[1, 2], [3, 4]])``.
    This function does not enforce that the blocks lie on a fixed grid.
    ``block([[a, b], [c, d]])`` is not restricted to arrays of the form::
        AAAbb
        AAAbb
        cccDD
    But is also allowed to produce, for some ``a, b, c, d``::
        AAAbb
        AAAbb
        cDDDD
    Since concatenation happens along the last axis first, `block` is _not_
    capable of producing the following directly::
        AAAbb
        cccbb
        cccDD
    Matlab's "square bracket stacking", ``[A, B, ...; p, q, ...]``, is
    equivalent to ``block([[A, B, ...], [p, q, ...]])``.
    """
    # This was copied almost verbatim from numpy.core.shape_base.block
    # See numpy license at https://github.com/numpy/numpy/blob/master/LICENSE.txt
    # or NUMPY_LICENSE.txt within this directory
    def atleast_nd(x, ndim):
        # Prepend length-1 axes until ``x`` has at least ``ndim`` dimensions.
        x = asanyarray(x)
        diff = max(ndim - x.ndim, 0)
        if diff == 0:
            return x
        else:
            return x[(None,) * diff + (Ellipsis,)]
    def format_index(index):
        # Human-readable path into the nested list, e.g. "arrays[0][1]".
        return "arrays" + "".join("[{}]".format(i) for i in index)
    rec = _Recurser(recurse_if=lambda x: type(x) is list)
    # ensure that the lists are all matched in depth
    list_ndim = None
    any_empty = False
    for index, value, entering in rec.walk(arrays):
        if type(value) is tuple:
            # not strictly necessary, but saves us from:
            # - more than one way to do things - no point treating tuples like
            # lists
            # - horribly confusing behaviour that results when tuples are
            # treated like ndarray
            raise TypeError(
                "{} is a tuple. "
                "Only lists can be used to arrange blocks, and np.block does "
                "not allow implicit conversion from tuple to ndarray.".format(
                    format_index(index)
                )
            )
        if not entering:
            # Leaf node: its nesting depth is the length of its index path.
            curr_depth = len(index)
        elif len(value) == 0:
            # Empty list: counts one level deeper; flagged and rejected below.
            curr_depth = len(index) + 1
            any_empty = True
        else:
            continue
        if list_ndim is not None and list_ndim != curr_depth:
            raise ValueError(
                "List depths are mismatched. First element was at depth {}, "
                "but there is an element at depth {} ({})".format(
                    list_ndim, curr_depth, format_index(index)
                )
            )
        list_ndim = curr_depth
    # do this here so we catch depth mismatches first
    if any_empty:
        raise ValueError("Lists cannot be empty")
    # convert all the arrays to ndarrays
    arrays = rec.map_reduce(arrays, f_map=asanyarray, f_reduce=list)
    # determine the maximum dimension of the elements
    elem_ndim = rec.map_reduce(arrays, f_map=lambda xi: xi.ndim, f_reduce=max)
    ndim = max(list_ndim, elem_ndim)
    # first axis to concatenate along
    first_axis = ndim - list_ndim
    # Make all the elements the same dimension
    arrays = rec.map_reduce(
        arrays, f_map=lambda xi: atleast_nd(xi, ndim), f_reduce=list
    )
    # concatenate innermost lists on the right, outermost on the left
    return rec.map_reduce(
        arrays,
        f_reduce=lambda xs, axis: concatenate(
            list(xs), axis=axis, allow_unknown_chunksizes=allow_unknown_chunksizes
        ),
        f_kwargs=lambda axis: dict(axis=(axis + 1)),
        axis=first_axis,
    )
def concatenate(seq, axis=0, allow_unknown_chunksizes=False):
    """
    Concatenate arrays along an existing axis
    Given a sequence of dask Arrays form a new dask Array by stacking them
    along an existing dimension (axis=0 by default)
    Parameters
    ----------
    seq: list of dask.arrays
    axis: int
        Dimension along which to align all of the arrays
    allow_unknown_chunksizes: bool
        Allow unknown chunksizes, such as come from converting from dask
        dataframes. Dask.array is unable to verify that chunks line up. If
        data comes from differently aligned sources then this can cause
        unexpected results.
    Examples
    --------
    Create slices
    >>> import dask.array as da
    >>> import numpy as np
    >>> data = [da.from_array(np.ones((4, 4)), chunks=(2, 2))
    ...         for i in range(3)]
    >>> x = da.concatenate(data, axis=0)
    >>> x.shape
    (12, 4)
    >>> da.concatenate(data, axis=1).shape
    (4, 12)
    Result is a new dask Array
    See Also
    --------
    stack
    """
    from . import wrap
    seq = [asarray(a, allow_unknown_chunksizes=allow_unknown_chunksizes) for a in seq]
    if not seq:
        raise ValueError("Need array(s) to concatenate")
    # Pick the concatenate implementation matching the highest-priority
    # backend among the inputs' meta arrays (e.g. numpy vs. sparse).
    seq_metas = [meta_from_array(s) for s in seq]
    _concatenate = concatenate_lookup.dispatch(
        type(max(seq_metas, key=lambda x: getattr(x, "__array_priority__", 0)))
    )
    meta = _concatenate(seq_metas, axis=axis)
    # Promote types to match meta
    seq = [a.astype(meta.dtype) for a in seq]
    # Find output array shape
    ndim = len(seq[0].shape)
    shape = tuple(
        sum((a.shape[i] for a in seq)) if i == axis else seq[0].shape[i]
        for i in range(ndim)
    )
    # Drop empty arrays
    seq2 = [a for a in seq if a.size]
    if not seq2:
        seq2 = seq
    if axis < 0:
        axis = ndim + axis
    if axis >= ndim:
        msg = (
            "Axis must be less than than number of dimensions"
            "\nData has %d dimensions, but got axis=%d"
        )
        raise ValueError(msg % (ndim, axis))
    n = len(seq2)
    if n == 0:
        try:
            return wrap.empty_like(meta, shape=shape, chunks=shape, dtype=meta.dtype)
        except TypeError:
            return wrap.empty(shape, chunks=shape, dtype=meta.dtype)
    elif n == 1:
        return seq2[0]
    if not allow_unknown_chunksizes and not all(
        i == axis or all(x.shape[i] == seq2[0].shape[i] for x in seq2)
        for i in range(ndim)
    ):
        if any(map(np.isnan, seq2[0].shape)):
            raise ValueError(
                "Tried to concatenate arrays with unknown"
                " shape %s.\n\nTwo solutions:\n"
                " 1. Force concatenation pass"
                " allow_unknown_chunksizes=True.\n"
                " 2. Compute shapes with "
                "[x.compute_chunk_sizes() for x in seq]" % str(seq2[0].shape)
            )
        raise ValueError("Shapes do not align: %s", [x.shape for x in seq2])
    # Unify chunks along every axis except ``axis`` itself: each array gets
    # a distinct negative label (-1, -2, ...) on the concatenation axis so
    # that axis is never rechunked.
    inds = [list(range(ndim)) for i in range(n)]
    for i, ind in enumerate(inds):
        ind[axis] = -(i + 1)
    uc_args = list(concat(zip(seq2, inds)))
    _, seq2 = unify_chunks(*uc_args, warn=False)
    bds = [a.chunks for a in seq2]
    chunks = (
        seq2[0].chunks[:axis]
        + (sum([bd[axis] for bd in bds], ()),)
        + seq2[0].chunks[axis + 1 :]
    )
    # cum_dims[i]: number of chunks along ``axis`` contributed by seq2[:i];
    # used below to map an output chunk index back to its source array.
    cum_dims = [0] + list(accumulate(add, [len(a.chunks[axis]) for a in seq2]))
    names = [a.name for a in seq2]
    name = "concatenate-" + tokenize(names, axis)
    keys = list(product([name], *[range(len(bd)) for bd in chunks]))
    # Each output key aliases the corresponding input chunk: look up the
    # source array with bisect, then shift the axis index into its range.
    values = [
        (names[bisect(cum_dims, key[axis + 1]) - 1],)
        + key[1 : axis + 1]
        + (key[axis + 1] - cum_dims[bisect(cum_dims, key[axis + 1]) - 1],)
        + key[axis + 2 :]
        for key in keys
    ]
    dsk = dict(zip(keys, values))
    graph = HighLevelGraph.from_collections(name, dsk, dependencies=seq2)
    return Array(graph, name, chunks, meta=meta)
def load_store_chunk(x, out, index, lock, return_stored, load_stored):
    """
    A function inserted in a Dask graph for storing a chunk.
    Parameters
    ----------
    x: array-like
        An array (potentially a NumPy one)
    out: array-like
        Where to store results too.
    index: slice-like
        Where to store result from ``x`` in ``out``.
    lock: Lock-like or False
        Lock to use before writing to ``out``.
    return_stored: bool
        Whether to return ``out``.
    load_stored: bool
        Whether to return the array stored in ``out``.
        Ignored if ``return_stored`` is not ``True``.
    Examples
    --------
    >>> a = np.ones((5, 6))
    >>> b = np.empty(a.shape)
    >>> load_store_chunk(a, b, (slice(None), slice(None)), False, False, False)
    """
    # When only the target is requested (not its loaded contents), we can
    # resolve the return value before taking the lock.
    if return_stored and not load_stored:
        result = out
    else:
        result = None
    if lock:
        lock.acquire()
    try:
        if x is not None:
            value = x if is_arraylike(x) else np.asanyarray(x)
            out[index] = value
        if return_stored and load_stored:
            # Read back the just-written region while still holding the lock.
            result = out[index]
    finally:
        if lock:
            lock.release()
    return result
def store_chunk(x, out, index, lock, return_stored):
    """Store ``x`` into ``out[index]``; see :func:`load_store_chunk`."""
    return load_store_chunk(x, out, index, lock, return_stored, False)
def load_chunk(out, index, lock):
    """Load and return ``out[index]``; see :func:`load_store_chunk`."""
    return load_store_chunk(None, out, index, lock, True, True)
def insert_to_ooc(
    arr, out, lock=True, region=None, return_stored=False, load_stored=False, tok=None
):
    """
    Creates a Dask graph for storing chunks from ``arr`` in ``out``.
    Parameters
    ----------
    arr: da.Array
        A dask array
    out: array-like
        Where to store results too.
    lock: Lock-like or bool, optional
        Whether to lock or with what (default is ``True``,
        which means a :class:`threading.Lock` instance).
    region: slice-like, optional
        Where in ``out`` to store ``arr``'s results
        (default is ``None``, meaning all of ``out``).
    return_stored: bool, optional
        Whether to return ``out``
        (default is ``False``, meaning ``None`` is returned).
    load_stored: bool, optional
        Whether to handling loading from ``out`` at the same time.
        Ignored if ``return_stored`` is not ``True``.
        (default is ``False``, meaning defer to ``return_stored``).
    tok: str, optional
        Token to use when naming keys
    Examples
    --------
    >>> import dask.array as da
    >>> d = da.ones((5, 6), chunks=(2, 3))
    >>> a = np.empty(d.shape)
    >>> insert_to_ooc(d, a)  # doctest: +SKIP
    """
    if lock is True:
        # One shared lock serializing all writes to ``out``.
        lock = Lock()
    slices = slices_from_chunks(arr.chunks)
    if region:
        # Shift every chunk's slice into the requested region of ``out``.
        slices = [fuse_slice(region, slc) for slc in slices]
    name = "store-%s" % (tok or str(uuid.uuid1()))
    func = store_chunk
    args = ()
    if return_stored and load_stored:
        # Store-and-load variant: same task shape plus a trailing
        # ``load_stored`` flag consumed by load_store_chunk.
        name = "load-%s" % name
        func = load_store_chunk
        args = args + (load_stored,)
    # One task per chunk of ``arr``; task layout is
    # (func, source_key, out, slice, lock, return_stored[, load_stored]).
    dsk = {
        (name,) + t[1:]: (func, t, out, slc, lock, return_stored) + args
        for t, slc in zip(core.flatten(arr.__dask_keys__()), slices)
    }
    return dsk
def retrieve_from_ooc(keys, dsk_pre, dsk_post=None):
    """
    Creates a Dask graph for loading stored ``keys`` from ``dsk``.
    Parameters
    ----------
    keys: Sequence
        A sequence containing Dask graph keys to load
    dsk_pre: Mapping
        A Dask graph corresponding to a Dask Array before computation
    dsk_post: Mapping, optional
        A Dask graph corresponding to a Dask Array after computation
    Examples
    --------
    >>> import dask.array as da
    >>> d = da.ones((5, 6), chunks=(2, 3))
    >>> a = np.empty(d.shape)
    >>> g = insert_to_ooc(d, a)
    >>> retrieve_from_ooc(g.keys(), g)  # doctest: +SKIP
    """
    if not dsk_post:
        # Without a post-computation graph, load from the key itself.
        dsk_post = {k: k for k in keys}
    # ``dsk_pre[k]`` is a store task built by insert_to_ooc, laid out as
    # (func, source_key, out, slice, lock, return_stored); slicing [3:-1]
    # extracts the (slice, lock) pair for load_chunk(out, index, lock).
    load_dsk = {
        ("load-" + k[0],) + k[1:]: (load_chunk, dsk_post[k]) + dsk_pre[k][3:-1]
        for k in keys
    }
    return load_dsk
def asarray(a, allow_unknown_chunksizes=False, **kwargs):
    """Convert the input to a dask array.
    Parameters
    ----------
    a : array-like
        Input data, in any form that can be converted to a dask array.
    allow_unknown_chunksizes: bool
        Allow unknown chunksizes, such as come from converting from dask
        dataframes. Dask.array is unable to verify that chunks line up. If
        data comes from differently aligned sources then this can cause
        unexpected results.
    Returns
    -------
    out : dask array
        Dask array interpretation of a.
    Examples
    --------
    >>> import dask.array as da
    >>> import numpy as np
    >>> x = np.arange(3)
    >>> da.asarray(x)
    dask.array<array, shape=(3,), dtype=int64, chunksize=(3,), chunktype=numpy.ndarray>
    >>> y = [[1, 2, 3], [4, 5, 6]]
    >>> da.asarray(y)
    dask.array<array, shape=(2, 3), dtype=int64, chunksize=(2, 3), chunktype=numpy.ndarray>
    """
    # Guard clauses handle the already-dask and dask-convertible cases.
    if isinstance(a, Array):
        return a
    if hasattr(a, "to_dask_array"):
        return a.to_dask_array()
    module_root = type(a).__module__.split(".")[0]
    if module_root == "xarray" and hasattr(a, "data"):
        return asarray(a.data)
    if isinstance(a, (list, tuple)) and any(isinstance(i, Array) for i in a):
        # A sequence mixing dask arrays: stack instead of materializing.
        return stack(a, allow_unknown_chunksizes=allow_unknown_chunksizes)
    if not isinstance(getattr(a, "shape", None), Iterable):
        a = np.asarray(a)
    return from_array(a, getitem=getter_inline, **kwargs)
def asanyarray(a):
    """Convert the input to a dask array.
    Subclasses of ``np.ndarray`` will be passed through as chunks unchanged.
    Parameters
    ----------
    a : array-like
        Input data, in any form that can be converted to a dask array.
    Returns
    -------
    out : dask array
        Dask array interpretation of a.
    Examples
    --------
    >>> import dask.array as da
    >>> import numpy as np
    >>> x = np.arange(3)
    >>> da.asanyarray(x)
    dask.array<array, shape=(3,), dtype=int64, chunksize=(3,), chunktype=numpy.ndarray>
    >>> y = [[1, 2, 3], [4, 5, 6]]
    >>> da.asanyarray(y)
    dask.array<array, shape=(2, 3), dtype=int64, chunksize=(2, 3), chunktype=numpy.ndarray>
    """
    # Guard clauses handle the already-dask and dask-convertible cases.
    if isinstance(a, Array):
        return a
    if hasattr(a, "to_dask_array"):
        return a.to_dask_array()
    module_root = type(a).__module__.split(".")[0]
    if module_root == "xarray" and hasattr(a, "data"):
        return asanyarray(a.data)
    if isinstance(a, (list, tuple)) and any(isinstance(i, Array) for i in a):
        # A sequence mixing dask arrays: stack instead of materializing.
        return stack(a)
    if not isinstance(getattr(a, "shape", None), Iterable):
        a = np.asanyarray(a)
    return from_array(a, chunks=a.shape, getitem=getter_inline, asarray=False)
def is_scalar_for_elemwise(arg):
    """
    >>> is_scalar_for_elemwise(42)
    True
    >>> is_scalar_for_elemwise('foo')
    True
    >>> is_scalar_for_elemwise(True)
    True
    >>> is_scalar_for_elemwise(np.array(42))
    True
    >>> is_scalar_for_elemwise([1, 2, 3])
    True
    >>> is_scalar_for_elemwise(np.array([1, 2, 3]))
    False
    >>> is_scalar_for_elemwise(from_array(np.array(0), chunks=()))
    False
    >>> is_scalar_for_elemwise(np.dtype('i4'))
    True
    """
    # Dask series/frames expose a "shape" containing dask collections;
    # treat those as scalars in elemwise, hence the second clause.
    maybe_shape = getattr(arg, "shape", None)
    shape_is_scalar_like = not isinstance(maybe_shape, Iterable) or any(
        is_dask_collection(dim) for dim in maybe_shape
    )
    if np.isscalar(arg) or isinstance(arg, np.dtype):
        return True
    if isinstance(arg, np.ndarray) and arg.ndim == 0:
        return True
    return shape_is_scalar_like
def broadcast_shapes(*shapes):
    """
    Determines output shape from broadcasting arrays.
    Parameters
    ----------
    shapes : tuples
        The shapes of the arguments.
    Returns
    -------
    output_shape : tuple
    Raises
    ------
    ValueError
        If the input shapes cannot be successfully broadcast together.
    """
    if len(shapes) == 1:
        return shapes[0]
    result = []
    # Walk the shapes from the trailing axis, padding short shapes with -1
    # (a wildcard that broadcasts against anything).
    for sizes in zip_longest(*map(reversed, shapes), fillvalue=-1):
        if np.isnan(sizes).any():
            dim = np.nan
        elif 0 in sizes:
            dim = 0
        else:
            dim = np.max(sizes)
        incompatible = [
            i for i in sizes if i not in [-1, 0, 1, dim] and not np.isnan(i)
        ]
        if incompatible:
            raise ValueError(
                "operands could not be broadcast together with "
                "shapes {0}".format(" ".join(map(str, shapes)))
            )
        result.append(dim)
    return tuple(reversed(result))
def elemwise(op, *args, **kwargs):
    """Apply elementwise function across arguments
    Respects broadcasting rules
    Examples
    --------
    >>> elemwise(add, x, y)  # doctest: +SKIP
    >>> elemwise(sin, x)  # doctest: +SKIP
    See Also
    --------
    blockwise
    """
    out = kwargs.pop("out", None)
    if not set(["name", "dtype"]).issuperset(kwargs):
        msg = "%s does not take the following keyword arguments %s"
        raise TypeError(
            msg % (op.__name__, str(sorted(set(kwargs) - set(["name", "dtype"]))))
        )
    args = [np.asarray(a) if isinstance(a, (list, tuple)) else a for a in args]
    shapes = []
    for arg in args:
        shape = getattr(arg, "shape", ())
        if any(is_dask_collection(x) for x in shape):
            # Want to exclude Delayed shapes and dd.Scalar
            shape = ()
        shapes.append(shape)
    shapes = [s if isinstance(s, Iterable) else () for s in shapes]
    out_ndim = len(
        broadcast_shapes(*shapes)
    )  # Raises ValueError if dimensions mismatch
    # blockwise index labels, highest dimension first (reversed range).
    expr_inds = tuple(range(out_ndim))[::-1]
    need_enforce_dtype = False
    if "dtype" in kwargs:
        dt = kwargs["dtype"]
    else:
        # We follow NumPy's rules for dtype promotion, which special cases
        # scalars and 0d ndarrays (which it considers equivalent) by using
        # their values to compute the result dtype:
        # https://github.com/numpy/numpy/issues/6240
        # We don't inspect the values of 0d dask arrays, because these could
        # hold potentially very expensive calculations. Instead, we treat
        # them just like other arrays, and if necessary cast the result of op
        # to match.
        vals = [
            np.empty((1,) * max(1, a.ndim), dtype=a.dtype)
            if not is_scalar_for_elemwise(a)
            else a
            for a in args
        ]
        try:
            dt = apply_infer_dtype(op, vals, {}, "elemwise", suggest_dtype=False)
        except Exception:
            return NotImplemented
        # If any 0d dask array was promoted to 1d above, the inferred dtype
        # may disagree with what ``op`` actually produces; force a cast.
        need_enforce_dtype = any(
            not is_scalar_for_elemwise(a) and a.ndim == 0 for a in args
        )
    name = kwargs.get("name", None) or "%s-%s" % (funcname(op), tokenize(op, dt, *args))
    blockwise_kwargs = dict(dtype=dt, name=name, token=funcname(op).strip("_"))
    if need_enforce_dtype:
        # Wrap ``op`` so each block's result is cast to the inferred dtype.
        blockwise_kwargs["enforce_dtype"] = dt
        blockwise_kwargs["enforce_dtype_function"] = op
        op = _enforce_dtype
    result = blockwise(
        op,
        expr_inds,
        *concat(
            (a, tuple(range(a.ndim)[::-1]) if not is_scalar_for_elemwise(a) else None)
            for a in args
        ),
        **blockwise_kwargs,
    )
    return handle_out(out, result)
def handle_out(out, result):
    """Handle out parameters
    If out is a dask.array then this overwrites the contents of that array with
    the result.
    Parameters
    ----------
    out : Array, 1-tuple of Array, or None
        Destination array, following NumPy ufunc conventions.
    result : Array
        Computed result whose graph, meta, chunks and name are copied into
        ``out``.
    Returns
    -------
    Array
        ``out`` (mutated in place) when provided, otherwise ``result``.
    Raises
    ------
    ValueError
        If ``out`` and ``result`` have mismatched shapes.
    NotImplementedError
        If ``out`` is a tuple of several values or not a dask Array.
    """
    # NumPy-style ufunc machinery passes ``out`` as a 1-tuple; unwrap it.
    if isinstance(out, tuple):
        if len(out) == 1:
            out = out[0]
        elif len(out) > 1:
            raise NotImplementedError("The out parameter is not fully supported")
        else:
            out = None
    if isinstance(out, Array):
        if out.shape != result.shape:
            raise ValueError(
                "Mismatched shapes between result and out parameter. "
                "out=%s, result=%s" % (str(out.shape), str(result.shape))
            )
        out._chunks = result.chunks
        out.dask = result.dask
        out._meta = result._meta
        out._name = result.name
        # Fix: previously fell through and implicitly returned None here.
        # NumPy convention is that ``out`` is returned when it is supplied.
        return out
    elif out is not None:
        msg = (
            "The out parameter is not fully supported."
            " Received type %s, expected Dask Array" % type(out).__name__
        )
        raise NotImplementedError(msg)
    else:
        return result
def _enforce_dtype(*args, **kwargs):
"""Calls a function and converts its result to the given dtype.
The parameters have deliberately been given unwieldy names to avoid
clashes with keyword arguments consumed by blockwise
A dtype of `object` is treated as a special case and not enforced,
because it is used as a dummy value in some places when the result will
not be a block in an Array.
Parameters
----------
enforce_dtype : dtype
Result dtype
enforce_dtype_function : callable
The wrapped function, which will be passed the remaining arguments
"""
dtype = kwargs.pop("enforce_dtype")
function = kwargs.pop("enforce_dtype_function")
result = function(*args, **kwargs)
if hasattr(result, "dtype") and dtype != result.dtype and dtype != object:
if not np.can_cast(result, dtype, casting="same_kind"):
raise ValueError(
"Inferred dtype from function %r was %r "
"but got %r, which can't be cast using "
"casting='same_kind'"
% (funcname(function), str(dtype), str(result.dtype))
)
if np.isscalar(result):
# scalar astype method doesn't take the keyword arguments, so
# have to convert via 0-dimensional array and back.
result = result.astype(dtype)
else:
try:
result = result.astype(dtype, copy=False)
except TypeError:
# Missing copy kwarg
result = result.astype(dtype)
return result
def broadcast_to(x, shape, chunks=None, meta=None):
    """Broadcast an array to a new shape.
    Parameters
    ----------
    x : array_like
        The array to broadcast.
    shape : tuple
        The shape of the desired array.
    chunks : tuple, optional
        If provided, then the result will use these chunks instead of the same
        chunks as the source array. Setting chunks explicitly as part of
        broadcast_to is more efficient than rechunking afterwards. Chunks are
        only allowed to differ from the original shape along dimensions that
        are new on the result or have size 1 the input array.
    meta : empty ndarray
        empty ndarray created with same NumPy backend, ndim and dtype as the
        Dask Array being created (overrides dtype)
    Returns
    -------
    broadcast : dask array
    See Also
    --------
    :func:`numpy.broadcast_to`
    """
    x = asarray(x)
    shape = tuple(shape)
    if meta is None:
        meta = meta_from_array(x)
    if x.shape == shape and (chunks is None or chunks == x.chunks):
        # Already the requested shape and chunking; nothing to do.
        return x
    # Number of new leading dimensions introduced by the broadcast.
    ndim_new = len(shape) - x.ndim
    if ndim_new < 0 or any(
        new != old for new, old in zip(shape[ndim_new:], x.shape) if old != 1
    ):
        raise ValueError("cannot broadcast shape %s to shape %s" % (x.shape, shape))
    if chunks is None:
        # New leading axes get one chunk each; size-1 input axes expand to a
        # single chunk of the broadcast size; other axes keep their chunks.
        chunks = tuple((s,) for s in shape[:ndim_new]) + tuple(
            bd if old > 1 else (new,)
            for bd, old, new in zip(x.chunks, x.shape, shape[ndim_new:])
        )
    else:
        chunks = normalize_chunks(
            chunks, shape, dtype=x.dtype, previous_chunks=x.chunks
        )
        # Explicit chunks may only differ along new or size-1 dimensions.
        for old_bd, new_bd in zip(x.chunks, chunks[ndim_new:]):
            if old_bd != new_bd and old_bd != (1,):
                raise ValueError(
                    "cannot broadcast chunks %s to chunks %s: "
                    "new chunks must either be along a new "
                    "dimension or a dimension of size 1" % (x.chunks, chunks)
                )
    name = "broadcast_to-" + tokenize(x, shape, chunks)
    dsk = {}
    enumerated_chunks = product(*(enumerate(bds) for bds in chunks))
    for new_index, chunk_shape in (zip(*ec) for ec in enumerated_chunks):
        # Each output chunk reads from the source chunk at the same trailing
        # index, collapsing broadcast (size-1) axes to index 0.
        old_index = tuple(
            0 if bd == (1,) else i for bd, i in zip(x.chunks, new_index[ndim_new:])
        )
        old_key = (x.name,) + old_index
        new_key = (name,) + new_index
        dsk[new_key] = (np.broadcast_to, old_key, quote(chunk_shape))
    graph = HighLevelGraph.from_collections(name, dsk, dependencies=[x])
    return Array(graph, name, chunks, dtype=x.dtype, meta=meta)
@derived_from(np)
def broadcast_arrays(*args, **kwargs):
    # ``subok`` mirrors numpy: preserve ndarray subclasses when True.
    preserve_subclasses = bool(kwargs.pop("subok", False))
    convert = asanyarray if preserve_subclasses else asarray
    arrays = tuple(convert(e) for e in args)
    if kwargs:
        raise TypeError("unsupported keyword argument(s) provided")
    # Unify uneven chunking across all inputs, labelling axes from the
    # trailing dimension so shared axes line up for broadcasting.
    axis_labels = [list(reversed(range(a.ndim))) for a in arrays]
    _, arrays = unify_chunks(*concat(zip(arrays, axis_labels)), warn=False)
    target_shape = broadcast_shapes(*(a.shape for a in arrays))
    target_chunks = broadcast_chunks(*(a.chunks for a in arrays))
    return [
        broadcast_to(a, shape=target_shape, chunks=target_chunks) for a in arrays
    ]
def offset_func(func, offset, *args):
    """Wrap ``func`` so every positional argument is shifted by ``offset``.
    >>> double = lambda x: x * 2
    >>> f = offset_func(double, (10,))
    >>> f(1)
    22
    >>> f(300)
    620
    """
    def _offset(*inner_args):
        shifted = [arg + off for arg, off in zip(inner_args, offset)]
        return func(*shifted)
    # Best-effort rename for nicer debugging output; some callables have
    # no usable __name__, in which case we silently keep the default.
    try:
        _offset.__name__ = "offset_" + func.__name__
    except Exception:
        pass
    return _offset
def chunks_from_arrays(arrays):
    """Chunks tuple from nested list of arrays
    >>> x = np.array([1, 2])
    >>> chunks_from_arrays([x, x])
    ((2, 2),)
    >>> x = np.array([[1, 2]])
    >>> chunks_from_arrays([[x], [x]])
    ((1, 1), (2,))
    >>> x = np.array([[1, 2]])
    >>> chunks_from_arrays([[x, x]])
    ((1,), (2, 2))
    >>> chunks_from_arrays([1, 1])
    ((1, 1),)
    """
    if not arrays:
        return ()
    def _extent(x, dim):
        # Size of ``x`` along ``dim``; scalars count as length 1.
        try:
            return x.shape[dim]
        except AttributeError:
            return (1,)[dim]
    chunks = []
    depth = 0
    level = arrays
    while isinstance(level, (list, tuple)):
        sizes = []
        for entry in level:
            # Descend to the first leaf under this entry (inlined deepfirst).
            leaf = entry
            while isinstance(leaf, (list, tuple)):
                leaf = leaf[0]
            sizes.append(_extent(leaf, depth))
        chunks.append(tuple(sizes))
        level = level[0]
        depth += 1
    return tuple(chunks)
def deepfirst(seq):
    """First element in a nested list
    >>> deepfirst([[[1, 2], [3, 4]], [5, 6], [7, 8]])
    1
    """
    # Iterative descent: keep taking the first item until we hit a leaf.
    current = seq
    while isinstance(current, (list, tuple)):
        current = current[0]
    return current
def shapelist(a):
    """Get the shape of nested list"""
    # Walk down the first element of each level, collecting lengths.
    dims = []
    node = a
    while type(node) is list:
        dims.append(len(node))
        node = node[0]
    return tuple(dims)
def reshapelist(shape, seq):
    """Reshape iterator to nested shape
    >>> reshapelist((2, 3), range(6))
    [[0, 1, 2], [3, 4, 5]]
    """
    if len(shape) == 1:
        return list(seq)
    # Split ``seq`` into shape[0] equal slices and recurse on each.
    items = list(seq)
    step = int(len(items) / shape[0])
    return [
        reshapelist(shape[1:], items[start : start + step])
        for start in range(0, len(items), step)
    ]
def transposelist(arrays, axes, extradims=0):
    """Permute axes of nested list
    >>> transposelist([[1,1,1],[1,1,1]], [2,1])
    [[[1, 1], [1, 1], [1, 1]]]
    >>> transposelist([[1,1,1],[1,1,1]], [2,1], extradims=1)
    [[[[1], [1]], [[1], [1]], [[1], [1]]]]
    """
    if ndimlist(arrays) != len(axes):
        raise ValueError("Length of axes should equal depth of nested arrays")
    if extradims < 0:
        raise ValueError("`newdims` should be positive")
    if len(set(axes)) < len(axes):
        raise ValueError("`axes` should be unique")
    total_ndim = max(axes) + 1 + extradims
    old_shape = shapelist(arrays)
    # Output extent per dimension: the matching input extent where the
    # dimension appears in ``axes``, otherwise a fresh length-1 dimension.
    target_shape = []
    for i in range(total_ndim):
        if i in axes:
            target_shape.append(old_shape[axes.index(i)])
        else:
            target_shape.append(1)
    flat = list(core.flatten(arrays))
    return reshapelist(target_shape, flat)
def stack(seq, axis=0, allow_unknown_chunksizes=False):
    """
    Stack arrays along a new axis
    Given a sequence of dask arrays, form a new dask array by stacking them
    along a new dimension (axis=0 by default)
    Parameters
    ----------
    seq: list of dask.arrays
    axis: int
        Dimension along which to align all of the arrays
    allow_unknown_chunksizes: bool
        Allow unknown chunksizes, such as come from converting from dask
        dataframes. Dask.array is unable to verify that chunks line up. If
        data comes from differently aligned sources then this can cause
        unexpected results.
    Examples
    --------
    Create slices
    >>> import dask.array as da
    >>> import numpy as np
    >>> data = [da.from_array(np.ones((4, 4)), chunks=(2, 2))
    ...         for i in range(3)]
    >>> x = da.stack(data, axis=0)
    >>> x.shape
    (3, 4, 4)
    >>> da.stack(data, axis=1).shape
    (4, 3, 4)
    >>> da.stack(data, axis=-1).shape
    (4, 4, 3)
    Result is a new dask Array
    See Also
    --------
    concatenate
    """
    from . import wrap
    seq = [asarray(a, allow_unknown_chunksizes=allow_unknown_chunksizes) for a in seq]
    if not seq:
        raise ValueError("Need array(s) to stack")
    if not allow_unknown_chunksizes and not all(x.shape == seq[0].shape for x in seq):
        # Report the first offending array (1-based position).
        idx = first(i for i in enumerate(seq) if i[1].shape != seq[0].shape)
        raise ValueError(
            "Stacked arrays must have the same shape. "
            "The first array had shape {0}, while array "
            "{1} has shape {2}.".format(seq[0].shape, idx[0] + 1, idx[1].shape)
        )
    meta = np.stack([meta_from_array(a) for a in seq], axis=axis)
    seq = [x.astype(meta.dtype) for x in seq]
    # ndim of the inputs (the output has one extra dimension).
    ndim = meta.ndim - 1
    if axis < 0:
        axis = ndim + axis + 1
    shape = tuple(
        len(seq)
        if i == axis
        else (seq[0].shape[i] if i < axis else seq[0].shape[i - 1])
        for i in range(meta.ndim)
    )
    # Drop empty arrays (unless everything is empty).
    seq2 = [a for a in seq if a.size]
    if not seq2:
        seq2 = seq
    n = len(seq2)
    if n == 0:
        try:
            return wrap.empty_like(meta, shape=shape, chunks=shape, dtype=meta.dtype)
        except TypeError:
            return wrap.empty(shape, chunks=shape, dtype=meta.dtype)
    # Give every array the same axis labels so unify_chunks aligns all axes.
    ind = list(range(ndim))
    uc_args = list(concat((x, ind) for x in seq2))
    _, seq2 = unify_chunks(*uc_args)
    assert len(set(a.chunks for a in seq2)) == 1  # same chunks
    # The new axis consists of n chunks of size 1, one per stacked array.
    chunks = seq2[0].chunks[:axis] + ((1,) * n,) + seq2[0].chunks[axis:]
    names = [a.name for a in seq2]
    name = "stack-" + tokenize(names, axis)
    keys = list(product([name], *[range(len(bd)) for bd in chunks]))
    # key[axis + 1] selects which input array the chunk comes from; the
    # remaining indices address the chunk within that array.
    inputs = [
        (names[key[axis + 1]],) + key[1 : axis + 1] + key[axis + 2 :] for key in keys
    ]
    # Each task slices the input chunk with ``None`` at ``axis`` to insert
    # the new length-1 dimension.
    values = [
        (
            getitem,
            inp,
            (slice(None, None, None),) * axis
            + (None,)
            + (slice(None, None, None),) * (ndim - axis),
        )
        for inp in inputs
    ]
    layer = dict(zip(keys, values))
    graph = HighLevelGraph.from_collections(name, layer, dependencies=seq2)
    return Array(graph, name, chunks, meta=meta)
def concatenate3(arrays):
    """Recursive np.concatenate
    Input should be a nested list of numpy arrays arranged in the order they
    should appear in the array itself. Each array should have the same number
    of dimensions as the desired output and the nesting of the lists.
    >>> x = np.array([[1, 2]])
    >>> concatenate3([[x, x, x], [x, x, x]])
    array([[1, 2, 1, 2, 1, 2],
           [1, 2, 1, 2, 1, 2]])
    >>> concatenate3([[x, x], [x, x], [x, x]])
    array([[1, 2, 1, 2],
           [1, 2, 1, 2],
           [1, 2, 1, 2]])
    """
    from .utils import IS_NEP18_ACTIVE
    # We need this as __array_function__ may not exist on older NumPy versions.
    # And to reduce verbosity.
    NDARRAY_ARRAY_FUNCTION = getattr(np.ndarray, "__array_function__", None)
    arrays = concrete(arrays)
    if not arrays:
        return np.empty(0)
    # Highest-priority element decides which backend's concatenate to use.
    advanced = max(
        core.flatten(arrays, container=(list, tuple)),
        key=lambda x: getattr(x, "__array_priority__", 0),
    )
    if IS_NEP18_ACTIVE and not all(
        NDARRAY_ARRAY_FUNCTION
        is getattr(type(arr), "__array_function__", NDARRAY_ARRAY_FUNCTION)
        for arr in core.flatten(arrays, container=(list, tuple))
    ):
        # Some element overrides __array_function__ (NEP 18): defer to the
        # generic pairwise concatenation helper instead of the fast path.
        try:
            x = unpack_singleton(arrays)
            return _concatenate2(arrays, axes=tuple(range(x.ndim)))
        except TypeError:
            pass
    if concatenate_lookup.dispatch(type(advanced)) is not np.concatenate:
        # Non-numpy backend (e.g. sparse): use its registered concatenate.
        x = unpack_singleton(arrays)
        return _concatenate2(arrays, axes=list(range(x.ndim)))
    ndim = ndimlist(arrays)
    if not ndim:
        return arrays
    # Fast path: allocate the output once and copy each block into place.
    chunks = chunks_from_arrays(arrays)
    shape = tuple(map(sum, chunks))
    def dtype(x):
        try:
            return x.dtype
        except AttributeError:
            return type(x)
    result = np.empty(shape=shape, dtype=dtype(deepfirst(arrays)))
    for (idx, arr) in zip(
        slices_from_chunks(chunks), core.flatten(arrays, container=(list, tuple))
    ):
        if hasattr(arr, "ndim"):
            while arr.ndim < ndim:
                # Pad with leading length-1 axes to match the output rank.
                arr = arr[None, ...]
        result[idx] = arr
    return result
def concatenate_axes(arrays, axes):
    """Recursively call np.concatenate along axes"""
    depth = ndimlist(arrays)
    if depth != len(axes):
        raise ValueError("Length of axes should equal depth of nested arrays")
    # Number of trailing dimensions of the leaves that are not concatenated.
    trailing = deepfirst(arrays).ndim - (max(axes) + 1)
    if trailing < 0:
        trailing = 0
    return concatenate3(transposelist(arrays, axes, extradims=trailing))
def to_hdf5(filename, *args, **kwargs):
    """Store arrays in HDF5 file

    This saves several dask arrays into several datapaths in an HDF5 file.
    It creates the necessary datasets and handles clean file opening/closing.

    >>> da.to_hdf5('myfile.hdf5', '/x', x)  # doctest: +SKIP

    or

    >>> da.to_hdf5('myfile.hdf5', {'/x': x, '/y': y})  # doctest: +SKIP

    Optionally provide arguments as though to ``h5py.File.create_dataset``

    >>> da.to_hdf5('myfile.hdf5', '/x', x, compression='lzf', shuffle=True)  # doctest: +SKIP

    This can also be used as a method on a single Array

    >>> x.to_hdf5('myfile.hdf5', '/x')  # doctest: +SKIP

    See Also
    --------
    da.store
    h5py.File.create_dataset
    """
    # Normalize the two accepted call forms into a {datapath: array} dict.
    if len(args) == 2 and isinstance(args[0], str) and isinstance(args[1], Array):
        data = {args[0]: args[1]}
    elif len(args) == 1 and isinstance(args[0], dict):
        data = args[0]
    else:
        raise ValueError("Please provide {'/data/path': array} dictionary")

    chunks = kwargs.pop("chunks", True)

    import h5py

    with h5py.File(filename, mode="a") as f:
        dsets = []
        for dp, x in data.items():
            # chunks=True mirrors the first chunk along each dask dimension.
            hdf_chunks = tuple(c[0] for c in x.chunks) if chunks is True else chunks
            dsets.append(
                f.require_dataset(
                    dp,
                    shape=x.shape,
                    dtype=x.dtype,
                    chunks=hdf_chunks,
                    **kwargs,
                )
            )
        # Write all arrays while the file handle is still open.
        store(list(data.values()), dsets)
def interleave_none(a, b):
    """Fill the ``None`` slots of ``a`` with successive items of ``b``.

    >>> interleave_none([0, None, 2, None], [1, 3])
    (0, 1, 2, 3)
    """
    out = []
    i = j = 0
    total = len(a) + len(b)
    while i + j < total:
        item = a[i]
        i += 1
        if item is None:
            # Consume the placeholder from ``a`` and take the next from ``b``.
            out.append(b[j])
            j += 1
        else:
            out.append(item)
    return tuple(out)
def keyname(name, i, okey):
    """Build a task key, dropping the ``None`` placeholders from ``okey``.

    >>> keyname('x', 3, [None, None, 0, 2])
    ('x', 3, 0, 2)
    """
    parts = [name, i]
    for k in okey:
        if k is not None:
            parts.append(k)
    return tuple(parts)
def _vindex(x, *indexes):
    """Point wise indexing with broadcasting.

    >>> x = np.arange(56).reshape((7, 8))
    >>> x
    array([[ 0,  1,  2,  3,  4,  5,  6,  7],
           [ 8,  9, 10, 11, 12, 13, 14, 15],
           [16, 17, 18, 19, 20, 21, 22, 23],
           [24, 25, 26, 27, 28, 29, 30, 31],
           [32, 33, 34, 35, 36, 37, 38, 39],
           [40, 41, 42, 43, 44, 45, 46, 47],
           [48, 49, 50, 51, 52, 53, 54, 55]])

    >>> d = from_array(x, chunks=(3, 4))
    >>> result = _vindex(d, [0, 1, 6, 0], [0, 1, 0, 7])
    >>> result.compute()
    array([ 0,  9, 48,  7])
    """
    indexes = replace_ellipsis(x.ndim, indexes)

    # Split the key into the part plain __getitem__ can handle (ints and
    # slices) and the remaining fancy, array-like part.
    nonfancy_indexes = []
    reduced_indexes = []
    for ind in indexes:
        if isinstance(ind, Number):
            # An integer index drops its axis, so it has no entry in the
            # reduced (post-getitem) key.
            nonfancy_indexes.append(ind)
        elif isinstance(ind, slice):
            nonfancy_indexes.append(ind)
            reduced_indexes.append(slice(None))
        else:
            nonfancy_indexes.append(slice(None))
            reduced_indexes.append(ind)

    # Apply the cheap, non-fancy part first.
    x = x[tuple(nonfancy_indexes)]

    # Validate and normalize every remaining array index.
    array_indexes = {}
    for axis, (ind, size) in enumerate(zip(reduced_indexes, x.shape)):
        if isinstance(ind, slice):
            continue
        ind = np.array(ind, copy=True)
        if ind.dtype.kind == "b":
            raise IndexError("vindex does not support indexing with boolean arrays")
        if ((ind >= size) | (ind < -size)).any():
            raise IndexError(
                "vindex key has entries out of bounds for "
                "indexing along axis %s of size %s: %r" % (axis, size, ind)
            )
        # Fold negative positions into [0, size).
        ind %= size
        array_indexes[axis] = ind

    if array_indexes:
        x = _vindex_array(x, array_indexes)

    return x
def _vindex_array(x, dict_indexes):
    """Point wise indexing with only NumPy Arrays.

    ``dict_indexes`` maps axis number -> integer ndarray of positions along
    that axis (already bounds-checked and wrapped non-negative by ``_vindex``).
    The selected points end up along the first axis of the result; unindexed
    axes of ``x`` follow it, and the result is finally reshaped to the
    broadcast shape of the index arrays.
    """
    try:
        broadcast_indexes = np.broadcast_arrays(*dict_indexes.values())
    except ValueError as e:
        # note: error message exactly matches numpy
        shapes_str = " ".join(str(a.shape) for a in dict_indexes.values())
        raise IndexError(
            "shape mismatch: indexing arrays could not be "
            "broadcast together with shapes " + shapes_str
        ) from e
    broadcast_shape = broadcast_indexes[0].shape

    # Per-axis flat list of selected positions, or None for untouched axes.
    lookup = dict(zip(dict_indexes, broadcast_indexes))
    flat_indexes = [
        lookup[i].ravel().tolist() if i in lookup else None for i in range(x.ndim)
    ]
    # NOTE(review): the comprehension above already covers range(x.ndim), so
    # len(flat_indexes) == x.ndim and this extend appends nothing.
    flat_indexes.extend([None] * (x.ndim - len(flat_indexes)))
    flat_indexes = [
        list(index) if index is not None else index for index in flat_indexes
    ]

    # Cumulative chunk boundaries per axis, e.g. chunks (3, 3, 1) -> [0, 3, 6, 7];
    # bounds2 keeps only the axes that are fancy-indexed.
    bounds = [list(accumulate(add, (0,) + c)) for c in x.chunks]
    bounds2 = [b for i, b in zip(flat_indexes, bounds) if i is not None]
    axis = _get_axis(flat_indexes)
    token = tokenize(x, flat_indexes)
    out_name = "vindex-merge-" + token

    # points: one entry per selected point:
    # (position in output, block coordinates, coordinates within that block).
    points = list()
    for i, idx in enumerate(zip(*[i for i in flat_indexes if i is not None])):
        block_idx = [bisect(b, ind) - 1 for b, ind in zip(bounds2, idx)]
        inblock_idx = [
            ind - bounds2[k][j] for k, (ind, j) in enumerate(zip(idx, block_idx))
        ]
        points.append((i, tuple(block_idx), tuple(inblock_idx)))

    # Output chunks: all points in one leading chunk, untouched axes keep theirs.
    chunks = [c for i, c in zip(flat_indexes, x.chunks) if i is None]
    chunks.insert(0, (len(points),) if points else (0,))
    chunks = tuple(chunks)

    if points:
        # Group the points by the source block they fall in.
        per_block = groupby(1, points)
        per_block = dict((k, v) for k, v in per_block.items() if v)

        # All block-coordinate combinations along the untouched axes
        # (None marks the fancy-indexed axes, filled in per source block).
        other_blocks = list(
            product(
                *[
                    list(range(len(c))) if i is None else [None]
                    for i, c in zip(flat_indexes, x.chunks)
                ]
            )
        )

        full_slices = [slice(None, None) if i is None else None for i in flat_indexes]

        name = "vindex-slice-" + token
        vindex_merge_name = "vindex-merge-" + token
        dsk = {}
        # For every untouched-axis block: pull the needed points out of each
        # source block (slice + transpose so points lead), then merge them into
        # their output order.
        for okey in other_blocks:
            for i, key in enumerate(per_block):
                dsk[keyname(name, i, okey)] = (
                    _vindex_transpose,
                    (
                        _vindex_slice,
                        (x.name,) + interleave_none(okey, key),
                        interleave_none(
                            full_slices, list(zip(*pluck(2, per_block[key])))
                        ),
                    ),
                    axis,
                )
            dsk[keyname(vindex_merge_name, 0, okey)] = (
                _vindex_merge,
                [list(pluck(0, per_block[key])) for key in per_block],
                [keyname(name, i, okey) for i in range(len(per_block))],
            )

        result_1d = Array(
            HighLevelGraph.from_collections(out_name, dsk, dependencies=[x]),
            out_name,
            chunks,
            x.dtype,
            meta=x._meta,
        )
        return result_1d.reshape(broadcast_shape + result_1d.shape[1:])

    # output has a zero dimension, just create a new zero-shape array with the
    # same dtype
    from .wrap import empty

    result_1d = empty(
        tuple(map(sum, chunks)), chunks=chunks, dtype=x.dtype, name=out_name
    )
    return result_1d.reshape(broadcast_shape + result_1d.shape[1:])
def _get_axis(indexes):
"""Get axis along which point-wise slicing results lie
This is mostly a hack because I can't figure out NumPy's rule on this and
can't be bothered to go reading.
>>> _get_axis([[1, 2], None, [1, 2], None])
0
>>> _get_axis([None, [1, 2], [1, 2], None])
1
>>> _get_axis([None, None, [1, 2], [1, 2]])
2
"""
ndim = len(indexes)
indexes = [slice(None, None) if i is None else [0] for i in indexes]
x = np.empty((2,) * ndim)
x2 = x[tuple(indexes)]
return x2.shape.index(1)
def _vindex_slice(block, points):
"""Pull out point-wise slices from block"""
points = [p if isinstance(p, slice) else list(p) for p in points]
return block[tuple(points)]
def _vindex_transpose(block, axis):
"""Rotate block so that points are on the first dimension"""
axes = [axis] + list(range(axis)) + list(range(axis + 1, block.ndim))
return block.transpose(axes)
def _vindex_merge(locations, values):
    """Scatter each block of ``values`` to its output ``locations`` on axis 0.

    >>> locations = [0], [2, 1]
    >>> values = [np.array([[1, 2, 3]]),
    ...           np.array([[10, 20, 30], [40, 50, 60]])]
    >>> _vindex_merge(locations, values)
    array([[ 1,  2,  3],
           [40, 50, 60],
           [10, 20, 30]])
    """
    from .utils import empty_like_safe

    locations = list(map(list, locations))
    values = list(values)

    first = values[0]
    # Output: total point count on axis 0, remaining axes as in the blocks.
    out_shape = (sum(len(loc) for loc in locations),) + tuple(first.shape[1:])

    out = empty_like_safe(first, dtype=first.dtype, shape=out_shape)

    selector = [slice(None, None)] * out.ndim
    for loc, val in zip(locations, values):
        selector[0] = loc
        out[tuple(selector)] = val
    return out
def to_npy_stack(dirname, x, axis=0):
    """Write dask array to a stack of .npy files

    This partitions the dask.array along one axis and stores each block along
    that axis as a single .npy file in the specified directory

    Examples
    --------
    >>> x = da.ones((5, 10, 10), chunks=(2, 4, 4))  # doctest: +SKIP
    >>> da.to_npy_stack('data/', x, axis=0)  # doctest: +SKIP

    The ``.npy`` files store numpy arrays for ``x[0:2], x[2:4], and x[4:5]``
    respectively, as is specified by the chunk size along the zeroth axis::

        $ tree data/
        data/
        |-- 0.npy
        |-- 1.npy
        |-- 2.npy
        |-- info

    The ``info`` file stores the dtype, chunks, and axis information of the array.
    You can load these stacks with the :func:`dask.array.from_npy_stack` function.

    >>> y = da.from_npy_stack('data/')  # doctest: +SKIP

    See Also
    --------
    from_npy_stack
    """
    # Collapse every axis except ``axis`` to a single chunk so that each .npy
    # file holds one complete slab along the partitioned axis.
    chunks = tuple(c if i == axis else (sum(c),) for i, c in enumerate(x.chunks))
    xx = x.rechunk(chunks)

    if not os.path.exists(dirname):
        os.mkdir(dirname)

    # Persist the metadata needed by from_npy_stack.
    meta = {"chunks": chunks, "dtype": x.dtype, "axis": axis}
    with open(os.path.join(dirname, "info"), "wb") as f:
        pickle.dump(meta, f)

    name = "to-npy-stack-" + str(uuid.uuid1())
    dsk = {}
    for i, key in enumerate(core.flatten(xx.__dask_keys__())):
        dsk[(name, i)] = (np.save, os.path.join(dirname, "%d.npy" % i), key)

    graph = HighLevelGraph.from_collections(name, dsk, dependencies=[xx])
    compute_as_if_collection(Array, graph, list(dsk))
def from_npy_stack(dirname, mmap_mode="r"):
    """Load dask array from stack of npy files

    See :func:`dask.array.to_npy_stack` for docstring.

    Parameters
    ----------
    dirname: string
        Directory of .npy files
    mmap_mode: (None or 'r')
        Read data in memory map mode
    """
    # Metadata written by to_npy_stack.
    with open(os.path.join(dirname, "info"), "rb") as f:
        info = pickle.load(f)

    dtype = info["dtype"]
    chunks = info["chunks"]
    axis = info["axis"]

    name = "from-npy-stack-%s" % dirname
    # One np.load task per slab along the partitioned axis; every other axis
    # has a single chunk, so the key product lines up with the task list.
    tasks = [
        (np.load, os.path.join(dirname, "%d.npy" % i), mmap_mode)
        for i in range(len(chunks[axis]))
    ]
    keys = product([name], *[range(len(c)) for c in chunks])
    dsk = dict(zip(keys, tasks))

    return Array(dsk, name, chunks, dtype)
def new_da_object(dsk, name, chunks, meta=None, dtype=None):
    """Generic constructor for dask.array or dask.dataframe objects.

    Decides the appropriate output class based on the type of `meta` provided.
    """
    dataframe_like = (
        is_dataframe_like(meta) or is_series_like(meta) or is_index_like(meta)
    )
    if not dataframe_like:
        return Array(dsk, name=name, chunks=chunks, meta=meta, dtype=dtype)

    from ..dataframe.core import new_dd_object

    # Dataframe collections are one-dimensional: every trailing axis must
    # consist of a single chunk.
    assert all(len(c) == 1 for c in chunks[1:])
    divisions = [None] * (len(chunks[0]) + 1)
    return new_dd_object(dsk, name, meta, divisions)
from .blockwise import blockwise
from .utils import compute_meta, meta_from_array
| 32.45854 | 112 | 0.585021 |
82b41f0d0aec99e3285e4c680406a60350bde755 | 610 | py | Python | old/wolframaplah_query_simple.py | gray123/google_echo | 3d6a94a6e5d04b92296b0ad05972cfd06f266574 | [
"Apache-2.0"
] | null | null | null | old/wolframaplah_query_simple.py | gray123/google_echo | 3d6a94a6e5d04b92296b0ad05972cfd06f266574 | [
"Apache-2.0"
] | null | null | null | old/wolframaplah_query_simple.py | gray123/google_echo | 3d6a94a6e5d04b92296b0ad05972cfd06f266574 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
import sys, urllib, urllib2, json
if len(sys.argv) != 2:
usage='Usage: '+sys.argv[0]+' question_within_double_quotation_mark'
print usage
else:
querystr=urllib.quote_plus(sys.argv[1]);
API_URL='http://api.wolframalpha.com/v2/query?input='+querystr+'&appid=YV4XG4-KYWKHRV526&podindex=1&output=json'
res=json.load(urllib2.urlopen(API_URL))
if res[u"queryresult"][u"success"]:
try:
print res[u"queryresult"][u"pods"][0][u"subpods"][0][u"plaintext"]
except ValueError:
print 'I cannot find an answer for that'
else:
print 'I cannot find an answer for that'
| 33.888889 | 114 | 0.701639 |
49f30d4dd1c9fe3e8906c6ef32676890adc329fe | 20,157 | py | Python | tests/test_string.py | amitwer/assertpy | d8f222ed8ccbf2c64475111134db555245831e48 | [
"BSD-3-Clause"
] | null | null | null | tests/test_string.py | amitwer/assertpy | d8f222ed8ccbf2c64475111134db555245831e48 | [
"BSD-3-Clause"
] | null | null | null | tests/test_string.py | amitwer/assertpy | d8f222ed8ccbf2c64475111134db555245831e48 | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2015-2018, Activision Publishing, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from assertpy import assert_that,fail
import sys
if sys.version_info[0] == 3:
unicode = str
else:
unicode = unicode
def test_is_length():
assert_that('foo').is_length(3)
def test_is_length_failure():
try:
assert_that('foo').is_length(4)
fail('should have raised error')
except AssertionError as ex:
assert_that(str(ex)).is_equal_to('Expected <foo> to be of length <4>, but was <3>.')
def test_contains():
assert_that('foo').contains('f')
assert_that('foo').contains('o')
assert_that('foo').contains('fo','o')
assert_that('fred').contains('d')
assert_that('fred').contains('fr','e','d')
def test_contains_single_item_failure():
try:
assert_that('foo').contains('x')
fail('should have raised error')
except AssertionError as ex:
assert_that(str(ex)).is_equal_to('Expected <foo> to contain item <x>, but did not.')
def test_contains_multi_item_failure():
try:
assert_that('foo').contains('f','x','z')
fail('should have raised error')
except AssertionError as ex:
assert_that(str(ex)).is_equal_to("Expected <foo> to contain items <'f', 'x', 'z'>, but did not contain <'x', 'z'>.")
def test_contains_multi_item_single_failure():
try:
assert_that('foo').contains('f','o', 'x')
fail('should have raised error')
except AssertionError as ex:
assert_that(str(ex)).is_equal_to("Expected <foo> to contain items <'f', 'o', 'x'>, but did not contain <x>.")
def test_contains_ignoring_case():
assert_that('foo').contains_ignoring_case('f')
assert_that('foo').contains_ignoring_case('F')
assert_that('foo').contains_ignoring_case('Oo')
assert_that('foo').contains_ignoring_case('f', 'o', 'F', 'O', 'Fo', 'Oo', 'FoO')
def test_contains_ignoring_case_type_failure():
try:
assert_that(123).contains_ignoring_case('f')
fail('should have raised error')
except TypeError as ex:
assert_that(str(ex)).is_equal_to('val is not a string or iterable')
def test_contains_ignoring_case_missinge_item_failure():
try:
assert_that('foo').contains_ignoring_case()
fail('should have raised error')
except ValueError as ex:
assert_that(str(ex)).is_equal_to('one or more args must be given')
def test_contains_ignoring_case_single_item_failure():
try:
assert_that('foo').contains_ignoring_case('X')
fail('should have raised error')
except AssertionError as ex:
assert_that(str(ex)).is_equal_to('Expected <foo> to case-insensitive contain item <X>, but did not.')
def test_contains_ignoring_case_single_item_type_failure():
try:
assert_that('foo').contains_ignoring_case(12)
fail('should have raised error')
except TypeError as ex:
assert_that(str(ex)).is_equal_to('given arg must be a string')
def test_contains_ignoring_case_multi_item_failure():
try:
assert_that('foo').contains_ignoring_case('F','X','Z')
fail('should have raised error')
except AssertionError as ex:
assert_that(str(ex)).is_equal_to("Expected <foo> to case-insensitive contain items <'F', 'X', 'Z'>, but did not contain <'X', 'Z'>.")
def test_contains_ignoring_case_multi_item_type_failure():
try:
assert_that('foo').contains_ignoring_case('F', 12)
fail('should have raised error')
except TypeError as ex:
assert_that(str(ex)).is_equal_to('given args must all be strings')
def test_contains_ignoring_case_list():
assert_that(['foo']).contains_ignoring_case('Foo')
assert_that(['foo', 'bar', 'baz']).contains_ignoring_case('Foo')
assert_that(['foo', 'bar', 'baz']).contains_ignoring_case('Foo', 'bAr')
assert_that(['foo', 'bar', 'baz']).contains_ignoring_case('Foo', 'bAr', 'baZ')
def test_contains_ignoring_case_list_elem_type_failure():
try:
assert_that([123]).contains_ignoring_case('f')
fail('should have raised error')
except TypeError as ex:
assert_that(str(ex)).is_equal_to('val items must all be strings')
def test_contains_ignoring_case_list_multi_elem_type_failure():
try:
assert_that(['foo', 123]).contains_ignoring_case('f')
fail('should have raised error')
except TypeError as ex:
assert_that(str(ex)).is_equal_to('val items must all be strings')
def test_contains_ignoring_case_list_missinge_item_failure():
try:
assert_that(['foo']).contains_ignoring_case()
fail('should have raised error')
except ValueError as ex:
assert_that(str(ex)).is_equal_to('one or more args must be given')
def test_contains_ignoring_case_list_single_item_failure():
try:
assert_that(['foo']).contains_ignoring_case('X')
fail('should have raised error')
except AssertionError as ex:
assert_that(str(ex)).is_equal_to("Expected <['foo']> to case-insensitive contain items <X>, but did not contain <X>.")
def test_contains_ignoring_case_list_single_item_type_failure():
try:
assert_that(['foo']).contains_ignoring_case(12)
fail('should have raised error')
except TypeError as ex:
assert_that(str(ex)).is_equal_to('given args must all be strings')
def test_contains_ignoring_case_list_multi_item_failure():
try:
assert_that(['foo','bar']).contains_ignoring_case('Foo','X','Y')
fail('should have raised error')
except AssertionError as ex:
assert_that(str(ex)).is_equal_to("Expected <['foo', 'bar']> to case-insensitive contain items <'Foo', 'X', 'Y'>, but did not contain <'X', 'Y'>.")
def test_contains_ignoring_case_list_multi_item_type_failure():
try:
assert_that(['foo','bar']).contains_ignoring_case('F', 12)
fail('should have raised error')
except TypeError as ex:
assert_that(str(ex)).is_equal_to('given args must all be strings')
def test_does_not_contain():
assert_that('foo').does_not_contain('x')
assert_that('foo').does_not_contain('x','y')
def test_does_not_contain_single_item_failure():
try:
assert_that('foo').does_not_contain('f')
fail('should have raised error')
except AssertionError as ex:
assert_that(str(ex)).is_equal_to('Expected <foo> to not contain item <f>, but did.')
def test_does_not_contain_list_item_failure():
try:
assert_that('foo').does_not_contain('x','y','f')
fail('should have raised error')
except AssertionError as ex:
assert_that(str(ex)).is_equal_to("Expected <foo> to not contain items <'x', 'y', 'f'>, but did contain <f>.")
def test_does_not_contain_list_multi_item_failure():
try:
assert_that('foo').does_not_contain('x','f','o')
fail('should have raised error')
except AssertionError as ex:
assert_that(str(ex)).is_equal_to("Expected <foo> to not contain items <'x', 'f', 'o'>, but did contain <'f', 'o'>.")
def test_is_empty():
assert_that('').is_empty()
def test_is_empty_failure():
try:
assert_that('foo').is_empty()
fail('should have raised error')
except AssertionError as ex:
assert_that(str(ex)).is_equal_to('Expected <foo> to be empty string, but was not.')
def test_is_not_empty():
assert_that('foo').is_not_empty()
def test_is_not_empty_failure():
try:
assert_that('').is_not_empty()
fail('should have raised error')
except AssertionError as ex:
assert_that(str(ex)).is_equal_to('Expected not empty string, but was empty.')
def test_is_equal_ignoring_case():
assert_that('FOO').is_equal_to_ignoring_case('foo')
assert_that('foo').is_equal_to_ignoring_case('FOO')
assert_that('fOO').is_equal_to_ignoring_case('foo')
def test_is_equal_ignoring_case_failure():
try:
assert_that('foo').is_equal_to_ignoring_case('bar')
fail('should have raised error')
except AssertionError as ex:
assert_that(str(ex)).is_equal_to('Expected <foo> to be case-insensitive equal to <bar>, but was not.')
def test_is_equal_ignoring_case_bad_value_type_failure():
try:
assert_that(123).is_equal_to_ignoring_case(12)
fail('should have raised error')
except TypeError as ex:
assert_that(str(ex)).is_equal_to('val is not a string')
def test_is_equal_ignoring_case_bad_arg_type_failure():
try:
assert_that('fred').is_equal_to_ignoring_case(12)
fail('should have raised error')
except TypeError as ex:
assert_that(str(ex)).is_equal_to('given arg must be a string')
def test_starts_with():
assert_that('fred').starts_with('f')
assert_that('fred').starts_with('fr')
assert_that('fred').starts_with('fred')
def test_starts_with_failure():
try:
assert_that('fred').starts_with('bar')
fail('should have raised error')
except AssertionError as ex:
assert_that(str(ex)).is_equal_to('Expected <fred> to start with <bar>, but did not.')
def test_starts_with_bad_value_type_failure():
try:
assert_that(123).starts_with(12)
fail('should have raised error')
except TypeError as ex:
assert_that(str(ex)).is_equal_to('val is not a string or iterable')
def test_starts_with_bad_arg_none_failure():
try:
assert_that('fred').starts_with(None)
fail('should have raised error')
except TypeError as ex:
assert_that(str(ex)).is_equal_to('given prefix arg must not be none')
def test_starts_with_bad_arg_type_failure():
try:
assert_that('fred').starts_with(123)
fail('should have raised error')
except TypeError as ex:
assert_that(str(ex)).is_equal_to('given prefix arg must be a string')
def test_starts_with_bad_arg_empty_failure():
try:
assert_that('fred').starts_with('')
fail('should have raised error')
except ValueError as ex:
assert_that(str(ex)).is_equal_to('given prefix arg must not be empty')
def test_ends_with():
assert_that('fred').ends_with('d')
assert_that('fred').ends_with('ed')
assert_that('fred').ends_with('fred')
def test_ends_with_failure():
try:
assert_that('fred').ends_with('bar')
fail('should have raised error')
except AssertionError as ex:
assert_that(str(ex)).is_equal_to('Expected <fred> to end with <bar>, but did not.')
def test_ends_with_bad_value_type_failure():
try:
assert_that(123).ends_with(12)
fail('should have raised error')
except TypeError as ex:
assert_that(str(ex)).is_equal_to('val is not a string or iterable')
def test_ends_with_bad_arg_none_failure():
try:
assert_that('fred').ends_with(None)
fail('should have raised error')
except TypeError as ex:
assert_that(str(ex)).is_equal_to('given suffix arg must not be none')
def test_ends_with_bad_arg_type_failure():
try:
assert_that('fred').ends_with(123)
fail('should have raised error')
except TypeError as ex:
assert_that(str(ex)).is_equal_to('given suffix arg must be a string')
def test_ends_with_bad_arg_empty_failure():
try:
assert_that('fred').ends_with('')
fail('should have raised error')
except ValueError as ex:
assert_that(str(ex)).is_equal_to('given suffix arg must not be empty')
def test_matches():
assert_that('fred').matches(r'\w')
assert_that('fred').matches(r'\w{2}')
assert_that('fred').matches(r'\w+')
assert_that('fred').matches(r'^\w{4}$')
assert_that('fred').matches(r'^.*?$')
assert_that('123-456-7890').matches(r'\d{3}-\d{3}-\d{4}')
def test_matches_failure():
try:
assert_that('fred').matches(r'\d+')
fail('should have raised error')
except AssertionError as ex:
assert_that(str(ex)).is_equal_to('Expected <fred> to match pattern <\\d+>, but did not.')
def test_matches_bad_value_type_failure():
try:
assert_that(123).matches(12)
fail('should have raised error')
except TypeError as ex:
assert_that(str(ex)).is_equal_to('val is not a string')
def test_matches_bad_arg_type_failure():
try:
assert_that('fred').matches(123)
fail('should have raised error')
except TypeError as ex:
assert_that(str(ex)).is_equal_to('given pattern arg must be a string')
def test_matches_bad_arg_empty_failure():
try:
assert_that('fred').matches('')
fail('should have raised error')
except ValueError as ex:
assert_that(str(ex)).is_equal_to('given pattern arg must not be empty')
def test_does_not_match():
assert_that('fred').does_not_match(r'\d+')
assert_that('fred').does_not_match(r'\w{5}')
assert_that('123-456-7890').does_not_match(r'^\d+$')
def test_does_not_match_failure():
try:
assert_that('fred').does_not_match(r'\w+')
fail('should have raised error')
except AssertionError as ex:
assert_that(str(ex)).is_equal_to('Expected <fred> to not match pattern <\\w+>, but did.')
def test_does_not_match_bad_value_type_failure():
try:
assert_that(123).does_not_match(12)
fail('should have raised error')
except TypeError as ex:
assert_that(str(ex)).is_equal_to('val is not a string')
def test_does_not_match_bad_arg_type_failure():
try:
assert_that('fred').does_not_match(123)
fail('should have raised error')
except TypeError as ex:
assert_that(str(ex)).is_equal_to('given pattern arg must be a string')
def test_does_not_match_bad_arg_empty_failure():
try:
assert_that('fred').does_not_match('')
fail('should have raised error')
except ValueError as ex:
assert_that(str(ex)).is_equal_to('given pattern arg must not be empty')
def test_is_alpha():
assert_that('foo').is_alpha()
def test_is_alpha_digit_failure():
try:
assert_that('foo123').is_alpha()
fail('should have raised error')
except AssertionError as ex:
assert_that(str(ex)).is_equal_to('Expected <foo123> to contain only alphabetic chars, but did not.')
def test_is_alpha_space_failure():
try:
assert_that('foo bar').is_alpha()
fail('should have raised error')
except AssertionError as ex:
assert_that(str(ex)).is_equal_to('Expected <foo bar> to contain only alphabetic chars, but did not.')
def test_is_alpha_punctuation_failure():
try:
assert_that('foo,bar').is_alpha()
fail('should have raised error')
except AssertionError as ex:
assert_that(str(ex)).is_equal_to('Expected <foo,bar> to contain only alphabetic chars, but did not.')
def test_is_alpha_bad_value_type_failure():
try:
assert_that(123).is_alpha()
fail('should have raised error')
except TypeError as ex:
assert_that(str(ex)).is_equal_to('val is not a string')
def test_is_alpha_empty_value_failure():
try:
assert_that('').is_alpha()
fail('should have raised error')
except ValueError as ex:
assert_that(str(ex)).is_equal_to('val is empty')
def test_is_digit():
assert_that('123').is_digit()
def test_is_digit_alpha_failure():
try:
assert_that('foo123').is_digit()
fail('should have raised error')
except AssertionError as ex:
assert_that(str(ex)).is_equal_to('Expected <foo123> to contain only digits, but did not.')
def test_is_digit_space_failure():
try:
assert_that('1 000 000').is_digit()
fail('should have raised error')
except AssertionError as ex:
assert_that(str(ex)).is_equal_to('Expected <1 000 000> to contain only digits, but did not.')
def test_is_digit_punctuation_failure():
try:
assert_that('-123').is_digit()
fail('should have raised error')
except AssertionError as ex:
assert_that(str(ex)).is_equal_to('Expected <-123> to contain only digits, but did not.')
def test_is_digit_bad_value_type_failure():
try:
assert_that(123).is_digit()
fail('should have raised error')
except TypeError as ex:
assert_that(str(ex)).is_equal_to('val is not a string')
def test_is_digit_empty_value_failure():
try:
assert_that('').is_digit()
fail('should have raised error')
except ValueError as ex:
assert_that(str(ex)).is_equal_to('val is empty')
def test_is_lower():
assert_that('foo').is_lower()
assert_that('foo 123').is_lower()
assert_that('123 456').is_lower()
def test_is_lower_failure():
try:
assert_that('FOO').is_lower()
fail('should have raised error')
except AssertionError as ex:
assert_that(str(ex)).is_equal_to('Expected <FOO> to contain only lowercase chars, but did not.')
def test_is_lower_bad_value_type_failure():
try:
assert_that(123).is_lower()
fail('should have raised error')
except TypeError as ex:
assert_that(str(ex)).is_equal_to('val is not a string')
def test_is_lower_empty_value_failure():
try:
assert_that('').is_lower()
fail('should have raised error')
except ValueError as ex:
assert_that(str(ex)).is_equal_to('val is empty')
def test_is_upper():
assert_that('FOO').is_upper()
assert_that('FOO 123').is_upper()
assert_that('123 456').is_upper()
def test_is_upper_failure():
try:
assert_that('foo').is_upper()
fail('should have raised error')
except AssertionError as ex:
assert_that(str(ex)).is_equal_to('Expected <foo> to contain only uppercase chars, but did not.')
def test_is_upper_bad_value_type_failure():
try:
assert_that(123).is_upper()
fail('should have raised error')
except TypeError as ex:
assert_that(str(ex)).is_equal_to('val is not a string')
def test_is_upper_empty_value_failure():
try:
assert_that('').is_upper()
fail('should have raised error')
except ValueError as ex:
assert_that(str(ex)).is_equal_to('val is empty')
def test_is_unicode():
assert_that(unicode('unicorn')).is_unicode()
assert_that(unicode('unicorn 123')).is_unicode()
assert_that(unicode('unicorn')).is_unicode()
def test_is_unicode_failure():
try:
assert_that(123).is_unicode()
fail('should have raised error')
except AssertionError as ex:
assert_that(str(ex)).is_equal_to('Expected <123> to be unicode, but was <int>.')
def test_chaining():
assert_that('foo').is_type_of(str).is_length(3).contains('f').does_not_contain('x')
assert_that('fred').starts_with('f').ends_with('d').matches(r'^f.*?d$').does_not_match(r'\d')
| 37.053309 | 154 | 0.68661 |
8c04d70689518e38a9ae46beccd2410e8c026db0 | 6,507 | bzl | Python | setup.bzl | clintharrison/kythe | 9d690f19054e4627705da0f7c361174cbc39cb93 | [
"Apache-2.0"
] | null | null | null | setup.bzl | clintharrison/kythe | 9d690f19054e4627705da0f7c361174cbc39cb93 | [
"Apache-2.0"
] | null | null | null | setup.bzl | clintharrison/kythe | 9d690f19054e4627705da0f7c361174cbc39cb93 | [
"Apache-2.0"
] | null | null | null | load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
load("@bazel_tools//tools/build_defs/repo:utils.bzl", "maybe")
def github_archive(name, repo_name, commit, kind = "zip", strip_prefix = "", **kwargs):
    """Defines a GitHub commit-based repository rule.

    Args:
      name: workspace name for the external repository.
      repo_name: GitHub "<owner>/<project>" slug.
      commit: commit hash (or tag) to fetch.
      kind: archive format suffix, "zip" or "tar.gz".
      strip_prefix: additional path inside the archive's top directory to strip.
      **kwargs: forwarded to http_archive (e.g. sha256).
    """
    # BUG FIX: the slice previously started at index("/"), which kept the "/"
    # separator in `project` and produced a strip_prefix like
    # "/proj-<commit>/" that never matches the archive's top-level directory
    # "proj-<commit>/". Skip past the separator.
    project = repo_name[repo_name.index("/") + 1:]
    http_archive(
        name = name,
        strip_prefix = "{project}-{commit}/{prefix}".format(
            project = project,
            commit = commit,
            prefix = strip_prefix,
        ),
        urls = [u.format(commit = commit, repo_name = repo_name, kind = kind) for u in [
            "https://mirror.bazel.build/github.com/{repo_name}/archive/{commit}.{kind}",
            "https://github.com/{repo_name}/archive/{commit}.{kind}",
        ]],
        **kwargs
    )
def kythe_rule_repositories():
    """Defines external repositories for Kythe Bazel rules.
    These repositories must be loaded before calling external.bzl%kythe_dependencies.

    Each dependency is declared via maybe() so a repository already defined
    by the enclosing workspace takes precedence over the pinned version here.
    """
    # Starlark utility library used by most rule sets below.
    maybe(
        http_archive,
        name = "bazel_skylib",
        urls = [
            "https://github.com/bazelbuild/bazel-skylib/releases/download/1.0.3/bazel-skylib-1.0.3.tar.gz",
            "https://mirror.bazel.build/github.com/bazelbuild/bazel-skylib/releases/download/1.0.3/bazel-skylib-1.0.3.tar.gz",
        ],
        sha256 = "1c531376ac7e5a180e0237938a2536de0c54d93f5c278634818e0efc952dd56c",
    )
    # Go toolchain and rules.
    maybe(
        http_archive,
        name = "io_bazel_rules_go",
        sha256 = "6f111c57fd50baf5b8ee9d63024874dd2a014b069426156c55adbf6d3d22cb7b",
        urls = [
            "https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.25.0/rules_go-v0.25.0.tar.gz",
            "https://github.com/bazelbuild/rules_go/releases/download/v0.25.0/rules_go-v0.25.0.tar.gz",
        ],
    )
    # C/C++ rules (pinned to a commit, not a release).
    maybe(
        http_archive,
        name = "rules_cc",
        sha256 = "29daf0159f0cf552fcff60b49d8bcd4f08f08506d2da6e41b07058ec50cfeaec",
        strip_prefix = "rules_cc-b7fe9697c0c76ab2fd431a891dbb9a6a32ed7c3e",
        urls = [
            "https://mirror.bazel.build/github.com/bazelbuild/rules_cc/archive/b7fe9697c0c76ab2fd431a891dbb9a6a32ed7c3e.tar.gz",
            "https://github.com/bazelbuild/rules_cc/archive/b7fe9697c0c76ab2fd431a891dbb9a6a32ed7c3e.tar.gz",
        ],
    )
    # Java rules.
    maybe(
        http_archive,
        name = "rules_java",
        url = "https://github.com/bazelbuild/rules_java/archive/973a93dd2d698929264d1028836f6b9cc60ff817.zip",
        sha256 = "a6cb0dbe343b67c7d4f3f11a68e327acdfc71fee1e17affa4e605129fc56bb15",
        strip_prefix = "rules_java-973a93dd2d698929264d1028836f6b9cc60ff817",
    )
    # Protocol buffer rules.
    maybe(
        http_archive,
        name = "rules_proto",
        sha256 = "e4fe70af52135d2ee592a07f916e6e1fc7c94cf8786c15e8c0d0f08b1fe5ea16",
        strip_prefix = "rules_proto-97d8af4dc474595af3900dd85cb3a29ad28cc313",
        url = "https://github.com/bazelbuild/rules_proto/archive/97d8af4dc474595af3900dd85cb3a29ad28cc313.zip",
    )
    # BUILD-file generator for Go (used with rules_go).
    maybe(
        http_archive,
        name = "bazel_gazelle",
        sha256 = "b85f48fa105c4403326e9525ad2b2cc437babaa6e15a3fc0b1dbab0ab064bc7c",
        urls = [
            "https://mirror.bazel.build/github.com/bazelbuild/bazel-gazelle/releases/download/v0.22.2/bazel-gazelle-v0.22.2.tar.gz",
            "https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.22.2/bazel-gazelle-v0.22.2.tar.gz",
        ],
    )
    # Node.js / TypeScript rules.
    maybe(
        http_archive,
        name = "build_bazel_rules_nodejs",
        sha256 = "dd7ea7efda7655c218ca707f55c3e1b9c68055a70c31a98f264b3445bc8f4cb1",
        urls = ["https://github.com/bazelbuild/rules_nodejs/releases/download/3.2.3/rules_nodejs-3.2.3.tar.gz"],
    )
    # Maven dependency resolution for Java.
    maybe(
        http_archive,
        name = "rules_jvm_external",
        sha256 = "31701ad93dbfe544d597dbe62c9a1fdd76d81d8a9150c2bf1ecf928ecdf97169",
        strip_prefix = "rules_jvm_external-4.0",
        urls = ["https://github.com/bazelbuild/rules_jvm_external/archive/4.0.zip"],
    )
    # Python rules.
    maybe(
        http_archive,
        name = "rules_python",
        sha256 = "e5470e92a18aa51830db99a4d9c492cc613761d5bdb7131c04bd92b9834380f6",
        strip_prefix = "rules_python-4b84ad270387a7c439ebdccfd530e2339601ef27",
        urls = [
            "https://mirror.bazel.build/github.com/bazelbuild/rules_python/archive/4b84ad270387a7c439ebdccfd530e2339601ef27.tar.gz",
            "https://github.com/bazelbuild/rules_python/archive/4b84ad270387a7c439ebdccfd530e2339601ef27.tar.gz",
        ],
    )
    # Rust rules.
    maybe(
        http_archive,
        name = "rules_rust",
        sha256 = "f8c0132ea3855781d41ac574df0ca44959f17694d368c03c7cbaa5f29ef42d8b",
        strip_prefix = "rules_rust-5bb12cc451317581452b5441692d57eb4310b839",
        urls = [
            "https://github.com/bazelbuild/rules_rust/archive/5bb12cc451317581452b5441692d57eb4310b839.tar.gz",
        ],
    )
    # Ruby rules, patched locally to tolerate empty globs.
    maybe(
        http_archive,
        name = "bazelruby_rules_ruby",
        strip_prefix = "rules_ruby-0.4.1",
        sha256 = "abfc2758cc379e0ff0eb9824e3b507c1633d4c8f99f24735aef63c7180be50f0",
        urls = [
            "https://github.com/bazelruby/rules_ruby/archive/v0.4.1.zip",
        ],
        patches = [
            "@io_kythe//third_party:rules_ruby_allow_empty.patch",
        ],
        patch_args = ["-p1"],
    )
    # Rules for building non-Bazel (configure/make, cmake) projects.
    maybe(
        http_archive,
        name = "rules_foreign_cc",
        sha256 = "e60cfd0a8426fa4f5fd2156e768493ca62b87d125cb35e94c44e79a3f0d8635f",
        strip_prefix = "rules_foreign_cc-0.2.0",
        url = "https://github.com/bazelbuild/rules_foreign_cc/archive/0.2.0.zip",
    )
    # LLVM sticks the bazel configuration under a subdirectory, which expects to
    # be the project root, so currently needs to be two distinct repositories
    # from the same upstream source.
    llvm_commit = "487f74a6c4151d13d3a7b54ee4ab7beaf3e87487"
    llvm_sha256 = "cb8d1acf60e71894a692f273ab8c2a1fb6bd9e547de77cb9ee76829b2e429e7d"
    # Raw LLVM sources (no BUILD files of their own).
    maybe(
        github_archive,
        repo_name = "llvm/llvm-project",
        commit = llvm_commit,
        name = "llvm-project-raw",
        build_file_content = "#empty",
        sha256 = llvm_sha256,
    )
    # The Bazel overlay shipped inside the LLVM tree (utils/bazel).
    maybe(
        github_archive,
        repo_name = "llvm/llvm-project",
        commit = llvm_commit,
        name = "llvm-bazel",
        strip_prefix = "utils/bazel",
        sha256 = llvm_sha256,
        patch_args = ["-p2"],
        patches = ["@io_kythe//third_party:llvm-bazel-glob.patch"],
    )
| 39.198795 | 132 | 0.663132 |
dda2f2486451f01a09c8d44b2c01bda9c6d4176d | 391 | py | Python | PyLudus/wsgi.py | AAADevs/PyLudus | c74defec007d5699eba74b8c2c60bfebd1d41383 | [
"MIT"
] | 5 | 2021-02-17T10:47:26.000Z | 2021-03-04T12:31:08.000Z | PyLudus/wsgi.py | ccall48/PyLudus | 69f0770113b1df67e731897877aedbda1e663590 | [
"MIT"
] | 2 | 2021-02-24T22:39:07.000Z | 2021-03-14T13:21:36.000Z | PyLudus/wsgi.py | AAADevs/PyLudus | c74defec007d5699eba74b8c2c60bfebd1d41383 | [
"MIT"
] | 2 | 2021-02-16T01:06:50.000Z | 2021-05-26T11:12:47.000Z | """
WSGI config for PyLudus project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings module before the application object
# is constructed; an existing DJANGO_SETTINGS_MODULE value is preserved.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "PyLudus.settings")
# Module-level WSGI callable picked up by WSGI servers (gunicorn, uWSGI, ...).
application = get_wsgi_application()
| 23 | 78 | 0.785166 |
ebb345d1f366cac4e0e95b49afe041067a84d482 | 3,845 | py | Python | qa/rpc-tests/maxblocksinflight.py | CHN-portal/CHN-Portal | 74c2a20f99add8fb45ce0722d1674496a3d28e7e | [
"MIT"
] | 1 | 2020-04-09T09:24:56.000Z | 2020-04-09T09:24:56.000Z | qa/rpc-tests/maxblocksinflight.py | CHN-portal/CHN-Portal | 74c2a20f99add8fb45ce0722d1674496a3d28e7e | [
"MIT"
] | 1 | 2019-12-14T01:05:46.000Z | 2019-12-14T18:24:42.000Z | qa/rpc-tests/maxblocksinflight.py | CHN-portal/CHN-Portal | 74c2a20f99add8fb45ce0722d1674496a3d28e7e | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test nodes responses to having many blocks in flight.
In this test we connect to one node over p2p, send it numerous inv's, and
compare the resulting number of getdata requests to a max allowed value. We
test for exceeding 128 blocks in flight, which was the limit an 0.9 client will
reach. [0.10 clients shouldn't request more than 16 from a single peer.]
"""
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
MAX_REQUESTS = 128
class TestManager(NodeConnCB):
    """P2P test driver: floods a node with block invs and counts getdata replies.

    NOTE(review): ``self.log`` is not set here; the test harness assigns it
    externally (see MaxBlocksInFlightTest.run_test) before run() is called.
    """
    # set up NodeConnCB callbacks, overriding base class
    def on_getdata(self, conn, message):
        """Records how many times each block hash was requested by the node."""
        self.log.debug("got getdata %s" % repr(message))
        # Log the requests
        for inv in message.inv:
            if inv.hash not in self.blockReqCounts:
                self.blockReqCounts[inv.hash] = 0
            self.blockReqCounts[inv.hash] += 1
    def on_close(self, conn):
        # A disconnect is only acceptable once run() has finished.
        if not self.disconnectOkay:
            raise EarlyDisconnectError(0)
    def __init__(self):
        NodeConnCB.__init__(self)
    def add_new_connection(self, connection):
        """Attaches the p2p connection and resets per-run bookkeeping."""
        self.connection = connection
        self.blockReqCounts = {}
        self.disconnectOkay = False
    def run(self):
        """Sends rounds of random block invs and checks getdata limits."""
        self.connection.rpc.generate(1) # Leave IBD
        numBlocksToGenerate = [8, 16, 128, 1024]
        for count in range(len(numBlocksToGenerate)):
            current_invs = []
            for i in range(numBlocksToGenerate[count]):
                # Random 256-bit hashes; type 2 == MSG_BLOCK.
                current_invs.append(CInv(2, random.randrange(0, 1 << 256)))
                # Flush in batches below the protocol's max inv message size.
                if len(current_invs) >= 50000:
                    self.connection.send_message(msg_inv(current_invs))
                    current_invs = []
            if len(current_invs) > 0:
                self.connection.send_message(msg_inv(current_invs))
            # Wait and see how many blocks were requested
            time.sleep(2)
            total_requests = 0
            # blockReqCounts is mutated by the network thread in on_getdata,
            # so reads must hold the shared mininode lock.
            with mininode_lock:
                for key in self.blockReqCounts:
                    total_requests += self.blockReqCounts[key]
                    if self.blockReqCounts[key] > 1:
                        raise AssertionError("Error, test failed: block %064x requested more than once" % key)
            if total_requests > MAX_REQUESTS:
                raise AssertionError("Error, too many blocks (%d) requested" % total_requests)
            self.log.info("Round %d: success (total requests: %d)" % (count, total_requests))
        self.disconnectOkay = True
        self.connection.disconnect_node()
class MaxBlocksInFlightTest(BitcoinTestFramework):
    """Functional test harness: one whitelisted node driven by TestManager."""
    def add_options(self, parser):
        # Allow overriding the daemon binary under test via --testbinary.
        parser.add_option("--testbinary", dest="testbinary",
                          default=os.getenv("BITCOIND", "cbdhealthnetworkd"),
                          help="Binary to test max block requests behavior")
    def __init__(self):
        super().__init__()
        self.setup_clean_chain = True
        self.num_nodes = 1
    def setup_network(self):
        # Whitelist localhost so the node accepts our raw p2p connection.
        self.nodes = start_nodes(self.num_nodes, self.options.tmpdir,
                                 extra_args=[['-whitelist=127.0.0.1']],
                                 binary=[self.options.testbinary])
    def run_test(self):
        test = TestManager()
        # pass log handler through to the test manager object
        test.log = self.log
        test.add_new_connection(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test))
        NetworkThread().start() # Start up network handling in another thread
        test.run()
if __name__ == '__main__':
    # Entry point when run directly (normally invoked by the rpc-tests runner).
    MaxBlocksInFlightTest().main()
| 39.234694 | 110 | 0.63225 |
373a41af18e7f8c7a6deb0d85d416b5db0504556 | 3,094 | py | Python | examples/IFP.py | erow/disentanglement_lib | c875207fdeadc44880277542447544941bc0bd0a | [
"Apache-2.0"
] | null | null | null | examples/IFP.py | erow/disentanglement_lib | c875207fdeadc44880277542447544941bc0bd0a | [
"Apache-2.0"
] | null | null | null | examples/IFP.py | erow/disentanglement_lib | c875207fdeadc44880277542447544941bc0bd0a | [
"Apache-2.0"
] | null | null | null | from argparse import ArgumentParser
import gin
from pytorch_lightning.loggers.wandb import WandbLogger
import wandb
from disentanglement_lib.data.ground_truth.ground_truth_data import RandomAction
from disentanglement_lib.data.ground_truth.named_data import get_named_ground_truth_data
from disentanglement_lib.methods.shared.architectures import *
from disentanglement_lib.methods.unsupervised import train
from disentanglement_lib.methods.unsupervised import callbacks
import os
import pytorch_lightning as pl
from disentanglement_lib.methods.unsupervised.model import Regularizer
# Tag and project for Weights & Biases runs; set via environment so they are
# picked up when wandb initializes (here, implicitly through WandbLogger).
os.environ['WANDB_TAGS']='annealed_test'
os.environ['WANDB_PROJECT']='IFP'
@gin.configurable('annealed_test')
class AnnealedTest(Regularizer):
    """Regularizer with an exponentially annealed KL weight (beta).

    The weight follows ``beta = exp(-gamma * step/stage_steps) * beta_max + 1``,
    i.e. it starts near ``beta_max + 1`` and decays towards 1 over training.
    """
    def __init__(self,
                 gamma = 10,
                 beta_max = 70,
                 stage_steps=gin.REQUIRED):
        # gamma: decay rate of the annealing schedule.
        # beta_max: initial extra KL weight on top of the baseline of 1.
        # stage_steps: step count the schedule is normalized by (gin-required).
        super().__init__()
        self.stage_steps = stage_steps
        self.gamma = gamma
        self.beta_max = beta_max
    def forward(self, data_batch, model, kl, z_mean, z_logvar, z_sampled):
        """Training compatible model function."""
        global_step = model.global_step
        # Fraction of the annealing stage completed so far.
        k = global_step/self.stage_steps
        beta = (np.exp(-self.gamma*k)*self.beta_max + 1)
        # Expose the current beta for logging/inspection.
        model.summary['beta'] = beta
        # Weighted total KL; z_mean/z_logvar/z_sampled are unused here.
        return beta * (kl.sum())
@gin.configurable('action')
def get_action(dataset,index=gin.REQUIRED):
    """Builds the RandomAction over `dataset` for the gin-configured factor index."""
    action = RandomAction(dataset, index)
    return action
# Command-line interface; unrecognized flags are forwarded to gin as raw
# bindings in the __main__ block below.
parser = ArgumentParser()
parser.add_argument('--seed',type=int,default=99)
parser.add_argument('--dataset',type=str,default='dsprites_full')
parser.add_argument('--num_latent',type=int,default=1)
parser.add_argument('-s', '--steps', type=int, default=8000)
parser.add_argument('-g','--gamma',type=float,default=15)
if __name__ == "__main__":
    args, unknown = parser.parse_known_args()
    # Bug fix: bind `steps` unconditionally. It is used below for `max_steps`
    # but was previously only assigned inside the no-extra-args branch,
    # raising a NameError whenever gin overrides were passed on the CLI.
    steps = args.steps
    if len(unknown)==0:
        # No extra flags: derive all gin bindings from the CLI arguments.
        seed= args.seed
        gamma = args.gamma
        bindings= [
            "model.regularizers = [@annealed_test()]",
            f"annealed_test.stage_steps={steps}",
            f"annealed_test.gamma={gamma}",
            f"model.seed={seed}",
            f"dataset.name='{args.dataset}'",
            f"model.num_latent={args.num_latent}"
        ]
        gin.parse_config(bindings)
    else:
        # Treat unrecognized flags as raw gin bindings.
        # NOTE(review): str.strip('--') removes *all* leading and trailing
        # '-' characters, not just a '--' prefix; a value ending in '-'
        # would be corrupted. Confirm this is the intended behavior.
        unknown = [i.strip('--') for i in unknown] + ["model.regularizers = [@annealed_test()]"]
        print(unknown)
        gin.parse_config(unknown)
    dataset = get_named_ground_truth_data()
    action = get_action(dataset)
    rs = np.random.RandomState(0)  # NOTE(review): currently unused.
    w,h,c = dataset.observation_shape
    pl_model = train.PLModel(input_shape=[c,w,h])
    dl = torch.utils.data.DataLoader(train.Iterate(action), 64,num_workers=2,pin_memory=True)
    logger = WandbLogger()
    trainer = pl.Trainer(
        logger,
        # progress_bar_refresh_rate=500, # disable progress bar
        max_steps=steps,
        checkpoint_callback=False,
        callbacks=[
            callbacks.EarlyStop(),
            callbacks.Visualization(2000),
        ],
        gpus=1,)
    trainer.fit(pl_model, dl)
    wandb.join()
| 35.976744 | 96 | 0.676794 |
73b65d8e86234f10cea1d1d60ffaab805ff4f41f | 2,526 | py | Python | rconweb/api/auto_settings.py | FlorianSW/hll_rcon_tool | 26a37b07eaab34dfb5a6d10c0f02e0fcae51dd88 | [
"MIT"
] | 49 | 2020-03-07T13:09:21.000Z | 2022-03-19T14:24:13.000Z | rconweb/api/auto_settings.py | FlorianSW/hll_rcon_tool | 26a37b07eaab34dfb5a6d10c0f02e0fcae51dd88 | [
"MIT"
] | 48 | 2020-03-26T22:19:40.000Z | 2021-12-12T17:31:06.000Z | rconweb/api/auto_settings.py | FlorianSW/hll_rcon_tool | 26a37b07eaab34dfb5a6d10c0f02e0fcae51dd88 | [
"MIT"
] | 48 | 2020-03-03T09:44:36.000Z | 2022-03-18T07:33:39.000Z | import os, json
from django.views.decorators.csrf import csrf_exempt
from rcon.user_config import AutoSettingsConfig
from .auth import login_required, api_response
from .utils import _get_data
from .multi_servers import forward_request
from .services import get_supervisor_client
# Canonical display order for auto-settings keys; used by get_auto_settings
# to sort the stored config before returning it.
AUTO_SETTINGS_KEY_ORDER = ["always_apply_defaults", "defaults", "rules", "_available_commands", "_available_conditions"]
AUTO_SETTINGS_KEY_INDEX_MAP = {v: i for i, v in enumerate(AUTO_SETTINGS_KEY_ORDER)}
@csrf_exempt
@login_required
def get_auto_settings(request):
    """Returns the auto-settings config, with keys in canonical order.

    The target server number comes from the request data, falling back to
    the SERVER_NUMBER environment variable.
    """
    data = _get_data(request)
    try:
        server_number = int(data.get("server_number", os.getenv("SERVER_NUMBER")))
    except (TypeError, ValueError):
        # TypeError covers int(None) when neither the request nor the
        # environment provides a server number; previously only ValueError
        # was caught and that case produced an unhandled 500.
        return api_response(error="Invalid server number", command="get_auto_settings")
    config = AutoSettingsConfig().get_settings()
    # Present keys in the canonical AUTO_SETTINGS_KEY_ORDER.
    # NOTE(review): an unexpected key in the stored config raises KeyError
    # here -- confirm the config schema is fixed to these five keys.
    ordered_config = {k: v for (k, v) in sorted(config.items(), key=lambda pair: AUTO_SETTINGS_KEY_INDEX_MAP[pair[0]])}
    return api_response(
        result=ordered_config,
        command="get_auto_settings",
        arguments=dict(server_number=server_number),
        failed=False,
    )
@csrf_exempt
@login_required
def set_auto_settings(request):
    """Stores a new auto-settings config and restarts the auto_settings service.

    Expects `settings` (a JSON string) in the request data. Optional flags:
    `restart_service` (default True) restarts the supervisor-managed
    auto_settings process; `forward` (default False) replicates the request
    to the other servers.
    """
    data = _get_data(request)
    try:
        server_number = int(data.get("server_number", os.getenv("SERVER_NUMBER")))
    except (TypeError, ValueError):
        # TypeError covers int(None) when no server number is available at
        # all; previously only ValueError was caught (unhandled 500).
        return api_response(error="Invalid server number", failed=True, status_code=400)
    do_restart_service = data.get("restart_service", True)
    do_forward = data.get("forward", False)
    settings = data.get("settings")
    if not settings:
        return api_response(error="No auto settings provided", command="set_auto_settings")
    try:
        # Check if valid JSON
        settings = json.loads(settings)
    except json.JSONDecodeError:
        return api_response(error="No valid JSON provided", command="set_auto_settings")
    config = AutoSettingsConfig()
    config.set_settings(settings)
    if do_restart_service:
        client = get_supervisor_client()
        process = client.supervisor.getProcessInfo('auto_settings')
        if process['state'] == 20:  # 20 == RUNNING in supervisor's process-state codes
            client.supervisor.stopProcess('auto_settings')
        client.supervisor.startProcess('auto_settings')
    if do_forward:
        forward_request(request)
    return api_response(
        result=settings,
        command="set_auto_settings",
        arguments=dict(server_number=server_number, restart_service=do_restart_service),
        failed=False,
    )
| 34.60274 | 120 | 0.719319 |
fe96039e1d54481f22bd231dcf303a02979b4f8f | 3,144 | py | Python | sct_custom/spinalcordtoolbox/scripts/sct_compute_mscc.py | nidebroux/lumbosacral_segmentation | 3217960c6f0f5c3886dfdf46e1286ad2f737f4aa | [
"Unlicense",
"MIT"
] | 1 | 2021-09-07T08:52:21.000Z | 2021-09-07T08:52:21.000Z | sct_custom/spinalcordtoolbox/scripts/sct_compute_mscc.py | nidebroux/lumbosacral_segmentation | 3217960c6f0f5c3886dfdf46e1286ad2f737f4aa | [
"Unlicense",
"MIT"
] | null | null | null | sct_custom/spinalcordtoolbox/scripts/sct_compute_mscc.py | nidebroux/lumbosacral_segmentation | 3217960c6f0f5c3886dfdf46e1286ad2f737f4aa | [
"Unlicense",
"MIT"
] | null | null | null | #!/usr/bin/env python
#########################################################################################
#
# Compute maximum spinal cord compression.
#
# ---------------------------------------------------------------------------------------
# Copyright (c) 2015 Polytechnique Montreal <www.neuro.polymtl.ca>
# Author: Julien Cohen-Adad
#
# About the license: see the file LICENSE.TXT
#########################################################################################
import sys
import os
from spinalcordtoolbox.utils import SCTArgumentParser, Metavar, init_sct, printv, set_global_loglevel
# PARSER
# ==========================================================================================
def get_parser():
    """Builds the CLI parser for the MSCC computation (three distances in mm)."""
    parser = SCTArgumentParser(
        description='Compute Maximum Spinal Cord Compression (MSCC) as in: Miyanji F, Furlan JC, Aarabi B, Arnold PM, '
                    'Fehlings MG. Acute cervical traumatic spinal cord injury: MR imaging findings correlated with '
                    'neurologic outcome--prospective study with 100 consecutive patients. Radiology 2007;243(3):820-'
                    '827.'
    )
    mandatoryArguments = parser.add_argument_group("\nMANDATORY ARGUMENTS")
    # di: cord diameter at the injury level.
    mandatoryArguments.add_argument(
        '-di',
        type=float,
        required=True,
        help='Anteroposterior cord distance (in mm) at the level of maximum injury. Example: 6.85',
        metavar=Metavar.float,
    )
    # da: normal cord diameter above the injury.
    mandatoryArguments.add_argument(
        '-da',
        type=float,
        required=True,
        help='Anteroposterior cord distance (in mm) at the nearest normal level above the level of injury.',
        metavar=Metavar.float,
    )
    # db: normal cord diameter below the injury.
    mandatoryArguments.add_argument(
        '-db',
        type=float,
        required=True,
        help='Anteroposterior cord distance (in mm) at the nearest normal level below the level of injury.',
        metavar=Metavar.float,
    )
    optional = parser.add_argument_group("\nOPTIONAL ARGUMENTS")
    optional.add_argument(
        "-h",
        "--help",
        action="help",
        help="Show this help message and exit")
    optional.add_argument(
        '-v',
        metavar=Metavar.int,
        type=int,
        choices=[0, 1, 2],
        default=1,
        # Values [0, 1, 2] map to logging levels [WARNING, INFO, DEBUG], but are also used as "if verbose == #" in API
        help="Verbosity. 0: Display only errors/warnings, 1: Errors/warnings + info messages, 2: Debug mode")
    return parser
def mscc(di, da, db):
    """Returns the Maximum Spinal Cord Compression, as a percentage.

    MSCC = (1 - di / mean(da, db)) * 100, where `di` is the anteroposterior
    cord distance at the injury level and `da`/`db` are the distances at the
    nearest normal levels above and below it (all in mm).
    """
    mean_normal = (da + db) / 2.0
    return 100.0 * (1.0 - float(di) / mean_normal)
# MAIN
# ==========================================================================================
def main(argv=None):
    """Parses CLI arguments, computes the MSCC, and prints the result.

    Args:
        argv: optional argument list (defaults to sys.argv handling inside
            argparse when None).
    """
    parser = get_parser()
    arguments = parser.parse_args(argv)
    verbose = arguments.v
    set_global_loglevel(verbose=verbose)
    # Get parser info
    di = arguments.di
    da = arguments.da
    db = arguments.db
    # Compute MSCC
    MSCC = mscc(di, da, db)
    # Display results
    printv('\nMSCC = ' + str(MSCC) + '\n', verbose, 'info')
if __name__ == "__main__":
    # Initialize the SCT environment (logging etc.) before running.
    init_sct()
    main(sys.argv[1:])
| 32.081633 | 119 | 0.543575 |
57cdb5bbbc157eadb44484187405e3df64e290f9 | 417 | py | Python | Doxygen/index.py | tomkys144/WatchyBell | 75b57c961427d77ddfe4877bdb4919644bd44878 | [
"MIT"
] | null | null | null | Doxygen/index.py | tomkys144/WatchyBell | 75b57c961427d77ddfe4877bdb4919644bd44878 | [
"MIT"
] | null | null | null | Doxygen/index.py | tomkys144/WatchyBell | 75b57c961427d77ddfe4877bdb4919644bd44878 | [
"MIT"
] | null | null | null | import os
dirname = os.path.dirname(__file__)
with open(os.path.join(dirname, '../README.md'), 'r') as f:
readme = f.read()
f.close()
readme = readme.replace("WatchyBell", "%WatchyBell", 1)
readme = readme.replace("https://tomkys144.github.io/WatchyBell", "https://github.com/tomkys144/WatchyBell")
with open(os.path.join(dirname, "index.md"), 'w') as f_out:
f_out.write(readme)
f_out.close()
| 27.8 | 108 | 0.671463 |
1f746a95a4b15979bba4829997de8333653a5189 | 3,135 | py | Python | pyjsonnlp/annotation/relations.py | SemiringInc/Py-JSON-NLP | 7db98f5180f328dfa26418cb00a0a5c4ce8095a5 | [
"Apache-2.0"
] | null | null | null | pyjsonnlp/annotation/relations.py | SemiringInc/Py-JSON-NLP | 7db98f5180f328dfa26418cb00a0a5c4ce8095a5 | [
"Apache-2.0"
] | null | null | null | pyjsonnlp/annotation/relations.py | SemiringInc/Py-JSON-NLP | 7db98f5180f328dfa26418cb00a0a5c4ce8095a5 | [
"Apache-2.0"
] | null | null | null | from collections import OrderedDict
from pyjsonnlp.annotation import Annotator
from pyjsonnlp.dependencies import UniversalDependencyParse
class RelationAnnotator(Annotator):
    """Extracts predicate/subject/object relations from clause-annotated JSON-NLP.

    Only simple (non-complex) transitive sentences currently produce a
    relation; intransitive and ditransitive cases are intentionally skipped.
    """
    def annotate(self, nlp_json: OrderedDict) -> None:
        """Populates doc['relations'] for every document in `nlp_json`.

        Raises:
            BrokenPipeError: if clause extraction ('complex' flag) has not
                been run first. NOTE(review): an unusual exception type for
                a pipeline-order error; callers must catch it as-is.
        """
        for doc in nlp_json['documents'].values():
            if 'relations' not in doc:
                doc['relations'] = {}
            # Continue numbering after any pre-existing relations.
            r_id = len(doc['relations']) + 1
            # NOTE(review): `d` is only used by the commented-out
            # ditransitive logic below; currently dead weight.
            d = UniversalDependencyParse(doc['dependencies'][0], doc['tokenList'])
            for s_id, sent in doc['sentences'].items():
                if 'complex' not in sent:
                    raise BrokenPipeError('You must do clause extraction first!')
                if not sent['complex']:
                    if sent.get('transitivity') == 'transitive':
                        doc['relations'][r_id] = self.build_relation(r_id=r_id,
                                                                     predicate=sent['mainVerb'],
                                                                     p_from=sent['subject'],
                                                                     p_to=sent['object'])
                    elif sent.get('transitivity') == 'intransitive':
                        # these are attributes rather than relations (He died -> He is dead)
                        pass
                    elif sent.get('transitivity') == 'ditransitive':
                        pass
                        # the idea here is to combine the obj and iobj, but it needs more thought.
                        # trans_rel = self.build_relation(r_id=r_id, predicate=sent['mainVerb'],
                        #                                 p_from=sent['subject'],
                        #                                 p_to=sent['object'])
                        # intrans_rel = dict(trans_rel)
                        # doc['relations'][r_id] = trans_rel,
                        # r_id += 1
                        # intrans_rel['id'] = r_id,
                        # intrans_rel['predicate'].extend(intrans_rel['to'])
                        # intrans_rel['to'] = [t['id'] for t in d.get_leaves(sent['indirectObject'][0])]
                        # doc['relations'][r_id] = intrans_rel
                    # Incremented for every simple sentence, so relation ids
                    # may have gaps; they stay unique within the document.
                    r_id += 1
    @staticmethod
    def build_relation(r_id: int, predicate: dict, p_from: dict, p_to: dict) -> dict:
        """Assembles a single relation entry in the JSON-NLP relation shape."""
        return {
            'id': r_id,
            'predicate': predicate,
            'from': p_from,
            'to': p_to,
        }
class PresuppositionRelationAnnotator(Annotator):
    """Stub for presupposition-based relation extraction (not implemented).

    The scaffolding mirrors RelationAnnotator.annotate, but the per-sentence
    body is empty; the dependency parse `d` and `r_id` are prepared and
    currently unused.
    """
    def annotate(self, nlp_json: OrderedDict) -> None:
        for doc in nlp_json['documents'].values():
            if 'relations' not in doc:
                doc['relations'] = {}
            r_id = len(doc['relations']) + 1
            d = UniversalDependencyParse(doc['dependencies'][0], doc['tokenList'])
            for s_id, sent in doc['sentences'].items():
                pass
def write_snap(nlp_json: OrderedDict, file='relations.csv'):
    """Stub: intended to export relations to a SNAP-style CSV (not implemented).

    Iterates all relations of all documents but writes nothing yet; the
    `file` parameter is currently unused.
    """
    for doc in nlp_json.get('documents', {}).values():
        for rel in doc.get('relations', {}).values():
            pass
| 46.791045 | 104 | 0.484211 |
d4b17bf799478b77f9109ee830bab76e50fe070a | 14,816 | py | Python | tests/test_sklearn_adaboost_converter.py | Alexsandruss/sklearn-onnx | b612557615df439e471867a676c9eca8ae4a787c | [
"Apache-2.0"
] | null | null | null | tests/test_sklearn_adaboost_converter.py | Alexsandruss/sklearn-onnx | b612557615df439e471867a676c9eca8ae4a787c | [
"Apache-2.0"
] | null | null | null | tests/test_sklearn_adaboost_converter.py | Alexsandruss/sklearn-onnx | b612557615df439e471867a676c9eca8ae4a787c | [
"Apache-2.0"
] | null | null | null | # SPDX-License-Identifier: Apache-2.0
import unittest
from distutils.version import StrictVersion
import onnx
from onnx.defs import onnx_opset_version
import onnxruntime
from sklearn.ensemble import AdaBoostClassifier, AdaBoostRegressor
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from skl2onnx import convert_sklearn
from skl2onnx.common.data_types import (
BooleanTensorType,
FloatTensorType,
Int64TensorType,
)
from skl2onnx.common.data_types import onnx_built_with_ml
from test_utils import (
dump_data_and_model,
fit_classification_model,
fit_regression_model,
TARGET_OPSET
)
class TestSklearnAdaBoostModels(unittest.TestCase):
    """Unit tests for ONNX conversion of scikit-learn AdaBoost models.

    Each test fits an AdaBoost classifier/regressor, converts it with
    convert_sklearn, and compares ONNX Runtime output against scikit-learn
    via dump_data_and_model. Version-dependent cases are skipped (skipIf) or
    tolerated (allow_failure) for old onnx/onnxruntime releases.
    """
    @unittest.skipIf(not onnx_built_with_ml(),
                     reason="Requires ONNX-ML extension.")
    @unittest.skipIf((StrictVersion(onnx.__version__) <
                      StrictVersion("1.5.0")),
                     reason="not available")
    def test_ada_boost_classifier_samme_r(self):
        # SAMME.R boosting over shallow decision trees, 3 classes.
        model, X_test = fit_classification_model(AdaBoostClassifier(
            n_estimators=10, algorithm="SAMME.R", random_state=42,
            base_estimator=DecisionTreeClassifier(
                max_depth=2, random_state=42)), 3)
        model_onnx = convert_sklearn(
            model,
            "AdaBoost classification",
            [("input", FloatTensorType((None, X_test.shape[1])))],
            target_opset=10
        )
        self.assertIsNotNone(model_onnx)
        dump_data_and_model(
            X_test,
            model,
            model_onnx,
            basename="SklearnAdaBoostClassifierSAMMER",
            allow_failure="StrictVersion("
                          "onnxruntime.__version__)"
                          "<= StrictVersion('0.2.1')",
        )
    @unittest.skipIf(not onnx_built_with_ml(),
                     reason="Requires ONNX-ML extension.")
    @unittest.skipIf((StrictVersion(onnx.__version__) <
                      StrictVersion("1.5.0")),
                     reason="not available")
    def test_ada_boost_classifier_samme_r_decision_function(self):
        model, X_test = fit_classification_model(AdaBoostClassifier(
            n_estimators=10, algorithm="SAMME.R", random_state=42,
            base_estimator=DecisionTreeClassifier(
                max_depth=2, random_state=42)), 4)
        # raw_scores=True makes the converter emit decision-function scores.
        options = {id(model): {'raw_scores': True}}
        model_onnx = convert_sklearn(
            model,
            "AdaBoost classification",
            [("input", FloatTensorType((None, X_test.shape[1])))],
            target_opset=10,
            options=options,
        )
        self.assertIsNotNone(model_onnx)
        dump_data_and_model(
            X_test,
            model,
            model_onnx,
            basename="SklearnAdaBoostClassifierSAMMERDecisionFunction",
            allow_failure="StrictVersion("
                          "onnxruntime.__version__)"
                          "<= StrictVersion('0.2.1')",
            methods=['predict', 'decision_function'],
        )
    @unittest.skipIf(not onnx_built_with_ml(),
                     reason="Requires ONNX-ML extension.")
    @unittest.skipIf((StrictVersion(onnx.__version__) <
                      StrictVersion("1.5.0")),
                     reason="not available")
    def test_ada_boost_classifier_samme_r_logreg(self):
        model, X_test = fit_classification_model(AdaBoostClassifier(
            n_estimators=5, algorithm="SAMME.R",
            base_estimator=LogisticRegression(
                solver='liblinear')), 4)
        model_onnx = convert_sklearn(
            model,
            "AdaBoost classification",
            [("input", FloatTensorType((None, X_test.shape[1])))],
            target_opset=10
        )
        self.assertIsNotNone(model_onnx)
        dump_data_and_model(
            X_test,
            model,
            model_onnx,
            basename="SklearnAdaBoostClassifierSAMMERLogReg",
            allow_failure="StrictVersion("
                          "onnxruntime.__version__)"
                          "<= StrictVersion('0.2.1')",
        )
    @unittest.skipIf(not onnx_built_with_ml(),
                     reason="Requires ONNX-ML extension.")
    @unittest.skipIf((StrictVersion(onnx.__version__) <
                      StrictVersion("1.5.0")),
                     reason="not available")
    def test_ada_boost_classifier_samme(self):
        model, X_test = fit_classification_model(AdaBoostClassifier(
            n_estimators=5, algorithm="SAMME", random_state=42,
            base_estimator=DecisionTreeClassifier(
                max_depth=6, random_state=42)), 2)
        model_onnx = convert_sklearn(
            model,
            "AdaBoostClSamme",
            [("input", FloatTensorType((None, X_test.shape[1])))],
            target_opset=10,
        )
        self.assertIsNotNone(model_onnx)
        dump_data_and_model(
            X_test,
            model,
            model_onnx,
            basename="SklearnAdaBoostClassifierSAMMEDT",
            allow_failure="StrictVersion("
                          "onnxruntime.__version__)"
                          "< StrictVersion('0.5.0')",
        )
    @unittest.skipIf(not onnx_built_with_ml(),
                     reason="Requires ONNX-ML extension.")
    @unittest.skipIf((StrictVersion(onnx.__version__) <
                      StrictVersion("1.5.0")),
                     reason="not available")
    def test_ada_boost_classifier_samme_decision_function(self):
        model, X_test = fit_classification_model(AdaBoostClassifier(
            n_estimators=5, algorithm="SAMME", random_state=42,
            base_estimator=DecisionTreeClassifier(
                max_depth=6, random_state=42)), 2)
        options = {id(model): {'raw_scores': True}}
        model_onnx = convert_sklearn(
            model,
            "AdaBoostClSamme",
            [("input", FloatTensorType((None, X_test.shape[1])))],
            target_opset=10,
            options=options,
        )
        self.assertIsNotNone(model_onnx)
        dump_data_and_model(
            X_test,
            model,
            model_onnx,
            basename="SklearnAdaBoostClassifierSAMMEDTDecisionFunction",
            allow_failure="StrictVersion("
                          "onnxruntime.__version__)"
                          "< StrictVersion('0.5.0')",
            methods=['predict', 'decision_function_binary'],
        )
    @unittest.skipIf(not onnx_built_with_ml(),
                     reason="Requires ONNX-ML extension.")
    @unittest.skipIf((StrictVersion(onnx.__version__) <
                      StrictVersion("1.5.0")),
                     reason="not available")
    def test_ada_boost_classifier_lr(self):
        # Integer-typed inputs (Int64TensorType).
        model, X_test = fit_classification_model(
            AdaBoostClassifier(learning_rate=0.3, random_state=42), 3,
            is_int=True)
        model_onnx = convert_sklearn(
            model,
            "AdaBoost classification",
            [("input", Int64TensorType((None, X_test.shape[1])))],
            target_opset=10
        )
        self.assertIsNotNone(model_onnx)
        dump_data_and_model(
            X_test,
            model,
            model_onnx,
            basename="SklearnAdaBoostClassifierLR",
            allow_failure="StrictVersion("
                          "onnxruntime.__version__)"
                          "<= StrictVersion('0.2.1')",
        )
    @unittest.skipIf(not onnx_built_with_ml(),
                     reason="Requires ONNX-ML extension.")
    @unittest.skipIf((StrictVersion(onnx.__version__) <
                      StrictVersion("1.5.0")),
                     reason="not available")
    def test_ada_boost_classifier_bool(self):
        # Boolean-typed inputs (BooleanTensorType).
        model, X_test = fit_classification_model(
            AdaBoostClassifier(random_state=42), 3,
            is_bool=True)
        model_onnx = convert_sklearn(
            model,
            "AdaBoost classification",
            [("input", BooleanTensorType((None, X_test.shape[1])))],
            target_opset=10,
        )
        self.assertIsNotNone(model_onnx)
        dump_data_and_model(
            X_test,
            model,
            model_onnx,
            basename="SklearnAdaBoostClassifierBool",
            allow_failure="StrictVersion("
                          "onnxruntime.__version__)"
                          "<= StrictVersion('0.2.1')",
        )
    @unittest.skipIf(not onnx_built_with_ml(),
                     reason="Requires ONNX-ML extension.")
    @unittest.skipIf((StrictVersion(onnx.__version__) <
                      StrictVersion("1.5.0")),
                     reason="not available")
    def test_ada_boost_regressor(self):
        model, X = fit_regression_model(
            AdaBoostRegressor(n_estimators=5))
        model_onnx = convert_sklearn(
            model, "AdaBoost regression",
            [("input", FloatTensorType([None, X.shape[1]]))],
            target_opset=10)
        self.assertIsNotNone(model_onnx)
        dump_data_and_model(
            X,
            model,
            model_onnx,
            basename="SklearnAdaBoostRegressor-Dec4",
            allow_failure="StrictVersion("
                          "onnxruntime.__version__) "
                          "< StrictVersion('0.5.0') or "
                          "StrictVersion(onnx.__version__) "
                          "== StrictVersion('1.4.1')",
        )
    @unittest.skipIf(not onnx_built_with_ml(),
                     reason="Requires ONNX-ML extension.")
    @unittest.skipIf((StrictVersion(onnx.__version__) <
                      StrictVersion("1.5.0")),
                     reason="not available")
    def test_ada_boost_regressor_lreg(self):
        # Linear-regression base estimator instead of the default tree.
        model, X = fit_regression_model(
            AdaBoostRegressor(n_estimators=5,
                              base_estimator=LinearRegression()))
        model_onnx = convert_sklearn(
            model, "AdaBoost regression",
            [("input", FloatTensorType([None, X.shape[1]]))],
            target_opset=10)
        self.assertIsNotNone(model_onnx)
        dump_data_and_model(
            X,
            model,
            model_onnx,
            basename="SklearnAdaBoostRegressorLReg-Dec4",
            allow_failure="StrictVersion("
                          "onnxruntime.__version__) "
                          "< StrictVersion('0.5.0') or "
                          "StrictVersion(onnx.__version__) "
                          "== StrictVersion('1.4.1')",
        )
    @unittest.skipIf(not onnx_built_with_ml(),
                     reason="Requires ONNX-ML extension.")
    @unittest.skipIf((StrictVersion(onnx.__version__) <
                      StrictVersion("1.5.0")),
                     reason="not available")
    def test_ada_boost_regressor_int(self):
        model, X = fit_regression_model(
            AdaBoostRegressor(n_estimators=5), is_int=True)
        model_onnx = convert_sklearn(
            model, "AdaBoost regression",
            [("input", Int64TensorType([None, X.shape[1]]))],
            target_opset=10)
        self.assertIsNotNone(model_onnx)
        dump_data_and_model(
            X,
            model,
            model_onnx,
            basename="SklearnAdaBoostRegressorInt-Dec4",
            allow_failure="StrictVersion("
                          "onnxruntime.__version__) "
                          "< StrictVersion('0.5.0') or "
                          "StrictVersion(onnx.__version__) "
                          "== StrictVersion('1.4.1')",
        )
    @unittest.skipIf(not onnx_built_with_ml(),
                     reason="Requires ONNX-ML extension.")
    @unittest.skipIf((StrictVersion(onnx.__version__) <
                      StrictVersion("1.5.0")),
                     reason="not available")
    def test_ada_boost_regressor_lr10(self):
        # Conversion with opset 10.
        model, X = fit_regression_model(
            AdaBoostRegressor(learning_rate=0.5, random_state=42))
        model_onnx = convert_sklearn(
            model, "AdaBoost regression",
            [("input", FloatTensorType([None, X.shape[1]]))],
            target_opset=10)
        self.assertIsNotNone(model_onnx)
        dump_data_and_model(
            X,
            model,
            model_onnx,
            basename="SklearnAdaBoostRegressorLR-Dec4",
            allow_failure="StrictVersion("
                          "onnxruntime.__version__) "
                          "< StrictVersion('0.5.0') or "
                          "StrictVersion(onnx.__version__) "
                          "== StrictVersion('1.4.1')",
            verbose=False
        )
    @unittest.skipIf(not onnx_built_with_ml(),
                     reason="Requires ONNX-ML extension.")
    @unittest.skipIf((StrictVersion(onnxruntime.__version__) <
                      StrictVersion("0.5.9999")),
                     reason="not available")
    @unittest.skipIf((StrictVersion(onnx.__version__) <
                      StrictVersion("1.5.0")),
                     reason="not available")
    def test_ada_boost_regressor_lr11(self):
        # Same model as lr10 but converted with the default/latest opset;
        # on opset < 11 the converter is expected to raise.
        model, X = fit_regression_model(
            AdaBoostRegressor(learning_rate=0.5, random_state=42))
        if onnx_opset_version() < 11:
            try:
                convert_sklearn(
                    model, "AdaBoost regression",
                    [("input", FloatTensorType([None, X.shape[1]]))])
            except RuntimeError:
                return
        model_onnx = convert_sklearn(
            model, "AdaBoost regression",
            [("input", FloatTensorType([None, X.shape[1]]))],
            target_opset=TARGET_OPSET)
        self.assertIsNotNone(model_onnx)
        dump_data_and_model(
            X,
            model,
            model_onnx,
            basename="SklearnAdaBoostRegressorLR-Dec4",
            allow_failure="StrictVersion("
                          "onnxruntime.__version__) "
                          "< StrictVersion('0.5.9999') or "
                          "StrictVersion(onnx.__version__) "
                          "== StrictVersion('1.4.1')",
            verbose=False
        )
    @unittest.skipIf(not onnx_built_with_ml(),
                     reason="Requires ONNX-ML extension.")
    @unittest.skipIf((StrictVersion(onnx.__version__) <
                      StrictVersion("1.5.0")),
                     reason="not available")
    def test_ada_boost_regressor_bool(self):
        model, X = fit_regression_model(
            AdaBoostRegressor(learning_rate=0.5, random_state=42),
            is_bool=True)
        model_onnx = convert_sklearn(
            model, "AdaBoost regression",
            [("input", BooleanTensorType([None, X.shape[1]]))],
            target_opset=10,
        )
        self.assertIsNotNone(model_onnx)
        dump_data_and_model(
            X,
            model,
            model_onnx,
            basename="SklearnAdaBoostRegressorBool",
            allow_failure="StrictVersion("
                          "onnxruntime.__version__) "
                          "< StrictVersion('0.5.0') or "
                          "StrictVersion(onnx.__version__) "
                          "== StrictVersion('1.4.1')",
            verbose=False,
        )
# Allow running this test module directly with the unittest runner.
if __name__ == "__main__":
    unittest.main()
| 37.989744 | 72 | 0.575459 |
f151ba9fd42c248c53d59f417c12ac01cb84144d | 1,842 | py | Python | cat2cat/cat2cat_merge_missing_attributes.py | Filter-Bubble/FormatConversions | 91c313d66edba077462740c1403a705aa1f96df4 | [
"Apache-2.0"
] | 3 | 2019-11-21T13:43:37.000Z | 2021-05-12T20:46:49.000Z | cat2cat/cat2cat_merge_missing_attributes.py | Filter-Bubble/FormatConversions | 91c313d66edba077462740c1403a705aa1f96df4 | [
"Apache-2.0"
] | 3 | 2018-05-22T13:07:43.000Z | 2020-03-14T17:31:15.000Z | cat2cat/cat2cat_merge_missing_attributes.py | Filter-Bubble/FormatConversions | 91c313d66edba077462740c1403a705aa1f96df4 | [
"Apache-2.0"
] | 2 | 2020-03-05T15:55:47.000Z | 2021-05-12T20:46:50.000Z | from lxml import etree
import sys
import os
def get_token_list(mark):
    """Return a key identifying *mark* by the t_ids of its token anchors.

    The ids are joined with "," so that distinct anchor sequences can
    never collide (plain concatenation made ("1", "23") and ("12", "3")
    produce the same key). Both the dict built by create_annotation_dict()
    and the lookup in merge_annotations() use this function, so the keys
    stay consistent.

    Args:
        mark: a markable XML element containing <token_anchor> children
    Returns:
        Comma-separated string of the anchors' t_id attributes.
    """
    return ','.join(token.get('t_id') for token in mark.findall('token_anchor'))
def create_annotation_dict(fname):
    """Map each markable's token-anchor key to [type, tokenization_error].

    Args:
        fname: path to a corrected CAT XML file
    Returns:
        dict keyed by get_token_list(markable)
    """
    tree = etree.parse(fname, etree.XMLParser(ns_clean=True))
    annotations = {}
    for markable in tree.find('Markables'):
        annotations[get_token_list(markable)] = [
            markable.get('type'),
            markable.get('tokenization_error'),
        ]
    return annotations
def merge_annotations(originfile, corrected_mark, outfname):
    """Copy corrected attributes onto the original file and write the result.

    For every markable of *originfile* whose token-anchor key appears in
    *corrected_mark*, overwrite its 'type' and 'tokenization_error'
    attributes, then write the pretty-printed document to *outfname*.

    Args:
        originfile: path to the original CAT XML file
        corrected_mark: dict produced by create_annotation_dict()
        outfname: path of the merged output file
    """
    parser = etree.XMLParser(ns_clean=True)
    correctedcat = etree.parse(originfile, parser)
    for mark in correctedcat.find('Markables'):
        tanch = get_token_list(mark)
        if tanch in corrected_mark:
            mtype, tokenerr = corrected_mark[tanch]
            mark.set('type', mtype)
            mark.set('tokenization_error', tokenerr)
    my_out = etree.tounicode(correctedcat, pretty_print=True)
    # Use a context manager so the output file is flushed and closed
    # (the original left the handle open, risking truncated output).
    with open(outfname, 'w') as outfile:
        print(my_out, file=outfile)
def merge_directory(cordir, oridir, outdir):
    """Merge corrections for every XML file of *cordir* into *outdir*.

    Each file name ending in 'xml' is expected to exist in *oridir* too;
    the directory arguments must carry their trailing separator.
    """
    xml_names = (name for name in os.listdir(cordir) if name.endswith('xml'))
    for name in xml_names:
        corrections = create_annotation_dict(cordir + name)
        merge_annotations(oridir + name, corrections, outdir + name)
def main(argv=None):
    """Entry point: merge corrected annotations for a whole directory.

    Expects three positional arguments: the corrected directory, the
    original directory and the output directory (all with trailing '/').

    Args:
        argv: argument vector; defaults to sys.argv
    """
    if argv is None:
        argv = sys.argv
    # Three directories are required, so argv needs at least 4 entries;
    # the old `len(argv) < 3` check crashed with IndexError on exactly
    # three entries because argv[3] was accessed below.
    if len(argv) < 4:
        # The original message also ran 'originaldir/' and 'outdir/' together.
        print('Usage for directory: python cat2cat_missing_attributes.py '
              'correctedir/ originaldir/ outdir/')
    else:
        merge_directory(argv[1], argv[2], argv[3])
# Script entry point.
if __name__ == '__main__':
    main()
c0e183d007e0c56628138c4999f75ee400cddf70 | 8,314 | py | Python | docs/conf.py | dbierer/linuxforcomposer | fb91a13ac3efa434cd57028676ccf8c95f581df9 | [
"Apache-2.0"
] | null | null | null | docs/conf.py | dbierer/linuxforcomposer | fb91a13ac3efa434cd57028676ccf8c95f581df9 | [
"Apache-2.0"
] | null | null | null | docs/conf.py | dbierer/linuxforcomposer | fb91a13ac3efa434cd57028676ccf8c95f581df9 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Linux for Composer documentation build configuration file, created by
# sphinx-quickstart on Sun Jul 17 00:20:22 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Linux for Composer'
copyright = '2017-2020, Foreach Code Factory.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.0'
# The full version, including alpha/beta/rc tags.
release = '2.0.9'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_build/_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'LinuxForComposerdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'LinuxForComposer.tex', 'Linux For Composer Documentation',
'Foreach Code Factory', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'linuxforcomposer', 'Linux For Composer Documentation',
['Foreach Code Factory'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'LinuxForComposer', 'Linux For Composer Documentation',
'Foreach Code Factory', 'LinuxForComposer', 'Run any PHP, anywhere, anytime using Composer!',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| 31.976923 | 96 | 0.71999 |
7b27a4a5af645cc82f74dde8a77d3c05c981addb | 11,152 | py | Python | core/validators.py | prorevizor/noc | 37e44b8afc64318b10699c06a1138eee9e7d6a4e | [
"BSD-3-Clause"
] | 84 | 2017-10-22T11:01:39.000Z | 2022-02-27T03:43:48.000Z | core/validators.py | prorevizor/noc | 37e44b8afc64318b10699c06a1138eee9e7d6a4e | [
"BSD-3-Clause"
] | 22 | 2017-12-11T07:21:56.000Z | 2021-09-23T02:53:50.000Z | core/validators.py | prorevizor/noc | 37e44b8afc64318b10699c06a1138eee9e7d6a4e | [
"BSD-3-Clause"
] | 23 | 2017-12-06T06:59:52.000Z | 2022-02-24T00:02:25.000Z | # ---------------------------------------------------------------------
# Data type validators
# ---------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Python modules
import re
import uuid
import socket
# Third-party modules
try:
from django.forms import ValidationError
# pragma: no cover
except Exception: # noqa
pass
# NOC modules
from noc.core.mac import MAC
#
# Regular expressions
#
rx_fqdn = re.compile(r"^([a-z0-9\-]+\.)+[a-z0-9\-]+$", re.IGNORECASE)
rx_asset = re.compile(r"^AS-[A-Z0-9\-]+$")
rx_extension = re.compile(r"^\.[a-zA-Z0-9]+$")
rx_mimetype = re.compile(r"^[a-zA-Z0-9\-]+/[a-zA-Z0-9\-]+$")
rx_email = re.compile(r"^[a-z0-9._\-+]+@([a-z0-9\-]+\.)+[a-z0-9\-]+$", re.IGNORECASE)
rx_oid = re.compile(r"^(\d+\.){5,}\d+$")
rx_objectid = re.compile(r"^[0-9a-f]{24}$")
#
# Validators returning boolean
#
def is_int(v):
    """
    Check for valid integer

    >>> is_int(10)
    True
    >>> is_int("10")
    True
    >>> is_int("Ten")
    False
    >>> is_int(None)
    False
    """
    # int() raises ValueError for non-numeric strings and TypeError
    # for non-convertible objects such as None.
    try:
        int(v)
    except (ValueError, TypeError):
        return False
    return True
def is_float(v):
    """
    Check for valid float

    >>> is_float(10)
    True
    >>> is_float(10.2)
    True
    >>> is_float("10.2")
    True
    >>> is_float("Ten")
    False
    >>> is_float(None)
    False
    """
    # float() raises ValueError for non-numeric strings and TypeError
    # for non-convertible objects such as None.
    try:
        float(v)
    except (ValueError, TypeError):
        return False
    return True
def is_asn(v):
    """
    Check value is valid 2-byte or 4-byte autonomous system number

    >>> is_asn(100)
    True
    >>> is_asn(100000)
    True
    >>> is_asn(4294967295)
    True
    >>> is_asn(4294967296)
    False
    >>> is_asn(-1)
    False
    >>> is_asn(None)
    False
    """
    try:
        v = int(v)
    except (ValueError, TypeError):
        # TypeError was previously uncaught, so is_asn(None) raised
        # instead of returning False like the other validators.
        return False
    # 4-byte ASNs (RFC 6793) are limited to 2**32 - 1; the old check
    # accepted arbitrarily large positive integers.
    return 0 < v <= 4294967295
def is_ipv4(v):
    """
    Check value is valid IPv4 address

    >>> is_ipv4("192.168.0.1")
    True
    >>> is_ipv4("0.0.0.0")
    True
    >>> is_ipv4("192.168.0")
    False
    >>> is_ipv4("192.168.0.1.1")
    False
    >>> is_ipv4("192.168.1.256")
    False
    >>> is_ipv4("192.168.a.250")
    False
    >>> is_ipv4("11.24.0.09")
    False
    """
    parts = v.split(".")
    if len(parts) != 4:
        return False
    try:
        if any(not 0 <= int(p) <= 255 for p in parts):
            return False
        # inet_aton() additionally rejects forms int() would accept,
        # e.g. the invalid leading-zero octet "09".
        # Do NOT truth-test its return value: the packed form of
        # "0.0.0.0" is b"\x00\x00\x00\x00", which is falsy, so the old
        # `bool(socket.inet_aton(v))` wrongly rejected "0.0.0.0".
        socket.inet_aton(v)
    except Exception:
        return False
    return True
def is_ipv6(v):
    """
    Check value is valid IPv6 address

    >>> is_ipv6("::")
    True
    >>> is_ipv6("::1")
    True
    >>> is_ipv6("2001:db8::")
    True
    >>> is_ipv6("2001:db8:0000:0000:6232:6400::")
    True
    >>> is_ipv6("::ffff:192.168.0.1")
    True
    >>> is_ipv6("::ffff:192.168.0.256")
    False
    >>> is_ipv6("fe80::226:b0ff:fef7:c48c")
    True
    >>> is_ipv6("0:1:2:3:4:5:6:7:8")
    False
    >>> is_ipv6("0:1:2")
    False
    >>> is_ipv6("::g")
    False
    >>> is_ipv6("100:0:")
    False
    >>> is_ipv6("2a00:1118:f00f:fffe:c143:3284:1000::")
    True
    """
    # Special case: the unspecified address.
    if v == "::":
        return True
    parts = v.split(":")
    # Without a "::" gap there must be exactly 8 groups.
    if len(parts) != 8 and "::" not in v:
        return False
    # A trailing "::" yields two empty trailing groups; drop one of them.
    if len(parts) == 9 and not parts[-1] and not parts[-2]:
        parts = parts[:-1]
    # Process IPv4 at the end
    # (IPv4-mapped form: fold the dotted quad into two 16-bit hex groups.)
    if parts and "." in parts[-1]:
        if not is_ipv4(parts[-1]):
            return False
        p = [int(x) for x in parts[-1].split(".")]
        parts = parts[:-1] + ["%02x%02x" % (p[0], p[1]), "%02x%02x" % (p[2], p[3])]
    if len(parts) > 8:
        return False
    if len(parts) == 8:
        # Replace empty parts with "0"
        parts = [pp if pp else "0" for pp in parts]
    else:
        # Expand ::
        # h/t collect the groups before/after the gap; the middle is
        # filled with zero groups so that exactly 8 groups remain.
        try:
            i = parts.index("")
        except ValueError:
            return False
        h = []
        if i > 0:
            h = parts[:i]
        if i + 1 < len(parts) and not parts[i + 1]:
            i += 1
        t = parts[i + 1 :]
        parts = h + ["0"] * (8 - len(h) - len(t)) + t
    # Check all parts
    # (each group must be parseable as hexadecimal).
    try:
        for p in parts:
            int(p, 16)
    except ValueError:
        return False
    return True
def is_ipv4_prefix(v):
    """
    Check value is valid IPv4 prefix

    >>> is_ipv4_prefix("192.168.0.0")
    False
    >>> is_ipv4_prefix("192.168.0.0/16")
    True
    >>> is_ipv4_prefix("192.168.256.0/24")
    False
    >>> is_ipv4_prefix("192.168.0.0/g")
    False
    >>> is_ipv4_prefix("192.168.0.0/-1")
    False
    >>> is_ipv4_prefix("192.168.0.0/33")
    False
    """
    address, sep, mask = v.partition("/")
    if not sep or not is_ipv4(address):
        return False
    try:
        bits = int(mask)
    except ValueError:
        # Covers non-numeric masks and extra "/" separators.
        return False
    return 0 <= bits <= 32


is_cidr = is_ipv4_prefix
def is_ipv6_prefix(v):
    """
    Check value is valid IPv6 prefix

    >>> is_ipv6_prefix("1::/32")
    True
    >>> is_ipv6_prefix("1::/-1")
    False
    >>> is_ipv6_prefix("1::/129")
    False
    >>> is_ipv6_prefix("1::/1/2")
    False
    >>> is_ipv6_prefix("1::/g")
    False
    >>> is_ipv6_prefix("192.168.0.0/32")
    False
    """
    address, sep, mask = v.partition("/")
    if not sep or not is_ipv6(address):
        return False
    try:
        bits = int(mask)
    except ValueError:
        # Covers non-numeric masks and extra "/" separators.
        return False
    return 0 <= bits <= 128
def is_prefix(v):
    """
    Check value is valid IPv4 or IPv6 prefix

    >>> is_prefix("192.168.0.0/16")
    True
    >>> is_prefix("1::/32")
    True
    """
    for check in (is_ipv4_prefix, is_ipv6_prefix):
        if check(v):
            return True
    return False
def is_rd(v):
    """
    Check value is valid Route Distinguisher
    Special case RD: 0:0
    >>> is_rd("0:0")
    True
    Type 0 RD: <2byte ASN> : <ID>
    >>> is_rd("100:10")
    True
    >>> is_rd("100:0")
    True
    >>> is_rd("100:4294967295")
    True
    >>> is_rd("0:-10")
    False
    >>> is_rd("0:4294967296")
    False
    Type 1 RD: <IPv4> : <ID>
    >>> is_rd("10.10.10.10:0")
    True
    >>> is_rd("10.10.10.10:65535")
    True
    >>> is_rd("10.10.10.10:-1")
    False
    >>> is_rd("10.10.10.10:65536")
    False
    Type 2 RD: <4byte ASN> : <ID>
    >>> is_rd("100000:0")
    True
    >>> is_rd("100000:65535")
    True
    Error handling
    >>> is_rd("100000:-1")
    False
    >>> is_rd("100000:65536")
    False
    >>> is_rd("10:20:30")
    False
    >>> is_rd("100:b")
    False
    """
    if v == "0:0":
        return True
    # An RD is exactly two colon-separated fields:
    # administrator (ASN or IPv4) and assigned number.
    x = v.split(":")
    if len(x) != 2:
        return False
    a, b = x
    try:
        b = int(b)
    except ValueError:
        return False
    if is_asn(a):
        # is_asn() only accepts numeric values, so int(a) is safe here.
        a = int(a)
        if a <= 65535:
            # Type 0 RD: <2byte ASN>: <ID>
            return 0 <= b <= 4294967295
        # Type 2 RD: <4 byte ASN>: <ID>
        return 0 <= b <= 65535
    if is_ipv4(a):
        # Type 1 RD: <ipv4>:<ID>
        return 0 <= b <= 65535
    return False
def is_as_set(v):
    """
    Check value is valuid AS-SET

    >>> is_as_set("AS-TEST")
    True
    >>> is_as_set("AS100")
    False
    """
    return bool(rx_asset.match(v))
def is_fqdn(v):
    """
    Check value is valid FQDN

    >>> is_fqdn("test.example.com")
    True
    >>> is_fqdn("test")
    False
    """
    return bool(rx_fqdn.match(v))
def is_re(v):
    """
    Check value is valid regular expression

    >>> is_re("1{1,2}")
    True
    >>> is_re("1[")
    False
    """
    try:
        re.compile(v)
    except Exception:
        return False
    return True
def is_vlan(v):
    """
    Check value is valid VLAN ID

    >>> is_vlan(1)
    True
    >>> is_vlan(-1)
    False
    >>> is_vlan(4095)
    True
    >>> is_vlan(4096)
    False
    >>> is_vlan("g")
    False
    >>> is_vlan(None)
    False
    """
    try:
        vlan = int(v)
    except (ValueError, TypeError):
        # TypeError was previously uncaught, so is_vlan(None) raised
        # instead of returning False like is_int()/is_float().
        return False
    return 1 <= vlan <= 4095
def is_mac(v):
    """
    >>> is_mac("1234.5678.9ABC")
    True
    >>> is_mac("1234.5678.9abc")
    True
    >>> is_mac("0112.3456.789a.bc")
    True
    >>> is_mac("1234.5678.9abc.def0")
    False
    >>> is_mac("12:34:56:78:9A:BC")
    True
    >>> is_mac("12-34-56-78-9A-BC")
    True
    >>> is_mac("0:13:46:50:87:5")
    True
    >>> is_mac("123456-789abc")
    True
    >>> is_mac("12-34-56-78-9A-BC-DE")
    False
    >>> is_mac("AB-CD-EF-GH-HJ-KL")
    False
    >>> is_mac("aabb-ccdd-eeff")
    True
    >>> is_mac("aabbccddeeff")
    True
    >>> is_mac("AABBCCDDEEFF")
    True
    >>> is_mac("\\xa8\\xf9K\\x80\\xb4\\xc0")
    False
    """
    # Anything shorter than 12 characters cannot hold 6 octets.
    if v is not None and len(v) >= 12:
        # Delegate the actual format check to the MAC value class.
        try:
            MAC(v)
        except ValueError:
            return False
        return True
    return False
def is_email(v):
    """
    Check value is valid email

    >>> is_email("test@example.com")
    True
    >>> is_email("test")
    False
    """
    return bool(rx_email.match(v))
def is_oid(v):
    """
    Check OID

    >>> is_oid("1.3.6.1.6.3.1.1.4.1.0")
    True
    >>> is_oid("1.3.6.1.6.3.1.1.4.a.1.0")
    False
    """
    return bool(rx_oid.match(v))
def is_extension(v):
    """
    Check value is file extension starting with dot

    >>> is_extension(".txt")
    True
    >>> is_extension("txt")
    False
    """
    return bool(rx_extension.match(v))
def is_mimetype(v):
    """
    Check value is MIME Type

    >>> is_mimetype("application/octet-stream")
    True
    >>> is_mimetype("application")
    False
    """
    return bool(rx_mimetype.match(v))
def is_uuid(v):
    """
    Check value is UUID
    :param v:
    :return:
    """
    try:
        uuid.UUID(v)
    except ValueError:
        return False
    return True
def is_objectid(v):
    """
    Check value is mongodb's ObjectId (24 hexadecimal digits)
    :param v: string to check
    :return: True when v looks like an ObjectId, False otherwise
    """
    # bool() normalizes the short-circuit: the previous `v and ...`
    # returned v itself ("" or None) for falsy input instead of False,
    # unlike every other is_* validator in this module.
    return bool(v) and rx_objectid.match(v) is not None
def generic_validator(check, error_message):
    """
    Validator factory

    Wraps a boolean is_* predicate into a Django-style validator that
    raises ValidationError with *error_message* on failure and returns
    the value unchanged on success.

    >>> v = generic_validator(is_int, "invalid int")
    >>> v(6)
    6
    >>> v("g")
    Traceback (most recent call last):
        ...
    ValidationError: [u'invalid int']
    """
    # Validator closure
    # Extra positional/keyword arguments are accepted and ignored so the
    # validators can be used where callers pass additional context.
    def inner_validator(value, *args, **kwargs):
        if not check(value):
            raise ValidationError(error_message)
        return value

    return inner_validator
#
# Validators
#
check_asn = generic_validator(is_asn, "Invalid ASN")
check_prefix = generic_validator(is_prefix, "Invalid prefix")
check_ipv4 = generic_validator(is_ipv4, "Invalid IPv4")
check_ipv6 = generic_validator(is_ipv6, "Invalid IPv6")
check_ipv4_prefix = generic_validator(is_ipv4_prefix, "Invalid IPv4 prefix")
check_ipv6_prefix = generic_validator(is_ipv6_prefix, "Invalid IPv6 prefix")
check_cidr = generic_validator(is_cidr, "Invalid CIDR")
check_rd = generic_validator(is_rd, "Invalid RD")
check_fqdn = generic_validator(is_fqdn, "Invalid FQDN")
check_re = generic_validator(is_re, "Invalid Regular Expression")
check_as_set = generic_validator(is_as_set, "Invalid AS-SET")
check_vlan = generic_validator(is_vlan, "Invalid VLAN")
check_email = generic_validator(is_email, "Invalid EMail")
check_extension = generic_validator(is_extension, "Invalid extension")
check_mimetype = generic_validator(is_mimetype, "Invalid MIME type")
| 20.057554 | 91 | 0.530219 |
733a445a6d70fcc6de04dcd7df49bb39ebc5e0d7 | 855 | py | Python | examples/pytorch/fx/object_detection/maskrcnn/pytorch/maskrcnn_benchmark/modeling/registry.py | intelkevinputnam/lpot-docs | 1ff32b4d89074a6bd133ba531f7c0cea3b73152f | [
"Apache-2.0"
] | 567 | 2018-09-13T05:07:49.000Z | 2020-11-23T11:52:11.000Z | examples/pytorch/fx/object_detection/maskrcnn/pytorch/maskrcnn_benchmark/modeling/registry.py | intelkevinputnam/lpot-docs | 1ff32b4d89074a6bd133ba531f7c0cea3b73152f | [
"Apache-2.0"
] | 222 | 2018-09-14T10:15:39.000Z | 2020-11-20T22:21:09.000Z | examples/pytorch/fx/object_detection/maskrcnn/pytorch/maskrcnn_benchmark/modeling/registry.py | intelkevinputnam/lpot-docs | 1ff32b4d89074a6bd133ba531f7c0cea3b73152f | [
"Apache-2.0"
] | 279 | 2018-09-16T12:40:29.000Z | 2020-11-17T14:22:52.000Z | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from maskrcnn_benchmark.utils.registry import Registry
BACKBONES = Registry()
ROI_BOX_FEATURE_EXTRACTORS = Registry()
ROI_BOX_PREDICTOR = Registry()
RPN_HEADS = Registry()
| 38.863636 | 74 | 0.77193 |
6fed441587ae6cabeb2cbb92c508e6ef56486529 | 1,009 | py | Python | src/ocr/helpers.py | FalconMadhab/HTR_notebooks | 4f2438193ba434b413e0d74edf53f0a417ee7429 | [
"MIT"
] | 540 | 2017-02-13T19:53:03.000Z | 2022-03-28T10:04:06.000Z | src/ocr/helpers.py | FalconMadhab/HTR_notebooks | 4f2438193ba434b413e0d74edf53f0a417ee7429 | [
"MIT"
] | 149 | 2018-01-27T09:13:13.000Z | 2022-03-09T06:32:49.000Z | src/ocr/helpers.py | FalconMadhab/HTR_notebooks | 4f2438193ba434b413e0d74edf53f0a417ee7429 | [
"MIT"
] | 207 | 2017-08-06T17:27:17.000Z | 2022-03-12T17:41:46.000Z | # -*- coding: utf-8 -*-
"""
Helper functions for ocr project
"""
import matplotlib.pyplot as plt
import numpy as np
import cv2
SMALL_HEIGHT = 800
def implt(img, cmp=None, t=''):
    """Show image using plt.

    Args:
        img: image array to display
        cmp: matplotlib colormap name (e.g. 'gray'); None uses the default
        t: figure title
    """
    plt.imshow(img, cmap=cmp)
    plt.title(t)
    plt.show()
def resize(img, height=SMALL_HEIGHT, always=False):
    """Resize image to given height, keeping the aspect ratio.

    Images already at or below *height* are returned unchanged unless
    *always* is set.
    """
    if img.shape[0] <= height and not always:
        return img
    scale = height / img.shape[0]
    return cv2.resize(img, (int(scale * img.shape[1]), height))
def ratio(img, height=SMALL_HEIGHT):
    """Getting scale ratio.

    Returns img.shape[0] / height, i.e. the factor by which the image's
    height exceeds the reference height.
    """
    return img.shape[0] / height
def img_extend(img, shape):
    """Extend 2D image (numpy array) in vertical and horizontal direction.

    The image is placed in the top-left corner of a zero-filled array.
    Shape of result image will match 'shape'.

    Args:
        img: image to be extended
        shape: shape (tuple) of result image; must be >= img.shape in
            both dimensions
    Returns:
        Extended image with the same dtype as *img*
    """
    # Use the input's dtype instead of hard-coded uint8, which silently
    # truncated float or 16-bit images; uint8 inputs behave exactly as
    # before.
    extended = np.zeros(shape, dtype=img.dtype)
    extended[:img.shape[0], :img.shape[1]] = img
    return extended
21c9ec6219eb4023b398a7cb546b57205bb908d7 | 1,209 | py | Python | tests/test_storage.py | vsvarunsharma10/pqai | 3ef1351fbc39671916517917de9074a62b092eef | [
"MIT"
] | 17 | 2021-06-23T04:17:06.000Z | 2022-03-25T16:03:49.000Z | tests/test_storage.py | vsvarunsharma10/pqai | 3ef1351fbc39671916517917de9074a62b092eef | [
"MIT"
] | 15 | 2021-06-22T10:14:15.000Z | 2022-03-12T00:58:37.000Z | tests/test_storage.py | vsvarunsharma10/pqai | 3ef1351fbc39671916517917de9074a62b092eef | [
"MIT"
] | 3 | 2021-06-27T18:37:53.000Z | 2022-03-15T04:41:21.000Z | import unittest
import sys
from pathlib import Path
TEST_DIR = Path(__file__).parent
BASE_DIR = TEST_DIR.parent
sys.path.append(str(BASE_DIR.resolve()))
from core.storage import Storage, Folder, JSONDocumentsFolder, MongoCollection
from pymongo import MongoClient
class TestFolderClass(unittest.TestCase):
    """Round-trip tests for the plain-file Folder storage backend."""

    # NOTE(review): writes 'test_file' into the tests directory and never
    # removes it — consider a tempfile-based fixture.
    def test_can_save_and_retrieve_file(self):
        folder = Folder(str(TEST_DIR.resolve()))
        folder.put('test_file', 'test_contents')
        contents = folder.get('test_file')
        self.assertEqual('test_contents', contents)
class TestJSONDocumentsFolder(unittest.TestCase):
    """Round-trip tests for the JSON-document Folder storage backend."""

    # NOTE(review): writes 'test_doc' into the tests directory and never
    # removes it — consider a tempfile-based fixture.
    def test_can_save_and_retrieve_document(self):
        folder = JSONDocumentsFolder(str(TEST_DIR.resolve()))
        folder.put('test_doc', { 'title': 'test doc title' })
        retrieved = folder.get('test_doc')
        self.assertEqual({ 'title': 'test doc title' }, retrieved)
class TestMongoDBClass(unittest.TestCase):
    """Lookup test for the MongoCollection wrapper.

    Requires a MongoDB instance on localhost:27017 with the pqai
    bibliography collection already populated.
    """

    def test_can_retrieve_document(self):
        client = MongoClient('localhost', 27017)
        query = { 'publicationNumber': 'US7654321B2' }
        mongo_coll = MongoCollection(client.pqai.bibliography)
        doc = mongo_coll.get(query)
        self.assertEqual('US7654321B2', doc['publicationNumber'])
# Allow running this test module directly with the unittest runner.
if __name__ == '__main__':
    unittest.main()
f6299ebaa369a199d4bc01ee88ae8d8f40c398a8 | 3,206 | py | Python | cinder/policies/group_snapshots.py | lightsey/cinder | e03d68e42e57a63f8d0f3e177fb4287290612b24 | [
"Apache-2.0"
] | 3 | 2015-04-02T21:44:36.000Z | 2016-04-29T21:19:04.000Z | cinder/policies/group_snapshots.py | lightsey/cinder | e03d68e42e57a63f8d0f3e177fb4287290612b24 | [
"Apache-2.0"
] | 28 | 2017-08-17T14:46:05.000Z | 2022-03-29T12:42:12.000Z | cinder/policies/group_snapshots.py | lightsey/cinder | e03d68e42e57a63f8d0f3e177fb4287290612b24 | [
"Apache-2.0"
] | 4 | 2016-01-27T00:25:52.000Z | 2021-03-25T19:54:08.000Z | # Copyright (c) 2017 Huawei Technologies Co., Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_policy import policy
from cinder.policies import base
CREATE_POLICY = 'group:create_group_snapshot'
DELETE_POLICY = 'group:delete_group_snapshot'
UPDATE_POLICY = 'group:update_group_snapshot'
GET_POLICY = 'group:get_group_snapshot'
GET_ALL_POLICY = 'group:get_all_group_snapshots'
GROUP_SNAPSHOT_ATTRIBUTES_POLICY = 'group:group_snapshot_project_attribute'
group_snapshots_policies = [
policy.DocumentedRuleDefault(
name=GET_ALL_POLICY,
check_str=base.RULE_ADMIN_OR_OWNER,
description="List group snapshots.",
operations=[
{
'method': 'GET',
'path': '/group_snapshots'
},
{
'method': 'GET',
'path': '/group_snapshots/detail'
}
]),
policy.DocumentedRuleDefault(
name=CREATE_POLICY,
check_str="",
description="Create group snapshot.",
operations=[
{
'method': 'POST',
'path': '/group_snapshots'
}
]),
policy.DocumentedRuleDefault(
name=GET_POLICY,
check_str=base.RULE_ADMIN_OR_OWNER,
description="Show group snapshot.",
operations=[
{
'method': 'GET',
'path': '/group_snapshots/{group_snapshot_id}'
}
]),
policy.DocumentedRuleDefault(
name=DELETE_POLICY,
check_str=base.RULE_ADMIN_OR_OWNER,
description="Delete group snapshot.",
operations=[
{
'method': 'DELETE',
'path': '/group_snapshots/{group_snapshot_id}'
}
]),
policy.DocumentedRuleDefault(
name=UPDATE_POLICY,
check_str=base.RULE_ADMIN_OR_OWNER,
description="Update group snapshot.",
operations=[
{
'method': 'PUT',
'path': '/group_snapshots/{group_snapshot_id}'
}
]),
policy.DocumentedRuleDefault(
name=GROUP_SNAPSHOT_ATTRIBUTES_POLICY,
check_str=base.RULE_ADMIN_API,
description="List group snapshots or show group "
"snapshot with project attributes.",
operations=[
{
'method': 'GET',
'path': '/group_snapshots/{group_snapshot_id}'
},
{
'method': 'GET',
'path': '/group_snapshots/detail'
}
]),
]
def list_rules():
    """Return the group-snapshot policy rules for registration."""
    return group_snapshots_policies
| 30.826923 | 78 | 0.587024 |
cdf69ea2ac2189bd875f9285b11b0e6476622007 | 2,315 | py | Python | p160.py | arpit0891/Project-euler | ab36b33c578578595bb518508fa2fe5862f4a044 | [
"MIT"
] | 1 | 2020-05-14T09:22:32.000Z | 2020-05-14T09:22:32.000Z | p160.py | prve17/Project-Euler | 1ff72404ca9ebe7de2eab83d43960d86bc487515 | [
"MIT"
] | 1 | 2020-03-13T12:42:28.000Z | 2020-05-13T13:26:32.000Z | p160.py | prve17/Project-Euler | 1ff72404ca9ebe7de2eab83d43960d86bc487515 | [
"MIT"
] | 3 | 2020-05-13T13:39:46.000Z | 2020-06-26T10:44:53.000Z | def compute():
ans = factorial_suffix(10**12)
return str(ans)
# The last 5 digits of n!, excluding trailing zeros.
def factorial_suffix(n):
    """Return the last 5 digits of n!, excluding trailing zeros."""
    # Each factor-5 pairs with a factor-2 to form a trailing zero, so the
    # surplus of twos is what remains after stripping zeros.
    twos = count_factors(n, 2) - count_factors(n, 5)  # Always non-negative for every n
    # We can reduce 'twos' because there is a cycle: 2^5 = 2^2505 = 32 mod 100000
    if twos >= 2505:
        twos = (twos - 5) % 2500 + 5
    return factorialish(n) * pow(2, twos, 100000) % 100000
# Equal to n! but with all factors of 2 and 5 removed and then modulo 10^5.
# The identity factorialIsh(n) = odd_factorialish(n) * even_factorialish(n) (mod 10^5) is true by definition.
def factorialish(n):
    """n! with all factors of 2 and 5 removed, modulo 10**5."""
    return even_factorialish(n) * odd_factorialish(n) % 100000
# The product of {all even numbers from 1 to n}, but with all factors of 2 and 5 removed and then modulo 10^5.
# For example, even_factorialish(9) only considers the numbers {2, 4, 6, 8}. Divide each number by 2 to get {1, 2, 3, 4}. Thus even_factorialish(9) = factorialish(4).
def even_factorialish(n):
    """Product of the even numbers in 1..n, factors of 2 and 5 removed, mod 10**5."""
    if n == 0:
        return 1
    else:
        # Dividing each even number by 2 maps {2, 4, ..} onto {1, .., n//2}.
        return factorialish(n // 2)
# The product of {all odd numbers from 1 to n}, but with all factors of 2 and 5 removed and then modulo 10^5.
# By definition, odd_factorialish() never considers any number that has a factor of 2. The product of the numbers that not a multiple of 5 are accumulated by factorial_coprime().
# Those that are a multiple of 5 are handled recursively by odd_factorialish(), noting that they are still odd after dividing by 5.
def odd_factorialish(n):
    """Product of the odd numbers in [1, n], factors of 5 removed, mod 10^5.

    Odd numbers coprime with 10 are accumulated by factorial_coprime().
    Odd multiples of 5 remain odd after dividing out the 5, so they are
    delegated to the recursive call on n // 5.
    """
    if n == 0:
        return 1
    return (factorial_coprime(n) * odd_factorialish(n // 5)) % 100000
# The product of {all numbers from 1 to n that are coprime with 10}, modulo 10^5.
# The input argument can be taken modulo 10^5 because factorialoid(10^5) = 1, and each block of 10^5 numbers behaves the same.
def factorial_coprime(n):
    """Product of every k in [1, n] that is coprime with 10, modulo 10^5.

    The argument may be reduced mod 10^5 up front: each complete block of
    10^5 consecutive integers contributes a factor of exactly 1, so only
    the partial block matters.
    """
    result = 1
    for k in range(1, n % 100000 + 1):
        # k coprime with 10 <=> k divisible by neither 2 nor 5.
        if k % 2 and k % 5:
            result = (result * k) % 100000
    return result
# Counts the number of factors of n in the set of integers {1, 2, ..., end}.
# For example, count_factors(25, 5) = 6 because {5, 10, 15, 20} each has one factor of 5, and 25 has two factors of 5.
def count_factors(end, n):
    """Total count of factors of n over the integers {1, 2, ..., end}.

    For example count_factors(25, 5) == 6: each of 5, 10, 15, 20
    contributes one factor of 5 and 25 contributes two (Legendre's
    formula, computed iteratively instead of recursively).
    """
    total = 0
    while end > 0:
        end //= n
        total += end
    return total
if __name__ == "__main__":
print(compute())
| 37.33871 | 178 | 0.700216 |
50722319bed1b414509501bc36f4767e46debd40 | 3,805 | py | Python | tools/config_validator.py | Task-Force-Dagger/Dagger | 56b9ffe3387f74830419a987eed5a0f386674331 | [
"MIT"
] | 14 | 2021-02-11T23:23:21.000Z | 2021-09-08T05:36:47.000Z | tools/config_validator.py | Task-Force-Dagger/Dagger | 56b9ffe3387f74830419a987eed5a0f386674331 | [
"MIT"
] | 130 | 2021-09-09T21:43:16.000Z | 2022-03-30T09:00:37.000Z | tools/config_validator.py | Task-Force-Dagger/Dagger | 56b9ffe3387f74830419a987eed5a0f386674331 | [
"MIT"
] | 11 | 2021-02-18T19:55:51.000Z | 2021-09-01T17:08:47.000Z | #!/usr/bin/env python3
#by PabstMirror - python script to verify all addons using MakePbo's lint checking and extFile Checking
#Arguments (eg: `config_validator.py full`):
#full dump full deRaped config of problem
#skipExt skips checking external file references
import os
import sys
import subprocess
import timeit
import time
######## GLOBALS #########
MAINPREFIX = "Z"
PREFIX = "ACE"
##########################
def Fract_Sec(s):
    """Split a duration in seconds into (days, hours, minutes, seconds).

    Args:
        s: duration in seconds; anything convertible to float.

    Returns:
        Tuple (d, h, m, sec) where d, h and m are ints and sec is a
        float carrying the fractional remainder.
    """
    # divmod replaces the original cascade of multiply-then-truncate
    # steps (and the pointless `temp = float()` initialisation) while
    # producing the same values for non-negative input.
    d, rem = divmod(float(s), 60 * 60 * 24)
    h, rem = divmod(rem, 60 * 60)
    m, sec = divmod(rem, 60)
    return int(d), int(h), int(m), sec
def CheckPBO(p,useMakePbo,checkExternalFiles,errors):
    """Lint-check a single addon folder `p`.

    Uses either `makepbo` (also builds a real PBO) or `rapify`
    (config lint only), selected by `useMakePbo`.  Appends `p` to the
    shared `errors` list on failure; relies on the module globals
    MAINPREFIX and PREFIX for the mod prefix path.
    """
    try:
        if useMakePbo:
            # -P don't pause, -G check external references, -U warnings are okay
            makePboArgs = "-PGU"
            if not checkExternalFiles:
                makePboArgs = "-PU"
            subprocess.run([
                "makepbo",
                makePboArgs,
                "-@={}\\{}\\addons\\{}".format(MAINPREFIX,PREFIX.rstrip("_"),p),
                p,
                "{}_{}.pbo".format(PREFIX,p)
            ], stdin=None, input=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True)
        else:
            # rapify: -L lint check, -E external file check, -P don't pause
            makePboArgs = "-LEP"
            if not checkExternalFiles:
                makePboArgs = "-LP"
            subprocess.run([
                "rapify",
                makePboArgs,
                p
            ], stdin=None, input=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True)
    except subprocess.CalledProcessError as e:
        # Non-zero exit from the tool: report and record the bad addon.
        print("!! Problem With {} ret {} !!".format(p, e.returncode))
        print(" stderr: {}".format(e.stderr))
        errors.append(p)
    else:
        print(" Checked ok {}".format(p))
    return
def fullDump(p):
    """Rebuild addon `p` with makepbo's -S flag to show the full deRap
    on screen, then wait for Enter so the output can be read."""
    try:
        subprocess.run([
            "makepbo",
            "-PGUS", #G Check external references -S show deRap - P dont pause
            "-@={}\\{}\\addons\\{}".format(MAINPREFIX,PREFIX.rstrip("_"),p),
            p,
            "{}_{}.pbo".format(PREFIX,p)
        ], stdin=None, input=None, check=True)
    except subprocess.CalledProcessError as e:
        # Pause after a failed dump so the error output stays visible.
        input("Press Enter to continue...")
    return
def main(argv):
    """Lint-check every addon under P:\\<MAINPREFIX>\\<PREFIX>\\addons.

    Recognised flags in `argv`:
      skipExt -- skip checking external file references
      make    -- use makepbo (checks more file types, twice as slow,
                 and actually builds PBOs in the same spot as build.py)
      full    -- after failures, dump the full deRap of each bad addon

    Returns the number of addons that failed validation.
    """
    print("""
    ####################
    # ACE3 Config Check #
    ####################
    """)
    start_time = timeit.default_timer()
    addonspath = os.path.join("P:\\", MAINPREFIX, PREFIX, "addons")
    print("Switching to dir: {}".format(addonspath))
    try:
        os.chdir(addonspath)
    except OSError as e:
        # Was a bare `except:` which would also swallow KeyboardInterrupt;
        # only filesystem errors are expected here.
        raise Exception("Failed to switch to addon dir on P:") from e
    useMakePbo = False
    checkExternalFiles = True
    if "skipExt" in argv:
        print("Skipping External Files Check")
        checkExternalFiles = False
    if "make" in argv:
        # will check more files like RTM and RVMats but twice as slow
        # This also actually builds a pbo (in same spot as build.py)
        print("Using makePbo to verify all files")
        useMakePbo = True
    errors = []
    for p in os.listdir(addonspath):
        path = os.path.join(addonspath, p)
        if not os.path.isdir(path):
            continue
        if p[0] == ".":
            # Skip hidden folders such as .git
            continue
        CheckPBO(p, useMakePbo, checkExternalFiles, errors)
    d, h, m, s = Fract_Sec(timeit.default_timer() - start_time)
    # (fixed "errrors" typo in the summary message)
    print("\n# Done with {0} errors [took: {1:2}h {2:2}m {3:4.5f}s]".format(len(errors), h, m, s))
    if errors:
        if "full" in argv:
            input("Dumping Full DeRap: Press Enter to continue...")
            for p in errors:
                fullDump(p)
        else:
            print("use 'full' arg to show derap")
    ret = len(errors)
    print("return {}".format(ret))
    return ret
if __name__ == "__main__":
main(sys.argv)
| 28.825758 | 103 | 0.546649 |
92932e05c107b1b8e3849f78aae778b2a464c4a6 | 237 | py | Python | ionyweb/plugin_app/plugin_website_title/forms.py | makinacorpus/ionyweb | 2f18e3dc1fdc86c7e19bae3778e67e28a37567be | [
"BSD-3-Clause"
] | 4 | 2015-09-28T10:07:39.000Z | 2019-10-18T20:14:07.000Z | ionyweb/plugin_app/plugin_website_title/forms.py | makinacorpus/ionyweb | 2f18e3dc1fdc86c7e19bae3778e67e28a37567be | [
"BSD-3-Clause"
] | 1 | 2021-03-19T21:41:33.000Z | 2021-03-19T21:41:33.000Z | ionyweb/plugin_app/plugin_website_title/forms.py | makinacorpus/ionyweb | 2f18e3dc1fdc86c7e19bae3778e67e28a37567be | [
"BSD-3-Clause"
] | 1 | 2017-10-12T09:25:19.000Z | 2017-10-12T09:25:19.000Z | # -*- coding: utf-8 -*-
import floppyforms as forms
from ionyweb.forms import ModuloModelForm
from models import Plugin_WebsiteTitle
class Plugin_WebsiteTitleForm(ModuloModelForm):
    """Edit form for the website-title plugin.

    All field generation and handling is inherited from
    ModuloModelForm; this class only binds the model.
    """
    class Meta:
        # ModelForm binding: build the form from Plugin_WebsiteTitle.
        model = Plugin_WebsiteTitle
| 19.75 | 47 | 0.767932 |
c55de89a42b12453b4aeacc3d910a868b242e8e4 | 2,282 | py | Python | utils_xhr/xhr_logger.py | jhfwb/utils_xhr | ad58553c5aef3d3c4e942b95a2676e99618cf7c0 | [
"MIT"
] | null | null | null | utils_xhr/xhr_logger.py | jhfwb/utils_xhr | ad58553c5aef3d3c4e942b95a2676e99618cf7c0 | [
"MIT"
] | null | null | null | utils_xhr/xhr_logger.py | jhfwb/utils_xhr | ad58553c5aef3d3c4e942b95a2676e99618cf7c0 | [
"MIT"
] | null | null | null | import logging
from logging import handlers
from _xhr_tool._annotate import singleObj
@singleObj
class Logger(object):
    """Wrapper around the root logger that attaches a file handler, a
    console handler and a time-rotating file handler.

    Decorated with @singleObj — presumably a singleton decorator from
    _xhr_tool._annotate; confirm its semantics there.
    """
    # Convenience aliases so callers can use Logger.DEBUG etc. without
    # importing logging themselves.
    DEBUG=logging.DEBUG
    INFO=logging.INFO
    WARNING=logging.WARNING
    ERROR=logging.ERROR
    CRITICAL=logging.CRITICAL
    level_relations = {
        'debug': logging.DEBUG,
        'info': logging.INFO,
        'warning': logging.WARNING,
        'error': logging.ERROR,
        'crit': logging.CRITICAL
    }  # mapping from level-name string to logging level
    def __init__(self,savePath="save.log"):
        self.logger = logging.getLogger()
        self.logger.setLevel(logging.INFO)
        # 2.1 create a handler that writes log records to a file
        fileHandler = logging.FileHandler(savePath)
        # 2.2 create a handler that echoes records to the console
        streamHandler = logging.StreamHandler()
        # create a time-based rotating file handler (rotates every 2 days,
        # keeps at most 3 old files)
        th = handlers.TimedRotatingFileHandler(filename=savePath, when='D',interval=2, backupCount=3)
        """class logging.handlers.TimedRotatingFileHandler(filename, when='h', interval=1, backupCount=0, encoding=None, delay=False, utc=False)
        参数when决定了时间间隔的类型,参数interval决定了多少的时间间隔。如when='D',interval=2,就是指两天的时间间隔,backupCount决定了能留几个日志文件。超过数量就会丢弃掉老的日志文件。
        when的参数决定了时间间隔的类型。两者之间的关系如下:"""
        # 3. define the output format shared by all handlers
        # (note: "foramtter" is a misspelling of "formatter"; local name only)
        foramtter = logging.Formatter('%(asctime)s - %(levelname)s: %(message)s - %(pathname)s[line:%(lineno)d]')
        th.setFormatter(foramtter)
        fileHandler.setFormatter(foramtter)
        streamHandler.setFormatter(foramtter)
        # 4. attach all three handlers to the root logger
        self.logger.addHandler(fileHandler)
        self.logger.addHandler(streamHandler)
        self.logger.addHandler(th)
    def getLogger(self):
        # Expose the underlying logging.Logger instance.
        return self.logger
    def setLevel(self,level=logging.INFO):
        # Change the root logger's threshold at runtime.
        self.logger.setLevel(level)
    def debug(self,message=''):
        return self.logger.debug(message)
    def info(self,message=''):
        return self.logger.info(message)
    def warning(self,message=''):
        return self.logger.warning(message)
if __name__ == '__main__':
    # Ad-hoc demo: inject extra context fields into a log record via `extra=`.
    FORMAT = '%(asctime)-15s %(clientip)s %(user)-8s %(message)s'
    logging.basicConfig(format=FORMAT)
    d = {'clientip': '192.168.0.1', 'user': 'fbloggs'}
    logging.warning('Protocol problem: %s', 'connection reset', extra=d)
    # l=Logger()
    # l.setLevel(level=l.DEBUG)
    # l.logger.debug('你好,我是初始信息')
| 39.344828 | 144 | 0.669588 |
11e89315b1a68f90b32eddef67c5defa9d63191a | 1,374 | py | Python | pytorch_agents/common/type_aliases.py | AiForAlpha/pytorch_agents | 0151b16f0a8cf7131cede83c369cbc728435b3c2 | [
"MIT"
] | null | null | null | pytorch_agents/common/type_aliases.py | AiForAlpha/pytorch_agents | 0151b16f0a8cf7131cede83c369cbc728435b3c2 | [
"MIT"
] | null | null | null | pytorch_agents/common/type_aliases.py | AiForAlpha/pytorch_agents | 0151b16f0a8cf7131cede83c369cbc728435b3c2 | [
"MIT"
] | null | null | null | """Common aliases for type hints"""
from enum import Enum
from typing import Any, Callable, Dict, List, NamedTuple, Tuple, Union
import gym
import numpy as np
import torch as th
from pytorch_agents.common import callbacks, vec_env
GymEnv = Union[gym.Env, vec_env.VecEnv]  # a single Gym env or a vectorized wrapper
GymObs = Union[Tuple, Dict[str, Any], np.ndarray, int]  # anything an env may return as an observation
GymStepReturn = Tuple[GymObs, float, bool, Dict]  # env.step()-style 4-tuple: (obs, reward, done, info)
TensorDict = Dict[str, th.Tensor]  # named collection of tensors
OptimizerStateDict = Dict[str, Any]  # payload of a torch optimizer state_dict
MaybeCallback = Union[None, Callable, List[callbacks.BaseCallback], callbacks.BaseCallback]
# A schedule takes the remaining progress as input
# and outputs a scalar (e.g. learning rate, clip range, ...)
Schedule = Callable[[float], float]
class RolloutBufferSamples(NamedTuple):
    """One batch of transitions sampled from a rollout buffer."""
    observations: th.Tensor
    actions: th.Tensor
    old_values: th.Tensor      # value estimates recorded at collection time
    old_log_prob: th.Tensor    # log-probabilities recorded at collection time
    advantages: th.Tensor
    returns: th.Tensor
class ReplayBufferSamples(NamedTuple):
    """One batch of transitions sampled from a replay buffer."""
    observations: th.Tensor
    actions: th.Tensor
    next_observations: th.Tensor
    dones: th.Tensor
    rewards: th.Tensor
class RolloutReturn(NamedTuple):
    """Summary of a data-collection rollout."""
    episode_reward: float
    episode_timesteps: int
    n_episodes: int
    continue_training: bool  # False when a callback requested early stop
class TrainFrequencyUnit(Enum):
    """Unit in which a training frequency is expressed."""
    STEP = "step"
    EPISODE = "episode"
class TrainFreq(NamedTuple):
    """Train every `frequency` units, where the unit is a step or an episode."""
    frequency: int
    unit: TrainFrequencyUnit  # either "step" or "episode"
| 24.981818 | 91 | 0.737263 |
09bd8d7ee09a6034511b881b6be3e3aefd58c5c2 | 1,550 | py | Python | api/client/test/test_api_list_components_response.py | srishtipithadia/mlx | 2fb61a8840696c7ede77cd600caa8922178ec8b0 | [
"Apache-2.0"
] | null | null | null | api/client/test/test_api_list_components_response.py | srishtipithadia/mlx | 2fb61a8840696c7ede77cd600caa8922178ec8b0 | [
"Apache-2.0"
] | 1 | 2021-09-21T23:31:13.000Z | 2021-09-21T23:31:13.000Z | api/client/test/test_api_list_components_response.py | srishtipithadia/mlx | 2fb61a8840696c7ede77cd600caa8922178ec8b0 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
MLX API
MLX API Extension for Kubeflow Pipelines # noqa: E501
OpenAPI spec version: 0.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.api_list_components_response import ApiListComponentsResponse # noqa: E501
from swagger_client.rest import ApiException
class TestApiListComponentsResponse(unittest.TestCase):
    """ApiListComponentsResponse unit test stubs"""
    def setUp(self):
        # No fixtures needed yet; stub generated by swagger-codegen.
        pass
    def tearDown(self):
        pass
    def testApiListComponentsResponse(self):
        """Test ApiListComponentsResponse"""
        # FIXME: construct object with mandatory attributes with example values
        # model = swagger_client.models.api_list_components_response.ApiListComponentsResponse() # noqa: E501
        pass
if __name__ == '__main__':
unittest.main()
| 28.703704 | 110 | 0.736774 |
f0a9663dbe00ec7d427d9345d495b9ef6eb54c5c | 2,291 | py | Python | sncosmo/tests/test_magsystems.py | rbiswas4/sncosmo | 813b707044fd21e8e35e7a1cdc650b48417f0ebc | [
"BSD-3-Clause"
] | 1 | 2019-03-27T09:46:46.000Z | 2019-03-27T09:46:46.000Z | sncosmo/tests/test_magsystems.py | jasminelujia/sncosmo | 6ca3be6a52f7a096b874e181c21b93f711610f12 | [
"BSD-3-Clause"
] | null | null | null | sncosmo/tests/test_magsystems.py | jasminelujia/sncosmo | 6ca3be6a52f7a096b874e181c21b93f711610f12 | [
"BSD-3-Clause"
] | 1 | 2019-02-08T16:07:46.000Z | 2019-02-08T16:07:46.000Z | # Licensed under a 3-clause BSD style license - see LICENSES
import math
import numpy as np
from numpy.testing import assert_allclose, assert_almost_equal
from astropy import units as u
from astropy.utils.data import get_pkg_data_filename
from astropy.tests.helper import remote_data
import pytest
import sncosmo
def test_abmagsystem():
    """Round-tripping flux -> mag -> flux through ABMagSystem is the identity."""
    magsys = sncosmo.ABMagSystem()
    m = magsys.band_flux_to_mag(1.0, 'bessellb')
    f = magsys.band_mag_to_flux(m, 'bessellb')
    assert_almost_equal(f, 1.0)
def test_spectralmagsystem():
    """Check that SpectralMagSystem matches ABMagSystem when the spectrum is
    the same as AB."""
    # construct a spectrum with same flux as AB: 3631 x 10^{-23} erg/s/cm^2/Hz
    # Use a fine grid to reduce linear interpolation errors when integrating
    # in Spectrum.bandflux().
    wave = np.linspace(1000., 20000., 100000)  # fine grid
    flux = 3631.e-23 * np.ones_like(wave)
    unit = u.erg / u.s / u.cm**2 / u.Hz
    s = sncosmo.Spectrum(wave, flux, unit=unit)
    magsys1 = sncosmo.SpectralMagSystem(s)
    magsys2 = sncosmo.ABMagSystem()
    # The two systems must then agree on the zero-point band flux.
    assert_allclose(magsys1.zpbandflux('bessellb'),
                    magsys2.zpbandflux('bessellb'))
@remote_data
def test_csp_magsystem():
    """CSP zero points agree with the published values to within 0.015 mag."""
    csp = sncosmo.get_magsystem('csp')
    # filter zeropoints (copied from
    # http://csp.obs.carnegiescience.edu/data/filters
    # on 13 April 2017)
    zps = {"cspu": 12.986,
           "cspg": 15.111,
           "cspr": 14.902,
           "cspi": 14.535,
           "cspb": 14.328,
           "cspv3014": 14.437,
           "cspv3009": 14.388,
           "cspv9844": 14.439,
           "cspys": 13.921,
           "cspjs": 13.836,
           "csphs": 13.510,
           "cspk": 11.968,
           "cspyd": 13.770,
           "cspjd": 13.866,
           "csphd": 13.502}
    # The "zero point bandflux" should be the flux that corresponds to
    # magnitude zero. So, 0 = zp - 2.5 log(F)
    for band, zp in zps.items():
        assert abs(2.5 * math.log10(csp.zpbandflux(band)) - zp) < 0.015
@remote_data
def test_compositemagsystem_band_error():
    """Test that CompositeMagSystem raises an error when band is
    not in system."""
    csp = sncosmo.get_magsystem('csp')
    # 'desi' is not a CSP band, so the lookup must fail loudly.
    with pytest.raises(ValueError):
        csp.zpbandflux('desi')
| 29.371795 | 78 | 0.638149 |
bb3aeb09c3aea8c544f490c4a77e8be464aa47c5 | 6,355 | py | Python | Image.py | darius-tan/Fourier-and-Images | b18eb800abbd35984244727a42f2b87596d37019 | [
"MIT"
] | 81 | 2018-07-10T16:32:48.000Z | 2021-11-25T13:23:01.000Z | Image.py | darius-tan/Fourier-and-Images | b18eb800abbd35984244727a42f2b87596d37019 | [
"MIT"
] | null | null | null | Image.py | darius-tan/Fourier-and-Images | b18eb800abbd35984244727a42f2b87596d37019 | [
"MIT"
] | 6 | 2018-07-12T03:43:50.000Z | 2021-11-25T16:15:33.000Z | import cv2
import numpy as np
# Image.find_order(contours)
from bisect import bisect
# Image.find_paths(contours)
from scipy.spatial.distance import cdist
from collections import defaultdict
class Image(object):
    """Loads an image and orders its edge contours into one continuous path.

    `sort()` produces a single point sequence that walks every detected
    contour, jumping between contours at their closest points.
    """
    def __init__(self, img_loc, shape = None):
        # shape, if given, is passed straight to cv2.resize — so it is
        # (width, height); TODO confirm callers pass it in that order.
        self.img = cv2.imread(img_loc)
        if shape:
            self.img = cv2.resize(self.img, shape)
    def sort(self):
        """Return all contour points stacked in traversal order.

        Each (idx, (start, end, stride)) from find_order() is interpreted
        as the slice contours[idx][start:end:stride]; the conditional
        chain exists because Python slicing needs None (not -1/0) to
        express "to the very beginning/end".
        """
        contours = self.find_contours()
        return np.vstack([contours[idx][start::-1] if start is None and end is None and stride == -1
            else contours[idx] if start is None and end is None and stride == 1
            else contours[idx][:end:-1] if start is None and end is not None and stride == -1
            else contours[idx][start:end:stride] for idx, (start, end, stride) in self.find_order(contours)])
    def find_contours(self):
        """Detect edges with Canny and return the OpenCV contour list."""
        # img = cv2.GaussianBlur(self.img, (5,5), 0)
        edges = cv2.Canny(self.img, 100, 255)
        ret, thresh = cv2.threshold(edges, 127, 255, 0)
        contours, __ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        return contours
    def find_order(self, contours):
        """Turn the connection graph from find_paths() into slice orders."""
        # This function was written as recursive originally.
        # This function obtains a dictionary of connections from find_paths(contours)
        # and "recursively" goes through the dictionary to find the slice notations that connects all contours together.
        order = []
        stack = [(0, 0, 0)]  # (contour index, current position, position we must return to)
        paths = self.find_paths(contours)
        while stack:
            cur_contour, cur_pos, original_pos = stack.pop(-1)
            if paths[cur_contour]:
                pos = bisect([start for _, (start,_) in paths[cur_contour]], cur_pos)
                # Check connections to the left and then to the right
                next_contour, (start, end) = paths[cur_contour].pop(pos-1 if pos>0 else 0)
                # Order imitates slicing notation
                # For example, (cur_pos, start+1, 1) indicates a slice of cur_pos:start+1:1
                order.append((cur_contour, (cur_pos, start+1, 1) if start+1 > cur_pos else (cur_pos, start-1 if start>0 else None, -1)))
                stack.append((cur_contour, start, original_pos))
                if next_contour in paths:
                    stack.append((next_contour, end, end))
                else:
                    # Leaf contour: walk it to one end, all the way back,
                    # then back to the entry point.
                    order.append((next_contour, (end, None, -1)))
                    order.append((next_contour, (None, None, 1)))
                    order.append((next_contour, (None, end-1 if end > 0 else None, -1)))
            else:
                # No outgoing connections left: finish this contour's loop.
                order.append((cur_contour, (cur_pos, None, 1)))
                order.append((cur_contour, (None, original_pos-1 if original_pos > 0 else None, -1)))
        return order
    def find_paths(self, contours):
        """Greedily connect all contours at their mutually closest points.

        Returns {contour index: [(next contour index, (row, col)), ...]}
        where row/col are point indices within the two contours.
        """
        # This function first gets a distance matrix from cdist(points, points)
        # Then consider a "blob" that contains contours[0] (all the points of contours[0])
        # This function then uses that distance matrix to find the closest point to blob
        # And then adding said closest point into the blob because it is now connected
        # And then ignoring said closest point's distance to the blob and vice versa by setting the distance in the distance matrix to np.inf.
        # Finally construct a dictionary of connections.
        points = np.vstack(contours)
        points = points.reshape((points.shape[0], 2))
        dist = cdist(points, points)
        len_arr = np.array([len(contour) for contour in contours], dtype = np.int_)
        end_points = np.add.accumulate(len_arr)
        start = 0
        start_end = []
        for end in end_points:
            # Blank out intra-contour distances: we only care about
            # connections between different contours.
            dist[start:end:, start:end:] = np.inf
            start_end.append((start, end))
            start = end
        paths = defaultdict(list)
        # temp_order keeps track of the order in temp_dist
        # temp_start_end keeps track of the starts and ends of each contour in temp_dist
        # temp_dist is a slice (in terms of rows) of the original distance matrix, mainly to reduce np.argmin calculations.
        temp_order = [0]
        temp_start_end = [start_end[0]]
        temp_dist = dist[start_end[0][0]:start_end[0][1]]
        # The first connection connects two contours, and the rest connects only one contour
        while len(temp_order) < end_points.size:
            row_min = np.argmin(temp_dist, axis = 0)
            cols = np.indices(row_min.shape)
            col_min = np.argmin(temp_dist[row_min, cols])
            # row_min[col_min] gives the row min of temp_dist
            temp_row, temp_col = row_min[col_min], col_min
            temp_cur_contour = self.find_contour_index(temp_row, temp_start_end)
            cur_contour = temp_order[temp_cur_contour]
            # express row in terms of the index inside contours[cur_contour]
            row = temp_row - temp_start_end[temp_cur_contour][0]
            next_contour = self.find_contour_index(temp_col, start_end)
            col = temp_col - start_end[next_contour][0]
            paths[cur_contour].append((next_contour, (row, col)))
            # Ignore the distance from connected points to other connected points
            start, end = start_end[next_contour]
            for order in temp_order:
                new_start, new_end = start_end[order]
                dist[new_start:new_end:, start:end:] = np.inf
                dist[start:end:, new_start:new_end:] = np.inf
            temp_order.append(next_contour)
            temp_len_arr = np.array([len(contours[order]) for order in temp_order], dtype = np.int_)
            temp_end_points = np.add.accumulate(temp_len_arr)
            temp_start_end.append((temp_start_end[-1][-1], temp_start_end[-1][-1]+temp_len_arr[-1]))
            temp_dist = dist[np.hstack([np.arange(start_end[order][0], start_end[order][1]) for order in temp_order])]
        for contour in paths:
            paths[contour].sort(key = lambda x: x[1][0])
        return paths
    def find_contour_index(self, idx, start_end):
        """Return the index of the (start, end) range containing `idx`.

        Falls back to the last range if no range matches.
        """
        for i, (start, end) in enumerate(start_end):
            if start <= idx < end:
                return i
        return len(start_end) - 1
| 48.884615 | 148 | 0.610071 |
b5a5c3015dc88bb6819d3d28551a185ef11255b5 | 22,373 | py | Python | generate.py | tmercswims/tracker | 7db12af840f358ddde9114ed6f47ed6c1c65807d | [
"MIT"
] | null | null | null | generate.py | tmercswims/tracker | 7db12af840f358ddde9114ed6f47ed6c1c65807d | [
"MIT"
] | null | null | null | generate.py | tmercswims/tracker | 7db12af840f358ddde9114ed6f47ed6c1c65807d | [
"MIT"
] | null | null | null | from math import ceil, floor
import os
from turtle import onclick
import yaml
import re
import dominate
from dominate.tags import *
from dominate.util import raw
from more_itertools import peekable
# Root HTML document; all the `with doc...` blocks below append into it.
doc = dominate.document(title="Roundtable Tracker")
doc.set_attribute('lang', 'en')
def to_snake_case(name):
    """Convert *name* to snake_case after stripping all whitespace."""
    compact = "".join(name.split())
    # Split "AbcDef"-style boundaries, collapse any doubled underscore
    # that produced, then split lowercase/digit-to-uppercase transitions.
    step1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', compact)
    step2 = re.sub('__([A-Z])', r'_\1', step1)
    step3 = re.sub('([a-z0-9])([A-Z])', r'\1_\2', step2)
    return step3.lower()
# Load pages.yaml, which lists nav-bar dropdowns and the per-page data
# files; each data file becomes an entry in `pages` and a (title, id)
# pair in its dropdown.
dropdowns = []
pages = []
with open('pages.yaml', 'r', encoding='utf-8') as pages_yaml:
    yml = yaml.safe_load(pages_yaml)
    for dropdown in yml['dropdowns']:
        dropdown_ids = []
        for page in dropdown['pages']:
            with open(os.path.join('data', page), 'r', encoding='utf-8') as data:
                yml = yaml.safe_load(data)
                pages.append(yml)
                dropdown_ids.append((yml['title'], yml['id']))
        dropdowns.append((dropdown['name'], dropdown_ids))
# Validate uniqueness of every identifier used by the generated HTML:
# page ids are globally unique, section ids/nums are unique per scope,
# and item nums (and sub-item nums) are unique within their section.
# Any duplicate aborts generation with exit code 1.
page_ids = set()
section_ids = set()
for page in pages:
    if page['id'] in page_ids:
        print("Duplicate page id '" + page['id'] + "' found. All page ids must be unique.")
        quit(1)
    else:
        page_ids.add(page['id'])
    section_nums = set()
    for section in page['sections']:
        if section['id'] in section_ids:
            print("Duplicate section id '" + section['id'] + "' found in page '" + page['id'] + "'. All section ids must be unique.")
            quit(1)
        else:
            section_ids.add(section['id'])
        if section['num'] in section_nums:
            print("Duplicate section num '" + str(section['num']) + "' found in page '" + page['id'] + "'. All section nums must be unique.")
            quit(1)
        else:
            section_nums.add(section['num'])
        item_nums = set()
        items = peekable(section['items'])
        for item in items:
            # Plain strings are sub-headers, not checklist items.
            if not isinstance(item, list):
                continue
            if item[0] in item_nums:
                print("Duplicate item num '" + str(item[0]) + "' in section '" + section['id'] + "' found in page '" + page['id'] + "'. All item nums must be unique within it's section.")
                quit(1)
            else:
                item_nums.add(item[0])
            # A following list-of-lists holds this item's sub-items.
            if isinstance(items.peek([0])[0], list):
                sub_item_nums = set()
                item = next(items)
                for subitem in item:
                    if subitem[0] in sub_item_nums:
                        print("Duplicate sub-item num '" + str(subitem[0]) + "' in section '" + section['id'] + "' found in page '" + page['id'] + "'. All item nums must be unique within it's section.")
                        quit(1)
                    else:
                        sub_item_nums.add(subitem[0])
# <head>: favicons, meta tags, Bootstrap + site CSS.
with doc.head:
    meta(charset="utf-8")
    meta(name="viewport", content="width=device-width, initial-scale=1.0")
    link(rel="shortcut icon", type="image/x-icon", href="img/favicon.ico?")
    link(rel="apple-touch-icon-precomposed", href="img/favicon-152.png")
    link(rel="mask-icon", href="img/pinned-tab-icon.svg", color="#000000")
    meta(name="description", content="Cheat sheet for Elden Ring. Checklist of things to do, items to get etc.")
    meta(name="author", content="Ben Lambeth")
    meta(name="mobile-web-app-capable", content="yes")
    link(href="css/bootstrap.min.css", rel="stylesheet", id="bootstrap")
    link(rel="stylesheet", href="https://cdn.jsdelivr.net/npm/bootstrap-icons@1.8.1/font/bootstrap-icons.css")
    link(href="css/main.css", rel="stylesheet")
with doc:
with nav(cls="navbar navbar-expand-md bg-dark navbar-dark"):
with div(cls="container-fluid"):
with div(cls="navbar-header"):
with button(type="button", cls="navbar-toggler", data_bs_toggle="collapse", data_bs_target="#nav-collapse", aria_expanded="false", aria_controls="nav-collapse", aria_label="Toggle navigation"):
span(cls="navbar-toggler-icon")
with div(cls="collapse navbar-collapse", id="nav-collapse"):
with ul(cls="nav navbar-nav mr-auto"):
with li(cls="nav-item"):
a(href="#tabMain", data_bs_toggle="tab", cls="nav-link hide-buttons").add(i(cls="bi bi-house-fill"))
for name, l in dropdowns:
with li(cls="dropdown nav-item"):
a(name, cls="nav-link dropdown-toggle", href="#", data_bs_toggle="dropdown", aria_haspopup="true", aria_expanded="false").add(span(cls="caret"))
with ul(cls="dropdown-menu"):
for guide in l:
li().add(a(guide[0], cls="dropdown-item show-buttons", href="#tab" + guide[1], data_bs_toggle="tab", data_bs_target="#tab" + guide[1]))
# li().add(a(guide[0], cls="dropdown-item", href="#tab" + guide[1], data_bs_toggle="tab", data_bs_target="#tab" + guide[1]))
with li(cls="nav-item"):
a(href="#tabOptions", data_bs_toggle="tab", cls="nav-link hide-buttons").add(i(cls="bi bi-gear-fill"), " Options")
with div(cls="container"):
with div(cls="row"):
with div(cls="col-md-12 text-center"):
h1("Roundtable Tracker", cls="mt-3")
text = p(cls="lead")
text += "Contribute to the guide at the "
text += a("Github Page", href="https://github.com/Roundtable-Hold/tracker")
with div(cls="tab-content gab-2"):
# Hide completed toggle
with div(id="btnHideCompleted", cls="fade mb-3"):
with div(cls="form-check form-switch"):
input_(cls="form-check-input", type="checkbox", id='toggleHideCompleted')
label("Hide Completed", cls="form-check-label", _for='toggleHideCompleted')
for page in pages:
with div(cls="tab-pane fade", id="tab" + page['id'], role="tabpanel"):
# Filter buttons
h = h2()
h += page['title']
h += span(id=page['id'] + "_overall_total")
with ul(cls="table_of_contents"):
for section in page['sections']:
with li():
a(section['title'], href="#" + section['id'])
span(id=page['id'] + "_nav_totals_" + str(section['num']))
with div(cls="input-group"):
input_(type="search", id=page['id'] + "_search", cls="form-control my-3", placeholder="Start typing to filter results...")
with div(id=page['id']+"_list"):
for section in page['sections']:
with h4(id=section['id'], cls="mt-1"):
with a(href="#" + section['id'] + "_col", data_bs_toggle="collapse", data_bs_target="#" + section['id'] + "_col", cls="btn btn-primary btn-sm me-2 collapse-button", role="button"):
i(cls='bi bi-chevron-up')
if 'link' in section:
a(section['title'], href=section['link'])
else:
span(section['title'])
span(id=page['id'] + "_totals_" + str(section['num']), cls="ms-2 mt-0")
if 'table' in section:
with div(id=section['id'] + "_col", cls="collapse show row", aria_expanded="true"):
if isinstance(section['table'], list):
table_cols = len(section['table'])
size = floor(12 / table_cols)
else:
table_cols = section['table']
size = floor(12 / table_cols)
items = peekable(section['items'])
if not isinstance(items.peek(), list):
item = next(items)
h5(item)
with ul(cls='list-group list-group-flush'):
if isinstance(section['table'], list):
with li(cls="list-group-item d-md-block d-none").add(div(cls="row form-check")):
with div(cls="col-auto"):
input_(cls="form-check-input invisible", type='checkbox')
with div(cls="col").add(div(cls="row")):
for idx, header in enumerate(section['table']):
if 'table_widths' in page:
col_size = str(page['table_widths'][idx])
else:
col_size = str(size)
div(cls="col-md-" + col_size).add(strong(header))
for item in items:
id = str(item[0])
with li(cls="list-group-item", data_id=page['id'] + '_' + str(section['num']) + '_' + id):
if not isinstance(item, list):
h5(item)
continue
with div(cls="row form-check checkbox"):
with div(cls="col-auto"):
input_(cls="form-check-input", type="checkbox", value="",
id=page['id'] + '_' + str(section['num']) + '_' + id)
with div(cls="col").add(div(cls="row")):
for pos in range(1, 1+table_cols):
if 'table_widths' in page:
col_size = str(page['table_widths'][pos-1])
else:
col_size = str(size)
with div(cls="col-md-" + col_size + ' col-xs-12'):
with label(cls="form-check-label item_content ms-0 ps-0", _for=page['id'] + '_' + str(section['num']) + '_' + id):
if isinstance(section['table'], list) and item[pos]:
strong(section['table'][pos-1] + ': ', cls="d-md-none d-block")
if item[pos]:
raw(item[pos])
else:
with div(id=section['id'] + "_col", cls="collapse show", aria_expanded="true"):
items = peekable(section['items'])
if not isinstance(items.peek(), list):
item = next(items)
h5(item)
u = ul(cls="list-group-flush")
for item in items:
if not isinstance(item, list):
h5(item)
u = ul(cls="list-group-flush")
continue
id = str(item[0])
with u.add(li(data_id=page['id'] + "_" + str(section['num']) + "_" + id, cls="list-group-item")):
with div(cls="form-check checkbox"):
input_(cls="form-check-input", type="checkbox", value="", id=page['id'] + '_' + str(section['num']) + '_' + id)
label(cls="form-check-label item_content", _for=page['id'] + '_' + str(section['num']) + '_' + id).add(raw(item[1]))
if isinstance(items.peek([0])[0], list):
item = next(items)
with u.add(ul(cls="list-group-flush")):
for subitem in item:
with li(data_id=page['id'] + "_" + str(section['num']) + "_" + id + "_" + str(subitem[0]), cls="list-group-item " + subitem[1]):
with div(cls="form-check checkbox"):
input_(cls="form-check-input", type="checkbox", value="", id=page['id'] + '_' + str(section['num']) + '_' + id + '_' + str(subitem[0]))
label(cls="form-check-label item_content", _for=page['id'] + '_' + str(section['num']) + '_' + id + '_' + str(subitem[0])).add(raw(subitem[1]))
with div(cls="tab-pane fade", id="tabMain"):
raw(
"""
<h3>Welcome to the Roundtable Tracker</h3>
<p>The comprehensive tracker for Elden Ring, made by completionists, for completionists.</p>
<p>This site is still a work in-progress. We are working on it every day.</p>
<h3>I have feedback, how can I contribute?</h3>
<p>You can visit the <a href="https://github.com/Roundtable-Hold/tracker">GitHub repository</a> and <a href="https://github.com/Roundtable-Hold/tracker/issues">report Issues</a> or create a fork and submit a Pull Request.</p>
<p>You can also reach out to Azy on reddit <a href="https://www.reddit.com/user/azy2/">/u/azy2</a> or Discord Azy#9592 or reach out to Quivorian on reddit <a href="https://www.reddit.com/user/quivorian">/u/quivorian</a> or Discord: Quivorian#6564</p>
<h3>Can I use this for multiple characters?</h3>
<p>Yup, use the profile selector and buttons in the options tab at the top of the page to setup multiple profiles.</p>
<h3>How does the checklist status get saved?</h3>
<p>The checklist is saved to your browser's local storage. Be careful when clearing your browser's cache as it will also destroy your saved progress.</p>
<h3>Thanks</h3>
<p>This sheet would not be possible without the incredible work already done by the team at Fextralife, the team behind MapGenie, fellow redditors /u/Athrek and /u/AcceptablePackMule, and the rest of the community.</p>
<p>The foundation of this website was based on <a href="https://github.com/ZKjellberg">ZKjellberg</a>'s <a href="https://github.com/ZKjellberg/dark-souls-3-cheat-sheet">Dark Soul's 3 Cheat Sheet</a> source code.</p>
<h3>DISCLAIMER</h3>
<p>This tracker is still a work in progress, and as such, we apologize for any issues that might come about as we update the checklist and iron out bugs.</p>
<p>We will do our best to ensure that such issues remain few and far between.</p>
""")
with div(cls="tab-pane fade", id="tabOptions"):
h2("Options")
with div(cls="row"):
div(cls="col col-xs-12 col-sm-4 col-md-6").add(h4("Theme selection:"))
div(cls="col col-xs-12 col-sm-4 col-md-6").add(select(cls="form-select", id="themes"))
with div(cls="row"):
div(cls="col col-xs-12 col-sm-4 col-md-6").add(h4("Profile management:"))
with div(cls="col col-xs-12 col-sm-4 col-md-6"):
with form(cls="form-inline input-group pull-right gap-1"):
select(cls="form-select", id="profiles")
with div(cls="btn-group"):
button("Add", cls="btn btn-primary", type="button", id="profileAdd")
with div(cls="btn-group"):
button("Edit", cls="btn btn-primary", type="button", id="profileEdit")
with div(cls="btn-group"):
button("NG+", cls="btn btn-primary", type="button", id="profileNG+")
with div(cls="row"):
div(cls="col col-xs-12 col-sm-4 col-md-6").add(h4("Data import/export:"))
with div(cls="col col-xs-12 col-sm-4 col-md-6"):
with form(cls="form-inline"):
with div(cls="btn-group pull-left"):
button("Import file", cls="btn btn-primary", type="button", id="profileImport")
with div(cls="btn-group pull-left"):
button("Export file", cls="btn btn-primary", type="button", id="profileExport")
with div(cls="btn-group pull-right"):
button("Import textbox", cls="btn btn-primary", type="button", id="profileImportText")
with div(cls="btn-group pull-right"):
button("Export clipboard", cls="btn btn-primary", type="button", id="profileExportText")
with div(cls="col col-xs-12"):
textarea(id="profileText", cls="form-control")
with div(id="profileModal", cls="modal fade", tabindex="-1", role="dialog"):
with div(cls="modal-dialog", role="document"):
with div(cls="modal-content"):
with div(cls="modal-header"):
h3("Profile", id="profileModalTitle", cls="modal-title")
button(type="button", cls="btn-close", data_bs_dismiss="modal", aria_label="Close")
with div(cls="modal-body"):
with form(cls="form-horizontal"):
with div(cls="control-group"):
label("Name", cls="control-label", _for="profileModalName")
div(cls="controls").add(input_(type="text", cls="form-control", id="profileModalName", placeholder="Enter Profile name"))
with div(cls="modal-footer"):
button("Close", id="profileModalClose", cls="btn btn-secondary", data_bs_dismiss="modal")
a("Add", href="#", id="profileModalAdd", cls="btn btn-primary", data_bs_dismiss="modal")
a("Update", href="#", id="profileModalUpdate", cls="btn btn-primary")
a("Delete", href="#", id="profileModalDelete", cls="btn btn-primary")
with div(id="NG+Modal", cls="modal fade", tabindex="-1", role="dialog"):
with div(cls="modal-dialog", role="document"):
with div(cls="modal-content"):
with div(cls="modal-header"):
h3("Begin next journey?", id="profileModalTitle", cls="modal-title")
button(type="button", cls="btn-close", data_bs_dismiss="modal", aria_label="Close")
div('If you begin the next journey, all progress on the "Playthrough" and "Misc" tabs of this profile will be reset, while achievement and collection checklists will be kept.', cls="modal-body")
with div(cls="modal-footer"):
a("No", href="#", cls="btn btn-primary", data_bs_dismiss="modal")
a("Yes", href="#", cls="btn btn-danger", id="NG+ModalYes")
div(cls="hiddenfile").add(input_(name="upload", type="file", id="fileInput"))
a(cls="btn btn-primary btn-sm fadingbutton back-to-top").add(raw("Back to Top "), span(cls="bi bi-arrow-up"))
script(src="https://ajax.googleapis.com/ajax/libs/jquery/3.6.0/jquery.min.js")
script(src="https://cdn.rawgit.com/andris9/jStorage/v0.4.12/jstorage.min.js")
script(src="js/bootstrap.bundle.min.js")
script(src="https://cdnjs.cloudflare.com/ajax/libs/jets/0.8.0/jets.min.js")
script(src="js/jquery.highlight.js")
script(src="js/main.js")
script(src="js/search.js")
raw("""
<!-- Global site tag (gtag.js) - Google Analytics -->
<script async src="https://www.googletagmanager.com/gtag/js?id=G-B7FMWDCTF5"></script>
<script>
window.dataLayer = window.dataLayer || [];
function gtag(){dataLayer.push(arguments);}
gtag('js', new Date());
gtag('config', 'G-B7FMWDCTF5');
</script>
""")
# Write the fully-rendered document to index.html; newline='\n' pins Unix
# line endings so the generated file is identical across build hosts.
with open('index.html', 'w', encoding='utf-8', newline='\n') as index:
    index.write(doc.render())
# Generate js/search.js: one Jets.js instance per page filters that page's
# list from its search box, and a keyup handler re-highlights matches via
# jquery.highlight.  The output is literal JS source text, so the write
# sequence below is kept exactly as-is.
with open(os.path.join('js', 'search.js'), 'w', encoding='utf-8', newline='\n') as jsfile:
    jsfile.writelines([
        '(function($) {\n',
        "    'use strict';\n",
        '    $(function() {\n',
        '        var jets = [new Jets({\n'
    ])
    # One Jets({...}) config per page; table pages are filtered on their
    # tbody, plain checklist pages on their ul.  The last element closes the
    # array ('})];') instead of chaining another 'new Jets({'.
    for i, page in enumerate(pages):
        jsfile.writelines([
            '            searchTag: "#' + page['id'] + '_search",\n',
            '            contentTag: "#' + page['id'] + '_list ' + ('tbody"\n' if 'table' in page['sections'][0] else 'ul"\n'),
            '        }), new Jets({\n' if i < len(pages) - 1 else '})];\n'
        ])
    # Wire un/re-highlighting of search matches to every search box.
    for i, page in enumerate(pages):
        jsfile.writelines([
            '        $("#' + page['id'] + '_search").keyup(function() {\n',
            '            $("#' + page['id'] + '_list").unhighlight();\n',
            '            $("#' + page['id'] + '_list").highlight($(this).val());\n',
            '        });\n'
        ])
    jsfile.write('});\n')
    jsfile.write('})( jQuery );\n')
| 64.29023 | 250 | 0.47611 |
61bc7234c7cb3d4c2220c480da50c2cb9845fcc3 | 985 | py | Python | tests/utils/matching/tokens.py | Wallyhs04/httpie | 6bdcdf1eba5a0b15e025388c84e34eecbe0394ee | [
"BSD-3-Clause"
] | 8,454 | 2020-08-13T10:50:14.000Z | 2022-03-31T21:20:51.000Z | tests/utils/matching/tokens.py | Wallyhs04/httpie | 6bdcdf1eba5a0b15e025388c84e34eecbe0394ee | [
"BSD-3-Clause"
] | 415 | 2020-08-13T11:23:54.000Z | 2022-03-31T15:23:42.000Z | tests/utils/matching/tokens.py | Wallyhs04/httpie | 6bdcdf1eba5a0b15e025388c84e34eecbe0394ee | [
"BSD-3-Clause"
] | 591 | 2020-08-13T19:38:58.000Z | 2022-03-28T20:44:19.000Z | from enum import Enum, auto
class Expect(Enum):
    """
    Predefined token types we can expect in the output.
    """
    REQUEST_HEADERS = auto()
    RESPONSE_HEADERS = auto()
    BODY = auto()
    SEPARATOR = auto()


class ExpectSequence:
    """
    Standard combined chunks. These predefined requests and responses assume a body.

    Every attribute is a flat list of ``Expect`` members describing the token
    stream expected in the program's output.
    """
    RAW_REQUEST = [
        Expect.REQUEST_HEADERS,
        Expect.BODY,
    ]
    RAW_RESPONSE = [
        Expect.RESPONSE_HEADERS,
        Expect.BODY,
    ]
    RAW_EXCHANGE = [
        *RAW_REQUEST,
        Expect.SEPARATOR,  # Good choice?
        *RAW_RESPONSE,
    ]
    RAW_BODY = [
        Expect.BODY,
    ]
    TERMINAL_REQUEST = [
        *RAW_REQUEST,
        Expect.SEPARATOR,
    ]
    TERMINAL_RESPONSE = [
        *RAW_RESPONSE,
        Expect.SEPARATOR,
    ]
    TERMINAL_EXCHANGE = [
        *TERMINAL_REQUEST,
        *TERMINAL_RESPONSE,
    ]
    # Bug fix: splat RAW_BODY like every other sequence above.  Previously the
    # list itself was nested as one element, producing
    # [[Expect.BODY], Expect.SEPARATOR] — not a flat token sequence.
    TERMINAL_BODY = [
        *RAW_BODY,
        Expect.SEPARATOR,
    ]
| 18.942308 | 84 | 0.570558 |
6dab36561c8a81a854ae95c23c74fd9613a0ab69 | 44,445 | py | Python | peripheral/pio_11004/config/pio.py | Unitek-KL/csp | 2ac7ba59465f23959e51d2f16a5712b57b79ef5f | [
"0BSD"
] | null | null | null | peripheral/pio_11004/config/pio.py | Unitek-KL/csp | 2ac7ba59465f23959e51d2f16a5712b57b79ef5f | [
"0BSD"
] | null | null | null | peripheral/pio_11004/config/pio.py | Unitek-KL/csp | 2ac7ba59465f23959e51d2f16a5712b57b79ef5f | [
"0BSD"
] | null | null | null | # coding: utf-8
"""*****************************************************************************
* Copyright (C) 2018 Microchip Technology Inc. and its subsidiaries.
*
* Subject to your compliance with these terms, you may use Microchip software
* and any derivatives exclusively with Microchip products. It is your
* responsibility to comply with third party license terms applicable to your
* use of third party software (including open source software) that may
* accompany Microchip software.
*
* THIS SOFTWARE IS SUPPLIED BY MICROCHIP "AS IS". NO WARRANTIES, WHETHER
* EXPRESS, IMPLIED OR STATUTORY, APPLY TO THIS SOFTWARE, INCLUDING ANY IMPLIED
* WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY, AND FITNESS FOR A
* PARTICULAR PURPOSE.
*
* IN NO EVENT WILL MICROCHIP BE LIABLE FOR ANY INDIRECT, SPECIAL, PUNITIVE,
* INCIDENTAL OR CONSEQUENTIAL LOSS, DAMAGE, COST OR EXPENSE OF ANY KIND
* WHATSOEVER RELATED TO THE SOFTWARE, HOWEVER CAUSED, EVEN IF MICROCHIP HAS
* BEEN ADVISED OF THE POSSIBILITY OR THE DAMAGES ARE FORESEEABLE. TO THE
* FULLEST EXTENT ALLOWED BY LAW, MICROCHIP'S TOTAL LIABILITY ON ALL CLAIMS IN
* ANY WAY RELATED TO THIS SOFTWARE WILL NOT EXCEED THE AMOUNT OF FEES, IF ANY,
* THAT YOU HAVE PAID DIRECTLY TO MICROCHIP FOR THIS SOFTWARE.
*****************************************************************************"""
import re
# NOTE: the module-level "global" statements below are no-ops at module scope;
# they only advertise which names the callback functions later declare global.
# This script runs inside MPLAB Harmony's configurator (Jython/Python 2).
print("Loading Pin Manager for " + Variables.get("__PROCESSOR"))
global pioSymChannel
# PIO controller instances this family may provide (PIOA..PIOE); actual
# availability is probed from the ATDF further below.
pioSymChannel = ["A", "B", "C", "D", "E"]
global uniquePinout
# Number of distinct pinouts across all packages; recomputed once the
# package map has been built.
uniquePinout = 1
##package pinout map
global package
# package name -> pinout name, filled from the ATDF "variants" node.
package = {}
global pin_map
# package pin position -> pad name (e.g. 10 -> "PA5", or "A1" for BGA).
pin_map = {}
global pin_position
# Sorted list of package pin positions (natural order for BGA/WLCSP).
pin_position = []
global sort_alphanumeric
global availablePinDictionary
# pin number (str) -> "Pxn" pad name; served to other components.
availablePinDictionary = {}
# ATDF XPath template used to probe for optional PIO registers.
registerNodeTemplate = "/avr-tools-device-file/modules/module@[name=\"{0}\"]/register-group@[name=\"{1}\"]/register@[name=\"{2}\"]"
# Some devices lack the slew-rate (PIO_SLEWR) and drive-strength (PIO_DRIVER)
# registers; detect their presence so per-pin options can be gated on it.
slewRateControlPresent = coreComponent.createBooleanSymbol("PIO_SLEWR_PRESENT", None)
slewRateControlPresent.setVisible(False)
slewRateControlPresent.setDefaultValue(ATDF.getNode(registerNodeTemplate.format("PIO", "PIO", "PIO_SLEWR")) is not None)
driverControlPresent = coreComponent.createBooleanSymbol("PIO_DRIVER_PRESENT", None)
driverControlPresent.setVisible(False)
driverControlPresent.setDefaultValue(ATDF.getNode(registerNodeTemplate.format("PIO", "PIO", "PIO_DRIVER")) is not None)
###################################################################################################
########################### Callback functions for dependencies #################################
###################################################################################################
global getAvailablePins
# API used by core to return available pins to sender component
def getAvailablePins():
    """Return the {pin number (str): pad name "Pxn"} map for the active package.

    Queried by other Harmony components (via the core service) to discover
    which PIO pads exist on the selected device/package.
    """
    return availablePinDictionary
# Dependency Function to show or hide the warning message depending on Interrupt
def InterruptStatusWarning(symbol, event):
    """Show the warning comment for a channel when its PIO interrupt is in
    use but the user has overridden the interrupt vector settings."""
    global portInterrupt
    # Symbol IDs follow "<prefix>_<channel letter>_...": token 1 selects the
    # channel (assumption based on the pioSymChannel lookup — confirm with
    # the comment-symbol IDs created elsewhere in this module).
    channelIndex = pioSymChannel.index(symbol.getID().split("_")[1])
    interruptInUse = (portInterrupt[channelIndex].getValue() == True)
    vectorOverridden = (Database.getSymbolValue("core", pioSymInterruptVectorUpdate[channelIndex]) == True)
    symbol.setVisible(interruptInUse and vectorOverridden)
# Dependency Function to show or hide the warning message depending on Clock
def ClockStatusWarning(symbol, event):
    """Show the clock-disabled warning comment whenever the channel's
    peripheral clock has been turned off in the clock manager."""
    # The comment is visible exactly when the clock-enable value is False.
    symbol.setVisible(event["value"] == False)
# Dependency Function to pass interrupt related info to Interrupt Manager.
# This function will be entered only by internal change happening to PORT channel interrupt, never by manual
# change because channel interrupt is not user configurable directly.
def pioInterruptControl(pioInterrupt, event):
    """Forward a channel's interrupt enable/disable to the Interrupt Manager:
    set/clear the NVIC vector enable and install either the PLIB handler
    ("PIOx_InterruptHandler") or the default one ("PIOx_Handler")."""
    # Event IDs look like "PIO_<channel>_INTERRUPT_USED"; token 1 of the
    # underscore split is the channel letter.  This parsing depends on the
    # symbol ID naming convention used elsewhere in this module.
    channel = event["id"].split("_")[1]
    k = pioSymChannel.index(channel)
    enabled = (event["value"] == True)
    if enabled:
        handlerName = "PIO" + channel + "_InterruptHandler"
    else:
        handlerName = "PIO" + channel + "_Handler"
    Database.setSymbolValue("core", pioSymInterruptVector[k], enabled, 1)
    Database.setSymbolValue("core", pioSymInterruptHandler[k], handlerName, 1)
    Database.setSymbolValue("core", pioSymInterruptHandlerLock[k], enabled, 1)
def pinLatchCal(pin, event):
    """Fold a pin's initial-latch choice ("High"/"Low") into the aggregate
    PIOx_SODR value of its channel."""
    global pioSym_PIO_SODR
    global pinDirection
    global pinChannel
    global pinBitPosition
    # Symbol ID is "PIN_<n>_LAT": recover the 1-based package pin number.
    pinNumber = int(pin.getID().split("_")[1])
    channel = pinChannel[pinNumber - 1].getValue()
    if channel == "":
        return  # pad is not a PIO pad (power, ground, ...)
    chanIdx = pioSymChannel.index(channel)
    mask = 1 << pinBitPosition[pinNumber - 1].getValue()
    regValue = pioSym_PIO_SODR[chanIdx].getValue()
    if event["value"] == "High":
        regValue |= mask
    else:
        regValue &= ~mask
    pioSym_PIO_SODR[chanIdx].setValue(regValue, 2)
def pinDirCal(pin, event):
    """Fold a pin's direction choice ("Out"/"In") into the aggregate
    PIOx_OER (output-enable) value of its channel."""
    global pioSym_PIO_OER
    global pinChannel
    global pinBitPosition
    global pinLatch
    # Symbol ID is "PIN_<n>_DIR": recover the 1-based package pin number.
    pinNumber = int(pin.getID().split("_")[1])
    channel = pinChannel[pinNumber - 1].getValue()
    if channel == "":
        return  # pad is not a PIO pad
    chanIdx = pioSymChannel.index(channel)
    mask = 1 << pinBitPosition[pinNumber - 1].getValue()
    oerValue = pioSym_PIO_OER[chanIdx].getValue()
    if event["value"] == "Out":
        oerValue |= mask
    else:
        oerValue &= ~mask
    pioSym_PIO_OER[chanIdx].setValue(oerValue, 2)
def pinFunctionCal(pType, pFunction):
    """Translate a pin's peripheral-function letter ("A".."D") into the
    channel's aggregate PIOx_PDR / PIOx_ABCDSR1 / PIOx_ABCDSR2 values; any
    other value reverts the pin to plain PIO (GPIO) control."""
    global sysioPresent
    global pioSym_PIO_PDR
    global pioSym_PIO_ABCDSR1
    global pioSym_PIO_ABCDSR2
    global pioMatrixSym_CCFG_SYSIO
    global pinChannel
    global pinBitPosition
    pinNumber = int(pType.getID().split("_")[1])
    channel = pinChannel[pinNumber - 1].getValue()
    if channel == "":
        return  # pad is not a PIO pad
    chanIdx = pioSymChannel.index(channel)
    mask = 1 << pinBitPosition[pinNumber - 1].getValue()
    pdr = pioSym_PIO_PDR[chanIdx].getValue()
    sr1 = pioSym_PIO_ABCDSR1[chanIdx].getValue()
    sr2 = pioSym_PIO_ABCDSR2[chanIdx].getValue()
    # The (ABCDSR1, ABCDSR2) bit pair selects the peripheral:
    # A = (0,0), B = (1,0), C = (0,1), D = (1,1).
    selection = {"A": (0, 0), "B": (1, 0), "C": (0, 1), "D": (1, 1)}
    func = pFunction["value"]
    if func in selection:
        pdr |= mask  # hand the pad over to the peripheral (PIO disable)
        sr1Bit, sr2Bit = selection[func]
        sr1 = (sr1 | mask) if sr1Bit else (sr1 & ~mask)
        sr2 = (sr2 | mask) if sr2Bit else (sr2 & ~mask)
    else:
        # Plain GPIO: clear the selection bits and keep PIO control.
        sr1 &= ~mask
        sr2 &= ~mask
        pdr &= ~mask
    pioSym_PIO_PDR[chanIdx].setValue(pdr, 2)
    pioSym_PIO_ABCDSR1[chanIdx].setValue(sr1, 2)
    pioSym_PIO_ABCDSR2[chanIdx].setValue(sr2, 2)
def pinInterruptCal(pin, event):
    """Callback for PIN_<n>_PIO_INTERRUPT: update the channel's aggregate
    AIMER/LSR/REHLSR values and the per-channel "interrupt used" flag.

    event["value"] is one of "Falling Edge", "Raising Edge", "Low Level",
    "High Level", or "" (interrupt disabled for this pin).
    """
    global pioSym_PIO_AIMER
    global pioSym_PIO_LSR
    global pioSym_PIO_REHLSR
    global pinChannel
    global pinBitPosition
    pin_num = int((pin.getID()).split("_")[1])
    portChannel = pinChannel[pin_num-1].getValue()
    if portChannel != "":
        channelIndex = pioSymChannel.index(portChannel)
        boolValue = True
        if event["value"] == "":
            # if interrupt has been disabled for a particular pin, then see if is it disabled for all the pins of
            # corresponding channel; if so, then uncheck corresponding port interrupt in GUI.
            boolValue = False
            for pinNumber in range(1, packagePinCount+1):
                if portChannel == pinChannel[pinNumber-1].getValue():
                    if pinInterrupt[pinNumber-1].getValue() != "":
                        boolValue = True
                        break
        portInterrupt[channelIndex].setValue(boolValue, 1)
        bit_pos = pinBitPosition[pin_num-1].getValue()
        AIMER_Value = pioSym_PIO_AIMER[channelIndex].getValue()
        LSR_Value = pioSym_PIO_LSR[channelIndex].getValue()
        REHLSR_Value = pioSym_PIO_REHLSR[channelIndex].getValue()
        # AIMER bit set = pin uses the "additional" (configurable) interrupt
        # mode; LSR picks level vs edge, REHLSR picks rising/high vs
        # falling/low for that pin.
        if (event["value"] == "Falling Edge") or (event["value"] == "Raising Edge") or (event["value"] == "Low Level") or (event["value"] == "High Level"):
            AIMER_Value |= 1 << bit_pos
            if (event["value"] == "Falling Edge"):
                LSR_Value &= ~(1 << bit_pos)
                REHLSR_Value &= ~(1 << bit_pos)
            elif (event["value"] == "Raising Edge"):
                LSR_Value &= ~(1 << bit_pos)
                REHLSR_Value |= (1 << bit_pos)
            elif (event["value"] == "Low Level"):
                LSR_Value |= (1 << bit_pos)
                REHLSR_Value &= ~(1 << bit_pos)
            else:
                LSR_Value |= (1 << bit_pos)
                REHLSR_Value |= (1 << bit_pos)
        else:
            # Disabled: clear all three bits for the pin.
            AIMER_Value &= ~(1 << bit_pos)
            LSR_Value &= ~(1 << bit_pos)
            REHLSR_Value &= ~(1 << bit_pos)
        # NOTE(review): the write order (LSR, REHLSR, then AIMER) is
        # preserved from the original code.
        pioSym_PIO_LSR[channelIndex].setValue(LSR_Value, 2)
        pioSym_PIO_REHLSR[channelIndex].setValue(REHLSR_Value, 2)
        pioSym_PIO_AIMER[channelIndex].setValue(AIMER_Value, 2)
def pinOpenDrainCal(pin, event):
    """Fold a pin's open-drain selection into the aggregate PIOx_MDER
    (multi-driver enable) value of its channel."""
    global pioSym_PIO_MDER
    global pinChannel
    global pinBitPosition
    pinNumber = int(pin.getID().split("_")[1])
    channel = pinChannel[pinNumber - 1].getValue()
    if channel == "":
        return  # pad is not a PIO pad
    chanIdx = pioSymChannel.index(channel)
    mask = 1 << pinBitPosition[pinNumber - 1].getValue()
    mder = pioSym_PIO_MDER[chanIdx].getValue()
    # The symbol value is the *string* "True", not a boolean.
    mder = (mder | mask) if event["value"] == "True" else (mder & ~mask)
    pioSym_PIO_MDER[chanIdx].setValue(mder, 2)
def pinPullUpCal(pin, event):
    """Fold a pin's pull-up selection into the aggregate PIOx_PUER
    (pull-up enable) value of its channel."""
    global pioSym_PIO_PUER
    global pinChannel
    global pinBitPosition
    pinNumber = int(pin.getID().split("_")[1])
    channel = pinChannel[pinNumber - 1].getValue()
    if channel == "":
        return  # pad is not a PIO pad
    chanIdx = pioSymChannel.index(channel)
    mask = 1 << pinBitPosition[pinNumber - 1].getValue()
    puer = pioSym_PIO_PUER[chanIdx].getValue()
    # The symbol value is the *string* "True", not a boolean.
    puer = (puer | mask) if event["value"] == "True" else (puer & ~mask)
    pioSym_PIO_PUER[chanIdx].setValue(puer, 2)
def pinPullDownCal(pin, event):
    """Fold a pin's pull-down selection into the aggregate PIOx_PPDEN
    (pad pull-down enable) value of its channel."""
    global pioSym_PIO_PPDEN
    global pinChannel
    global pinBitPosition
    pinNumber = int(pin.getID().split("_")[1])
    channel = pinChannel[pinNumber - 1].getValue()
    if channel == "":
        return  # pad is not a PIO pad
    chanIdx = pioSymChannel.index(channel)
    mask = 1 << pinBitPosition[pinNumber - 1].getValue()
    ppden = pioSym_PIO_PPDEN[chanIdx].getValue()
    # The symbol value is the *string* "True", not a boolean.
    ppden = (ppden | mask) if event["value"] == "True" else (ppden & ~mask)
    pioSym_PIO_PPDEN[chanIdx].setValue(ppden, 2)
def pinFilterCal(pin, event):
    """Fold a pin's input-filter choice into the aggregate PIOx_IFER and
    PIOx_IFSCER values of its channel.

    "Glitch Filter" sets only IFER; "Debounce Filter" sets IFER and IFSCER
    (filter clocked from the slow clock); anything else clears both bits.
    """
    global pioSym_PIO_IFER
    global pioSym_PIO_IFSCER
    global pinChannel
    global pinBitPosition
    pinNumber = int(pin.getID().split("_")[1])
    channel = pinChannel[pinNumber - 1].getValue()
    if channel == "":
        return  # pad is not a PIO pad
    chanIdx = pioSymChannel.index(channel)
    mask = 1 << pinBitPosition[pinNumber - 1].getValue()
    ifer = pioSym_PIO_IFER[chanIdx].getValue()
    ifscer = pioSym_PIO_IFSCER[chanIdx].getValue()
    choice = event["value"]
    filterEnabled = choice in ("Debounce Filter", "Glitch Filter")
    slowClockSelected = (choice == "Debounce Filter")
    ifer = (ifer | mask) if filterEnabled else (ifer & ~mask)
    ifscer = (ifscer | mask) if slowClockSelected else (ifscer & ~mask)
    pioSym_PIO_IFER[chanIdx].setValue(ifer, 2)
    pioSym_PIO_IFSCER[chanIdx].setValue(ifscer, 2)
def pinSlewRateControlCal(pin, event):
    """Fold a pin's slew-rate-control selection into the aggregate
    PIOx_SLEWR value of its channel."""
    pinNum = int(pin.getID().split("_")[1])
    channel = pinChannel[pinNum - 1].getValue()
    if channel == "":
        return  # pad is not a PIO pad
    chanIdx = pioSymChannel.index(channel)
    mask = 1 << pinBitPosition[pinNum - 1].getValue()
    slewr = pioSym_PIO_SLEWR[chanIdx].getValue()
    # The symbol value is the *string* "True", not a boolean.
    slewr = (slewr | mask) if event["value"] == "True" else (slewr & ~mask)
    pioSym_PIO_SLEWR[chanIdx].setValue(slewr, 0)
def pinDriverCal(pin, event):
    """Fold a pin's drive-strength selection ("High"/"Low") into the
    aggregate PIOx_DRIVER value of its channel."""
    pinNum = int(pin.getID().split("_")[1])
    channel = pinChannel[pinNum - 1].getValue()
    if channel == "":
        return  # pad is not a PIO pad
    chanIdx = pioSymChannel.index(channel)
    mask = 1 << pinBitPosition[pinNum - 1].getValue()
    driver = pioSym_PIO_DRIVER[chanIdx].getValue()
    driver = (driver | mask) if event["value"] == "High" else (driver & ~mask)
    pioSym_PIO_DRIVER[chanIdx].setValue(driver, 0)
def packageChange(pinoutSymbol, pinout):
    """Callback for COMPONENT_PACKAGE: rebuild the position->pad map and
    re-seed every pin's channel/bit-position symbols for the new package.

    pinout["value"] is the newly selected package name.
    """
    import re
    global uniquePinout
    global package
    global pin_map
    global pin_position
    global pin
    global pinChannel
    global pinBitPosition
    ### No need to process if the device has only one pinout but multiple packages eg: TQFP, LQFP and QFN
    if uniquePinout > 1:
        pin_map = {}
        pin_position = []
        pinoutNode = ATDF.getNode('/avr-tools-device-file/pinouts/pinout@[name= "' + str(package.get(pinout["value"])) + '"]')
        for id in range(0,len(pinoutNode.getChildren())):
            if "BGA" in pinout["value"] or "WLCSP" in pinout["value"]:
                # BGA/WLCSP positions are alphanumeric balls ("A1"); keep them as strings.
                pin_map[pinoutNode.getChildren()[id].getAttribute("position")] = pinoutNode.getChildren()[id].getAttribute("pad")
            else:
                pin_map[int(pinoutNode.getChildren()[id].getAttribute("position"))] = pinoutNode.getChildren()[id].getAttribute("pad")
        if "BGA" in pinout["value"] or "WLCSP" in pinout["value"]:
            ## BGA package ID's are alphanumeric unlike TQFP special sorting required
            pin_position = sort_alphanumeric(pin_map.keys())
        else:
            pin_position = sorted(pin_map.keys())
        for pinNumber in range(0, len(pinoutNode.getChildren())):
            pin[pinNumber].setLabel("Pin " + str(pin_position[pinNumber]))
            # Clear first; pads that are not PIO pads (VDD, GND, ...) stay cleared.
            pinBitPosition[pinNumber].setValue(-1, 2)
            pinChannel[pinNumber].setValue("", 2)
            if pin_map.get(pin_position[pinNumber]).startswith("P"):
                # Pad names look like "PA10": the digits are the bit position
                # and the letter after "P" is the channel.
                pinBitPosition[pinNumber].setValue(int(re.findall('\d+', pin_map.get(pin_position[pinNumber]))[0]), 2)
                pinChannel[pinNumber].setValue(pin_map.get(pin_position[pinNumber])[1], 2)
def sysIOConfigChange(symbol, event):
    """Update the CCFG_SYSIO value when a pin that doubles as a system I/O
    line changes function: the pin's SYSIO bit is set (pad follows the PIO
    function) unless the pin is configured for its dedicated system role."""
    global pin_map
    global sysIOConfigdict
    # Event ID is "PIN_<n>_...": recover the 1-based package pin number,
    # then the pad name for that position.
    pinNumber = int(event["id"].split("_")[1])
    pad = pin_map.get(pin_position[pinNumber - 1])
    # sysIOConfigdict (populated elsewhere in this module — presumably
    # pad -> (system function name, SYSIO bit mask); verify there) tells us
    # whether this pad needs SYSIO handling at all.
    cfg = sysIOConfigdict.get(pad)
    if not cfg:
        return
    sysioFunction, sysioMask = cfg
    newValue = symbol.getValue()
    if event["value"] and (event["value"] != sysioFunction):
        newValue |= sysioMask   # non-system function chosen: force PIO control
    else:
        newValue &= ~sysioMask  # keep the dedicated system function
    if newValue != symbol.getValue():
        symbol.setValue(newValue)
###################################################################################################
######################################### Helper functions #######################################
###################################################################################################
def sort_alphanumeric(l):
    """Return *l* sorted in natural (human) order, e.g. "A2" before "A10".

    Used for BGA/WLCSP ball positions where a plain lexicographic sort
    would misplace multi-digit numbers. Comparison is case-insensitive.
    """
    import re
    def natural_key(text):
        chunks = re.split('([0-9]+)', text)
        return [int(chunk) if chunk.isdigit() else chunk.lower() for chunk in chunks]
    return sorted(l, key=natural_key)
###################################################################################################
######################################### PIO Main Menu ##########################################
###################################################################################################
# Root menu node for the PIO PLIB in the configurator tree.
pioMenu = coreComponent.createMenuSymbol("PIO_MENU", None)
pioMenu.setLabel("Ports (PIO)")
pioMenu.setDescription("Configuration for PIO PLIB")
# The PLIB is always enabled for this family, hence read-only True.
pioEnable = coreComponent.createBooleanSymbol("PIO_ENABLE", pioMenu)
pioEnable.setLabel("Use PIO PLIB?")
pioEnable.setDefaultValue(True)
pioEnable.setReadOnly(True)
# Needed to map port system APIs to PLIB APIs
pioSymAPI_Prefix = coreComponent.createStringSymbol("PORT_API_PREFIX", None)
pioSymAPI_Prefix.setDefaultValue("PIO")
pioSymAPI_Prefix.setVisible(False)
###################################################################################################
################################# Pin Configuration related code ##################################
###################################################################################################
# Per-pin symbol lists, all indexed by (package pin number - 1).
global pin
pin = []
pinName = []
pinType = []
pinPeripheralFunction = []
global pinBitPosition
pinBitPosition = []
global pinChannel
pinChannel = []
global pinDirection
pinDirection = []
global pinLatch
pinLatch = []
pinOpenDrain = []
pinPullUp = []
pinPullDown = []
global pinInterrupt
pinInterrupt = []
pinGlitchFilter = []
pinFunctionTypelList = []
pinInterruptList = []
pinSlewRateList =[]
pinDriverList = []
# Build package-pinout map
packageNode = ATDF.getNode("/avr-tools-device-file/variants")
for id in range(0,len(packageNode.getChildren())):
    package[packageNode.getChildren()[id].getAttribute("package")] = packageNode.getChildren()[id].getAttribute("pinout")
pioPackage = coreComponent.createComboSymbol("COMPONENT_PACKAGE", pioEnable, package.keys())
pioPackage.setLabel("Pin Package")
pioPackage.setReadOnly(True)
pioPackage.setDependencies(packageChange, ["COMPONENT_PACKAGE"])
## Find Number of unique pinouts
uniquePinout = len(set(package.values()))
global packagePinCount
# Pin count is parsed from the package name (e.g. "LQFP144" -> 144).
# NOTE: dict.keys()[0] is Python-2/Jython-only subscripting; this script
# runs under MPLAB Harmony's Jython interpreter (see the py2 print below).
packagePinCount = int(re.findall(r'\d+', package.keys()[0])[0])
pinConfiguration = coreComponent.createMenuSymbol("PIO_PIN_CONFIGURATION", pioEnable)
pinConfiguration.setLabel("Pin Configuration")
pinConfiguration.setDescription("Configuration for PIO Pins")
pinTotalPins = coreComponent.createIntegerSymbol("PIO_PIN_TOTAL" , pinConfiguration)
pinTotalPins.setVisible(False)
pinTotalPins.setDefaultValue(packagePinCount)
# Build pins position-pad map
pinoutNode = ATDF.getNode('/avr-tools-device-file/pinouts/pinout@[name= "' + str(package.get(pioPackage.getValue())) + '"]')
for id in range(0,len(pinoutNode.getChildren())):
    if "BGA" in pioPackage.getValue() or "WLCSP" in pioPackage.getValue():
        # BGA/WLCSP positions are alphanumeric balls ("A1"); keep as strings.
        pin_map[pinoutNode.getChildren()[id].getAttribute("position")] = pinoutNode.getChildren()[id].getAttribute("pad")
    else:
        pin_map[int(pinoutNode.getChildren()[id].getAttribute("position"))] = pinoutNode.getChildren()[id].getAttribute("pad")
if "BGA" in pioPackage.getValue() or "WLCSP" in pioPackage.getValue():
    pin_position = sort_alphanumeric(pin_map.keys())
else:
    pin_position = sorted(pin_map.keys())
# Note that all the lists below starts from 0th index and goes till "packagePinCount-1"
# But actual pin numbers on the device starts from 1 (not from 0) and goes till "packagePinCount"
# that is why "pinNumber-1" is used to index the lists wherever applicable.
# Create the per-pin symbol set (name, peripheral function, type, channel/bit,
# direction, latch, open-drain, pulls, interrupt, filter, slew rate, drive)
# for every package pin.  Most symbols are read-only here: the Pin Manager
# GUI drives them, and their callbacks fold the choices into the per-channel
# aggregate register values.
for pinNumber in range(1, packagePinCount + 1):
    pin.append(pinNumber)
    pin[pinNumber-1]= coreComponent.createMenuSymbol("PIO_PIN_CONFIGURATION" + str(pinNumber - 1), pinConfiguration)
    pin[pinNumber-1].setLabel("Pin " + str(pin_position[pinNumber-1]))
    pin[pinNumber-1].setDescription("Configuration for Pin " + str(pin_position[pinNumber-1]))
    pinName.append(pinNumber)
    pinName[pinNumber-1] = coreComponent.createStringSymbol("PIN_" + str(pinNumber) + "_FUNCTION_NAME", pin[pinNumber-1])
    pinName[pinNumber-1].setLabel("Name")
    pinName[pinNumber-1].setDefaultValue("")
    pinName[pinNumber-1].setReadOnly(True)
    pinPeripheralFunction.append(pinNumber)
    pinPeripheralFunction[pinNumber-1] = coreComponent.createStringSymbol("PIN_" + str(pinNumber) + "_PERIPHERAL_FUNCTION", pin[pinNumber-1])
    pinPeripheralFunction[pinNumber-1].setLabel("Peripheral Selection")
    pinPeripheralFunction[pinNumber-1].setReadOnly(True)
    pinType.append(pinNumber)
    pinType[pinNumber-1] = coreComponent.createStringSymbol("PIN_" + str(pinNumber) + "_FUNCTION_TYPE", pin[pinNumber-1])
    pinType[pinNumber-1].setLabel("Type")
    pinType[pinNumber-1].setReadOnly(True)
    # A peripheral-function change recomputes PDR/ABCDSR via pinFunctionCal.
    pinType[pinNumber-1].setDependencies(pinFunctionCal, ["PIN_" + str(pinNumber) + "_PERIPHERAL_FUNCTION"])
    pinBitPosition.append(pinNumber)
    pinBitPosition[pinNumber-1] = coreComponent.createIntegerSymbol("PIN_" + str(pinNumber) + "_PIO_PIN", pin[pinNumber-1])
    pinBitPosition[pinNumber-1].setLabel("Bit Position")
    pinBitPosition[pinNumber-1].setReadOnly(True)
    pinChannel.append(pinNumber)
    pinChannel[pinNumber-1] = coreComponent.createStringSymbol("PIN_" + str(pinNumber) + "_PIO_CHANNEL", pin[pinNumber-1])
    pinChannel[pinNumber-1].setLabel("Channel")
    pinChannel[pinNumber-1].setDefaultValue("")
    pinChannel[pinNumber-1].setReadOnly(True)
    # Pads named "Pxn" are PIO pads: seed channel letter and bit number and
    # publish the pad in the dictionary served to other components.
    if pin_map.get(pin_position[pinNumber-1]).startswith("P"):
        pinBitPosition[pinNumber-1].setDefaultValue(int(re.findall('\d+', pin_map.get(pin_position[pinNumber-1]))[0]))
        pinChannel[pinNumber-1].setDefaultValue(pin_map.get(pin_position[pinNumber-1])[1])
        availablePinDictionary[str(pinNumber)] = "P" + str(pinChannel[pinNumber-1].getValue()) + str(pinBitPosition[pinNumber-1].getValue())
    pinDirection.append(pinNumber)
    pinDirection[pinNumber-1] = coreComponent.createStringSymbol("PIN_" + str(pinNumber) + "_DIR", pin[pinNumber-1])
    pinDirection[pinNumber-1].setLabel("Direction")
    pinDirection[pinNumber-1].setReadOnly(True)
    pinDirection[pinNumber-1].setDependencies(pinDirCal, ["PIN_" + str(pinNumber) + "_DIR" ])
    pinLatch.append(pinNumber)
    pinLatch[pinNumber-1] = coreComponent.createStringSymbol("PIN_" + str(pinNumber) + "_LAT", pin[pinNumber-1])
    pinLatch[pinNumber-1].setLabel("Initial Latch Value")
    pinLatch[pinNumber-1].setReadOnly(True)
    pinLatch[pinNumber-1].setDefaultValue("")
    pinLatch[pinNumber-1].setDependencies(pinLatchCal, ["PIN_" + str(pinNumber) + "_LAT"])
    pinOpenDrain.append(pinNumber)
    pinOpenDrain[pinNumber-1] = coreComponent.createStringSymbol("PIN_" + str(pinNumber) + "_OD", pin[pinNumber-1])
    pinOpenDrain[pinNumber-1].setLabel("Open Drain")
    pinOpenDrain[pinNumber-1].setReadOnly(True)
    pinOpenDrain[pinNumber-1].setDependencies(pinOpenDrainCal, ["PIN_" + str(pinNumber) + "_OD"])
    pinPullUp.append(pinNumber)
    pinPullUp[pinNumber-1] = coreComponent.createStringSymbol("PIN_" + str(pinNumber) + "_PU", pin[pinNumber-1])
    pinPullUp[pinNumber-1].setLabel("Pull Up")
    pinPullUp[pinNumber-1].setReadOnly(True)
    pinPullUp[pinNumber-1].setDependencies(pinPullUpCal, ["PIN_" + str(pinNumber) + "_PU"])
    pinPullDown.append(pinNumber)
    pinPullDown[pinNumber-1] = coreComponent.createStringSymbol("PIN_" + str(pinNumber) + "_PD", pin[pinNumber-1])
    pinPullDown[pinNumber-1].setLabel("Pull Down")
    pinPullDown[pinNumber-1].setReadOnly(True)
    pinPullDown[pinNumber-1].setDependencies(pinPullDownCal, ["PIN_" + str(pinNumber) + "_PD"])
    pinInterrupt.append(pinNumber)
    # This symbol ID name is split and pin number is extracted and used inside "pinInterruptCal" function. so be careful while changing the name of this ID.
    pinInterrupt[pinNumber-1] = coreComponent.createStringSymbol("PIN_" + str(pinNumber) + "_PIO_INTERRUPT", pin[pinNumber-1])
    pinInterrupt[pinNumber-1].setLabel("PIO Interrupt")
    pinInterrupt[pinNumber-1].setReadOnly(True)
    pinInterrupt[pinNumber-1].setDependencies(pinInterruptCal, ["PIN_" + str(pinNumber) + "_PIO_INTERRUPT"])
    pinGlitchFilter.append(pinNumber)
    pinGlitchFilter[pinNumber-1] = coreComponent.createStringSymbol("PIN_" + str(pinNumber) + "_PIO_FILTER", pin[pinNumber-1])
    pinGlitchFilter[pinNumber-1].setLabel("PIO Filter")
    pinGlitchFilter[pinNumber-1].setReadOnly(True)
    pinGlitchFilter[pinNumber-1].setDependencies(pinFilterCal, ["PIN_" + str(pinNumber) + "_PIO_FILTER"])
    # Slew-rate and drive-strength options exist only on devices whose ATDF
    # exposes the PIO_SLEWR / PIO_DRIVER registers (detected near file top).
    if slewRateControlPresent.getValue():
        pinSlewRateList.append(pinNumber)
        pinSlewRateList[pinNumber - 1] = coreComponent.createStringSymbol("PIN_" + str(pinNumber) + "_SLEW_RATE", pin[pinNumber - 1])
        pinSlewRateList[pinNumber - 1].setLabel("PIO Slew Rate Control")
        pinSlewRateList[pinNumber - 1].setReadOnly(True)
        pinSlewRateList[pinNumber - 1].setDependencies(pinSlewRateControlCal, ["PIN_" + str(pinNumber) + "_SLEW_RATE"])
    if driverControlPresent.getValue():
        pinDriverList.append(pinNumber)
        pinDriverList[pinNumber - 1] = coreComponent.createStringSymbol("PIN_" + str(pinNumber) + "_DRIVER", pin[pinNumber - 1])
        pinDriverList[pinNumber - 1].setLabel("PIO Drive")
        pinDriverList[pinNumber - 1].setReadOnly(True)
        pinDriverList[pinNumber - 1].setDependencies(pinDriverCal, ["PIN_" + str(pinNumber) + "_DRIVER"])
    #list created only for dependency
    pinFunctionTypelList.append(pinNumber)
    pinFunctionTypelList[pinNumber-1] = "PIN_" + str(pinNumber) +"_FUNCTION_TYPE"
    #list created only for dependency
    pinInterruptList.append(pinNumber)
    pinInterruptList[pinNumber-1] = "PIN_" + str(pinNumber) +"_PIO_INTERRUPT"
###################################################################################################
################################# PORT Configuration related code #################################
###################################################################################################
def activateInterrupt(symbol, event):
    """Keep the aggregate interrupt-activation symbol in sync: it is true
    while at least one PIO channel has its interrupt in use."""
    global interruptDependncy
    # any() short-circuits at the first enabled channel, matching the
    # original loop's break.
    active = any(Database.getSymbolValue("core", dep) for dep in interruptDependncy)
    if symbol.getValue() != active:
        symbol.setValue(active, 2)
portConfiguration = coreComponent.createMenuSymbol("PIO_CONFIGURATION", pioEnable)
portConfiguration.setLabel("PIO Registers Configuration")
# Per-channel symbol lists, indexed by position in pioSymChannel.  The
# pioSym_PIO_* lists hold the aggregate register values that the per-pin
# callbacks above maintain bit by bit.
port = []
portInterruptList = []
global portInterrupt
portInterrupt = []
global pioSym_PIO_PDR
pioSym_PIO_PDR = []
global pioSym_PIO_ABCDSR1
pioSym_PIO_ABCDSR1 = []
global pioSym_PIO_ABCDSR2
pioSym_PIO_ABCDSR2 = []
global pioSym_PIO_AIMER
pioSym_PIO_AIMER = []
global pioSym_PIO_LSR
pioSym_PIO_LSR = []
global pioSym_PIO_REHLSR
pioSym_PIO_REHLSR = []
global pioSym_PIO_OER
pioSym_PIO_OER = []
global pioSym_PIO_PUER
pioSym_PIO_PUER = []
global pioSym_PIO_PPDEN
pioSym_PIO_PPDEN = []
global pioSym_PIO_MDER
pioSym_PIO_MDER = []
global pioSym_PIO_SODR
pioSym_PIO_SODR = []
global pioSym_PIO_IFSCER
pioSym_PIO_IFSCER = []
global pioSym_PIO_IFER
pioSym_PIO_IFER = []
pioSym_PIO_SCDR = []
global pioSym_PIO_SLEWR
pioSym_PIO_SLEWR = []
global pioSym_PIO_DRIVER
pioSym_PIO_DRIVER = []
# Interrupt-manager symbol names, filled per channel further below.
global pioSymInterruptVector
pioSymInterruptVector = []
global pioSymInterruptHandler
pioSymInterruptHandler = []
global pioSymInterruptHandlerLock
pioSymInterruptHandlerLock = []
global pioSymInterruptVectorUpdate
pioSymInterruptVectorUpdate = []
pioSymClkEnComment = []
global pioSymIntEnComment
pioSymIntEnComment = []
global interruptDependncy
# Probe the ATDF for which PIO instances (PIOA..PIOE) this device provides.
portAvailable = {}
for id in pioSymChannel:
    node = ATDF.getNode("/avr-tools-device-file/devices/device/peripherals/module@[name=\"PIO\"]/instance@[name=\"" "PIO" + str(id) + "" "\"]")
    if node != None:
        # Python-2 print statement: this script runs under Harmony's Jython.
        print "Available"
        portAvailable["PIO" + id] = True
    else:
        portAvailable["PIO" + id] = False
# NOTE(review): the name "interruptDependncy" (sic) is shared across the
# module (see activateInterrupt), so the typo must be kept.
interruptDependncy = []
for portNumber in range(0, len(pioSymChannel)):
#Enable Peripheral clock for all the PORT Channels in Clock Manager
Database.setSymbolValue("core", "PIO" + str(pioSymChannel[portNumber]) + "_CLOCK_ENABLE", True, 1)
if portAvailable["PIO" + pioSymChannel[portNumber]]:
port.append(portNumber)
port[portNumber]= coreComponent.createMenuSymbol("PIO_CONFIGURATION" + str(portNumber), portConfiguration)
port[portNumber].setLabel("PIO " + pioSymChannel[portNumber] + " Configuration")
pioSym_PIO_SCDR.append(portNumber)
pioSym_PIO_SCDR[portNumber] = coreComponent.createHexSymbol("PIO" + str(pioSymChannel[portNumber]) + "_SCDR_VALUE", port[portNumber])
pioSym_PIO_SCDR[portNumber].setLabel("PIO" + str(pioSymChannel[portNumber]) + "_SCDR")
pioSym_PIO_SCDR[portNumber].setDefaultValue(0x00000000)
pioSym_PIO_SCDR[portNumber].setMin(0x0)
pioSym_PIO_SCDR[portNumber].setMax(0x00003FFF)
portInterrupt.append(portNumber)
portInterrupt[portNumber]= coreComponent.createBooleanSymbol("PIO_" + str(pioSymChannel[portNumber]) + "_INTERRUPT_USED", port[portNumber])
portInterrupt[portNumber].setLabel("Use Interrupt for PIO " + pioSymChannel[portNumber])
portInterrupt[portNumber].setDefaultValue(False)
portInterrupt[portNumber].setVisible(True)
portInterrupt[portNumber].setReadOnly(True)
interruptDependncy.append("PIO_" + str(pioSymChannel[portNumber]) + "_INTERRUPT_USED")
#list created only for dependency
portInterruptList.append(portNumber)
portInterruptList[portNumber] = "PIO_" + str(pioSymChannel[portNumber]) + "_INTERRUPT_USED"
pioSym_PIO_PDR.append(portNumber)
pioSym_PIO_PDR[portNumber] = coreComponent.createHexSymbol("PIO" + str(pioSymChannel[portNumber]) + "_PDR_VALUE", port[portNumber])
pioSym_PIO_PDR[portNumber].setLabel("PIO" + str(pioSymChannel[portNumber]) + "_PDR")
pioSym_PIO_PDR[portNumber].setDefaultValue(0x00000000)
pioSym_PIO_PDR[portNumber].setReadOnly(True)
pioSym_PIO_ABCDSR1.append(portNumber)
pioSym_PIO_ABCDSR1[portNumber] = coreComponent.createHexSymbol("PIO" + str(pioSymChannel[portNumber]) + "_ABCDSR1_VALUE", port[portNumber])
pioSym_PIO_ABCDSR1[portNumber].setLabel("PIO" + str(pioSymChannel[portNumber]) + "_ABCDSR1")
pioSym_PIO_ABCDSR1[portNumber].setDefaultValue(0x00000000)
pioSym_PIO_ABCDSR1[portNumber].setReadOnly(True)
pioSym_PIO_ABCDSR2.append(portNumber)
pioSym_PIO_ABCDSR2[portNumber] = coreComponent.createHexSymbol("PIO" + str(pioSymChannel[portNumber]) + "_ABCDSR2_VALUE", port[portNumber])
pioSym_PIO_ABCDSR2[portNumber].setLabel("PIO" + str(pioSymChannel[portNumber]) + "_ABCDSR2")
pioSym_PIO_ABCDSR2[portNumber].setDefaultValue(0x00000000)
pioSym_PIO_ABCDSR2[portNumber].setReadOnly(True)
pioSym_PIO_OER.append(portNumber)
pioSym_PIO_OER[portNumber] = coreComponent.createHexSymbol("PIO" + str(pioSymChannel[portNumber]) + "_OER_VALUE", port[portNumber])
pioSym_PIO_OER[portNumber].setLabel("PIO" + str(pioSymChannel[portNumber]) + "_OER")
pioSym_PIO_OER[portNumber].setDefaultValue(0x00000000)
pioSym_PIO_OER[portNumber].setReadOnly(True)
pioSym_PIO_SODR.append(portNumber)
pioSym_PIO_SODR[portNumber] = coreComponent.createHexSymbol("PIO" + str(pioSymChannel[portNumber]) + "_SODR_VALUE", port[portNumber])
pioSym_PIO_SODR[portNumber].setLabel("PIO" + str(pioSymChannel[portNumber]) + "_SODR")
pioSym_PIO_SODR[portNumber].setDefaultValue(0x00000000)
pioSym_PIO_SODR[portNumber].setReadOnly(True)
pioSym_PIO_AIMER.append(portNumber)
pioSym_PIO_AIMER[portNumber] = coreComponent.createHexSymbol("PIO" + str(pioSymChannel[portNumber]) + "_AIMER_VALUE", port[portNumber])
pioSym_PIO_AIMER[portNumber].setLabel("PIO" + str(pioSymChannel[portNumber]) + "_AIMER")
pioSym_PIO_AIMER[portNumber].setDefaultValue(0x00000000)
pioSym_PIO_AIMER[portNumber].setReadOnly(True)
pioSym_PIO_LSR.append(portNumber)
pioSym_PIO_LSR[portNumber] = coreComponent.createHexSymbol("PIO" + str(pioSymChannel[portNumber]) + "_LSR_VALUE", port[portNumber])
pioSym_PIO_LSR[portNumber].setLabel("PIO" + str(pioSymChannel[portNumber]) + "_LSR")
pioSym_PIO_LSR[portNumber].setDefaultValue(0x00000000)
pioSym_PIO_LSR[portNumber].setReadOnly(True)
pioSym_PIO_REHLSR.append(portNumber)
pioSym_PIO_REHLSR[portNumber] = coreComponent.createHexSymbol("PIO" + str(pioSymChannel[portNumber]) + "_REHLSR_VALUE", port[portNumber])
pioSym_PIO_REHLSR[portNumber].setLabel("PIO" + str(pioSymChannel[portNumber]) + "_REHLSR")
pioSym_PIO_REHLSR[portNumber].setDefaultValue(0x00000000)
pioSym_PIO_REHLSR[portNumber].setReadOnly(True)
pioSym_PIO_PUER.append(portNumber)
pioSym_PIO_PUER[portNumber] = coreComponent.createHexSymbol("PIO" + str(pioSymChannel[portNumber]) + "_PUER_VALUE", port[portNumber])
pioSym_PIO_PUER[portNumber].setLabel("PIO" + str(pioSymChannel[portNumber]) + "_PUER")
pioSym_PIO_PUER[portNumber].setDefaultValue(0x00000000)
pioSym_PIO_PUER[portNumber].setReadOnly(True)
pioSym_PIO_PPDEN.append(portNumber)
pioSym_PIO_PPDEN[portNumber] = coreComponent.createHexSymbol("PIO" + str(pioSymChannel[portNumber]) + "_PPDEN_VALUE", port[portNumber])
pioSym_PIO_PPDEN[portNumber].setLabel("PIO" + str(pioSymChannel[portNumber]) + "_PPDEN")
pioSym_PIO_PPDEN[portNumber].setDefaultValue(0x00000000)
pioSym_PIO_PPDEN[portNumber].setReadOnly(True)
pioSym_PIO_MDER.append(portNumber)
pioSym_PIO_MDER[portNumber] = coreComponent.createHexSymbol("PIO" + str(pioSymChannel[portNumber]) + "_MDER_VALUE", port[portNumber])
pioSym_PIO_MDER[portNumber].setLabel("PIO" + str(pioSymChannel[portNumber]) + "_MDER")
pioSym_PIO_MDER[portNumber].setDefaultValue(0x00000000)
pioSym_PIO_MDER[portNumber].setReadOnly(True)
pioSym_PIO_IFER.append(portNumber)
pioSym_PIO_IFER[portNumber] = coreComponent.createHexSymbol("PIO" + str(pioSymChannel[portNumber]) + "_IFER_VALUE", port[portNumber])
pioSym_PIO_IFER[portNumber].setLabel("PIO" + str(pioSymChannel[portNumber]) + "_IFER")
pioSym_PIO_IFER[portNumber].setDefaultValue(0x00000000)
pioSym_PIO_IFER[portNumber].setReadOnly(True)
pioSym_PIO_IFSCER.append(portNumber)
pioSym_PIO_IFSCER[portNumber] = coreComponent.createHexSymbol("PIO" + str(pioSymChannel[portNumber]) + "_IFSCER_VALUE", port[portNumber])
pioSym_PIO_IFSCER[portNumber].setLabel("PIO" + str(pioSymChannel[portNumber]) + "_IFSCER")
pioSym_PIO_IFSCER[portNumber].setDefaultValue(0x00000000)
pioSym_PIO_IFSCER[portNumber].setReadOnly(True)
if slewRateControlPresent.getValue():
pioSym_PIO_SLEWR.append(portNumber)
pioSym_PIO_SLEWR[portNumber] = coreComponent.createHexSymbol("PIO" + str(pioSymChannel[portNumber]) + "_SLEWR_VALUE", port[portNumber])
pioSym_PIO_SLEWR[portNumber].setLabel("PIO" + str(pioSymChannel[portNumber]) + "_SLEWR")
pioSym_PIO_SLEWR[portNumber].setDefaultValue(0x00000000)
pioSym_PIO_SLEWR[portNumber].setReadOnly(True)
if driverControlPresent.getValue():
pioSym_PIO_DRIVER.append(portNumber)
pioSym_PIO_DRIVER[portNumber] = coreComponent.createHexSymbol("PIO" + str(pioSymChannel[portNumber]) + "_DRIVER_VALUE", port[portNumber])
pioSym_PIO_DRIVER[portNumber].setLabel("PIO" + str(pioSymChannel[portNumber]) + "_DRIVER")
pioSym_PIO_DRIVER[portNumber].setDefaultValue(0x00000000)
pioSym_PIO_DRIVER[portNumber].setReadOnly(True)
#symbols and variables for interrupt handling
pioSymInterruptVector.append(portNumber)
pioSymInterruptVector[portNumber] = "PIO" + str(pioSymChannel[portNumber]) + "_INTERRUPT_ENABLE"
pioSymInterruptHandler.append(portNumber)
pioSymInterruptHandler[portNumber] = "PIO" + str(pioSymChannel[portNumber]) + "_INTERRUPT_HANDLER"
pioSymInterruptHandlerLock.append(portNumber)
pioSymInterruptHandlerLock[portNumber] = "PIO" + str(pioSymChannel[portNumber]) + "_INTERRUPT_HANDLER_LOCK"
pioSymInterruptVectorUpdate.append(portNumber)
pioSymInterruptVectorUpdate[portNumber] = "PIO" + str(pioSymChannel[portNumber]) + "_INTERRUPT_ENABLE_UPDATE"
# Dependency Status for interrupt
pioSymIntEnComment.append(portNumber)
pioSymIntEnComment[portNumber] = coreComponent.createCommentSymbol("PIO_" + str(pioSymChannel[portNumber]) + "_NVIC_ENABLE_COMMENT", pioMenu)
pioSymIntEnComment[portNumber].setVisible(False)
pioSymIntEnComment[portNumber].setLabel("Warning!!! PIO" + str(pioSymChannel[portNumber]) + " Interrupt is Disabled in Interrupt Manager")
pioSymIntEnComment[portNumber].setDependencies(InterruptStatusWarning, ["core." + pioSymInterruptVectorUpdate[portNumber], "PIO_" + str(pioSymChannel[portNumber]) + "_INTERRUPT_USED"])
# Dependency Status for clock
pioSymClkEnComment.append(portNumber)
pioSymClkEnComment[portNumber] = coreComponent.createCommentSymbol("PIO_" + str(pioSymChannel[portNumber]) + "_CLK_ENABLE_COMMENT", pioMenu)
pioSymClkEnComment[portNumber].setVisible(False)
pioSymClkEnComment[portNumber].setLabel("Warning!!! PIO" + str(pioSymChannel[portNumber]) + " Peripheral Clock is Disabled in Clock Manager")
pioSymClkEnComment[portNumber].setDependencies(ClockStatusWarning, ["core.PIO" + str(pioSymChannel[portNumber]) + "_CLOCK_ENABLE"])
else:
port.append(portNumber)
pioSym_PIO_SCDR.append(portNumber)
portInterrupt.append(portNumber)
#list created only for dependency
portInterruptList.append("")
pioSym_PIO_PDR.append(portNumber)
pioSym_PIO_ABCDSR1.append(portNumber)
pioSym_PIO_ABCDSR2.append(portNumber)
pioSym_PIO_OER.append(portNumber)
pioSym_PIO_SODR.append(portNumber)
pioSym_PIO_AIMER.append(portNumber)
pioSym_PIO_LSR.append(portNumber)
pioSym_PIO_REHLSR.append(portNumber)
pioSym_PIO_PUER.append(portNumber)
pioSym_PIO_PPDEN.append(portNumber)
pioSym_PIO_MDER.append(portNumber)
pioSym_PIO_IFER.append(portNumber)
pioSym_PIO_IFSCER.append(portNumber)
if slewRateControlPresent.getValue():
pioSym_PIO_SLEWR.append(portNumber)
if driverControlPresent.getValue():
pioSym_PIO_DRIVER.append(portNumber)
#symbols and variables for interrupt handling
pioSymInterruptVector.append(portNumber)
pioSymInterruptHandler.append(portNumber)
pioSymInterruptHandlerLock.append(portNumber)
pioSymInterruptVectorUpdate.append(portNumber)
# Dependency Status for interrupt
pioSymIntEnComment.append(portNumber)
# Dependency Status for clock
pioSymClkEnComment.append(portNumber)
interruptActive = coreComponent.createBooleanSymbol("INTERRUPT_ACTIVE", portConfiguration)
interruptActive.setDefaultValue(False)
interruptActive.setVisible(False)
interruptActive.setDependencies(activateInterrupt, interruptDependncy)
# Interrupt Dynamic settings
pioSymInterruptControl = coreComponent.createBooleanSymbol("NVIC_PIO_ENABLE", None)
pioSymInterruptControl.setDependencies(pioInterruptControl, portInterruptList)
pioSymInterruptControl.setVisible(False)
###################################################################################################
################################# SYS IO related code ############################################
###################################################################################################
global sysIOConfigdict
matrixName, sysioRegName, sysIOConfigdict = getArchSYSIOInformation()
if matrixName is not None:
pioSymMatrixName = coreComponent.createStringSymbol("MATRIX_NAME", None)
pioSymMatrixName.setVisible(False)
pioSymMatrixName.setDefaultValue(matrixName)
if sysioRegName is not None:
pioSymSysIORegName = coreComponent.createStringSymbol("SYSIO_REG_NAME", None)
pioSymSysIORegName.setVisible(False)
pioSymSysIORegName.setDefaultValue(sysioRegName)
if sysIOConfigdict is not None:
# Note: all sysio config registers are not named as CCFG_SYSIO, symbol name is retained for backward compatibility
pioSymSysIORegVal = coreComponent.createHexSymbol("PIO_CCFG_SYSIO_VALUE", portConfiguration)
pioSymSysIORegVal.setLabel("CCFG_SYSIO")
pioSymSysIORegVal.setDescription("System Pins as GPIO")
pioSymSysIORegVal.setDefaultValue(0x00000000)
pioSymSysIORegVal.setVisible(False)
pioSymSysIORegVal.setDependencies(sysIOConfigChange, pinFunctionTypelList)
###################################################################################################
####################################### Code Generation ##########################################
###################################################################################################
configName = Variables.get("__CONFIGURATION_NAME")
pioHeaderFile = coreComponent.createFileSymbol("PIO_HEADER", None)
pioHeaderFile.setSourcePath("../peripheral/pio_11004/templates/plib_pio.h.ftl")
pioHeaderFile.setOutputName("plib_pio.h")
pioHeaderFile.setDestPath("/peripheral/pio/")
pioHeaderFile.setProjectPath("config/" + configName +"/peripheral/pio/")
pioHeaderFile.setType("HEADER")
pioHeaderFile.setMarkup(True)
pioSource1File = coreComponent.createFileSymbol("PIO_SOURCE", None)
pioSource1File.setSourcePath("../peripheral/pio_11004/templates/plib_pio.c.ftl")
pioSource1File.setOutputName("plib_pio.c")
pioSource1File.setDestPath("/peripheral/pio/")
pioSource1File.setProjectPath("config/" + configName +"/peripheral/pio/")
pioSource1File.setType("SOURCE")
pioSource1File.setMarkup(True)
pioSystemInitFile = coreComponent.createFileSymbol("PIO_INIT", None)
pioSystemInitFile.setType("STRING")
pioSystemInitFile.setOutputName("core.LIST_SYSTEM_INIT_C_SYS_INITIALIZE_CORE")
pioSystemInitFile.setSourcePath("../peripheral/pio_11004/templates/system/initialization.c.ftl")
pioSystemInitFile.setMarkup(True)
pioSystemDefFile = coreComponent.createFileSymbol("PIO_DEF", None)
pioSystemDefFile.setType("STRING")
pioSystemDefFile.setOutputName("core.LIST_SYSTEM_DEFINITIONS_H_INCLUDES")
pioSystemDefFile.setSourcePath("../peripheral/pio_11004/templates/system/definitions.h.ftl")
pioSystemDefFile.setMarkup(True)
bspIncludeFile = coreComponent.createFileSymbol("PIO_BSP_H", None)
bspIncludeFile.setType("STRING")
bspIncludeFile.setOutputName("core.LIST_BSP_MACRO_INCLUDES")
bspIncludeFile.setSourcePath("../peripheral/pio_11004/templates/plib_pio_bsp.h.ftl")
bspIncludeFile.setMarkup(True)
bspIncludeFile = coreComponent.createFileSymbol("PIO_BSP_C", None)
bspIncludeFile.setType("STRING")
bspIncludeFile.setOutputName("core.LIST_BSP_INITIALIZATION")
bspIncludeFile.setSourcePath("../peripheral/pio_11004/templates/plib_pio_bsp.c.ftl")
bspIncludeFile.setMarkup(True)
sysPortIncludeFile = coreComponent.createFileSymbol("PIO_SYSPORT_H", None)
sysPortIncludeFile.setType("STRING")
sysPortIncludeFile.setOutputName("core.LIST_SYS_PORT_INCLUDES")
sysPortIncludeFile.setSourcePath("../peripheral/pio_11004/templates/plib_pio_sysport.h.ftl")
sysPortIncludeFile.setMarkup(True)
| 45.7724 | 192 | 0.682506 |
724528d99cf32ac5c770c052145edb962fa73047 | 2,052 | py | Python | paddlevideo/modeling/framework/recognizers/recognizer_gcn.py | zpc-666/CCF_BDCI_Competition | 9914513248c641aa4b7b70acabfee2ed0c8e2525 | [
"Apache-2.0"
] | 1 | 2022-03-27T04:11:55.000Z | 2022-03-27T04:11:55.000Z | paddlevideo/modeling/framework/recognizers/recognizer_gcn.py | zpc-666/CCF_BDCI_Competition | 9914513248c641aa4b7b70acabfee2ed0c8e2525 | [
"Apache-2.0"
] | 1 | 2021-12-03T07:39:37.000Z | 2022-01-21T03:02:43.000Z | paddlevideo/modeling/framework/recognizers/recognizer_gcn.py | zpc-666/CCF_BDCI_Competition | 9914513248c641aa4b7b70acabfee2ed0c8e2525 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from ...registry import RECOGNIZERS
from .base import BaseRecognizer
from paddlevideo.utils import get_logger
# Module-level logger shared by the recognizer implementations in this package.
logger = get_logger("paddlevideo")
@RECOGNIZERS.register()
class RecognizerGCN(BaseRecognizer):
    """Recognizer framework for graph-convolutional (GCN) models.

    The backbone extracts skeleton features and the head turns them into
    class scores; the ``*_step`` methods wire the two together for the
    different runtime phases (train / val / test / infer).
    """

    def forward_net(self, data):
        """Run the model from raw input to classification scores."""
        return self.head(self.backbone(data))

    def train_step(self, data_batch):
        """One training iteration: forward pass followed by loss computation."""
        data, label = data_batch[0], data_batch[1:]
        scores = self.forward_net(data)
        return self.head.loss(scores, label)

    def val_step(self, data_batch):
        """One validation iteration: returns loss metrics, scores and labels."""
        data, label = data_batch[0], data_batch[1:]
        scores = self.forward_net(data)
        metrics = self.head.loss(scores, label, valid_mode=True)
        return metrics, scores, label[0]

    def test_step(self, data_batch):
        """Forward pass only, for evaluation on the test set."""
        return self.forward_net(data_batch[0])

    def infer_step(self, data_batch):
        """Forward pass only, for deployment-time inference."""
        return self.forward_net(data_batch[0])
| 29.314286 | 75 | 0.62232 |
9c949c6dd774cb97de3307c91b07dcd27dbf6213 | 4,526 | py | Python | pyocd/utility/progress.py | laot007/RTTView | 2159ef15455d79c67e93a78219b688e4f1b5454c | [
"MIT"
] | 57 | 2019-01-05T08:55:16.000Z | 2021-06-05T09:28:30.000Z | pyocd/utility/progress.py | laot007/RTTView | 2159ef15455d79c67e93a78219b688e4f1b5454c | [
"MIT"
] | 2 | 2021-11-25T02:34:00.000Z | 2022-01-19T12:04:48.000Z | pyocd/utility/progress.py | laot007/RTTView | 2159ef15455d79c67e93a78219b688e4f1b5454c | [
"MIT"
] | 28 | 2019-03-29T00:47:12.000Z | 2021-04-07T13:58:34.000Z | # pyOCD debugger
# Copyright (c) 2017-2018 Arm Limited
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import logging
# Logger for progress-reporting diagnostics (out-of-range values, backwards progress).
log = logging.getLogger('progress')
class ProgressReport(object):
    """!
    @brief Base progress report class.

    Implements the progress-tracking logic (reset on 0.0, completion on 1.0,
    detection of backwards progress) but produces no output itself.
    Subclasses must override _update() and _finish() to draw.
    """

    def __init__(self, file=None):
        """! @brief Constructor.
        @param file Output stream; defaults to sys.stdout when None.
        """
        self._file = file or sys.stdout
        self.prev_progress = 0
        self.backwards_progress = False
        self.done = False
        self.last = 0

    def __call__(self, progress):
        """! @brief Feed a new progress value, nominally in the range [0, 1]."""
        assert progress >= 0.0
        # assert progress <= 1.0 # TODO restore this assert when the progress > 1 bug is fixed
        # assert (progress == 0 and self.prev_progress == 1.0) or (progress >= self.prev_progress)
        if progress > 1.0:
            log.debug("progress out of bounds: %.3f", progress)

        # Reset state on 0.0
        if progress == 0.0:
            self._start()

        # Check for backwards progress
        if progress < self.prev_progress:
            self.backwards_progress = True
        self.prev_progress = progress

        # print progress bar
        if not self.done:
            self._update(progress)

        # Finish on 1.0
        if progress >= 1.0:
            self._finish()
            if self.backwards_progress:
                log.warning("Progress went backwards!")

    def _start(self):
        """! @brief Reset tracking state at the beginning of a new operation."""
        self.prev_progress = 0
        self.backwards_progress = False
        self.done = False
        self.last = 0

    def _update(self, progress):
        """! @brief Draw the current progress. Subclasses must override."""
        # Bug fix: the original `raise NotImplemented()` raised a TypeError
        # because NotImplemented is a non-callable singleton, not an exception.
        raise NotImplementedError()

    def _finish(self):
        """! @brief Finalize output at 100%. Subclasses must override."""
        raise NotImplementedError()
class ProgressReportTTY(ProgressReport):
    """!
    @brief Progress report subclass for TTYs.

    Redraws the whole progress bar in place (via a carriage return) on every
    update, which animates the bar on an interactive terminal.
    """
    ## Bar width in characters; the format string below is hard-coded to match.
    WIDTH = 20

    def _update(self, progress):
        filled = int(progress * self.WIDTH)
        bar = "[%-20s] %3d%%" % ('=' * filled, round(progress * 100))
        self._file.write('\r')
        self._file.write(bar)
        self._file.flush()

    def _finish(self):
        self.done = True
        self._file.write("\n")
class ProgressReportNoTTY(ProgressReport):
    """!
    @brief Progress report subclass for non-TTY output.

    Instead of redrawing, only the newly completed portion of the bar is
    emitted on each update, so the output is append-only and safe to pipe
    to a file or another process.
    """
    ## Bar width in characters; the ruler drawn in _start() is hard-coded to match.
    WIDTH = 40

    def _start(self):
        super(ProgressReportNoTTY, self)._start()
        # Draw a fixed ruler line, then open the bracket of the actual bar.
        self._file.write('[' + '---|' * 9 + '----]\n[')
        self._file.flush()

    def _update(self, progress):
        filled = int(progress * self.WIDTH)
        # Emit only the columns completed since the previous update.
        self._file.write('=' * (filled - self.last))
        self._file.flush()
        self.last = filled

    def _finish(self):
        self.done = True
        self._file.write("]\n")
        self._file.flush()
def print_progress(file=None):
    """!
    @brief Progress printer factory.

    Checks whether the output file is attached to a TTY and instantiates the
    matching ProgressReport subclass: animated (redraw-in-place) for TTYs,
    append-only otherwise.

    @param file The output file. Optional. If not provided, or if set to None,
        then sys.stdout will be used automatically.
    """
    out = sys.stdout if file is None else file

    try:
        is_interactive = os.isatty(out.fileno())
    except (OSError, AttributeError):
        # The stream either has no fileno() method or calling it failed;
        # treat the stream as not being connected to a TTY.
        is_interactive = False

    if is_interactive:
        return ProgressReportTTY(out)
    return ProgressReportNoTTY(out)
| 30.375839 | 98 | 0.628369 |
35284a0bfd17654d0ad3c398f2e2b8f62fdbebfa | 2,209 | py | Python | A_source_code/carbon/code/general_path.py | vanHoek-dgnm/CARBON-DISC | 3ecd5f4efba5e032d43679ee977064d6b25154a9 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | A_source_code/carbon/code/general_path.py | vanHoek-dgnm/CARBON-DISC | 3ecd5f4efba5e032d43679ee977064d6b25154a9 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | A_source_code/carbon/code/general_path.py | vanHoek-dgnm/CARBON-DISC | 3ecd5f4efba5e032d43679ee977064d6b25154a9 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | # ******************************************************
## Copyright 2019, PBL Netherlands Environmental Assessment Agency and Utrecht University.
## Reuse permitted under Gnu Public License, GPL v3.
# ******************************************************
#print("Import general_path.")
import os
import sys
# Read the name of the user for the personal version of modules.
# User/submodel selector. Hard-coded here; the error messages below still
# refer to the DGNM_USER environment variable this value historically came from.
name = 'carbon'
# NOTE(review): 'name' is a hard-coded string above, so this None check can
# never trigger; kept for parity with the other parameter checks.
if (name == None):
    print("***** ERROR ******")
    print("Environment parameter DGNM_USER is not set.")
    sys.exit(1)
#print("NAME: ",name)
# Read the environment parameter with the rootdirectory
# Root is derived from this file's location rather than an environment variable.
root = os.path.dirname(os.path.abspath(__file__))
# NOTE(review): os.path.dirname() always returns a string, so this None check
# is unreachable; the isdir() check below is the effective validation.
if (root == None):
    print("***** ERROR ******")
    print("Environment parameter DGNM_ROOT is not set.")
    sys.exit(1)
if (not os.path.isdir(root)):
    print("***** ERROR ******")
    print("Environment parameter DGNM_ROOT is not set correctly.")
    print("Environment parameter DGNM_ROOT found: ",root)
    sys.exit(1)
#print("ROOT: ",root)
# Read the environment parameter with the generalcode directory
# The shared 'generalcode' tree is expected two directory levels up from here.
generalcode = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..', 'generalcode', 'trunk')
if (generalcode == None):
    print("***** ERROR ******")
    print("Environment parameter DGNM_GENERALCODE is not set.")
    sys.exit(1)
# A missing generalcode directory is fatal: the model cannot run without it.
if (not os.path.isdir(generalcode)):
    print("***** ERROR ******")
    print("Environment parameter DGNM_GENERALCODE is not set correctly.")
    print("Environment parameter DGNM_GENERALCODE found: ",generalcode)
    sys.exit(1)
#print("GENERALCODE: ",generalcode)
# Set the generalcode directory in the python path
path = generalcode
if os.path.exists(path):
    sys.path.insert(0, path)
    print(path + " is added to the python search path for modules.")
# Set the core directory in the python path
path = os.path.join(root,'..', '..', 'core')
print(path)
if os.path.exists(path):
    sys.path.insert(0, path)
    print(path + " is added to the python search path for modules.")
# Set the personal directory in the python path
# Unlike the generalcode check above, a missing personal directory is skipped silently.
path = os.path.join(root,'..', name,'code')
if os.path.exists(path):
    sys.path.insert(0, path)
    print(path + " is added to the python search path for modules.")
| 35.629032 | 106 | 0.646899 |
e60abad02a0202523989712954c54041d5bb5767 | 8,038 | py | Python | torchgeo/models/changestar.py | remtav/torchgeo | d06b103f81edec4f4e0d13ccd621d318364679a2 | [
"MIT"
] | null | null | null | torchgeo/models/changestar.py | remtav/torchgeo | d06b103f81edec4f4e0d13ccd621d318364679a2 | [
"MIT"
] | null | null | null | torchgeo/models/changestar.py | remtav/torchgeo | d06b103f81edec4f4e0d13ccd621d318364679a2 | [
"MIT"
] | null | null | null | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
"""ChangeStar implementations."""
from typing import Dict, List
import torch
import torch.nn as nn
from einops import rearrange
from torch import Tensor
from torch.nn.modules import Module
from .farseg import FarSeg
# https://github.com/pytorch/pytorch/issues/60979
# https://github.com/pytorch/pytorch/pull/61045
# Workaround for the issues above: make documentation tools report the public
# path torch.nn.Module instead of the private torch.nn.modules location.
Module.__module__ = "torch.nn"
class ChangeMixin(Module):
    """Turns any segmentation backbone into a binary change detector.

    Attach this module to a segmentation model stripped of its classification
    head; it compares bitemporal feature maps in both temporal orderings.

    If you use this model in your research, please cite the following paper:

    * https://arxiv.org/abs/2108.07002
    """

    def __init__(
        self,
        in_channels: int = 128 * 2,
        inner_channels: int = 16,
        num_convs: int = 4,
        scale_factor: float = 4.0,
    ):
        """Initializes a new ChangeMixin module.

        Args:
            in_channels: sum of channels of bitemporal feature maps
            inner_channels: number of channels of inner feature maps
            num_convs: number of convolution blocks
            scale_factor: number of upsampling factor
        """
        super().__init__()
        # The first conv block maps the concatenated bitemporal features down
        # to ``inner_channels``; the remaining blocks keep that width.
        blocks: List[Module] = []
        channels = in_channels
        for _ in range(num_convs):
            blocks.append(
                nn.modules.Sequential(
                    nn.modules.Conv2d(channels, inner_channels, 3, 1, 1),
                    nn.modules.BatchNorm2d(inner_channels),  # type: ignore[no-untyped-call]
                    nn.modules.ReLU(True),
                )
            )
            channels = inner_channels
        # Single-channel change logit, upsampled back to the input resolution.
        blocks.append(nn.modules.Conv2d(inner_channels, 1, 3, 1, 1))
        blocks.append(nn.modules.UpsamplingBilinear2d(scale_factor=scale_factor))
        self.convs = nn.modules.Sequential(*blocks)

    def forward(self, bi_feature: Tensor) -> List[Tensor]:
        """Forward pass of the model.

        Args:
            bi_feature: input bitemporal feature maps of shape [b, t, c, h, w]

        Returns:
            a list of bidirected output predictions
        """
        batch_size = bi_feature.size(0)
        t1, t2 = bi_feature[:, 0, :, :, :], bi_feature[:, 1, :, :, :]
        # Stack both temporal orderings into one batch so a single pass
        # through ``self.convs`` scores the t1->t2 and t2->t1 directions.
        ordered = torch.cat(
            [torch.cat([t1, t2], dim=1), torch.cat([t2, t1], dim=1)], dim=0
        )
        logits = self.convs(ordered)
        c12, c21 = torch.split(logits, batch_size, dim=0)  # type: ignore[no-untyped-call]
        return [c12, c21]
class ChangeStar(Module):
    """The base class of the network architecture of ChangeStar.

    ChangeStar couples an arbitrary segmentation network with a ChangeMixin
    module. It is mainly used for binary/multi-class change detection under
    bitemporal or single-temporal supervision, and reuses the segmentation
    architecture: for multi-class change detection, semantic change can be
    inferred from the binary change prediction of the ChangeMixin module plus
    the two semantic predictions of the segmentation model.

    If you use this model in your research, please cite the following paper:

    * https://arxiv.org/abs/2108.07002
    """

    def __init__(
        self,
        dense_feature_extractor: Module,
        seg_classifier: Module,
        changemixin: ChangeMixin,
        inference_mode: str = "t1t2",
    ) -> None:
        """Initializes a new ChangeStar model.

        Args:
            dense_feature_extractor: module for dense feature extraction, typically a
                semantic segmentation model without semantic segmentation head.
            seg_classifier: semantic segmentation head, typically a convolutional layer
                followed by an upsampling layer.
            changemixin: :class:`torchgeo.models.ChangeMixin` module
            inference_mode: name of inference mode ``'t1t2'`` | ``'t2t1'`` | ``'mean'``.
                ``'t1t2'``: concatenate bitemporal features in the order of t1->t2;
                ``'t2t1'``: concatenate bitemporal features in the order of t2->t1;
                ``'mean'``: the weighted mean of the output of ``'t1t2'`` and ``'t2t1'``
        """
        super().__init__()
        # Assignment order matters: it fixes the submodule registration order.
        self.dense_feature_extractor = dense_feature_extractor
        self.seg_classifier = seg_classifier
        self.changemixin = changemixin

        if inference_mode not in ("t1t2", "t2t1", "mean"):
            raise ValueError(f"Unknown inference_mode: {inference_mode}")
        self.inference_mode = inference_mode

    def forward(self, x: Tensor) -> Dict[str, Tensor]:
        """Forward pass of the model.

        Args:
            x: a bitemporal input tensor of shape [B, T, C, H, W]

        Returns:
            a dictionary containing bitemporal semantic segmentation logit and binary
            change detection logit/probability
        """
        b, t, c, h, w = x.shape
        # Fold the temporal axis into the batch axis so both epochs share a
        # single pass through the feature extractor and segmentation head.
        flat = rearrange(x, "b t c h w -> (b t) c h w")
        flat_feature = self.dense_feature_extractor(flat)
        flat_seg_logit = self.seg_classifier(flat_feature)

        # Restore the temporal axis for change detection and for the output.
        bi_seg_logit = rearrange(flat_seg_logit, "(b t) c h w -> b t c h w", t=t)
        bi_feature = rearrange(flat_feature, "(b t) c h w -> b t c h w", t=t)

        # Bidirected binary change logits (t1->t2 and t2->t1).
        c12, c21 = self.changemixin(bi_feature)

        results: Dict[str, Tensor] = {"bi_seg_logit": bi_seg_logit}
        if self.training:
            results["bi_change_logit"] = torch.stack([c12, c21], dim=1)
        else:
            if self.inference_mode == "t1t2":
                results["change_prob"] = c12.sigmoid()
            elif self.inference_mode == "t2t1":
                results["change_prob"] = c21.sigmoid()
            elif self.inference_mode == "mean":
                results["change_prob"] = (
                    torch.stack([c12, c21], dim=0).sigmoid_().mean(dim=0)
                )
        return results
class ChangeStarFarSeg(ChangeStar):
    """The network architecture of ChangeStar(FarSeg).

    A ChangeStar model whose dense feature extractor is a FarSeg network with
    its classifier detached and reused as the segmentation head.

    If you use this model in your research, please cite the following paper:

    * https://arxiv.org/abs/2108.07002
    """

    def __init__(
        self,
        backbone: str = "resnet50",
        classes: int = 1,
        backbone_pretrained: bool = True,
    ) -> None:
        """Initializes a new ChangeStarFarSeg model.

        Args:
            backbone: name of ResNet backbone
            classes: number of output segmentation classes
            backbone_pretrained: whether to use pretrained weight for backbone
        """
        farseg = FarSeg(
            backbone=backbone, classes=classes, backbone_pretrained=backbone_pretrained
        )
        # Detach FarSeg's classifier: it becomes ChangeStar's segmentation
        # head, while the remaining network acts as the feature extractor.
        head: Module = farseg.decoder.classifier
        farseg.decoder.classifier = (
            nn.modules.Identity()  # type: ignore[no-untyped-call, assignment]
        )
        super().__init__(
            dense_feature_extractor=farseg,
            seg_classifier=head,
            changemixin=ChangeMixin(
                in_channels=128 * 2, inner_channels=16, num_convs=4, scale_factor=4.0
            ),
            inference_mode="t1t2",
        )
| 35.883929 | 88 | 0.605126 |
67333e31ea3b33ebfa1fc50554f11282f683383b | 226 | py | Python | test_package/tests/unit/composite_calc/test_sigma.py | KMilhan/pytestdocgen | 2af629a23f9f7e315d9943db8554d58b55216b4b | [
"BSD-3-Clause"
] | null | null | null | test_package/tests/unit/composite_calc/test_sigma.py | KMilhan/pytestdocgen | 2af629a23f9f7e315d9943db8554d58b55216b4b | [
"BSD-3-Clause"
] | null | null | null | test_package/tests/unit/composite_calc/test_sigma.py | KMilhan/pytestdocgen | 2af629a23f9f7e315d9943db8554d58b55216b4b | [
"BSD-3-Clause"
] | null | null | null | from mock_package import sigma
def test_sum_of_zero_is_zero():
    # A sequence of 1024 zeros must sum to zero.
    assert sigma([0] * 1024) == 0
def test_sigma_same_values_with_opposite_sign_returns_zero():
    # -2 + -1 + 0 + 1 + 2 cancels out to zero.
    assert sigma(list(range(-2, 3))) == 0
| 22.6 | 61 | 0.730088 |
8b9de8df167f1a5538e0f5e2bd13b52973bbc882 | 553 | py | Python | LeetCode/Leaf-Similar Trees.py | UtkarshPathrabe/Competitive-Coding | ba322fbb1b88682d56a9b80bdd92a853f1caa84e | [
"MIT"
] | 13 | 2021-09-02T07:30:02.000Z | 2022-03-22T19:32:03.000Z | LeetCode/Leaf-Similar Trees.py | UtkarshPathrabe/Competitive-Coding | ba322fbb1b88682d56a9b80bdd92a853f1caa84e | [
"MIT"
] | null | null | null | LeetCode/Leaf-Similar Trees.py | UtkarshPathrabe/Competitive-Coding | ba322fbb1b88682d56a9b80bdd92a853f1caa84e | [
"MIT"
] | 3 | 2021-08-24T16:06:22.000Z | 2021-09-17T15:39:53.000Z | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def leafSimilar(self, root1: TreeNode, root2: TreeNode) -> bool:
        """Return True if both trees have the same left-to-right leaf value sequence."""

        def leaves(root):
            # Iterative preorder walk; pushing right before left keeps the
            # leaf values in left-to-right order.
            sequence = []
            stack = [root]
            while stack:
                node = stack.pop()
                if node is None:
                    continue
                if node.left is None and node.right is None:
                    sequence.append(node.val)
                else:
                    stack.append(node.right)
                    stack.append(node.left)
            return sequence

        return leaves(root1) == leaves(root2)
5d7d9ceb0d3a18f0ab0000e7c6278c513680d794 | 393 | py | Python | mayan/apps/events/urls.py | mbehrle/mayan-edms | 9ebf27d2ea1666eaa36ad6ddc0fb9c6accf5cced | [
"Apache-2.0"
] | null | null | null | mayan/apps/events/urls.py | mbehrle/mayan-edms | 9ebf27d2ea1666eaa36ad6ddc0fb9c6accf5cced | [
"Apache-2.0"
] | 1 | 2022-03-12T01:03:39.000Z | 2022-03-12T01:03:39.000Z | mayan/apps/events/urls.py | mbehrle/mayan-edms | 9ebf27d2ea1666eaa36ad6ddc0fb9c6accf5cced | [
"Apache-2.0"
] | null | null | null | from __future__ import unicode_literals
from django.conf.urls import patterns, url
# URL routes for the events app, using the legacy (pre-Django-1.10)
# ``patterns()`` helper: view names are strings resolved against the
# 'events.views' prefix, and the url() positional arguments here are
# (regex, view, extra kwargs, url name).
urlpatterns = patterns('events.views',
    # Full event list.
    url(r'^all/$', 'events_list', (), 'events_list'),
    # Events attached to one model instance, addressed by content type and pk.
    url(r'^for_object/(?P<app_label>[\w\-]+)/(?P<module_name>[\w\-]+)/(?P<object_id>\d+)/$', 'events_list', (), 'events_for_object'),
    # Events filtered by action verb.
    url(r'^by_verb/(?P<verb>[\w\-]+)/$', 'events_list', (), 'events_by_verb'),
)
| 39.3 | 133 | 0.633588 |
b781d63471b392b299d7a5c2b0916111534b33bd | 4,361 | py | Python | examples/LaTeX/lin_tran_check.py | waldyrious/galgebra | b5eb070340434d030dd737a5656fbf709538b0b1 | [
"BSD-3-Clause"
] | 151 | 2018-09-18T12:30:14.000Z | 2022-03-16T08:02:48.000Z | examples/LaTeX/lin_tran_check.py | waldyrious/galgebra | b5eb070340434d030dd737a5656fbf709538b0b1 | [
"BSD-3-Clause"
] | 454 | 2018-09-19T01:42:30.000Z | 2022-01-18T14:02:00.000Z | examples/LaTeX/lin_tran_check.py | waldyrious/galgebra | b5eb070340434d030dd737a5656fbf709538b0b1 | [
"BSD-3-Clause"
] | 30 | 2019-02-22T08:25:50.000Z | 2022-01-15T05:20:22.000Z | from __future__ import print_function
from sympy import symbols, sin, cos, simplify
from galgebra.ga import Ga
from galgebra.printer import Format, xpdf, Eprint, Print_Function, latex
from galgebra.lt import Symbolic_Matrix
def main():
# Print_Function()
(x, y, z) = xyz = symbols('x,y,z',real=True)
(o3d, ex, ey, ez) = Ga.build('e_x e_y e_z', g=[1, 1, 1], coords=xyz)
grad = o3d.grad
(u, v) = uv = symbols('u,v',real=True)
(g2d, eu, ev) = Ga.build('e_u e_v', coords=uv)
grad_uv = g2d.grad
v_xyz = o3d.mv('v','vector')
A_xyz = o3d.mv('A','vector',f=True)
A_uv = g2d.mv('A','vector',f=True)
print('#3d orthogonal ($A$ is vector function)')
print('A =', A_xyz)
print('%A^{2} =', A_xyz * A_xyz)
print('grad|A =', grad | A_xyz)
print('grad*A =', grad * A_xyz)
print('v|(grad*A) =',v_xyz|(grad*A_xyz))
print('#2d general ($A$ is vector function)')
print('A =', A_uv)
print('%A^{2} =', A_uv * A_uv)
print('grad|A =', grad_uv | A_uv)
print('grad*A =', grad_uv * A_uv)
A = o3d.lt('A')
print('#3d orthogonal ($A,\\;B$ are linear transformations)')
print('A =', A)
print(r'\f{mat}{A} =', A.matrix())
print('\\f{\\det}{A} =', A.det())
print('\\overline{A} =', A.adj())
print('\\f{\\Tr}{A} =', A.tr())
print('\\f{A}{e_x^e_y} =', A(ex^ey))
print('\\f{A}{e_x}^\\f{A}{e_y} =', A(ex)^A(ey))
B = o3d.lt('B')
print('g =', o3d.g)
print('%g^{-1} =', latex(o3d.g_inv))
print('A + B =', A + B)
print('AB =', A * B)
print('A - B =', A - B)
print('General Symmetric Linear Transformation')
Asym = o3d.lt('A',mode='s')
print('A =', Asym)
print('General Antisymmetric Linear Transformation')
Aasym = o3d.lt('A',mode='a')
print('A =', Aasym)
print('#2d general ($A,\\;B$ are linear transformations)')
A2d = g2d.lt('A')
print('g =', g2d.g)
print('%g^{-1} =', latex(g2d.g_inv))
print('%gg^{-1} =', latex(simplify(g2d.g * g2d.g_inv)))
print('A =', A2d)
print(r'\f{mat}{A} =', A2d.matrix())
print('\\f{\\det}{A} =', A2d.det())
A2d_adj = A2d.adj()
print('\\overline{A} =', A2d_adj)
print('\\f{mat}{\\overline{A}} =', latex(simplify(A2d_adj.matrix())))
print('\\f{\\Tr}{A} =', A2d.tr())
print('\\f{A}{e_u^e_v} =', A2d(eu^ev))
print('\\f{A}{e_u}^\\f{A}{e_v} =', A2d(eu)^A2d(ev))
B2d = g2d.lt('B')
print('B =', B2d)
print('A + B =', A2d + B2d)
print('A - B =', A2d - B2d)
# TODO: add this back when we drop Sympy 1.3. The 64kB of output is far too
# printer-dependent
if False:
print('AB =', A2d * B2d)
a = g2d.mv('a','vector')
b = g2d.mv('b','vector')
print(r'a|\f{\overline{A}}{b}-b|\f{\underline{A}}{a} =',((a|A2d.adj()(b))-(b|A2d(a))).simplify())
m4d = Ga('e_t e_x e_y e_z', g=[1, -1, -1, -1],coords=symbols('t,x,y,z',real=True))
T = m4d.lt('T')
print('g =', m4d.g)
print(r'\underline{T} =',T)
print(r'\overline{T} =',T.adj())
print(r'\f{\det}{\underline{T}} =',T.det())
print(r'\f{\mbox{tr}}{\underline{T}} =',T.tr())
a = m4d.mv('a','vector')
b = m4d.mv('b','vector')
print(r'a|\f{\overline{T}}{b}-b|\f{\underline{T}}{a} =',((a|T.adj()(b))-(b|T(a))).simplify())
coords = (r, th, phi) = symbols('r,theta,phi', real=True)
(sp3d, er, eth, ephi) = Ga.build('e_r e_th e_ph', g=[1, r**2, r**2*sin(th)**2], coords=coords)
grad = sp3d.grad
sm_coords = (u, v) = symbols('u,v', real=True)
smap = [1, u, v] # Coordinate map for sphere of r = 1
sph2d = sp3d.sm(smap,sm_coords,norm=True)
(eu, ev) = sph2d.mv()
grad_uv = sph2d.grad
F = sph2d.mv('F','vector',f=True)
f = sph2d.mv('f','scalar',f=True)
print('f =',f)
print('grad*f =',grad_uv * f)
print('F =',F)
print('grad*F =',grad_uv * F)
tp = (th,phi) = symbols('theta,phi',real=True)
smap = [sin(th)*cos(phi),sin(th)*sin(phi),cos(th)]
sph2dr = o3d.sm(smap,tp,norm=True)
(eth, ephi) = sph2dr.mv()
grad_tp = sph2dr.grad
F = sph2dr.mv('F','vector',f=True)
f = sph2dr.mv('f','scalar',f=True)
print('f =',f)
print('grad*f =',grad_tp * f)
print('F =',F)
print('grad*F =',grad_tp * F)
return
if __name__ == "__main__":
#Eprint()
Format()
main()
# xpdf()
xpdf(pdfprog=None)
| 26.430303 | 101 | 0.527402 |
bca0457fb6a18be34f58d715ebe31825c2a01ed2 | 4,002 | py | Python | bootstrap.py | iceisblue/HLavalink | 907dce5df3500e1647c7688a6c2cc679ad46142f | [
"MIT"
] | null | null | null | bootstrap.py | iceisblue/HLavalink | 907dce5df3500e1647c7688a6c2cc679ad46142f | [
"MIT"
] | null | null | null | bootstrap.py | iceisblue/HLavalink | 907dce5df3500e1647c7688a6c2cc679ad46142f | [
"MIT"
] | null | null | null | """
Lavalink on Heroku bootstrap script
Credit to diniboy for sed script
"""
from os import system, environ, popen
class LavalinkBootstrap:
    """
    Downloads, configures and launches a Lavalink server on a Heroku dyno.

    All shell interaction deliberately goes through ``os.system``/``os.popen``
    so it behaves exactly like Heroku's bash-based build environment.
    """

    def prepare_version_number(self):
        """Fetch the latest Lavalink release tag from the GitHub API.

        The result is cached on ``self._version_number``.  When curl/grep are
        unavailable or the network is down it ends up as the empty string, in
        which case the release download URL is simply invalid and the later
        download step fails visibly.
        """
        self._version_number = popen(
            """curl --silent "https://api.github.com/repos/Freyacodes/Lavalink/releases/latest" | grep -Po '"tag_name": "\K.*?(?=")'"""
        ).read().strip()

    @staticmethod
    def _build_run_command(additional_options):
        """Compose the java launch command, treating None/empty options as absent.

        BUG FIX: previously ``environ.get("ADDITIONAL_JAVA_OPTIONS")`` could be
        ``None``, producing the literal command ``java -jar Lavalink.jar None``.
        """
        return f"java -jar Lavalink.jar {additional_options or ''}".strip()

    def __init__(self):
        """
        Doing important stuff here: resolve the release channel, the download
        URL and every shell command used by the other methods.
        """
        self.prepare_version_number()  # Fixes #1
        # Any value other than an explicit "no"/"0"/"n" (including the
        # variable being unset) selects the development build.
        self.use_dev_lavalink = str(environ.get("USE_DEV_LAVALINK")).lower() not in ("no", "0", "n")
        if self.use_dev_lavalink:
            print("[INFO] Using developer Lavalink version")
            self.download_command = "curl -L https://ci.fredboat.com/repository/download/Lavalink_Build/8231:id/Lavalink.jar?guest=1 -o Lavalink.jar"
        else:
            self.download_command = f"curl -L https://github.com/Freyacodes/Lavalink/releases/download/{self._version_number}/Lavalink.jar -O"
        print(f"[INFO] Download command: {self.download_command}")
        # sed templates; $PORT/$PASSWORD are expanded by the shell from
        # Heroku config vars when the command runs.
        self.replace_port_command = 'sed -i "s|DYNAMICPORT|$PORT|" application.yml'
        self.replace_password_command = 'sed -i "s|DYNAMICPASSWORD|$PASSWORD|" application.yml'
        self.replace_password_command_no_password = 'sed -i "s|DYNAMICPASSWORD|youshallnotpass|" application.yml'
        # Heroku provides basic Java configuration based on dyno size, no
        # need in limiting memory.  Default to "" so an unset variable does
        # not leak the string "None" into the command line (bug fix).
        self._additional_options = environ.get("ADDITIONAL_JAVA_OPTIONS") or ""
        self.run_command = self._build_run_command(self._additional_options)

    def replace_password_and_port(self):
        """
        Replacing password and port in application.yml via the sed templates.
        Falls back to the default password when PASSWORD is not configured.
        """
        print(
            "[INFO] Replacing port..."
        )
        try:
            system(self.replace_port_command)
            if not environ.get("PASSWORD"):
                print(
                    """
                    [WARNING] You have not specified your Lavalink password in config vars. To do this, go to settings
                    and set the PASSWORD environment variable
                    """
                )
                return system(self.replace_password_command_no_password)
            system(self.replace_password_command)
        except BaseException as exc:
            print(
                f"[ERROR] Failed to replace port/password. Info: {exc}"
            )
        else:
            print(
                "[INFO] Done. Config is ready now"
            )

    def download(self):
        """
        Downloads the selected build of Lavalink into the working directory.
        """
        print(
            "[INFO] Downloading latest release of Lavalink..."
        )
        try:
            system(self.download_command)
        except BaseException as exc:
            print(
                f"[ERROR] Lavalink download failed. Info: {exc}"
            )
        else:
            print(
                "[INFO] Lavalink download OK"
            )

    def run(self):
        """
        Runs Lavalink instance: download, patch the config, then block on java.
        """
        self.download()
        self.replace_password_and_port()
        print(
            "[INFO] Starting Lavalink..."
        )
        try:
            system(self.run_command)
        except BaseException as exc:
            print(
                f"[ERROR] Failed to start Lavalink. Info: {exc}"
            )
if __name__ == "__main__":
"""
Starts our instance
"""
LavalinkBootstrap().run()
| 25.819355 | 283 | 0.528986 |
be6b34f3b88a04fcc174e78e98c88a8fd02d05a0 | 4,033 | py | Python | configs/top_down/darkpose/coco/res50_coco_384x288_dark.py | RuisongZhou/mmpose | a79c649ba07e8a9db24f1467826b9432c09134c6 | [
"Apache-2.0"
] | null | null | null | configs/top_down/darkpose/coco/res50_coco_384x288_dark.py | RuisongZhou/mmpose | a79c649ba07e8a9db24f1467826b9432c09134c6 | [
"Apache-2.0"
] | null | null | null | configs/top_down/darkpose/coco/res50_coco_384x288_dark.py | RuisongZhou/mmpose | a79c649ba07e8a9db24f1467826b9432c09134c6 | [
"Apache-2.0"
] | 1 | 2021-12-29T08:21:50.000Z | 2021-12-29T08:21:50.000Z | log_level = 'INFO'
load_from = None
resume_from = None
dist_params = dict(backend='nccl')
workflow = [('train', 1)]
checkpoint_config = dict(interval=10)
evaluation = dict(interval=5, metric='mAP')
optimizer = dict(
type='Adam',
lr=5e-4,
)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[170, 200])
total_epochs = 210
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
channel_cfg = dict(
num_output_channels=17,
dataset_joints=17,
dataset_channel=[
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
],
inference_channel=[
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16
])
# model settings
model = dict(
type='TopDown',
pretrained='models/pytorch/imagenet/resnet50-19c8e357.pth',
backbone=dict(type='ResNet', depth=50),
keypoint_head=dict(
type='TopDownSimpleHead',
in_channels=2048,
out_channels=channel_cfg['num_output_channels'],
),
train_cfg=dict(),
test_cfg=dict(
flip_test=True,
post_process=True,
shift_heatmap=True,
unbiased_decoding=True,
modulate_kernel=11),
loss_pose=dict(type='JointsMSELoss', use_target_weight=True))
data_cfg = dict(
# image_size=[192, 256],
# heatmap_size=[48, 64],
image_size=[288, 384],
heatmap_size=[72, 96],
num_output_channels=channel_cfg['num_output_channels'],
num_joints=channel_cfg['dataset_joints'],
dataset_channel=channel_cfg['dataset_channel'],
inference_channel=channel_cfg['inference_channel'],
soft_nms=False,
nms_thr=1.0,
oks_thr=0.9,
vis_thr=0.2,
bbox_thr=1.0,
use_gt_bbox=False,
image_thr=0.0,
bbox_file='data/coco/person_detection_results/'
'COCO_val2017_detections_AP_H_56_person.json',
)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='TopDownRandomFlip', flip_prob=0.5),
dict(
type='TopDownHalfBodyTransform',
num_joints_half_body=8,
prob_half_body=0.3),
dict(
type='TopDownGetRandomScaleRotation', rot_factor=40, scale_factor=0.5),
dict(type='TopDownAffine'),
dict(type='ToTensor'),
dict(
type='NormalizeTensor',
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
dict(type='TopDownGenerateTarget', sigma=2, unbiased_encoding=True),
dict(
type='Collect',
keys=['img', 'target', 'target_weight'],
meta_keys=[
'image_file', 'joints_3d', 'joints_3d_visible', 'center', 'scale',
'rotation', 'bbox_score', 'flip_pairs'
]),
]
val_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='TopDownAffine'),
dict(type='ToTensor'),
dict(
type='NormalizeTensor',
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
dict(
type='Collect',
keys=[
'img',
],
meta_keys=[
'image_file', 'center', 'scale', 'rotation', 'bbox_score',
'flip_pairs'
]),
]
test_pipeline = val_pipeline
data_root = 'data/coco'
data = dict(
samples_per_gpu=64,
workers_per_gpu=2,
train=dict(
type='TopDownCocoDataset',
ann_file=f'{data_root}/annotations/person_keypoints_train2017.json',
img_prefix=f'{data_root}/train2017/',
data_cfg=data_cfg,
pipeline=train_pipeline),
val=dict(
type='TopDownCocoDataset',
ann_file=f'{data_root}/annotations/person_keypoints_val2017.json',
img_prefix=f'{data_root}/val2017/',
data_cfg=data_cfg,
pipeline=val_pipeline),
test=dict(
type='TopDownCocoDataset',
ann_file=f'{data_root}/annotations/person_keypoints_val2017.json',
img_prefix=f'{data_root}/val2017/',
data_cfg=data_cfg,
pipeline=val_pipeline),
)
| 27.435374 | 79 | 0.624597 |
a941caa64a2e25c78330960136845d11fda67919 | 14,602 | py | Python | Co-Simulation/Sumo/sumo-1.7.0/tools/assign/assign.py | uruzahe/carla | 940c2ab23cce1eda1ef66de35f66b42d40865fb1 | [
"MIT"
] | 4 | 2020-11-13T02:35:56.000Z | 2021-03-29T20:15:54.000Z | Co-Simulation/Sumo/sumo-1.7.0/tools/assign/assign.py | uruzahe/carla | 940c2ab23cce1eda1ef66de35f66b42d40865fb1 | [
"MIT"
] | 9 | 2020-12-09T02:12:39.000Z | 2021-02-18T00:15:28.000Z | Co-Simulation/Sumo/sumo-1.7.0/tools/assign/assign.py | uruzahe/carla | 940c2ab23cce1eda1ef66de35f66b42d40865fb1 | [
"MIT"
] | 1 | 2020-11-20T19:31:26.000Z | 2020-11-20T19:31:26.000Z | # Eclipse SUMO, Simulation of Urban MObility; see https://eclipse.org/sumo
# Copyright (C) 2007-2020 German Aerospace Center (DLR) and others.
# This program and the accompanying materials are made available under the
# terms of the Eclipse Public License 2.0 which is available at
# https://www.eclipse.org/legal/epl-2.0/
# This Source Code may also be made available under the following Secondary
# Licenses when the conditions for such availability set forth in the Eclipse
# Public License 2.0 are satisfied: GNU General Public License, version 2
# or later which is available at
# https://www.gnu.org/licenses/old-licenses/gpl-2.0-standalone.html
# SPDX-License-Identifier: EPL-2.0 OR GPL-2.0-or-later
# @file assign.py
# @author Yun-Pang Floetteroed
# @author Daniel Krajzewicz
# @author Michael Behrisch
# @date 2007-11-25
"""
This script is for executing traffic assignment according to the required assignment model.
The incremental assignment model, the C-Logit assignment model and the Lohse assignment model
are included in this script.
"""
from __future__ import absolute_import
from __future__ import print_function
import math
import operator
import elements
from elements import Vehicle
def doIncAssign(net, vehicles, verbose, iteration, odestimation, endVertices, start, startVertex, matrixPshort,
smallDemand, D, P, AssignedVeh, AssignedTrip, vehID, assignSmallDemand, linkChoiceMap, odPairsMap):
for end, endVertex in enumerate(endVertices):
getlinkChoices = False
if ((odestimation and matrixPshort[start][end] > 0.) or
(matrixPshort[start][end] > 1. or (assignSmallDemand and smallDemand[start][end] > 0.))):
getlinkChoices = True
if startVertex._id != endVertex._id and getlinkChoices:
# if matrixPling and the matrixTruck exist, matrixPlong[start][end]
# > 0.0 or matrixTruck[start][end] > 0.0): should be added.
helpPath = []
vertex = endVertex
demand = 0.
if matrixPshort[start][end] > 1. or odestimation:
demand = matrixPshort[start][end] / float(iteration)
if assignSmallDemand and not odestimation:
demand += smallDemand[start][end]
while vertex != startVertex:
if P[vertex].kind == "real":
helpPath.append(P[vertex])
P[vertex].flow += demand
if getlinkChoices and P[vertex] in net._detectedEdges:
odIndex = odPairsMap[startVertex._id][endVertex._id]
linkChoiceMap[P[vertex].detected][odIndex] += demand
vertex = P[vertex].source
helpPath.reverse()
# the amount of the pathflow, which will be released at this
# iteration
if assignSmallDemand:
smallDemand[start][end] = 0.
if not odestimation:
AssignedTrip[startVertex][endVertex] += demand
vehID = assignVeh(
verbose, vehicles, startVertex, endVertex, helpPath, AssignedVeh, AssignedTrip, vehID)
return vehID, smallDemand, linkChoiceMap
# execute the SUE model with the given path set
def doSUEAssign(net, options, startVertices, endVertices, matrixPshort, iter, lohse, first):
if lohse:
if options.verbose:
foutassign = open('lohse_pathSet.txt', 'a')
foutassign.write('\niter:%s\n' % iter)
# matrixPlong and matrixTruck should be added if available.
if options.verbose:
print('pathNum in doSUEAssign:', elements.pathNum)
# calculate the overlapping factors between any two paths of a given OD
# pair
for start, startVertex in enumerate(startVertices):
for end, endVertex in enumerate(endVertices):
cumulatedflow = 0.
pathcount = 0
if matrixPshort[start][end] > 0. and startVertex._id != endVertex._id:
ODPaths = net._paths[startVertex][endVertex]
for path in ODPaths:
path.getPathTimeUpdate()
calCommonalityAndChoiceProb(ODPaths, options.alpha, lohse)
# calculate the path choice probabilities and the path flows
# for the given OD Pair
for path in ODPaths:
pathcount += 1
if pathcount < len(ODPaths):
path.helpflow = matrixPshort[
start][end] * path.choiceprob
cumulatedflow += path.helpflow
if lohse and options.verbose:
foutassign.write(' path:%s\n' % path.label)
foutassign.write(
' path.choiceprob:%s\n' % path.choiceprob)
foutassign.write(
' path.helpflow:%s\n' % path.helpflow)
foutassign.write(
' cumulatedflow:%s\n' % cumulatedflow)
else:
path.helpflow = matrixPshort[
start][end] - cumulatedflow
if lohse and options.verbose:
foutassign.write(
' last_path.helpflow:%s\n' % path.helpflow)
if first and iter == 1:
for edge in path.edges:
edge.flow += path.helpflow
else:
for edge in path.edges:
edge.helpflow += path.helpflow
# Reset the convergence index for the C-Logit model
notstable = 0
stable = False
# link travel times and link flows will be updated according to the latest
# traffic assingment
for edge in net._edges:
if (first and iter > 1) or (not first):
exflow = edge.flow
edge.flow = edge.flow + (1. / iter) * (edge.helpflow - edge.flow)
if not lohse:
if edge.flow > 0.:
if abs(edge.flow - exflow) / edge.flow > options.sueTolerance:
notstable += 1
elif edge.flow == 0.:
if exflow != 0. and (abs(edge.flow - exflow) / exflow > options.sueTolerance):
notstable += 1
elif edge.flow < 0.:
notstable += 1
edge.flow = 0.
else:
if edge.flow < 0.:
edge.flow = 0.
# reset the edge.helpflow for the next iteration
edge.helpflow = 0.0
edge.getActualTravelTime(options, lohse)
if options.dijkstra == 'boost':
edge.boost.weight = edge.helpacttime
if edge.queuetime > 1.:
notstable += 1
if lohse and options.verbose:
foutassign.close()
if not lohse and iter > 5:
if notstable == 0:
stable = True
elif notstable < math.ceil(net.geteffEdgeCounts() * 0.005) or notstable < 3:
stable = True
if iter > options.maxiteration:
stable = True
print('Number of max. iterations is reached!')
print('stable:', stable)
return stable
# calculate the commonality factors in the C-Logit model
def calCommonalityAndChoiceProb(ODPaths, alpha, lohse):
    """Assign a choice probability to every path of one OD pair (in place).

    lohse=False: C-Logit — utility is the actual path time plus the
    commonality penalty alpha*log(sumOverlap); shares follow a logit with
    dispersion theta from getThetaForCLogit.
    lohse=True: Lohse variant — utility is built from pathhelpacttime and
    the exponent uses a beta factor derived from the fastest path's time.
    A single-path OD pair trivially gets probability 1.
    """
    if len(ODPaths) > 1:
        # Commonality-corrected utility: heavily overlapping paths are
        # penalised through log(sumOverlap).
        for path in ODPaths:
            if not lohse:
                path.utility = path.actpathtime + \
                    alpha * math.log(path.sumOverlap)
            else:
                path.utility = path.pathhelpacttime + \
                    alpha * math.log(path.sumOverlap)
        if lohse:
            minpath = min(ODPaths, key=operator.attrgetter('pathhelpacttime'))
            beta = 12. / (1. + math.exp(0.7 - 0.015 * minpath.pathhelpacttime))
        else:
            theta = getThetaForCLogit(ODPaths)
        # Compare each path against every alternative of the same OD pair.
        for pathone in ODPaths:
            sum_exputility = 0.
            for pathtwo in ODPaths:
                if pathone != pathtwo:
                    if not lohse:
                        sum_exputility += math.exp(theta *
                                                   (pathone.utility - pathtwo.utility))
                    else:
                        pathtwoPart = beta * \
                            (pathtwo.utility / minpath.utility - 1.)
                        pathonePart = beta * \
                            (pathone.utility / minpath.utility - 1.)
                        sum_exputility += math.exp(-(pathtwoPart *
                                                     pathtwoPart) + pathonePart * pathonePart)
            # Logit share: 1 / (1 + sum of relative exponentiated utilities).
            pathone.choiceprob = 1. / (1. + sum_exputility)
    else:
        for path in ODPaths:
            path.choiceprob = 1.
# calculate the path choice probabilities and the path flows and generate
# the vehicular data for each OD Pair
def doSUEVehAssign(net, vehicles, options, counter, matrixPshort, startVertices, endVertices, AssignedVeh,
AssignedTrip, vehID, lohse):
if options.verbose:
if counter == 0:
foutpath = open('paths.txt', 'w')
fouterror = open('errors.txt', 'w')
else:
foutpath = open('paths.txt', 'a')
fouterror = open('errors.txt', 'a')
if lohse:
foutpath.write(
'begin the doSUEVehAssign based on the lohse assignment model!')
else:
foutpath.write(
'begin the doSUEVehAssign based on the c-logit model!')
foutpath.write('the analyzed matrix=%s' % counter)
TotalPath = 0
for start, startVertex in enumerate(startVertices):
if options.verbose:
foutpath.write('\norigin=%s, ' % startVertex)
for end, endVertex in enumerate(endVertices):
pathcount = 0
cumulatedflow = 0.
if matrixPshort[start][end] > 0. and startVertex._id != endVertex._id:
if options.verbose:
foutpath.write('destination=%s' % endVertex)
ODPaths = net._paths[startVertex][endVertex]
for path in ODPaths:
TotalPath += 1
path.getPathTimeUpdate()
if lohse:
path.pathhelpacttime = path.actpathtime
calCommonalityAndChoiceProb(ODPaths, options.alpha, lohse)
for path in ODPaths:
pathcount += 1
if pathcount < len(ODPaths):
path.pathflow = matrixPshort[
start][end] * path.choiceprob
cumulatedflow += path.pathflow
else:
path.pathflow = matrixPshort[
start][end] - cumulatedflow
if options.verbose and path.pathflow < 0.:
fouterror.write(
'*********************** the path flow on the path:%s < 0.!!' % path.label)
if options.verbose:
foutpath.write('\npathID= %s, path flow=%4.4f, actpathtime=%4.4f, choiceprob=%4.4f, edges='
% (path.label, path.pathflow, path.actpathtime, path.choiceprob))
for item in path.edges:
foutpath.write('%s, ' % (item._id))
AssignedTrip[startVertex][endVertex] += path.pathflow
edges = []
for link in path.edges:
edges.append(link)
vehID = assignVeh(
options.verbose, vehicles, startVertex, endVertex, edges, AssignedVeh, AssignedTrip, vehID)
if options.verbose:
foutpath.write('\n')
if options.verbose:
print(
'total Number of the used paths for the current matrix:', TotalPath)
foutpath.write(
'\ntotal Number of the used paths for the current matrix:%s' % TotalPath)
foutpath.close()
fouterror.close()
return vehID
def assignVeh(verbose, vehicles, startVertex, endVertex, edges, AssignedVeh, AssignedTrip, vehID):
    """Create Vehicle objects until the integer vehicle count for this OD
    pair matches the rounded assigned trip demand.

    New vehicles are appended to ``vehicles`` and the AssignedVeh counter is
    updated in place; the last used vehicle id is returned so the caller can
    keep numbering vehicles globally.
    """
    while AssignedVeh[startVertex][endVertex] < int(round(AssignedTrip[startVertex][endVertex])):
        vehID += 1
        newVehicle = Vehicle(str(vehID))
        # NOTE(review): every vehicle of this OD pair shares the same route
        # list object — confirm downstream code never mutates it per vehicle.
        newVehicle.route = edges
        vehicles.append(newVehicle)
        AssignedVeh[startVertex][endVertex] += 1
    if verbose:
        print('vehID:', vehID)
        print('AssignedTrip[start][end]', AssignedTrip[startVertex][endVertex])
        print('AssignedVeh[start][end]', AssignedVeh[startVertex][endVertex])
    return vehID
def getThetaForCLogit(ODPaths):
    """Dispersion parameter theta for the C-Logit route-choice model.

    Returns 1.0 when the path travel times are (near-)uniform; otherwise
    theta = pi / (sqrt(6) * sd(times) * time of the fastest path).
    """
    times = [path.actpathtime for path in ODPaths]
    mean_time = sum(times) / float(len(ODPaths))
    squared_dev = 0.
    for current in times:
        squared_dev += (current - mean_time)**2.
    sd_time = (squared_dev / float(len(ODPaths)))**0.5
    if sd_time > 0.04:
        return math.pi / ((6.**0.5) * sd_time * min(times))
    return 1.
def doLohseStopCheck(net, options, stable, iter, maxIter, foutlog):
    """Decide whether the Lohse assignment loop may stop.

    The loop stops when every effective edge passes its stopCheck(), when the
    late-run fallback threshold fires, or when the iteration cap *maxIter* is
    hit.  Progress messages are appended to *foutlog*.  The incoming *stable*
    argument is ignored and recomputed; the new value is returned.
    """
    stable = False
    # Check if the convergence is reached (never before the 2nd iteration).
    if iter > 1:
        counts = 0
        # BUG FIX: dict.itervalues() exists only on Python 2; values()
        # iterates correctly on both Python 2 and 3.
        for edge in net._edges.values():
            stop = edge.stopCheck(options)
            if stop:
                counts += 1
        if counts == net.geteffEdgeCounts():
            stable = True
            foutlog.write(
                'The defined convergence is reached. The number of the required iterations:%s\n' % iter)
        # NOTE(review): 'counts' holds the number of *converged* links, so
        # this branch fires when fewer than 5% converged late in the run,
        # while the log message claims 95% — verify the intended condition.
        elif counts < int(net.geteffEdgeCounts() * 0.05) and float(iter) > options.maxiteration * 0.85:
            stable = True
            # BUG FIX: the literal percent sign must be escaped as '%%';
            # the old string made the %-formatting below raise a TypeError
            # ("not enough arguments") whenever this branch executed.
            foutlog.write(
                ('The number of the links with convergence is 95%% of the total links.' +
                 'The number of executed iterations:%s\n') % iter)
    if iter >= maxIter:
        print('The max. number of iterations is reached!')
        foutlog.write('The max. number(%s) of iterations is reached!\n' % iter)
        foutlog.write(
            'The number of new routes will be set to 0, since the max. number of iterations is reached.')
        stable = True
    print('stop?:', stable)
    print('iter_inside:', iter)
    return stable
| 40.787709 | 115 | 0.55376 |
e18f0fdd8d95ed6b45bb9c0417a07f72a77561f4 | 434 | py | Python | saleor/lib/python3.7/site-packages/elasticsearch_dsl/__init__.py | cxsper/saleor | 5566ddcdaf8f72ba872eca869798e66eb9cdae44 | [
"BSD-3-Clause"
] | 2 | 2019-12-06T15:40:14.000Z | 2020-07-29T21:30:35.000Z | saleor/lib/python3.7/site-packages/elasticsearch_dsl/__init__.py | cxsper/saleor | 5566ddcdaf8f72ba872eca869798e66eb9cdae44 | [
"BSD-3-Clause"
] | 13 | 2020-03-24T17:53:51.000Z | 2022-02-10T20:01:14.000Z | saleor/lib/python3.7/site-packages/elasticsearch_dsl/__init__.py | cxsper/saleor | 5566ddcdaf8f72ba872eca869798e66eb9cdae44 | [
"BSD-3-Clause"
] | null | null | null | from .query import Q
from .aggs import A
from .function import SF
from .search import Search, MultiSearch
from .field import *
from .document import DocType, MetaField, InnerDoc
from .mapping import Mapping
from .index import Index, IndexTemplate
from .analysis import analyzer, token_filter, char_filter, tokenizer
from .faceted_search import *
VERSION = (6, 0, 1)
__version__ = VERSION
__versionstr__ = '.'.join(map(str, VERSION))
| 28.933333 | 68 | 0.781106 |
d97e036221dd35529dfb303c4595fa0957bbf657 | 303 | py | Python | extensions/.stubs/clrclasses/__clrclasses__/System/Windows/Input/__init__.py | vicwjb/Pycad | 7391cd694b7a91ad9f9964ec95833c1081bc1f84 | [
"MIT"
] | 1 | 2020-03-25T03:27:24.000Z | 2020-03-25T03:27:24.000Z | extensions/.stubs/clrclasses/__clrclasses__/System/Windows/Input/__init__.py | vicwjb/Pycad | 7391cd694b7a91ad9f9964ec95833c1081bc1f84 | [
"MIT"
] | null | null | null | extensions/.stubs/clrclasses/__clrclasses__/System/Windows/Input/__init__.py | vicwjb/Pycad | 7391cd694b7a91ad9f9964ec95833c1081bc1f84 | [
"MIT"
] | null | null | null | from __clrclasses__.System import EventHandler as _n_0_t_0
import typing
class ICommand():
@property
def CanExecuteChanged(self) -> _n_0_t_0:
"""CanExecuteChanged Event: EventHandler"""
def CanExecute(self, parameter: object) -> bool:...
def Execute(self, parameter: object):...
| 33.666667 | 58 | 0.712871 |
aed04b398c1a7b3f1e01f72309400ecf3e4a39ec | 2,546 | py | Python | fix_scriptorium_annis_corpus.py | CopticScriptorium/publish | 4e5896ec203904f798cd34bf24dbbdef18f0140e | [
"Apache-2.0"
] | null | null | null | fix_scriptorium_annis_corpus.py | CopticScriptorium/publish | 4e5896ec203904f798cd34bf24dbbdef18f0140e | [
"Apache-2.0"
] | null | null | null | fix_scriptorium_annis_corpus.py | CopticScriptorium/publish | 4e5896ec203904f798cd34bf24dbbdef18f0140e | [
"Apache-2.0"
] | null | null | null | """
Script to fix common namespace conversion inconsistencies in Pepper relANNIS output
"""
import io, re, os
from shutil import move
def process_file(fname, outname, reps):
    """Copy *fname* to *outname* line by line, applying replacements.

    Each line is whitespace-trimmed and re-terminated with '\\n', every
    (old, new) pair in *reps* is substituted in order, and rows that look
    like numbered scriptorium translation entries are dropped entirely.
    """
    with io.open(fname, encoding="utf8") as source:
        buffered = source.readlines()
    with io.open(outname, "w", encoding="utf8", newline="\n") as sink:
        for raw in buffered:
            text = raw.strip() + "\n"
            for old, new in reps:
                text = text.replace(old, new)
            is_translation = text.endswith("\ttranslation\n")
            if is_translation and re.match("[0-9]+\tscriptorium\ttranslation", text) is not None:
                # Numbered scriptorium translation rows are duplicates: skip.
                continue
            sink.write(text)
def process_dir(dirname, corpus=None, test=False):
    """Normalise the relANNIS tables inside *dirname* in place.

    Rewrites node_annotation and component (and, when test=True, also corpus
    and resolver_vis_map, renaming *corpus* to "<corpus>_test") by writing
    fixed copies next to the originals and then moving them over them.
    """
    ext = "annis"
    # Namespace substitutions for the node annotation and component tables.
    reps = {
        "node_anno": [
            ("\tdefault_ns\tentity", "\tcoref\tentity"),
            ("\tdefault_ns\ttype", "\tcoref\ttype"),
            ("\tsalt\t", "\tscriptorium\t"),
            ("\tdefault_ns\t", "\tscriptorium\t"),
        ],
        "component": [("scriptorium\tdep", "dep\tdep")],
    }
    if not dirname.endswith(os.sep):
        dirname += os.sep
    process_file(dirname + "node_annotation." + ext, dirname + "node_annotation_fixed." + ext, reps["node_anno"])
    process_file(dirname + "component." + ext, dirname + "component_fixed." + ext, reps["component"])
    # Replace the originals with the fixed copies.
    os.remove(dirname + "component." + ext)
    os.remove(dirname + "node_annotation." + ext)
    move(dirname + "component_fixed." + ext, dirname + "component." + ext)
    move(dirname + "node_annotation_fixed." + ext, dirname + "node_annotation." + ext)
    if test:
        # Suffix the corpus name so a test corpus can be imported alongside
        # the production one.
        reps = [(corpus + "\t", corpus + "_test" + "\t")]
        process_file(dirname + "corpus." + ext, dirname + "corpus_fixed." + ext, reps)
        process_file(dirname + "resolver_vis_map." + ext, dirname + "resolver_vis_map_fixed." + ext, reps)
        os.remove(dirname + "resolver_vis_map." + ext)
        os.remove(dirname + "corpus." + ext)
        move(dirname + "corpus_fixed." + ext, dirname + "corpus." + ext)
        move(dirname + "resolver_vis_map_fixed." + ext, dirname + "resolver_vis_map." + ext)
if __name__ == "__main__":
ext = "annis"
reps = {
"node_anno": [("\tsalt\t", "\tscriptorium\t"), ("\tdefault_ns\t", "\tscriptorium\t")],
"component": [("scriptorium\tdep", "dep\tdep")],
}
process_file("node_annotation." + ext, "node_annotation_fixed." + ext, reps["node_anno"])
process_file("component." + ext, "component_fixed." + ext, reps["component"])
| 38 | 113 | 0.58209 |
c76f13ac977019d8e545e6abb0151e4e12a3fe10 | 2,713 | py | Python | examples/advanced/additional_learning.py | vishalbelsare/FEDOT | 3a6f06b29cf2f173008d119f7cb5dc705a45f695 | [
"BSD-3-Clause"
] | null | null | null | examples/advanced/additional_learning.py | vishalbelsare/FEDOT | 3a6f06b29cf2f173008d119f7cb5dc705a45f695 | [
"BSD-3-Clause"
] | null | null | null | examples/advanced/additional_learning.py | vishalbelsare/FEDOT | 3a6f06b29cf2f173008d119f7cb5dc705a45f695 | [
"BSD-3-Clause"
] | null | null | null | from copy import deepcopy
import pandas as pd
from fedot.api.main import Fedot
from fedot.core.operations.atomized_model import AtomizedModel
from fedot.core.pipelines.node import PrimaryNode, SecondaryNode
from fedot.core.pipelines.pipeline import Pipeline
from fedot.core.utils import fedot_project_root
def run_additional_learning_example():
train_data_path = f'{fedot_project_root()}/cases/data/scoring/scoring_train.csv'
test_data_path = f'{fedot_project_root()}/cases/data/scoring/scoring_test.csv'
train_data = pd.read_csv(train_data_path)
test_data = pd.read_csv(test_data_path)
test_data_target = test_data['target']
del test_data['target']
problem = 'classification'
auto_model = Fedot(problem=problem, seed=42, preset='best_quality', timeout=5,
composer_params={'initial_assumption': Pipeline(
SecondaryNode('logit',
nodes_from=[
PrimaryNode('scaling')]))})
auto_model.fit(features=deepcopy(train_data.head(1000)), target='target')
auto_model.predict_proba(features=deepcopy(test_data))
print('auto_model', auto_model.get_metrics(target=deepcopy(test_data_target)))
prev_model = auto_model.current_pipeline
prev_model.show()
prev_model.unfit()
atomized_model = Pipeline(
SecondaryNode(operation_type=AtomizedModel(prev_model), nodes_from=[PrimaryNode('scaling')]))
non_atomized_model = deepcopy(prev_model)
train_data = train_data.head(5000)
timeout = 1
auto_model_from_atomized = Fedot(problem=problem, seed=42, preset='best_quality', timeout=timeout,
composer_params={'initial_assumption': atomized_model}, verbose_level=2)
auto_model_from_atomized.fit(features=deepcopy(train_data), target='target')
auto_model_from_atomized.predict_proba(features=deepcopy(test_data))
auto_model_from_atomized.current_pipeline.show()
print('auto_model_from_atomized', auto_model_from_atomized.get_metrics(deepcopy(test_data_target)))
auto_model_from_pipeline = Fedot(problem=problem, seed=42, preset='best_quality', timeout=timeout,
composer_params={'initial_assumption': non_atomized_model}, verbose_level=2)
auto_model_from_pipeline.fit(features=deepcopy(train_data), target='target')
auto_model_from_pipeline.predict_proba(features=deepcopy(test_data))
auto_model_from_pipeline.current_pipeline.show()
print('auto_model_from_pipeline', auto_model_from_pipeline.get_metrics(deepcopy(test_data_target)))
if __name__ == '__main__':
run_additional_learning_example()
| 44.47541 | 113 | 0.729082 |
9d7bc2e2f40b4a1e9b2aa81e1c7ccde70cdab78f | 406 | py | Python | fstools/util/shapeutil.py | FireByTrial/farmsimtools | 74cf35108b2883b77effd5b55edf4be436287e36 | [
"CC-BY-4.0"
] | null | null | null | fstools/util/shapeutil.py | FireByTrial/farmsimtools | 74cf35108b2883b77effd5b55edf4be436287e36 | [
"CC-BY-4.0"
] | 1 | 2020-11-03T15:39:37.000Z | 2020-11-03T15:39:37.000Z | fstools/util/shapeutil.py | FireByTrial/farmsimtools | 74cf35108b2883b77effd5b55edf4be436287e36 | [
"CC-BY-4.0"
] | null | null | null | import os
from contextlib import contextmanager
def shape_components(shp: str):
    """Map each shapefile sidecar extension to the path sharing *shp*'s stem."""
    stem = os.path.splitext(shp)[0]
    components = {}
    for extension in ('shp', 'shx', 'dbf'):
        components[extension] = f"{stem}.{extension}"
    return components
@contextmanager
def shape_readers(shp, shx, dbf):
with open(shp, "rb") as spi:
with open(shx, "rb") as sxi:
with open(dbf, "rb") as dbi:
yield {'shp': spi, 'shx': sxi, 'dbf': dbi}
| 23.882353 | 82 | 0.568966 |
03eb677e8ac96a2b61a48a1e0151ef31de63a2f7 | 6,419 | py | Python | hedwig/models/reg_lstm/__main__.py | arjunnlp/hedwig-anlp | b8f6c50d788509bc9e5670caeee3503257d716d0 | [
"Apache-2.0"
] | 3 | 2019-07-20T15:23:59.000Z | 2021-04-26T02:57:59.000Z | hedwig/models/reg_lstm/__main__.py | arjunnlp/hedwig-anlp | b8f6c50d788509bc9e5670caeee3503257d716d0 | [
"Apache-2.0"
] | null | null | null | hedwig/models/reg_lstm/__main__.py | arjunnlp/hedwig-anlp | b8f6c50d788509bc9e5670caeee3503257d716d0 | [
"Apache-2.0"
] | null | null | null | import logging
import os
import random
from copy import deepcopy
import numpy as np
import torch
from common.evaluate import EvaluatorFactory
from common.train import TrainerFactory
from datasets.aapd import AAPD
from datasets.imdb import IMDB
from datasets.reuters import Reuters
from datasets.yelp2014 import Yelp2014
from datasets.mbti import MBTI
from models.reg_lstm.args import get_args
from models.reg_lstm.model import RegLSTM
from datasets.sst import SST
class UnknownWordVecCache(object):
    """Memoize one random unknown-word vector per tensor size.

    The first request for a given size draws a uniform(-0.25, 0.25)
    tensor; later requests for the same size return the identical
    cached tensor, so every unknown token of that size shares one
    embedding.
    """
    cache = {}

    @classmethod
    def unk(cls, tensor):
        key = tuple(tensor.size())
        cached = cls.cache.get(key)
        if cached is None:
            cached = torch.Tensor(tensor.size())
            cached.uniform_(-0.25, 0.25)
            cls.cache[key] = cached
        return cached
def get_logger():
    """Return the module logger configured with one console handler.

    BUG FIX: the original attached a fresh StreamHandler on every call,
    so calling get_logger() more than once duplicated every log line.
    The handler is now attached only if the logger has none yet.

    Returns
    -------
    logging.Logger
        The module-level logger at INFO level with a DEBUG-level
        stream handler.
    """
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.INFO)
    if not logger.handlers:  # guard against duplicate handlers
        ch = logging.StreamHandler()
        ch.setLevel(logging.DEBUG)
        formatter = logging.Formatter('%(levelname)s - %(message)s')
        ch.setFormatter(formatter)
        logger.addHandler(ch)
    return logger
def evaluate_dataset(split_name, dataset_cls, model, embedding, loader, batch_size, device, is_multilabel):
    """Score *model* on one dataset split and print the metric values."""
    evaluator = EvaluatorFactory.get_evaluator(
        dataset_cls, model, embedding, loader, batch_size, device)
    # Not every evaluator supports multi-label mode; set the flag only
    # when the attribute exists.
    if hasattr(evaluator, 'is_multilabel'):
        evaluator.is_multilabel = is_multilabel
    scores, metric_names = evaluator.get_scores()
    print('Evaluation metrics for', split_name)
    print(metric_names)
    print(scores)
if __name__ == '__main__':
    # Set default configuration in args.py
    args = get_args()
    logger = get_logger()
    # Set random seed for reproducibility
    torch.manual_seed(args.seed)
    torch.backends.cudnn.deterministic = True
    np.random.seed(args.seed)
    random.seed(args.seed)
    # gpu == -1 is taken as "use the CPU" by the iterators below
    # (assumed torchtext convention -- confirm in dataset_class.iters).
    if not args.cuda:
        args.gpu = -1
    if torch.cuda.is_available() and args.cuda:
        print('Note: You are using GPU for training')
        torch.cuda.set_device(args.gpu)
        torch.cuda.manual_seed(args.seed)
    if torch.cuda.is_available() and not args.cuda:
        print('Warning: Using CPU for training')
    dataset_map = {
        'Reuters': Reuters,
        'AAPD': AAPD,
        'IMDB': IMDB,
        'Yelp2014': Yelp2014,
        'MBTI': MBTI,
        'SST-2': SST
    }
    if args.dataset not in dataset_map:
        raise ValueError('Unrecognized dataset')
    else:
        dataset_class = dataset_map[args.dataset]
        train_iter, dev_iter, test_iter = dataset_class.iters(args.data_dir,
                                                              args.word_vectors_file,
                                                              args.word_vectors_dir,
                                                              batch_size=args.batch_size,
                                                              device=args.gpu,
                                                              unk_init=UnknownWordVecCache.unk)
    # Model configuration: CLI args plus statistics derived from the dataset.
    config = deepcopy(args)
    config.dataset = train_iter.dataset
    config.target_class = train_iter.dataset.NUM_CLASSES
    config.words_num = len(train_iter.dataset.TEXT_FIELD.vocab)
    print('Dataset:', args.dataset)
    print('No. of target classes:', train_iter.dataset.NUM_CLASSES)
    print('No. of train instances', len(train_iter.dataset))
    print('No. of dev instances', len(dev_iter.dataset))
    print('No. of test instances', len(test_iter.dataset))
    if args.resume_snapshot:
        # Resume from a serialized model, remapping storages onto the
        # requested device.
        if args.cuda:
            model = torch.load(args.resume_snapshot, map_location=lambda storage, location: storage.cuda(args.gpu))
        else:
            model = torch.load(args.resume_snapshot, map_location=lambda storage, location: storage)
    else:
        model = RegLSTM(config)
        if args.cuda:
            model.cuda()
    if not args.trained_model:
        save_path = os.path.join(args.save_path, dataset_map[args.dataset].NAME)
        os.makedirs(save_path, exist_ok=True)
    # Only parameters with requires_grad are optimized (e.g. frozen
    # embeddings are skipped).
    parameter = filter(lambda p: p.requires_grad, model.parameters())
    optimizer = torch.optim.Adam(parameter, lr=args.lr, weight_decay=args.weight_decay)
    train_evaluator = EvaluatorFactory.get_evaluator(dataset_class, model, None, train_iter, args.batch_size, args.gpu)
    test_evaluator = EvaluatorFactory.get_evaluator(dataset_class, model, None, test_iter, args.batch_size, args.gpu)
    dev_evaluator = EvaluatorFactory.get_evaluator(dataset_class, model, None, dev_iter, args.batch_size, args.gpu)
    if hasattr(train_evaluator, 'is_multilabel'):
        train_evaluator.is_multilabel = dataset_class.IS_MULTILABEL
    if hasattr(test_evaluator, 'is_multilabel'):
        test_evaluator.is_multilabel = dataset_class.IS_MULTILABEL
    if hasattr(dev_evaluator, 'is_multilabel'):
        dev_evaluator.is_multilabel = dataset_class.IS_MULTILABEL
    trainer_config = {
        'optimizer': optimizer,
        'batch_size': args.batch_size,
        'log_interval': args.log_every,
        'patience': args.patience,
        'model_outfile': args.save_path,
        'logger': logger,
        'is_multilabel': dataset_class.IS_MULTILABEL
    }
    trainer = TrainerFactory.get_trainer(args.dataset, model, None, train_iter, trainer_config, train_evaluator, test_evaluator, dev_evaluator)
    if not args.trained_model:
        trainer.train(args.epochs)
    else:
        if args.cuda:
            model = torch.load(args.trained_model, map_location=lambda storage, location: storage.cuda(args.gpu))
        else:
            model = torch.load(args.trained_model, map_location=lambda storage, location: storage)
    # NOTE(review): this unconditionally reloads the best snapshot written
    # by the trainer, clobbering the model loaded in the `else` branch just
    # above -- confirm whether this line should apply only after training.
    model = torch.load(trainer.snapshot_path)
    if model.beta_ema > 0:
        # Evaluate with the exponential-moving-average weights; the raw
        # parameters are restored after evaluation below.
        old_params = model.get_params()
        model.load_ema_params()
    # Calculate dev and test metrics
    evaluate_dataset('dev', dataset_class, model, None, dev_iter, args.batch_size,
                     is_multilabel=dataset_class.IS_MULTILABEL,
                     device=args.gpu)
    evaluate_dataset('test', dataset_class, model, None, test_iter, args.batch_size,
                     is_multilabel=dataset_class.IS_MULTILABEL,
                     device=args.gpu)
    if model.beta_ema > 0:
        model.load_params(old_params)
| 36.471591 | 143 | 0.666147 |
3830d822793f3e6d9776be0b4c7a856ce53a9090 | 820 | py | Python | mac/pyobjc-framework-Quartz/Examples/PathDemo/PathDemoController.py | albertz/music-player | d23586f5bf657cbaea8147223be7814d117ae73d | [
"BSD-2-Clause"
] | 132 | 2015-01-01T10:02:42.000Z | 2022-03-09T12:51:01.000Z | mac/pyobjc-framework-Quartz/Examples/PathDemo/PathDemoController.py | mba811/music-player | 7998986b34cfda2244ef622adefb839331b81a81 | [
"BSD-2-Clause"
] | 6 | 2015-01-06T08:23:19.000Z | 2019-03-14T12:22:06.000Z | mac/pyobjc-framework-Quartz/Examples/PathDemo/PathDemoController.py | mba811/music-player | 7998986b34cfda2244ef622adefb839331b81a81 | [
"BSD-2-Clause"
] | 27 | 2015-02-23T11:51:43.000Z | 2022-03-07T02:34:18.000Z | from Cocoa import *
import objc
class PathDemoController (NSObject):
    """Controller for the PathDemo window: wires the demo picker to the view."""
    # Interface Builder outlets, bound from the nib by the PyObjC bridge.
    button = objc.ivar()
    popup = objc.ivar()
    window = objc.ivar()
    demoView = objc.ivar()
    def awakeFromNib(self):
        # Add the title of your new demo to the END of this array.
        # List order must match the demo numbers the view dispatches on.
        titles = [ 'Rectangles', 'Circles', 'Bezier Paths', 'Circle Clipping' ]
        self.popup.removeAllItems()
        for t in titles:
            self.popup.addItemWithTitle_(t)
    @objc.IBAction
    def runAgain_(self, sender):
        # Re-running just re-selects the current demo, forcing a redraw.
        self.select_(self)
    @objc.IBAction
    def select_(self, sender):
        # Tell the view which demo is active and mark it dirty so it redraws.
        self.demoView.setDemoNumber_(self.popup.indexOfSelectedItem())
        self.demoView.setNeedsDisplay_(True)
    def applicationShouldTerminateAfterLastWindowClosed_(self, application):
        # Quit the app when its only window closes.
        return True
| 26.451613 | 79 | 0.643902 |
ea686e04c31190b1a4bab046a60c64ab4116e67c | 588 | py | Python | tests/test_get_solution_splitted_algorithm.py | kumagaimasahito/IsingRegisterAllocator | 7d20f56ee035fcaff456ab7641e51bad4b68144f | [
"MIT"
] | 1 | 2021-05-04T06:56:42.000Z | 2021-05-04T06:56:42.000Z | tests/test_get_solution_splitted_algorithm.py | kumagaimasahito/IsingRegisterAllocator | 7d20f56ee035fcaff456ab7641e51bad4b68144f | [
"MIT"
] | 1 | 2021-03-31T14:56:27.000Z | 2021-03-31T14:56:27.000Z | tests/test_get_solution_splitted_algorithm.py | kumagaimasahito/IsingRegisterAllocator | 7d20f56ee035fcaff456ab7641e51bad4b68144f | [
"MIT"
] | null | null | null | from IsingRegisterAllocator import get_solution_splitted_algorithm
from dotenv import load_dotenv
import os
# Populate os.environ from a local .env file, if present.
load_dotenv()
# Amplify service API token; None when unset (the test will then fail
# when it tries to contact the solver).
AMPLIFY_TOKEN = os.environ.get("AMPLIFY_TOKEN")
def test_get_solution_splitted_algorithm():
    """Smoke-test the splitted register-allocation solver on a small graph.

    Requires a valid AMPLIFY_TOKEN in the environment, since the solver
    talks to the Amplify service.
    """
    # Entry i lists the variables that conflict with variable i
    # (assumed interference-graph adjacency -- confirm against the
    # solver's interface).
    list_dependent_variables = [
        [1, 2, 3],
        [0, 2, 3, 4],
        [0, 1, 3, 4],
        [0, 1, 2, 4],
        [1, 2, 3],
        [6],
        [5],
        []
    ]
    num_registers = 4
    chunk_size = 3
    overlap_size = 1
    solution = get_solution_splitted_algorithm(
        list_dependent_variables, num_registers, chunk_size, overlap_size,
        AMPLIFY_TOKEN)
    # BUG FIX: the original test made no assertion at all, so it could
    # never fail regardless of what the solver returned. At minimum,
    # require that a solution object was produced.
    assert solution is not None
| 24.5 | 128 | 0.644558 |
6813b30dff10207a4008f29a2b226cb019af73fb | 17,036 | py | Python | indra_world/assembly/incremental_assembler.py | kkaris/indra_wm_service | d449e5a251428905290433270fd180279a7ae3f7 | [
"BSD-2-Clause"
] | null | null | null | indra_world/assembly/incremental_assembler.py | kkaris/indra_wm_service | d449e5a251428905290433270fd180279a7ae3f7 | [
"BSD-2-Clause"
] | null | null | null | indra_world/assembly/incremental_assembler.py | kkaris/indra_wm_service | d449e5a251428905290433270fd180279a7ae3f7 | [
"BSD-2-Clause"
] | null | null | null | import logging
from copy import deepcopy
import networkx
from collections import defaultdict
from indra.pipeline import AssemblyPipeline
from indra.belief import extend_refinements_graph
from indra.preassembler.refinement import RefinementConfirmationFilter
from indra_world.belief import get_eidos_scorer
from indra_world.ontology import load_world_ontology
from indra_world.assembly.operations import CompositionalRefinementFilter
from indra_world.assembly.operations import \
location_matches_compositional, location_refinement_compositional, \
add_flattened_grounding_compositional, standardize_names_compositional
logger = logging.getLogger(__name__)
# Pinned compositional ontology used for refinement finding.
comp_onto_url = 'https://raw.githubusercontent.com/WorldModelers/Ontologies/' \
                'master/CompositionalOntology_v2.1_metadata.yml'
# Loaded eagerly at import time; fetching/parsing the ontology can be slow.
world_ontology = load_world_ontology(comp_onto_url)
# TODO: should we use the Bayesian scorer?
eidos_scorer = get_eidos_scorer()
class IncrementalAssembler:
    """Assemble a set of prepared statements and allow incremental extensions.

    Parameters
    ----------
    prepared_stmts : list[indra.statements.Statement]
        A list of prepared INDRA Statements.
    refinement_filters : Optional[list[indra.preassembler.refinement.RefinementFilter]]
        A list of refinement filter classes to be used for refinement
        finding. Default: the standard set of compositional refinement filters.
    matches_fun : Optional[function]
        A custom matches function for determining matching statements and
        calculating hashes. Default: matches function that takes
        compositional grounding and location into account.
    curations : list[dict]
        A list of user curations to be integrated into the assembly results.
    post_processing_steps : list[dict]
        Steps that can be used in an INDRA AssemblyPipeline to do
        post-processing on statements.

    Attributes
    ----------
    refinement_edges : set
        A set of tuples of statement hashes representing refinement links
        between statements.
    """
    def __init__(self, prepared_stmts,
                 refinement_filters=None,
                 matches_fun=location_matches_compositional,
                 curations=None,
                 post_processing_steps=None):
        self.matches_fun = matches_fun
        # These are preassembly data structures
        self.stmts_by_hash = {}
        self.evs_by_stmt_hash = {}
        self.refinement_edges = set()
        self.prepared_stmts = prepared_stmts
        # Hashes of statements marked correct by a 'vet_statement' curation;
        # they get belief 1 in get_beliefs().
        self.known_corrects = set()
        if not refinement_filters:
            crf = CompositionalRefinementFilter(ontology=world_ontology)
            rcf = RefinementConfirmationFilter(ontology=world_ontology,
                refinement_fun=location_refinement_compositional)
            self.refinement_filters = [crf, rcf]
        else:
            self.refinement_filters = refinement_filters
        self.curations = curations if curations else []
        self.post_processing_steps = [
            {'function': 'add_flattened_grounding_compositional'},
            {'function': 'standardize_names_compositional'},
        ] \
            if post_processing_steps is None else post_processing_steps
        # Assembly happens eagerly at construction, in this order:
        # deduplication -> curations -> refinement finding -> belief scoring.
        self.deduplicate()
        self.apply_curations()
        self.get_refinements()
        self.refinements_graph = \
            self.build_refinements_graph(self.stmts_by_hash,
                                         self.refinement_edges)
        self.belief_scorer = eidos_scorer
        self.beliefs = self.get_beliefs()

    def apply_curations(self):
        """Apply the set of curations to the de-duplicated statements."""
        # Curations reference statements by UUID, so map UUIDs back to hashes.
        hashes_by_uuid = {stmt.uuid: sh
                          for sh, stmt in self.stmts_by_hash.items()}
        for curation in self.curations:
            stmt_hash = hashes_by_uuid.get(curation['statement_id'])
            # Curations for statements not in this corpus are skipped.
            if not stmt_hash:
                continue
            stmt = self.stmts_by_hash[stmt_hash]
            # Remove the statement
            if curation['update_type'] == 'discard_statement':
                self.stmts_by_hash.pop(stmt_hash, None)
                self.evs_by_stmt_hash.pop(stmt_hash, None)
                # TODO: update belief model here
            # Vet the statement
            elif curation['update_type'] == 'vet_statement':
                self.known_corrects.add(stmt_hash)
                # TODO: update belief model here
            # Flip the polarity
            elif curation['update_type'] == 'factor_polarity':
                role, new_pol = parse_factor_polarity_curation(curation)
                if role == 'subj':
                    stmt.subj.delta.polarity = new_pol
                elif role == 'obj':
                    stmt.obj.delta.polarity = new_pol
                else:
                    continue
            # Flip subject/object
            elif curation['update_type'] == 'reverse_relation':
                tmp = stmt.subj
                stmt.subj = stmt.obj
                stmt.obj = tmp
                # TODO: update evidence annotations
            # Change grounding
            elif curation['update_type'] == 'factor_grounding':
                role, txt, grounding = parse_factor_grounding_curation(curation)
                # FIXME: It is not clear how compositional groundings will be
                # represented in curations. This implementation assumes a single
                # grounding entry to which we assign a score of 1.0
                if role == 'subj':
                    stmt.subj.concept.db_refs['WM'][0] = (grounding, 1.0)
                elif role == 'obj':
                    stmt.obj.concept.db_refs['WM'][0] = (grounding, 1.0)
            else:
                logger.warning('Unknown curation type: %s' %
                               curation['update_type'])
            # We now update statement data structures in case the statement
            # changed in a meaningful way
            if curation['update_type'] in {'factor_polarity',
                                           'reverse_relation',
                                           'factor_grounding'}:
                # First, calculate the new hash
                new_hash = stmt.get_hash(matches_fun=self.matches_fun,
                                         refresh=True)
                # If we don't have a statement yet with this new hash, we
                # move the statement and evidences from the old to the new hash
                if new_hash not in self.stmts_by_hash:
                    self.stmts_by_hash[new_hash] = \
                        self.stmts_by_hash.pop(stmt_hash)
                    self.evs_by_stmt_hash[new_hash] = \
                        self.evs_by_stmt_hash.pop(stmt_hash)
                # If there is already a statement with the new hash, we leave
                # that as is in stmts_by_hash, and then extend evs_by_stmt_hash
                # with the evidences of the curated statement.
                else:
                    self.evs_by_stmt_hash[new_hash] += \
                        self.evs_by_stmt_hash.pop(stmt_hash)

    def deduplicate(self):
        """Build hash-based statement and evidence data structures to
        deduplicate."""
        for stmt in self.prepared_stmts:
            self.annotate_evidences(stmt)
            stmt_hash = stmt.get_hash(matches_fun=self.matches_fun)
            evs = stmt.evidence
            # The first statement seen for a hash becomes the representative;
            # evidences from all duplicates are pooled under that hash.
            if stmt_hash not in self.stmts_by_hash:
                # FIXME: this may be enabled since evidences are kept under
                # a separate data structure, however, then tests may need to
                # be updated to work around the fact that statements are
                # modified.
                # stmt.evidence = []
                self.stmts_by_hash[stmt_hash] = stmt
            if stmt_hash not in self.evs_by_stmt_hash:
                self.evs_by_stmt_hash[stmt_hash] = []
            self.evs_by_stmt_hash[stmt_hash] += evs

    def get_refinements(self):
        """Calculate refinement relationships between de-duplicated statements.
        """
        for filter in self.refinement_filters:
            filter.initialize(self.stmts_by_hash)
        for sh, stmt in self.stmts_by_hash.items():
            # Filters are applied in sequence; each narrows the candidate set
            # produced by the previous one.
            refinements = None
            for filter in self.refinement_filters:
                # This gets less specific hashes
                refinements = filter.get_related(stmt, refinements)
            # Here we need to add less specific first and more specific second
            refinement_edges = {(ref, sh) for ref in refinements}
            self.refinement_edges |= refinement_edges

    @staticmethod
    def build_refinements_graph(stmts_by_hash, refinement_edges):
        """Return a refinements graph based on statements and refinement edges.
        """
        # Nodes are statement hashes (statement attached as node data);
        # edges point from less specific to more specific statements.
        g = networkx.DiGraph()
        nodes = [(sh, {'stmt': stmt}) for sh, stmt in stmts_by_hash.items()]
        g.add_nodes_from(nodes)
        g.add_edges_from(refinement_edges)
        return g

    def add_statements(self, stmts):
        """Add new statements for incremental assembly.

        Parameters
        ----------
        stmts : list[indra.statements.Statement]
            A list of new prepared statements to be incrementally assembled
            into the set of existing statements.

        Returns
        -------
        AssemblyDelta
            An AssemblyDelta object representing the changes to the assembly
            as a result of the new added statements.
        """
        # We fist organize statements by hash
        stmts_by_hash = defaultdict(list)
        for stmt in stmts:
            self.annotate_evidences(stmt)
            stmts_by_hash[
                stmt.get_hash(matches_fun=self.matches_fun)].append(stmt)
        stmts_by_hash = dict(stmts_by_hash)
        # We next create the new statements and new evidences data structures
        new_stmts = {}
        new_evidences = defaultdict(list)
        for sh, stmts_for_hash in stmts_by_hash.items():
            if sh not in self.stmts_by_hash:
                new_stmts[sh] = stmts_for_hash[0]
                self.stmts_by_hash[sh] = stmts_for_hash[0]
                self.evs_by_stmt_hash[sh] = []
            # Evidence is collected even for statements whose hash already
            # existed: those evidences are new to the assembly.
            for stmt in stmts_for_hash:
                for ev in stmt.evidence:
                    new_evidences[sh].append(ev)
                    self.evs_by_stmt_hash[sh].append(ev)
        new_evidences = dict(new_evidences)
        # Here we run some post-processing steps on the new statements
        ap = AssemblyPipeline(steps=self.post_processing_steps)
        # NOTE: the assumption here is that the processing steps modify the
        # statement objects directly, this could be modified to return
        # statements that are then set in the hash-keyed dict
        ap.run(list(new_stmts.values()))
        # Next we extend refinements and re-calculate beliefs
        for filter in self.refinement_filters:
            filter.extend(new_stmts)
        new_refinements = set()
        for sh, stmt in new_stmts.items():
            refinements = None
            for filter in self.refinement_filters:
                # Note that this gets less specifics
                refinements = filter.get_related(stmt, refinements)
            # We order hashes by less specific first and more specific second
            new_refinements |= {(ref, sh) for ref in refinements}
            # This expects a list of less specific hashes for the statement
            extend_refinements_graph(self.refinements_graph,
                                     stmt, list(refinements),
                                     matches_fun=self.matches_fun)
        beliefs = self.get_beliefs()
        return AssemblyDelta(new_stmts, new_evidences, new_refinements,
                             beliefs, matches_fun=self.matches_fun)

    def get_all_supporting_evidence(self, sh):
        """Return direct and indirect evidence for a statement hash."""
        all_evs = set(self.evs_by_stmt_hash[sh])
        # Descendants in the refinements graph are the more specific
        # statements whose evidence also supports this one.
        for supp in networkx.descendants(self.refinements_graph, sh):
            all_evs |= set(self.evs_by_stmt_hash[supp])
        return all_evs

    def get_beliefs(self):
        """Calculate and return beliefs for all statements."""
        self.beliefs = {}
        for sh, evs in self.evs_by_stmt_hash.items():
            # Curated-correct statements get full belief.
            if sh in self.known_corrects:
                self.beliefs[sh] = 1
                # TODO: should we propagate this belief to all the less
                # specific statements? One option is to add those statements'
                # hashes to the known_corrects list and then at this point
                # we won't need any special handling.
            else:
                self.beliefs[sh] = self.belief_scorer.score_evidence_list(
                    self.get_all_supporting_evidence(sh))
        return self.beliefs

    def get_statements(self):
        """Return a flat list of statements with their evidences."""
        stmts = []
        # Deep copy so that attaching evidence/belief does not mutate the
        # internal deduplicated statement objects.
        for sh, stmt in deepcopy(self.stmts_by_hash).items():
            stmt.evidence = self.evs_by_stmt_hash.get(sh, [])
            stmt.belief = self.beliefs[sh]
            stmts.append(stmt)
        # TODO: add refinement edges as supports/supported_by?
        # Here we run some post-processing steps on the statements
        ap = AssemblyPipeline(steps=self.post_processing_steps)
        stmts = ap.run(stmts)
        return stmts

    @staticmethod
    def annotate_evidences(stmt):
        """Add annotations to evidences of a given statement."""
        for ev in stmt.evidence:
            # Record each agent's raw text (or None) in a deterministic,
            # deep-sorted agent order.
            raw_text = [None if ag is None else ag.db_refs.get('TEXT')
                        for ag in stmt.agent_list(deep_sorted=True)]
            if 'agents' in ev.annotations:
                ev.annotations['agents']['raw_text'] = raw_text
            else:
                ev.annotations['agents'] = {'raw_text': raw_text}
class AssemblyDelta:
    """Changes to an assembly caused by newly added statements.

    Attributes
    ----------
    new_stmts : dict[str, indra.statements.Statement]
        New statements keyed by statement hash.
    new_evidences : dict[str, indra.statements.Evidence]
        New evidences, for both new and pre-existing statements, keyed
        by statement hash.
    new_refinements : list[tuple]
        Pairs of statement hashes constituting new refinement links.
    beliefs : dict[str, float]
        Belief scores keyed by statement hash, covering both old and
        new statements.
    matches_fun : Optional[Callable[[Statement], str]]
        An optional custom matches function. When using a custom matches
        function for assembly, providing it here is necessary to get
        correct JSON serialization.
    """

    def __init__(self, new_stmts, new_evidences, new_refinements, beliefs,
                 matches_fun=None):
        self.new_stmts = new_stmts
        self.new_evidences = new_evidences
        self.new_refinements = new_refinements
        self.beliefs = beliefs
        self.matches_fun = matches_fun

    def to_json(self):
        """Serialize the assembly delta into a JSON-compatible dict."""
        stmts_json = {}
        for stmt_hash, stmt in self.new_stmts.items():
            # Serialize with the custom matches function so matches hashes
            # stay consistent with assembly, then drop the evidence list:
            # it is carried separately under 'new_evidence'.
            stmt_json = stmt.to_json(matches_fun=self.matches_fun)
            stmt_json.pop('evidence', None)
            stmts_json[stmt_hash] = stmt_json
        evs_json = {
            stmt_hash: [ev.to_json() for ev in evs]
            for stmt_hash, evs in self.new_evidences.items()
        }
        return {
            'new_stmts': stmts_json,
            'new_evidence': evs_json,
            'new_refinements': list(self.new_refinements),
            'beliefs': self.beliefs,
        }
def parse_factor_polarity_curation(cur):
    """Determine which role's polarity a curation flipped.

    Compares the 'before' and 'after' snapshots in the curation and
    returns a (role, new_polarity) tuple where role is 'subj' or 'obj'.
    The subject is checked first; (None, None) is returned when neither
    polarity changed.
    """
    before, after = cur['before'], cur['after']
    for role in ('subj', 'obj'):
        if before[role]['polarity'] != after[role]['polarity']:
            return role, after[role]['polarity']
    return None, None
def parse_factor_grounding_curation(cur):
    """Determine which role's grounding a curation changed.

    Compares the 'before' and 'after' snapshots in the curation and
    returns a (role, factor_text, new_concept) triple where role is
    'subj' or 'obj'. The subject is checked first; (None, None, None)
    is returned when neither concept changed.
    """
    before, after = cur['before'], cur['after']
    for role in ('subj', 'obj'):
        if before[role]['concept'] != after[role]['concept']:
            return role, after[role]['factor'], after[role]['concept']
    return None, None, None
| 44.134715 | 87 | 0.619394 |
69e556791e5b27de7e526a7bc8b054fd3e5a8f94 | 14,633 | py | Python | tests/python/relay/test_external_codegen.py | lhutton1/tvm | e9380e47f0b97c0b98b97f082b075eaa1308038b | [
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0"
] | 3 | 2021-05-08T17:04:39.000Z | 2021-07-11T17:40:54.000Z | tests/python/relay/test_external_codegen.py | delldu/tvm | d9ec031ec33e046044fd9521f02ed63213ad07b8 | [
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0"
] | null | null | null | tests/python/relay/test_external_codegen.py | delldu/tvm | d9ec031ec33e046044fd9521f02ed63213ad07b8 | [
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unit tests for graph partitioning."""
import os
import sys
from collections import OrderedDict
import numpy as np
import pytest
import tvm
from tvm import relay, runtime
from tvm.contrib import utils
from tvm.relay.build_module import bind_params_by_name
from tvm.relay.op.annotation import compiler_begin, compiler_end
from aot.aot_test_utils import AOTTestModel, compile_and_run
def update_lib(lib):
    """Compile *lib* against the TVM contrib runtime and reload it.

    The external "ccompiler" codegen emits C source that needs the
    src/runtime/contrib headers, so the module is exported with those
    include paths and then loaded back as a shared library.
    """
    this_dir = os.path.dirname(os.path.realpath(os.path.expanduser(__file__)))
    repo_root = os.path.join(this_dir, "..", "..", "..")
    contrib_path = os.path.join(repo_root, "src", "runtime", "contrib")
    compile_opts = {"options": ["-O2", "-std=c++14", "-I" + contrib_path]}

    tmp_dir = utils.tempdir()
    lib_path = tmp_dir.relpath("lib.so")
    lib.export_library(lib_path, fcompile=False, **compile_opts)
    return tvm.runtime.load_module(lib_path)
def check_vm_result(mod, map_inputs, out_shape, result, tol=1e-5, target="llvm", device=tvm.cpu()):
    """Compile *mod* with the Relay VM, run it, and compare with *result*.

    The serialized executable is round-tripped through save/load so that
    the externally generated library can be relinked via update_lib().
    NOTE(review): the ``device=tvm.cpu()`` default is evaluated once at
    import time, like in the sibling checkers.
    """
    with tvm.transform.PassContext(opt_level=3, disabled_pass=["AlterOpLayout"]):
        exe = relay.vm.compile(mod, target=target)
        code, lib = exe.save()
        # Recompile the exported library with the contrib runtime included.
        lib = update_lib(lib)
        exe = runtime.vm.Executable.load_exec(code, lib)
        vm = runtime.vm.VirtualMachine(exe, device)
        out = vm.run(**map_inputs)
        tvm.testing.assert_allclose(out.numpy(), result, rtol=tol, atol=tol)
def check_graph_executor_result(
    mod, map_inputs, out_shape, result, tol=1e-5, target="llvm", device=tvm.cpu()
):
    """Build *mod* for the graph executor, run it, and compare with *result*."""
    with tvm.transform.PassContext(opt_level=3, disabled_pass=["AlterOpLayout"]):
        # `json` here shadows the stdlib module name within this function.
        json, lib, _ = relay.build(mod, target=target)
        # Recompile the exported library with the contrib runtime included.
        lib = update_lib(lib)
        rt_mod = tvm.contrib.graph_executor.create(json, lib, device)
        for name, data in map_inputs.items():
            rt_mod.set_input(name, data)
        rt_mod.run()
        out = tvm.nd.empty(out_shape, device=device)
        out = rt_mod.get_output(0, out)
        tvm.testing.assert_allclose(out.numpy(), result, rtol=tol, atol=tol)
def check_aot_executor_result(
    mod, map_inputs, out_shape, result, tol=1e-5, target="llvm", device=tvm.cpu()
):
    """Compile and run *mod* with the AOT executor via the AOT test harness.

    ``out_shape``, ``tol``, ``target`` and ``device`` are unused here; the
    parameters exist only for signature parity with the other checkers so
    the same parametrized tests can call any of them.
    """
    interface_api = "packed"
    use_unpacked_api = False
    use_calculated_workspaces = True
    # The harness compiles the module and compares its output to `result`.
    compile_and_run(
        AOTTestModel(module=mod, inputs=map_inputs, outputs=[result]),
        interface_api,
        use_unpacked_api,
        use_calculated_workspaces,
    )
def set_external_func_attr(func, compiler, ext_symbol):
    """Mark *func* for offload to the external *compiler* as *ext_symbol*."""
    attrs = {
        "Primitive": tvm.tir.IntImm("int32", 1),
        "Compiler": compiler,
        "global_symbol": ext_symbol,
    }
    # Apply the attributes in insertion order, same as the chained calls.
    for key, value in attrs.items():
        func = func.with_attr(key, value)
    return func
@pytest.mark.skipif(sys.platform == "win32", reason="Skip test on Windows for now")
@pytest.mark.parametrize(
    "check_result", [check_vm_result, check_graph_executor_result, check_aot_executor_result]
)
def test_multi_node_subgraph(check_result):
    """Partition a graph into two external C-codegen subgraphs plus TVM ops.

    Each external subgraph computes ((x + w_a) - w_b) * w_c, the rest
    (an add and a subtract) stays on TVM, and the three 10x10 results
    are concatenated into a 30x10 output.
    """
    x = relay.var("x", shape=(10, 10))
    w0 = relay.var("w0", shape=(10, 10))
    w1 = relay.var("w1", shape=(10, 10))
    w2 = relay.var("w2", shape=(10, 10))
    w3 = relay.var("w3", shape=(10, 10))
    w4 = relay.var("w4", shape=(10, 10))
    w5 = relay.var("w5", shape=(10, 10))
    w6 = relay.var("w6", shape=(10, 10))
    w7 = relay.var("w7", shape=(10, 10))
    # subgraph0
    x0 = relay.var("x0", shape=(10, 10))
    w00 = relay.var("w00", shape=(10, 10))
    w01 = relay.var("w01", shape=(10, 10))
    w02 = relay.var("w02", shape=(10, 10))
    z00 = relay.add(x0, w00)
    p00 = relay.subtract(z00, w01)
    q00 = relay.multiply(p00, w02)
    subgraph0 = relay.Function([x0, w00, w01, w02], q00)
    subgraph0 = set_external_func_attr(subgraph0, "ccompiler", "ccompiler_0")
    call0 = relay.Call(subgraph0, [x, w0, w1, w2])
    # subgraph1
    x1 = relay.var("x1", shape=(10, 10))
    w10 = relay.var("w10", shape=(10, 10))
    w11 = relay.var("w11", shape=(10, 10))
    w12 = relay.var("w12", shape=(10, 10))
    z10 = relay.add(x1, w10)
    p10 = relay.subtract(z10, w11)
    q10 = relay.multiply(p10, w12)
    subgraph1 = relay.Function([x1, w10, w11, w12], q10)
    subgraph1 = set_external_func_attr(subgraph1, "ccompiler", "ccompiler_1")
    call1 = relay.Call(subgraph1, [x, w3, w4, w5])
    # Other parts on TVM
    z2 = relay.add(x, w6)
    q2 = relay.subtract(z2, w7)
    r = relay.concatenate((call0, call1, q2), axis=0)
    f = relay.Function([x, w0, w1, w2, w3, w4, w5, w6, w7], r)
    mod = tvm.IRModule()
    mod["main"] = f
    mod = relay.transform.InferType()(mod)
    x_data = np.random.rand(10, 10).astype("float32")
    w_data = []
    for _ in range(8):
        w_data.append(np.random.rand(10, 10).astype("float32"))
    map_inputs = OrderedDict([("x", x_data)] + [("w{}".format(i), w_data[i]) for i in range(8)])
    # The expected output mirrors the dataflow above, evaluated in NumPy.
    check_result(
        mod,
        map_inputs,
        (30, 10),
        np.concatenate(
            (
                ((x_data + w_data[0]) - w_data[1]) * w_data[2],
                ((x_data + w_data[3]) - w_data[4]) * w_data[5],
                x_data + w_data[6] - w_data[7],
            ),
            axis=0,
        ),
    )
@pytest.mark.skipif(sys.platform == "win32", reason="Skip test on Windows for now")
@pytest.mark.parametrize(
    "check_result", [check_vm_result, check_graph_executor_result, check_aot_executor_result]
)
def test_extern_gcc_single_op(check_result):
    """Offload a single float32 elementwise add to the external C codegen."""
    x = relay.var("x", shape=(8, 8))
    y = relay.var("y", shape=(8, 8))
    # Inner function with its own parameters, annotated for offload.
    x0 = relay.var("x0", shape=(8, 8))
    y0 = relay.var("y0", shape=(8, 8))
    z = x0 + y0
    f = relay.Function([x0, y0], z)
    f = set_external_func_attr(f, "ccompiler", "ccompiler_0")
    call = relay.Call(f, [x, y])
    mod = tvm.IRModule.from_expr(call)
    x_data = np.random.rand(8, 8).astype("float32")
    y_data = np.random.rand(8, 8).astype("float32")
    check_result(mod, {"x": x_data, "y": y_data}, (8, 8), x_data + y_data)
@pytest.mark.skipif(sys.platform == "win32", reason="Skip test on Windows for now")
@pytest.mark.parametrize(
    "check_result", [check_vm_result, check_graph_executor_result, check_aot_executor_result]
)
def test_extern_gcc_single_op_int(check_result):
    """Offload a single int32 elementwise add to the external C codegen."""
    x = relay.var("x", shape=(8, 8), dtype="int32")
    y = relay.var("y", shape=(8, 8), dtype="int32")
    # Inner function with its own parameters, annotated for offload.
    x0 = relay.var("x0", shape=(8, 8), dtype="int32")
    y0 = relay.var("y0", shape=(8, 8), dtype="int32")
    z = x0 + y0
    f = relay.Function([x0, y0], z)
    f = set_external_func_attr(f, "ccompiler", "ccompiler_0")
    call = relay.Call(f, [x, y])
    mod = tvm.IRModule.from_expr(call)
    # BUG FIX: np.random.rand() draws floats in [0, 1), so the original
    # `.astype("int32")` truncated every element to zero and the test only
    # ever verified 0 + 0 == 0. Draw genuine random integers instead.
    x_data = np.random.randint(0, 100, size=(8, 8)).astype("int32")
    y_data = np.random.randint(0, 100, size=(8, 8)).astype("int32")

    check_result(mod, {"x": x_data, "y": y_data}, (8, 8), x_data + y_data)
@pytest.mark.skipif(sys.platform == "win32", reason="Skip test on Windows for now")
@pytest.mark.parametrize(
    "check_result", [check_vm_result, check_graph_executor_result, check_aot_executor_result]
)
def test_extern_gcc(check_result):
    """Offload three separate ops (mul, add, sub) to the external C codegen.

    Computes (y * y) - (x + x) with each elementwise op wrapped in its own
    externally compiled function.
    """
    x = relay.var("x", shape=(2, 2))
    y = relay.var("y", shape=(2, 2))
    # subgraph for mul
    x0 = relay.var("x0", shape=(2, 2))
    y0 = relay.var("y0", shape=(2, 2))
    mul = x0 * y0
    mul = relay.Function([x0, y0], mul)
    mul = set_external_func_attr(mul, "ccompiler", "ccompiler_2")
    call_mul = relay.Call(mul, [y, y])
    # subgraph for add
    x1 = relay.var("x1", shape=(2, 2))
    y1 = relay.var("y1", shape=(2, 2))
    add = x1 + y1
    add = relay.Function([x1, y1], add)
    add = set_external_func_attr(add, "ccompiler", "ccompiler_1")
    call_add = relay.Call(add, [x, x])
    # subgraph for sub
    x2 = relay.var("x2", shape=(2, 2))
    y2 = relay.var("y2", shape=(2, 2))
    sub = x2 - y2
    sub = relay.Function([x2, y2], sub)
    sub = set_external_func_attr(sub, "ccompiler", "ccompiler_0")
    call_sub = relay.Call(sub, [call_mul, call_add])
    mod = tvm.IRModule.from_expr(call_sub)
    x_data = np.random.rand(2, 2).astype("float32")
    y_data = np.random.rand(2, 2).astype("float32")
    inputs = OrderedDict(
        [
            ("y", y_data),
            ("x", x_data),
        ]
    )
    check_result(mod, inputs, (2, 2), (y_data * y_data) - (x_data + x_data))
@pytest.mark.skipif(sys.platform == "win32", reason="Skip test on Windows for now")
def test_extern_gcc_consts():
    """Check that a custom constant_updater is honored for external constants.

    The updater registered below should cause the constant used in the
    external function to appear as parameter "ccompiler_0_p0" in the
    compiled params, for both the VM and the graph build paths.
    """
    @tvm._ffi.register_func("relay.ext.ccompiler.constant_updater")
    def constant_updater(expr, symbol):
        """A dummy constant updater just to test that a custom one works."""
        return {"ccompiler_0_p0": tvm.nd.array(y0_data)}
    x = relay.var("x", shape=(8, 8))
    y0_data = np.random.uniform(0, 1, (8, 8)).astype("float32")
    x0 = relay.var("x0", shape=(8, 8))
    y0_const = relay.const(y0_data, "float32")
    z = x0 + y0_const
    f = relay.Function([x0], z)
    f = set_external_func_attr(f, "ccompiler", "ccompiler_0")
    call = relay.Call(f, [x])
    mod = tvm.IRModule.from_expr(call)
    # VM compile path.
    with tvm.transform.PassContext(opt_level=3, disabled_pass=["AlterOpLayout"]):
        compiler = relay.backend.vm.VMCompiler()
        compiler.lower(mod, "llvm")
        compiler.codegen()
        params = compiler.get_params()
        assert len(params) == 1
        assert "ccompiler_0_p0" in params.keys()
    # Graph build path.
    with tvm.transform.PassContext(opt_level=3, disabled_pass=["AlterOpLayout"]):
        _, _, params = relay.build(mod, target="llvm")
        assert len(params) == 1
        assert "ccompiler_0_p0" in params.keys()
    # Unregister so the dummy updater does not leak into other tests.
    tvm._ffi.registry.remove_global_func("relay.ext.ccompiler.constant_updater")
@pytest.mark.skipif(sys.platform == "win32", reason="Skip test on Windows for now")
@pytest.mark.skipif(
    not tvm.get_global_func("relay.ext.dnnl", True),
    reason="skip because DNNL codegen is not available",
)
@pytest.mark.parametrize("check_result", [check_vm_result, check_graph_executor_result])
def test_extern_dnnl(check_result):
    """Run two depthwise convs plus an add through the DNNL codegen.

    The externally compiled result is compared against the same module
    evaluated by the plain TVM graph executor.
    """
    dtype = "float32"
    ishape = (1, 32, 14, 14)
    w1shape = (32, 1, 3, 3)
    # NOTE(review): data1/weight1 reuse the relay.var names "data0"/"weight0",
    # presumably so the external function's inputs line up with the outer
    # call arguments below -- confirm before renaming anything here.
    data0 = relay.var("data0", shape=(ishape), dtype=dtype)
    weight0 = relay.var("weight0", shape=(w1shape), dtype=dtype)
    data1 = relay.var("data0", shape=(ishape), dtype=dtype)
    weight1 = relay.var("weight0", shape=(w1shape), dtype=dtype)
    weight2 = relay.var("weight1", shape=(w1shape), dtype=dtype)
    depthwise_conv2d_1 = relay.nn.conv2d(
        data1, weight1, kernel_size=(3, 3), padding=(1, 1), groups=32
    )
    depthwise_conv2d_2 = relay.nn.conv2d(
        depthwise_conv2d_1, weight2, kernel_size=(3, 3), padding=(1, 1), groups=32
    )
    out = relay.add(depthwise_conv2d_1, depthwise_conv2d_2)
    f = relay.Function([data1, weight1, weight2], out)
    # Reference module keeps the un-annotated function for TVM execution.
    ref_mod = tvm.IRModule()
    ref_mod["main"] = f
    f = set_external_func_attr(f, "dnnl", "dnnl_0")
    call = relay.Call(f, [data0, weight0, weight0])
    mod = tvm.IRModule.from_expr(call)
    i_data = np.random.uniform(0, 1, ishape).astype(dtype)
    w_data = np.random.uniform(0, 1, w1shape).astype(dtype)
    # Ground-truth result from the plain TVM graph executor.
    ref_ex = relay.create_executor("graph", mod=ref_mod, device=tvm.cpu())
    ref_res = ref_ex.evaluate()(i_data, w_data, w_data)
    check_result(
        mod, {"data0": i_data, "weight0": w_data}, (1, 32, 14, 14), ref_res.numpy(), tol=1e-5
    )
@pytest.mark.skipif(sys.platform == "win32", reason="Skip test on Windows for now")
@pytest.mark.skipif(
    not tvm.get_global_func("relay.ext.dnnl", True),
    reason="skip because DNNL codegen is not available",
)
@pytest.mark.parametrize("check_result", [check_vm_result, check_graph_executor_result])
def test_extern_dnnl_const(check_result):
    """Like test_extern_dnnl, but the conv weights are bound as relay constants."""
    dtype = "float32"
    ishape = (1, 32, 14, 14)
    w1shape = (32, 1, 3, 3)
    # Outer call argument (data only; weights live inside the function as consts).
    data0 = relay.var("data0", shape=(ishape), dtype=dtype)
    w_data = np.random.uniform(0, 1, w1shape).astype(dtype)
    data1 = relay.var("data0", shape=(ishape), dtype=dtype)
    # Both convolutions share the same constant weight tensor.
    weight1 = relay.const(w_data, dtype=dtype)
    weight2 = relay.const(w_data, dtype=dtype)
    depthwise_conv2d_1 = relay.nn.conv2d(
        data1, weight1, kernel_size=(3, 3), padding=(1, 1), groups=32
    )
    depthwise_conv2d_2 = relay.nn.conv2d(
        depthwise_conv2d_1, weight2, kernel_size=(3, 3), padding=(1, 1), groups=32
    )
    out = relay.add(depthwise_conv2d_1, depthwise_conv2d_2)
    f = relay.Function([data1], out)
    # Reference module evaluates the same graph without DNNL offloading.
    ref_mod = tvm.IRModule()
    ref_mod["main"] = f
    f = set_external_func_attr(f, "dnnl", "dnnl_0")
    call = relay.Call(f, [data0])
    mod = tvm.IRModule.from_expr(call)
    i_data = np.random.uniform(0, 1, ishape).astype(dtype)
    ref_ex = relay.create_executor("graph", mod=ref_mod, device=tvm.cpu())
    ref_res = ref_ex.evaluate()(i_data)
    check_result(mod, {"data0": i_data}, (1, 32, 14, 14), ref_res.numpy(), tol=1e-5)
def test_load_params_with_constants_in_ext_codegen():
    """load_params must accept param dicts holding external-codegen constants."""
    # After binding params and partitioning, graph_module.get_params()
    # might contain parameters that are not a graph executor input but,
    # for example, constants in an external function.
    y_in = np.ones((1,)).astype("float32")
    params = {"y": y_in}
    mod = tvm.IRModule()
    x = relay.var("x", shape=(1, 10))
    y = relay.var("y", shape=(1,))
    # Annotate the add so it is partitioned into a "ccompiler" external region.
    xcb = compiler_begin(x, "ccompiler")
    ycb = compiler_begin(y, "ccompiler")
    z = relay.add(xcb, ycb)
    zce = compiler_end(z, "ccompiler")
    mod["main"] = relay.Function([x, y], zce)
    mod["main"] = bind_params_by_name(mod["main"], params)
    mod = relay.transform.PartitionGraph()(mod)
    graph_module = relay.build(mod, target="llvm", params=params)
    # Params will be stored in metadata module.
    assert len(graph_module.get_params()) == 0
    lib = update_lib(graph_module.get_lib())
    rt_mod = tvm.contrib.graph_executor.create(graph_module.get_graph_json(), lib, tvm.cpu(0))
    # Round-trip the (empty) param dict through save/load without error.
    rt_mod.load_params(runtime.save_param_dict(graph_module.get_params()))
if __name__ == "__main__":
    # Forward CLI args so individual tests can be selected, e.g. ``-k name``.
    sys.exit(pytest.main([__file__] + sys.argv[1:]))
| 36.766332 | 99 | 0.653318 |
a1aae807e88f6f7ff9e2dfe7ce7681151797d067 | 4,903 | py | Python | tools/mytools/ARIA/src/py/aria/AssignmentFilter.py | fmareuil/Galaxy_test_pasteur | 6f84fb0fc52e3e7dd358623b5da5354c66e16a5f | [
"CC-BY-3.0"
] | null | null | null | tools/mytools/ARIA/src/py/aria/AssignmentFilter.py | fmareuil/Galaxy_test_pasteur | 6f84fb0fc52e3e7dd358623b5da5354c66e16a5f | [
"CC-BY-3.0"
] | null | null | null | tools/mytools/ARIA/src/py/aria/AssignmentFilter.py | fmareuil/Galaxy_test_pasteur | 6f84fb0fc52e3e7dd358623b5da5354c66e16a5f | [
"CC-BY-3.0"
] | null | null | null | """
ARIA -- Ambiguous Restraints for Iterative Assignment
A software for automated NOE assignment
Version 2.3
Copyright (C) Benjamin Bardiaux, Michael Habeck, Therese Malliavin,
Wolfgang Rieping, and Michael Nilges
All rights reserved.
NO WARRANTY. This software package is provided 'as is' without warranty of
any kind, expressed or implied, including, but not limited to the implied
warranties of merchantability and fitness for a particular purpose or
a warranty of non-infringement.
Distribution of substantively modified versions of this module is
prohibited without the explicit permission of the copyright holders.
$Author: bardiaux $
$Revision: 1.1.1.1 $
$Date: 2010/03/23 15:27:24 $
"""
from aria.ariabase import *
class AssignmentFilter(AriaBaseClass):
    """Filters NOE restraint contributions via an iterated residue contact matrix.

    A residue-residue contact matrix is accumulated from all restraint
    contributions and refined over several passes; contributions whose residue
    pair carries (almost) no contact weight are deactivated or down-weighted.
    """

    def __init__(self):
        # Number of residues; determined lazily from the restraints.
        self.n_residues = None

    def filter_weights(self, weights, cutoff, max_n):
        """Split ``weights`` into (large, small) index sets.

        Weights are ranked in descending order and accumulated until the
        cumulative sum exceeds ``cutoff``; at most ``max_n`` indices are
        kept in the "large" set.

        Returns a tuple ``(large_indices, small_indices)`` of index arrays.
        """
        check_array(weights)
        check_float(cutoff)
        check_int(max_n)

        import numpy

        ## sort weights in descending order
        indices = numpy.argsort(weights)
        indices = numpy.take(indices, numpy.arange(len(indices) - 1, -1, -1))
        s_weights = numpy.take(weights, indices)

        x = numpy.add.accumulate(s_weights)

        try:
            # NOTE(review): uses the *second* index at which the cumulative
            # sum exceeds the cutoff; confirm [1] rather than [0] is intended.
            index = numpy.flatnonzero(numpy.greater(x, cutoff))[1]
        except IndexError:
            # Fewer than two sums exceed the cutoff: keep all weights.
            # (Was a bare ``except:``; only the missing-index case occurs here.)
            index = len(indices)

        ## we limit the number of contributing weights to max_n.
        index = min(index, max_n)

        ## Return set of large and small weights.
        return indices[:index], indices[index:]

    def getResidueNumbers(self, contributions):
        """Return 0-based ``(res1, res2)`` residue-number pairs, one per contribution."""
        res_numbers = []
        for c in contributions:
            sp = c.getSpinPairs()[0]
            n1 = sp[0].getResidue().getNumber() - 1
            n2 = sp[1].getResidue().getNumber() - 1
            res_numbers.append((n1, n2))
        return res_numbers

    def getNResidues(self, restraints):
        """Return the largest (1-based) residue number referenced by ``restraints``."""
        n_max = 0
        for p in restraints:
            for c in p.getContributions():
                sp = c.getSpinPairs()[0]
                n1 = sp[0].getResidue().getNumber()
                n2 = sp[1].getResidue().getNumber()
                if max(n1, n2) > n_max:
                    n_max = max(n1, n2)
        return n_max

    def buildMatrix(self, restraints, n_residues, weight_matrix=None):
        """Accumulate a symmetric residue contact matrix from ``restraints``.

        If ``weight_matrix`` (the previous iteration's matrix) is given, each
        contribution is weighted by the contact value of its residue pair;
        otherwise contributions are weighted uniformly. Per restraint, the
        weights are normalised to sum to one.
        """
        import numpy

        # Fix: honour the ``n_residues`` argument (the original read
        # ``self.n_residues``, silently ignoring the parameter; callers always
        # passed ``self.n_residues``, so behaviour is unchanged).
        z = numpy.zeros((n_residues, n_residues), numpy.float64)

        for restraint in restraints:
            contribs = restraint.getContributions()
            n_contribs = len(contribs)
            res_numbers = self.getResidueNumbers(contribs)

            if weight_matrix is None:
                # ``numpy.float64`` replaces the defunct ``numpy.Float`` alias.
                weights = numpy.ones(n_contribs, numpy.float64)
            else:
                weights = numpy.array([weight_matrix[i[0], i[1]]
                                       for i in res_numbers])

            # Fall back to uniform weights when the looked-up weights vanish.
            if numpy.sum(weights) < 1.e-10:
                weights = numpy.ones(n_contribs, numpy.float64)

            weights = weights / numpy.sum(weights)

            for i in range(len(contribs)):
                n1, n2 = res_numbers[i]
                w = weights[i]
                z[n1, n2] += w
                # ``!=`` replaces the Python-2-only ``<>`` operator.
                if n1 != n2:
                    z[n2, n1] += w

        return z

    def buildContactMatrix(self, restraint_list, n_iterations):
        """Build the contact matrix, refining it ``n_iterations`` times."""
        from pystartup import Dump

        if self.n_residues is None:
            self.n_residues = self.getNResidues(restraint_list)

        contact_matrix = None

        for i in range(n_iterations):
            contact_matrix = self.buildMatrix(restraint_list,
                                              self.n_residues,
                                              contact_matrix)
            # Debug dump of each refinement pass.
            Dump(contact_matrix, '/tmp/cm%d' % (i + 1))

        return contact_matrix

    def filterContributions(self, restraint_list, n_iterations=5,
                            cutoff=0.8, max_n=10):
        """Re-weight contributions from the contact matrix; deactivate dead restraints."""
        import numpy

        cm = self.buildContactMatrix(restraint_list, n_iterations)

        # NOTE(review): the ``cutoff`` argument is overwritten here -- only
        # the 1000 strongest contacts survive. Confirm this is intended.
        cutoff = numpy.sort(numpy.ravel(cm))[-1000]
        cm = cm * numpy.greater(cm, cutoff)

        for restraint in restraint_list:
            contribs = restraint.getContributions()
            res_numbers = self.getResidueNumbers(contribs)
            weights = numpy.array([cm[i[0], i[1]] for i in res_numbers])

            if numpy.sum(weights) < 1.e-10:
                # No surviving contacts at all: deactivate the whole restraint.
                restraint.isActive(0)
                continue

            weights = weights / numpy.sum(weights)

            # A cumulative cutoff of 1.1 can never be reached, so the split is
            # effectively governed by ``max_n`` alone.
            on, off = self.filter_weights(weights, 1.1, max_n)

            ## activate / deactivate contributions (plain loops instead of
            ## side-effect list comprehensions)
            for i in off:
                contribs[i].setWeight(0.)
            for i in on:
                contribs[i].setWeight(weights[i])
| 26.502703 | 74 | 0.564348 |
337b392b52a87bc2b253537c115d718e42d456f6 | 4,105 | py | Python | tests/model_builder_unittest.py | hgl71964/dagbo | de198650268baee324bc495899e34531d10369e2 | [
"MIT"
] | null | null | null | tests/model_builder_unittest.py | hgl71964/dagbo | de198650268baee324bc495899e34531d10369e2 | [
"MIT"
] | null | null | null | tests/model_builder_unittest.py | hgl71964/dagbo | de198650268baee324bc495899e34531d10369e2 | [
"MIT"
] | null | null | null | import os
import sys
import math
import warnings
import logging
import unittest
import torch
import numpy as np
from torch import Size, Tensor
from sklearn.metrics import mean_squared_error
import gpytorch
from gpytorch.models.exact_gp import ExactGP
from gpytorch.likelihoods.gaussian_likelihood import GaussianLikelihood
import botorch
from botorch.sampling.samplers import SobolQMCNormalSampler
from botorch.optim import optimize_acqf
from botorch.posteriors.gpytorch import GPyTorchPosterior
from botorch.models.utils import gpt_posterior_settings
from dagbo.interface.parse_performance_model import parse_model
from dagbo.models.model_builder import build_model, build_perf_model_from_spec_ssa, build_input_by_topological_order
from dagbo.utils.perf_model_utils import get_dag_topological_order, find_inverse_edges
class perf_model_test(unittest.TestCase):
    """Exercises DAG construction from a parsed performance-model spec."""

    def setUp(self):
        """Build a DAG from the dummy Rosenbrock spec using random train data."""
        np.random.seed(0), torch.manual_seed(0)

        # performance model
        param_space, metric_space, obj_space, edges = parse_model(
            "dagbo/interface/rosenbrock_20d_dummy.txt")
        #"dagbo/interface/rosenbrock_3d_dummy.txt")
        #"dagbo/interface/rosenbrock_3d_correct_model.txt")
        self.param_space = param_space
        self.metric_space = metric_space
        self.obj_space = obj_space
        self.edges = edges
        # Number of fake observations per node.
        q = 2
        #print(param_space)
        #print(edges)
        acq_func_config = {
            "q": 1,
            "num_restarts": 48,
            "raw_samples": 128,
            "num_samples": 2048,
            "y_max": torch.tensor([1.]),  # for EI
            "beta": 1,  # for UCB
        }
        self.acq_func_config = acq_func_config
        # make fake input tensor
        self.train_inputs_dict = {
            i: np.random.rand(q)
            for i in list(param_space.keys())
        }
        self.train_targets_dict = {
            i: np.random.rand(q)
            for i in list(metric_space.keys()) + list(obj_space.keys())
        }
        #print(self.train_inputs_dict)
        #print(self.train_targets_dict)
        # NOTE(review): ``norm`` is currently unused in this fixture.
        norm = True
        device = "cpu"
        # build, build_perf_model_from_spec
        self.dag = build_perf_model_from_spec_ssa(
            self.train_inputs_dict, self.train_targets_dict,
            acq_func_config["num_samples"], param_space, metric_space,
            obj_space, edges, device)

    @unittest.skip("ok")
    def test_input_build(self):
        """Print the tensors produced by build_input_by_topological_order."""
        node_order = get_dag_topological_order(self.obj_space, self.edges)
        train_input_names, train_target_names, train_inputs, train_targets = build_input_by_topological_order(
            self.train_inputs_dict, self.train_targets_dict, self.param_space,
            self.metric_space, self.obj_space, node_order)
        print("input build:")
        print()
        print("input name: ", train_input_names)
        print("target name: ", train_target_names)
        print("input: ", train_inputs.shape)
        print(train_inputs)
        print("target: ", train_targets.shape)
        print(train_targets)
        print()

    @unittest.skip("ok")
    def test_print_dag(self):
        """Print the DAG and its name->tensor mappings for manual inspection."""
        print()
        print("print dag")
        print(self.dag)
        print()
        print("input map: ")
        print(self.dag.train_inputs_name2tensor_mapping)
        print("target map: ")
        print(self.dag.train_targets_name2tensor_mapping)
        print()

    @unittest.skip("ok")
    def test_print_node(self):
        """Print the 'final' node's inputs/targets for manual inspection."""
        print()
        print("print node")
        print(self.dag._modules["final"].input_names)
        print(self.dag._modules["final"].train_inputs)
        print(self.dag._modules["final"].train_targets)
        print()
if __name__ == '__main__':
    # Route INFO-level records from the root logger to stderr with a
    # timestamped format, then hand control to unittest's CLI runner.
    root_logger = logging.getLogger()
    root_logger.setLevel(logging.INFO)
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(
        logging.Formatter(
            '%(asctime)s - %(filename)s - %(levelname)s - %(message)s'))
    root_logger.addHandler(stream_handler)
    unittest.main()
| 33.104839 | 116 | 0.667235 |
331aaee9585d91924ceca92cf5f6dee92b149162 | 5,945 | py | Python | azure-devops/azext_devops/devops_sdk/v5_0/token_administration/token_administration_client.py | keithlemon/azure-devops-cli-extension | 4989e5f53650f186e638ccc186605986c76d59bf | [
"MIT"
] | 326 | 2019-04-10T12:38:23.000Z | 2022-03-31T23:07:49.000Z | azure-devops/azext_devops/devops_sdk/v5_0/token_administration/token_administration_client.py | keithlemon/azure-devops-cli-extension | 4989e5f53650f186e638ccc186605986c76d59bf | [
"MIT"
] | 562 | 2019-04-10T07:36:12.000Z | 2022-03-28T07:37:54.000Z | azure-devops/azext_devops/devops_sdk/v5_0/token_administration/token_administration_client.py | keithlemon/azure-devops-cli-extension | 4989e5f53650f186e638ccc186605986c76d59bf | [
"MIT"
] | 166 | 2019-04-10T07:59:40.000Z | 2022-03-16T14:17:13.000Z | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest import Serializer, Deserializer
from ...client import Client
from . import models
class TokenAdministrationClient(Client):
    """TokenAdministration

    :param str base_url: Service URL
    :param Authentication creds: Authenticated credentials.
    """

    def __init__(self, base_url=None, creds=None):
        super(TokenAdministrationClient, self).__init__(base_url, creds)
        # Restrict (de)serialization to the model classes defined in ``models``.
        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
        self._serialize = Serializer(client_models)
        self._deserialize = Deserializer(client_models)

    # Identifier of the TokenAdministration REST resource area.
    resource_area_identifier = '95935461-9e54-44bd-b9fb-04f4dd05d640'

    def list_identities_with_global_access_tokens(self, revocations, is_public=None):
        """ListIdentitiesWithGlobalAccessTokens.
        [Preview API] Revokes the listed OAuth authorizations.
        :param [TokenAdminRevocation] revocations: The list of identities containing the authorization IDs of the OAuth authorizations, such as session tokens retrieved by listed a users PATs, that should be checked for global access tokens.
        :param bool is_public: Set to false for PAT tokens and true for SSH tokens.
        :rtype: [str]
        """
        query_parameters = {}
        if is_public is not None:
            query_parameters['isPublic'] = self._serialize.query('is_public', is_public, 'bool')
        content = self._serialize.body(revocations, '[TokenAdminRevocation]')
        response = self._send(http_method='POST',
                              location_id='30d3a12b-66c3-4669-b016-ecb0706c8d0f',
                              version='5.0-preview.1',
                              query_parameters=query_parameters,
                              content=content)
        return self._deserialize('[str]', self._unwrap_collection(response))

    def list_personal_access_tokens(self, audience, subject_descriptor, page_size=None, continuation_token=None, si_public=None):
        """ListPersonalAccessTokens.
        [Preview API] Lists of all the session token details of the personal access tokens (PATs) for a particular user.
        :param [str] audience:
        :param :class:`<str> <azure.devops.v5_0.token_administration.models.str>` subject_descriptor: The descriptor of the target user.
        :param int page_size: The maximum number of results to return on each page.
        :param str continuation_token: An opaque data blob that allows the next page of data to resume immediately after where the previous page ended. The only reliable way to know if there is more data left is the presence of a continuation token.
        :param bool si_public:
        :rtype: :class:`<TokenAdminPagedSessionTokens> <azure.devops.v5_0.token_administration.models.TokenAdminPagedSessionTokens>`
        """
        route_values = {}
        if subject_descriptor is not None:
            route_values['subjectDescriptor'] = self._serialize.url('subject_descriptor', subject_descriptor, 'str')
        query_parameters = {}
        if page_size is not None:
            query_parameters['pageSize'] = self._serialize.query('page_size', page_size, 'int')
        if continuation_token is not None:
            query_parameters['continuationToken'] = self._serialize.query('continuation_token', continuation_token, 'str')
        if si_public is not None:
            # 'siPublic' mirrors the service's query-parameter name.
            query_parameters['siPublic'] = self._serialize.query('si_public', si_public, 'bool')
        content = self._serialize.body(audience, '[str]')
        response = self._send(http_method='POST',
                              location_id='1bb7db14-87c5-4762-bf77-a70ad34a9ab3',
                              version='5.0-preview.1',
                              route_values=route_values,
                              query_parameters=query_parameters,
                              content=content)
        return self._deserialize('TokenAdminPagedSessionTokens', response)

    def revoke_authorizations(self, revocations, host_id, is_public=None):
        """RevokeAuthorizations.
        [Preview API] Revokes the listed OAuth authorizations.
        :param :class:`<TokenAdministrationRevocation> <azure.devops.v5_0.token_administration.models.TokenAdministrationRevocation>` revocations: The list of objects containing the authorization IDs of the OAuth authorizations, such as session tokens retrieved by listed a users PATs, that should be revoked.
        :param str host_id: Host Id to display on the notification page to manage tokens.
        :param bool is_public: Set to false for PAT tokens and true for SSH tokens.
        :rtype: [SessionToken]
        """
        query_parameters = {}
        if host_id is not None:
            query_parameters['hostId'] = self._serialize.query('host_id', host_id, 'str')
        if is_public is not None:
            query_parameters['isPublic'] = self._serialize.query('is_public', is_public, 'bool')
        content = self._serialize.body(revocations, 'TokenAdministrationRevocation')
        response = self._send(http_method='POST',
                              location_id='a2e4520b-1cc8-4526-871e-f3a8f865f221',
                              version='5.0-preview.1',
                              query_parameters=query_parameters,
                              content=content)
        return self._deserialize('[SessionToken]', self._unwrap_collection(response))
| 61.927083 | 309 | 0.64508 |
567949b094da6f28a43b0ef26f880cb501bc8bff | 2,024 | py | Python | cognigraph/nodes/tests/test_MNE.py | ossadtchi/cognigraph | e616e9fa021720cc62dded649f508500af01853b | [
"MIT"
] | null | null | null | cognigraph/nodes/tests/test_MNE.py | ossadtchi/cognigraph | e616e9fa021720cc62dded649f508500af01853b | [
"MIT"
] | null | null | null | cognigraph/nodes/tests/test_MNE.py | ossadtchi/cognigraph | e616e9fa021720cc62dded649f508500af01853b | [
"MIT"
] | null | null | null | import numpy as np
import pytest
from cognigraph.nodes.processors import MNE
from cognigraph.nodes.sources import FileSource
from cognigraph.nodes.tests.prepare_tests_data import (info, # noqa
fwd_model_path,
data_path)
@pytest.fixture
def inv_model(info, fwd_model_path, data_path): # noqa
    """MNE node configured with a forward model and a FileSource parent."""
    snr = 1
    method = 'MNE'
    inv_model = MNE(
        snr=snr, fwd_path=fwd_model_path, method=method)
    inv_model.mne_info = info
    N_SEN = len(info['ch_names'])
    # Random sensor-space sample, one value per channel.
    inv_model.input = np.random.rand(N_SEN)
    parent = FileSource(data_path)
    parent.output = np.random.rand(info['nchan'], 1)
    parent.mne_info = info
    inv_model.parent = parent
    return inv_model
@pytest.fixture
def inv_model_def(info): # noqa
    """MNE node built with default parameters (no forward-model path)."""
    inv_model_def = MNE()
    parent = FileSource()
    parent.mne_info = info
    parent.output = np.random.rand(info['nchan'], 1)
    inv_model_def.parent = parent
    return inv_model_def
def test_defaults(inv_model_def):
    """A default-constructed MNE node has no forward path and no mne_info."""
    assert(inv_model_def.fwd_path is None)
    assert(inv_model_def.mne_info is None)
def test_initialize(inv_model):
    """initialize() must succeed on a fully configured node."""
    inv_model.initialize()
def test_change_api_attributes(inv_model):
    """Changing snr must recompute _lambda2 as 1 / snr**2 on update()."""
    inv_model.initialize()
    l2_old = inv_model._lambda2
    snr_old = inv_model.snr
    arbitrary_value = 1
    inv_model.snr += arbitrary_value
    inv_model.update()
    assert l2_old != inv_model._lambda2
    assert inv_model._lambda2 == 1 / (snr_old + arbitrary_value) ** 2
def test_input_hist_invalidation_defined(inv_model):
    """
    Change source attribute which triggers on_upstream_change and see if
    inv_model fails
    """
    inv_model.parent.initialize()
    inv_model.initialize()
    inv_model.parent.source_name = 'new_name'  # triggers reset for source
def test_update(inv_model):
    """_update() must run without error after _initialize()."""
    inv_model._initialize()
    inv_model._update()
def test_check_value(inv_model):
    """Assigning a negative snr must be rejected with ValueError."""
    with pytest.raises(ValueError):
        inv_model.snr = -1
| 25.620253 | 74 | 0.681324 |
8ab9a3962b268edfe034214f44f76498ad6ccb06 | 3,294 | py | Python | capsul/pipeline/custom_nodes/cvfilter_node.py | M40V/capsul | 38331bd47e7b815e6d5162f7973e33088320af6e | [
"CECILL-B"
] | null | null | null | capsul/pipeline/custom_nodes/cvfilter_node.py | M40V/capsul | 38331bd47e7b815e6d5162f7973e33088320af6e | [
"CECILL-B"
] | null | null | null | capsul/pipeline/custom_nodes/cvfilter_node.py | M40V/capsul | 38331bd47e7b815e6d5162f7973e33088320af6e | [
"CECILL-B"
] | null | null | null | '''
:class:`CVFilterNode`
---------------------
'''
from capsul.pipeline.pipeline_nodes import Node
from soma.controller import Controller
import traits.api as traits
import six
import sys
if sys.version_info[0] >= 3:
unicode = str
class CVFilterNode(Node):
    '''
    This "inert" node filters a list to separate it into (typically) learn and test sublists.

    The "outputs" may be either an output trait (to serve as inputs to
    other nodes), or an input trait (to assign output values to other nodes).
    '''
    def __init__(self, pipeline, name, is_output=True, input_type=None):
        """Declare the node's traits.

        :param pipeline: owning pipeline
        :param name: node name
        :param is_output: if False, learn_list/test_list become input traits
        :param input_type: optional trait type for list elements (default Any)
        """
        in_traitsl = ['inputs', 'fold', 'nfolds']
        if is_output:
            out_traitsl = ['learn_list', 'test_list']
        else:
            out_traitsl = []
            # Result lists are exposed as (optional) inputs instead of outputs.
            in_traitsl += ['learn_list', 'test_list']
        in_traits = []
        out_traits = []
        for tr in in_traitsl:
            in_traits.append({'name': tr, 'optional': True})
        for tr in out_traitsl:
            out_traits.append({'name': tr, 'optional': True})
        super(CVFilterNode, self).__init__(pipeline, name, in_traits,
                                           out_traits)
        if input_type:
            ptype = input_type
        else:
            ptype = traits.Any(traits.Undefined)
        self.add_trait('inputs', traits.List(ptype, output=False))
        self.add_trait('fold', traits.Int())
        self.add_trait('nfolds', traits.Int(10))
        self.add_trait('learn_list', traits.List(ptype, output=is_output))
        self.add_trait('test_list', traits.List(ptype, output=is_output))
        self.set_callbacks()

    def set_callbacks(self, update_callback=None):
        """Re-run the filter whenever inputs, fold or nfolds change."""
        inputs = ['inputs', 'fold', 'nfolds']
        if update_callback is None:
            update_callback = self.filter_callback
        for name in inputs:
            self.on_trait_change(update_callback, name)

    def filter_callback(self):
        """Split ``inputs`` into learn/test lists for the current fold.

        The list is divided into ``nfolds`` contiguous folds; the first
        ``len(inputs) % nfolds`` folds get one extra element. Fold ``fold``
        becomes the test list, the remainder the learn list.
        """
        # Base fold size and number of folds receiving one extra element.
        n = len(self.inputs) // self.nfolds
        ninc = len(self.inputs) % self.nfolds
        begin = self.fold * n + min((ninc, self.fold))
        end = min((self.fold + 1) * n + min((ninc, self.fold + 1)),
                  len(self.inputs))
        self.learn_list = self.inputs[:begin] + self.inputs[end:]
        self.test_list = self.inputs[begin:end]

    def configured_controller(self):
        """Return a controller capturing this node's current configuration."""
        c = self.configure_controller()
        # Name of the inner trait type, e.g. 'Str' or 'File'.
        c.param_type = self.trait('inputs').inner_traits[0].trait_type.__class__.__name__
        c.is_output = self.trait('learn_list').output
        return c

    @classmethod
    def configure_controller(cls):
        """Return a default configuration controller for this node type."""
        c = Controller()
        c.add_trait('param_type', traits.Str('Str'))
        c.add_trait('is_output', traits.Bool(True))
        return c

    @classmethod
    def build_node(cls, pipeline, name, conf_controller):
        """Instantiate a CVFilterNode from a configuration controller."""
        t = None
        if conf_controller.param_type == 'Str':
            t = traits.Str(traits.Undefined)
        elif conf_controller.param_type == 'File':
            t = traits.File(traits.Undefined)
        elif conf_controller.param_type not in (None, traits.Undefined):
            # Fall back to looking the trait type up by name in ``traits``.
            t = getattr(traits, conf_controller.param_type)()
        node = CVFilterNode(pipeline, name, conf_controller.is_output,
                            input_type=t)
        return node
| 34.673684 | 93 | 0.607165 |
79b8379256bd5fb53f00b6bad947053a6e215138 | 4,520 | py | Python | scripts/regridding/KdTree_IDW.py | PhilipeRLeal/xarray_case_studies | b7771fefde658f0d450cbddd94637ce7936c5f52 | [
"MIT"
] | 1 | 2022-02-22T01:07:31.000Z | 2022-02-22T01:07:31.000Z | scripts/regridding/KdTree_IDW.py | PhilipeRLeal/xarray_case_studies | b7771fefde658f0d450cbddd94637ce7936c5f52 | [
"MIT"
] | null | null | null | scripts/regridding/KdTree_IDW.py | PhilipeRLeal/xarray_case_studies | b7771fefde658f0d450cbddd94637ce7936c5f52 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Thu Dec 10 13:57:10 2020
@author: Philipe_Leal
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os, sys
import geopandas as gpd
import cartopy.crs as ccrs
pd.set_option('display.max_rows', 5000)
pd.set_option('display.max_columns', 5000)
import xarray as xr
from IDW_over_xarray import (create_output_grid,
get_coord_limits_from_dataarray,
apply_kernel_over_distance_array)
from submodules.kdtree import (KDTree, get_tutorial_dataset)
from submodules.array_to_xarray import rebuild_dataarray
end = '\n'*2 + '-'*40 + '\n'*2
def standard_plot_dataarray(da, xdim, ydim, suptitle):
    """Render *da* on a PlateCarree map with coastlines and labelled gridlines."""
    crs = ccrs.PlateCarree()
    figure, axis = plt.subplots(subplot_kw={'projection': crs})
    axis.coastlines()
    axis.gridlines(draw_labels=True)
    da.plot(ax=axis, x=xdim, y=ydim, transform=crs)
    figure.suptitle(suptitle)
    figure.tight_layout()
    figure.show()
if '__main__' == __name__:
    ######################################################################
    # Load the tutorial dataset and plot the time-mean field
    ######################################################################
    Tair = get_tutorial_dataset()
    da = Tair.mean('time')
    suptitle='Original data'
    standard_plot_dataarray(da, xdim='xc', ydim='yc', suptitle=suptitle)
    ######################################################################
    # Build a regular 5x5 output grid covering the data extent
    ######################################################################
    xmin, xmax, ymin, ymax = get_coord_limits_from_dataarray(da, 'xc', 'yc')
    xres = 5
    yres = 5
    K = 1
    pixels_XY, output_shape, Xcoords, Ycoords = create_output_grid(xmin=xmin, xmax=xmax, xres=xres, ymin=ymin, ymax=ymax, yres=yres)
    # Flip columns from (x, y) to (y, x) -- the name suggests the tree is
    # queried in (y, x) order; NOTE(review): confirm against KDTree.query.
    pixels_YX = np.flip(pixels_XY, axis=1)
    ##########################################################################
    ground_pixel_tree = KDTree(da, xcor='xc', ycor='yc')
    ######################################################################
    # Nearest K points for Paris
    ######################################################################
    paris = (48.8566, 2.3522)
    K = 3
    pixels_index , distances = ground_pixel_tree.query(paris, k=K)
    # Normalised inverse-distance weights of the K neighbours.
    # NOTE(review): ``dd`` is not used further below.
    dd= (1/distances)
    dd = dd/dd.sum()
    Nearest_da = da[pixels_index]
    Nearest_coords = Nearest_da.coords
    IDW = ground_pixel_tree.idw(paris, k=K)
    ################
    #
    # KNN mean with K = 1
    #
    #############
    K=1
    Pixels_IDW, distances = ground_pixel_tree.knn(pixels_YX, k=K)
    # Average over neighbours only when the result exposes a 'knn' attribute.
    if hasattr(Pixels_IDW, 'knn'):
        Pixels_IDW_mean = Pixels_IDW.mean('knn')
    else:
        Pixels_IDW_mean = Pixels_IDW
    Pixels_IDW_mean = rebuild_dataarray(Pixels_IDW_mean.values.reshape(output_shape),
                                        Xcoords, Ycoords, xdim='lon', ydim='lat')
    suptitle = 'KNN mean with K: {0}'.format(K)
    standard_plot_dataarray(Pixels_IDW_mean, xdim='lon', ydim='lat', suptitle=suptitle)
    ################
    #
    # KNN mean with K = N
    #
    #############
    K=10
    Pixels_IDW, distances = ground_pixel_tree.knn(pixels_YX, k=K)
    if hasattr(Pixels_IDW, 'knn'):
        Pixels_IDW_mean = Pixels_IDW.mean('knn')
    else:
        Pixels_IDW_mean = Pixels_IDW
    Pixels_IDW_mean = rebuild_dataarray(Pixels_IDW_mean.values.reshape(output_shape),
                                        Xcoords, Ycoords,
                                        xdim='lon', ydim='lat')
    suptitle = 'KNN mean with K: {0}'.format(K)
    standard_plot_dataarray(Pixels_IDW_mean,
                            xdim='lon', ydim='lat',
                            suptitle=suptitle)
    ############################################
    ################
    #
    # IDW interpolation onto the full output grid
    #
    #############
    K = 5
    Pixels_IDW = ground_pixel_tree.idw(pixels_YX, k=K)
    Pixels_IDW_da = rebuild_dataarray(Pixels_IDW.reshape(output_shape), Xcoords, Ycoords, xdim='lon', ydim='lat')
    suptitle = 'IDW with K: {0}'.format(K)
    standard_plot_dataarray(Pixels_IDW_da, xdim='lon', ydim='lat', suptitle=suptitle)
| 23.179487 | 132 | 0.485398 |
5dce39c99ce8ccb5a8fcd3394c666283a32402d8 | 464 | py | Python | src/Shared/Helpers/Transformer.py | DigiChanges/python-experience | 3332d2c4d922a5eb302fa151582a4f63c668a570 | [
"MIT"
] | null | null | null | src/Shared/Helpers/Transformer.py | DigiChanges/python-experience | 3332d2c4d922a5eb302fa151582a4f63c668a570 | [
"MIT"
] | null | null | null | src/Shared/Helpers/Transformer.py | DigiChanges/python-experience | 3332d2c4d922a5eb302fa151582a4f63c668a570 | [
"MIT"
] | null | null | null | import abc
from abc import abstractmethod
from typing import Any
from mongoengine import QuerySet
class Transformer(abc.ABC):
    """Base class mapping raw objects to a transformed representation.

    Subclasses implement :meth:`transform` for a single element;
    :meth:`handle` additionally maps whole query sets element-wise.
    """

    @abstractmethod
    def transform(self, data: Any) -> Any:
        """Transform one element (implemented by subclasses)."""
        pass

    def handle(self, data: Any) -> Any:
        """Apply :meth:`transform` to *data*, mapping QuerySets element-wise."""
        if isinstance(data, QuerySet):
            return [self.transform(element) for element in data]
        return self.transform(data)
| 22.095238 | 77 | 0.637931 |
745da1aebce0d496a5d99fa2315d385058d635d4 | 1,081 | py | Python | MySQL/mysql_homwork2/core/login.py | MMingLeung/Python_Study | 4ff1d02d2b6dd54e96f7179fa000548936b691e7 | [
"MIT"
] | 3 | 2017-12-27T14:08:17.000Z | 2018-02-10T13:01:08.000Z | MySQL/mysql_homwork2/core/login.py | MMingLeung/Python_Study | 4ff1d02d2b6dd54e96f7179fa000548936b691e7 | [
"MIT"
] | 4 | 2017-05-24T10:37:05.000Z | 2021-06-10T18:35:32.000Z | MySQL/mysql_homwork2/core/login.py | MMingLeung/Python_Study | 4ff1d02d2b6dd54e96f7179fa000548936b691e7 | [
"MIT"
] | 1 | 2018-02-14T19:05:30.000Z | 2018-02-14T19:05:30.000Z | #! -*- coding:utf8 -*-
#! /usr/bin/Python
from lib import db_handler
from lib import permission_manager, user_manager
def login():
'Login module'
while True:
username = input("Please input your username: ")
password = input("Please input your password: ")
db_handler_obj = db_handler.OperatrDB()
user_id, user_name, per = db_handler_obj.output_name_permission(username, password)
if user_id :
print("login success!, your permission is %s" % per)
if per == "admin":
admin()
else :
print("username or password is incorrect!")
def admin():
'Admin interface'
while True:
msg = """
===========功能选择===========
1、权限管理
2、用户管理
"""
print(msg)
ipt = input(">> :")
if ipt == "1":
p_m = permission_manager.Permission()
p_m.run()
elif ipt == "2":
u_m = user_manager.User()
u_m.run()
elif ipt == 'exit':
break | 28.447368 | 91 | 0.506013 |
01aba7faa50d4f06b3584fb6bde6f748d52459ea | 2,232 | py | Python | api/main.py | enacom/python-bootcamp | e99847442bdd2a6ddbc7fd1d745f7e54950697d9 | [
"MIT"
] | null | null | null | api/main.py | enacom/python-bootcamp | e99847442bdd2a6ddbc7fd1d745f7e54950697d9 | [
"MIT"
] | null | null | null | api/main.py | enacom/python-bootcamp | e99847442bdd2a6ddbc7fd1d745f7e54950697d9 | [
"MIT"
] | 1 | 2022-03-31T02:11:46.000Z | 2022-03-31T02:11:46.000Z | """
ENACOM Python bootcamp
API (interface de programação de aplicações)
para resolução de problemas de otimização.
"""
from typing import Union
from fastapi import FastAPI
from http import HTTPStatus
from api.schemas import (
HealthCheckResponse, OptimizationInput, NotFoundError, OptimizationOutput
)
from api.optimization import problem
# FastAPI application; title/version/description are also echoed by the root
# endpoint and shown in the generated API docs.
api = FastAPI(
    title='ENACOM Python bootcamp API',
    version='0.1.1',
    description=(
        # Fix: added the missing space between the two implicitly
        # concatenated fragments (previously rendered as "aplicações)para").
        'API (Interface de programação de aplicações) '
        'para resolução de problemas de otimização.\n'
    ),
)
@api.get(
    '/'
)
def root():
    # Greets with the API title, version and description (no response model;
    # deliberately no docstring -- FastAPI would render it in the docs).
    response = {
        "message": f"{api.title}. versão: {api.version}\n{api.description}"
    }
    return response
@api.get(
    '/healthcheck',
    tags=['healthcheck'],
    summary='Integridade do sistema',
    description='Verifica se o servidor da API está ativo',
    response_model=HealthCheckResponse
)
def healthcheck():
    # Liveness probe: returns a default-constructed HealthCheckResponse.
    message = HealthCheckResponse()
    return message
@api.post(
    '/results/{code}',
    summary='Resultado da otimização por código',
    response_model=OptimizationOutput,
    responses={
        HTTPStatus.NOT_FOUND.value: {
            'description': 'Resultado da otimização não encontrado',
            'model': NotFoundError
        }
    }
)
def post_results_code(code: int) -> Union[OptimizationOutput, NotFoundError]:
    # Placeholder implementation: results are not persisted yet, so the
    # response only echoes the requested code.
    response = OptimizationOutput(
        code=code,
        message=(
            # Fix: added the missing space between the two implicitly
            # concatenated string fragments ("...salvo." / "Isso...").
            f"Resultado do problema {code} ainda não está sendo salvo. "
            "Isso é parte do desafio!"
        ),
        # TODO: Resultados (models e allocation) do problema de otimização
        # precisam ser preenchido nesse parâmetro
        results=None
    )
    return response
@api.post(
    '/solve',
    summary='Resolver o problema de otimização',
    responses={
        HTTPStatus.NOT_FOUND.value: {
            'description': 'Otimização não resolvida',
            'model': NotFoundError
        }
    }
)
def post_solve(
    optimization_input: OptimizationInput
):
    """
    Resolver problema de otimização
    """
    # Delegates to the optimization module; only an acknowledgement is
    # returned, not the solve result itself.
    problem.solve(
        optimization_input=optimization_input
    )
    response = {"message": "Problema recebido."}
    return response
| 22.545455 | 77 | 0.65009 |
a513c9e024282c0d0ae3c7123b450b84ff729cee | 9,889 | py | Python | python/GafferSceneTest/CameraTest.py | sebaDesmet/gaffer | 47b2d093c40452bd77947e3b5bd0722a366c8d59 | [
"BSD-3-Clause"
] | null | null | null | python/GafferSceneTest/CameraTest.py | sebaDesmet/gaffer | 47b2d093c40452bd77947e3b5bd0722a366c8d59 | [
"BSD-3-Clause"
] | null | null | null | python/GafferSceneTest/CameraTest.py | sebaDesmet/gaffer | 47b2d093c40452bd77947e3b5bd0722a366c8d59 | [
"BSD-3-Clause"
] | null | null | null | ##########################################################################
#
# Copyright (c) 2012, John Haddon. All rights reserved.
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import math
import imath
import IECore
import IECoreScene
import Gaffer
import GafferTest
import GafferScene
import GafferSceneTest
class CameraTest( GafferSceneTest.SceneTestCase ) :

	# NOTE: the deprecated TestCase aliases failUnless/failIf (deprecated
	# since Python 3.2 and removed in 3.12) have been replaced below with
	# assertTrue/assertFalse; the asserted expressions are unchanged.

	def testConstruct( self ) :

		p = GafferScene.Camera()
		self.assertEqual( p.getName(), "Camera" )
		self.assertEqual( p["name"].getValue(), "camera" )

	def testCompute( self ) :

		p = GafferScene.Camera()
		p["projection"].setValue( "perspective" )
		p["fieldOfView"].setValue( 45 )

		self.assertEqual( p["out"].object( "/" ), IECore.NullObject() )
		self.assertEqual( p["out"].transform( "/" ), imath.M44f() )
		self.assertEqual( p["out"].childNames( "/" ), IECore.InternedStringVectorData( [ "camera" ] ) )
		self.assertEqual( p["out"].transform( "/camera" ), imath.M44f() )
		self.assertEqual( p["out"].childNames( "/camera" ), IECore.InternedStringVectorData() )

		o = p["out"].object( "/camera" )
		self.assertTrue( isinstance( o, IECoreScene.Camera ) )
		self.assertEqual( o.getProjection(), "perspective" )
		self.assertEqual( o.getAperture(), imath.V2f( 1.0 ) )
		self.assertAlmostEqual( o.getFocalLength(), 1.0 / ( 2.0 * math.tan( IECore.degreesToRadians( 0.5 * 45 ) ) ), places = 6 )

		self.assertSceneValid( p["out"] )

	def testHashes( self ) :

		p = GafferScene.Camera()
		p["projection"].setValue( "perspective" )
		p["fieldOfView"].setValue( 45 )

		for i in p['renderSettingOverrides']:
			i["enabled"].setValue( True )

		with Gaffer.Context() as c :

			c["scene:path"] = IECore.InternedStringVectorData()
			# We ignore the enabled and sets plugs because they aren't hashed (instead
			# their values are used to decide how the hash should be computed). We ignore
			# the transform plug because it isn't affected by any inputs when the path is "/".
			# We ignore the set plug because that needs a different context - we test that below.
			self.assertHashesValid( p, inputsToIgnore = [ p["enabled"], p["sets"] ], outputsToIgnore = [ p["out"]["transform"], p["out"]["set"] ] )

			c["scene:path"] = IECore.InternedStringVectorData( [ "camera" ] )
			# We ignore the childNames because it doesn't use any inputs to compute when
			# the path is not "/". We ignore the bound plug because although it has a dependency
			# on the transform plug, that is only relevant when the path is "/".
			self.assertHashesValid( p, inputsToIgnore = [ p["enabled"], p["sets"] ], outputsToIgnore = [ p["out"]["childNames"], p["out"]["bound"], p["out"]["set"] ] )

		with Gaffer.Context() as c :
			c["scene:setName"] = IECore.InternedStringData( "__cameras" )
			self.assertHashesValid( p, inputsToIgnore = [ p["enabled"], p["sets"] ], outputsToIgnore = [ c for c in p["out"] if c != p["out"]["set"] ] )

	def testBound( self ) :

		p = GafferScene.Camera()
		p["projection"].setValue( "perspective" )
		p["fieldOfView"].setValue( 45 )

		self.assertFalse( p["out"].bound( "/" ).isEmpty() )
		self.assertFalse( p["out"].bound( "/camera" ).isEmpty() )

	def testClippingPlanes( self ) :

		p = GafferScene.Camera()

		o = p["out"].object( "/camera" )
		self.assertEqual( o.parameters()["clippingPlanes"].value, imath.V2f( 0.01, 100000 ) )

		p["clippingPlanes"].setValue( imath.V2f( 1, 10 ) )
		o = p["out"].object( "/camera" )
		self.assertEqual( o.parameters()["clippingPlanes"].value, imath.V2f( 1, 10 ) )

	def testEnableBehaviour( self ) :

		c = GafferScene.Camera()
		self.assertTrue( c.enabledPlug().isSame( c["enabled"] ) )
		self.assertEqual( c.correspondingInput( c["out"] ), None )
		self.assertEqual( c.correspondingInput( c["enabled"] ), None )
		self.assertEqual( c.correspondingInput( c["projection"] ), None )
		self.assertEqual( c.correspondingInput( c["fieldOfView"] ), None )

	def testCameraSet( self ) :

		c = GafferScene.Camera()

		cameraSet = c["out"].set( "__cameras" )
		self.assertEqual(
			cameraSet,
			IECore.PathMatcherData(
				IECore.PathMatcher( [ "/camera" ] )
			)
		)

		c["name"].setValue( "renderCam" )
		cameraSet = c["out"].set( "__cameras" )
		self.assertEqual(
			cameraSet,
			IECore.PathMatcherData(
				IECore.PathMatcher( [ "/renderCam" ] )
			)
		)

	def testDirtyPropagation( self ) :

		c = GafferScene.Camera()

		dirtied = GafferTest.CapturingSlot( c.plugDirtiedSignal() )
		c["transform"]["translate"]["x"].setValue( 10 )
		self.assertTrue( c["out"]["transform"] in [ p[0] for p in dirtied ] )

		dirtied = GafferTest.CapturingSlot( c.plugDirtiedSignal() )
		c["name"].setValue( "renderCam" )
		self.assertTrue( c["out"]["childNames"] in [ p[0] for p in dirtied ] )
		self.assertTrue( c["out"]["set"] in [ p[0] for p in dirtied ] )

		dirtied = GafferTest.CapturingSlot( c.plugDirtiedSignal() )
		c["projection"].setValue( "orthographic" )
		self.assertTrue( c["out"]["object"] in [ p[0] for p in dirtied ] )
		self.assertTrue( c["out"]["bound"] in [ p[0] for p in dirtied ] )

		dirtied = GafferTest.CapturingSlot( c.plugDirtiedSignal() )
		c["fieldOfView"].setValue( 100 )
		self.assertTrue( c["out"]["object"] in [ p[0] for p in dirtied ] )
		self.assertTrue( c["out"]["bound"] in [ p[0] for p in dirtied ] )

		dirtied = GafferTest.CapturingSlot( c.plugDirtiedSignal() )
		c["clippingPlanes"]["x"].setValue( 100 )
		self.assertTrue( c["out"]["object"] in [ p[0] for p in dirtied ] )
		self.assertTrue( c["out"]["bound"] in [ p[0] for p in dirtied ] )

	def testFrustum( self ) :

		c = GafferScene.Camera()

		self.assertAlmostEqual( c["out"].object( "/camera" ).frustum( IECoreScene.Camera.FilmFit.Distort ).max()[0] * 2.0, 2.0 * math.tan( 0.5 * math.radians( c["fieldOfView"].getValue() ) ), places = 6 )

		c["fieldOfView"].setValue( 100 )
		self.assertAlmostEqual( c["out"].object( "/camera" ).frustum( IECoreScene.Camera.FilmFit.Distort ).max()[0] * 2.0, 2.0 * math.tan( 0.5 * math.radians( c["fieldOfView"].getValue() ) ), places = 6 )
		self.assertAlmostEqual( c["out"].object( "/camera" ).frustum( IECoreScene.Camera.FilmFit.Distort ).max()[1] * 2.0, 2.0 * math.tan( 0.5 * math.radians( c["fieldOfView"].getValue() ) ), places = 6 )

		c["apertureAspectRatio"].setValue( 3 )
		self.assertAlmostEqual( c["out"].object( "/camera" ).frustum( IECoreScene.Camera.FilmFit.Distort ).max()[0] * 2.0, 2.0 * math.tan( 0.5 * math.radians( c["fieldOfView"].getValue() ) ), places = 6 )
		self.assertAlmostEqual( c["out"].object( "/camera" ).frustum( IECoreScene.Camera.FilmFit.Distort ).max()[1] * 2.0, 2.0 / 3.0 * math.tan( 0.5 * math.radians( c["fieldOfView"].getValue() ) ), places = 6 )

		c["perspectiveMode"].setValue( GafferScene.Camera.PerspectiveMode.ApertureFocalLength )
		self.assertNotAlmostEqual( c["out"].object( "/camera" ).frustum( IECoreScene.Camera.FilmFit.Distort ).max()[0] * 2.0, 2.0 * math.tan( 0.5 * math.radians( c["fieldOfView"].getValue() ) ), places = 6 )
		self.assertAlmostEqual( c["out"].object( "/camera" ).frustum( IECoreScene.Camera.FilmFit.Distort ).max()[0] * 2.0, c["aperture"].getValue()[0] / c["focalLength"].getValue(), places = 6 )

		c["aperture"].setValue( imath.V2f( 100 ) )
		self.assertAlmostEqual( c["out"].object( "/camera" ).frustum( IECoreScene.Camera.FilmFit.Distort ).max()[0] * 2.0, c["aperture"].getValue()[0] / c["focalLength"].getValue(), places = 6 )

		c["focalLength"].setValue( 200 )
		self.assertAlmostEqual( c["out"].object( "/camera" ).frustum( IECoreScene.Camera.FilmFit.Distort ).max()[0] * 2.0, c["aperture"].getValue()[0] / c["focalLength"].getValue(), places = 6 )

		c["projection"].setValue( "orthographic" )
		self.assertNotAlmostEqual( c["out"].object( "/camera" ).frustum( IECoreScene.Camera.FilmFit.Distort ).max()[0] * 2.0, c["aperture"].getValue()[0] / c["focalLength"].getValue(), places = 6 )
		self.assertEqual( c["out"].object( "/camera" ).frustum( IECoreScene.Camera.FilmFit.Distort ).max() * 2.0, c["orthographicAperture"].getValue() )

		c["orthographicAperture"].setValue( imath.V2f( 0.1, 12 ) )
		self.assertEqual( c["out"].object( "/camera" ).frustum( IECoreScene.Camera.FilmFit.Distort ).max() * 2.0, c["orthographicAperture"].getValue() )
# Allow the test file to be run directly as a script.
if __name__ == "__main__":
    unittest.main()
| 46.21028 | 204 | 0.668116 |
5d2787bab2999aa1d70545b8bf952d1309eb46e1 | 4,096 | py | Python | sidekick-seq/sidekick/seq/lib_augmenting.py | fabiommendes/sidekick | 993ae7b8496347ad9720d3ff11e10ab946c3a800 | [
"MIT"
] | 32 | 2017-08-10T09:42:51.000Z | 2022-03-18T17:21:26.000Z | sidekick-seq/sidekick/seq/lib_augmenting.py | fabiommendes/sidekick | 993ae7b8496347ad9720d3ff11e10ab946c3a800 | [
"MIT"
] | 1 | 2019-04-10T13:07:45.000Z | 2019-04-18T02:27:47.000Z | sidekick-seq/sidekick/seq/lib_augmenting.py | fabiommendes/sidekick | 993ae7b8496347ad9720d3ff11e10ab946c3a800 | [
"MIT"
] | 5 | 2017-10-10T13:43:23.000Z | 2019-06-18T00:09:15.000Z | import itertools
from collections import deque
from .iter import Iter, generator
from .lib_basic import uncons
from .. import _toolz as toolz
from ..functions import fn
from ..typing import Seq, TYPE_CHECKING, NOT_GIVEN, Func, T
if TYPE_CHECKING:
from .. import api as sk # noqa: F401
@fn.curry(2)
def interpose(elem, seq: Seq) -> Iter:
    """
    Introduce element between each pair of elements in seq.
    Examples:
        >>> sk.interpose("a", [1, 2, 3])
        sk.iter([1, 'a', 2, 'a', 3])
    """
    # Delegate to toolz.interpose and wrap the lazy result in sidekick's Iter.
    return Iter(toolz.interpose(elem, seq))
@fn.curry(2)
def pad(value, seq: Seq, size: int = None, step: int = None) -> Iter:
    """
    Pad ``seq`` with ``value`` once it is exhausted.

    Args:
        value:
            Padding element.
        seq:
            Input sequence.
        size:
            Optional maximum length of the result.
        step:
            If given, pad only up to a multiple of ``step``.

    Examples:
        >>> sk.pad(0, [1, 2, 3])
        sk.iter([1, 2, 3, 0, 0, 0, ...])
        >>> sk.pad(0, [1, 2, 3], step=2)
        sk.iter([1, 2, 3, 0])
        >>> sk.pad(0, [1, 2, 3], size=5)
        sk.iter([1, 2, 3, 0, 0])
    """
    if step is None:
        padded = itertools.chain(seq, itertools.repeat(value))
    else:
        padded = _pad_multiple(value, seq, step)
    return Iter(padded if size is None else itertools.islice(padded, size))
def _pad_multiple(value, seq, n):
i = 0
for i, x in enumerate(seq, 1):
yield x
i %= n
if i:
yield from itertools.repeat(value, i)
@fn.curry(2)
def pad_with(func: Func, seq: Seq, nargs=1, default=NOT_GIVEN) -> Iter:
    """
    Pad sequence iterating the last item with func.
    If func is None, fill in with the last value or default, if sequence
    is empty.
    Args:
        func:
            A function to iterate the tail of sequence.
        seq:
            Input sequence.
        nargs:
            The number of elements to pass to func to construct the next
            argument.
        default:
            Fill sequence with this value if it is not large enough.
    Examples:
        >>> sk.pad_with(None, [1, 2, 3])
        sk.iter([1, 2, 3, 3, 3, 3, ...])
        >>> sk.pad_with(op.add(2), [1, 2, 3])
        sk.iter([1, 2, 3, 5, 7, 9, ...])
    Fibonacci numbers
        >>> sk.pad_with(op.add, [1, 1], nargs=2)
        sk.iter([1, 1, 2, 3, 5, 8, ...])
    """
    # Dispatch on how the padding tail is produced.
    if func is None:
        out = _pad_last(seq, default)
    elif nargs == 1:
        out = _pad_iterate(func, seq, default)
    else:
        if default is not NOT_GIVEN:
            # Seed the iteration with nargs copies of the default value.
            seq = itertools.chain(itertools.repeat(default, nargs), seq)
            # NOTE(review): islice(..., nargs) keeps only the *first* nargs
            # items, i.e. just the prepended defaults; this looks like it
            # should be islice(..., nargs, None) to skip them — confirm.
            out = itertools.islice(_pad_iterate_n(nargs, func, seq), nargs)
        else:
            out = _pad_iterate_n(nargs, func, seq)
    return Iter(out)
def _pad_last(seq, default):
    """Yield ``seq`` and then repeat its final element forever.

    ``default`` is forwarded to :func:`uncons`, so it only matters for an
    empty input — see uncons for the exact empty-sequence semantics.
    """
    x, rest = uncons(seq, default=default)
    yield x
    for x in rest:
        yield x
    # ``x`` now holds the last element seen (or ``default`` when seq was empty).
    yield from itertools.repeat(x)
def _pad_iterate(func, seq, default):
    """Yield ``seq``, then keep yielding ``func`` applied to the previous item."""
    x, rest = uncons(seq, default=default)
    yield x
    for x in rest:
        yield x
    # Extend the sequence forever by iterating func from the last element.
    while True:
        x = func(x)
        yield x
def _pad_iterate_n(n, func, seq):
args = deque((), n)
for x in seq:
yield x
args.append(x)
if len(args) != n:
raise ValueError(f"sequence must have at least {n} items")
while True:
x = func(*args)
yield x
args.append(x)
@fn.curry(2)
def append(elem: T, seq: Seq[T]) -> Iter[T]:
    """
    Return a new sequence with element appended to the end.
    Examples:
        >>> sk.append(4, [1, 2, 3])
        sk.iter([1, 2, 3, 4])
    """
    # Lazy concatenation: seq is not consumed until the Iter is iterated.
    return Iter(itertools.chain(seq, [elem]))
@fn.curry(3)
@generator
def insert(idx: int, value: T, seq: Seq[T]) -> Iter[T]:
    """
    Return sequence that inserts value at the given index.
    Examples:
        >>> sk.insert(2, 2.5, [1, 2, 3])
        sk.iter([1, 2, 2.5, 3])
    """
    seq = iter(seq)
    # Emit the first idx items, then the new value, then the rest lazily.
    yield from itertools.islice(seq, idx)
    yield value
    yield from seq
| 23.813953 | 75 | 0.552734 |
c8133b3810419e64bb4e045c16fdc7b3d688f3f0 | 5,265 | py | Python | src/schemathesis/lazy.py | PrayagS/schemathesis | 80eb0a689ca197a0999e80b35d5dcbbbd88ddf4b | [
"MIT"
] | 1 | 2021-03-24T08:55:10.000Z | 2021-03-24T08:55:10.000Z | src/schemathesis/lazy.py | PrayagS/schemathesis | 80eb0a689ca197a0999e80b35d5dcbbbd88ddf4b | [
"MIT"
] | null | null | null | src/schemathesis/lazy.py | PrayagS/schemathesis | 80eb0a689ca197a0999e80b35d5dcbbbd88ddf4b | [
"MIT"
] | null | null | null | from inspect import signature
from typing import Any, Callable, Dict, Optional, Union
import attr
import pytest
from _pytest.fixtures import FixtureRequest
from pytest_subtests import SubTests
from .exceptions import InvalidSchema
from .hooks import HookDispatcher, HookScope
from .models import Endpoint
from .schemas import BaseSchema
from .types import Filter, GenericTest, NotSet
from .utils import NOT_SET
@attr.s(slots=True)  # pragma: no mutate
class LazySchema:
    # Name of the pytest fixture that yields the actual BaseSchema instance.
    fixture_name: str = attr.ib()  # pragma: no mutate
    # Default endpoint filters, applied unless parametrize() overrides them.
    method: Optional[Filter] = attr.ib(default=NOT_SET)  # pragma: no mutate
    endpoint: Optional[Filter] = attr.ib(default=NOT_SET)  # pragma: no mutate
    tag: Optional[Filter] = attr.ib(default=NOT_SET)  # pragma: no mutate
    operation_id: Optional[Filter] = attr.ib(default=NOT_SET)  # pragma: no mutate
    hooks: HookDispatcher = attr.ib(factory=lambda: HookDispatcher(scope=HookScope.SCHEMA))  # pragma: no mutate
    validate_schema: bool = attr.ib(default=True)  # pragma: no mutate
    def parametrize(  # pylint: disable=too-many-arguments
        self,
        method: Optional[Filter] = NOT_SET,
        endpoint: Optional[Filter] = NOT_SET,
        tag: Optional[Filter] = NOT_SET,
        operation_id: Optional[Filter] = NOT_SET,
        validate_schema: Union[bool, NotSet] = NOT_SET,
    ) -> Callable:
        """Decorator that turns a user function into a pytest test which
        runs it against every matching API endpoint as a subtest.

        Arguments left as NOT_SET fall back to the schema-level filters.
        """
        if method is NOT_SET:
            method = self.method
        if endpoint is NOT_SET:
            endpoint = self.endpoint
        if tag is NOT_SET:
            tag = self.tag
        if operation_id is NOT_SET:
            operation_id = self.operation_id
        def wrapper(func: Callable) -> Callable:
            def test(request: FixtureRequest, subtests: SubTests) -> None:
                """The actual test, which is executed by pytest."""
                # Propagate hooks registered on the outer test onto the user
                # function so schema loading can see them.
                if hasattr(test, "_schemathesis_hooks"):
                    func._schemathesis_hooks = test._schemathesis_hooks  # type: ignore
                schema = get_schema(
                    request=request,
                    name=self.fixture_name,
                    method=method,
                    endpoint=endpoint,
                    tag=tag,
                    operation_id=operation_id,
                    hooks=self.hooks,
                    test_function=func,
                    validate_schema=validate_schema,
                )
                fixtures = get_fixtures(func, request)
                # Changing the node id is required for better reporting - the method and endpoint will appear there
                node_id = subtests.item._nodeid
                settings = getattr(test, "_hypothesis_internal_use_settings", None)
                tests = list(schema.get_all_tests(func, settings))
                # Keep pytest's collected-test counter in sync with subtests.
                request.session.testscollected += len(tests)
                for _endpoint, sub_test in tests:
                    actual_test = get_test(sub_test)
                    subtests.item._nodeid = _get_node_name(node_id, _endpoint)
                    run_subtest(_endpoint, fixtures, actual_test, subtests)
                # Restore the original node id after all subtests ran.
                subtests.item._nodeid = node_id
            # Needed to prevent a failure when settings are applied to the test function
            test.is_hypothesis_test = True  # type: ignore
            return test
        return wrapper
def get_test(test: Union[Callable, InvalidSchema]) -> Callable:
    """Return ``test`` unchanged, or — when it is an ``InvalidSchema``
    exception — a substitute test that fails with the exception message."""
    if not isinstance(test, InvalidSchema):
        return test
    message = test.args[0]
    def actual_test(*args: Any, **kwargs: Any) -> None:
        pytest.fail(message)
    return actual_test
def _get_node_name(node_id: str, endpoint: Endpoint) -> str:
    """Make a test node name. For example: test_api[GET:/users]."""
    return "{}[{}:{}]".format(node_id, endpoint.method, endpoint.full_path)
def run_subtest(endpoint: Endpoint, fixtures: Dict[str, Any], sub_test: Callable, subtests: SubTests) -> None:
    """Run the given subtest with pytest fixtures."""
    # Each endpoint is reported as its own subtest, labelled method + path.
    with subtests.test(method=endpoint.method, path=endpoint.path):
        sub_test(**fixtures)
def get_schema(
    *,
    request: FixtureRequest,
    name: str,
    method: Optional[Filter] = None,
    endpoint: Optional[Filter] = None,
    tag: Optional[Filter] = None,
    operation_id: Optional[Filter] = None,
    test_function: GenericTest,
    hooks: HookDispatcher,
    validate_schema: Union[bool, NotSet] = NOT_SET,
) -> BaseSchema:
    """Loads a schema from the fixture.

    Resolves the fixture named ``name`` via pytest, validates it is a
    BaseSchema, and returns a clone carrying the given filters and hooks.

    Raises:
        ValueError: if the fixture value is not a BaseSchema instance.
    """
    # pylint: disable=too-many-arguments
    schema = request.getfixturevalue(name)
    if not isinstance(schema, BaseSchema):
        raise ValueError(f"The given schema must be an instance of BaseSchema, got: {type(schema)}")
    # Cloning keeps the original fixture value untouched for other tests.
    return schema.clone(
        method=method,
        endpoint=endpoint,
        tag=tag,
        operation_id=operation_id,
        test_function=test_function,
        hooks=hooks,
        validate_schema=validate_schema,
    )
def get_fixtures(func: Callable, request: FixtureRequest) -> Dict[str, Any]:
    """Load fixtures, needed for the test function."""
    fixtures: Dict[str, Any] = {}
    for name in signature(func).parameters:
        if name != "case":
            fixtures[name] = request.getfixturevalue(name)
    return fixtures
| 39 | 115 | 0.643685 |
f5a12225b2fda3ad4196abb693d28336310cc33f | 2,587 | py | Python | koans/about_dictionaries.py | ProtKsen/learn_python_koans | 557d1eb21fab655a8c8eabaab830aa513871dfc5 | [
"MIT"
] | null | null | null | koans/about_dictionaries.py | ProtKsen/learn_python_koans | 557d1eb21fab655a8c8eabaab830aa513871dfc5 | [
"MIT"
] | null | null | null | koans/about_dictionaries.py | ProtKsen/learn_python_koans | 557d1eb21fab655a8c8eabaab830aa513871dfc5 | [
"MIT"
] | null | null | null | from koans.helpers.comparators import dict_comparator
from koans_plugs import *
def test_create_dictionary_with_literal():
    """
    A dictionary in Python can be created with a dict literal.
    A dict literal is a pair of curly braces: {}, in which key-value
    pairs are separated by commas and each key is separated from its
    value by a colon.
    """
    d = {'a': 1, 'b': 2}
    assert dict_comparator(d, {'a': 1, 'b': 2})  # try an object of the form {key1: value1, key2: value2, ...}
def test_create_dictionary_with_constructor():
    """A dictionary can also be created with the dict() constructor."""
    d = dict(a=1, b=2)
    assert dict_comparator(d, {'a': 1, 'b': 2})
def test_create_dictionary_with_list_of_tuples():
    """A dictionary can be built from a list of (key, value) tuples."""
    pairs = [('a', 1), ('b', 2), ('c', 3)]
    d = dict(pairs)
    assert dict_comparator(d, {'a': 1, 'b': 2, 'c': 3})
def test_get_value_by_key():
    """Values are looked up by key with the subscript operator."""
    d = {'a': 1, 'b': 2}
    assert d['a'] == 1  # try these variants: False, True, 1, 2
def test_add_key_and_value_to_dictionary():
    """A key-value pair can be added to an existing dictionary."""
    d = {'a': 1, 'b': 2}
    d['c'] = 3
    assert dict_comparator(d, {'a': 1, 'b': 2, 'c': 3})
def test_if_existing_key_in_dict():
    """The ``in`` operator tests key membership (key that is present)."""
    d = {'a': 1, 'b': 2}
    found = 'a' in d
    assert found == True  # try these variants: False, True, 1, 2
def test_if_not_existing_key_in_dict():
    """The ``in`` operator tests key membership (key that is absent)."""
    d = {'a': 1, 'b': 2}
    found = 'c' in d
    assert found == False  # try these variants: False, True, 1, 2
def test_get_method():
    """dict.get() returns a fallback value for keys that are missing."""
    d = {'a': 1, 'b': 2}
    fallback = d.get('c', 0)
    assert fallback == 0  # try these variants: False, True, 1, 2, 0
def test_get_method_default_value():
    """The default fallback of dict.get() is None."""
    d = {'a': 1, 'b': 2}
    fallback = d.get('c')
    assert fallback == None  # try these variants: False, True, 1, 2, 0, None
151fd8f662713c9856fafbd906e3715fcfb3dfbf | 2,739 | py | Python | test/test_inference/test_stdlib.py | kirat-singh/jedi | 65bc1c117b3175cb4d492484775c3fd7f207bc92 | [
"MIT"
] | 4,213 | 2015-01-02T15:43:22.000Z | 2022-03-31T16:15:01.000Z | test/test_inference/test_stdlib.py | kirat-singh/jedi | 65bc1c117b3175cb4d492484775c3fd7f207bc92 | [
"MIT"
] | 1,392 | 2015-01-02T18:43:39.000Z | 2022-03-27T18:43:59.000Z | test/test_inference/test_stdlib.py | PeterJCLaw/jedi | 070f191f550990c23220d7f209df076178307cf6 | [
"MIT"
] | 525 | 2015-01-02T19:07:31.000Z | 2022-03-13T02:03:20.000Z | """
Tests of various stdlib related things that could not be tested
with "Black Box Tests".
"""
from textwrap import dedent
import pytest
@pytest.mark.parametrize(['letter', 'expected'], [
    ('n', ['name']),
    ('s', ['smart']),
])
def test_namedtuple_str(letter, expected, Script):
    """Completing an attribute prefix on a namedtuple instance yields its fields."""
    source = dedent("""\
        import collections
        Person = collections.namedtuple('Person', 'name smart')
        dave = Person('Dave', False)
        dave.%s""") % letter
    completions = {c.name for c in Script(source).complete()}
    assert completions == set(expected)
def test_namedtuple_list(Script):
    """Field names given as a list (including a unicode literal) complete too."""
    source = dedent("""\
        import collections
        Cat = collections.namedtuple('Person', ['legs', u'length', 'large'])
        garfield = Cat(4, '85cm', True)
        garfield.l""")
    completions = {c.name for c in Script(source).complete()}
    assert completions == {'legs', 'length', 'large'}
def test_namedtuple_content(Script):
    # Inference should pick up the per-field types from the constructor
    # arguments, for both keyword and positional construction.
    source = dedent("""\
        import collections
        Foo = collections.namedtuple('Foo', ['bar', 'baz'])
        named = Foo(baz=4, bar=3.0)
        unnamed = Foo(4, '')
        """)
    def d(source):
        # Exactly one inferred definition is expected; return its type name.
        x, = Script(source).infer()
        return x.name
    assert d(source + 'unnamed.bar') == 'int'
    assert d(source + 'unnamed.baz') == 'str'
    assert d(source + 'named.bar') == 'float'
    assert d(source + 'named.baz') == 'int'
def test_nested_namedtuples(Script):
    """
    From issue #730.
    """
    # Completion must traverse a namedtuple stored inside another namedtuple.
    s = Script(dedent('''
        import collections
        Dataset = collections.namedtuple('Dataset', ['data'])
        Datasets = collections.namedtuple('Datasets', ['train'])
        train_x = Datasets(train=Dataset('data_value'))
        train_x.train.'''))
    assert 'data' in [c.name for c in s.complete()]
def test_namedtuple_infer(Script):
    # Infer the generated namedtuple class itself (not an instance attribute).
    source = dedent("""
        from collections import namedtuple
        Foo = namedtuple('Foo', 'id timestamp gps_timestamp attributes')
        Foo""")
    # NOTE(review): this local import shadows the ``Script`` fixture
    # parameter, so the fixture value is never used here — confirm whether
    # that is intentional.
    from jedi.api import Script
    d1, = Script(source).infer()
    assert d1.get_line_code() == "class Foo(tuple):\n"
    assert d1.module_path is None
    assert d1.docstring() == 'Foo(id, timestamp, gps_timestamp, attributes)'
def test_re_sub(Script, environment):
    """
    This whole test was taken out of completion/stdlib.py, because of the
    version differences.
    """
    def run(code):
        # Return the set of inferred type names for the given snippet.
        defs = Script(code).infer()
        return {d.name for d in defs}
    names = run("import re; re.sub('a', 'a', 'f')")
    assert names == {'str'}
    # This param is missing because of overloading.
    names = run("import re; re.sub('a', 'a')")
    assert names == {'str', 'bytes'}
| 27.94898 | 76 | 0.6046 |
fcef2328a1ea129a7047b271849e04f43f0b44d3 | 4,965 | py | Python | mkdocs/tests/utils_tests.py | modulardata/mkdocs | 24fffb766e4da64ea5f5afdd7e2452e1f1c66f38 | [
"BSD-2-Clause"
] | null | null | null | mkdocs/tests/utils_tests.py | modulardata/mkdocs | 24fffb766e4da64ea5f5afdd7e2452e1f1c66f38 | [
"BSD-2-Clause"
] | 1 | 2022-02-17T04:10:43.000Z | 2022-02-17T04:10:43.000Z | mkdocs/tests/utils_tests.py | modulardata/mkdocs | 24fffb766e4da64ea5f5afdd7e2452e1f1c66f38 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
from __future__ import unicode_literals
import os
import unittest
from mkdocs import nav, utils
class UtilsTests(unittest.TestCase):
    """Unit tests for the helper functions in mkdocs.utils."""
    # Markdown source paths must map to their "pretty URL" HTML files.
    def test_html_path(self):
        expected_results = {
            'index.md': 'index.html',
            'api-guide.md': 'api-guide/index.html',
            'api-guide/index.md': 'api-guide/index.html',
            'api-guide/testing.md': 'api-guide/testing/index.html',
        }
        for file_path, expected_html_path in expected_results.items():
            html_path = utils.get_html_path(file_path)
            self.assertEqual(html_path, expected_html_path)
    # Markdown source paths must map to trailing-slash URL paths.
    def test_url_path(self):
        expected_results = {
            'index.md': '/',
            'api-guide.md': '/api-guide/',
            'api-guide/index.md': '/api-guide/',
            'api-guide/testing.md': '/api-guide/testing/',
        }
        for file_path, expected_html_path in expected_results.items():
            html_path = utils.get_url_path(file_path)
            self.assertEqual(html_path, expected_html_path)
    # Extension detection is case-insensitive and requires a real extension.
    def test_is_markdown_file(self):
        expected_results = {
            'index.md': True,
            'index.MARKDOWN': True,
            'index.txt': False,
            'indexmd': False
        }
        for path, expected_result in expected_results.items():
            is_markdown = utils.is_markdown_file(path)
            self.assertEqual(is_markdown, expected_result)
    def test_is_html_file(self):
        expected_results = {
            'index.htm': True,
            'index.HTML': True,
            'index.txt': False,
            'indexhtml': False
        }
        for path, expected_result in expected_results.items():
            is_html = utils.is_html_file(path)
            self.assertEqual(is_html, expected_result)
    # Absolute/protocol-relative URLs pass through; local paths get './'.
    def test_create_media_urls(self):
        pages = [
            {'Home': 'index.md'},
            {'About': 'about.md'},
            {'Sub': [
                {'Sub Home': 'index.md'},
                {'Sub About': 'about.md'},
            ]}
        ]
        expected_results = {
            'https://media.cdn.org/jq.js': 'https://media.cdn.org/jq.js',
            'http://media.cdn.org/jquery.js': 'http://media.cdn.org/jquery.js',
            '//media.cdn.org/jquery.js': '//media.cdn.org/jquery.js',
            'media.cdn.org/jquery.js': './media.cdn.org/jquery.js',
            'local/file/jquery.js': './local/file/jquery.js',
            'image.png': './image.png',
        }
        site_navigation = nav.SiteNavigation(pages)
        for path, expected_result in expected_results.items():
            urls = utils.create_media_urls(site_navigation, [path])
            self.assertEqual(urls[0], expected_result)
    def test_create_relative_media_url_sub_index(self):
        '''
        test special case where there's a sub/index.md page
        '''
        site_navigation = nav.SiteNavigation([
            {'Home': 'index.md'},
            {'Sub': [
                {'Sub Home': '/subpage/index.md'},
            ]}
        ])
        # Simulate currently rendering the subpage/index.md page.
        site_navigation.url_context.set_current_url('/subpage/')
        site_navigation.file_context.current_file = "subpage/index.md"
        def assertPathGenerated(declared, expected):
            url = utils.create_relative_media_url(site_navigation, declared)
            self.assertEqual(url, expected)
        assertPathGenerated("img.png", "./img.png")
        assertPathGenerated("./img.png", "./img.png")
        assertPathGenerated("/img.png", "../img.png")
    # reduce_list removes duplicates while preserving first-seen order.
    def test_reduce_list(self):
        self.assertEqual(
            utils.reduce_list([1, 2, 3, 4, 5, 5, 2, 4, 6, 7, 8]),
            [1, 2, 3, 4, 5, 6, 7, 8]
        )
    # The bundled theme names are discovered from installed entry points.
    def test_get_themes(self):
        self.assertEqual(
            sorted(utils.get_theme_names()),
            sorted(['flatly', 'cerulean', 'slate', 'bootstrap', 'yeti',
                    'spacelab', 'united', 'readable', 'simplex', 'mkdocs',
                    'cosmo', 'journal', 'cyborg', 'readthedocs', 'amelia']))
    # nest_paths groups files by their directory into nav-style dicts.
    def test_nest_paths(self):
        j = os.path.join
        result = utils.nest_paths([
            'index.md',
            j('user-guide', 'configuration.md'),
            j('user-guide', 'styling-your-docs.md'),
            j('user-guide', 'writing-your-docs.md'),
            j('about', 'contributing.md'),
            j('about', 'license.md'),
            j('about', 'release-notes.md'),
        ])
        self.assertEqual(
            result,
            [
                'index.md',
                {'User guide': [
                    j('user-guide', 'configuration.md'),
                    j('user-guide', 'styling-your-docs.md'),
                    j('user-guide', 'writing-your-docs.md')]},
                {'About': [
                    j('about', 'contributing.md'),
                    j('about', 'license.md'),
                    j('about', 'release-notes.md')]}
            ]
        )
| 34.479167 | 79 | 0.534139 |
78d29c861537fe8e5bbd0dffd1de87e08b411e8a | 12,812 | py | Python | youtube_dl/extractor/afreecatv.py | imnx/youtube-dl_rg3-src | e7bfe83e3a000b3cb37bf39a0ab5aaf9e08fc858 | [
"Unlicense"
] | null | null | null | youtube_dl/extractor/afreecatv.py | imnx/youtube-dl_rg3-src | e7bfe83e3a000b3cb37bf39a0ab5aaf9e08fc858 | [
"Unlicense"
] | null | null | null | youtube_dl/extractor/afreecatv.py | imnx/youtube-dl_rg3-src | e7bfe83e3a000b3cb37bf39a0ab5aaf9e08fc858 | [
"Unlicense"
] | null | null | null | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import compat_xpath
from ..utils import (
determine_ext,
ExtractorError,
int_or_none,
xpath_text,
)
class AfreecaTVIE(InfoExtractor):
    # Extractor for afreecatv.com videos (legacy live/bbs player URLs and
    # VOD station pages); the video id is captured as the named group "id".
    IE_NAME = 'afreecatv'
    IE_DESC = 'afreecatv.com'
    _VALID_URL = r'''(?x)
                    https?://
                        (?:
                            (?:(?:live|afbbs|www)\.)?afreeca(?:tv)?\.com(?::\d+)?
                            (?:
                                /app/(?:index|read_ucc_bbs)\.cgi|
                                /player/[Pp]layer\.(?:swf|html)
                            )\?.*?\bnTitleNo=|
                            vod\.afreecatv\.com/PLAYER/STATION/
                        )
                        (?P<id>\d+)
                    '''
_TESTS = [{
'url': 'http://live.afreecatv.com:8079/app/index.cgi?szType=read_ucc_bbs&szBjId=dailyapril&nStationNo=16711924&nBbsNo=18605867&nTitleNo=36164052&szSkin=',
'md5': 'f72c89fe7ecc14c1b5ce506c4996046e',
'info_dict': {
'id': '36164052',
'ext': 'mp4',
'title': '데일리 에이프릴 요정들의 시상식!',
'thumbnail': 're:^https?://(?:video|st)img.afreecatv.com/.*$',
'uploader': 'dailyapril',
'uploader_id': 'dailyapril',
'upload_date': '20160503',
},
'skip': 'Video is gone',
}, {
'url': 'http://afbbs.afreecatv.com:8080/app/read_ucc_bbs.cgi?nStationNo=16711924&nTitleNo=36153164&szBjId=dailyapril&nBbsNo=18605867',
'info_dict': {
'id': '36153164',
'title': "BJ유트루와 함께하는 '팅커벨 메이크업!'",
'thumbnail': 're:^https?://(?:video|st)img.afreecatv.com/.*$',
'uploader': 'dailyapril',
'uploader_id': 'dailyapril',
},
'playlist_count': 2,
'playlist': [{
'md5': 'd8b7c174568da61d774ef0203159bf97',
'info_dict': {
'id': '36153164_1',
'ext': 'mp4',
'title': "BJ유트루와 함께하는 '팅커벨 메이크업!'",
'upload_date': '20160502',
},
}, {
'md5': '58f2ce7f6044e34439ab2d50612ab02b',
'info_dict': {
'id': '36153164_2',
'ext': 'mp4',
'title': "BJ유트루와 함께하는 '팅커벨 메이크업!'",
'upload_date': '20160502',
},
}],
'skip': 'Video is gone',
}, {
'url': 'http://vod.afreecatv.com/PLAYER/STATION/18650793',
'info_dict': {
'id': '18650793',
'ext': 'mp4',
'title': '오늘은 다르다! 쏘님의 우월한 위아래~ 댄스리액션!',
'thumbnail': r're:^https?://.*\.jpg$',
'uploader': '윈아디',
'uploader_id': 'badkids',
'duration': 107,
},
'params': {
'skip_download': True,
},
}, {
'url': 'http://vod.afreecatv.com/PLAYER/STATION/10481652',
'info_dict': {
'id': '10481652',
'title': "BJ유트루와 함께하는 '팅커벨 메이크업!'",
'thumbnail': 're:^https?://(?:video|st)img.afreecatv.com/.*$',
'uploader': 'dailyapril',
'uploader_id': 'dailyapril',
'duration': 6492,
},
'playlist_count': 2,
'playlist': [{
'md5': 'd8b7c174568da61d774ef0203159bf97',
'info_dict': {
'id': '20160502_c4c62b9d_174361386_1',
'ext': 'mp4',
'title': "BJ유트루와 함께하는 '팅커벨 메이크업!' (part 1)",
'thumbnail': 're:^https?://(?:video|st)img.afreecatv.com/.*$',
'uploader': 'dailyapril',
'uploader_id': 'dailyapril',
'upload_date': '20160502',
'duration': 3601,
},
}, {
'md5': '58f2ce7f6044e34439ab2d50612ab02b',
'info_dict': {
'id': '20160502_39e739bb_174361386_2',
'ext': 'mp4',
'title': "BJ유트루와 함께하는 '팅커벨 메이크업!' (part 2)",
'thumbnail': 're:^https?://(?:video|st)img.afreecatv.com/.*$',
'uploader': 'dailyapril',
'uploader_id': 'dailyapril',
'upload_date': '20160502',
'duration': 2891,
},
}],
'params': {
'skip_download': True,
},
}, {
# non standard key
'url': 'http://vod.afreecatv.com/PLAYER/STATION/20515605',
'info_dict': {
'id': '20170411_BE689A0E_190960999_1_2_h',
'ext': 'mp4',
'title': '혼자사는여자집',
'thumbnail': 're:^https?://(?:video|st)img.afreecatv.com/.*$',
'uploader': '♥이슬이',
'uploader_id': 'dasl8121',
'upload_date': '20170411',
'duration': 213,
},
'params': {
'skip_download': True,
},
}, {
'url': 'http://www.afreecatv.com/player/Player.swf?szType=szBjId=djleegoon&nStationNo=11273158&nBbsNo=13161095&nTitleNo=36327652',
'only_matching': True,
}, {
'url': 'http://vod.afreecatv.com/PLAYER/STATION/15055030',
'only_matching': True,
}]
@staticmethod
def parse_video_key(key):
    """Split a clip key like ``YYYYMMDD_<hash>_<part>`` into its fields.

    Returns a dict with ``upload_date`` (string) and ``part`` (int) when
    the key matches that shape, or an empty dict otherwise.
    """
    match = re.match(r'^(?P<upload_date>\d{8})_\w+_(?P<part>\d+)$', key)
    if not match:
        return {}
    return {
        'upload_date': match.group('upload_date'),
        'part': int(match.group('part')),
    }
def _real_extract(self, url):
    """Extract a single AfreecaTV VOD.

    Multi-part VODs (no direct URL; one or more ``<file>`` children) are
    returned as a ``multi_video`` playlist; single-file VODs come back as
    a plain info dict (HLS or legacy RTMP depending on the URL).
    """
    video_id = self._match_id(url)
    video_xml = self._download_xml(
        'http://afbbs.afreecatv.com:8080/api/video/get_video_info.php',
        video_id, query={'nTitleNo': video_id})
    # The API lists several <video> nodes under <track>; index 1 holds the
    # playable entry. NOTE(review): fewer than two nodes would raise
    # IndexError here — confirm the API always returns at least two.
    video_element = video_xml.findall(compat_xpath('./track/video'))[1]
    if video_element is None or video_element.text is None:
        raise ExtractorError('Specified AfreecaTV video does not exist',
                             expected=True)
    video_url = video_element.text.strip()
    title = xpath_text(video_xml, './track/title', 'title', fatal=True)
    uploader = xpath_text(video_xml, './track/nickname', 'uploader')
    uploader_id = xpath_text(video_xml, './track/bj_id', 'uploader id')
    duration = int_or_none(xpath_text(
        video_xml, './track/duration', 'duration'))
    thumbnail = xpath_text(video_xml, './track/titleImage', 'thumbnail')
    # Metadata shared by every per-part entry.
    common_entry = {
        'uploader': uploader,
        'uploader_id': uploader_id,
        'thumbnail': thumbnail,
    }
    info = common_entry.copy()
    info.update({
        'id': video_id,
        'title': title,
        'duration': duration,
    })
    if not video_url:
        # Multi-part VOD: build one entry per <file> element.
        entries = []
        file_elements = video_element.findall(compat_xpath('./file'))
        one = len(file_elements) == 1
        for file_num, file_element in enumerate(file_elements, start=1):
            file_url = file_element.text
            if not file_url:
                continue
            # key looks like "YYYYMMDD_<hash>_<index>" (non-standard
            # suffixed keys also exist, hence the loose date regex).
            key = file_element.get('key', '')
            upload_date = self._search_regex(
                r'^(\d{8})_', key, 'upload date', default=None)
            file_duration = int_or_none(file_element.get('duration'))
            format_id = key if key else '%s_%s' % (video_id, file_num)
            formats = self._extract_m3u8_formats(
                file_url, video_id, 'mp4', entry_protocol='m3u8_native',
                m3u8_id='hls',
                note='Downloading part %d m3u8 information' % file_num)
            # BUG FIX: build the per-part title in a local variable instead
            # of rebinding `title`. The old code made part N's title
            # accumulate every previous suffix — part 2 became
            # "T (part 1) (part 2)" — contradicting the expected
            # "T (part 2)" in the _TESTS entries above.
            part_title = title if one else '%s (part %d)' % (title, file_num)
            file_info = common_entry.copy()
            file_info.update({
                'id': format_id,
                'title': part_title,
                'upload_date': upload_date,
                'duration': file_duration,
                'formats': formats,
            })
            entries.append(file_info)
        entries_info = info.copy()
        entries_info.update({
            '_type': 'multi_video',
            'entries': entries,
        })
        return entries_info
    info = {
        'id': video_id,
        'title': title,
        'uploader': uploader,
        'uploader_id': uploader_id,
        'duration': duration,
        'thumbnail': thumbnail,
    }
    if determine_ext(video_url) == 'm3u8':
        info['formats'] = self._extract_m3u8_formats(
            video_url, video_id, 'mp4', entry_protocol='m3u8_native',
            m3u8_id='hls')
    else:
        # Legacy RTMP URL of the form "<app>mp4:<playpath>".
        app, playpath = video_url.split('mp4:')
        info.update({
            'url': app,
            'ext': 'flv',
            'play_path': 'mp4:' + playpath,
            'rtmp_live': True,  # downloading won't end without this
        })
    return info
class AfreecaTVGlobalIE(AfreecaTVIE):
    # Extractor for the international afreeca.tv site; URLs address a
    # channel (live stream) and optionally a specific VOD via /v/<id>.
    IE_NAME = 'afreecatv:global'
    _VALID_URL = r'https?://(?:www\.)?afreeca\.tv/(?P<channel_id>\d+)(?:/v/(?P<video_id>\d+))?'
    _TESTS = [{
        'url': 'http://afreeca.tv/36853014/v/58301',
        'info_dict': {
            'id': '58301',
            'title': 'tryhard top100',
            'uploader_id': '36853014',
            'uploader': 'makgi Hearthstone Live!',
        },
        'playlist_count': 3,
    }]

    def _real_extract(self, url):
        """Extract either a live channel or a (possibly multi-part) VOD."""
        channel_id, video_id = re.match(self._VALID_URL, url).groups()
        # A VOD id selects the 'video' API endpoint, otherwise 'live'.
        video_type = 'video' if video_id else 'live'
        query = {
            'pt': 'view',
            'bid': channel_id,
        }
        if video_id:
            query['vno'] = video_id
        video_data = self._download_json(
            'http://api.afreeca.tv/%s/view_%s.php' % (video_type, video_type),
            video_id or channel_id, query=query)['channel']
        # result == 1 signals success; any other value carries an error
        # message in 'remsg'.
        if video_data.get('result') != 1:
            raise ExtractorError('%s said: %s' % (self.IE_NAME, video_data['remsg']))
        title = video_data['title']
        # Metadata common to both the live and the VOD paths.
        info = {
            'thumbnail': video_data.get('thumb'),
            'view_count': int_or_none(video_data.get('vcnt')),
            'age_limit': int_or_none(video_data.get('grade')),
            'uploader_id': channel_id,
            'uploader': video_data.get('cname'),
        }
        if video_id:
            # VOD: one entry per file in 'flist'; keys are parsed by the
            # inherited parse_video_key() helper.
            entries = []
            for i, f in enumerate(video_data.get('flist', [])):
                video_key = self.parse_video_key(f.get('key', ''))
                f_url = f.get('file')
                # Skip files whose key doesn't parse or that lack a URL.
                if not video_key or not f_url:
                    continue
                entries.append({
                    'id': '%s_%s' % (video_id, video_key.get('part', i + 1)),
                    'title': title,
                    'upload_date': video_key.get('upload_date'),
                    'duration': int_or_none(f.get('length')),
                    'url': f_url,
                    'protocol': 'm3u8_native',
                    'ext': 'mp4',
                })
            info.update({
                'id': video_id,
                'title': title,
                'duration': int_or_none(video_data.get('length')),
            })
            if len(entries) > 1:
                # Several parts -> expose them as a multi_video playlist.
                info['_type'] = 'multi_video'
                info['entries'] = entries
            elif len(entries) == 1:
                # Single part: merge the entry's fields into the top-level
                # info dict. NOTE(review): `i` here shadows the loop index
                # above — intentional but easy to misread.
                i = entries[0].copy()
                i.update(info)
                info = i
        else:
            # Live stream: build formats from the 'strm' stream list.
            formats = []
            for s in video_data.get('strm', []):
                s_url = s.get('purl')
                if not s_url:
                    continue
                stype = s.get('stype')
                if stype == 'HLS':
                    formats.extend(self._extract_m3u8_formats(
                        s_url, channel_id, 'mp4', m3u8_id=stype, fatal=False))
                elif stype == 'RTMP':
                    format_id = [stype]
                    label = s.get('label')
                    if label:
                        format_id.append(label)
                    formats.append({
                        'format_id': '-'.join(format_id),
                        'url': s_url,
                        'tbr': int_or_none(s.get('bps')),
                        # NOTE(review): 'brt' mapped to height — confirm the
                        # API really reports pixel height under that key.
                        'height': int_or_none(s.get('brt')),
                        'ext': 'flv',
                        'rtmp_live': True,
                    })
            self._sort_formats(formats)
            info.update({
                'id': channel_id,
                'title': self._live_title(title),
                'is_live': True,
                'formats': formats,
            })
        return info
| 36.19209 | 162 | 0.470809 |
e339013beb7b92e377cd2ea95ec8b04b9079afcf | 2,830 | py | Python | p2p/tools/asyncio_streams.py | dendisuhubdy/trinity | 001664781259c7dd0779a0ef6f822451b608ded4 | [
"MIT"
] | 1 | 2021-04-07T07:33:28.000Z | 2021-04-07T07:33:28.000Z | p2p/tools/asyncio_streams.py | dendisuhubdy/trinity | 001664781259c7dd0779a0ef6f822451b608ded4 | [
"MIT"
] | null | null | null | p2p/tools/asyncio_streams.py | dendisuhubdy/trinity | 001664781259c7dd0779a0ef6f822451b608ded4 | [
"MIT"
] | null | null | null | import asyncio
from typing import Any, Dict, Sequence, Tuple
class MemoryProtocol(asyncio.Protocol):
def __init__(self) -> None:
self._closed_event = asyncio.Event()
async def _drain_helper(self) -> None:
pass
@property
async def _closed(self) -> None:
await self._closed_event.wait()
class MemoryWriteTransport(asyncio.WriteTransport):
    """An in-memory ``asyncio.WriteTransport`` that feeds every write into a
    paired ``StreamReader`` instead of a socket.

    See
    https://docs.python.org/3/library/asyncio-protocol.html#asyncio.BaseTransport
    """

    def __init__(self,
                 reader: asyncio.StreamReader,
                 extra: Dict[str, Any] = None) -> None:
        super().__init__(extra)
        # The reader that receives everything written to this transport.
        self._reader = reader
        self._is_closing = False

    #
    # BaseTransport API (set_protocol/get_protocol intentionally left to
    # the base class, which raises NotImplementedError).
    #
    def close(self) -> None:
        self._is_closing = True

    def is_closing(self) -> bool:
        return self._is_closing

    #
    # WriteTransport API (buffer-limit methods likewise left to the base).
    #
    def write(self, data: bytes) -> None:
        # Deliver the bytes straight to the paired reader.
        self._reader.feed_data(data)

    def writelines(self, list_of_data: Sequence[bytes]) -> None:
        self.write(b''.join(list_of_data))

    def write_eof(self) -> None:
        # NOTE: only marks this transport as closing; the paired reader is
        # not sent an EOF (matches the historical behavior).
        self._is_closing = True

    def can_write_eof(self) -> bool:
        return True

    def abort(self) -> None:
        self._is_closing = True
TConnectedStreams = Tuple[
    Tuple[asyncio.StreamReader, asyncio.StreamWriter],
    Tuple[asyncio.StreamReader, asyncio.StreamWriter],
]


def get_directly_connected_streams(alice_extra_info: Dict[str, Any] = None,
                                   bob_extra_info: Dict[str, Any] = None,
                                   loop: asyncio.AbstractEventLoop = None) -> TConnectedStreams:
    """Wire up two in-memory (reader, writer) stream pairs.

    Whatever alice writes appears on bob's reader and vice versa; no
    sockets are involved, which makes the pair handy for tests.
    """
    if loop is None:
        loop = asyncio.get_event_loop()

    reader_a = asyncio.StreamReader()
    reader_b = asyncio.StreamReader()

    # Each transport delivers writes straight into the *other* side's reader.
    transport_a = MemoryWriteTransport(reader_b, extra=alice_extra_info)
    transport_b = MemoryWriteTransport(reader_a, extra=bob_extra_info)

    protocol_a = MemoryProtocol()
    protocol_b = MemoryProtocol()

    writer_b = asyncio.StreamWriter(transport_b, protocol_b, reader_a, loop=loop)
    writer_a = asyncio.StreamWriter(transport_a, protocol_a, reader_b, loop=loop)

    return (
        (reader_a, writer_a),
        (reader_b, writer_b),
    )
b41e303f7a872ae71d3a7bdaac2a7483262b50bd | 3,086 | py | Python | project/server/models.py | sebotic/repurpos-backend | 5637d68530947f2ca20e4d3a4257d6128625a95b | [
"MIT"
] | null | null | null | project/server/models.py | sebotic/repurpos-backend | 5637d68530947f2ca20e4d3a4257d6128625a95b | [
"MIT"
] | null | null | null | project/server/models.py | sebotic/repurpos-backend | 5637d68530947f2ca20e4d3a4257d6128625a95b | [
"MIT"
] | 3 | 2018-02-14T19:47:08.000Z | 2018-03-16T09:19:09.000Z | # project/server/models.py
import jwt
import datetime
from project.server import app, db, bcrypt
class User(db.Model):
    """ User Model for storing user related details """
    __tablename__ = "users"

    # Surrogate primary key.
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    # Login identifier; uniqueness enforced at the database level.
    email = db.Column(db.String(255), unique=True, nullable=False)
    # Bcrypt hash of the password — never the plain text.
    password = db.Column(db.String(255), nullable=False)
    registered_on = db.Column(db.DateTime, nullable=False)
    admin = db.Column(db.Boolean, nullable=False, default=False)
    # E-mail confirmation state and timestamp.
    confirmed = db.Column(db.Boolean, nullable=False, default=False)
    confirmed_on = db.Column(db.DateTime, nullable=True)

    def __init__(self, email, password, confirmed, admin=False, confirmed_on=None):
        # Hash the password immediately; BCRYPT_LOG_ROUNDS controls the
        # bcrypt work factor via app config.
        self.email = email
        self.password = bcrypt.generate_password_hash(
            password, app.config.get('BCRYPT_LOG_ROUNDS')
        ).decode('utf-8')
        self.registered_on = datetime.datetime.now()
        self.admin = admin
        self.confirmed = confirmed
        self.confirmed_on = confirmed_on

    def encode_auth_token(self, user_id):
        """
        Generates the Auth Token
        :return: string
        """
        try:
            payload = {
                # Token lifetime: 1 day + 5 seconds from issue time.
                'exp': datetime.datetime.utcnow() + datetime.timedelta(days=1, seconds=5),
                'iat': datetime.datetime.utcnow(),
                'sub': user_id
            }
            return jwt.encode(
                payload,
                app.config.get('SECRET_KEY'),
                algorithm='HS256'
            )
        except Exception as e:
            # NOTE(review): the exception object is *returned*, not raised,
            # so callers must type-check the result — confirm intentional.
            return e

    @staticmethod
    def decode_auth_token(auth_token):
        """
        Validates the auth token
        :param auth_token:
        :return: integer|string
        """
        try:
            # NOTE(review): no `algorithms=` whitelist is passed; older
            # PyJWT accepts this, PyJWT >= 2.0 requires it — verify the
            # pinned PyJWT version.
            payload = jwt.decode(auth_token, app.config.get('SECRET_KEY'))
            is_blacklisted_token = BlacklistToken.check_blacklist(auth_token)
            if is_blacklisted_token:
                return 'Token blacklisted. Please log in again.'
            else:
                # 'sub' holds the user id set by encode_auth_token().
                return payload['sub']
        except jwt.ExpiredSignatureError:
            return 'Signature expired. Please log in again.'
        except jwt.InvalidTokenError:
            return 'Invalid token. Please log in again.'
class BlacklistToken(db.Model):
    """
    Token Model for storing JWT tokens
    """
    __tablename__ = 'blacklist_tokens'

    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    # The raw JWT string that has been revoked (e.g. on logout).
    token = db.Column(db.String(500), unique=True, nullable=False)
    blacklisted_on = db.Column(db.DateTime, nullable=False)

    def __init__(self, token):
        self.token = token
        self.blacklisted_on = datetime.datetime.now()

    def __repr__(self):
        # NOTE(review): the format string is missing a closing '>' —
        # cosmetic only, left as-is.
        return '<id: token: {}'.format(self.token)

    @staticmethod
    def check_blacklist(auth_token):
        # check whether auth token has been blacklisted
        # True when a matching revoked-token row exists.
        res = BlacklistToken.query.filter_by(token=str(auth_token)).first()
        if res:
            return True
        else:
            return False
1b362a7b9a1c661a0778644f972d72ce6b4b9c75 | 1,168 | py | Python | python/pyduino_mk/__init__.py | nanpuhaha/pyduino-mk | 6220d243410b883b02493dfa7f605553d62b6678 | [
"MIT"
] | null | null | null | python/pyduino_mk/__init__.py | nanpuhaha/pyduino-mk | 6220d243410b883b02493dfa7f605553d62b6678 | [
"MIT"
] | null | null | null | python/pyduino_mk/__init__.py | nanpuhaha/pyduino-mk | 6220d243410b883b02493dfa7f605553d62b6678 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# Copyright (c) 2015 Nelson Tran
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from .arduino import Arduino
from .constants import *
| 46.72 | 80 | 0.777397 |
b8f6e673f2e582994ca18da7a4c714781db3633a | 4,114 | py | Python | tensorflow/python/data/service/server_lib.py | joshz123/tensorflow | 7841ca029060ab78e221e757d4b1ee6e3e0ffaa4 | [
"Apache-2.0"
] | 8 | 2020-07-29T18:50:45.000Z | 2021-07-25T07:06:43.000Z | tensorflow/python/data/service/server_lib.py | joshz123/tensorflow | 7841ca029060ab78e221e757d4b1ee6e3e0ffaa4 | [
"Apache-2.0"
] | 203 | 2019-06-14T23:53:10.000Z | 2022-02-10T02:27:23.000Z | tensorflow/python/data/service/server_lib.py | joshz123/tensorflow | 7841ca029060ab78e221e757d4b1ee6e3e0ffaa4 | [
"Apache-2.0"
] | 11 | 2020-05-31T13:14:56.000Z | 2021-12-14T04:39:25.000Z | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A Python interface for creating dataset servers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=invalid-import-order,g-bad-import-order, unused-import
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.data.service import _pywrap_server_lib
class MasterServer(object):
    """An in-process tf.data service master, for use in testing."""

    def __init__(self, protocol):
        """Creates and starts a new tf.data master server.

        The server binds an OS-chosen free port; read `target` for the
        string clients should use to connect.

        Args:
          protocol: A string representing the type of protocol to use when
            creating channels. For no security, use "grpc". For local
            credentials, use "grpc+local", and make sure your binary links
            in `data/service:local_credentials`.
        """
        self._protocol = protocol
        self._running = True
        # Port 0 asks the C++ server to pick any free port.
        self._server = _pywrap_server_lib.TF_DATA_NewMasterServer(0, protocol)

    @property
    def target(self):
        """Returns the "protocol://address:port" string for this server,
        e.g. "grpc://localhost:1000"."""
        port = _pywrap_server_lib.TF_DATA_MasterServerBoundPort(self._server)
        return '%s://localhost:%s' % (self._protocol, port)

    def num_tasks(self):
        """Returns the number of tasks on the master."""
        return _pywrap_server_lib.TF_DATA_MasterServerNumTasks(self._server)

    def stop(self):
        """Shuts down and deletes the server.

        Blocks until all outstanding rpcs have completed; safe to call
        more than once.
        """
        if not self._running:
            return
        self._running = False
        _pywrap_server_lib.TF_DATA_DeleteMasterServer(self._server)

    def __del__(self):
        # Best-effort cleanup in case the caller never called stop().
        self.stop()
class WorkerServer(object):
    """An in-process tf.data service worker, for use in testing."""

    def __init__(self, protocol, master_address, port=0):
        """Creates and starts a new tf.data worker server.

        Unless `port` is given, the server picks a free port; read
        `target` for the connection string.

        Args:
          protocol: A string representing the type of protocol to use when
            creating channels. For no security, use "grpc". For local
            credentials, use "grpc+local", and make sure your binary links
            in `data/service:local_credentials`.
          master_address: The address of the tf.data master server to
            register with.
          port: The port to bind to.
        """
        self._protocol = protocol
        self._running = True
        # "localhost:%port%" is a template the C++ side fills in with the
        # actually-bound port.
        self._server = _pywrap_server_lib.TF_DATA_NewWorkerServer(
            port, protocol, master_address, "localhost:%port%")

    @property
    def target(self):
        """Returns the "protocol://address:port" string for this server,
        e.g. "grpc://localhost:1000"."""
        port = _pywrap_server_lib.TF_DATA_WorkerServerBoundPort(self._server)
        return '%s://localhost:%s' % (self._protocol, port)

    def stop(self):
        """Shuts down and deletes the server.

        Blocks until all outstanding rpcs have completed; safe to call
        more than once.
        """
        if not self._running:
            return
        self._running = False
        _pywrap_server_lib.TF_DATA_DeleteWorkerServer(self._server)

    def __del__(self):
        # Best-effort cleanup in case the caller never called stop().
        self.stop()
| 35.162393 | 80 | 0.705882 |
91c9ed0b0b0709e780853d64d8c5ce038299b0ff | 5,385 | py | Python | pkgs/ops-pkg/src/genie/libs/ops/lldp/iosxe/lldp.py | miott/genielibs | 6464642cdd67aa2367bdbb12561af4bb060e5e62 | [
"Apache-2.0"
] | 94 | 2018-04-30T20:29:15.000Z | 2022-03-29T13:40:31.000Z | pkgs/ops-pkg/src/genie/libs/ops/lldp/iosxe/lldp.py | miott/genielibs | 6464642cdd67aa2367bdbb12561af4bb060e5e62 | [
"Apache-2.0"
] | 67 | 2018-12-06T21:08:09.000Z | 2022-03-29T18:00:46.000Z | pkgs/ops-pkg/src/genie/libs/ops/lldp/iosxe/lldp.py | miott/genielibs | 6464642cdd67aa2367bdbb12561af4bb060e5e62 | [
"Apache-2.0"
] | 49 | 2018-06-29T18:59:03.000Z | 2022-03-10T02:07:59.000Z | '''
LLDP Genie Ops Object for IOSXE - CLI.
'''
# Genie
from genie.ops.base import Base
from genie.ops.base import Context
class Lldp(Base):
    '''LLDP Genie Ops Object'''

    def tx_rx_both_enabled(self, item):
        '''return True when logic and for tx and rx is True'''
        # item is the parsed per-interface dict from 'show lldp interface';
        # an interface counts as enabled only when the token 'enabled'
        # appears in both its 'tx' and 'rx' fields.
        try:
            if 'enabled' in item['tx'] and 'enabled' in item['rx']:
                return True
            else:
                return False
        except Exception:
            # Missing keys / unexpected shapes are treated as "not enabled"
            # (deliberate best-effort: learn() must not abort on one bad
            # interface entry).
            return False

    def learn(self):
        '''Learn lldp Ops'''
        ########################################################################
        #                               info
        ########################################################################
        # unsupported keys
        # enabled
        # hello_timer
        # hold_timer
        # suppress_tlv_advertisement: - NA
        #     chassis_id - N/A
        #     port_id - N/A
        #     port_description - N/A
        #     system_name - N/A
        #     system_description - N/A
        #     system_capabilities - N/A
        #     management_address - N/A
        # system_name - N/A
        # system_description - N/A
        # chassis_id - N/A
        # chassis_id_type - N/A
        # counters
        #     frame_in
        #     frame_out
        #     frame_error_in
        #     frame_discard
        #     tlv_discard - N/A
        #     tlv_unknown - N/A
        #     last_clear - N/A
        #     tlv_accepted - N/A
        #     entries_aged_out
        # interfaces
        #     if_name
        #         if_name
        #         enabled
        #         counters
        #             frame_in
        #             frame_out
        #             frame_error_in
        #             frame_discard
        #             tlv_discard
        #             tlv_unknown
        #             last_clear - N/A
        #             frame_error_out - N/A
        #             entries_aged_out
        #         pord_id
        #             neighbors
        #                 neighbor_id
        #                     neighbor_id
        #                     system_name
        #                     system_description
        #                     chassis_id
        #                     chassis_id_type - N/A
        #                     id
        #                     age
        #                     last_update
        #                     port_id
        #                     port_id_type - N/A
        #                     port_description
        #                     management_address
        #                     management_address_type - N/A
        #                     custom_tlvs' - N/A
        #                         [type oui oui_subtype] - N/A
        #                             type - N/A
        #                             oui - N/A
        #                             oui_subtype - N/A
        #                             value - N/A
        #                     capabilities
        #                         name
        #                             name - N/A
        #                             enabled

        # Global LLDP settings from 'show lldp'.
        for key in ['enabled', 'hello_timer', 'hold_timer']:
            self.add_leaf(cmd='show lldp',
                          src='[{}]'.format(key),
                          dest='info[{}]'.format(key))

        # Global traffic counters from 'show lldp traffic'.
        for key in ['frame_in', 'frame_out', 'frame_error_in', 'frame_discard',
                    'tlv_discard', 'tlv_unknown', 'entries_aged_out']:
            self.add_leaf(cmd='show lldp traffic',
                          src='[{}]'.format(key),
                          dest='info[counters][{}]'.format(key))

        # Regex-style source/destination key paths shared by the leaves below.
        intf_src = '[interfaces][(?P<intf>.*)]'
        intf_dest = 'info[interfaces][(?P<intf>.*)]'
        nbr_src = '[interfaces][(?P<intf>.*)][port_id][(?P<p_id>.*)][neighbors][(?P<nei>.*)]'
        nbr_dest = 'info[interfaces][(?P<intf>.*)][port_id][(?P<p_id>.*)][neighbors][(?P<nei>.*)]'

        self.add_leaf(cmd='show lldp entry *',
                      src=intf_src + '[if_name]',
                      dest=intf_dest + '[if_name]')
        self.add_leaf(cmd='show lldp neighbors detail',
                      src=intf_src + '[if_name]',
                      dest=intf_dest + '[if_name]')

        # Per-neighbor attributes, learned from 'show lldp entry *'.
        for key in ['[chassis_id]', '[port_id]', '[neighbor_id]', '[system_name]',
                    '[system_description]', '[port_description]', '[management_address]',
                    '[capabilities][(?P<cap>.*)][enabled]','[capabilities][(?P<cap>.*)][name]' ]:
            self.add_leaf(cmd='show lldp entry *',
                          src=nbr_src + key,
                          dest=nbr_dest + key)

        # Same attributes from 'show lldp neighbors detail', so either
        # command's output can populate the same ops structure.
        for key in ['[chassis_id]', '[port_id]', '[neighbor_id]', '[system_name]',
                    '[system_description]', '[port_description]', '[management_address]',
                    '[capabilities][(?P<cap>.*)][enabled]','[capabilities][(?P<cap>.*)][name]' ]:
            self.add_leaf(cmd='show lldp neighbors detail',
                          src=nbr_src + key,
                          dest=nbr_dest + key)

        # enabled
        # Interface-level enabled flag derived via tx_rx_both_enabled().
        self.add_leaf(cmd='show lldp interface',
                      src=intf_src,
                      dest=intf_dest + '[enabled]',
                      action=self.tx_rx_both_enabled)

        # make to write in cache
        self.make(final_call=True)
| 38.191489 | 98 | 0.405571 |
058afa2600787f0ee639e85b2abbe8ea5984e23e | 841 | py | Python | db_migrate.py | pugong/microblog | cb93214de99528370718877ea9f09f041f64a0a4 | [
"BSD-3-Clause"
] | null | null | null | db_migrate.py | pugong/microblog | cb93214de99528370718877ea9f09f041f64a0a4 | [
"BSD-3-Clause"
] | null | null | null | db_migrate.py | pugong/microblog | cb93214de99528370718877ea9f09f041f64a0a4 | [
"BSD-3-Clause"
] | null | null | null | #!flask/bin/python
import imp
from migrate.versioning import api
from app import db
from config import SQLALCHEMY_DATABASE_URI
from config import SQLALCHEMY_MIGRATE_REPO
# Path of the next migration script: version numbers are zero-padded and
# derived from the repository's current version + 1.
migration = SQLALCHEMY_MIGRATE_REPO + '/versions/%03d_migration.py' % (api.db_version(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO) + 1)
# Build an in-memory module holding the *old* model as recorded in the
# migrate repository. NOTE: `exec ... in ...` and the bare prints below
# are Python 2 syntax; this script does not run under Python 3.
tmp_module = imp.new_module('old_model')
old_model = api.create_model(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
exec old_model in tmp_module.__dict__
# Diff the old model against the live SQLAlchemy metadata to generate the
# upgrade script, then save it under the computed migration path.
script = api.make_update_script_for_model(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO, tmp_module.meta, db.metadata)
open(migration, "wt").write(script)
# Apply the freshly generated migration to the database.
api.upgrade(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
print 'New migration saved as ' + migration
print 'Current database version: ' + str(api.db_version(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO))
| 49.470588 | 140 | 0.840666 |
86a77505f71b4767b949f34aef3391c6301a2649 | 2,620 | py | Python | main.py | jsanc623/PyBenchTime | 2dd0a0ca499d89968dda9fde36a6c5a2bb5a1460 | [
"MIT"
] | null | null | null | main.py | jsanc623/PyBenchTime | 2dd0a0ca499d89968dda9fde36a6c5a2bb5a1460 | [
"MIT"
] | null | null | null | main.py | jsanc623/PyBenchTime | 2dd0a0ca499d89968dda9fde36a6c5a2bb5a1460 | [
"MIT"
] | null | null | null | from time import time
# Handle the running state of the timer.
RUNNING = 1
PAUSED = 0
STOPPED = -1


def get_current_time():
    """Return the current wall-clock time in seconds since the epoch."""
    return time()


class Timer:
    """A stopwatch with named laps and pause/unpause support."""

    def __init__(self):
        # Fresh timer: stopped, with no laps and no accumulated pause time.
        self.state = STOPPED          # RUNNING, PAUSED or STOPPED
        self._start_time = 0          # when start() was called
        self._end_time = 0            # when end() was called
        self._pause_time = 0          # start of the current pause
        self._total_pause_time = 0    # total time spent paused
        self._laps = []               # recorded laps
        self._lap_count = 0           # lap count, inclusive of current lap

    def start(self, name="start"):
        """Start the timer and open the first lap."""
        self.state = RUNNING
        self._start_time = get_current_time()
        self.lap(name)

    def end(self):
        """Stop the timer and close the last lap."""
        self.state = STOPPED
        self._end_time = get_current_time()
        self._end_lap()

    def lap(self, name=None):
        """Close the current lap and open a new one."""
        self._end_lap()
        self._laps.append({
            # Unnamed laps fall back to the current lap index.
            'name': name if name else self._lap_count,
            'start': get_current_time(),
            'end': -1,
            'total': -1,
        })
        self._lap_count += 1

    def _end_lap(self):
        """Stamp the most recent lap with its end and total times."""
        self._lap_count = len(self._laps) - 1
        if not self._laps:
            return
        last = self._laps[self._lap_count]
        last['end'] = get_current_time()
        last['total'] = last['end'] - last['start']

    def summary(self):
        """Return a dict describing all timer activity so far."""
        return {
            'running': self.state,
            'start': self._start_time,
            'end': self._end_time,
            'total': self._end_time - self._start_time,
            'paused': self._total_pause_time,
            'laps': self._laps,
        }

    def pause(self):
        """Begin a pause; remember when it started."""
        self.state = PAUSED
        self._pause_time = get_current_time()

    def unpause(self):
        """End the current pause and fold it into the pause total."""
        self.state = RUNNING
        self._total_pause_time = self._total_pause_time + (get_current_time() - self._pause_time)
        self._pause_time = 0
1990f239cf686835c5c5d14c2fe75fbe295dac43 | 8,730 | py | Python | sunpy/io/fits.py | derdon/sunpy | 619102cd48c73a326c45263369446be9b74366e8 | [
"MIT"
] | null | null | null | sunpy/io/fits.py | derdon/sunpy | 619102cd48c73a326c45263369446be9b74366e8 | [
"MIT"
] | null | null | null | sunpy/io/fits.py | derdon/sunpy | 619102cd48c73a326c45263369446be9b74366e8 | [
"MIT"
] | null | null | null | """
FITS File Reader
Notes
-----
FITS
[1] FITS files allow comments to be attached to every value in the header.
This is implemented in this module as a KEYCOMMENTS dictionary in the
sunpy header. To add a comment to the file on write, add a comment to this
dictionary with the same name as a key in the header (upcased).
PyFITS
[1] Due to the way PyFITS works with images the header dictionary may
differ depending on whether is accessed before or after the fits[0].data
is requested. If the header is read before the data then the original
header will be returned. If the header is read after the data has been
accessed then the data will have been scaled and a modified header
reflecting these changes will be returned: BITPIX may differ and
BSCALE and B_ZERO may be dropped in the modified version.
[2] The verify('fix') call attempts to handle violations of the FITS
standard. For example, nan values will be converted to "nan" strings.
Attempting to cast a pyfits header to a dictionary while it contains
invalid header tags will result in an error so verifying it early on
makes the header easier to work with later.
References
----------
| http://stackoverflow.com/questions/456672/class-factory-in-python
| http://stsdas.stsci.edu/download/wikidocs/The_PyFITS_Handbook.pdf
"""
from __future__ import absolute_import, division, print_function
import os
import re
import itertools
import collections
from astropy.io import fits
from sunpy.io.header import FileHeader
from sunpy.extern.six.moves import zip
__all__ = ['read', 'get_header', 'write', 'extract_waveunit']
__author__ = "Keith Hughitt, Stuart Mumford, Simon Liedtke"
__email__ = "keith.hughitt@nasa.gov"
def read(filepath, hdus=None):
    """
    Read a fits file

    Parameters
    ----------
    filepath : `str`
        The fits file to be read
    hdus : `int` or iterable
        The HDU indexes to read from the file. A non-int, non-iterable
        value raises `TypeError`.

    Returns
    -------
    pairs : `list`
        A list of (data, header) tuples

    Notes
    -----
    This routine reads the requested HDU's and returns the data and a
    FileHeader instance for each one. All comments in the original file
    are concatenated into a single 'comment' key in the returned
    FileHeader.
    """
    hdulist = fits.open(filepath)
    try:
        # Fix standard violations up front so headers are safe to use
        # (see the module notes on verify('fix')).
        hdulist.verify('silentfix+warn')
        if hdus is None:
            selected = hdulist
        else:
            # BUG FIX: the old code rebound `hdulist` to a single HDU or a
            # plain list, which broke verify(), get_header() and the
            # `finally: hdulist.close()` below. Keep the original handle
            # and build a separate HDUList for the selection instead.
            # (Also avoids `collections.Iterable`, removed in Python 3.10.)
            indices = [hdus] if isinstance(hdus, int) else list(hdus)
            # Wrap in an HDUList (not a plain list) so get_header() still
            # recognizes it and single-HDU selections iterate correctly.
            selected = fits.HDUList([hdulist[i] for i in indices])
        headers = get_header(selected)
        pairs = [(hdu.data, header) for hdu, header in zip(selected, headers)]
    finally:
        # Close the original file handle; `selected` may be a derived
        # HDUList with no file of its own.
        hdulist.close()
    return pairs
def get_header(afile):
    """
    Read a fits file and return just the headers for all HDU's. In each header,
    the key WAVEUNIT denotes the wavelength unit which is used to describe the
    value of the key WAVELNTH.

    Parameters
    ----------
    afile : `str` or fits.HDUList
        The file to be read, or HDUList to process.

    Returns
    -------
    headers : `list`
        A list of FileHeader headers.
    """
    if isinstance(afile, fits.HDUList):
        # Caller owns the HDUList; do not close it on the way out.
        hdulist = afile
        close = False
    else:
        hdulist = fits.open(afile)
        # Repair FITS-standard violations early so the header can safely
        # be cast to a dict later (see module notes on verify).
        hdulist.verify('silentfix')
        close = True
    try:
        headers = []
        for hdu in hdulist:
            # COMMENT/HISTORY are commentary cards; flatten each into a
            # single string, defaulting to "" when the card is absent.
            try:
                comment = "".join(hdu.header['COMMENT']).strip()
            except KeyError:
                comment = ""
            try:
                history = "".join(hdu.header['HISTORY']).strip()
            except KeyError:
                history = ""
            header = FileHeader(hdu.header)
            header['COMMENT'] = comment
            header['HISTORY'] = history
            # Strip out KEYCOMMENTS to a dict, the hard way
            keydict = {}
            for card in hdu.header.cards:
                if card.comment != '':
                    keydict.update({card.keyword: card.comment})
            header['KEYCOMMENTS'] = keydict
            # Derive the wavelength unit from the header just built.
            header['WAVEUNIT'] = extract_waveunit(header)
            headers.append(header)
    finally:
        if close:
            hdulist.close()
    return headers
def write(fname, data, header, **kwargs):
    """
    Take a data header pair and write a FITS file.

    Parameters
    ----------
    fname : `str`
        File name, with extension
    data : `numpy.ndarray`
        n-dimensional data array
    header : `dict`
        A header dictionary

    Extra keyword arguments are forwarded to ``fits.writeto``.
    """
    # Copy header so the one in memory is left alone while changing it for
    # write.
    header = header.copy()
    fits_header = fits.Header()
    # KEYCOMMENTS is a sunpy-specific key->comment mapping (see module
    # notes); it is attached to the astropy header separately below.
    key_comments = header.pop('KEYCOMMENTS', False)
    for k, v in header.items():
        # Commentary cards (COMMENT/HISTORY) need dedicated add_* calls;
        # a plain append would mishandle their multi-line values.
        if isinstance(v, fits.header._HeaderCommentaryCards):
            if k == 'comments':
                comments = str(v).split('\n')
                for com in comments:
                    # BUG FIX: astropy's Header has no `add_comments`
                    # method; the correct API is `add_comment`. The old
                    # call raised AttributeError whenever the header
                    # carried a 'comments' commentary card.
                    fits_header.add_comment(com)
            elif k == 'history':
                hists = str(v).split('\n')
                for hist in hists:
                    fits_header.add_history(hist)
            elif k != '':
                fits_header.append(fits.Card(k, str(v).split('\n')))
        else:
            fits_header.append(fits.Card(k, v))
    if isinstance(key_comments, dict):
        for k, v in key_comments.items():
            fits_header.comments[k] = v
    elif key_comments:
        raise TypeError("KEYCOMMENTS must be a dictionary")
    # Default to fixing standard violations on write; callers may override.
    fitskwargs = {'output_verify': 'fix'}
    fitskwargs.update(kwargs)
    fits.writeto(os.path.expanduser(fname), data, header=fits_header,
                 **fitskwargs)
def extract_waveunit(header):
    """Attempt to read the wavelength unit from a given FITS header.

    Parameters
    ----------
    header : FileHeader
        A header as produced by :func:`sunpy.io.fits.get_header`.

    Returns
    -------
    waveunit : `str`
        The wavelength unit that could be found or ``None`` otherwise.

    Notes
    -----
    The goal is to return a string usable with ``astropy.units.Unit``.
    Detection is attempted in this order:

    1. the WAVEUNIT card itself (numeric values are powers of ten of a
       metre, e.g. ``-9`` -> ``'nm'``; other values are lowercased as-is),
    2. the comment attached to the WAVEUNIT card (``'in meters'`` -> ``'m'``),
    3. the comment attached to the WAVELNTH card
       (``'[$UNIT] ...'`` or ``'Observed wavelength ($UNIT)'`` -> ``$UNIT``).
    """
    def _from_waveunit_comment(comment):
        # Only one phrasing is recognized; anything else yields None.
        return 'm' if comment == 'in meters' else None

    key_comments = header['KEYCOMMENTS']
    waveunit_comment = key_comments.get('WAVEUNIT')
    wavelnth_comment = key_comments.get('WAVELNTH')
    waveunit = header.get('WAVEUNIT')

    if waveunit is not None:
        # Numeric WAVEUNIT values encode the unit as a power of ten of a
        # metre; 0 defers to the card's comment. Unknown values fall back
        # to their lowercased string form.
        submultiples = {
            0: _from_waveunit_comment(waveunit_comment),
            -1: 'dm',
            -2: 'cm',
            -3: 'mm',
            -6: 'um',
            -9: 'nm',
            -10: 'angstrom',
            -12: 'pm',
            -15: 'fm',
            -18: 'am',
            -21: 'zm',
            -24: 'ym',
        }
        waveunit = submultiples.get(waveunit, str(waveunit).lower())
    elif waveunit_comment is not None:
        waveunit = _from_waveunit_comment(waveunit_comment)
    elif wavelnth_comment is not None:
        # Supported comment shapes (where $UNIT is e.g. "nm" or "Angstrom"):
        #   "Observed wavelength ($UNIT)"
        #   "[$UNIT] ..."
        for pattern in (r'Observed wavelength \((\w+?)\)$', r'^\[(\w+?)\]'):
            match = re.search(pattern, wavelnth_comment)
            if match is not None:
                waveunit = match.group(1)
                break

    # Some HMI files carry an empty WAVEUNIT card; treat that as unknown.
    return None if waveunit == '' else waveunit
| 31.861314 | 79 | 0.60504 |
bcbfee807b2bcae1f08ee4a549ed33a4c5c16767 | 224 | py | Python | src/cos.py | Joebennett19/Monday_morning | 35869a775d51e3c23a05ba2326c174bbe2d2dd58 | [
"MIT"
] | null | null | null | src/cos.py | Joebennett19/Monday_morning | 35869a775d51e3c23a05ba2326c174bbe2d2dd58 | [
"MIT"
] | null | null | null | src/cos.py | Joebennett19/Monday_morning | 35869a775d51e3c23a05ba2326c174bbe2d2dd58 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Mon Mar 15 10:38:34 2021
@author: mm16j2b
"""
import matplotlib.pyplot as plt
import numpy
# Sample the interval [0, 5] at 100 evenly spaced points.
x=numpy.linspace(0,5,100)
# Evaluate the cosine at every sample point.
u=numpy.cos(x)
# Plot u against x on a single axes object and display the figure window.
fig, ax1 = plt.subplots()
ax1.plot(x,u)
plt.show()
5db5e6c2b2c1e2e6b015828d68f86706d1f38083 | 5,425 | py | Python | azure-mgmt-compute/azure/mgmt/compute/v2017_12_01/models/virtual_machine_scale_set.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 1 | 2021-09-07T18:36:04.000Z | 2021-09-07T18:36:04.000Z | azure-mgmt-compute/azure/mgmt/compute/v2017_12_01/models/virtual_machine_scale_set.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 2 | 2019-10-02T23:37:38.000Z | 2020-10-02T01:17:31.000Z | azure-mgmt-compute/azure/mgmt/compute/v2017_12_01/models/virtual_machine_scale_set.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 1 | 2019-06-17T22:18:23.000Z | 2019-06-17T22:18:23.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class VirtualMachineScaleSet(Resource):
    """Describes a Virtual Machine Scale Set.

    Variables are only populated by the server, and will be ignored when
    sending a request. All required parameters must be populated in order
    to send to Azure.

    Server-populated (read-only):

    :ivar id: Resource Id
    :vartype id: str
    :ivar name: Resource name
    :vartype name: str
    :ivar type: Resource type
    :vartype type: str
    :ivar provisioning_state: The provisioning state, which only appears in
     the response.
    :vartype provisioning_state: str
    :ivar unique_id: Specifies the ID which uniquely identifies a Virtual
     Machine Scale Set.
    :vartype unique_id: str

    Caller-settable:

    :param location: Required. Resource location
    :type location: str
    :param tags: Resource tags
    :type tags: dict[str, str]
    :param sku: The virtual machine scale set sku.
    :type sku: ~azure.mgmt.compute.v2017_12_01.models.Sku
    :param plan: Specifies information about the marketplace image used to
     create the virtual machine. This element is only used for marketplace
     images. Before you can use a marketplace image from an API, you must
     enable the image for programmatic use. In the Azure portal, find the
     marketplace image that you want to use and then click **Want to deploy
     programmatically, Get Started ->**. Enter any required information and
     then click **Save**.
    :type plan: ~azure.mgmt.compute.v2017_12_01.models.Plan
    :param upgrade_policy: The upgrade policy.
    :type upgrade_policy: ~azure.mgmt.compute.v2017_12_01.models.UpgradePolicy
    :param virtual_machine_profile: The virtual machine profile.
    :type virtual_machine_profile:
     ~azure.mgmt.compute.v2017_12_01.models.VirtualMachineScaleSetVMProfile
    :param overprovision: Specifies whether the Virtual Machine Scale Set
     should be overprovisioned.
    :type overprovision: bool
    :param single_placement_group: When true this limits the scale set to a
     single placement group, of max size 100 virtual machines.
    :type single_placement_group: bool
    :param zone_balance: Whether to force strictly even Virtual Machine
     distribution cross x-zones in case there is zone outage.
    :type zone_balance: bool
    :param platform_fault_domain_count: Fault Domain count for each placement
     group.
    :type platform_fault_domain_count: int
    :param identity: The identity of the virtual machine scale set, if
     configured.
    :type identity:
     ~azure.mgmt.compute.v2017_12_01.models.VirtualMachineScaleSetIdentity
    :param zones: The virtual machine scale set zones.
    :type zones: list[str]
    """

    # Constraints enforced by the msrest (de)serializer.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
        'provisioning_state': {'readonly': True},
        'unique_id': {'readonly': True},
    }

    # Maps each Python attribute onto its wire-format key and msrest type.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'sku': {'key': 'sku', 'type': 'Sku'},
        'plan': {'key': 'plan', 'type': 'Plan'},
        'upgrade_policy': {'key': 'properties.upgradePolicy', 'type': 'UpgradePolicy'},
        'virtual_machine_profile': {'key': 'properties.virtualMachineProfile', 'type': 'VirtualMachineScaleSetVMProfile'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'overprovision': {'key': 'properties.overprovision', 'type': 'bool'},
        'unique_id': {'key': 'properties.uniqueId', 'type': 'str'},
        'single_placement_group': {'key': 'properties.singlePlacementGroup', 'type': 'bool'},
        'zone_balance': {'key': 'properties.zoneBalance', 'type': 'bool'},
        'platform_fault_domain_count': {'key': 'properties.platformFaultDomainCount', 'type': 'int'},
        'identity': {'key': 'identity', 'type': 'VirtualMachineScaleSetIdentity'},
        'zones': {'key': 'zones', 'type': '[str]'},
    }

    def __init__(self, **kwargs):
        super(VirtualMachineScaleSet, self).__init__(**kwargs)
        # Optional, caller-settable properties are copied verbatim from the
        # keyword arguments (missing ones default to None).
        optional = ('sku', 'plan', 'upgrade_policy', 'virtual_machine_profile',
                    'overprovision', 'single_placement_group', 'zone_balance',
                    'platform_fault_domain_count', 'identity', 'zones')
        for attr_name in optional:
            setattr(self, attr_name, kwargs.get(attr_name, None))
        # Read-only values are populated by the service, never from kwargs.
        self.provisioning_state = None
        self.unique_id = None
| 46.367521 | 122 | 0.658249 |
78b144129b9b09504d390fe836de7b506fd90bad | 7,226 | py | Python | imsegm/utils/read_zvi.py | alexlib/pyImSegm | eb75b48066047a9201ba45ad85f816995b35eef4 | [
"BSD-3-Clause"
] | null | null | null | imsegm/utils/read_zvi.py | alexlib/pyImSegm | eb75b48066047a9201ba45ad85f816995b35eef4 | [
"BSD-3-Clause"
] | null | null | null | imsegm/utils/read_zvi.py | alexlib/pyImSegm | eb75b48066047a9201ba45ad85f816995b35eef4 | [
"BSD-3-Clause"
] | 1 | 2019-04-07T09:35:23.000Z | 2019-04-07T09:35:23.000Z | """
https://searchcode.com/codesearch/view/40141634/
read ZVI (Zeiss) image file
- incomplete support
- open uncompressed image from multi item image (Count>0)
- require OleFileIO_PL - a Python module to read MS OLE2 files
http://www.decalage.info/en/python/olefileio#attachments
.. code-block:: python
>>> import os, sys
>>> sys.path += [os.path.abspath(os.path.join('..', '..'))]
>>> import imsegm.utils.data_io as tl_io
>>> path_file = os.path.join('data_images', 'others', 'sample.zvi')
>>> path_file = tl_io.update_path(path_file)
>>> n = get_layer_count(path_file)
>>> get_dir(path_file) # doctest: +ELLIPSIS
[...]
>>> for p in range(n):
... zvi = zvi_read(path_file, p)
... arr = zvi.Image.Array
... arr.shape
(488, 648)
(488, 648)
(488, 648)
(488, 648)
>>> img = load_image(path_file)
>>> img.shape
(4, 488, 648)
"""
import struct
from collections import namedtuple
import OleFileIO_PL
import numpy as np
def i32(data):
    """Return a little-endian signed int32 read from the first 4 bytes of *data*.

    The previous implementation combined two *signed* int16 halves as
    ``(high << 16) + low``, which is wrong whenever the low word has its
    sign bit set: ``b'\\xff\\xff\\x00\\x00'`` decoded to -1 instead of 65535,
    and ``b'\\xff\\xff\\xff\\xff'`` to -65537 instead of -1.  Decoding the
    4 bytes directly as one signed '<i' value is correct over the whole
    int32 range.
    """
    return struct.unpack('<i', data[:4])[0]
def get_hex(data, n=16):
    """Return the first *n* bytes of *data* as '|'-separated two-digit hex.

    The original only worked with Python 2 byte strings, where indexing
    yields 1-character strings that ``ord`` can accept; on Python 3
    ``bytes`` indexing yields ``int`` and ``ord`` raised TypeError.
    Handle both representations.
    """
    def _as_int(b):
        # bytes indexing gives an int on Python 3, a 1-char str on Python 2
        return b if isinstance(b, int) else ord(b)
    return '|'.join(['%02x' % _as_int(data[i]) for i in range(n)])
def read_struct(data, t):
    """Read one value of type *t* from the byte string *data*.

    Returns ``[value, remaining_data]``.  The leading two bytes (the OLE
    variant-type I16) are always skipped.  Supported type codes are
    '?', 'EMPTY', 'NULL' (no payload), 'I2', 'I4', 'BLOB' and 'BSTR';
    any other code raises ValueError.

    Fixes over the original:
      * string type codes were compared with ``is`` (identity), which only
        works by accident of CPython interning -- use equality instead;
      * the "unknown type" message formatted the *builtin* ``type`` instead
        of the offending code ``t``;
      * the BSTR branch called ``struct.unpack('s', ...)`` which raises
        struct.error for any payload longer than one byte -- return the raw
        payload slice instead;
      * 32-bit sizes/values are decoded as one signed '<i' rather than two
        signed int16 halves (the latter is wrong when the low word has its
        sign bit set).
    """
    def _i32(raw):
        # little-endian signed 32-bit int
        return struct.unpack('<i', raw[:4])[0]

    next_data = data[2:]  # skip the variant-type I16
    if t in ('?', 'EMPTY', 'NULL'):
        return [None, next_data]
    if t == 'I2':
        return [struct.unpack('<h', next_data[:2])[0], next_data[2:]]
    if t == 'I4':
        return [_i32(next_data), next_data[4:]]
    if t == 'BLOB':
        size = _i32(next_data)
        return [next_data[4:4 + size], next_data[4 + size:]]
    if t == 'BSTR':
        # ! 4 extra bytes after the length word are escaped
        size = _i32(next_data)
        if size > 0:
            s = next_data[4:4 + size]
        else:
            s = ''
        return [s, next_data[4 + 4 + size:]]
    raise ValueError('unknown type:%s' % t)
# Parsed contents of the 'Image/Contents' stream: container-wide information
# about the image (dimensions, pixel format, number of planes, ...).
ZviImageTuple = namedtuple(
    'ZviImageTuple',
    'Version FileName Width Height Depth PIXEL_FORMAT Count '
    'ValidBitsPerPixel m_PluginCLSID Others Layers Scaling'
)
def read_image_container_content(stream):
    """Parse the 'Image/Contents' stream and return it as a ZviImageTuple."""
    remaining = stream.read()
    # Field types in the exact order they appear in the stream; this matches
    # the ZviImageTuple field order (Version, FileName, Width, Height, Depth,
    # PIXEL_FORMAT, Count, ValidBitsPerPixel, m_PluginCLSID, Others, Layers,
    # Scaling).  Type/TypeDescription are skipped, as before.
    field_types = ['I4', 'BSTR'] + ['I4'] * 9 + ['I2']
    values = []
    for field_type in field_types:
        value, remaining = read_struct(remaining, field_type)
        values.append(value)
    return ZviImageTuple(*values)
# Parsed contents of an 'Image/Item(n)/Contents' stream: per-plane metadata
# plus the decoded Image payload itself.
ZviItemTuple = namedtuple(
    'ZviItemTuple',
    'Version FileName Width Height Depth PIXEL_FORMAT Count '
    'ValidBitsPerPixel Others Layers Scaling Image'
)
# Maps the ZVI pixel-format code to (bytes per pixel, format name).
PIXEL_FORMAT = {
    1: (3, 'ByteBGR'),
    2: (4, 'ByteBGRA'),
    3: (1, 'Byte'),
    4: (2, 'Word'),
    5: (4, 'Long'),
    6: (4, 'Float'),
    7: (8, 'Double'),
    8: (6, 'WordBGR'),
    9: (4, 'LongBGR'),
}
def read_item_storage_content(stream):
    """Parse an 'Image/Item(n)/Contents' stream and return a ZviItemTuple."""
    data = stream.read()
    remaining = data
    # Field types in stream order; matches the first eleven ZviItemTuple
    # fields (Version, FileName, Width, Height, Depth, PIXEL_FORMAT, Count,
    # ValidBitsPerPixel, Others, Layers, Scaling).
    field_types = ['I4', 'BSTR'] + ['I4'] * 6 + ['BLOB'] * 3
    values = []
    for field_type in field_types:
        value, remaining = read_struct(remaining, field_type)
        values.append(value)
    width, height, pixel_format = values[2], values[3], values[5]
    # The pixel payload sits at the very end of the stream: raw image size
    # plus the 28-byte image header.
    offset = width * height * PIXEL_FORMAT[pixel_format][0] + 28
    image = parse_image(data[-offset:])
    # Group everything into one single structure (namedtuple).
    return ZviItemTuple(*(values + [image]))
# Decoded image plane: the 28-byte header fields plus the pixel Array.
ImageTuple = namedtuple(
    'ImageTuple',
    'Version Width Height Depth PixelWidth PIXEL_FORMAT '
    'ValidBitsPerPixel Array'
)


def parse_image(data):
    """Return an ImageTuple parsed from raw image data (28-byte header + pixels).

    Fixes over the original:
      * the seven header ints are decoded with one ``struct.unpack('<7i')``
        call instead of the hand-rolled ``i32`` helper, which mis-decoded
        any int32 whose low 16-bit word had its sign bit set;
      * ``np.fromstring`` on binary data is deprecated (and removed in
        modern NumPy) -- ``np.frombuffer`` is the supported replacement.
        ``.copy()`` keeps the array writable, matching the old behaviour
        (frombuffer alone returns a read-only view of the input bytes).
    """
    (version, width, height, depth, pixel_width, pixel_format,
     valid_bits_per_pixel) = struct.unpack('<7i', data[:28])
    raw = np.frombuffer(data[28:], 'uint16').copy()
    array = np.reshape(raw, (height, width))
    return ImageTuple(version, width, height, depth, pixel_width,
                      pixel_format, valid_bits_per_pixel, array)
def get_layer_count(file_name, ole=None):
    """Return the number of image planes stored in the zvi file."""
    if ole is None:
        ole = OleFileIO_PL.OleFileIO(file_name)
    stream = ole.openstream(['Image', 'Contents'])
    return read_image_container_content(stream).Count
def get_dir(file_name, ole=None):
    """Return the content structure (streams) of the zvi file,
    one entry per stream, each prefixed with the stream length."""
    if ole is None:
        ole = OleFileIO_PL.OleFileIO(file_name)
    return ['%10d %s' % (len(ole.openstream(path).read()), path)
            for path in ole.listdir()]
def zvi_read(fname, plane, ole=None):
    """Return the ZviItemTuple for image plane *plane* of zvi file *fname*."""
    if ole is None:
        ole = OleFileIO_PL.OleFileIO(fname)
    stream = ole.openstream(['Image', 'Item(%d)' % plane, 'Contents'])
    return read_item_storage_content(stream)
def load_image(path_img):
    """Load every plane of the zvi file at *path_img* into one numpy array."""
    ole = OleFileIO_PL.OleFileIO(path_img)
    planes = get_layer_count('', ole=ole)
    # Stack the per-plane pixel arrays along a new leading axis.
    stack = [zvi_read('', idx, ole=ole).Image.Array for idx in range(planes)]
    return np.array(stack)
| 31.012876 | 78 | 0.628702 |
90f15fa1814c42f47fc39bcdc1700f6f6a464c46 | 7,705 | py | Python | tests/test_dwavesampler.py | wbernoudy/dwave-system | 21be9a868a156cb3d8f929285d67470331da8ad8 | [
"Apache-2.0"
] | 1 | 2021-03-15T19:29:09.000Z | 2021-03-15T19:29:09.000Z | tests/test_dwavesampler.py | wbernoudy/dwave-system | 21be9a868a156cb3d8f929285d67470331da8ad8 | [
"Apache-2.0"
] | null | null | null | tests/test_dwavesampler.py | wbernoudy/dwave-system | 21be9a868a156cb3d8f929285d67470331da8ad8 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# =============================================================================
import unittest
import random
import warnings
from collections import namedtuple
from concurrent.futures import Future
import numpy as np
import dimod
import dwave_networkx as dnx
from dwave.system.samplers import DWaveSampler
try:
# py3
import unittest.mock as mock
except ImportError:
# py2
import mock
# Working graph used by MockSolver below: a Chimera C16 topology (2048 qubits).
C16 = dnx.chimera_graph(16)
# remove one node from C16 to simulate a not-fully-yielded system
C16.remove_node(42)
# Edge set of the mock solver, stored as plain tuples.
edges = set(tuple(edge) for edge in C16.edges)
edges.update([(v, u) for u, v in edges])  # solver has bi-directional
class MockSolver():
    """Stand-in for a dwave.cloud solver.

    Mimics the parts of the cloud-client solver interface the tests use:
    ``nodes``/``edges``/``properties`` attributes and async
    ``sample_ising``/``sample_qubo`` methods that return a resolved Future
    holding one random sample over all 2048 qubits.

    The two sample methods previously duplicated ~20 lines of result-dict
    construction; that is factored into private helpers here.
    """

    nodes = set(range(2048))
    edges = edges
    properties = {'readout_thermalization_range': [0, 10000],
                  'annealing_time_range': [1, 2000],
                  'default_readout_thermalization': 0,
                  'parameters': {'num_spin_reversal_transforms': '',
                                 'programming_thermalization': '',
                                 'anneal_offsets': '',
                                 'num_reads': '',
                                 'max_answers': '',
                                 'readout_thermalization': '',
                                 'beta': "",
                                 'answer_mode': '',
                                 'auto_scale': '',
                                 'postprocess': "",
                                 'anneal_schedule': '',
                                 'chains': ""},
                  'chip_id': 'MockSolver'}

    def _validate_kwargs(self, kwargs):
        # Real solvers reject parameters they do not advertise; mimic that.
        for key in kwargs:
            if key not in self.properties['parameters']:
                raise ValueError('unknown solver parameter: %r' % (key,))

    def _resolve_future(self, solutions, energies):
        # Package one fake answer in the same shape SAPI returns, delivered
        # through an already-resolved Future like the real async client.
        result = {'num_variables': 2048,
                  'format': 'qp',
                  'num_occurrences': [1],
                  'active_variables': list(range(2048)),
                  'solutions': solutions,
                  'timing': {'total_real_time': 11511, 'anneal_time_per_run': 20,
                             'post_processing_overhead_time': 2042, 'qpu_sampling_time': 164,
                             'readout_time_per_run': 123,
                             'qpu_delay_time_per_sample': 21,
                             'qpu_anneal_time_per_sample': 20,
                             'total_post_processing_time': 2042,
                             'qpu_programming_time': 8740,
                             'run_time_chip': 164,
                             'qpu_access_time': 11511,
                             'qpu_readout_time_per_sample': 123},
                  'occurrences': [1]}
        result['samples'] = result['solutions']
        result['energies'] = energies
        future = Future()
        future.set_result(result)
        return future

    def sample_ising(self, h, J, **kwargs):
        """Return a Future with one random spin (+/-1) sample and its energy."""
        self._validate_kwargs(kwargs)
        solutions = [[random.choice((-1, +1)) for __ in range(2048)]]
        energies = [dimod.ising_energy(sample, h, J) for sample in solutions]
        return self._resolve_future(solutions, energies)

    def sample_qubo(self, Q, **kwargs):
        """Return a Future with one random binary (0/1) sample and its energy."""
        self._validate_kwargs(kwargs)
        solutions = [[random.choice((0, 1)) for __ in range(2048)]]
        energies = [dimod.qubo_energy(sample, Q) for sample in solutions]
        return self._resolve_future(solutions, energies)
class TestDwaveSampler(unittest.TestCase):
    """Unit tests for DWaveSampler, run against the module-level MockSolver."""
    @mock.patch('dwave.system.samplers.dwave_sampler.Client')
    def setUp(self, MockClient):
        """Build a DWaveSampler whose cloud client is mocked out and whose
        solver is replaced by MockSolver."""
        # using the mock
        self.sampler = DWaveSampler()
        self.sampler.solver = MockSolver()
    @mock.patch('dwave.system.samplers.dwave_sampler.Client')
    def test_solver_init(self, MockClient):
        """Deprecation warning is raised for `solver_features` use, but it still works."""
        # assertWarns not available in py2
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            DWaveSampler(solver_features={'qpu': True})
        self.assertEqual(len(w), 1)
        self.assertTrue(issubclass(w[-1].category, DeprecationWarning))
        # the supported `solver` spelling must not warn
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            DWaveSampler(solver={'qpu': True})
        self.assertEqual(len(w), 0)
        # the solver filter dict is forwarded to the cloud client unchanged
        MockClient.reset_mock()
        solver = {'qpu': True, 'num_qubits__gt': 1000}
        sampler = DWaveSampler(solver=solver)
        MockClient.from_config.assert_called_once_with(solver=solver)
    def test_sample_ising_variables(self):
        """sample_ising: column count tracks the variables given, samples are
        nonzero spins, and num_occurrences/timing info is attached."""
        sampler = self.sampler
        response = sampler.sample_ising({0: -1, 1: 1}, {})
        rows, cols = response.record.sample.shape
        self.assertEqual(cols, 2)
        response = sampler.sample_ising({}, {(0, 4): 1})
        rows, cols = response.record.sample.shape
        self.assertEqual(cols, 2)
        # spin samples must never contain 0
        self.assertFalse(np.any(response.record.sample == 0))
        self.assertIs(response.vartype, dimod.SPIN)
        self.assertIn('num_occurrences', response.record.dtype.fields)
        self.assertIn('timing', response.info)
    def test_sample_qubo_variables(self):
        """sample_qubo: column count tracks the variables given, samples are
        binary (0/1), and num_occurrences/timing info is attached."""
        sampler = self.sampler
        response = sampler.sample_qubo({(0, 0): -1, (1, 1): 1})
        rows, cols = response.record.sample.shape
        self.assertEqual(cols, 2)
        response = sampler.sample_qubo({(0, 0): -1, (1, 1): 1})
        rows, cols = response.record.sample.shape
        self.assertEqual(cols, 2)
        # binary samples are 0/1, hence non-negative
        self.assertTrue(np.all(response.record.sample >= 0))
        self.assertIs(response.vartype, dimod.BINARY)
        self.assertIn('num_occurrences', response.record.dtype.fields)
        self.assertIn('timing', response.info)
class TestDWaveSamplerAnnealSchedule(unittest.TestCase):
    """Checks for DWaveSampler.validate_anneal_schedule."""
    def test_typical(self):
        """A well-formed 4-point schedule passes validation without raising."""
        class MockScheduleSampler(DWaveSampler):
            # only the attributes validate_anneal_schedule consults
            parameters = {'anneal_schedule': ''}
            properties = {'max_anneal_schedule_points': 4,
                          'annealing_time_range': [1, 2000]}
            def __init__(self):
                pass
        DWaveSampler.validate_anneal_schedule(MockScheduleSampler(), [(0, 1), (55.0, 0.45), (155.0, 0.45), (210.0, 1)])
| 37.402913 | 119 | 0.559247 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.